// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2019-2020 Realtek Corporation
 */

#include "cam.h"
#include "chan.h"
#include "coex.h"
#include "debug.h"
#include "fw.h"
#include "mac.h"
#include "phy.h"
#include "ps.h"
#include "reg.h"
#include "util.h"

static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev,
				    struct sk_buff *skb);
static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb,
				 struct rtw89_wait_info *wait, unsigned int cond);

static struct sk_buff *rtw89_fw_h2c_alloc_skb(struct rtw89_dev *rtwdev, u32 len,
					      bool header)
{
	struct sk_buff *skb;
	u32 header_len = 0;
	u32 h2c_desc_size = rtwdev->chip->h2c_desc_size;

	if (header)
		header_len = H2C_HEADER_LEN;

	skb = dev_alloc_skb(len + header_len + h2c_desc_size);
	if (!skb)
		return NULL;
	skb_reserve(skb, header_len + h2c_desc_size);
	memset(skb->data, 0, len);

	return skb;
}

struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(struct rtw89_dev *rtwdev, u32 len)
{
	return rtw89_fw_h2c_alloc_skb(rtwdev, len, true);
}

struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(struct rtw89_dev *rtwdev, u32 len)
{
	return rtw89_fw_h2c_alloc_skb(rtwdev, len, false);
}

static u8 _fw_get_rdy(struct rtw89_dev *rtwdev)
{
	u8 val = rtw89_read8(rtwdev, R_AX_WCPU_FW_CTRL);

	return FIELD_GET(B_AX_WCPU_FWDL_STS_MASK, val);
}

#define FWDL_WAIT_CNT 400000
int rtw89_fw_check_rdy(struct rtw89_dev *rtwdev)
{
	u8 val;
	int ret;

	ret = read_poll_timeout_atomic(_fw_get_rdy, val,
				       val == RTW89_FWDL_WCPU_FW_INIT_RDY,
				       1, FWDL_WAIT_CNT, false, rtwdev);
	if (ret) {
		switch (val) {
		case RTW89_FWDL_CHECKSUM_FAIL:
			rtw89_err(rtwdev, "fw checksum fail\n");
			return -EINVAL;

		case RTW89_FWDL_SECURITY_FAIL:
			rtw89_err(rtwdev, "fw security fail\n");
			return -EINVAL;

		case RTW89_FWDL_CV_NOT_MATCH:
			rtw89_err(rtwdev, "fw cv not match\n");
			return -EINVAL;

		default:
			return -EBUSY;
		}
	}

	set_bit(RTW89_FLAG_FW_RDY, rtwdev->flags);

	return 0;
}

static int rtw89_fw_hdr_parser_v0(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
				  struct rtw89_fw_bin_info *info)
{
	const struct rtw89_fw_hdr *fw_hdr = (const struct rtw89_fw_hdr *)fw;
	struct rtw89_fw_hdr_section_info *section_info;
	const struct rtw89_fw_dynhdr_hdr *fwdynhdr;
	const struct rtw89_fw_hdr_section *section;
	const u8 *fw_end = fw + len;
	const u8 *bin;
	u32 base_hdr_len;
	u32 mssc_len = 0;
	u32 i;

	if (!info)
		return -EINVAL;

	info->section_num = le32_get_bits(fw_hdr->w6, FW_HDR_W6_SEC_NUM);
	base_hdr_len = struct_size(fw_hdr, sections, info->section_num);
	info->dynamic_hdr_en = le32_get_bits(fw_hdr->w7, FW_HDR_W7_DYN_HDR);

	if (info->dynamic_hdr_en) {
		info->hdr_len = le32_get_bits(fw_hdr->w3, FW_HDR_W3_LEN);
		info->dynamic_hdr_len = info->hdr_len - base_hdr_len;
		fwdynhdr = (const struct rtw89_fw_dynhdr_hdr *)(fw + base_hdr_len);
		if (le32_to_cpu(fwdynhdr->hdr_len) != info->dynamic_hdr_len) {
			rtw89_err(rtwdev, "[ERR]invalid fw dynamic header len\n");
			return -EINVAL;
		}
	} else {
		info->hdr_len = base_hdr_len;
		info->dynamic_hdr_len = 0;
	}

	bin = fw + info->hdr_len;

	/* jump to section header */
	section_info = info->section_info;
	for (i = 0; i < info->section_num; i++) {
		section = &fw_hdr->sections[i];
		section_info->type =
			le32_get_bits(section->w1, FWSECTION_HDR_W1_SECTIONTYPE);
		if (section_info->type == FWDL_SECURITY_SECTION_TYPE) {
			section_info->mssc =
				le32_get_bits(section->w2, FWSECTION_HDR_W2_MSSC);
			mssc_len += section_info->mssc * FWDL_SECURITY_SIGLEN;
		} else {
			section_info->mssc = 0;
		}

		section_info->len = le32_get_bits(section->w1, FWSECTION_HDR_W1_SEC_SIZE);
		if (le32_get_bits(section->w1, FWSECTION_HDR_W1_CHECKSUM))
			section_info->len += FWDL_SECTION_CHKSUM_LEN;
		section_info->redl = le32_get_bits(section->w1, FWSECTION_HDR_W1_REDL);
		section_info->dladdr =
			le32_get_bits(section->w0, FWSECTION_HDR_W0_DL_ADDR) & 0x1fffffff;
		section_info->addr = bin;
		bin += section_info->len;
		section_info++;
	}

	if (fw_end != bin + mssc_len) {
		rtw89_err(rtwdev, "[ERR]fw bin size\n");
		return -EINVAL;
	}

	return 0;
}

static int rtw89_fw_hdr_parser_v1(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
				  struct rtw89_fw_bin_info *info)
{
	const struct rtw89_fw_hdr_v1 *fw_hdr = (const struct rtw89_fw_hdr_v1 *)fw;
	struct rtw89_fw_hdr_section_info *section_info;
	const struct rtw89_fw_dynhdr_hdr *fwdynhdr;
	const struct rtw89_fw_hdr_section_v1 *section;
	const u8 *fw_end = fw + len;
	const u8 *bin;
	u32 base_hdr_len;
	u32 mssc_len = 0;
	u32 i;

	info->section_num = le32_get_bits(fw_hdr->w6, FW_HDR_V1_W6_SEC_NUM);
	base_hdr_len = struct_size(fw_hdr, sections, info->section_num);
	info->dynamic_hdr_en = le32_get_bits(fw_hdr->w7, FW_HDR_V1_W7_DYN_HDR);

	if (info->dynamic_hdr_en) {
		info->hdr_len = le32_get_bits(fw_hdr->w5, FW_HDR_V1_W5_HDR_SIZE);
		info->dynamic_hdr_len = info->hdr_len - base_hdr_len;
		fwdynhdr = (const struct rtw89_fw_dynhdr_hdr *)(fw + base_hdr_len);
		if (le32_to_cpu(fwdynhdr->hdr_len) != info->dynamic_hdr_len) {
			rtw89_err(rtwdev, "[ERR]invalid fw dynamic header len\n");
			return -EINVAL;
		}
	} else {
		info->hdr_len = base_hdr_len;
		info->dynamic_hdr_len = 0;
	}

	bin = fw + info->hdr_len;

	/* jump to section header */
	section_info = info->section_info;
	for (i = 0; i < info->section_num; i++) {
		section = &fw_hdr->sections[i];
		section_info->type =
			le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_SECTIONTYPE);
		if (section_info->type == FWDL_SECURITY_SECTION_TYPE) {
			section_info->mssc =
				le32_get_bits(section->w2, FWSECTION_HDR_V1_W2_MSSC);
			mssc_len += section_info->mssc * FWDL_SECURITY_SIGLEN;
		} else {
			section_info->mssc = 0;
		}

		section_info->len =
			le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_SEC_SIZE);
		if (le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_CHECKSUM))
			section_info->len += FWDL_SECTION_CHKSUM_LEN;
		section_info->redl = le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_REDL);
		section_info->dladdr =
			le32_get_bits(section->w0, FWSECTION_HDR_V1_W0_DL_ADDR);
		section_info->addr = bin;
		bin += section_info->len;
		section_info++;
	}

	if (fw_end != bin + mssc_len) {
		rtw89_err(rtwdev, "[ERR]fw bin size\n");
		return -EINVAL;
	}

	return 0;
}

static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev,
			       const struct rtw89_fw_suit *fw_suit,
			       struct rtw89_fw_bin_info *info)
{
	const u8 *fw = fw_suit->data;
	u32 len = fw_suit->size;

	if (!fw || !len) {
		rtw89_err(rtwdev, "fw type %d isn't recognized\n", fw_suit->type);
		return -ENOENT;
	}

	switch (fw_suit->hdr_ver) {
	case 0:
		return rtw89_fw_hdr_parser_v0(rtwdev, fw, len, info);
	case 1:
		return rtw89_fw_hdr_parser_v1(rtwdev, fw, len, info);
	default:
		return -ENOENT;
	}
}

static
int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
			struct rtw89_fw_suit *fw_suit, bool nowarn)
{
	struct rtw89_fw_info *fw_info = &rtwdev->fw;
	const struct firmware *firmware = fw_info->req.firmware;
	const u8 *mfw = firmware->data;
	u32 mfw_len = firmware->size;
	const struct rtw89_mfw_hdr *mfw_hdr = (const struct rtw89_mfw_hdr *)mfw;
	const struct rtw89_mfw_info *mfw_info;
	int i;

	if (mfw_hdr->sig != RTW89_MFW_SIG) {
		rtw89_debug(rtwdev, RTW89_DBG_FW, "use legacy firmware\n");
		/* legacy firmware support normal type only */
		if (type != RTW89_FW_NORMAL)
			return -EINVAL;
		fw_suit->data = mfw;
		fw_suit->size = mfw_len;
		return 0;
	}

	for (i = 0; i < mfw_hdr->fw_nr; i++) {
		mfw_info = &mfw_hdr->info[i];
		if (mfw_info->type == type) {
			if (mfw_info->cv == rtwdev->hal.cv && !mfw_info->mp)
				goto found;
			if (type == RTW89_FW_LOGFMT)
				goto found;
		}
	}

	if (!nowarn)
		rtw89_err(rtwdev, "no suitable firmware found\n");
	return -ENOENT;

found:
	fw_suit->data = mfw + le32_to_cpu(mfw_info->shift);
	fw_suit->size = le32_to_cpu(mfw_info->size);
	return 0;
}

static u32 rtw89_mfw_get_size(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_info *fw_info = &rtwdev->fw;
	const struct firmware *firmware = fw_info->req.firmware;
	const struct rtw89_mfw_hdr *mfw_hdr =
		(const struct rtw89_mfw_hdr *)firmware->data;
	const struct rtw89_mfw_info *mfw_info;
	u32 size;

	if (mfw_hdr->sig != RTW89_MFW_SIG) {
		rtw89_warn(rtwdev, "not mfw format\n");
		return 0;
	}

	mfw_info = &mfw_hdr->info[mfw_hdr->fw_nr - 1];
	size = le32_to_cpu(mfw_info->shift) + le32_to_cpu(mfw_info->size);

	return size;
}

static void rtw89_fw_update_ver_v0(struct rtw89_dev *rtwdev,
				   struct rtw89_fw_suit *fw_suit,
				   const struct rtw89_fw_hdr *hdr)
{
	fw_suit->major_ver = le32_get_bits(hdr->w1, FW_HDR_W1_MAJOR_VERSION);
	fw_suit->minor_ver = le32_get_bits(hdr->w1, FW_HDR_W1_MINOR_VERSION);
	fw_suit->sub_ver = le32_get_bits(hdr->w1, FW_HDR_W1_SUBVERSION);
	fw_suit->sub_idex = le32_get_bits(hdr->w1, FW_HDR_W1_SUBINDEX);
	fw_suit->commitid = le32_get_bits(hdr->w2, FW_HDR_W2_COMMITID);
	fw_suit->build_year = le32_get_bits(hdr->w5, FW_HDR_W5_YEAR);
	fw_suit->build_mon = le32_get_bits(hdr->w4, FW_HDR_W4_MONTH);
	fw_suit->build_date = le32_get_bits(hdr->w4, FW_HDR_W4_DATE);
	fw_suit->build_hour = le32_get_bits(hdr->w4, FW_HDR_W4_HOUR);
	fw_suit->build_min = le32_get_bits(hdr->w4, FW_HDR_W4_MIN);
	fw_suit->cmd_ver = le32_get_bits(hdr->w7, FW_HDR_W7_CMD_VERSERION);
}

static void rtw89_fw_update_ver_v1(struct rtw89_dev *rtwdev,
				   struct rtw89_fw_suit *fw_suit,
				   const struct rtw89_fw_hdr_v1 *hdr)
{
	fw_suit->major_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_MAJOR_VERSION);
	fw_suit->minor_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_MINOR_VERSION);
	fw_suit->sub_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_SUBVERSION);
	fw_suit->sub_idex = le32_get_bits(hdr->w1, FW_HDR_V1_W1_SUBINDEX);
	fw_suit->commitid = le32_get_bits(hdr->w2, FW_HDR_V1_W2_COMMITID);
	fw_suit->build_year = le32_get_bits(hdr->w5, FW_HDR_V1_W5_YEAR);
	fw_suit->build_mon = le32_get_bits(hdr->w4, FW_HDR_V1_W4_MONTH);
	fw_suit->build_date = le32_get_bits(hdr->w4, FW_HDR_V1_W4_DATE);
	fw_suit->build_hour = le32_get_bits(hdr->w4, FW_HDR_V1_W4_HOUR);
	fw_suit->build_min = le32_get_bits(hdr->w4, FW_HDR_V1_W4_MIN);
	fw_suit->cmd_ver = le32_get_bits(hdr->w7, FW_HDR_V1_W3_CMD_VERSERION);
}

static int rtw89_fw_update_ver(struct rtw89_dev *rtwdev,
			       enum rtw89_fw_type type,
			       struct rtw89_fw_suit *fw_suit)
{
	const struct rtw89_fw_hdr *v0 = (const struct rtw89_fw_hdr *)fw_suit->data;
	const struct rtw89_fw_hdr_v1 *v1 = (const struct rtw89_fw_hdr_v1 *)fw_suit->data;

	if (type == RTW89_FW_LOGFMT)
		return 0;

	fw_suit->type = type;
	fw_suit->hdr_ver = le32_get_bits(v0->w3, FW_HDR_W3_HDR_VER);

	switch (fw_suit->hdr_ver) {
	case 0:
		rtw89_fw_update_ver_v0(rtwdev, fw_suit, v0);
		break;
	case 1:
		rtw89_fw_update_ver_v1(rtwdev, fw_suit, v1);
		break;
	default:
		rtw89_err(rtwdev, "Unknown firmware header version %u\n",
			  fw_suit->hdr_ver);
		return -ENOENT;
	}

	rtw89_info(rtwdev,
		   "Firmware version %u.%u.%u.%u (%08x), cmd version %u, type %u\n",
		   fw_suit->major_ver, fw_suit->minor_ver, fw_suit->sub_ver,
		   fw_suit->sub_idex, fw_suit->commitid, fw_suit->cmd_ver, type);

	return 0;
}

static
int __rtw89_fw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
			 bool nowarn)
{
	struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type);
	int ret;

	ret = rtw89_mfw_recognize(rtwdev, type, fw_suit, nowarn);
	if (ret)
		return ret;

	return rtw89_fw_update_ver(rtwdev, type, fw_suit);
}

static
int __rtw89_fw_recognize_from_elm(struct rtw89_dev *rtwdev,
				  const struct rtw89_fw_element_hdr *elm,
				  const void *data)
{
	enum rtw89_fw_type type = (enum rtw89_fw_type)data;
	struct rtw89_fw_suit *fw_suit;

	fw_suit = rtw89_fw_suit_get(rtwdev, type);
	fw_suit->data = elm->u.common.contents;
	fw_suit->size = le32_to_cpu(elm->size);

	return rtw89_fw_update_ver(rtwdev, type, fw_suit);
}

#define __DEF_FW_FEAT_COND(__cond, __op) \
static bool __fw_feat_cond_ ## __cond(u32 suit_ver_code, u32 comp_ver_code) \
{ \
	return suit_ver_code __op comp_ver_code; \
}

__DEF_FW_FEAT_COND(ge, >=); /* greater or equal */
__DEF_FW_FEAT_COND(le, <=); /* less or equal */
__DEF_FW_FEAT_COND(lt, <); /* less than */

struct __fw_feat_cfg {
	enum rtw89_core_chip_id chip_id;
	enum rtw89_fw_feature feature;
	u32 ver_code;
	bool (*cond)(u32 suit_ver_code, u32 comp_ver_code);
};

#define __CFG_FW_FEAT(_chip, _cond, _maj, _min, _sub, _idx, _feat) \
	{ \
		.chip_id = _chip, \
		.feature = RTW89_FW_FEATURE_ ## _feat, \
		.ver_code = RTW89_FW_VER_CODE(_maj, _min, _sub, _idx), \
		.cond = __fw_feat_cond_ ## _cond, \
	}

static const struct __fw_feat_cfg fw_feat_tbl[] = {
	__CFG_FW_FEAT(RTL8851B, ge, 0, 29, 37, 1, TX_WAKE),
	__CFG_FW_FEAT(RTL8851B, ge, 0, 29, 37, 1, SCAN_OFFLOAD),
	__CFG_FW_FEAT(RTL8851B, ge, 0, 29, 41, 0, CRASH_TRIGGER),
	__CFG_FW_FEAT(RTL8852A, le, 0, 13, 29, 0, OLD_HT_RA_FORMAT),
	__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, SCAN_OFFLOAD),
	__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, TX_WAKE),
	__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 36, 0, CRASH_TRIGGER),
	__CFG_FW_FEAT(RTL8852A, lt, 0, 13, 38, 0, NO_PACKET_DROP),
	__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, NO_LPS_PG),
	__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, TX_WAKE),
	__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, CRASH_TRIGGER),
	__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, SCAN_OFFLOAD),
	__CFG_FW_FEAT(RTL8852C, le, 0, 27, 33, 0, NO_DEEP_PS),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 34, 0, TX_WAKE),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 36, 0, SCAN_OFFLOAD),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 40, 0, CRASH_TRIGGER),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 56, 10, BEACON_FILTER),
};

static void rtw89_fw_iterate_feature_cfg(struct rtw89_fw_info *fw,
					 const struct rtw89_chip_info *chip,
					 u32 ver_code)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(fw_feat_tbl); i++) {
		const struct __fw_feat_cfg *ent = &fw_feat_tbl[i];

		if (chip->chip_id != ent->chip_id)
			continue;

		if (ent->cond(ver_code, ent->ver_code))
			RTW89_SET_FW_FEATURE(ent->feature, fw);
	}
}

static void rtw89_fw_recognize_features(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_fw_suit *fw_suit;
	u32 suit_ver_code;

	fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL);
	suit_ver_code = RTW89_FW_SUIT_VER_CODE(fw_suit);

	rtw89_fw_iterate_feature_cfg(&rtwdev->fw, chip, suit_ver_code);
}

const struct firmware *
rtw89_early_fw_feature_recognize(struct device *device,
				 const struct rtw89_chip_info *chip,
				 struct rtw89_fw_info *early_fw,
				 int *used_fw_format)
{
	const struct firmware *firmware;
	char fw_name[64];
	int fw_format;
	u32 ver_code;
	int ret;

	for (fw_format = chip->fw_format_max; fw_format >= 0; fw_format--) {
		rtw89_fw_get_filename(fw_name, sizeof(fw_name),
				      chip->fw_basename, fw_format);

		ret = request_firmware(&firmware, fw_name, device);
		if (!ret) {
			dev_info(device, "loaded firmware %s\n", fw_name);
			*used_fw_format = fw_format;
			break;
		}
	}

	if (ret) {
		dev_err(device, "failed to early request firmware: %d\n", ret);
		return NULL;
	}

	ver_code = rtw89_compat_fw_hdr_ver_code(firmware->data);

	if (!ver_code)
		goto out;

	rtw89_fw_iterate_feature_cfg(early_fw, chip, ver_code);

out:
	return firmware;
}

int rtw89_fw_recognize(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	int ret;

	if (chip->try_ce_fw) {
		ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL_CE, true);
		if (!ret)
			goto normal_done;
	}

	ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL, false);
	if (ret)
		return ret;

normal_done:
	/* It still works if wowlan firmware isn't existing. */
	__rtw89_fw_recognize(rtwdev, RTW89_FW_WOWLAN, false);

	/* It still works if log format file isn't existing. */
	__rtw89_fw_recognize(rtwdev, RTW89_FW_LOGFMT, true);

	rtw89_fw_recognize_features(rtwdev);

	rtw89_coex_recognize_ver(rtwdev);

	return 0;
}

static
int rtw89_build_phy_tbl_from_elm(struct rtw89_dev *rtwdev,
				 const struct rtw89_fw_element_hdr *elm,
				 const void *data)
{
	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
	struct rtw89_phy_table *tbl;
	struct rtw89_reg2_def *regs;
	enum rtw89_rf_path rf_path;
	u32 n_regs, i;
	u8 idx;

	tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
	if (!tbl)
		return -ENOMEM;

	switch (le32_to_cpu(elm->id)) {
	case RTW89_FW_ELEMENT_ID_BB_REG:
		elm_info->bb_tbl = tbl;
		break;
	case RTW89_FW_ELEMENT_ID_BB_GAIN:
		elm_info->bb_gain = tbl;
		break;
	case RTW89_FW_ELEMENT_ID_RADIO_A:
	case RTW89_FW_ELEMENT_ID_RADIO_B:
	case RTW89_FW_ELEMENT_ID_RADIO_C:
	case RTW89_FW_ELEMENT_ID_RADIO_D:
		rf_path = (enum rtw89_rf_path)data;
		idx = elm->u.reg2.idx;

		elm_info->rf_radio[idx] = tbl;
		tbl->rf_path = rf_path;
		tbl->config = rtw89_phy_config_rf_reg_v1;
		break;
	case RTW89_FW_ELEMENT_ID_RF_NCTL:
		elm_info->rf_nctl = tbl;
		break;
	default:
		kfree(tbl);
		return -ENOENT;
	}

	n_regs = le32_to_cpu(elm->size) / sizeof(tbl->regs[0]);
	regs = kcalloc(n_regs, sizeof(tbl->regs[0]), GFP_KERNEL);
	if (!regs)
		goto out;

	for (i = 0; i < n_regs; i++) {
		regs[i].addr = le32_to_cpu(elm->u.reg2.regs[i].addr);
		regs[i].data = le32_to_cpu(elm->u.reg2.regs[i].data);
	}

	tbl->n_regs = n_regs;
	tbl->regs = regs;

	return 0;

out:
	kfree(tbl);
	return -ENOMEM;
}

struct rtw89_fw_element_handler {
	int (*fn)(struct rtw89_dev *rtwdev,
		  const struct rtw89_fw_element_hdr *elm, const void *data);
	const void *data;
	const char *name;
};

static const struct rtw89_fw_element_handler __fw_element_handlers[] = {
	[RTW89_FW_ELEMENT_ID_BBMCU0] = {__rtw89_fw_recognize_from_elm,
					(const void *)RTW89_FW_BBMCU0, NULL},
	[RTW89_FW_ELEMENT_ID_BBMCU1] = {__rtw89_fw_recognize_from_elm,
					(const void *)RTW89_FW_BBMCU1, NULL},
	[RTW89_FW_ELEMENT_ID_BB_REG] = {rtw89_build_phy_tbl_from_elm, NULL, "BB"},
	[RTW89_FW_ELEMENT_ID_BB_GAIN] = {rtw89_build_phy_tbl_from_elm, NULL, NULL},
	[RTW89_FW_ELEMENT_ID_RADIO_A] = {rtw89_build_phy_tbl_from_elm,
					 (const void *)RF_PATH_A, "radio A"},
	[RTW89_FW_ELEMENT_ID_RADIO_B] = {rtw89_build_phy_tbl_from_elm,
					 (const void *)RF_PATH_B, NULL},
	[RTW89_FW_ELEMENT_ID_RADIO_C] = {rtw89_build_phy_tbl_from_elm,
					 (const void *)RF_PATH_C, NULL},
	[RTW89_FW_ELEMENT_ID_RADIO_D] = {rtw89_build_phy_tbl_from_elm,
					 (const void *)RF_PATH_D, NULL},
	[RTW89_FW_ELEMENT_ID_RF_NCTL] = {rtw89_build_phy_tbl_from_elm, NULL, "NCTL"},
};

int rtw89_fw_recognize_elements(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_info *fw_info = &rtwdev->fw;
	const struct firmware *firmware = fw_info->req.firmware;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u32 unrecognized_elements = chip->needed_fw_elms;
	const struct rtw89_fw_element_handler *handler;
	const struct rtw89_fw_element_hdr *hdr;
	u32 elm_size;
	u32 elem_id;
	u32 offset;
	int ret;

	BUILD_BUG_ON(sizeof(chip->needed_fw_elms) * 8 < RTW89_FW_ELEMENT_ID_NUM);

	offset = rtw89_mfw_get_size(rtwdev);
	offset = ALIGN(offset, RTW89_FW_ELEMENT_ALIGN);
	if (offset == 0)
		return -EINVAL;

	while (offset + sizeof(*hdr) < firmware->size) {
		hdr = (const struct rtw89_fw_element_hdr *)(firmware->data + offset);

		elm_size = le32_to_cpu(hdr->size);
		if (offset + elm_size >= firmware->size) {
			rtw89_warn(rtwdev, "firmware element size exceeds\n");
			break;
		}

		elem_id = le32_to_cpu(hdr->id);
		if (elem_id >= ARRAY_SIZE(__fw_element_handlers))
			goto next;

		handler = &__fw_element_handlers[elem_id];
		if (!handler->fn)
			goto next;

		ret = handler->fn(rtwdev, hdr, handler->data);
		if (ret)
			return ret;

		if (handler->name)
			rtw89_info(rtwdev, "Firmware element %s version: %4ph\n",
				   handler->name, hdr->ver);

		unrecognized_elements &= ~BIT(elem_id);
next:
		offset += sizeof(*hdr) + elm_size;
		offset = ALIGN(offset, RTW89_FW_ELEMENT_ALIGN);
	}

	if (unrecognized_elements) {
		rtw89_err(rtwdev, "Firmware elements 0x%08x are unrecognized\n",
			  unrecognized_elements);
		return -ENOENT;
	}

	return 0;
}

void rtw89_h2c_pkt_set_hdr(struct rtw89_dev *rtwdev, struct sk_buff *skb,
			   u8 type, u8 cat, u8 class, u8 func,
			   bool rack, bool dack, u32 len)
{
	struct fwcmd_hdr *hdr;

	hdr = (struct fwcmd_hdr *)skb_push(skb, 8);

	if (!(rtwdev->fw.h2c_seq % 4))
		rack = true;
	hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) |
				FIELD_PREP(H2C_HDR_CAT, cat) |
				FIELD_PREP(H2C_HDR_CLASS, class) |
				FIELD_PREP(H2C_HDR_FUNC, func) |
				FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq));

	hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN,
					   len + H2C_HEADER_LEN) |
				(rack ? H2C_HDR_REC_ACK : 0) |
				(dack ? H2C_HDR_DONE_ACK : 0));

	rtwdev->fw.h2c_seq++;
}

static void rtw89_h2c_pkt_set_hdr_fwdl(struct rtw89_dev *rtwdev,
				       struct sk_buff *skb,
				       u8 type, u8 cat, u8 class, u8 func,
				       u32 len)
{
	struct fwcmd_hdr *hdr;

	hdr = (struct fwcmd_hdr *)skb_push(skb, 8);

	hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) |
				FIELD_PREP(H2C_HDR_CAT, cat) |
				FIELD_PREP(H2C_HDR_CLASS, class) |
				FIELD_PREP(H2C_HDR_FUNC, func) |
				FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq));

	hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN,
					   len + H2C_HEADER_LEN));
}

static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 len)
{
	struct sk_buff *skb;
	u32 ret = 0;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw hdr dl\n");
		return -ENOMEM;
	}

	skb_put_data(skb, fw, len);
	SET_FW_HDR_PART_SIZE(skb->data, FWDL_SECTION_PER_PKT_LEN);
	rtw89_h2c_pkt_set_hdr_fwdl(rtwdev, skb, FWCMD_TYPE_H2C,
				   H2C_CAT_MAC, H2C_CL_MAC_FWDL,
				   H2C_FUNC_MAC_FWHDR_DL, len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		ret = -1;
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

static int rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 len)
{
	u8 val;
	int ret;

	ret = __rtw89_fw_download_hdr(rtwdev, fw, len);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]FW header download\n");
		return ret;
	}

	ret = read_poll_timeout_atomic(rtw89_read8, val, val & B_AX_FWDL_PATH_RDY,
				       1, FWDL_WAIT_CNT, false,
				       rtwdev, R_AX_WCPU_FW_CTRL);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]FWDL path ready\n");
		return ret;
	}

	rtw89_write32(rtwdev, R_AX_HALT_H2C_CTRL, 0);
	rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0);

	return 0;
}

static int __rtw89_fw_download_main(struct rtw89_dev *rtwdev,
				    struct rtw89_fw_hdr_section_info *info)
{
	struct sk_buff *skb;
	const u8 *section = info->addr;
	u32 residue_len = info->len;
	u32 pkt_len;
	int ret;

	while (residue_len) {
		if (residue_len >= FWDL_SECTION_PER_PKT_LEN)
			pkt_len = FWDL_SECTION_PER_PKT_LEN;
		else
			pkt_len = residue_len;

		skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, pkt_len);
		if (!skb) {
			rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
			return -ENOMEM;
		}
		skb_put_data(skb, section, pkt_len);

		ret = rtw89_h2c_tx(rtwdev, skb, true);
		if (ret) {
			rtw89_err(rtwdev, "failed to send h2c\n");
			ret = -1;
			goto fail;
		}

		section += pkt_len;
		residue_len -= pkt_len;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

static int rtw89_fw_download_main(struct rtw89_dev *rtwdev, const u8 *fw,
				  struct rtw89_fw_bin_info *info)
{
	struct rtw89_fw_hdr_section_info *section_info = info->section_info;
	u8 section_num = info->section_num;
	int ret;

	while (section_num--) {
		ret = __rtw89_fw_download_main(rtwdev, section_info);
		if (ret)
			return ret;
		section_info++;
	}

	mdelay(5);

	ret = rtw89_fw_check_rdy(rtwdev);
	if (ret) {
		rtw89_warn(rtwdev, "download firmware fail\n");
		return ret;
	}

	return 0;
}

static void rtw89_fw_prog_cnt_dump(struct rtw89_dev *rtwdev)
{
	u32 val32;
	u16 index;

	rtw89_write32(rtwdev, R_AX_DBG_CTRL,
		      FIELD_PREP(B_AX_DBG_SEL0, FW_PROG_CNTR_DBG_SEL) |
		      FIELD_PREP(B_AX_DBG_SEL1, FW_PROG_CNTR_DBG_SEL));
	rtw89_write32_mask(rtwdev, R_AX_SYS_STATUS1, B_AX_SEL_0XC0_MASK, MAC_DBG_SEL);

	for (index = 0; index < 15; index++) {
		val32 = rtw89_read32(rtwdev, R_AX_DBG_PORT_SEL);
		rtw89_err(rtwdev, "[ERR]fw PC = 0x%x\n", val32);
		fsleep(10);
	}
}

static void rtw89_fw_dl_fail_dump(struct rtw89_dev *rtwdev)
{
	u32 val32;
	u16 val16;

	val32 = rtw89_read32(rtwdev, R_AX_WCPU_FW_CTRL);
	rtw89_err(rtwdev, "[ERR]fwdl 0x1E0 = 0x%x\n", val32);

	val16 = rtw89_read16(rtwdev, R_AX_BOOT_DBG + 2);
	rtw89_err(rtwdev, "[ERR]fwdl 0x83F2 = 0x%x\n", val16);

	rtw89_fw_prog_cnt_dump(rtwdev);
}

int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type)
{
	struct rtw89_fw_info *fw_info = &rtwdev->fw;
	struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type);
	struct rtw89_fw_bin_info info;
	u8 val;
	int ret;

	rtw89_mac_disable_cpu(rtwdev);
	ret = rtw89_mac_enable_cpu(rtwdev, 0, true);
	if (ret)
		return ret;

	ret = rtw89_fw_hdr_parser(rtwdev, fw_suit, &info);
	if (ret) {
		rtw89_err(rtwdev, "parse fw header fail\n");
		goto fwdl_err;
	}

	ret = read_poll_timeout_atomic(rtw89_read8, val, val & B_AX_H2C_PATH_RDY,
				       1, FWDL_WAIT_CNT, false,
				       rtwdev, R_AX_WCPU_FW_CTRL);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]H2C path ready\n");
		goto fwdl_err;
	}

	ret = rtw89_fw_download_hdr(rtwdev, fw_suit->data, info.hdr_len -
							   info.dynamic_hdr_len);
	if (ret) {
		ret = -EBUSY;
		goto fwdl_err;
	}

	ret = rtw89_fw_download_main(rtwdev, fw_suit->data, &info);
	if (ret) {
		ret = -EBUSY;
		goto fwdl_err;
	}

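	/* Download succeeded: reset the driver-side H2C/C2H sequence numbers
	 * and counters below so command/event bookkeeping starts fresh with
	 * the newly booted firmware.
	 */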
	fw_info->h2c_seq = 0;
	fw_info->rec_seq = 0;
	fw_info->h2c_counter = 0;
	fw_info->c2h_counter = 0;
	rtwdev->mac.rpwm_seq_num = RPWM_SEQ_NUM_MAX;
	rtwdev->mac.cpwm_seq_num = CPWM_SEQ_NUM_MAX;

	return ret;

fwdl_err:
	rtw89_fw_dl_fail_dump(rtwdev);
	return ret;
}

int rtw89_wait_firmware_completion(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_info *fw = &rtwdev->fw;

	wait_for_completion(&fw->req.completion);
	if (!fw->req.firmware)
		return -EINVAL;

	return 0;
}

static int rtw89_load_firmware_req(struct rtw89_dev *rtwdev,
				   struct rtw89_fw_req_info *req,
				   const char *fw_name, bool nowarn)
{
	int ret;

	if (req->firmware) {
		rtw89_debug(rtwdev, RTW89_DBG_FW,
			    "full firmware has been early requested\n");
		complete_all(&req->completion);
		return 0;
	}

	if (nowarn)
		ret = firmware_request_nowarn(&req->firmware, fw_name, rtwdev->dev);
	else
		ret = request_firmware(&req->firmware, fw_name, rtwdev->dev);

	complete_all(&req->completion);

	return ret;
}

void rtw89_load_firmware_work(struct work_struct *work)
{
	struct rtw89_dev *rtwdev =
		container_of(work, struct rtw89_dev, load_firmware_work);
	const struct rtw89_chip_info *chip = rtwdev->chip;
	char fw_name[64];

	rtw89_fw_get_filename(fw_name, sizeof(fw_name),
			      chip->fw_basename, rtwdev->fw.fw_format);

	rtw89_load_firmware_req(rtwdev, &rtwdev->fw.req, fw_name, false);
}

static void rtw89_free_phy_tbl_from_elm(struct rtw89_phy_table *tbl)
{
	if (!tbl)
		return;

	kfree(tbl->regs);
	kfree(tbl);
}

static void rtw89_unload_firmware_elements(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
	int i;

	rtw89_free_phy_tbl_from_elm(elm_info->bb_tbl);
	rtw89_free_phy_tbl_from_elm(elm_info->bb_gain);
	for (i = 0; i < ARRAY_SIZE(elm_info->rf_radio); i++)
		rtw89_free_phy_tbl_from_elm(elm_info->rf_radio[i]);
	rtw89_free_phy_tbl_from_elm(elm_info->rf_nctl);
}

void rtw89_unload_firmware(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_info *fw = &rtwdev->fw;

	cancel_work_sync(&rtwdev->load_firmware_work);

	if (fw->req.firmware) {
		release_firmware(fw->req.firmware);

		/* assign NULL back in case rtw89_free_ieee80211_hw()
		 * try to release the same one again.
		 */
		fw->req.firmware = NULL;
	}

	kfree(fw->log.fmts);
	rtw89_unload_firmware_elements(rtwdev);
}

static u32 rtw89_fw_log_get_fmt_idx(struct rtw89_dev *rtwdev, u32 fmt_id)
{
	struct rtw89_fw_log *fw_log = &rtwdev->fw.log;
	u32 i;

	if (fmt_id > fw_log->last_fmt_id)
		return 0;

	for (i = 0; i < fw_log->fmt_count; i++) {
		if (le32_to_cpu(fw_log->fmt_ids[i]) == fmt_id)
			return i;
	}
	return 0;
}

static int rtw89_fw_log_create_fmts_dict(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_log *log = &rtwdev->fw.log;
	const struct rtw89_fw_logsuit_hdr *suit_hdr;
	struct rtw89_fw_suit *suit = &log->suit;
	const void *fmts_ptr, *fmts_end_ptr;
	u32 fmt_count;
	int i;

	suit_hdr = (const struct rtw89_fw_logsuit_hdr *)suit->data;
	fmt_count = le32_to_cpu(suit_hdr->count);
	log->fmt_ids = suit_hdr->ids;
	fmts_ptr = &suit_hdr->ids[fmt_count];
	fmts_end_ptr = suit->data + suit->size;
	log->fmts = kcalloc(fmt_count, sizeof(char *), GFP_KERNEL);
	if (!log->fmts)
		return -ENOMEM;

	for (i = 0; i < fmt_count; i++) {
		fmts_ptr = memchr_inv(fmts_ptr, 0, fmts_end_ptr - fmts_ptr);
		if (!fmts_ptr)
			break;

		(*log->fmts)[i] = fmts_ptr;
		log->last_fmt_id = le32_to_cpu(log->fmt_ids[i]);
		log->fmt_count++;
		fmts_ptr += strlen(fmts_ptr);
	}

	return 0;
}

int rtw89_fw_log_prepare(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_log *log = &rtwdev->fw.log;
	struct rtw89_fw_suit *suit = &log->suit;

	if (!suit || !suit->data) {
		rtw89_debug(rtwdev, RTW89_DBG_FW, "no log format file\n");
		return -EINVAL;
	}
	if (log->fmts)
		return 0;

	return rtw89_fw_log_create_fmts_dict(rtwdev);
}

static void rtw89_fw_log_dump_data(struct rtw89_dev *rtwdev,
				   const struct rtw89_fw_c2h_log_fmt *log_fmt,
				   u32 fmt_idx, u8 para_int, bool raw_data)
{
	const char *(*fmts)[] = rtwdev->fw.log.fmts;
	char str_buf[RTW89_C2H_FW_LOG_STR_BUF_SIZE];
	u32 args[RTW89_C2H_FW_LOG_MAX_PARA_NUM] = {0};
	int i;

	if (log_fmt->argc > RTW89_C2H_FW_LOG_MAX_PARA_NUM) {
		rtw89_warn(rtwdev, "C2H log: Arg count is unexpected %d\n",
			   log_fmt->argc);
		return;
	}

	if (para_int)
		for (i = 0 ; i < log_fmt->argc; i++)
			args[i] = le32_to_cpu(log_fmt->u.argv[i]);

	if (raw_data) {
		if (para_int)
			snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE,
				 "fw_enc(%d, %d, %d) %*ph", le32_to_cpu(log_fmt->fmt_id),
				 para_int, log_fmt->argc, (int)sizeof(args), args);
		else
			snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE,
				 "fw_enc(%d, %d, %d, %s)", le32_to_cpu(log_fmt->fmt_id),
				 para_int, log_fmt->argc, log_fmt->u.raw);
	} else {
		snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE, (*fmts)[fmt_idx],
			 args[0x0], args[0x1], args[0x2], args[0x3], args[0x4],
			 args[0x5], args[0x6], args[0x7], args[0x8], args[0x9],
			 args[0xa], args[0xb], args[0xc], args[0xd], args[0xe],
			 args[0xf]);
	}

	rtw89_info(rtwdev, "C2H log: %s", str_buf);
}

void rtw89_fw_log_dump(struct rtw89_dev *rtwdev, u8 *buf, u32 len)
{
	const struct rtw89_fw_c2h_log_fmt *log_fmt;
	u8 para_int;
	u32 fmt_idx;

	if (len < RTW89_C2H_HEADER_LEN) {
		rtw89_err(rtwdev, "c2h log length is wrong!\n");
		return;
	}

	buf += RTW89_C2H_HEADER_LEN;
	len -= RTW89_C2H_HEADER_LEN;
	log_fmt = (const struct rtw89_fw_c2h_log_fmt *)buf;

	if (len < RTW89_C2H_FW_FORMATTED_LOG_MIN_LEN)
		goto plain_log;

	if (log_fmt->signature != cpu_to_le16(RTW89_C2H_FW_LOG_SIGNATURE))
		goto plain_log;

	if (!rtwdev->fw.log.fmts)
		return;

	para_int = u8_get_bits(log_fmt->feature, RTW89_C2H_FW_LOG_FEATURE_PARA_INT);
	fmt_idx = rtw89_fw_log_get_fmt_idx(rtwdev, le32_to_cpu(log_fmt->fmt_id));

	if (!para_int && log_fmt->argc != 0 && fmt_idx != 0)
		rtw89_info(rtwdev, "C2H log: %s%s",
			   (*rtwdev->fw.log.fmts)[fmt_idx], log_fmt->u.raw);
	else if (fmt_idx != 0 && para_int)
		rtw89_fw_log_dump_data(rtwdev, log_fmt, fmt_idx, para_int, false);
	else
		rtw89_fw_log_dump_data(rtwdev, log_fmt, fmt_idx, para_int, true);
	return;

plain_log:
	rtw89_info(rtwdev, "C2H log: %.*s", len, buf);

}

#define H2C_CAM_LEN 60
int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
		     struct rtw89_sta *rtwsta, const u8 *scan_mac_addr)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CAM_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_CAM_LEN);
	rtw89_cam_fill_addr_cam_info(rtwdev, rtwvif, rtwsta, scan_mac_addr, skb->data);
	rtw89_cam_fill_bssid_cam_info(rtwdev, rtwvif, rtwsta, skb->data);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_ADDR_CAM_UPDATE,
			      H2C_FUNC_MAC_ADDR_CAM_UPD, 0, 1,
			      H2C_CAM_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_DCTL_SEC_CAM_LEN 68
int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev,
				 struct rtw89_vif *rtwvif,
				 struct rtw89_sta *rtwsta)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DCTL_SEC_CAM_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_DCTL_SEC_CAM_LEN);

	rtw89_cam_fill_dctl_sec_cam_info_v1(rtwdev, rtwvif, rtwsta, skb->data);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_FR_EXCHG,
			      H2C_FUNC_MAC_DCTLINFO_UD_V1, 0, 0,
			      H2C_DCTL_SEC_CAM_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v1);

#define H2C_BA_CAM_LEN 8
int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
			bool valid, struct ieee80211_ampdu_params *params)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_vif *rtwvif = rtwsta->rtwvif;
	u8 macid = rtwsta->mac_id;
	struct sk_buff *skb;
	u8 entry_idx;
	int ret;

	ret = valid ?
	      rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx) :
	      rtw89_core_release_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx);
	if (ret) {
		/* it still works even if we don't have static BA CAM, because
		 * hardware can create dynamic BA CAM automatically.
		 */
		rtw89_debug(rtwdev, RTW89_DBG_TXRX,
			    "failed to %s entry tid=%d for h2c ba cam\n",
			    valid ? "alloc" : "free", params->tid);
		return 0;
	}

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_BA_CAM_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_BA_CAM_LEN);
	SET_BA_CAM_MACID(skb->data, macid);
	if (chip->bacam_ver == RTW89_BACAM_V0_EXT)
		SET_BA_CAM_ENTRY_IDX_V1(skb->data, entry_idx);
	else
		SET_BA_CAM_ENTRY_IDX(skb->data, entry_idx);
	if (!valid)
		goto end;
	SET_BA_CAM_VALID(skb->data, valid);
	SET_BA_CAM_TID(skb->data, params->tid);
	if (params->buf_size > 64)
		SET_BA_CAM_BMAP_SIZE(skb->data, 4);
	else
		SET_BA_CAM_BMAP_SIZE(skb->data, 0);
	/* If init req is set, hw will set the ssn */
	SET_BA_CAM_INIT_REQ(skb->data, 1);
	SET_BA_CAM_SSN(skb->data, params->ssn);

	if (chip->bacam_ver == RTW89_BACAM_V0_EXT) {
		SET_BA_CAM_STD_EN(skb->data, 1);
		SET_BA_CAM_BAND(skb->data, rtwvif->mac_idx);
	}

end:
	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_BA_CAM,
			      H2C_FUNC_MAC_BA_CAM, 0, 1,
			      H2C_BA_CAM_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

static int rtw89_fw_h2c_init_ba_cam_v0_ext(struct rtw89_dev *rtwdev,
					   u8 entry_idx, u8 uid)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_BA_CAM_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for dynamic h2c ba cam\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_BA_CAM_LEN);

	SET_BA_CAM_VALID(skb->data, 1);
	SET_BA_CAM_ENTRY_IDX_V1(skb->data, entry_idx);
	SET_BA_CAM_UID(skb->data, uid);
	SET_BA_CAM_BAND(skb->data, 0);
	SET_BA_CAM_STD_EN(skb->data, 0);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_BA_CAM,
			      H2C_FUNC_MAC_BA_CAM, 0, 1,
			      H2C_BA_CAM_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

void rtw89_fw_h2c_init_dynamic_ba_cam_v0_ext(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u8 entry_idx = chip->bacam_num;
	u8 uid = 0;
	int i;

	for (i = 0; i < chip->bacam_dynamic_num; i++) {
		rtw89_fw_h2c_init_ba_cam_v0_ext(rtwdev, entry_idx, uid);
		entry_idx++;
		uid++;
	}
}

#define H2C_LOG_CFG_LEN 12
int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable)
{
	struct sk_buff *skb;
	u32 comp = enable ? BIT(RTW89_FW_LOG_COMP_INIT) | BIT(RTW89_FW_LOG_COMP_TASK) |
			    BIT(RTW89_FW_LOG_COMP_PS) | BIT(RTW89_FW_LOG_COMP_ERROR) : 0;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LOG_CFG_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw log cfg\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_LOG_CFG_LEN);
	SET_LOG_CFG_LEVEL(skb->data, RTW89_FW_LOG_LEVEL_LOUD);
	SET_LOG_CFG_PATH(skb->data, BIT(RTW89_FW_LOG_LEVEL_C2H));
	SET_LOG_CFG_COMP(skb->data, comp);
	SET_LOG_CFG_COMP_EXT(skb->data, 0);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_FW_INFO,
			      H2C_FUNC_LOG_CFG, 0, 0,
			      H2C_LOG_CFG_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

static int rtw89_fw_h2c_add_general_pkt(struct rtw89_dev *rtwdev,
					struct rtw89_vif *rtwvif,
					enum rtw89_fw_pkt_ofld_type type,
					u8 *id)
{
	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
	struct rtw89_pktofld_info *info;
	struct sk_buff *skb;
	int ret;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	switch (type) {
	case RTW89_PKT_OFLD_TYPE_PS_POLL:
		skb = ieee80211_pspoll_get(rtwdev->hw, vif);
		break;
	case RTW89_PKT_OFLD_TYPE_PROBE_RSP:
		skb = ieee80211_proberesp_get(rtwdev->hw, vif);
		break;
	case RTW89_PKT_OFLD_TYPE_NULL_DATA:
		skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, false);
		break;
	case RTW89_PKT_OFLD_TYPE_QOS_NULL:
		skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, true);
		break;
	default:
		goto err;
	}

	if (!skb)
		goto err;

	ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb);
	kfree_skb(skb);

	if (ret)
		goto err;

	list_add_tail(&info->list, &rtwvif->general_pkt_list);
	*id = info->id;
	return 0;

err:
	kfree(info);
	return -ENOMEM;
}

void rtw89_fw_release_general_pkt_list_vif(struct rtw89_dev *rtwdev,
					   struct rtw89_vif *rtwvif, bool notify_fw)
{
	struct list_head *pkt_list = &rtwvif->general_pkt_list;
	struct rtw89_pktofld_info *info, *tmp;

	list_for_each_entry_safe(info, tmp, pkt_list, list) {
		if (notify_fw)
			rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id);
		else
			rtw89_core_release_bit_map(rtwdev->pkt_offload, info->id);
		list_del(&info->list);
		kfree(info);
	}
}

void rtw89_fw_release_general_pkt_list(struct rtw89_dev *rtwdev, bool notify_fw)
{
	struct rtw89_vif *rtwvif;

	rtw89_for_each_rtwvif(rtwdev, rtwvif)
		rtw89_fw_release_general_pkt_list_vif(rtwdev, rtwvif, notify_fw);
}

#define H2C_GENERAL_PKT_LEN 6
#define H2C_GENERAL_PKT_ID_UND 0xff
int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev,
			     struct rtw89_vif *rtwvif, u8 macid)
{
	u8 pkt_id_ps_poll = H2C_GENERAL_PKT_ID_UND;
	u8 pkt_id_null = H2C_GENERAL_PKT_ID_UND;
	u8 pkt_id_qos_null = H2C_GENERAL_PKT_ID_UND;
	struct sk_buff *skb;
	int ret;

	rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
				     RTW89_PKT_OFLD_TYPE_PS_POLL, &pkt_id_ps_poll);
	rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
				     RTW89_PKT_OFLD_TYPE_NULL_DATA, &pkt_id_null);
	rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
				     RTW89_PKT_OFLD_TYPE_QOS_NULL, &pkt_id_qos_null);

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_GENERAL_PKT_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_GENERAL_PKT_LEN);
	SET_GENERAL_PKT_MACID(skb->data, macid);
	SET_GENERAL_PKT_PROBRSP_ID(skb->data, H2C_GENERAL_PKT_ID_UND);
	SET_GENERAL_PKT_PSPOLL_ID(skb->data, pkt_id_ps_poll);
	SET_GENERAL_PKT_NULL_ID(skb->data, pkt_id_null);
	SET_GENERAL_PKT_QOS_NULL_ID(skb->data, pkt_id_qos_null);
	SET_GENERAL_PKT_CTS2SELF_ID(skb->data, H2C_GENERAL_PKT_ID_UND);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_FW_INFO,
			      H2C_FUNC_MAC_GENERAL_PKT, 0, 1,
			      H2C_GENERAL_PKT_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_LPS_PARM_LEN 8
int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev,
			  struct rtw89_lps_parm *lps_param)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LPS_PARM_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_LPS_PARM_LEN);

	SET_LPS_PARM_MACID(skb->data, lps_param->macid);
	SET_LPS_PARM_PSMODE(skb->data, lps_param->psmode);
	SET_LPS_PARM_LASTRPWM(skb->data, lps_param->lastrpwm);
	SET_LPS_PARM_RLBM(skb->data, 1);
	SET_LPS_PARM_SMARTPS(skb->data, 1);
	SET_LPS_PARM_AWAKEINTERVAL(skb->data, 1);
	SET_LPS_PARM_VOUAPSD(skb->data, 0);
	SET_LPS_PARM_VIUAPSD(skb->data, 0);
	SET_LPS_PARM_BEUAPSD(skb->data, 0);
	SET_LPS_PARM_BKUAPSD(skb->data, 0);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_PS,
			      H2C_FUNC_MAC_LPS_PARM, 0, 1,
			      H2C_LPS_PARM_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_P2P_ACT_LEN 20
int rtw89_fw_h2c_p2p_act(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
			 struct ieee80211_p2p_noa_desc *desc,
			 u8 act, u8 noa_id)
{
	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
	bool p2p_type_gc = rtwvif->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT;
	u8 ctwindow_oppps = vif->bss_conf.p2p_noa_attr.oppps_ctwindow;
	struct sk_buff *skb;
	u8 *cmd;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_P2P_ACT_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_P2P_ACT_LEN);
	cmd = skb->data;

	RTW89_SET_FWCMD_P2P_MACID(cmd, rtwvif->mac_id);
	RTW89_SET_FWCMD_P2P_P2PID(cmd, 0);
	RTW89_SET_FWCMD_P2P_NOAID(cmd, noa_id);
	RTW89_SET_FWCMD_P2P_ACT(cmd, act);
	RTW89_SET_FWCMD_P2P_TYPE(cmd, p2p_type_gc);
	RTW89_SET_FWCMD_P2P_ALL_SLEP(cmd, 0);
	if (desc) {
		RTW89_SET_FWCMD_NOA_START_TIME(cmd, desc->start_time);
		RTW89_SET_FWCMD_NOA_INTERVAL(cmd, desc->interval);
		RTW89_SET_FWCMD_NOA_DURATION(cmd, desc->duration);
		RTW89_SET_FWCMD_NOA_COUNT(cmd, desc->count);
		RTW89_SET_FWCMD_NOA_CTWINDOW(cmd, ctwindow_oppps);
	}

rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1610 H2C_CAT_MAC, H2C_CL_MAC_PS, 1611 H2C_FUNC_P2P_ACT, 0, 0, 1612 H2C_P2P_ACT_LEN); 1613 1614 ret = rtw89_h2c_tx(rtwdev, skb, false); 1615 if (ret) { 1616 rtw89_err(rtwdev, "failed to send h2c\n"); 1617 goto fail; 1618 } 1619 1620 return 0; 1621 fail: 1622 dev_kfree_skb_any(skb); 1623 1624 return ret; 1625 } 1626 1627 static void __rtw89_fw_h2c_set_tx_path(struct rtw89_dev *rtwdev, 1628 struct sk_buff *skb) 1629 { 1630 const struct rtw89_chip_info *chip = rtwdev->chip; 1631 struct rtw89_hal *hal = &rtwdev->hal; 1632 u8 ntx_path; 1633 u8 map_b; 1634 1635 if (chip->rf_path_num == 1) { 1636 ntx_path = RF_A; 1637 map_b = 0; 1638 } else { 1639 ntx_path = hal->antenna_tx ? hal->antenna_tx : RF_B; 1640 map_b = hal->antenna_tx == RF_AB ? 1 : 0; 1641 } 1642 1643 SET_CMC_TBL_NTX_PATH_EN(skb->data, ntx_path); 1644 SET_CMC_TBL_PATH_MAP_A(skb->data, 0); 1645 SET_CMC_TBL_PATH_MAP_B(skb->data, map_b); 1646 SET_CMC_TBL_PATH_MAP_C(skb->data, 0); 1647 SET_CMC_TBL_PATH_MAP_D(skb->data, 0); 1648 } 1649 1650 #define H2C_CMC_TBL_LEN 68 1651 int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev, 1652 struct rtw89_vif *rtwvif) 1653 { 1654 const struct rtw89_chip_info *chip = rtwdev->chip; 1655 struct sk_buff *skb; 1656 u8 macid = rtwvif->mac_id; 1657 int ret; 1658 1659 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 1660 if (!skb) { 1661 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 1662 return -ENOMEM; 1663 } 1664 skb_put(skb, H2C_CMC_TBL_LEN); 1665 SET_CTRL_INFO_MACID(skb->data, macid); 1666 SET_CTRL_INFO_OPERATION(skb->data, 1); 1667 if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) { 1668 SET_CMC_TBL_TXPWR_MODE(skb->data, 0); 1669 __rtw89_fw_h2c_set_tx_path(rtwdev, skb); 1670 SET_CMC_TBL_ANTSEL_A(skb->data, 0); 1671 SET_CMC_TBL_ANTSEL_B(skb->data, 0); 1672 SET_CMC_TBL_ANTSEL_C(skb->data, 0); 1673 SET_CMC_TBL_ANTSEL_D(skb->data, 0); 1674 } 1675 SET_CMC_TBL_DOPPLER_CTRL(skb->data, 0); 1676 SET_CMC_TBL_TXPWR_TOLERENCE(skb->data, 0); 1677 if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) 1678 SET_CMC_TBL_DATA_DCM(skb->data, 0); 1679 1680 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1681 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 1682 chip->h2c_cctl_func_id, 0, 1, 1683 H2C_CMC_TBL_LEN); 1684 1685 ret = rtw89_h2c_tx(rtwdev, skb, false); 1686 if (ret) { 1687 rtw89_err(rtwdev, "failed to send h2c\n"); 1688 goto fail; 1689 } 1690 1691 return 0; 1692 fail: 1693 dev_kfree_skb_any(skb); 1694 1695 return ret; 1696 } 1697 1698 static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev, 1699 struct ieee80211_sta *sta, u8 *pads) 1700 { 1701 bool ppe_th; 1702 u8 ppe16, ppe8; 1703 u8 nss = min(sta->deflink.rx_nss, rtwdev->hal.tx_nss) - 1; 1704 u8 ppe_thres_hdr = sta->deflink.he_cap.ppe_thres[0]; 1705 u8 ru_bitmap; 1706 u8 n, idx, sh; 1707 u16 ppe; 1708 int i; 1709 1710 if (!sta->deflink.he_cap.has_he) 1711 return; 1712 1713 ppe_th = FIELD_GET(IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT, 1714 sta->deflink.he_cap.he_cap_elem.phy_cap_info[6]); 1715 if (!ppe_th) { 1716 u8 pad; 1717 1718 pad = FIELD_GET(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK, 1719 sta->deflink.he_cap.he_cap_elem.phy_cap_info[9]); 1720 1721 for (i = 0; i < RTW89_PPE_BW_NUM; i++) 1722 pads[i] = pad; 1723 1724 return; 1725 } 1726 1727 ru_bitmap = FIELD_GET(IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK, ppe_thres_hdr); 1728 n = hweight8(ru_bitmap); 1729 n = 7 + (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) * nss; 1730 1731 for (i = 0; i < RTW89_PPE_BW_NUM; i++) { 1732 if 
(!(ru_bitmap & BIT(i))) { 1733 pads[i] = 1; 1734 continue; 1735 } 1736 1737 idx = n >> 3; 1738 sh = n & 7; 1739 n += IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2; 1740 1741 ppe = le16_to_cpu(*((__le16 *)&sta->deflink.he_cap.ppe_thres[idx])); 1742 ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 1743 sh += IEEE80211_PPE_THRES_INFO_PPET_SIZE; 1744 ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 1745 1746 if (ppe16 != 7 && ppe8 == 7) 1747 pads[i] = 2; 1748 else if (ppe8 != 7) 1749 pads[i] = 1; 1750 else 1751 pads[i] = 0; 1752 } 1753 } 1754 1755 int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev, 1756 struct ieee80211_vif *vif, 1757 struct ieee80211_sta *sta) 1758 { 1759 const struct rtw89_chip_info *chip = rtwdev->chip; 1760 struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta); 1761 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 1762 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, 1763 rtwvif->sub_entity_idx); 1764 struct sk_buff *skb; 1765 u8 pads[RTW89_PPE_BW_NUM]; 1766 u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id; 1767 u16 lowest_rate; 1768 int ret; 1769 1770 memset(pads, 0, sizeof(pads)); 1771 if (sta) 1772 __get_sta_he_pkt_padding(rtwdev, sta, pads); 1773 1774 if (vif->p2p) 1775 lowest_rate = RTW89_HW_RATE_OFDM6; 1776 else if (chan->band_type == RTW89_BAND_2G) 1777 lowest_rate = RTW89_HW_RATE_CCK1; 1778 else 1779 lowest_rate = RTW89_HW_RATE_OFDM6; 1780 1781 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 1782 if (!skb) { 1783 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 1784 return -ENOMEM; 1785 } 1786 skb_put(skb, H2C_CMC_TBL_LEN); 1787 SET_CTRL_INFO_MACID(skb->data, mac_id); 1788 SET_CTRL_INFO_OPERATION(skb->data, 1); 1789 SET_CMC_TBL_DISRTSFB(skb->data, 1); 1790 SET_CMC_TBL_DISDATAFB(skb->data, 1); 1791 SET_CMC_TBL_RTS_RTY_LOWEST_RATE(skb->data, lowest_rate); 1792 SET_CMC_TBL_RTS_TXCNT_LMT_SEL(skb->data, 0); 1793 SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 0); 1794 if (vif->type == NL80211_IFTYPE_STATION) 1795 SET_CMC_TBL_ULDL(skb->data, 1); 1796 else 1797 SET_CMC_TBL_ULDL(skb->data, 0); 1798 SET_CMC_TBL_MULTI_PORT_ID(skb->data, rtwvif->port); 1799 if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD_V1) { 1800 SET_CMC_TBL_NOMINAL_PKT_PADDING_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_20]); 1801 SET_CMC_TBL_NOMINAL_PKT_PADDING40_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_40]); 1802 SET_CMC_TBL_NOMINAL_PKT_PADDING80_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_80]); 1803 SET_CMC_TBL_NOMINAL_PKT_PADDING160_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_160]); 1804 } else if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) { 1805 SET_CMC_TBL_NOMINAL_PKT_PADDING(skb->data, pads[RTW89_CHANNEL_WIDTH_20]); 1806 SET_CMC_TBL_NOMINAL_PKT_PADDING40(skb->data, pads[RTW89_CHANNEL_WIDTH_40]); 1807 SET_CMC_TBL_NOMINAL_PKT_PADDING80(skb->data, pads[RTW89_CHANNEL_WIDTH_80]); 1808 SET_CMC_TBL_NOMINAL_PKT_PADDING160(skb->data, pads[RTW89_CHANNEL_WIDTH_160]); 1809 } 1810 if (sta) 1811 SET_CMC_TBL_BSR_QUEUE_SIZE_FORMAT(skb->data, 1812 sta->deflink.he_cap.has_he); 1813 if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) 1814 SET_CMC_TBL_DATA_DCM(skb->data, 0); 1815 1816 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1817 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 1818 chip->h2c_cctl_func_id, 0, 1, 1819 H2C_CMC_TBL_LEN); 1820 1821 ret = rtw89_h2c_tx(rtwdev, skb, false); 1822 if (ret) { 1823 rtw89_err(rtwdev, "failed to send h2c\n"); 1824 goto fail; 1825 } 1826 1827 return 0; 1828 fail: 1829 dev_kfree_skb_any(skb); 1830 1831 return ret; 1832 } 1833 1834 int 
rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev, 1835 struct rtw89_sta *rtwsta) 1836 { 1837 const struct rtw89_chip_info *chip = rtwdev->chip; 1838 struct sk_buff *skb; 1839 int ret; 1840 1841 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 1842 if (!skb) { 1843 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 1844 return -ENOMEM; 1845 } 1846 skb_put(skb, H2C_CMC_TBL_LEN); 1847 SET_CTRL_INFO_MACID(skb->data, rtwsta->mac_id); 1848 SET_CTRL_INFO_OPERATION(skb->data, 1); 1849 if (rtwsta->cctl_tx_time) { 1850 SET_CMC_TBL_AMPDU_TIME_SEL(skb->data, 1); 1851 SET_CMC_TBL_AMPDU_MAX_TIME(skb->data, rtwsta->ampdu_max_time); 1852 } 1853 if (rtwsta->cctl_tx_retry_limit) { 1854 SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 1); 1855 SET_CMC_TBL_DATA_TX_CNT_LMT(skb->data, rtwsta->data_tx_cnt_lmt); 1856 } 1857 1858 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1859 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 1860 chip->h2c_cctl_func_id, 0, 1, 1861 H2C_CMC_TBL_LEN); 1862 1863 ret = rtw89_h2c_tx(rtwdev, skb, false); 1864 if (ret) { 1865 rtw89_err(rtwdev, "failed to send h2c\n"); 1866 goto fail; 1867 } 1868 1869 return 0; 1870 fail: 1871 dev_kfree_skb_any(skb); 1872 1873 return ret; 1874 } 1875 1876 int rtw89_fw_h2c_txpath_cmac_tbl(struct rtw89_dev *rtwdev, 1877 struct rtw89_sta *rtwsta) 1878 { 1879 const struct rtw89_chip_info *chip = rtwdev->chip; 1880 struct sk_buff *skb; 1881 int ret; 1882 1883 if (chip->h2c_cctl_func_id != H2C_FUNC_MAC_CCTLINFO_UD) 1884 return 0; 1885 1886 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 1887 if (!skb) { 1888 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 1889 return -ENOMEM; 1890 } 1891 skb_put(skb, H2C_CMC_TBL_LEN); 1892 SET_CTRL_INFO_MACID(skb->data, rtwsta->mac_id); 1893 SET_CTRL_INFO_OPERATION(skb->data, 1); 1894 1895 __rtw89_fw_h2c_set_tx_path(rtwdev, skb); 1896 1897 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1898 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 1899 H2C_FUNC_MAC_CCTLINFO_UD, 0, 1, 1900 H2C_CMC_TBL_LEN); 1901 1902 ret = rtw89_h2c_tx(rtwdev, skb, false); 1903 if (ret) { 1904 rtw89_err(rtwdev, "failed to send h2c\n"); 1905 goto fail; 1906 } 1907 1908 return 0; 1909 fail: 1910 dev_kfree_skb_any(skb); 1911 1912 return ret; 1913 } 1914 1915 #define H2C_BCN_BASE_LEN 12 1916 int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev, 1917 struct rtw89_vif *rtwvif) 1918 { 1919 struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); 1920 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, 1921 rtwvif->sub_entity_idx); 1922 struct sk_buff *skb; 1923 struct sk_buff *skb_beacon; 1924 u16 tim_offset; 1925 int bcn_total_len; 1926 u16 beacon_rate; 1927 void *noa_data; 1928 u8 noa_len; 1929 int ret; 1930 1931 if (vif->p2p) 1932 beacon_rate = RTW89_HW_RATE_OFDM6; 1933 else if (chan->band_type == RTW89_BAND_2G) 1934 beacon_rate = RTW89_HW_RATE_CCK1; 1935 else 1936 beacon_rate = RTW89_HW_RATE_OFDM6; 1937 1938 skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset, 1939 NULL, 0); 1940 if (!skb_beacon) { 1941 rtw89_err(rtwdev, "failed to get beacon skb\n"); 1942 return -ENOMEM; 1943 } 1944 1945 noa_len = rtw89_p2p_noa_fetch(rtwvif, &noa_data); 1946 if (noa_len && 1947 (noa_len <= skb_tailroom(skb_beacon) || 1948 pskb_expand_head(skb_beacon, 0, noa_len, GFP_KERNEL) == 0)) { 1949 skb_put_data(skb_beacon, noa_data, noa_len); 1950 } 1951 1952 bcn_total_len = H2C_BCN_BASE_LEN + skb_beacon->len; 1953 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len); 1954 if (!skb) { 1955 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 
1956 dev_kfree_skb_any(skb_beacon); 1957 return -ENOMEM; 1958 } 1959 skb_put(skb, H2C_BCN_BASE_LEN); 1960 1961 SET_BCN_UPD_PORT(skb->data, rtwvif->port); 1962 SET_BCN_UPD_MBSSID(skb->data, 0); 1963 SET_BCN_UPD_BAND(skb->data, rtwvif->mac_idx); 1964 SET_BCN_UPD_GRP_IE_OFST(skb->data, tim_offset); 1965 SET_BCN_UPD_MACID(skb->data, rtwvif->mac_id); 1966 SET_BCN_UPD_SSN_SEL(skb->data, RTW89_MGMT_HW_SSN_SEL); 1967 SET_BCN_UPD_SSN_MODE(skb->data, RTW89_MGMT_HW_SEQ_MODE); 1968 SET_BCN_UPD_RATE(skb->data, beacon_rate); 1969 1970 skb_put_data(skb, skb_beacon->data, skb_beacon->len); 1971 dev_kfree_skb_any(skb_beacon); 1972 1973 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1974 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 1975 H2C_FUNC_MAC_BCN_UPD, 0, 1, 1976 bcn_total_len); 1977 1978 ret = rtw89_h2c_tx(rtwdev, skb, false); 1979 if (ret) { 1980 rtw89_err(rtwdev, "failed to send h2c\n"); 1981 dev_kfree_skb_any(skb); 1982 return ret; 1983 } 1984 1985 return 0; 1986 } 1987 1988 #define H2C_ROLE_MAINTAIN_LEN 4 1989 int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev, 1990 struct rtw89_vif *rtwvif, 1991 struct rtw89_sta *rtwsta, 1992 enum rtw89_upd_mode upd_mode) 1993 { 1994 struct sk_buff *skb; 1995 u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id; 1996 u8 self_role; 1997 int ret; 1998 1999 if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) { 2000 if (rtwsta) 2001 self_role = RTW89_SELF_ROLE_AP_CLIENT; 2002 else 2003 self_role = rtwvif->self_role; 2004 } else { 2005 self_role = rtwvif->self_role; 2006 } 2007 2008 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ROLE_MAINTAIN_LEN); 2009 if (!skb) { 2010 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 2011 return -ENOMEM; 2012 } 2013 skb_put(skb, H2C_ROLE_MAINTAIN_LEN); 2014 SET_FWROLE_MAINTAIN_MACID(skb->data, mac_id); 2015 SET_FWROLE_MAINTAIN_SELF_ROLE(skb->data, self_role); 2016 SET_FWROLE_MAINTAIN_UPD_MODE(skb->data, upd_mode); 2017 SET_FWROLE_MAINTAIN_WIFI_ROLE(skb->data, rtwvif->wifi_role); 2018 2019 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2020 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT, 2021 H2C_FUNC_MAC_FWROLE_MAINTAIN, 0, 1, 2022 H2C_ROLE_MAINTAIN_LEN); 2023 2024 ret = rtw89_h2c_tx(rtwdev, skb, false); 2025 if (ret) { 2026 rtw89_err(rtwdev, "failed to send h2c\n"); 2027 goto fail; 2028 } 2029 2030 return 0; 2031 fail: 2032 dev_kfree_skb_any(skb); 2033 2034 return ret; 2035 } 2036 2037 #define H2C_JOIN_INFO_LEN 4 2038 int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 2039 struct rtw89_sta *rtwsta, bool dis_conn) 2040 { 2041 struct sk_buff *skb; 2042 u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id; 2043 u8 self_role = rtwvif->self_role; 2044 u8 net_type = rtwvif->net_type; 2045 int ret; 2046 2047 if (net_type == RTW89_NET_TYPE_AP_MODE && rtwsta) { 2048 self_role = RTW89_SELF_ROLE_AP_CLIENT; 2049 net_type = dis_conn ? 
RTW89_NET_TYPE_NO_LINK : net_type;
2050 }
2051 
2052 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_JOIN_INFO_LEN);
2053 if (!skb) {
2054 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
2055 return -ENOMEM;
2056 }
2057 skb_put(skb, H2C_JOIN_INFO_LEN);
2058 SET_JOININFO_MACID(skb->data, mac_id);
2059 SET_JOININFO_OP(skb->data, dis_conn);
2060 SET_JOININFO_BAND(skb->data, rtwvif->mac_idx);
2061 SET_JOININFO_WMM(skb->data, rtwvif->wmm);
2062 SET_JOININFO_TGR(skb->data, rtwvif->trigger);
2063 SET_JOININFO_ISHESTA(skb->data, 0);
2064 SET_JOININFO_DLBW(skb->data, 0);
2065 SET_JOININFO_TF_MAC_PAD(skb->data, 0);
2066 SET_JOININFO_DL_T_PE(skb->data, 0);
2067 SET_JOININFO_PORT_ID(skb->data, rtwvif->port);
2068 SET_JOININFO_NET_TYPE(skb->data, net_type);
2069 SET_JOININFO_WIFI_ROLE(skb->data, rtwvif->wifi_role);
2070 SET_JOININFO_SELF_ROLE(skb->data, self_role);
2071 
2072 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2073 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
2074 H2C_FUNC_MAC_JOININFO, 0, 1,
2075 H2C_JOIN_INFO_LEN);
2076 
2077 ret = rtw89_h2c_tx(rtwdev, skb, false);
2078 if (ret) {
2079 rtw89_err(rtwdev, "failed to send h2c\n");
2080 goto fail;
2081 }
2082 
2083 return 0;
2084 fail:
2085 dev_kfree_skb_any(skb);
2086 
2087 return ret;
2088 }
2089 
2090 int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp,
2091 bool pause)
2092 {
2093 struct rtw89_fw_macid_pause_grp h2c = {{0}};
2094 u8 len = sizeof(struct rtw89_fw_macid_pause_grp);
2095 struct sk_buff *skb;
2096 int ret;
2097 
/* the payload is a whole pause/mask group, so reserve its real size */
2098 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2099 if (!skb) {
2100 rtw89_err(rtwdev, "failed to alloc skb for h2c macid pause\n");
2101 return -ENOMEM;
2102 }
2103 h2c.mask_grp[grp] = cpu_to_le32(BIT(sh));
2104 if (pause)
2105 h2c.pause_grp[grp] = cpu_to_le32(BIT(sh));
2106 skb_put_data(skb, &h2c, len);
2107 
2108 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2109 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
2110 H2C_FUNC_MAC_MACID_PAUSE, 1, 0,
2111 len);
2112 
2113 ret = rtw89_h2c_tx(rtwdev, skb, false);
2114 if (ret) {
2115 rtw89_err(rtwdev, "failed to send h2c\n");
2116 goto fail;
2117 }
2118 
2119 return 0;
2120 fail:
2121 dev_kfree_skb_any(skb);
2122 
2123 return ret;
2124 }
2125 
2126 #define H2C_EDCA_LEN 12
2127 int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
2128 u8 ac, u32 val)
2129 {
2130 struct sk_buff *skb;
2131 int ret;
2132 
2133 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_EDCA_LEN);
2134 if (!skb) {
2135 rtw89_err(rtwdev, "failed to alloc skb for h2c edca\n");
2136 return -ENOMEM;
2137 }
2138 skb_put(skb, H2C_EDCA_LEN);
2139 RTW89_SET_EDCA_SEL(skb->data, 0);
2140 RTW89_SET_EDCA_BAND(skb->data, rtwvif->mac_idx);
2141 RTW89_SET_EDCA_WMM(skb->data, 0);
2142 RTW89_SET_EDCA_AC(skb->data, ac);
2143 RTW89_SET_EDCA_PARAM(skb->data, val);
2144 
2145 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2146 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
2147 H2C_FUNC_USR_EDCA, 0, 1,
2148 H2C_EDCA_LEN);
2149 
2150 ret = rtw89_h2c_tx(rtwdev, skb, false);
2151 if (ret) {
2152 rtw89_err(rtwdev, "failed to send h2c\n");
2153 goto fail;
2154 }
2155 
2156 return 0;
2157 fail:
2158 dev_kfree_skb_any(skb);
2159 
2160 return ret;
2161 }
2162 
2163 #define H2C_TSF32_TOGL_LEN 4
2164 int rtw89_fw_h2c_tsf32_toggle(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
2165 bool en)
2166 {
2167 struct sk_buff *skb;
2168 u16 early_us = en ?
2000 : 0; 2169 u8 *cmd; 2170 int ret; 2171 2172 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_TSF32_TOGL_LEN); 2173 if (!skb) { 2174 rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n"); 2175 return -ENOMEM; 2176 } 2177 skb_put(skb, H2C_TSF32_TOGL_LEN); 2178 cmd = skb->data; 2179 2180 RTW89_SET_FWCMD_TSF32_TOGL_BAND(cmd, rtwvif->mac_idx); 2181 RTW89_SET_FWCMD_TSF32_TOGL_EN(cmd, en); 2182 RTW89_SET_FWCMD_TSF32_TOGL_PORT(cmd, rtwvif->port); 2183 RTW89_SET_FWCMD_TSF32_TOGL_EARLY(cmd, early_us); 2184 2185 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2186 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 2187 H2C_FUNC_TSF32_TOGL, 0, 0, 2188 H2C_TSF32_TOGL_LEN); 2189 2190 ret = rtw89_h2c_tx(rtwdev, skb, false); 2191 if (ret) { 2192 rtw89_err(rtwdev, "failed to send h2c\n"); 2193 goto fail; 2194 } 2195 2196 return 0; 2197 fail: 2198 dev_kfree_skb_any(skb); 2199 2200 return ret; 2201 } 2202 2203 #define H2C_OFLD_CFG_LEN 8 2204 int rtw89_fw_h2c_set_ofld_cfg(struct rtw89_dev *rtwdev) 2205 { 2206 static const u8 cfg[] = {0x09, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x00, 0x00}; 2207 struct sk_buff *skb; 2208 int ret; 2209 2210 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_OFLD_CFG_LEN); 2211 if (!skb) { 2212 rtw89_err(rtwdev, "failed to alloc skb for h2c ofld\n"); 2213 return -ENOMEM; 2214 } 2215 skb_put_data(skb, cfg, H2C_OFLD_CFG_LEN); 2216 2217 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2218 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 2219 H2C_FUNC_OFLD_CFG, 0, 1, 2220 H2C_OFLD_CFG_LEN); 2221 2222 ret = rtw89_h2c_tx(rtwdev, skb, false); 2223 if (ret) { 2224 rtw89_err(rtwdev, "failed to send h2c\n"); 2225 goto fail; 2226 } 2227 2228 return 0; 2229 fail: 2230 dev_kfree_skb_any(skb); 2231 2232 return ret; 2233 } 2234 2235 int rtw89_fw_h2c_set_bcn_fltr_cfg(struct rtw89_dev *rtwdev, 2236 struct ieee80211_vif *vif, 2237 bool connect) 2238 { 2239 struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif); 2240 struct ieee80211_bss_conf *bss_conf = vif ? 
&vif->bss_conf : NULL; 2241 struct rtw89_h2c_bcnfltr *h2c; 2242 u32 len = sizeof(*h2c); 2243 struct sk_buff *skb; 2244 int ret; 2245 2246 if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw)) 2247 return -EINVAL; 2248 2249 if (!rtwvif || !bss_conf || rtwvif->net_type != RTW89_NET_TYPE_INFRA) 2250 return -EINVAL; 2251 2252 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2253 if (!skb) { 2254 rtw89_err(rtwdev, "failed to alloc skb for h2c bcn filter\n"); 2255 return -ENOMEM; 2256 } 2257 2258 skb_put(skb, len); 2259 h2c = (struct rtw89_h2c_bcnfltr *)skb->data; 2260 2261 h2c->w0 = le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_RSSI) | 2262 le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_BCN) | 2263 le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_EN) | 2264 le32_encode_bits(RTW89_BCN_FLTR_OFFLOAD_MODE_DEFAULT, 2265 RTW89_H2C_BCNFLTR_W0_MODE) | 2266 le32_encode_bits(RTW89_BCN_LOSS_CNT, RTW89_H2C_BCNFLTR_W0_BCN_LOSS_CNT) | 2267 le32_encode_bits(bss_conf->cqm_rssi_hyst, RTW89_H2C_BCNFLTR_W0_RSSI_HYST) | 2268 le32_encode_bits(bss_conf->cqm_rssi_thold + MAX_RSSI, 2269 RTW89_H2C_BCNFLTR_W0_RSSI_THRESHOLD) | 2270 le32_encode_bits(rtwvif->mac_id, RTW89_H2C_BCNFLTR_W0_MAC_ID); 2271 2272 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2273 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 2274 H2C_FUNC_CFG_BCNFLTR, 0, 1, len); 2275 2276 ret = rtw89_h2c_tx(rtwdev, skb, false); 2277 if (ret) { 2278 rtw89_err(rtwdev, "failed to send h2c\n"); 2279 goto fail; 2280 } 2281 2282 return 0; 2283 fail: 2284 dev_kfree_skb_any(skb); 2285 2286 return ret; 2287 } 2288 2289 int rtw89_fw_h2c_rssi_offload(struct rtw89_dev *rtwdev, 2290 struct rtw89_rx_phy_ppdu *phy_ppdu) 2291 { 2292 struct rtw89_h2c_ofld_rssi *h2c; 2293 u32 len = sizeof(*h2c); 2294 struct sk_buff *skb; 2295 s8 rssi; 2296 int ret; 2297 2298 if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw)) 2299 return -EINVAL; 2300 2301 if (!phy_ppdu) 2302 return -EINVAL; 2303 2304 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2305 if (!skb) { 2306 rtw89_err(rtwdev, "failed to alloc skb for h2c rssi\n"); 2307 return -ENOMEM; 2308 } 2309 2310 rssi = phy_ppdu->rssi_avg >> RSSI_FACTOR; 2311 skb_put(skb, len); 2312 h2c = (struct rtw89_h2c_ofld_rssi *)skb->data; 2313 2314 h2c->w0 = le32_encode_bits(phy_ppdu->mac_id, RTW89_H2C_OFLD_RSSI_W0_MACID) | 2315 le32_encode_bits(1, RTW89_H2C_OFLD_RSSI_W0_NUM); 2316 h2c->w1 = le32_encode_bits(rssi, RTW89_H2C_OFLD_RSSI_W1_VAL); 2317 2318 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2319 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 2320 H2C_FUNC_OFLD_RSSI, 0, 1, len); 2321 2322 ret = rtw89_h2c_tx(rtwdev, skb, false); 2323 if (ret) { 2324 rtw89_err(rtwdev, "failed to send h2c\n"); 2325 goto fail; 2326 } 2327 2328 return 0; 2329 fail: 2330 dev_kfree_skb_any(skb); 2331 2332 return ret; 2333 } 2334 2335 int rtw89_fw_h2c_tp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) 2336 { 2337 struct rtw89_traffic_stats *stats = &rtwvif->stats; 2338 struct rtw89_h2c_ofld *h2c; 2339 u32 len = sizeof(*h2c); 2340 struct sk_buff *skb; 2341 int ret; 2342 2343 if (rtwvif->net_type != RTW89_NET_TYPE_INFRA) 2344 return -EINVAL; 2345 2346 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2347 if (!skb) { 2348 rtw89_err(rtwdev, "failed to alloc skb for h2c tp\n"); 2349 return -ENOMEM; 2350 } 2351 2352 skb_put(skb, len); 2353 h2c = (struct rtw89_h2c_ofld *)skb->data; 2354 2355 h2c->w0 = le32_encode_bits(rtwvif->mac_id, RTW89_H2C_OFLD_W0_MAC_ID) | 2356 le32_encode_bits(stats->tx_throughput, RTW89_H2C_OFLD_W0_TX_TP) | 2357 
le32_encode_bits(stats->rx_throughput, RTW89_H2C_OFLD_W0_RX_TP); 2358 2359 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2360 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 2361 H2C_FUNC_OFLD_TP, 0, 1, len); 2362 2363 ret = rtw89_h2c_tx(rtwdev, skb, false); 2364 if (ret) { 2365 rtw89_err(rtwdev, "failed to send h2c\n"); 2366 goto fail; 2367 } 2368 2369 return 0; 2370 fail: 2371 dev_kfree_skb_any(skb); 2372 2373 return ret; 2374 } 2375 2376 int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi) 2377 { 2378 const struct rtw89_chip_info *chip = rtwdev->chip; 2379 struct rtw89_h2c_ra_v1 *h2c_v1; 2380 struct rtw89_h2c_ra *h2c; 2381 u32 len = sizeof(*h2c); 2382 bool format_v1 = false; 2383 struct sk_buff *skb; 2384 int ret; 2385 2386 if (chip->chip_gen == RTW89_CHIP_BE) { 2387 len = sizeof(*h2c_v1); 2388 format_v1 = true; 2389 } 2390 2391 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2392 if (!skb) { 2393 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 2394 return -ENOMEM; 2395 } 2396 skb_put(skb, len); 2397 h2c = (struct rtw89_h2c_ra *)skb->data; 2398 rtw89_debug(rtwdev, RTW89_DBG_RA, 2399 "ra cmd msk: %llx ", ra->ra_mask); 2400 2401 h2c->w0 = le32_encode_bits(ra->mode_ctrl, RTW89_H2C_RA_W0_MODE) | 2402 le32_encode_bits(ra->bw_cap, RTW89_H2C_RA_W0_BW_CAP) | 2403 le32_encode_bits(ra->macid, RTW89_H2C_RA_W0_MACID) | 2404 le32_encode_bits(ra->dcm_cap, RTW89_H2C_RA_W0_DCM) | 2405 le32_encode_bits(ra->er_cap, RTW89_H2C_RA_W0_ER) | 2406 le32_encode_bits(ra->init_rate_lv, RTW89_H2C_RA_W0_INIT_RATE_LV) | 2407 le32_encode_bits(ra->upd_all, RTW89_H2C_RA_W0_UPD_ALL) | 2408 le32_encode_bits(ra->en_sgi, RTW89_H2C_RA_W0_SGI) | 2409 le32_encode_bits(ra->ldpc_cap, RTW89_H2C_RA_W0_LDPC) | 2410 le32_encode_bits(ra->stbc_cap, RTW89_H2C_RA_W0_STBC) | 2411 le32_encode_bits(ra->ss_num, RTW89_H2C_RA_W0_SS_NUM) | 2412 le32_encode_bits(ra->giltf, RTW89_H2C_RA_W0_GILTF) | 2413 le32_encode_bits(ra->upd_bw_nss_mask, RTW89_H2C_RA_W0_UPD_BW_NSS_MASK) | 2414 le32_encode_bits(ra->upd_mask, RTW89_H2C_RA_W0_UPD_MASK); 2415 h2c->w1 = le32_encode_bits(ra->ra_mask, RTW89_H2C_RA_W1_RAMASK_LO32); 2416 h2c->w2 = le32_encode_bits(ra->ra_mask >> 32, RTW89_H2C_RA_W2_RAMASK_HI32); 2417 h2c->w3 = le32_encode_bits(ra->fix_giltf_en, RTW89_H2C_RA_W3_FIX_GILTF_EN) | 2418 le32_encode_bits(ra->fix_giltf, RTW89_H2C_RA_W3_FIX_GILTF); 2419 2420 if (!format_v1) 2421 goto csi; 2422 2423 h2c_v1 = (struct rtw89_h2c_ra_v1 *)h2c; 2424 h2c_v1->w4 = le32_encode_bits(ra->mode_ctrl, RTW89_H2C_RA_V1_W4_MODE_EHT) | 2425 le32_encode_bits(ra->bw_cap, RTW89_H2C_RA_V1_W4_BW_EHT); 2426 2427 csi: 2428 if (!csi) 2429 goto done; 2430 2431 h2c->w2 |= le32_encode_bits(1, RTW89_H2C_RA_W2_BFEE_CSI_CTL); 2432 h2c->w3 |= le32_encode_bits(ra->band_num, RTW89_H2C_RA_W3_BAND_NUM) | 2433 le32_encode_bits(ra->cr_tbl_sel, RTW89_H2C_RA_W3_CR_TBL_SEL) | 2434 le32_encode_bits(ra->fixed_csi_rate_en, RTW89_H2C_RA_W3_FIXED_CSI_RATE_EN) | 2435 le32_encode_bits(ra->ra_csi_rate_en, RTW89_H2C_RA_W3_RA_CSI_RATE_EN) | 2436 le32_encode_bits(ra->csi_mcs_ss_idx, RTW89_H2C_RA_W3_FIXED_CSI_MCS_SS_IDX) | 2437 le32_encode_bits(ra->csi_mode, RTW89_H2C_RA_W3_FIXED_CSI_MODE) | 2438 le32_encode_bits(ra->csi_gi_ltf, RTW89_H2C_RA_W3_FIXED_CSI_GI_LTF) | 2439 le32_encode_bits(ra->csi_bw, RTW89_H2C_RA_W3_FIXED_CSI_BW); 2440 2441 done: 2442 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2443 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RA, 2444 H2C_FUNC_OUTSRC_RA_MACIDCFG, 0, 0, 2445 len); 2446 2447 ret = rtw89_h2c_tx(rtwdev, skb, false); 2448 if (ret) { 2449 rtw89_err(rtwdev, "failed 
to send h2c\n"); 2450 goto fail; 2451 } 2452 2453 return 0; 2454 fail: 2455 dev_kfree_skb_any(skb); 2456 2457 return ret; 2458 } 2459 2460 int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev) 2461 { 2462 struct rtw89_btc *btc = &rtwdev->btc; 2463 struct rtw89_btc_dm *dm = &btc->dm; 2464 struct rtw89_btc_init_info *init_info = &dm->init_info; 2465 struct rtw89_btc_module *module = &init_info->module; 2466 struct rtw89_btc_ant_info *ant = &module->ant; 2467 struct rtw89_h2c_cxinit *h2c; 2468 u32 len = sizeof(*h2c); 2469 struct sk_buff *skb; 2470 int ret; 2471 2472 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2473 if (!skb) { 2474 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init\n"); 2475 return -ENOMEM; 2476 } 2477 skb_put(skb, len); 2478 h2c = (struct rtw89_h2c_cxinit *)skb->data; 2479 2480 h2c->hdr.type = CXDRVINFO_INIT; 2481 h2c->hdr.len = len - H2C_LEN_CXDRVHDR; 2482 2483 h2c->ant_type = ant->type; 2484 h2c->ant_num = ant->num; 2485 h2c->ant_iso = ant->isolation; 2486 h2c->ant_info = 2487 u8_encode_bits(ant->single_pos, RTW89_H2C_CXINIT_ANT_INFO_POS) | 2488 u8_encode_bits(ant->diversity, RTW89_H2C_CXINIT_ANT_INFO_DIVERSITY) | 2489 u8_encode_bits(ant->btg_pos, RTW89_H2C_CXINIT_ANT_INFO_BTG_POS) | 2490 u8_encode_bits(ant->stream_cnt, RTW89_H2C_CXINIT_ANT_INFO_STREAM_CNT); 2491 2492 h2c->mod_rfe = module->rfe_type; 2493 h2c->mod_cv = module->cv; 2494 h2c->mod_info = 2495 u8_encode_bits(module->bt_solo, RTW89_H2C_CXINIT_MOD_INFO_BT_SOLO) | 2496 u8_encode_bits(module->bt_pos, RTW89_H2C_CXINIT_MOD_INFO_BT_POS) | 2497 u8_encode_bits(module->switch_type, RTW89_H2C_CXINIT_MOD_INFO_SW_TYPE) | 2498 u8_encode_bits(module->wa_type, RTW89_H2C_CXINIT_MOD_INFO_WA_TYPE); 2499 h2c->mod_adie_kt = module->kt_ver_adie; 2500 h2c->wl_gch = init_info->wl_guard_ch; 2501 2502 h2c->info = 2503 u8_encode_bits(init_info->wl_only, RTW89_H2C_CXINIT_INFO_WL_ONLY) | 2504 u8_encode_bits(init_info->wl_init_ok, RTW89_H2C_CXINIT_INFO_WL_INITOK) | 2505 u8_encode_bits(init_info->dbcc_en, RTW89_H2C_CXINIT_INFO_DBCC_EN) | 2506 u8_encode_bits(init_info->cx_other, RTW89_H2C_CXINIT_INFO_CX_OTHER) | 2507 u8_encode_bits(init_info->bt_only, RTW89_H2C_CXINIT_INFO_BT_ONLY); 2508 2509 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2510 H2C_CAT_OUTSRC, BTFC_SET, 2511 SET_DRV_INFO, 0, 0, 2512 len); 2513 2514 ret = rtw89_h2c_tx(rtwdev, skb, false); 2515 if (ret) { 2516 rtw89_err(rtwdev, "failed to send h2c\n"); 2517 goto fail; 2518 } 2519 2520 return 0; 2521 fail: 2522 dev_kfree_skb_any(skb); 2523 2524 return ret; 2525 } 2526 2527 #define PORT_DATA_OFFSET 4 2528 #define H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN 12 2529 #define H2C_LEN_CXDRVINFO_ROLE_SIZE(max_role_num) \ 2530 (4 + 12 * (max_role_num) + H2C_LEN_CXDRVHDR) 2531 2532 int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev) 2533 { 2534 struct rtw89_btc *btc = &rtwdev->btc; 2535 const struct rtw89_btc_ver *ver = btc->ver; 2536 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 2537 struct rtw89_btc_wl_role_info *role_info = &wl->role_info; 2538 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 2539 struct rtw89_btc_wl_active_role *active = role_info->active_role; 2540 struct sk_buff *skb; 2541 u32 len; 2542 u8 offset = 0; 2543 u8 *cmd; 2544 int ret; 2545 int i; 2546 2547 len = H2C_LEN_CXDRVINFO_ROLE_SIZE(ver->max_role_num); 2548 2549 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2550 if (!skb) { 2551 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 2552 return -ENOMEM; 2553 } 2554 skb_put(skb, len); 2555 cmd = skb->data; 2556 2557 
RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE); 2558 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR); 2559 2560 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 2561 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 2562 2563 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 2564 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 2565 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 2566 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 2567 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 2568 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 2569 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 2570 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 2571 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 2572 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 2573 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 2574 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 2575 2576 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 2577 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset); 2578 RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset); 2579 RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset); 2580 RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset); 2581 RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset); 2582 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset); 2583 RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset); 2584 RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset); 2585 RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset); 2586 RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset); 2587 RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset); 2588 RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset); 2589 RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset); 2590 } 2591 2592 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2593 H2C_CAT_OUTSRC, BTFC_SET, 2594 SET_DRV_INFO, 0, 0, 2595 len); 2596 2597 ret = rtw89_h2c_tx(rtwdev, skb, false); 2598 if (ret) { 2599 rtw89_err(rtwdev, "failed to send h2c\n"); 2600 goto fail; 2601 } 2602 2603 return 0; 2604 fail: 2605 dev_kfree_skb_any(skb); 2606 2607 return ret; 2608 } 2609 2610 #define H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(max_role_num) \ 2611 (4 + 16 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR) 2612 2613 int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev) 2614 { 2615 struct rtw89_btc *btc = &rtwdev->btc; 2616 const struct rtw89_btc_ver *ver = btc->ver; 2617 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 2618 struct rtw89_btc_wl_role_info_v1 *role_info = &wl->role_info_v1; 2619 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 2620 struct rtw89_btc_wl_active_role_v1 *active = role_info->active_role_v1; 2621 struct sk_buff *skb; 2622 u32 len; 2623 u8 *cmd, offset; 2624 int ret; 2625 int i; 2626 2627 len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(ver->max_role_num); 2628 2629 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2630 if (!skb) { 2631 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 2632 return -ENOMEM; 2633 } 2634 skb_put(skb, len); 2635 cmd = skb->data; 2636 2637 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE); 2638 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR); 2639 2640 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 2641 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 2642 2643 
RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 2644 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 2645 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 2646 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 2647 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 2648 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 2649 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 2650 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 2651 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 2652 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 2653 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 2654 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 2655 2656 offset = PORT_DATA_OFFSET; 2657 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 2658 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset); 2659 RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset); 2660 RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset); 2661 RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset); 2662 RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset); 2663 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset); 2664 RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset); 2665 RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset); 2666 RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset); 2667 RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset); 2668 RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset); 2669 RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset); 2670 RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset); 2671 RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR(cmd, active->noa_duration, i, offset); 2672 } 2673 2674 offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN; 2675 RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset); 2676 RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset); 2677 RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset); 2678 RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset); 2679 RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset); 2680 RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset); 2681 2682 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2683 H2C_CAT_OUTSRC, BTFC_SET, 2684 SET_DRV_INFO, 0, 0, 2685 len); 2686 2687 ret = rtw89_h2c_tx(rtwdev, skb, false); 2688 if (ret) { 2689 rtw89_err(rtwdev, "failed to send h2c\n"); 2690 goto fail; 2691 } 2692 2693 return 0; 2694 fail: 2695 dev_kfree_skb_any(skb); 2696 2697 return ret; 2698 } 2699 2700 #define H2C_LEN_CXDRVINFO_ROLE_SIZE_V2(max_role_num) \ 2701 (4 + 8 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR) 2702 2703 int rtw89_fw_h2c_cxdrv_role_v2(struct rtw89_dev *rtwdev) 2704 { 2705 struct rtw89_btc *btc = &rtwdev->btc; 2706 const struct rtw89_btc_ver *ver = btc->ver; 2707 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 2708 struct rtw89_btc_wl_role_info_v2 *role_info = &wl->role_info_v2; 2709 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 2710 struct rtw89_btc_wl_active_role_v2 *active = role_info->active_role_v2; 2711 struct sk_buff *skb; 2712 u32 len; 2713 u8 *cmd, offset; 2714 int ret; 2715 int i; 2716 2717 len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V2(ver->max_role_num); 2718 2719 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2720 if (!skb) { 2721 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 2722 
return -ENOMEM; 2723 } 2724 skb_put(skb, len); 2725 cmd = skb->data; 2726 2727 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE); 2728 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR); 2729 2730 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 2731 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 2732 2733 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 2734 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 2735 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 2736 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 2737 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 2738 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 2739 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 2740 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 2741 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 2742 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 2743 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 2744 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 2745 2746 offset = PORT_DATA_OFFSET; 2747 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 2748 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED_V2(cmd, active->connected, i, offset); 2749 RTW89_SET_FWCMD_CXROLE_ACT_PID_V2(cmd, active->pid, i, offset); 2750 RTW89_SET_FWCMD_CXROLE_ACT_PHY_V2(cmd, active->phy, i, offset); 2751 RTW89_SET_FWCMD_CXROLE_ACT_NOA_V2(cmd, active->noa, i, offset); 2752 RTW89_SET_FWCMD_CXROLE_ACT_BAND_V2(cmd, active->band, i, offset); 2753 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS_V2(cmd, active->client_ps, i, offset); 2754 RTW89_SET_FWCMD_CXROLE_ACT_BW_V2(cmd, active->bw, i, offset); 2755 RTW89_SET_FWCMD_CXROLE_ACT_ROLE_V2(cmd, active->role, i, offset); 2756 RTW89_SET_FWCMD_CXROLE_ACT_CH_V2(cmd, active->ch, i, offset); 2757 RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR_V2(cmd, active->noa_duration, i, offset); 2758 } 2759 2760 offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN; 2761 RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset); 2762 RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset); 2763 RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset); 2764 RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset); 2765 RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset); 2766 RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset); 2767 2768 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2769 H2C_CAT_OUTSRC, BTFC_SET, 2770 SET_DRV_INFO, 0, 0, 2771 len); 2772 2773 ret = rtw89_h2c_tx(rtwdev, skb, false); 2774 if (ret) { 2775 rtw89_err(rtwdev, "failed to send h2c\n"); 2776 goto fail; 2777 } 2778 2779 return 0; 2780 fail: 2781 dev_kfree_skb_any(skb); 2782 2783 return ret; 2784 } 2785 2786 #define H2C_LEN_CXDRVINFO_CTRL (4 + H2C_LEN_CXDRVHDR) 2787 int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev) 2788 { 2789 struct rtw89_btc *btc = &rtwdev->btc; 2790 const struct rtw89_btc_ver *ver = btc->ver; 2791 struct rtw89_btc_ctrl *ctrl = &btc->ctrl; 2792 struct sk_buff *skb; 2793 u8 *cmd; 2794 int ret; 2795 2796 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_CTRL); 2797 if (!skb) { 2798 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 2799 return -ENOMEM; 2800 } 2801 skb_put(skb, H2C_LEN_CXDRVINFO_CTRL); 2802 cmd = skb->data; 2803 2804 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_CTRL); 2805 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_CTRL - H2C_LEN_CXDRVHDR); 2806 2807 RTW89_SET_FWCMD_CXCTRL_MANUAL(cmd, ctrl->manual); 2808 
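/* remaining control knobs: ignore BT requests, always freerun, and,
 * only for the v0 fcxctrl layout, the coex trace step
 */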
RTW89_SET_FWCMD_CXCTRL_IGNORE_BT(cmd, ctrl->igno_bt);
2809 RTW89_SET_FWCMD_CXCTRL_ALWAYS_FREERUN(cmd, ctrl->always_freerun);
2810 if (ver->fcxctrl == 0)
2811 RTW89_SET_FWCMD_CXCTRL_TRACE_STEP(cmd, ctrl->trace_step);
2812 
2813 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2814 H2C_CAT_OUTSRC, BTFC_SET,
2815 SET_DRV_INFO, 0, 0,
2816 H2C_LEN_CXDRVINFO_CTRL);
2817 
2818 ret = rtw89_h2c_tx(rtwdev, skb, false);
2819 if (ret) {
2820 rtw89_err(rtwdev, "failed to send h2c\n");
2821 goto fail;
2822 }
2823 
2824 return 0;
2825 fail:
2826 dev_kfree_skb_any(skb);
2827 
2828 return ret;
2829 }
2830 
2831 #define H2C_LEN_CXDRVINFO_TRX (28 + H2C_LEN_CXDRVHDR)
2832 int rtw89_fw_h2c_cxdrv_trx(struct rtw89_dev *rtwdev)
2833 {
2834 struct rtw89_btc *btc = &rtwdev->btc;
2835 struct rtw89_btc_trx_info *trx = &btc->dm.trx_info;
2836 struct sk_buff *skb;
2837 u8 *cmd;
2838 int ret;
2839 
2840 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_TRX);
2841 if (!skb) {
2842 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_trx\n");
2843 return -ENOMEM;
2844 }
2845 skb_put(skb, H2C_LEN_CXDRVINFO_TRX);
2846 cmd = skb->data;
2847 
2848 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_TRX);
2849 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_TRX - H2C_LEN_CXDRVHDR);
2850 
2851 RTW89_SET_FWCMD_CXTRX_TXLV(cmd, trx->tx_lvl);
2852 RTW89_SET_FWCMD_CXTRX_RXLV(cmd, trx->rx_lvl);
2853 RTW89_SET_FWCMD_CXTRX_WLRSSI(cmd, trx->wl_rssi);
2854 RTW89_SET_FWCMD_CXTRX_BTRSSI(cmd, trx->bt_rssi);
2855 RTW89_SET_FWCMD_CXTRX_TXPWR(cmd, trx->tx_power);
2856 RTW89_SET_FWCMD_CXTRX_RXGAIN(cmd, trx->rx_gain);
2857 RTW89_SET_FWCMD_CXTRX_BTTXPWR(cmd, trx->bt_tx_power);
2858 RTW89_SET_FWCMD_CXTRX_BTRXGAIN(cmd, trx->bt_rx_gain);
2859 RTW89_SET_FWCMD_CXTRX_CN(cmd, trx->cn);
2860 RTW89_SET_FWCMD_CXTRX_NHM(cmd, trx->nhm);
2861 RTW89_SET_FWCMD_CXTRX_BTPROFILE(cmd, trx->bt_profile);
2862 RTW89_SET_FWCMD_CXTRX_RSVD2(cmd, trx->rsvd2);
2863 RTW89_SET_FWCMD_CXTRX_TXRATE(cmd, trx->tx_rate);
2864 RTW89_SET_FWCMD_CXTRX_RXRATE(cmd, trx->rx_rate);
2865 RTW89_SET_FWCMD_CXTRX_TXTP(cmd, trx->tx_tp);
2866 RTW89_SET_FWCMD_CXTRX_RXTP(cmd, trx->rx_tp);
2867 RTW89_SET_FWCMD_CXTRX_RXERRRA(cmd, trx->rx_err_ratio);
2868 
2869 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2870 H2C_CAT_OUTSRC, BTFC_SET,
2871 SET_DRV_INFO, 0, 0,
2872 H2C_LEN_CXDRVINFO_TRX);
2873 
2874 ret = rtw89_h2c_tx(rtwdev, skb, false);
2875 if (ret) {
2876 rtw89_err(rtwdev, "failed to send h2c\n");
2877 goto fail;
2878 }
2879 
2880 return 0;
2881 fail:
2882 dev_kfree_skb_any(skb);
2883 
2884 return ret;
2885 }
2886 
2887 #define H2C_LEN_CXDRVINFO_RFK (4 + H2C_LEN_CXDRVHDR)
2888 int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev)
2889 {
2890 struct rtw89_btc *btc = &rtwdev->btc;
2891 struct rtw89_btc_wl_info *wl = &btc->cx.wl;
2892 struct rtw89_btc_wl_rfk_info *rfk_info = &wl->rfk_info;
2893 struct sk_buff *skb;
2894 u8 *cmd;
2895 int ret;
2896 
2897 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_RFK);
2898 if (!skb) {
2899 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_rfk\n");
2900 return -ENOMEM;
2901 }
2902 skb_put(skb, H2C_LEN_CXDRVINFO_RFK);
2903 cmd = skb->data;
2904 
2905 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_RFK);
2906 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_RFK - H2C_LEN_CXDRVHDR);
2907 
2908 RTW89_SET_FWCMD_CXRFK_STATE(cmd, rfk_info->state);
2909 RTW89_SET_FWCMD_CXRFK_PATH_MAP(cmd, rfk_info->path_map);
2910 RTW89_SET_FWCMD_CXRFK_PHY_MAP(cmd, rfk_info->phy_map);
2911 RTW89_SET_FWCMD_CXRFK_BAND(cmd, rfk_info->band);
2912 RTW89_SET_FWCMD_CXRFK_TYPE(cmd,
rfk_info->type); 2913 2914 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2915 H2C_CAT_OUTSRC, BTFC_SET, 2916 SET_DRV_INFO, 0, 0, 2917 H2C_LEN_CXDRVINFO_RFK); 2918 2919 ret = rtw89_h2c_tx(rtwdev, skb, false); 2920 if (ret) { 2921 rtw89_err(rtwdev, "failed to send h2c\n"); 2922 goto fail; 2923 } 2924 2925 return 0; 2926 fail: 2927 dev_kfree_skb_any(skb); 2928 2929 return ret; 2930 } 2931 2932 #define H2C_LEN_PKT_OFLD 4 2933 int rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev *rtwdev, u8 id) 2934 { 2935 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 2936 struct sk_buff *skb; 2937 unsigned int cond; 2938 u8 *cmd; 2939 int ret; 2940 2941 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD); 2942 if (!skb) { 2943 rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n"); 2944 return -ENOMEM; 2945 } 2946 skb_put(skb, H2C_LEN_PKT_OFLD); 2947 cmd = skb->data; 2948 2949 RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, id); 2950 RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_DEL); 2951 2952 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2953 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 2954 H2C_FUNC_PACKET_OFLD, 1, 1, 2955 H2C_LEN_PKT_OFLD); 2956 2957 cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(id, RTW89_PKT_OFLD_OP_DEL); 2958 2959 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 2960 if (ret < 0) { 2961 rtw89_debug(rtwdev, RTW89_DBG_FW, 2962 "failed to del pkt ofld: id %d, ret %d\n", 2963 id, ret); 2964 return ret; 2965 } 2966 2967 rtw89_core_release_bit_map(rtwdev->pkt_offload, id); 2968 return 0; 2969 } 2970 2971 int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id, 2972 struct sk_buff *skb_ofld) 2973 { 2974 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 2975 struct sk_buff *skb; 2976 unsigned int cond; 2977 u8 *cmd; 2978 u8 alloc_id; 2979 int ret; 2980 2981 alloc_id = rtw89_core_acquire_bit_map(rtwdev->pkt_offload, 2982 RTW89_MAX_PKT_OFLD_NUM); 2983 if (alloc_id == RTW89_MAX_PKT_OFLD_NUM) 2984 return -ENOSPC; 2985 2986 *id = alloc_id; 2987 2988 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD + skb_ofld->len); 2989 if (!skb) { 2990 rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n"); 2991 rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id); 2992 return -ENOMEM; 2993 } 2994 skb_put(skb, H2C_LEN_PKT_OFLD); 2995 cmd = skb->data; 2996 2997 RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, alloc_id); 2998 RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_ADD); 2999 RTW89_SET_FWCMD_PACKET_OFLD_PKT_LENGTH(cmd, skb_ofld->len); 3000 skb_put_data(skb, skb_ofld->data, skb_ofld->len); 3001 3002 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3003 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 3004 H2C_FUNC_PACKET_OFLD, 1, 1, 3005 H2C_LEN_PKT_OFLD + skb_ofld->len); 3006 3007 cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(alloc_id, RTW89_PKT_OFLD_OP_ADD); 3008 3009 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 3010 if (ret < 0) { 3011 rtw89_debug(rtwdev, RTW89_DBG_FW, 3012 "failed to add pkt ofld: id %d, ret %d\n", 3013 alloc_id, ret); 3014 rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id); 3015 return ret; 3016 } 3017 3018 return 0; 3019 } 3020 3021 #define H2C_LEN_SCAN_LIST_OFFLOAD 4 3022 int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int len, 3023 struct list_head *chan_list) 3024 { 3025 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 3026 struct rtw89_mac_chinfo *ch_info; 3027 struct sk_buff *skb; 3028 int skb_len = H2C_LEN_SCAN_LIST_OFFLOAD + len * RTW89_MAC_CHINFO_SIZE; 3029 unsigned int cond; 3030 u8 
*cmd; 3031 int ret; 3032 3033 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len); 3034 if (!skb) { 3035 rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n"); 3036 return -ENOMEM; 3037 } 3038 skb_put(skb, H2C_LEN_SCAN_LIST_OFFLOAD); 3039 cmd = skb->data; 3040 3041 RTW89_SET_FWCMD_SCANOFLD_CH_NUM(cmd, len); 3042 /* in unit of 4 bytes */ 3043 RTW89_SET_FWCMD_SCANOFLD_CH_SIZE(cmd, RTW89_MAC_CHINFO_SIZE / 4); 3044 3045 list_for_each_entry(ch_info, chan_list, list) { 3046 cmd = skb_put(skb, RTW89_MAC_CHINFO_SIZE); 3047 3048 RTW89_SET_FWCMD_CHINFO_PERIOD(cmd, ch_info->period); 3049 RTW89_SET_FWCMD_CHINFO_DWELL(cmd, ch_info->dwell_time); 3050 RTW89_SET_FWCMD_CHINFO_CENTER_CH(cmd, ch_info->central_ch); 3051 RTW89_SET_FWCMD_CHINFO_PRI_CH(cmd, ch_info->pri_ch); 3052 RTW89_SET_FWCMD_CHINFO_BW(cmd, ch_info->bw); 3053 RTW89_SET_FWCMD_CHINFO_ACTION(cmd, ch_info->notify_action); 3054 RTW89_SET_FWCMD_CHINFO_NUM_PKT(cmd, ch_info->num_pkt); 3055 RTW89_SET_FWCMD_CHINFO_TX(cmd, ch_info->tx_pkt); 3056 RTW89_SET_FWCMD_CHINFO_PAUSE_DATA(cmd, ch_info->pause_data); 3057 RTW89_SET_FWCMD_CHINFO_BAND(cmd, ch_info->ch_band); 3058 RTW89_SET_FWCMD_CHINFO_PKT_ID(cmd, ch_info->probe_id); 3059 RTW89_SET_FWCMD_CHINFO_DFS(cmd, ch_info->dfs_ch); 3060 RTW89_SET_FWCMD_CHINFO_TX_NULL(cmd, ch_info->tx_null); 3061 RTW89_SET_FWCMD_CHINFO_RANDOM(cmd, ch_info->rand_seq_num); 3062 RTW89_SET_FWCMD_CHINFO_PKT0(cmd, ch_info->pkt_id[0]); 3063 RTW89_SET_FWCMD_CHINFO_PKT1(cmd, ch_info->pkt_id[1]); 3064 RTW89_SET_FWCMD_CHINFO_PKT2(cmd, ch_info->pkt_id[2]); 3065 RTW89_SET_FWCMD_CHINFO_PKT3(cmd, ch_info->pkt_id[3]); 3066 RTW89_SET_FWCMD_CHINFO_PKT4(cmd, ch_info->pkt_id[4]); 3067 RTW89_SET_FWCMD_CHINFO_PKT5(cmd, ch_info->pkt_id[5]); 3068 RTW89_SET_FWCMD_CHINFO_PKT6(cmd, ch_info->pkt_id[6]); 3069 RTW89_SET_FWCMD_CHINFO_PKT7(cmd, ch_info->pkt_id[7]); 3070 } 3071 3072 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3073 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 3074 H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len); 3075 3076 cond = RTW89_FW_OFLD_WAIT_COND(0, H2C_FUNC_ADD_SCANOFLD_CH); 3077 3078 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 3079 if (ret) { 3080 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to add scan ofld ch\n"); 3081 return ret; 3082 } 3083 3084 return 0; 3085 } 3086 3087 int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev, 3088 struct rtw89_scan_option *option, 3089 struct rtw89_vif *rtwvif) 3090 { 3091 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 3092 struct rtw89_chan *op = &rtwdev->scan_info.op_chan; 3093 struct rtw89_h2c_scanofld *h2c; 3094 u32 len = sizeof(*h2c); 3095 struct sk_buff *skb; 3096 unsigned int cond; 3097 int ret; 3098 3099 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3100 if (!skb) { 3101 rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n"); 3102 return -ENOMEM; 3103 } 3104 skb_put(skb, len); 3105 h2c = (struct rtw89_h2c_scanofld *)skb->data; 3106 3107 h2c->w0 = le32_encode_bits(rtwvif->mac_id, RTW89_H2C_SCANOFLD_W0_MACID) | 3108 le32_encode_bits(rtwvif->port, RTW89_H2C_SCANOFLD_W0_PORT_ID) | 3109 le32_encode_bits(RTW89_PHY_0, RTW89_H2C_SCANOFLD_W0_BAND) | 3110 le32_encode_bits(option->enable, RTW89_H2C_SCANOFLD_W0_OPERATION); 3111 3112 h2c->w1 = le32_encode_bits(true, RTW89_H2C_SCANOFLD_W1_NOTIFY_END) | 3113 le32_encode_bits(option->target_ch_mode, 3114 RTW89_H2C_SCANOFLD_W1_TARGET_CH_MODE) | 3115 le32_encode_bits(RTW89_SCAN_IMMEDIATE, 3116 RTW89_H2C_SCANOFLD_W1_START_MODE) | 3117 le32_encode_bits(RTW89_SCAN_ONCE, RTW89_H2C_SCANOFLD_W1_SCAN_TYPE); 3118 3119 if 
(option->target_ch_mode) { 3120 h2c->w1 |= le32_encode_bits(op->band_width, 3121 RTW89_H2C_SCANOFLD_W1_TARGET_CH_BW) | 3122 le32_encode_bits(op->primary_channel, 3123 RTW89_H2C_SCANOFLD_W1_TARGET_PRI_CH) | 3124 le32_encode_bits(op->channel, 3125 RTW89_H2C_SCANOFLD_W1_TARGET_CENTRAL_CH); 3126 h2c->w0 |= le32_encode_bits(op->band_type, 3127 RTW89_H2C_SCANOFLD_W0_TARGET_CH_BAND); 3128 } 3129 3130 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3131 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 3132 H2C_FUNC_SCANOFLD, 1, 1, 3133 len); 3134 3135 cond = RTW89_FW_OFLD_WAIT_COND(0, H2C_FUNC_SCANOFLD); 3136 3137 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 3138 if (ret) { 3139 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to scan ofld\n"); 3140 return ret; 3141 } 3142 3143 return 0; 3144 } 3145 3146 int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev, 3147 struct rtw89_fw_h2c_rf_reg_info *info, 3148 u16 len, u8 page) 3149 { 3150 struct sk_buff *skb; 3151 u8 class = info->rf_path == RF_PATH_A ? 3152 H2C_CL_OUTSRC_RF_REG_A : H2C_CL_OUTSRC_RF_REG_B; 3153 int ret; 3154 3155 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3156 if (!skb) { 3157 rtw89_err(rtwdev, "failed to alloc skb for h2c rf reg\n"); 3158 return -ENOMEM; 3159 } 3160 skb_put_data(skb, info->rtw89_phy_config_rf_h2c[page], len); 3161 3162 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3163 H2C_CAT_OUTSRC, class, page, 0, 0, 3164 len); 3165 3166 ret = rtw89_h2c_tx(rtwdev, skb, false); 3167 if (ret) { 3168 rtw89_err(rtwdev, "failed to send h2c\n"); 3169 goto fail; 3170 } 3171 3172 return 0; 3173 fail: 3174 dev_kfree_skb_any(skb); 3175 3176 return ret; 3177 } 3178 3179 int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev) 3180 { 3181 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 3182 struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc; 3183 struct rtw89_fw_h2c_rf_get_mccch *mccch; 3184 struct sk_buff *skb; 3185 int ret; 3186 3187 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, sizeof(*mccch)); 3188 if (!skb) { 3189 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 3190 return -ENOMEM; 3191 } 3192 skb_put(skb, sizeof(*mccch)); 3193 mccch = (struct rtw89_fw_h2c_rf_get_mccch *)skb->data; 3194 3195 mccch->ch_0 = cpu_to_le32(rfk_mcc->ch[0]); 3196 mccch->ch_1 = cpu_to_le32(rfk_mcc->ch[1]); 3197 mccch->band_0 = cpu_to_le32(rfk_mcc->band[0]); 3198 mccch->band_1 = cpu_to_le32(rfk_mcc->band[1]); 3199 mccch->current_channel = cpu_to_le32(chan->channel); 3200 mccch->current_band_type = cpu_to_le32(chan->band_type); 3201 3202 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3203 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_NOTIFY, 3204 H2C_FUNC_OUTSRC_RF_GET_MCCCH, 0, 0, 3205 sizeof(*mccch)); 3206 3207 ret = rtw89_h2c_tx(rtwdev, skb, false); 3208 if (ret) { 3209 rtw89_err(rtwdev, "failed to send h2c\n"); 3210 goto fail; 3211 } 3212 3213 return 0; 3214 fail: 3215 dev_kfree_skb_any(skb); 3216 3217 return ret; 3218 } 3219 EXPORT_SYMBOL(rtw89_fw_h2c_rf_ntfy_mcc); 3220 3221 int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev, 3222 u8 h2c_class, u8 h2c_func, u8 *buf, u16 len, 3223 bool rack, bool dack) 3224 { 3225 struct sk_buff *skb; 3226 int ret; 3227 3228 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3229 if (!skb) { 3230 rtw89_err(rtwdev, "failed to alloc skb for raw with hdr\n"); 3231 return -ENOMEM; 3232 } 3233 skb_put_data(skb, buf, len); 3234 3235 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3236 H2C_CAT_OUTSRC, h2c_class, h2c_func, rack, dack, 3237 len); 3238 3239 ret = rtw89_h2c_tx(rtwdev, skb, 
false); 3240 if (ret) { 3241 rtw89_err(rtwdev, "failed to send h2c\n"); 3242 goto fail; 3243 } 3244 3245 return 0; 3246 fail: 3247 dev_kfree_skb_any(skb); 3248 3249 return ret; 3250 } 3251 3252 int rtw89_fw_h2c_raw(struct rtw89_dev *rtwdev, const u8 *buf, u16 len) 3253 { 3254 struct sk_buff *skb; 3255 int ret; 3256 3257 skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, len); 3258 if (!skb) { 3259 rtw89_err(rtwdev, "failed to alloc skb for h2c raw\n"); 3260 return -ENOMEM; 3261 } 3262 skb_put_data(skb, buf, len); 3263 3264 ret = rtw89_h2c_tx(rtwdev, skb, false); 3265 if (ret) { 3266 rtw89_err(rtwdev, "failed to send h2c\n"); 3267 goto fail; 3268 } 3269 3270 return 0; 3271 fail: 3272 dev_kfree_skb_any(skb); 3273 3274 return ret; 3275 } 3276 3277 void rtw89_fw_send_all_early_h2c(struct rtw89_dev *rtwdev) 3278 { 3279 struct rtw89_early_h2c *early_h2c; 3280 3281 lockdep_assert_held(&rtwdev->mutex); 3282 3283 list_for_each_entry(early_h2c, &rtwdev->early_h2c_list, list) { 3284 rtw89_fw_h2c_raw(rtwdev, early_h2c->h2c, early_h2c->h2c_len); 3285 } 3286 } 3287 3288 void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev) 3289 { 3290 struct rtw89_early_h2c *early_h2c, *tmp; 3291 3292 mutex_lock(&rtwdev->mutex); 3293 list_for_each_entry_safe(early_h2c, tmp, &rtwdev->early_h2c_list, list) { 3294 list_del(&early_h2c->list); 3295 kfree(early_h2c->h2c); 3296 kfree(early_h2c); 3297 } 3298 mutex_unlock(&rtwdev->mutex); 3299 } 3300 3301 static void rtw89_fw_c2h_parse_attr(struct sk_buff *c2h) 3302 { 3303 const struct rtw89_c2h_hdr *hdr = (const struct rtw89_c2h_hdr *)c2h->data; 3304 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h); 3305 3306 attr->category = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_CATEGORY); 3307 attr->class = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_CLASS); 3308 attr->func = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_FUNC); 3309 attr->len = le32_get_bits(hdr->w1, RTW89_C2H_HDR_W1_LEN); 3310 } 3311 3312 static bool rtw89_fw_c2h_chk_atomic(struct rtw89_dev *rtwdev, 3313 struct sk_buff *c2h) 3314 { 3315 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h); 3316 u8 category = attr->category; 3317 u8 class = attr->class; 3318 u8 func = attr->func; 3319 3320 switch (category) { 3321 default: 3322 return false; 3323 case RTW89_C2H_CAT_MAC: 3324 return rtw89_mac_c2h_chk_atomic(rtwdev, class, func); 3325 } 3326 } 3327 3328 void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h) 3329 { 3330 rtw89_fw_c2h_parse_attr(c2h); 3331 if (!rtw89_fw_c2h_chk_atomic(rtwdev, c2h)) 3332 goto enqueue; 3333 3334 rtw89_fw_c2h_cmd_handle(rtwdev, c2h); 3335 dev_kfree_skb_any(c2h); 3336 return; 3337 3338 enqueue: 3339 skb_queue_tail(&rtwdev->c2h_queue, c2h); 3340 ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work); 3341 } 3342 3343 static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev, 3344 struct sk_buff *skb) 3345 { 3346 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(skb); 3347 u8 category = attr->category; 3348 u8 class = attr->class; 3349 u8 func = attr->func; 3350 u16 len = attr->len; 3351 bool dump = true; 3352 3353 if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags)) 3354 return; 3355 3356 switch (category) { 3357 case RTW89_C2H_CAT_TEST: 3358 break; 3359 case RTW89_C2H_CAT_MAC: 3360 rtw89_mac_c2h_handle(rtwdev, skb, len, class, func); 3361 if (class == RTW89_MAC_C2H_CLASS_INFO && 3362 func == RTW89_MAC_C2H_FUNC_C2H_LOG) 3363 dump = false; 3364 break; 3365 case RTW89_C2H_CAT_OUTSRC: 3366 if (class >= RTW89_PHY_C2H_CLASS_BTC_MIN && 3367 class <= RTW89_PHY_C2H_CLASS_BTC_MAX) 3368 
rtw89_btc_c2h_handle(rtwdev, skb, len, class, func); 3369 else 3370 rtw89_phy_c2h_handle(rtwdev, skb, len, class, func); 3371 break; 3372 } 3373 3374 if (dump) 3375 rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "C2H: ", skb->data, skb->len); 3376 } 3377 3378 void rtw89_fw_c2h_work(struct work_struct *work) 3379 { 3380 struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, 3381 c2h_work); 3382 struct sk_buff *skb, *tmp; 3383 3384 skb_queue_walk_safe(&rtwdev->c2h_queue, skb, tmp) { 3385 skb_unlink(skb, &rtwdev->c2h_queue); 3386 mutex_lock(&rtwdev->mutex); 3387 rtw89_fw_c2h_cmd_handle(rtwdev, skb); 3388 mutex_unlock(&rtwdev->mutex); 3389 dev_kfree_skb_any(skb); 3390 } 3391 } 3392 3393 static int rtw89_fw_write_h2c_reg(struct rtw89_dev *rtwdev, 3394 struct rtw89_mac_h2c_info *info) 3395 { 3396 const struct rtw89_chip_info *chip = rtwdev->chip; 3397 struct rtw89_fw_info *fw_info = &rtwdev->fw; 3398 const u32 *h2c_reg = chip->h2c_regs; 3399 u8 i, val, len; 3400 int ret; 3401 3402 ret = read_poll_timeout(rtw89_read8, val, val == 0, 1000, 5000, false, 3403 rtwdev, chip->h2c_ctrl_reg); 3404 if (ret) { 3405 rtw89_warn(rtwdev, "FW does not process h2c registers\n"); 3406 return ret; 3407 } 3408 3409 len = DIV_ROUND_UP(info->content_len + RTW89_H2CREG_HDR_LEN, 3410 sizeof(info->u.h2creg[0])); 3411 3412 u32p_replace_bits(&info->u.hdr.w0, info->id, RTW89_H2CREG_HDR_FUNC_MASK); 3413 u32p_replace_bits(&info->u.hdr.w0, len, RTW89_H2CREG_HDR_LEN_MASK); 3414 3415 for (i = 0; i < RTW89_H2CREG_MAX; i++) 3416 rtw89_write32(rtwdev, h2c_reg[i], info->u.h2creg[i]); 3417 3418 fw_info->h2c_counter++; 3419 rtw89_write8_mask(rtwdev, chip->h2c_counter_reg.addr, 3420 chip->h2c_counter_reg.mask, fw_info->h2c_counter); 3421 rtw89_write8(rtwdev, chip->h2c_ctrl_reg, B_AX_H2CREG_TRIGGER); 3422 3423 return 0; 3424 } 3425 3426 static int rtw89_fw_read_c2h_reg(struct rtw89_dev *rtwdev, 3427 struct rtw89_mac_c2h_info *info) 3428 { 3429 const struct rtw89_chip_info *chip = rtwdev->chip; 3430 struct rtw89_fw_info *fw_info = &rtwdev->fw; 3431 const u32 *c2h_reg = chip->c2h_regs; 3432 u32 ret; 3433 u8 i, val; 3434 3435 info->id = RTW89_FWCMD_C2HREG_FUNC_NULL; 3436 3437 ret = read_poll_timeout_atomic(rtw89_read8, val, val, 1, 3438 RTW89_C2H_TIMEOUT, false, rtwdev, 3439 chip->c2h_ctrl_reg); 3440 if (ret) { 3441 rtw89_warn(rtwdev, "c2h reg timeout\n"); 3442 return ret; 3443 } 3444 3445 for (i = 0; i < RTW89_C2HREG_MAX; i++) 3446 info->u.c2hreg[i] = rtw89_read32(rtwdev, c2h_reg[i]); 3447 3448 rtw89_write8(rtwdev, chip->c2h_ctrl_reg, 0); 3449 3450 info->id = u32_get_bits(info->u.hdr.w0, RTW89_C2HREG_HDR_FUNC_MASK); 3451 info->content_len = 3452 (u32_get_bits(info->u.hdr.w0, RTW89_C2HREG_HDR_LEN_MASK) << 2) - 3453 RTW89_C2HREG_HDR_LEN; 3454 3455 fw_info->c2h_counter++; 3456 rtw89_write8_mask(rtwdev, chip->c2h_counter_reg.addr, 3457 chip->c2h_counter_reg.mask, fw_info->c2h_counter); 3458 3459 return 0; 3460 } 3461 3462 int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev, 3463 struct rtw89_mac_h2c_info *h2c_info, 3464 struct rtw89_mac_c2h_info *c2h_info) 3465 { 3466 u32 ret; 3467 3468 if (h2c_info && h2c_info->id != RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE) 3469 lockdep_assert_held(&rtwdev->mutex); 3470 3471 if (!h2c_info && !c2h_info) 3472 return -EINVAL; 3473 3474 if (!h2c_info) 3475 goto recv_c2h; 3476 3477 ret = rtw89_fw_write_h2c_reg(rtwdev, h2c_info); 3478 if (ret) 3479 return ret; 3480 3481 recv_c2h: 3482 if (!c2h_info) 3483 return 0; 3484 3485 ret = rtw89_fw_read_c2h_reg(rtwdev, c2h_info); 3486 if (ret) 3487 return ret; 3488 3489 return 0; 
3490 } 3491 3492 void rtw89_fw_st_dbg_dump(struct rtw89_dev *rtwdev) 3493 { 3494 if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) { 3495 rtw89_err(rtwdev, "[ERR]pwr is off\n"); 3496 return; 3497 } 3498 3499 rtw89_info(rtwdev, "FW status = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM0)); 3500 rtw89_info(rtwdev, "FW BADADDR = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM1)); 3501 rtw89_info(rtwdev, "FW EPC/RA = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM2)); 3502 rtw89_info(rtwdev, "FW MISC = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM3)); 3503 rtw89_info(rtwdev, "R_AX_HALT_C2H = 0x%x\n", 3504 rtw89_read32(rtwdev, R_AX_HALT_C2H)); 3505 rtw89_info(rtwdev, "R_AX_SER_DBG_INFO = 0x%x\n", 3506 rtw89_read32(rtwdev, R_AX_SER_DBG_INFO)); 3507 3508 rtw89_fw_prog_cnt_dump(rtwdev); 3509 } 3510 3511 static void rtw89_release_pkt_list(struct rtw89_dev *rtwdev) 3512 { 3513 struct list_head *pkt_list = rtwdev->scan_info.pkt_list; 3514 struct rtw89_pktofld_info *info, *tmp; 3515 u8 idx; 3516 3517 for (idx = NL80211_BAND_2GHZ; idx < NUM_NL80211_BANDS; idx++) { 3518 if (!(rtwdev->chip->support_bands & BIT(idx))) 3519 continue; 3520 3521 list_for_each_entry_safe(info, tmp, &pkt_list[idx], list) { 3522 if (test_bit(info->id, rtwdev->pkt_offload)) 3523 rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id); 3524 list_del(&info->list); 3525 kfree(info); 3526 } 3527 } 3528 } 3529 3530 static bool rtw89_is_6ghz_wildcard_probe_req(struct rtw89_dev *rtwdev, 3531 struct rtw89_vif *rtwvif, 3532 struct rtw89_pktofld_info *info, 3533 enum nl80211_band band, u8 ssid_idx) 3534 { 3535 struct cfg80211_scan_request *req = rtwvif->scan_req; 3536 3537 if (band != NL80211_BAND_6GHZ) 3538 return false; 3539 3540 if (req->ssids[ssid_idx].ssid_len) { 3541 memcpy(info->ssid, req->ssids[ssid_idx].ssid, 3542 req->ssids[ssid_idx].ssid_len); 3543 info->ssid_len = req->ssids[ssid_idx].ssid_len; 3544 return false; 3545 } else { 3546 return true; 3547 } 3548 } 3549 3550 static int rtw89_append_probe_req_ie(struct rtw89_dev *rtwdev, 3551 struct rtw89_vif *rtwvif, 3552 struct sk_buff *skb, u8 ssid_idx) 3553 { 3554 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 3555 struct ieee80211_scan_ies *ies = rtwvif->scan_ies; 3556 struct rtw89_pktofld_info *info; 3557 struct sk_buff *new; 3558 int ret = 0; 3559 u8 band; 3560 3561 for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) { 3562 if (!(rtwdev->chip->support_bands & BIT(band))) 3563 continue; 3564 3565 new = skb_copy(skb, GFP_KERNEL); 3566 if (!new) { 3567 ret = -ENOMEM; 3568 goto out; 3569 } 3570 skb_put_data(new, ies->ies[band], ies->len[band]); 3571 skb_put_data(new, ies->common_ies, ies->common_ie_len); 3572 3573 info = kzalloc(sizeof(*info), GFP_KERNEL); 3574 if (!info) { 3575 ret = -ENOMEM; 3576 kfree_skb(new); 3577 goto out; 3578 } 3579 3580 if (rtw89_is_6ghz_wildcard_probe_req(rtwdev, rtwvif, info, band, 3581 ssid_idx)) { 3582 kfree_skb(new); 3583 kfree(info); 3584 goto out; 3585 } 3586 3587 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, new); 3588 if (ret) { 3589 kfree_skb(new); 3590 kfree(info); 3591 goto out; 3592 } 3593 3594 list_add_tail(&info->list, &scan_info->pkt_list[band]); 3595 kfree_skb(new); 3596 } 3597 out: 3598 return ret; 3599 } 3600 3601 static int rtw89_hw_scan_update_probe_req(struct rtw89_dev *rtwdev, 3602 struct rtw89_vif *rtwvif) 3603 { 3604 struct cfg80211_scan_request *req = rtwvif->scan_req; 3605 struct sk_buff *skb; 3606 u8 num = req->n_ssids, i; 3607 int ret; 3608 3609 for (i = 0; i < num; i++) { 3610 skb = ieee80211_probereq_get(rtwdev->hw, rtwvif->mac_addr, 
3611 req->ssids[i].ssid, 3612 req->ssids[i].ssid_len, 3613 req->ie_len); 3614 if (!skb) 3615 return -ENOMEM; 3616 3617 ret = rtw89_append_probe_req_ie(rtwdev, rtwvif, skb, i); 3618 kfree_skb(skb); 3619 3620 if (ret) 3621 return ret; 3622 } 3623 3624 return 0; 3625 } 3626 3627 static int rtw89_update_6ghz_rnr_chan(struct rtw89_dev *rtwdev, 3628 struct cfg80211_scan_request *req, 3629 struct rtw89_mac_chinfo *ch_info) 3630 { 3631 struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif; 3632 struct list_head *pkt_list = rtwdev->scan_info.pkt_list; 3633 struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif); 3634 struct ieee80211_scan_ies *ies = rtwvif->scan_ies; 3635 struct cfg80211_scan_6ghz_params *params; 3636 struct rtw89_pktofld_info *info, *tmp; 3637 struct ieee80211_hdr *hdr; 3638 struct sk_buff *skb; 3639 bool found; 3640 int ret = 0; 3641 u8 i; 3642 3643 if (!req->n_6ghz_params) 3644 return 0; 3645 3646 for (i = 0; i < req->n_6ghz_params; i++) { 3647 params = &req->scan_6ghz_params[i]; 3648 3649 if (req->channels[params->channel_idx]->hw_value != 3650 ch_info->pri_ch) 3651 continue; 3652 3653 found = false; 3654 list_for_each_entry(tmp, &pkt_list[NL80211_BAND_6GHZ], list) { 3655 if (ether_addr_equal(tmp->bssid, params->bssid)) { 3656 found = true; 3657 break; 3658 } 3659 } 3660 if (found) 3661 continue; 3662 3663 skb = ieee80211_probereq_get(rtwdev->hw, rtwvif->mac_addr, 3664 NULL, 0, req->ie_len); 3665 skb_put_data(skb, ies->ies[NL80211_BAND_6GHZ], ies->len[NL80211_BAND_6GHZ]); 3666 skb_put_data(skb, ies->common_ies, ies->common_ie_len); 3667 hdr = (struct ieee80211_hdr *)skb->data; 3668 ether_addr_copy(hdr->addr3, params->bssid); 3669 3670 info = kzalloc(sizeof(*info), GFP_KERNEL); 3671 if (!info) { 3672 ret = -ENOMEM; 3673 kfree_skb(skb); 3674 goto out; 3675 } 3676 3677 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb); 3678 if (ret) { 3679 kfree_skb(skb); 3680 kfree(info); 3681 goto out; 3682 } 3683 3684 ether_addr_copy(info->bssid, params->bssid); 3685 info->channel_6ghz = req->channels[params->channel_idx]->hw_value; 3686 list_add_tail(&info->list, &rtwdev->scan_info.pkt_list[NL80211_BAND_6GHZ]); 3687 3688 ch_info->tx_pkt = true; 3689 ch_info->period = RTW89_CHANNEL_TIME_6G + RTW89_DWELL_TIME_6G; 3690 3691 kfree_skb(skb); 3692 } 3693 3694 out: 3695 return ret; 3696 } 3697 3698 static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type, 3699 int ssid_num, 3700 struct rtw89_mac_chinfo *ch_info) 3701 { 3702 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 3703 struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif; 3704 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 3705 struct cfg80211_scan_request *req = rtwvif->scan_req; 3706 struct rtw89_chan *op = &rtwdev->scan_info.op_chan; 3707 struct rtw89_pktofld_info *info; 3708 u8 band, probe_count = 0; 3709 int ret; 3710 3711 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK; 3712 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS; 3713 ch_info->bw = RTW89_SCAN_WIDTH; 3714 ch_info->tx_pkt = true; 3715 ch_info->cfg_tx_pwr = false; 3716 ch_info->tx_pwr_idx = 0; 3717 ch_info->tx_null = false; 3718 ch_info->pause_data = false; 3719 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 3720 3721 if (ch_info->ch_band == RTW89_BAND_6G) { 3722 if ((ssid_num == 1 && req->ssids[0].ssid_len == 0) || 3723 !ch_info->is_psc) { 3724 ch_info->tx_pkt = false; 3725 if (!req->duration_mandatory) 3726 ch_info->period -= RTW89_DWELL_TIME_6G; 3727 } 3728 } 3729 3730 ret = rtw89_update_6ghz_rnr_chan(rtwdev, req, 
ch_info); 3731 if (ret) 3732 rtw89_warn(rtwdev, "RNR fails: %d\n", ret); 3733 3734 if (ssid_num) { 3735 band = rtw89_hw_to_nl80211_band(ch_info->ch_band); 3736 3737 list_for_each_entry(info, &scan_info->pkt_list[band], list) { 3738 if (info->channel_6ghz && 3739 ch_info->pri_ch != info->channel_6ghz) 3740 continue; 3741 ch_info->pkt_id[probe_count++] = info->id; 3742 if (probe_count >= RTW89_SCANOFLD_MAX_SSID) 3743 break; 3744 } 3745 ch_info->num_pkt = probe_count; 3746 } 3747 3748 switch (chan_type) { 3749 case RTW89_CHAN_OPERATE: 3750 ch_info->central_ch = op->channel; 3751 ch_info->pri_ch = op->primary_channel; 3752 ch_info->ch_band = op->band_type; 3753 ch_info->bw = op->band_width; 3754 ch_info->tx_null = true; 3755 ch_info->num_pkt = 0; 3756 break; 3757 case RTW89_CHAN_DFS: 3758 if (ch_info->ch_band != RTW89_BAND_6G) 3759 ch_info->period = max_t(u8, ch_info->period, 3760 RTW89_DFS_CHAN_TIME); 3761 ch_info->dwell_time = RTW89_DWELL_TIME; 3762 break; 3763 case RTW89_CHAN_ACTIVE: 3764 break; 3765 default: 3766 rtw89_err(rtwdev, "Channel type out of bound\n"); 3767 } 3768 } 3769 3770 static int rtw89_hw_scan_add_chan_list(struct rtw89_dev *rtwdev, 3771 struct rtw89_vif *rtwvif, bool connected) 3772 { 3773 struct cfg80211_scan_request *req = rtwvif->scan_req; 3774 struct rtw89_mac_chinfo *ch_info, *tmp; 3775 struct ieee80211_channel *channel; 3776 struct list_head chan_list; 3777 bool random_seq = req->flags & NL80211_SCAN_FLAG_RANDOM_SN; 3778 int list_len, off_chan_time = 0; 3779 enum rtw89_chan_type type; 3780 int ret = 0; 3781 u32 idx; 3782 3783 INIT_LIST_HEAD(&chan_list); 3784 for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0; 3785 idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT; 3786 idx++, list_len++) { 3787 channel = req->channels[idx]; 3788 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); 3789 if (!ch_info) { 3790 ret = -ENOMEM; 3791 goto out; 3792 } 3793 3794 if (req->duration_mandatory) 3795 ch_info->period = req->duration; 3796 else if (channel->band == NL80211_BAND_6GHZ) 3797 ch_info->period = RTW89_CHANNEL_TIME_6G + 3798 RTW89_DWELL_TIME_6G; 3799 else 3800 ch_info->period = RTW89_CHANNEL_TIME; 3801 3802 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band); 3803 ch_info->central_ch = channel->hw_value; 3804 ch_info->pri_ch = channel->hw_value; 3805 ch_info->rand_seq_num = random_seq; 3806 ch_info->is_psc = cfg80211_channel_is_psc(channel); 3807 3808 if (channel->flags & 3809 (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) 3810 type = RTW89_CHAN_DFS; 3811 else 3812 type = RTW89_CHAN_ACTIVE; 3813 rtw89_hw_scan_add_chan(rtwdev, type, req->n_ssids, ch_info); 3814 3815 if (connected && 3816 off_chan_time + ch_info->period > RTW89_OFF_CHAN_TIME) { 3817 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); 3818 if (!tmp) { 3819 ret = -ENOMEM; 3820 kfree(ch_info); 3821 goto out; 3822 } 3823 3824 type = RTW89_CHAN_OPERATE; 3825 tmp->period = req->duration_mandatory ? 
3826 req->duration : RTW89_CHANNEL_TIME; 3827 rtw89_hw_scan_add_chan(rtwdev, type, 0, tmp); 3828 list_add_tail(&tmp->list, &chan_list); 3829 off_chan_time = 0; 3830 list_len++; 3831 } 3832 list_add_tail(&ch_info->list, &chan_list); 3833 off_chan_time += ch_info->period; 3834 } 3835 rtwdev->scan_info.last_chan_idx = idx; 3836 ret = rtw89_fw_h2c_scan_list_offload(rtwdev, list_len, &chan_list); 3837 3838 out: 3839 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) { 3840 list_del(&ch_info->list); 3841 kfree(ch_info); 3842 } 3843 3844 return ret; 3845 } 3846 3847 static int rtw89_hw_scan_prehandle(struct rtw89_dev *rtwdev, 3848 struct rtw89_vif *rtwvif, bool connected) 3849 { 3850 int ret; 3851 3852 ret = rtw89_hw_scan_update_probe_req(rtwdev, rtwvif); 3853 if (ret) { 3854 rtw89_err(rtwdev, "Update probe request failed\n"); 3855 goto out; 3856 } 3857 ret = rtw89_hw_scan_add_chan_list(rtwdev, rtwvif, connected); 3858 out: 3859 return ret; 3860 } 3861 3862 void rtw89_hw_scan_start(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, 3863 struct ieee80211_scan_request *scan_req) 3864 { 3865 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 3866 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 3867 struct cfg80211_scan_request *req = &scan_req->req; 3868 u32 rx_fltr = rtwdev->hal.rx_fltr; 3869 u8 mac_addr[ETH_ALEN]; 3870 3871 rtw89_get_channel(rtwdev, rtwvif, &rtwdev->scan_info.op_chan); 3872 rtwdev->scan_info.scanning_vif = vif; 3873 rtwdev->scan_info.last_chan_idx = 0; 3874 rtwvif->scan_ies = &scan_req->ies; 3875 rtwvif->scan_req = req; 3876 ieee80211_stop_queues(rtwdev->hw); 3877 3878 if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) 3879 get_random_mask_addr(mac_addr, req->mac_addr, 3880 req->mac_addr_mask); 3881 else 3882 ether_addr_copy(mac_addr, vif->addr); 3883 rtw89_core_scan_start(rtwdev, rtwvif, mac_addr, true); 3884 3885 rx_fltr &= ~B_AX_A_BCN_CHK_EN; 3886 rx_fltr &= ~B_AX_A_BC; 3887 rx_fltr &= ~B_AX_A_A1_MATCH; 3888 rtw89_write32_mask(rtwdev, 3889 rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, RTW89_MAC_0), 3890 B_AX_RX_FLTR_CFG_MASK, 3891 rx_fltr); 3892 } 3893 3894 void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, 3895 bool aborted) 3896 { 3897 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 3898 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 3899 struct cfg80211_scan_info info = { 3900 .aborted = aborted, 3901 }; 3902 struct rtw89_vif *rtwvif; 3903 3904 if (!vif) 3905 return; 3906 3907 rtw89_write32_mask(rtwdev, 3908 rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, RTW89_MAC_0), 3909 B_AX_RX_FLTR_CFG_MASK, 3910 rtwdev->hal.rx_fltr); 3911 3912 rtw89_core_scan_complete(rtwdev, vif, true); 3913 ieee80211_scan_completed(rtwdev->hw, &info); 3914 ieee80211_wake_queues(rtwdev->hw); 3915 3916 rtw89_release_pkt_list(rtwdev); 3917 rtwvif = (struct rtw89_vif *)vif->drv_priv; 3918 rtwvif->scan_req = NULL; 3919 rtwvif->scan_ies = NULL; 3920 scan_info->last_chan_idx = 0; 3921 scan_info->scanning_vif = NULL; 3922 3923 rtw89_set_channel(rtwdev); 3924 } 3925 3926 void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif) 3927 { 3928 rtw89_hw_scan_offload(rtwdev, vif, false); 3929 rtw89_hw_scan_complete(rtwdev, vif, true); 3930 } 3931 3932 int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, 3933 bool enable) 3934 { 3935 struct rtw89_scan_option opt = {0}; 3936 struct rtw89_vif *rtwvif; 3937 bool connected; 3938 int ret = 0; 3939 3940 rtwvif = vif ? 
(struct rtw89_vif *)vif->drv_priv : NULL; 3941 if (!rtwvif) 3942 return -EINVAL; 3943 3944 /* This variable implies connected or during attempt to connect */ 3945 connected = !is_zero_ether_addr(rtwvif->bssid); 3946 opt.enable = enable; 3947 opt.target_ch_mode = connected; 3948 if (enable) { 3949 ret = rtw89_hw_scan_prehandle(rtwdev, rtwvif, connected); 3950 if (ret) 3951 goto out; 3952 } 3953 ret = rtw89_fw_h2c_scan_offload(rtwdev, &opt, rtwvif); 3954 out: 3955 return ret; 3956 } 3957 3958 #define H2C_FW_CPU_EXCEPTION_LEN 4 3959 #define H2C_FW_CPU_EXCEPTION_TYPE_DEF 0x5566 3960 int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev) 3961 { 3962 struct sk_buff *skb; 3963 int ret; 3964 3965 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_FW_CPU_EXCEPTION_LEN); 3966 if (!skb) { 3967 rtw89_err(rtwdev, 3968 "failed to alloc skb for fw cpu exception\n"); 3969 return -ENOMEM; 3970 } 3971 3972 skb_put(skb, H2C_FW_CPU_EXCEPTION_LEN); 3973 RTW89_SET_FWCMD_CPU_EXCEPTION_TYPE(skb->data, 3974 H2C_FW_CPU_EXCEPTION_TYPE_DEF); 3975 3976 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3977 H2C_CAT_TEST, 3978 H2C_CL_FW_STATUS_TEST, 3979 H2C_FUNC_CPU_EXCEPTION, 0, 0, 3980 H2C_FW_CPU_EXCEPTION_LEN); 3981 3982 ret = rtw89_h2c_tx(rtwdev, skb, false); 3983 if (ret) { 3984 rtw89_err(rtwdev, "failed to send h2c\n"); 3985 goto fail; 3986 } 3987 3988 return 0; 3989 3990 fail: 3991 dev_kfree_skb_any(skb); 3992 return ret; 3993 } 3994 3995 #define H2C_PKT_DROP_LEN 24 3996 int rtw89_fw_h2c_pkt_drop(struct rtw89_dev *rtwdev, 3997 const struct rtw89_pkt_drop_params *params) 3998 { 3999 struct sk_buff *skb; 4000 int ret; 4001 4002 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_PKT_DROP_LEN); 4003 if (!skb) { 4004 rtw89_err(rtwdev, 4005 "failed to alloc skb for packet drop\n"); 4006 return -ENOMEM; 4007 } 4008 4009 switch (params->sel) { 4010 case RTW89_PKT_DROP_SEL_MACID_BE_ONCE: 4011 case RTW89_PKT_DROP_SEL_MACID_BK_ONCE: 4012 case RTW89_PKT_DROP_SEL_MACID_VI_ONCE: 4013 case RTW89_PKT_DROP_SEL_MACID_VO_ONCE: 4014 case RTW89_PKT_DROP_SEL_BAND_ONCE: 4015 break; 4016 default: 4017 rtw89_debug(rtwdev, RTW89_DBG_FW, 4018 "H2C of pkt drop might not fully support sel: %d yet\n", 4019 params->sel); 4020 break; 4021 } 4022 4023 skb_put(skb, H2C_PKT_DROP_LEN); 4024 RTW89_SET_FWCMD_PKT_DROP_SEL(skb->data, params->sel); 4025 RTW89_SET_FWCMD_PKT_DROP_MACID(skb->data, params->macid); 4026 RTW89_SET_FWCMD_PKT_DROP_BAND(skb->data, params->mac_band); 4027 RTW89_SET_FWCMD_PKT_DROP_PORT(skb->data, params->port); 4028 RTW89_SET_FWCMD_PKT_DROP_MBSSID(skb->data, params->mbssid); 4029 RTW89_SET_FWCMD_PKT_DROP_ROLE_A_INFO_TF_TRS(skb->data, params->tf_trs); 4030 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_0(skb->data, 4031 params->macid_band_sel[0]); 4032 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_1(skb->data, 4033 params->macid_band_sel[1]); 4034 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_2(skb->data, 4035 params->macid_band_sel[2]); 4036 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_3(skb->data, 4037 params->macid_band_sel[3]); 4038 4039 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4040 H2C_CAT_MAC, 4041 H2C_CL_MAC_FW_OFLD, 4042 H2C_FUNC_PKT_DROP, 0, 0, 4043 H2C_PKT_DROP_LEN); 4044 4045 ret = rtw89_h2c_tx(rtwdev, skb, false); 4046 if (ret) { 4047 rtw89_err(rtwdev, "failed to send h2c\n"); 4048 goto fail; 4049 } 4050 4051 return 0; 4052 4053 fail: 4054 dev_kfree_skb_any(skb); 4055 return ret; 4056 } 4057 4058 #define H2C_KEEP_ALIVE_LEN 4 4059 int rtw89_fw_h2c_keep_alive(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 4060 bool 
			    enable)
{
	struct sk_buff *skb;
	u8 pkt_id = 0;
	int ret;

	if (enable) {
		ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
						   RTW89_PKT_OFLD_TYPE_NULL_DATA,
						   &pkt_id);
		if (ret)
			return -EPERM;
	}

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_KEEP_ALIVE_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for keep alive\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_KEEP_ALIVE_LEN);

	RTW89_SET_KEEP_ALIVE_ENABLE(skb->data, enable);
	RTW89_SET_KEEP_ALIVE_PKT_NULL_ID(skb->data, pkt_id);
	RTW89_SET_KEEP_ALIVE_PERIOD(skb->data, 5);
	RTW89_SET_KEEP_ALIVE_MACID(skb->data, rtwvif->mac_id);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_WOW,
			      H2C_FUNC_KEEP_ALIVE, 0, 1,
			      H2C_KEEP_ALIVE_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;

fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_DISCONNECT_DETECT_LEN 8
int rtw89_fw_h2c_disconnect_detect(struct rtw89_dev *rtwdev,
				   struct rtw89_vif *rtwvif, bool enable)
{
	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
	struct sk_buff *skb;
	u8 macid = rtwvif->mac_id;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DISCONNECT_DETECT_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for disconnect detect\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_DISCONNECT_DETECT_LEN);

	if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) {
		RTW89_SET_DISCONNECT_DETECT_ENABLE(skb->data, enable);
		RTW89_SET_DISCONNECT_DETECT_DISCONNECT(skb->data, !enable);
		RTW89_SET_DISCONNECT_DETECT_MAC_ID(skb->data, macid);
		RTW89_SET_DISCONNECT_DETECT_CHECK_PERIOD(skb->data, 100);
		RTW89_SET_DISCONNECT_DETECT_TRY_PKT_COUNT(skb->data, 5);
	}

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_WOW,
			      H2C_FUNC_DISCONNECT_DETECT, 0, 1,
			      H2C_DISCONNECT_DETECT_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;

fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_WOW_GLOBAL_LEN 8
int rtw89_fw_h2c_wow_global(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
			    bool enable)
{
	struct sk_buff *skb;
	u8 macid = rtwvif->mac_id;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WOW_GLOBAL_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for wow global\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_WOW_GLOBAL_LEN);

	RTW89_SET_WOW_GLOBAL_ENABLE(skb->data, enable);
	RTW89_SET_WOW_GLOBAL_MAC_ID(skb->data, macid);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_WOW,
			      H2C_FUNC_WOW_GLOBAL, 0, 1,
			      H2C_WOW_GLOBAL_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;

fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_WAKEUP_CTRL_LEN 4
int rtw89_fw_h2c_wow_wakeup_ctrl(struct rtw89_dev *rtwdev,
				 struct rtw89_vif *rtwvif,
				 bool enable)
{
	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
	struct sk_buff *skb;
	u8 macid = rtwvif->mac_id;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WAKEUP_CTRL_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for wakeup ctrl\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_WAKEUP_CTRL_LEN);

	if (rtw_wow->pattern_cnt)
		RTW89_SET_WOW_WAKEUP_CTRL_PATTERN_MATCH_ENABLE(skb->data, enable);
	if (test_bit(RTW89_WOW_FLAG_EN_MAGIC_PKT, rtw_wow->flags))
		RTW89_SET_WOW_WAKEUP_CTRL_MAGIC_ENABLE(skb->data, enable);
	if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags))
		RTW89_SET_WOW_WAKEUP_CTRL_DEAUTH_ENABLE(skb->data, enable);

	RTW89_SET_WOW_WAKEUP_CTRL_MAC_ID(skb->data, macid);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_WOW,
			      H2C_FUNC_WAKEUP_CTRL, 0, 1,
			      H2C_WAKEUP_CTRL_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;

fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_WOW_CAM_UPD_LEN 24
int rtw89_fw_wow_cam_update(struct rtw89_dev *rtwdev,
			    struct rtw89_wow_cam_info *cam_info)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WOW_CAM_UPD_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for wow cam update\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_WOW_CAM_UPD_LEN);

	RTW89_SET_WOW_CAM_UPD_R_W(skb->data, cam_info->r_w);
	RTW89_SET_WOW_CAM_UPD_IDX(skb->data, cam_info->idx);
	if (cam_info->valid) {
		RTW89_SET_WOW_CAM_UPD_WKFM1(skb->data, cam_info->mask[0]);
		RTW89_SET_WOW_CAM_UPD_WKFM2(skb->data, cam_info->mask[1]);
		RTW89_SET_WOW_CAM_UPD_WKFM3(skb->data, cam_info->mask[2]);
		RTW89_SET_WOW_CAM_UPD_WKFM4(skb->data, cam_info->mask[3]);
		RTW89_SET_WOW_CAM_UPD_CRC(skb->data, cam_info->crc);
		RTW89_SET_WOW_CAM_UPD_NEGATIVE_PATTERN_MATCH(skb->data,
							     cam_info->negative_pattern_match);
		RTW89_SET_WOW_CAM_UPD_SKIP_MAC_HDR(skb->data,
						   cam_info->skip_mac_hdr);
		RTW89_SET_WOW_CAM_UPD_UC(skb->data, cam_info->uc);
		RTW89_SET_WOW_CAM_UPD_MC(skb->data, cam_info->mc);
		RTW89_SET_WOW_CAM_UPD_BC(skb->data, cam_info->bc);
	}
	RTW89_SET_WOW_CAM_UPD_VALID(skb->data, cam_info->valid);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_WOW,
			      H2C_FUNC_WOW_CAM_UPD, 0, 1,
			      H2C_WOW_CAM_UPD_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

/* Return < 0 if a failure happens while sending the H2C or waiting for the
 * condition.
 * Return 0 when waiting for the condition succeeds.
 * Return > 0 if the wait is considered unreachable due to driver/FW design,
 * where 1 means during SER.
4294 */ 4295 static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb, 4296 struct rtw89_wait_info *wait, unsigned int cond) 4297 { 4298 int ret; 4299 4300 ret = rtw89_h2c_tx(rtwdev, skb, false); 4301 if (ret) { 4302 rtw89_err(rtwdev, "failed to send h2c\n"); 4303 dev_kfree_skb_any(skb); 4304 return -EBUSY; 4305 } 4306 4307 if (test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags)) 4308 return 1; 4309 4310 return rtw89_wait_for_cond(wait, cond); 4311 } 4312 4313 #define H2C_ADD_MCC_LEN 16 4314 int rtw89_fw_h2c_add_mcc(struct rtw89_dev *rtwdev, 4315 const struct rtw89_fw_mcc_add_req *p) 4316 { 4317 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 4318 struct sk_buff *skb; 4319 unsigned int cond; 4320 4321 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ADD_MCC_LEN); 4322 if (!skb) { 4323 rtw89_err(rtwdev, 4324 "failed to alloc skb for add mcc\n"); 4325 return -ENOMEM; 4326 } 4327 4328 skb_put(skb, H2C_ADD_MCC_LEN); 4329 RTW89_SET_FWCMD_ADD_MCC_MACID(skb->data, p->macid); 4330 RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG0(skb->data, p->central_ch_seg0); 4331 RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG1(skb->data, p->central_ch_seg1); 4332 RTW89_SET_FWCMD_ADD_MCC_PRIMARY_CH(skb->data, p->primary_ch); 4333 RTW89_SET_FWCMD_ADD_MCC_BANDWIDTH(skb->data, p->bandwidth); 4334 RTW89_SET_FWCMD_ADD_MCC_GROUP(skb->data, p->group); 4335 RTW89_SET_FWCMD_ADD_MCC_C2H_RPT(skb->data, p->c2h_rpt); 4336 RTW89_SET_FWCMD_ADD_MCC_DIS_TX_NULL(skb->data, p->dis_tx_null); 4337 RTW89_SET_FWCMD_ADD_MCC_DIS_SW_RETRY(skb->data, p->dis_sw_retry); 4338 RTW89_SET_FWCMD_ADD_MCC_IN_CURR_CH(skb->data, p->in_curr_ch); 4339 RTW89_SET_FWCMD_ADD_MCC_SW_RETRY_COUNT(skb->data, p->sw_retry_count); 4340 RTW89_SET_FWCMD_ADD_MCC_TX_NULL_EARLY(skb->data, p->tx_null_early); 4341 RTW89_SET_FWCMD_ADD_MCC_BTC_IN_2G(skb->data, p->btc_in_2g); 4342 RTW89_SET_FWCMD_ADD_MCC_PTA_EN(skb->data, p->pta_en); 4343 RTW89_SET_FWCMD_ADD_MCC_RFK_BY_PASS(skb->data, p->rfk_by_pass); 4344 RTW89_SET_FWCMD_ADD_MCC_CH_BAND_TYPE(skb->data, p->ch_band_type); 4345 RTW89_SET_FWCMD_ADD_MCC_DURATION(skb->data, p->duration); 4346 RTW89_SET_FWCMD_ADD_MCC_COURTESY_EN(skb->data, p->courtesy_en); 4347 RTW89_SET_FWCMD_ADD_MCC_COURTESY_NUM(skb->data, p->courtesy_num); 4348 RTW89_SET_FWCMD_ADD_MCC_COURTESY_TARGET(skb->data, p->courtesy_target); 4349 4350 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4351 H2C_CAT_MAC, 4352 H2C_CL_MCC, 4353 H2C_FUNC_ADD_MCC, 0, 0, 4354 H2C_ADD_MCC_LEN); 4355 4356 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_ADD_MCC); 4357 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 4358 } 4359 4360 #define H2C_START_MCC_LEN 12 4361 int rtw89_fw_h2c_start_mcc(struct rtw89_dev *rtwdev, 4362 const struct rtw89_fw_mcc_start_req *p) 4363 { 4364 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 4365 struct sk_buff *skb; 4366 unsigned int cond; 4367 4368 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_START_MCC_LEN); 4369 if (!skb) { 4370 rtw89_err(rtwdev, 4371 "failed to alloc skb for start mcc\n"); 4372 return -ENOMEM; 4373 } 4374 4375 skb_put(skb, H2C_START_MCC_LEN); 4376 RTW89_SET_FWCMD_START_MCC_GROUP(skb->data, p->group); 4377 RTW89_SET_FWCMD_START_MCC_BTC_IN_GROUP(skb->data, p->btc_in_group); 4378 RTW89_SET_FWCMD_START_MCC_OLD_GROUP_ACTION(skb->data, p->old_group_action); 4379 RTW89_SET_FWCMD_START_MCC_OLD_GROUP(skb->data, p->old_group); 4380 RTW89_SET_FWCMD_START_MCC_NOTIFY_CNT(skb->data, p->notify_cnt); 4381 RTW89_SET_FWCMD_START_MCC_NOTIFY_RXDBG_EN(skb->data, p->notify_rxdbg_en); 4382 RTW89_SET_FWCMD_START_MCC_MACID(skb->data, 
p->macid); 4383 RTW89_SET_FWCMD_START_MCC_TSF_LOW(skb->data, p->tsf_low); 4384 RTW89_SET_FWCMD_START_MCC_TSF_HIGH(skb->data, p->tsf_high); 4385 4386 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4387 H2C_CAT_MAC, 4388 H2C_CL_MCC, 4389 H2C_FUNC_START_MCC, 0, 0, 4390 H2C_START_MCC_LEN); 4391 4392 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_START_MCC); 4393 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 4394 } 4395 4396 #define H2C_STOP_MCC_LEN 4 4397 int rtw89_fw_h2c_stop_mcc(struct rtw89_dev *rtwdev, u8 group, u8 macid, 4398 bool prev_groups) 4399 { 4400 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 4401 struct sk_buff *skb; 4402 unsigned int cond; 4403 4404 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_STOP_MCC_LEN); 4405 if (!skb) { 4406 rtw89_err(rtwdev, 4407 "failed to alloc skb for stop mcc\n"); 4408 return -ENOMEM; 4409 } 4410 4411 skb_put(skb, H2C_STOP_MCC_LEN); 4412 RTW89_SET_FWCMD_STOP_MCC_MACID(skb->data, macid); 4413 RTW89_SET_FWCMD_STOP_MCC_GROUP(skb->data, group); 4414 RTW89_SET_FWCMD_STOP_MCC_PREV_GROUPS(skb->data, prev_groups); 4415 4416 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4417 H2C_CAT_MAC, 4418 H2C_CL_MCC, 4419 H2C_FUNC_STOP_MCC, 0, 0, 4420 H2C_STOP_MCC_LEN); 4421 4422 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_STOP_MCC); 4423 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 4424 } 4425 4426 #define H2C_DEL_MCC_GROUP_LEN 4 4427 int rtw89_fw_h2c_del_mcc_group(struct rtw89_dev *rtwdev, u8 group, 4428 bool prev_groups) 4429 { 4430 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 4431 struct sk_buff *skb; 4432 unsigned int cond; 4433 4434 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DEL_MCC_GROUP_LEN); 4435 if (!skb) { 4436 rtw89_err(rtwdev, 4437 "failed to alloc skb for del mcc group\n"); 4438 return -ENOMEM; 4439 } 4440 4441 skb_put(skb, H2C_DEL_MCC_GROUP_LEN); 4442 RTW89_SET_FWCMD_DEL_MCC_GROUP_GROUP(skb->data, group); 4443 RTW89_SET_FWCMD_DEL_MCC_GROUP_PREV_GROUPS(skb->data, prev_groups); 4444 4445 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4446 H2C_CAT_MAC, 4447 H2C_CL_MCC, 4448 H2C_FUNC_DEL_MCC_GROUP, 0, 0, 4449 H2C_DEL_MCC_GROUP_LEN); 4450 4451 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_DEL_MCC_GROUP); 4452 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 4453 } 4454 4455 #define H2C_RESET_MCC_GROUP_LEN 4 4456 int rtw89_fw_h2c_reset_mcc_group(struct rtw89_dev *rtwdev, u8 group) 4457 { 4458 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 4459 struct sk_buff *skb; 4460 unsigned int cond; 4461 4462 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_RESET_MCC_GROUP_LEN); 4463 if (!skb) { 4464 rtw89_err(rtwdev, 4465 "failed to alloc skb for reset mcc group\n"); 4466 return -ENOMEM; 4467 } 4468 4469 skb_put(skb, H2C_RESET_MCC_GROUP_LEN); 4470 RTW89_SET_FWCMD_RESET_MCC_GROUP_GROUP(skb->data, group); 4471 4472 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4473 H2C_CAT_MAC, 4474 H2C_CL_MCC, 4475 H2C_FUNC_RESET_MCC_GROUP, 0, 0, 4476 H2C_RESET_MCC_GROUP_LEN); 4477 4478 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_RESET_MCC_GROUP); 4479 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 4480 } 4481 4482 #define H2C_MCC_REQ_TSF_LEN 4 4483 int rtw89_fw_h2c_mcc_req_tsf(struct rtw89_dev *rtwdev, 4484 const struct rtw89_fw_mcc_tsf_req *req, 4485 struct rtw89_mac_mcc_tsf_rpt *rpt) 4486 { 4487 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 4488 struct rtw89_mac_mcc_tsf_rpt *tmp; 4489 struct sk_buff *skb; 4490 unsigned int cond; 4491 int ret; 4492 4493 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, 
H2C_MCC_REQ_TSF_LEN); 4494 if (!skb) { 4495 rtw89_err(rtwdev, 4496 "failed to alloc skb for mcc req tsf\n"); 4497 return -ENOMEM; 4498 } 4499 4500 skb_put(skb, H2C_MCC_REQ_TSF_LEN); 4501 RTW89_SET_FWCMD_MCC_REQ_TSF_GROUP(skb->data, req->group); 4502 RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_X(skb->data, req->macid_x); 4503 RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_Y(skb->data, req->macid_y); 4504 4505 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4506 H2C_CAT_MAC, 4507 H2C_CL_MCC, 4508 H2C_FUNC_MCC_REQ_TSF, 0, 0, 4509 H2C_MCC_REQ_TSF_LEN); 4510 4511 cond = RTW89_MCC_WAIT_COND(req->group, H2C_FUNC_MCC_REQ_TSF); 4512 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 4513 if (ret) 4514 return ret; 4515 4516 tmp = (struct rtw89_mac_mcc_tsf_rpt *)wait->data.buf; 4517 *rpt = *tmp; 4518 4519 return 0; 4520 } 4521 4522 #define H2C_MCC_MACID_BITMAP_DSC_LEN 4 4523 int rtw89_fw_h2c_mcc_macid_bitamp(struct rtw89_dev *rtwdev, u8 group, u8 macid, 4524 u8 *bitmap) 4525 { 4526 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 4527 struct sk_buff *skb; 4528 unsigned int cond; 4529 u8 map_len; 4530 u8 h2c_len; 4531 4532 BUILD_BUG_ON(RTW89_MAX_MAC_ID_NUM % 8); 4533 map_len = RTW89_MAX_MAC_ID_NUM / 8; 4534 h2c_len = H2C_MCC_MACID_BITMAP_DSC_LEN + map_len; 4535 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, h2c_len); 4536 if (!skb) { 4537 rtw89_err(rtwdev, 4538 "failed to alloc skb for mcc macid bitmap\n"); 4539 return -ENOMEM; 4540 } 4541 4542 skb_put(skb, h2c_len); 4543 RTW89_SET_FWCMD_MCC_MACID_BITMAP_GROUP(skb->data, group); 4544 RTW89_SET_FWCMD_MCC_MACID_BITMAP_MACID(skb->data, macid); 4545 RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP_LENGTH(skb->data, map_len); 4546 RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP(skb->data, bitmap, map_len); 4547 4548 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4549 H2C_CAT_MAC, 4550 H2C_CL_MCC, 4551 H2C_FUNC_MCC_MACID_BITMAP, 0, 0, 4552 h2c_len); 4553 4554 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_MACID_BITMAP); 4555 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 4556 } 4557 4558 #define H2C_MCC_SYNC_LEN 4 4559 int rtw89_fw_h2c_mcc_sync(struct rtw89_dev *rtwdev, u8 group, u8 source, 4560 u8 target, u8 offset) 4561 { 4562 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 4563 struct sk_buff *skb; 4564 unsigned int cond; 4565 4566 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SYNC_LEN); 4567 if (!skb) { 4568 rtw89_err(rtwdev, 4569 "failed to alloc skb for mcc sync\n"); 4570 return -ENOMEM; 4571 } 4572 4573 skb_put(skb, H2C_MCC_SYNC_LEN); 4574 RTW89_SET_FWCMD_MCC_SYNC_GROUP(skb->data, group); 4575 RTW89_SET_FWCMD_MCC_SYNC_MACID_SOURCE(skb->data, source); 4576 RTW89_SET_FWCMD_MCC_SYNC_MACID_TARGET(skb->data, target); 4577 RTW89_SET_FWCMD_MCC_SYNC_SYNC_OFFSET(skb->data, offset); 4578 4579 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4580 H2C_CAT_MAC, 4581 H2C_CL_MCC, 4582 H2C_FUNC_MCC_SYNC, 0, 0, 4583 H2C_MCC_SYNC_LEN); 4584 4585 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_SYNC); 4586 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 4587 } 4588 4589 #define H2C_MCC_SET_DURATION_LEN 20 4590 int rtw89_fw_h2c_mcc_set_duration(struct rtw89_dev *rtwdev, 4591 const struct rtw89_fw_mcc_duration *p) 4592 { 4593 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 4594 struct sk_buff *skb; 4595 unsigned int cond; 4596 4597 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SET_DURATION_LEN); 4598 if (!skb) { 4599 rtw89_err(rtwdev, 4600 "failed to alloc skb for mcc set duration\n"); 4601 return -ENOMEM; 4602 } 4603 4604 skb_put(skb, H2C_MCC_SET_DURATION_LEN); 
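	/* pack the rtw89_fw_mcc_duration request: group, start MACID, the two
	 * role MACIDs (x/y), the start TSF, and the duration for each role
	 */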
4605 RTW89_SET_FWCMD_MCC_SET_DURATION_GROUP(skb->data, p->group); 4606 RTW89_SET_FWCMD_MCC_SET_DURATION_BTC_IN_GROUP(skb->data, p->btc_in_group); 4607 RTW89_SET_FWCMD_MCC_SET_DURATION_START_MACID(skb->data, p->start_macid); 4608 RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_X(skb->data, p->macid_x); 4609 RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_Y(skb->data, p->macid_y); 4610 RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_LOW(skb->data, 4611 p->start_tsf_low); 4612 RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_HIGH(skb->data, 4613 p->start_tsf_high); 4614 RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_X(skb->data, p->duration_x); 4615 RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_Y(skb->data, p->duration_y); 4616 4617 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4618 H2C_CAT_MAC, 4619 H2C_CL_MCC, 4620 H2C_FUNC_MCC_SET_DURATION, 0, 0, 4621 H2C_MCC_SET_DURATION_LEN); 4622 4623 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_MCC_SET_DURATION); 4624 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 4625 } 4626
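/* Note on the MCC H2C helpers above: each command encodes its completion
 * condition as RTW89_MCC_WAIT_COND(group, H2C_FUNC_*) and blocks on
 * rtwdev->mcc.wait via rtw89_h2c_tx_and_wait(). The wait is presumably
 * completed by the corresponding MCC C2H report handler signalling the same
 * condition; within this file, only rtw89_fw_h2c_mcc_req_tsf() consumes the
 * report payload (wait->data.buf) afterwards.
 */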