1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright(c) 2019-2020  Realtek Corporation
3  */
4 
5 #include "cam.h"
6 #include "chan.h"
7 #include "coex.h"
8 #include "debug.h"
9 #include "fw.h"
10 #include "mac.h"
11 #include "phy.h"
12 #include "reg.h"
13 
14 static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev,
15 				    struct sk_buff *skb);
16 
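/* Allocate an skb for an H2C command: reserve headroom for the chip-specific
 * H2C (TX) descriptor and, when requested, the H2C command header, then zero
 * @len bytes of payload area for the caller to fill.
 */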
17 static struct sk_buff *rtw89_fw_h2c_alloc_skb(struct rtw89_dev *rtwdev, u32 len,
18 					      bool header)
19 {
20 	struct sk_buff *skb;
21 	u32 header_len = 0;
22 	u32 h2c_desc_size = rtwdev->chip->h2c_desc_size;
23 
24 	if (header)
25 		header_len = H2C_HEADER_LEN;
26 
27 	skb = dev_alloc_skb(len + header_len + h2c_desc_size);
28 	if (!skb)
29 		return NULL;
30 	skb_reserve(skb, header_len + h2c_desc_size);
31 	memset(skb->data, 0, len);
32 
33 	return skb;
34 }
35 
36 struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(struct rtw89_dev *rtwdev, u32 len)
37 {
38 	return rtw89_fw_h2c_alloc_skb(rtwdev, len, true);
39 }
40 
41 struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(struct rtw89_dev *rtwdev, u32 len)
42 {
43 	return rtw89_fw_h2c_alloc_skb(rtwdev, len, false);
44 }
45 
46 static u8 _fw_get_rdy(struct rtw89_dev *rtwdev)
47 {
48 	u8 val = rtw89_read8(rtwdev, R_AX_WCPU_FW_CTRL);
49 
50 	return FIELD_GET(B_AX_WCPU_FWDL_STS_MASK, val);
51 }
52 
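/* Poll the WCPU firmware-download status field in R_AX_WCPU_FW_CTRL until the
 * firmware reports init-ready, translating the known failure codes
 * (checksum, security, cut-version mismatch) into errnos.
 */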
53 #define FWDL_WAIT_CNT 400000
54 int rtw89_fw_check_rdy(struct rtw89_dev *rtwdev)
55 {
56 	u8 val;
57 	int ret;
58 
59 	ret = read_poll_timeout_atomic(_fw_get_rdy, val,
60 				       val == RTW89_FWDL_WCPU_FW_INIT_RDY,
61 				       1, FWDL_WAIT_CNT, false, rtwdev);
62 	if (ret) {
63 		switch (val) {
64 		case RTW89_FWDL_CHECKSUM_FAIL:
65 			rtw89_err(rtwdev, "fw checksum fail\n");
66 			return -EINVAL;
67 
68 		case RTW89_FWDL_SECURITY_FAIL:
69 			rtw89_err(rtwdev, "fw security fail\n");
70 			return -EINVAL;
71 
72 		case RTW89_FWDL_CV_NOT_MATCH:
73 			rtw89_err(rtwdev, "fw cv not match\n");
74 			return -EINVAL;
75 
76 		default:
77 			return -EBUSY;
78 		}
79 	}
80 
81 	set_bit(RTW89_FLAG_FW_RDY, rtwdev->flags);
82 
83 	return 0;
84 }
85 
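/* Parse the firmware image header: record the number of sections, account for
 * an optional dynamic header, then walk the per-section headers to fill @info
 * with each section's download address and length (plus the checksum trailer
 * when present). Fails if the summed section lengths do not match the file
 * size.
 */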
86 static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
87 			       struct rtw89_fw_bin_info *info)
88 {
89 	struct rtw89_fw_hdr_section_info *section_info;
90 	const u8 *fw_end = fw + len;
91 	const u8 *fwdynhdr;
92 	const u8 *bin;
93 	u32 base_hdr_len;
94 	u32 i;
95 
96 	if (!info)
97 		return -EINVAL;
98 
99 	info->section_num = GET_FW_HDR_SEC_NUM(fw);
100 	base_hdr_len = RTW89_FW_HDR_SIZE +
101 		       info->section_num * RTW89_FW_SECTION_HDR_SIZE;
102 	info->dynamic_hdr_en = GET_FW_HDR_DYN_HDR(fw);
103 
104 	if (info->dynamic_hdr_en) {
105 		info->hdr_len = GET_FW_HDR_LEN(fw);
106 		info->dynamic_hdr_len = info->hdr_len - base_hdr_len;
107 		fwdynhdr = fw + base_hdr_len;
108 		if (GET_FW_DYNHDR_LEN(fwdynhdr) != info->dynamic_hdr_len) {
109 			rtw89_err(rtwdev, "[ERR]invalid fw dynamic header len\n");
110 			return -EINVAL;
111 		}
112 	} else {
113 		info->hdr_len = base_hdr_len;
114 		info->dynamic_hdr_len = 0;
115 	}
116 
117 	bin = fw + info->hdr_len;
118 
119 	/* jump to section header */
120 	fw += RTW89_FW_HDR_SIZE;
121 	section_info = info->section_info;
122 	for (i = 0; i < info->section_num; i++) {
123 		section_info->len = GET_FWSECTION_HDR_SEC_SIZE(fw);
124 		if (GET_FWSECTION_HDR_CHECKSUM(fw))
125 			section_info->len += FWDL_SECTION_CHKSUM_LEN;
126 		section_info->redl = GET_FWSECTION_HDR_REDL(fw);
127 		section_info->dladdr =
128 				GET_FWSECTION_HDR_DL_ADDR(fw) & 0x1fffffff;
129 		section_info->addr = bin;
130 		bin += section_info->len;
131 		fw += RTW89_FW_SECTION_HDR_SIZE;
132 		section_info++;
133 	}
134 
135 	if (fw_end != bin) {
136 		rtw89_err(rtwdev, "[ERR]fw bin size\n");
137 		return -EINVAL;
138 	}
139 
140 	return 0;
141 }
142 
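/* Look up the requested firmware type inside a multi-firmware (MFW)
 * container. Files without the MFW signature are treated as legacy
 * single-image firmware and are only accepted for the NORMAL type; otherwise
 * the entry matching the chip cut version (cv) and @type, and not marked as
 * an MP image, is selected.
 */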
143 static
144 int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
145 			struct rtw89_fw_suit *fw_suit)
146 {
147 	struct rtw89_fw_info *fw_info = &rtwdev->fw;
148 	const u8 *mfw = fw_info->firmware->data;
149 	u32 mfw_len = fw_info->firmware->size;
150 	const struct rtw89_mfw_hdr *mfw_hdr = (const struct rtw89_mfw_hdr *)mfw;
151 	const struct rtw89_mfw_info *mfw_info;
152 	int i;
153 
154 	if (mfw_hdr->sig != RTW89_MFW_SIG) {
155 		rtw89_debug(rtwdev, RTW89_DBG_FW, "use legacy firmware\n");
		/* legacy firmware supports the normal type only */
157 		if (type != RTW89_FW_NORMAL)
158 			return -EINVAL;
159 		fw_suit->data = mfw;
160 		fw_suit->size = mfw_len;
161 		return 0;
162 	}
163 
164 	for (i = 0; i < mfw_hdr->fw_nr; i++) {
165 		mfw_info = &mfw_hdr->info[i];
166 		if (mfw_info->cv != rtwdev->hal.cv ||
167 		    mfw_info->type != type ||
168 		    mfw_info->mp)
169 			continue;
170 
171 		fw_suit->data = mfw + le32_to_cpu(mfw_info->shift);
172 		fw_suit->size = le32_to_cpu(mfw_info->size);
173 		return 0;
174 	}
175 
176 	rtw89_err(rtwdev, "no suitable firmware found\n");
177 	return -ENOENT;
178 }
179 
180 static void rtw89_fw_update_ver(struct rtw89_dev *rtwdev,
181 				enum rtw89_fw_type type,
182 				struct rtw89_fw_suit *fw_suit)
183 {
184 	const u8 *hdr = fw_suit->data;
185 
186 	fw_suit->major_ver = GET_FW_HDR_MAJOR_VERSION(hdr);
187 	fw_suit->minor_ver = GET_FW_HDR_MINOR_VERSION(hdr);
188 	fw_suit->sub_ver = GET_FW_HDR_SUBVERSION(hdr);
189 	fw_suit->sub_idex = GET_FW_HDR_SUBINDEX(hdr);
190 	fw_suit->build_year = GET_FW_HDR_YEAR(hdr);
191 	fw_suit->build_mon = GET_FW_HDR_MONTH(hdr);
192 	fw_suit->build_date = GET_FW_HDR_DATE(hdr);
193 	fw_suit->build_hour = GET_FW_HDR_HOUR(hdr);
194 	fw_suit->build_min = GET_FW_HDR_MIN(hdr);
195 	fw_suit->cmd_ver = GET_FW_HDR_CMD_VERSERION(hdr);
196 
197 	rtw89_info(rtwdev,
198 		   "Firmware version %u.%u.%u.%u, cmd version %u, type %u\n",
199 		   fw_suit->major_ver, fw_suit->minor_ver, fw_suit->sub_ver,
200 		   fw_suit->sub_idex, fw_suit->cmd_ver, type);
201 }
202 
203 static
204 int __rtw89_fw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type)
205 {
206 	struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type);
207 	int ret;
208 
209 	ret = rtw89_mfw_recognize(rtwdev, type, fw_suit);
210 	if (ret)
211 		return ret;
212 
213 	rtw89_fw_update_ver(rtwdev, type, fw_suit);
214 
215 	return 0;
216 }
217 
218 #define __DEF_FW_FEAT_COND(__cond, __op) \
219 static bool __fw_feat_cond_ ## __cond(u32 suit_ver_code, u32 comp_ver_code) \
220 { \
221 	return suit_ver_code __op comp_ver_code; \
222 }
223 
224 __DEF_FW_FEAT_COND(ge, >=); /* greater or equal */
225 __DEF_FW_FEAT_COND(le, <=); /* less or equal */
226 
227 struct __fw_feat_cfg {
228 	enum rtw89_core_chip_id chip_id;
229 	enum rtw89_fw_feature feature;
230 	u32 ver_code;
231 	bool (*cond)(u32 suit_ver_code, u32 comp_ver_code);
232 };
233 
234 #define __CFG_FW_FEAT(_chip, _cond, _maj, _min, _sub, _idx, _feat) \
235 	{ \
236 		.chip_id = _chip, \
237 		.feature = RTW89_FW_FEATURE_ ## _feat, \
238 		.ver_code = RTW89_FW_VER_CODE(_maj, _min, _sub, _idx), \
239 		.cond = __fw_feat_cond_ ## _cond, \
240 	}
241 
242 static const struct __fw_feat_cfg fw_feat_tbl[] = {
243 	__CFG_FW_FEAT(RTL8852A, le, 0, 13, 29, 0, OLD_HT_RA_FORMAT),
244 	__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, SCAN_OFFLOAD),
245 	__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, TX_WAKE),
246 	__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 36, 0, CRASH_TRIGGER),
247 	__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 38, 0, PACKET_DROP),
248 	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 20, 0, PACKET_DROP),
249 	__CFG_FW_FEAT(RTL8852C, le, 0, 27, 33, 0, NO_DEEP_PS),
250 	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 34, 0, TX_WAKE),
251 	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 36, 0, SCAN_OFFLOAD),
252 	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 40, 0, CRASH_TRIGGER),
253 };
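/* For example, an RTL8852A NORMAL firmware 0.13.36.0 satisfies the
 * "ge 0, 13, 35, 0" and "ge 0, 13, 36, 0" rules above (SCAN_OFFLOAD, TX_WAKE,
 * CRASH_TRIGGER) but neither "le 0, 13, 29, 0" (OLD_HT_RA_FORMAT) nor
 * "ge 0, 13, 38, 0" (PACKET_DROP).
 */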
254 
255 static void rtw89_fw_recognize_features(struct rtw89_dev *rtwdev)
256 {
257 	const struct rtw89_chip_info *chip = rtwdev->chip;
258 	const struct __fw_feat_cfg *ent;
259 	const struct rtw89_fw_suit *fw_suit;
260 	u32 suit_ver_code;
261 	int i;
262 
263 	fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL);
264 	suit_ver_code = RTW89_FW_SUIT_VER_CODE(fw_suit);
265 
266 	for (i = 0; i < ARRAY_SIZE(fw_feat_tbl); i++) {
267 		ent = &fw_feat_tbl[i];
268 		if (chip->chip_id != ent->chip_id)
269 			continue;
270 
271 		if (ent->cond(suit_ver_code, ent->ver_code))
272 			RTW89_SET_FW_FEATURE(ent->feature, &rtwdev->fw);
273 	}
274 }
275 
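/* Early (pre-probe) feature detection: read just enough of the firmware file
 * to cover the header and derive the feature bitmap from fw_feat_tbl. When
 * LOADPIN enforcement forbids partial reads, the full file is requested
 * instead and handed back to the caller so it is not fetched twice.
 */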
276 const struct firmware *
277 rtw89_early_fw_feature_recognize(struct device *device,
278 				 const struct rtw89_chip_info *chip,
279 				 u32 *early_feat_map)
280 {
281 	union rtw89_compat_fw_hdr buf = {};
282 	const struct firmware *firmware;
283 	bool full_req = false;
284 	u32 ver_code;
285 	int ret;
286 	int i;
287 
	/* If SECURITY_LOADPIN_ENFORCE is enabled, reading a partial file is
	 * denied (-EPERM), so the firmware header cannot be peeked at as
	 * expected. In that case, request the full firmware here.
	 */
292 	if (IS_ENABLED(CONFIG_SECURITY_LOADPIN_ENFORCE))
293 		full_req = true;
294 
295 	if (full_req)
296 		ret = request_firmware(&firmware, chip->fw_name, device);
297 	else
298 		ret = request_partial_firmware_into_buf(&firmware, chip->fw_name,
299 							device, &buf, sizeof(buf),
300 							0);
301 
302 	if (ret) {
303 		dev_err(device, "failed to early request firmware: %d\n", ret);
304 		return NULL;
305 	}
306 
307 	if (full_req)
308 		ver_code = rtw89_compat_fw_hdr_ver_code(firmware->data);
309 	else
310 		ver_code = rtw89_compat_fw_hdr_ver_code(&buf);
311 
312 	if (!ver_code)
313 		goto out;
314 
315 	for (i = 0; i < ARRAY_SIZE(fw_feat_tbl); i++) {
316 		const struct __fw_feat_cfg *ent = &fw_feat_tbl[i];
317 
318 		if (chip->chip_id != ent->chip_id)
319 			continue;
320 
321 		if (ent->cond(ver_code, ent->ver_code))
322 			*early_feat_map |= BIT(ent->feature);
323 	}
324 
325 out:
326 	if (full_req)
327 		return firmware;
328 
329 	release_firmware(firmware);
330 	return NULL;
331 }
332 
333 int rtw89_fw_recognize(struct rtw89_dev *rtwdev)
334 {
335 	int ret;
336 
337 	ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL);
338 	if (ret)
339 		return ret;
340 
	/* It still works even if the wowlan firmware doesn't exist. */
342 	__rtw89_fw_recognize(rtwdev, RTW89_FW_WOWLAN);
343 
344 	rtw89_fw_recognize_features(rtwdev);
345 
346 	return 0;
347 }
348 
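/* Prepend the 8-byte H2C command header: hdr0 carries the delivery type,
 * category/class/function and a rolling sequence number; hdr1 carries the
 * total length (payload plus header) and the REC/DONE ack request bits. A
 * receive-ack is forced on every fourth sequence number so the firmware
 * acknowledges at least one in four commands.
 */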
349 void rtw89_h2c_pkt_set_hdr(struct rtw89_dev *rtwdev, struct sk_buff *skb,
350 			   u8 type, u8 cat, u8 class, u8 func,
351 			   bool rack, bool dack, u32 len)
352 {
353 	struct fwcmd_hdr *hdr;
354 
355 	hdr = (struct fwcmd_hdr *)skb_push(skb, 8);
356 
357 	if (!(rtwdev->fw.h2c_seq % 4))
358 		rack = true;
359 	hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) |
360 				FIELD_PREP(H2C_HDR_CAT, cat) |
361 				FIELD_PREP(H2C_HDR_CLASS, class) |
362 				FIELD_PREP(H2C_HDR_FUNC, func) |
363 				FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq));
364 
365 	hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN,
366 					   len + H2C_HEADER_LEN) |
367 				(rack ? H2C_HDR_REC_ACK : 0) |
368 				(dack ? H2C_HDR_DONE_ACK : 0));
369 
370 	rtwdev->fw.h2c_seq++;
371 }
372 
373 static void rtw89_h2c_pkt_set_hdr_fwdl(struct rtw89_dev *rtwdev,
374 				       struct sk_buff *skb,
375 				       u8 type, u8 cat, u8 class, u8 func,
376 				       u32 len)
377 {
378 	struct fwcmd_hdr *hdr;
379 
380 	hdr = (struct fwcmd_hdr *)skb_push(skb, 8);
381 
382 	hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) |
383 				FIELD_PREP(H2C_HDR_CAT, cat) |
384 				FIELD_PREP(H2C_HDR_CLASS, class) |
385 				FIELD_PREP(H2C_HDR_FUNC, func) |
386 				FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq));
387 
388 	hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN,
389 					   len + H2C_HEADER_LEN));
390 }
391 
392 static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 len)
393 {
394 	struct sk_buff *skb;
	int ret;
396 
397 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
398 	if (!skb) {
399 		rtw89_err(rtwdev, "failed to alloc skb for fw hdr dl\n");
400 		return -ENOMEM;
401 	}
402 
403 	skb_put_data(skb, fw, len);
404 	SET_FW_HDR_PART_SIZE(skb->data, FWDL_SECTION_PER_PKT_LEN);
405 	rtw89_h2c_pkt_set_hdr_fwdl(rtwdev, skb, FWCMD_TYPE_H2C,
406 				   H2C_CAT_MAC, H2C_CL_MAC_FWDL,
407 				   H2C_FUNC_MAC_FWHDR_DL, len);
408 
409 	ret = rtw89_h2c_tx(rtwdev, skb, false);
410 	if (ret) {
411 		rtw89_err(rtwdev, "failed to send h2c\n");
412 		ret = -1;
413 		goto fail;
414 	}
415 
416 	return 0;
417 fail:
418 	dev_kfree_skb_any(skb);
419 
420 	return ret;
421 }
422 
423 static int rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 len)
424 {
425 	u8 val;
426 	int ret;
427 
428 	ret = __rtw89_fw_download_hdr(rtwdev, fw, len);
429 	if (ret) {
430 		rtw89_err(rtwdev, "[ERR]FW header download\n");
431 		return ret;
432 	}
433 
434 	ret = read_poll_timeout_atomic(rtw89_read8, val, val & B_AX_FWDL_PATH_RDY,
435 				       1, FWDL_WAIT_CNT, false,
436 				       rtwdev, R_AX_WCPU_FW_CTRL);
437 	if (ret) {
438 		rtw89_err(rtwdev, "[ERR]FWDL path ready\n");
439 		return ret;
440 	}
441 
442 	rtw89_write32(rtwdev, R_AX_HALT_H2C_CTRL, 0);
443 	rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0);
444 
445 	return 0;
446 }
447 
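/* Stream one firmware section to the WCPU, splitting it into
 * FWDL_SECTION_PER_PKT_LEN-sized chunks sent as header-less H2C packets.
 */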
448 static int __rtw89_fw_download_main(struct rtw89_dev *rtwdev,
449 				    struct rtw89_fw_hdr_section_info *info)
450 {
451 	struct sk_buff *skb;
452 	const u8 *section = info->addr;
453 	u32 residue_len = info->len;
454 	u32 pkt_len;
455 	int ret;
456 
457 	while (residue_len) {
458 		if (residue_len >= FWDL_SECTION_PER_PKT_LEN)
459 			pkt_len = FWDL_SECTION_PER_PKT_LEN;
460 		else
461 			pkt_len = residue_len;
462 
463 		skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, pkt_len);
464 		if (!skb) {
465 			rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
466 			return -ENOMEM;
467 		}
468 		skb_put_data(skb, section, pkt_len);
469 
470 		ret = rtw89_h2c_tx(rtwdev, skb, true);
471 		if (ret) {
472 			rtw89_err(rtwdev, "failed to send h2c\n");
473 			ret = -1;
474 			goto fail;
475 		}
476 
477 		section += pkt_len;
478 		residue_len -= pkt_len;
479 	}
480 
481 	return 0;
482 fail:
483 	dev_kfree_skb_any(skb);
484 
485 	return ret;
486 }
487 
488 static int rtw89_fw_download_main(struct rtw89_dev *rtwdev, const u8 *fw,
489 				  struct rtw89_fw_bin_info *info)
490 {
491 	struct rtw89_fw_hdr_section_info *section_info = info->section_info;
492 	u8 section_num = info->section_num;
493 	int ret;
494 
495 	while (section_num--) {
496 		ret = __rtw89_fw_download_main(rtwdev, section_info);
497 		if (ret)
498 			return ret;
499 		section_info++;
500 	}
501 
502 	mdelay(5);
503 
504 	ret = rtw89_fw_check_rdy(rtwdev);
505 	if (ret) {
506 		rtw89_warn(rtwdev, "download firmware fail\n");
507 		return ret;
508 	}
509 
510 	return 0;
511 }
512 
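/* Debug aid for download failures: route the firmware program counter to the
 * debug port and sample it several times so a stuck address shows up in the
 * log.
 */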
513 static void rtw89_fw_prog_cnt_dump(struct rtw89_dev *rtwdev)
514 {
515 	u32 val32;
516 	u16 index;
517 
518 	rtw89_write32(rtwdev, R_AX_DBG_CTRL,
519 		      FIELD_PREP(B_AX_DBG_SEL0, FW_PROG_CNTR_DBG_SEL) |
520 		      FIELD_PREP(B_AX_DBG_SEL1, FW_PROG_CNTR_DBG_SEL));
521 	rtw89_write32_mask(rtwdev, R_AX_SYS_STATUS1, B_AX_SEL_0XC0_MASK, MAC_DBG_SEL);
522 
523 	for (index = 0; index < 15; index++) {
524 		val32 = rtw89_read32(rtwdev, R_AX_DBG_PORT_SEL);
525 		rtw89_err(rtwdev, "[ERR]fw PC = 0x%x\n", val32);
526 		fsleep(10);
527 	}
528 }
529 
530 static void rtw89_fw_dl_fail_dump(struct rtw89_dev *rtwdev)
531 {
532 	u32 val32;
533 	u16 val16;
534 
535 	val32 = rtw89_read32(rtwdev, R_AX_WCPU_FW_CTRL);
536 	rtw89_err(rtwdev, "[ERR]fwdl 0x1E0 = 0x%x\n", val32);
537 
538 	val16 = rtw89_read16(rtwdev, R_AX_BOOT_DBG + 2);
539 	rtw89_err(rtwdev, "[ERR]fwdl 0x83F2 = 0x%x\n", val16);
540 
541 	rtw89_fw_prog_cnt_dump(rtwdev);
542 }
543 
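/* Full download sequence: halt and re-enable the WCPU for download, parse the
 * image header, wait for the H2C path to become ready, push the firmware
 * header and then every section, and finally reset the local H2C/RPWM/CPWM
 * sequence state for the freshly booted firmware.
 */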
544 int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type)
545 {
546 	struct rtw89_fw_info *fw_info = &rtwdev->fw;
547 	struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type);
548 	struct rtw89_fw_bin_info info;
549 	const u8 *fw = fw_suit->data;
550 	u32 len = fw_suit->size;
551 	u8 val;
552 	int ret;
553 
554 	rtw89_mac_disable_cpu(rtwdev);
555 	ret = rtw89_mac_enable_cpu(rtwdev, 0, true);
556 	if (ret)
557 		return ret;
558 
559 	if (!fw || !len) {
560 		rtw89_err(rtwdev, "fw type %d isn't recognized\n", type);
561 		return -ENOENT;
562 	}
563 
564 	ret = rtw89_fw_hdr_parser(rtwdev, fw, len, &info);
565 	if (ret) {
566 		rtw89_err(rtwdev, "parse fw header fail\n");
567 		goto fwdl_err;
568 	}
569 
570 	ret = read_poll_timeout_atomic(rtw89_read8, val, val & B_AX_H2C_PATH_RDY,
571 				       1, FWDL_WAIT_CNT, false,
572 				       rtwdev, R_AX_WCPU_FW_CTRL);
573 	if (ret) {
574 		rtw89_err(rtwdev, "[ERR]H2C path ready\n");
575 		goto fwdl_err;
576 	}
577 
578 	ret = rtw89_fw_download_hdr(rtwdev, fw, info.hdr_len - info.dynamic_hdr_len);
579 	if (ret) {
580 		ret = -EBUSY;
581 		goto fwdl_err;
582 	}
583 
584 	ret = rtw89_fw_download_main(rtwdev, fw, &info);
585 	if (ret) {
586 		ret = -EBUSY;
587 		goto fwdl_err;
588 	}
589 
590 	fw_info->h2c_seq = 0;
591 	fw_info->rec_seq = 0;
592 	rtwdev->mac.rpwm_seq_num = RPWM_SEQ_NUM_MAX;
593 	rtwdev->mac.cpwm_seq_num = CPWM_SEQ_NUM_MAX;
594 
595 	return ret;
596 
597 fwdl_err:
598 	rtw89_fw_dl_fail_dump(rtwdev);
599 	return ret;
600 }
601 
602 int rtw89_wait_firmware_completion(struct rtw89_dev *rtwdev)
603 {
604 	struct rtw89_fw_info *fw = &rtwdev->fw;
605 
606 	wait_for_completion(&fw->completion);
607 	if (!fw->firmware)
608 		return -EINVAL;
609 
610 	return 0;
611 }
612 
613 static void rtw89_load_firmware_cb(const struct firmware *firmware, void *context)
614 {
615 	struct rtw89_fw_info *fw = context;
616 	struct rtw89_dev *rtwdev = fw->rtwdev;
617 
618 	if (!firmware || !firmware->data) {
619 		rtw89_err(rtwdev, "failed to request firmware\n");
620 		complete_all(&fw->completion);
621 		return;
622 	}
623 
624 	fw->firmware = firmware;
625 	complete_all(&fw->completion);
626 }
627 
628 int rtw89_load_firmware(struct rtw89_dev *rtwdev)
629 {
630 	struct rtw89_fw_info *fw = &rtwdev->fw;
631 	const char *fw_name = rtwdev->chip->fw_name;
632 	int ret;
633 
634 	fw->rtwdev = rtwdev;
635 	init_completion(&fw->completion);
636 
637 	if (fw->firmware) {
638 		rtw89_debug(rtwdev, RTW89_DBG_FW,
639 			    "full firmware has been early requested\n");
640 		complete_all(&fw->completion);
641 		return 0;
642 	}
643 
644 	ret = request_firmware_nowait(THIS_MODULE, true, fw_name, rtwdev->dev,
645 				      GFP_KERNEL, fw, rtw89_load_firmware_cb);
646 	if (ret) {
		rtw89_err(rtwdev, "failed to request firmware asynchronously\n");
648 		return ret;
649 	}
650 
651 	return 0;
652 }
653 
654 void rtw89_unload_firmware(struct rtw89_dev *rtwdev)
655 {
656 	struct rtw89_fw_info *fw = &rtwdev->fw;
657 
658 	rtw89_wait_firmware_completion(rtwdev);
659 
660 	if (fw->firmware) {
661 		release_firmware(fw->firmware);
662 
		/* assign NULL back in case rtw89_free_ieee80211_hw()
		 * tries to release the same one again.
		 */
666 		fw->firmware = NULL;
667 	}
668 }
669 
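/* The H2C builders below follow a common pattern: allocate an skb with header
 * room, reserve the fixed command length, fill the payload with the
 * SET_*()/RTW89_SET_FWCMD_*() field helpers, prepend the command header via
 * rtw89_h2c_pkt_set_hdr() and hand the skb to rtw89_h2c_tx(), freeing it only
 * on transmit failure.
 */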
670 #define H2C_CAM_LEN 60
671 int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
672 		     struct rtw89_sta *rtwsta, const u8 *scan_mac_addr)
673 {
674 	struct sk_buff *skb;
675 	int ret;
676 
677 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CAM_LEN);
678 	if (!skb) {
679 		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
680 		return -ENOMEM;
681 	}
682 	skb_put(skb, H2C_CAM_LEN);
683 	rtw89_cam_fill_addr_cam_info(rtwdev, rtwvif, rtwsta, scan_mac_addr, skb->data);
684 	rtw89_cam_fill_bssid_cam_info(rtwdev, rtwvif, rtwsta, skb->data);
685 
686 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
687 			      H2C_CAT_MAC,
688 			      H2C_CL_MAC_ADDR_CAM_UPDATE,
689 			      H2C_FUNC_MAC_ADDR_CAM_UPD, 0, 1,
690 			      H2C_CAM_LEN);
691 
692 	ret = rtw89_h2c_tx(rtwdev, skb, false);
693 	if (ret) {
694 		rtw89_err(rtwdev, "failed to send h2c\n");
695 		goto fail;
696 	}
697 
698 	return 0;
699 fail:
700 	dev_kfree_skb_any(skb);
701 
702 	return ret;
703 }
704 
705 #define H2C_DCTL_SEC_CAM_LEN 68
706 int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev,
707 				 struct rtw89_vif *rtwvif,
708 				 struct rtw89_sta *rtwsta)
709 {
710 	struct sk_buff *skb;
711 	int ret;
712 
713 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DCTL_SEC_CAM_LEN);
714 	if (!skb) {
715 		rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n");
716 		return -ENOMEM;
717 	}
718 	skb_put(skb, H2C_DCTL_SEC_CAM_LEN);
719 
720 	rtw89_cam_fill_dctl_sec_cam_info_v1(rtwdev, rtwvif, rtwsta, skb->data);
721 
722 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
723 			      H2C_CAT_MAC,
724 			      H2C_CL_MAC_FR_EXCHG,
725 			      H2C_FUNC_MAC_DCTLINFO_UD_V1, 0, 0,
726 			      H2C_DCTL_SEC_CAM_LEN);
727 
728 	ret = rtw89_h2c_tx(rtwdev, skb, false);
729 	if (ret) {
730 		rtw89_err(rtwdev, "failed to send h2c\n");
731 		goto fail;
732 	}
733 
734 	return 0;
735 fail:
736 	dev_kfree_skb_any(skb);
737 
738 	return ret;
739 }
740 EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v1);
741 
742 #define H2C_BA_CAM_LEN 8
743 int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
744 			bool valid, struct ieee80211_ampdu_params *params)
745 {
746 	const struct rtw89_chip_info *chip = rtwdev->chip;
747 	struct rtw89_vif *rtwvif = rtwsta->rtwvif;
748 	u8 macid = rtwsta->mac_id;
749 	struct sk_buff *skb;
750 	u8 entry_idx;
751 	int ret;
752 
753 	ret = valid ?
754 	      rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx) :
755 	      rtw89_core_release_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx);
756 	if (ret) {
		/* It still works even if we don't get a static BA CAM entry,
		 * because the hardware can create a dynamic BA CAM entry
		 * automatically.
		 */
760 		rtw89_debug(rtwdev, RTW89_DBG_TXRX,
761 			    "failed to %s entry tid=%d for h2c ba cam\n",
762 			    valid ? "alloc" : "free", params->tid);
763 		return 0;
764 	}
765 
766 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_BA_CAM_LEN);
767 	if (!skb) {
768 		rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n");
769 		return -ENOMEM;
770 	}
771 	skb_put(skb, H2C_BA_CAM_LEN);
772 	SET_BA_CAM_MACID(skb->data, macid);
773 	if (chip->bacam_v1)
774 		SET_BA_CAM_ENTRY_IDX_V1(skb->data, entry_idx);
775 	else
776 		SET_BA_CAM_ENTRY_IDX(skb->data, entry_idx);
777 	if (!valid)
778 		goto end;
779 	SET_BA_CAM_VALID(skb->data, valid);
780 	SET_BA_CAM_TID(skb->data, params->tid);
781 	if (params->buf_size > 64)
782 		SET_BA_CAM_BMAP_SIZE(skb->data, 4);
783 	else
784 		SET_BA_CAM_BMAP_SIZE(skb->data, 0);
785 	/* If init req is set, hw will set the ssn */
786 	SET_BA_CAM_INIT_REQ(skb->data, 1);
787 	SET_BA_CAM_SSN(skb->data, params->ssn);
788 
789 	if (chip->bacam_v1) {
790 		SET_BA_CAM_STD_EN(skb->data, 1);
791 		SET_BA_CAM_BAND(skb->data, rtwvif->mac_idx);
792 	}
793 
794 end:
795 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
796 			      H2C_CAT_MAC,
797 			      H2C_CL_BA_CAM,
798 			      H2C_FUNC_MAC_BA_CAM, 0, 1,
799 			      H2C_BA_CAM_LEN);
800 
801 	ret = rtw89_h2c_tx(rtwdev, skb, false);
802 	if (ret) {
803 		rtw89_err(rtwdev, "failed to send h2c\n");
804 		goto fail;
805 	}
806 
807 	return 0;
808 fail:
809 	dev_kfree_skb_any(skb);
810 
811 	return ret;
812 }
813 
814 static int rtw89_fw_h2c_init_dynamic_ba_cam_v1(struct rtw89_dev *rtwdev,
815 					       u8 entry_idx, u8 uid)
816 {
817 	struct sk_buff *skb;
818 	int ret;
819 
820 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_BA_CAM_LEN);
821 	if (!skb) {
822 		rtw89_err(rtwdev, "failed to alloc skb for dynamic h2c ba cam\n");
823 		return -ENOMEM;
824 	}
825 	skb_put(skb, H2C_BA_CAM_LEN);
826 
827 	SET_BA_CAM_VALID(skb->data, 1);
828 	SET_BA_CAM_ENTRY_IDX_V1(skb->data, entry_idx);
829 	SET_BA_CAM_UID(skb->data, uid);
830 	SET_BA_CAM_BAND(skb->data, 0);
831 	SET_BA_CAM_STD_EN(skb->data, 0);
832 
833 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
834 			      H2C_CAT_MAC,
835 			      H2C_CL_BA_CAM,
836 			      H2C_FUNC_MAC_BA_CAM, 0, 1,
837 			      H2C_BA_CAM_LEN);
838 
839 	ret = rtw89_h2c_tx(rtwdev, skb, false);
840 	if (ret) {
841 		rtw89_err(rtwdev, "failed to send h2c\n");
842 		goto fail;
843 	}
844 
845 	return 0;
846 fail:
847 	dev_kfree_skb_any(skb);
848 
849 	return ret;
850 }
851 
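/* On chips with the v1 BA CAM layout, pre-program the dynamic entries that
 * follow the static range: entry indices start at chip->bacam_num and each
 * gets a sequential UID.
 */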
852 void rtw89_fw_h2c_init_ba_cam_v1(struct rtw89_dev *rtwdev)
853 {
854 	const struct rtw89_chip_info *chip = rtwdev->chip;
855 	u8 entry_idx = chip->bacam_num;
856 	u8 uid = 0;
857 	int i;
858 
859 	for (i = 0; i < chip->bacam_dynamic_num; i++) {
860 		rtw89_fw_h2c_init_dynamic_ba_cam_v1(rtwdev, entry_idx, uid);
861 		entry_idx++;
862 		uid++;
863 	}
864 }
865 
866 #define H2C_LOG_CFG_LEN 12
867 int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable)
868 {
869 	struct sk_buff *skb;
870 	u32 comp = enable ? BIT(RTW89_FW_LOG_COMP_INIT) | BIT(RTW89_FW_LOG_COMP_TASK) |
871 			    BIT(RTW89_FW_LOG_COMP_PS) | BIT(RTW89_FW_LOG_COMP_ERROR) : 0;
872 	int ret;
873 
874 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LOG_CFG_LEN);
875 	if (!skb) {
876 		rtw89_err(rtwdev, "failed to alloc skb for fw log cfg\n");
877 		return -ENOMEM;
878 	}
879 
880 	skb_put(skb, H2C_LOG_CFG_LEN);
881 	SET_LOG_CFG_LEVEL(skb->data, RTW89_FW_LOG_LEVEL_SER);
882 	SET_LOG_CFG_PATH(skb->data, BIT(RTW89_FW_LOG_LEVEL_C2H));
883 	SET_LOG_CFG_COMP(skb->data, comp);
884 	SET_LOG_CFG_COMP_EXT(skb->data, 0);
885 
886 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
887 			      H2C_CAT_MAC,
888 			      H2C_CL_FW_INFO,
889 			      H2C_FUNC_LOG_CFG, 0, 0,
890 			      H2C_LOG_CFG_LEN);
891 
892 	ret = rtw89_h2c_tx(rtwdev, skb, false);
893 	if (ret) {
894 		rtw89_err(rtwdev, "failed to send h2c\n");
895 		goto fail;
896 	}
897 
898 	return 0;
899 fail:
900 	dev_kfree_skb_any(skb);
901 
902 	return ret;
903 }
904 
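/* Build a template frame (PS-Poll, probe response, null or QoS-null) via
 * mac80211, register it with the firmware packet-offload engine and track it
 * on the WoWLAN packet list; the offload ID chosen by
 * rtw89_fw_h2c_add_pkt_offload() is returned through @id.
 */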
905 static int rtw89_fw_h2c_add_wow_fw_ofld(struct rtw89_dev *rtwdev,
906 					struct rtw89_vif *rtwvif,
907 					enum rtw89_fw_pkt_ofld_type type,
908 					u8 *id)
909 {
910 	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
911 	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
912 	struct rtw89_pktofld_info *info;
913 	struct sk_buff *skb;
914 	int ret;
915 
916 	info = kzalloc(sizeof(*info), GFP_KERNEL);
917 	if (!info)
918 		return -ENOMEM;
919 
920 	switch (type) {
921 	case RTW89_PKT_OFLD_TYPE_PS_POLL:
922 		skb = ieee80211_pspoll_get(rtwdev->hw, vif);
923 		break;
924 	case RTW89_PKT_OFLD_TYPE_PROBE_RSP:
925 		skb = ieee80211_proberesp_get(rtwdev->hw, vif);
926 		break;
927 	case RTW89_PKT_OFLD_TYPE_NULL_DATA:
928 		skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, false);
929 		break;
930 	case RTW89_PKT_OFLD_TYPE_QOS_NULL:
931 		skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, true);
932 		break;
933 	default:
934 		goto err;
935 	}
936 
937 	if (!skb)
938 		goto err;
939 
940 	list_add_tail(&info->list, &rtw_wow->pkt_list);
941 	ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb);
942 	kfree_skb(skb);
943 
944 	if (ret)
945 		return ret;
946 
947 	*id = info->id;
948 	return 0;
949 
950 err:
951 	kfree(info);
952 	return -ENOMEM;
953 }
954 
955 #define H2C_GENERAL_PKT_LEN 6
956 #define H2C_GENERAL_PKT_ID_UND 0xff
957 int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev, u8 macid)
958 {
959 	struct sk_buff *skb;
960 	int ret;
961 
962 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_GENERAL_PKT_LEN);
963 	if (!skb) {
964 		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
965 		return -ENOMEM;
966 	}
967 	skb_put(skb, H2C_GENERAL_PKT_LEN);
968 	SET_GENERAL_PKT_MACID(skb->data, macid);
969 	SET_GENERAL_PKT_PROBRSP_ID(skb->data, H2C_GENERAL_PKT_ID_UND);
970 	SET_GENERAL_PKT_PSPOLL_ID(skb->data, H2C_GENERAL_PKT_ID_UND);
971 	SET_GENERAL_PKT_NULL_ID(skb->data, H2C_GENERAL_PKT_ID_UND);
972 	SET_GENERAL_PKT_QOS_NULL_ID(skb->data, H2C_GENERAL_PKT_ID_UND);
973 	SET_GENERAL_PKT_CTS2SELF_ID(skb->data, H2C_GENERAL_PKT_ID_UND);
974 
975 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
976 			      H2C_CAT_MAC,
977 			      H2C_CL_FW_INFO,
978 			      H2C_FUNC_MAC_GENERAL_PKT, 0, 1,
979 			      H2C_GENERAL_PKT_LEN);
980 
981 	ret = rtw89_h2c_tx(rtwdev, skb, false);
982 	if (ret) {
983 		rtw89_err(rtwdev, "failed to send h2c\n");
984 		goto fail;
985 	}
986 
987 	return 0;
988 fail:
989 	dev_kfree_skb_any(skb);
990 
991 	return ret;
992 }
993 
994 #define H2C_LPS_PARM_LEN 8
995 int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev,
996 			  struct rtw89_lps_parm *lps_param)
997 {
998 	struct sk_buff *skb;
999 	int ret;
1000 
1001 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LPS_PARM_LEN);
1002 	if (!skb) {
1003 		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
1004 		return -ENOMEM;
1005 	}
1006 	skb_put(skb, H2C_LPS_PARM_LEN);
1007 
1008 	SET_LPS_PARM_MACID(skb->data, lps_param->macid);
1009 	SET_LPS_PARM_PSMODE(skb->data, lps_param->psmode);
1010 	SET_LPS_PARM_LASTRPWM(skb->data, lps_param->lastrpwm);
1011 	SET_LPS_PARM_RLBM(skb->data, 1);
1012 	SET_LPS_PARM_SMARTPS(skb->data, 1);
1013 	SET_LPS_PARM_AWAKEINTERVAL(skb->data, 1);
1014 	SET_LPS_PARM_VOUAPSD(skb->data, 0);
1015 	SET_LPS_PARM_VIUAPSD(skb->data, 0);
1016 	SET_LPS_PARM_BEUAPSD(skb->data, 0);
1017 	SET_LPS_PARM_BKUAPSD(skb->data, 0);
1018 
1019 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1020 			      H2C_CAT_MAC,
1021 			      H2C_CL_MAC_PS,
1022 			      H2C_FUNC_MAC_LPS_PARM, 0, 1,
1023 			      H2C_LPS_PARM_LEN);
1024 
1025 	ret = rtw89_h2c_tx(rtwdev, skb, false);
1026 	if (ret) {
1027 		rtw89_err(rtwdev, "failed to send h2c\n");
1028 		goto fail;
1029 	}
1030 
1031 	return 0;
1032 fail:
1033 	dev_kfree_skb_any(skb);
1034 
1035 	return ret;
1036 }
1037 
1038 #define H2C_P2P_ACT_LEN 20
1039 int rtw89_fw_h2c_p2p_act(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
1040 			 struct ieee80211_p2p_noa_desc *desc,
1041 			 u8 act, u8 noa_id)
1042 {
1043 	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
1044 	bool p2p_type_gc = rtwvif->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT;
1045 	u8 ctwindow_oppps = vif->bss_conf.p2p_noa_attr.oppps_ctwindow;
1046 	struct sk_buff *skb;
1047 	u8 *cmd;
1048 	int ret;
1049 
1050 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_P2P_ACT_LEN);
1051 	if (!skb) {
1052 		rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n");
1053 		return -ENOMEM;
1054 	}
1055 	skb_put(skb, H2C_P2P_ACT_LEN);
1056 	cmd = skb->data;
1057 
1058 	RTW89_SET_FWCMD_P2P_MACID(cmd, rtwvif->mac_id);
1059 	RTW89_SET_FWCMD_P2P_P2PID(cmd, 0);
1060 	RTW89_SET_FWCMD_P2P_NOAID(cmd, noa_id);
1061 	RTW89_SET_FWCMD_P2P_ACT(cmd, act);
1062 	RTW89_SET_FWCMD_P2P_TYPE(cmd, p2p_type_gc);
1063 	RTW89_SET_FWCMD_P2P_ALL_SLEP(cmd, 0);
1064 	if (desc) {
1065 		RTW89_SET_FWCMD_NOA_START_TIME(cmd, desc->start_time);
1066 		RTW89_SET_FWCMD_NOA_INTERVAL(cmd, desc->interval);
1067 		RTW89_SET_FWCMD_NOA_DURATION(cmd, desc->duration);
1068 		RTW89_SET_FWCMD_NOA_COUNT(cmd, desc->count);
1069 		RTW89_SET_FWCMD_NOA_CTWINDOW(cmd, ctwindow_oppps);
1070 	}
1071 
1072 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1073 			      H2C_CAT_MAC, H2C_CL_MAC_PS,
1074 			      H2C_FUNC_P2P_ACT, 0, 0,
1075 			      H2C_P2P_ACT_LEN);
1076 
1077 	ret = rtw89_h2c_tx(rtwdev, skb, false);
1078 	if (ret) {
1079 		rtw89_err(rtwdev, "failed to send h2c\n");
1080 		goto fail;
1081 	}
1082 
1083 	return 0;
1084 fail:
1085 	dev_kfree_skb_any(skb);
1086 
1087 	return ret;
1088 }
1089 
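/* Program the CMAC-table TX path fields from the configured TX antenna:
 * NTX_PATH_EN falls back to RF_B when no antenna is set, and PATH_MAP_B is
 * only set for the RF_AB (both antennas) case; the other path maps stay 0.
 */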
1090 static void __rtw89_fw_h2c_set_tx_path(struct rtw89_dev *rtwdev,
1091 				       struct sk_buff *skb)
1092 {
1093 	struct rtw89_hal *hal = &rtwdev->hal;
1094 	u8 ntx_path = hal->antenna_tx ? hal->antenna_tx : RF_B;
1095 	u8 map_b = hal->antenna_tx == RF_AB ? 1 : 0;
1096 
1097 	SET_CMC_TBL_NTX_PATH_EN(skb->data, ntx_path);
1098 	SET_CMC_TBL_PATH_MAP_A(skb->data, 0);
1099 	SET_CMC_TBL_PATH_MAP_B(skb->data, map_b);
1100 	SET_CMC_TBL_PATH_MAP_C(skb->data, 0);
1101 	SET_CMC_TBL_PATH_MAP_D(skb->data, 0);
1102 }
1103 
1104 #define H2C_CMC_TBL_LEN 68
1105 int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev,
1106 				  struct rtw89_vif *rtwvif)
1107 {
1108 	const struct rtw89_chip_info *chip = rtwdev->chip;
1109 	struct sk_buff *skb;
1110 	u8 macid = rtwvif->mac_id;
1111 	int ret;
1112 
1113 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
1114 	if (!skb) {
1115 		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
1116 		return -ENOMEM;
1117 	}
1118 	skb_put(skb, H2C_CMC_TBL_LEN);
1119 	SET_CTRL_INFO_MACID(skb->data, macid);
1120 	SET_CTRL_INFO_OPERATION(skb->data, 1);
1121 	if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) {
1122 		SET_CMC_TBL_TXPWR_MODE(skb->data, 0);
1123 		__rtw89_fw_h2c_set_tx_path(rtwdev, skb);
1124 		SET_CMC_TBL_ANTSEL_A(skb->data, 0);
1125 		SET_CMC_TBL_ANTSEL_B(skb->data, 0);
1126 		SET_CMC_TBL_ANTSEL_C(skb->data, 0);
1127 		SET_CMC_TBL_ANTSEL_D(skb->data, 0);
1128 	}
1129 	SET_CMC_TBL_DOPPLER_CTRL(skb->data, 0);
1130 	SET_CMC_TBL_TXPWR_TOLERENCE(skb->data, 0);
1131 	if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE)
1132 		SET_CMC_TBL_DATA_DCM(skb->data, 0);
1133 
1134 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1135 			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
1136 			      chip->h2c_cctl_func_id, 0, 1,
1137 			      H2C_CMC_TBL_LEN);
1138 
1139 	ret = rtw89_h2c_tx(rtwdev, skb, false);
1140 	if (ret) {
1141 		rtw89_err(rtwdev, "failed to send h2c\n");
1142 		goto fail;
1143 	}
1144 
1145 	return 0;
1146 fail:
1147 	dev_kfree_skb_any(skb);
1148 
1149 	return ret;
1150 }
1151 
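/* Derive the per-bandwidth nominal packet padding for an HE peer. If the peer
 * advertises no PPE thresholds, the nominal-padding capability bits are used
 * directly for every bandwidth; otherwise the PPET16/PPET8 pairs for our NSS
 * are extracted from the PPE thresholds field (e.g. with one RU bit set and
 * NSS index 0 the first pair sits at bit offset 7) and mapped to a 0/1/2
 * padding code.
 */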
1152 static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev,
1153 				     struct ieee80211_sta *sta, u8 *pads)
1154 {
1155 	bool ppe_th;
1156 	u8 ppe16, ppe8;
1157 	u8 nss = min(sta->deflink.rx_nss, rtwdev->hal.tx_nss) - 1;
1158 	u8 ppe_thres_hdr = sta->deflink.he_cap.ppe_thres[0];
1159 	u8 ru_bitmap;
1160 	u8 n, idx, sh;
1161 	u16 ppe;
1162 	int i;
1163 
1164 	if (!sta->deflink.he_cap.has_he)
1165 		return;
1166 
1167 	ppe_th = FIELD_GET(IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
1168 			   sta->deflink.he_cap.he_cap_elem.phy_cap_info[6]);
1169 	if (!ppe_th) {
1170 		u8 pad;
1171 
1172 		pad = FIELD_GET(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK,
1173 				sta->deflink.he_cap.he_cap_elem.phy_cap_info[9]);
1174 
1175 		for (i = 0; i < RTW89_PPE_BW_NUM; i++)
1176 			pads[i] = pad;
1177 
1178 		return;
1179 	}
1180 
1181 	ru_bitmap = FIELD_GET(IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK, ppe_thres_hdr);
1182 	n = hweight8(ru_bitmap);
1183 	n = 7 + (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) * nss;
1184 
1185 	for (i = 0; i < RTW89_PPE_BW_NUM; i++) {
1186 		if (!(ru_bitmap & BIT(i))) {
1187 			pads[i] = 1;
1188 			continue;
1189 		}
1190 
1191 		idx = n >> 3;
1192 		sh = n & 7;
1193 		n += IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2;
1194 
1195 		ppe = le16_to_cpu(*((__le16 *)&sta->deflink.he_cap.ppe_thres[idx]));
1196 		ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
1197 		sh += IEEE80211_PPE_THRES_INFO_PPET_SIZE;
1198 		ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
1199 
1200 		if (ppe16 != 7 && ppe8 == 7)
1201 			pads[i] = 2;
1202 		else if (ppe8 != 7)
1203 			pads[i] = 1;
1204 		else
1205 			pads[i] = 0;
1206 	}
1207 }
1208 
1209 int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
1210 				struct ieee80211_vif *vif,
1211 				struct ieee80211_sta *sta)
1212 {
1213 	const struct rtw89_chip_info *chip = rtwdev->chip;
1214 	struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
1215 	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
1216 	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
1217 	struct sk_buff *skb;
1218 	u8 pads[RTW89_PPE_BW_NUM];
1219 	u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
1220 	u16 lowest_rate;
1221 	int ret;
1222 
1223 	memset(pads, 0, sizeof(pads));
1224 	if (sta)
1225 		__get_sta_he_pkt_padding(rtwdev, sta, pads);
1226 
1227 	if (vif->p2p)
1228 		lowest_rate = RTW89_HW_RATE_OFDM6;
1229 	else if (chan->band_type == RTW89_BAND_2G)
1230 		lowest_rate = RTW89_HW_RATE_CCK1;
1231 	else
1232 		lowest_rate = RTW89_HW_RATE_OFDM6;
1233 
1234 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
1235 	if (!skb) {
1236 		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
1237 		return -ENOMEM;
1238 	}
1239 	skb_put(skb, H2C_CMC_TBL_LEN);
1240 	SET_CTRL_INFO_MACID(skb->data, mac_id);
1241 	SET_CTRL_INFO_OPERATION(skb->data, 1);
1242 	SET_CMC_TBL_DISRTSFB(skb->data, 1);
1243 	SET_CMC_TBL_DISDATAFB(skb->data, 1);
1244 	SET_CMC_TBL_RTS_RTY_LOWEST_RATE(skb->data, lowest_rate);
1245 	SET_CMC_TBL_RTS_TXCNT_LMT_SEL(skb->data, 0);
1246 	SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 0);
1247 	if (vif->type == NL80211_IFTYPE_STATION)
1248 		SET_CMC_TBL_ULDL(skb->data, 1);
1249 	else
1250 		SET_CMC_TBL_ULDL(skb->data, 0);
1251 	SET_CMC_TBL_MULTI_PORT_ID(skb->data, rtwvif->port);
1252 	if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD_V1) {
1253 		SET_CMC_TBL_NOMINAL_PKT_PADDING_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_20]);
1254 		SET_CMC_TBL_NOMINAL_PKT_PADDING40_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_40]);
1255 		SET_CMC_TBL_NOMINAL_PKT_PADDING80_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_80]);
1256 		SET_CMC_TBL_NOMINAL_PKT_PADDING160_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_160]);
1257 	} else if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) {
1258 		SET_CMC_TBL_NOMINAL_PKT_PADDING(skb->data, pads[RTW89_CHANNEL_WIDTH_20]);
1259 		SET_CMC_TBL_NOMINAL_PKT_PADDING40(skb->data, pads[RTW89_CHANNEL_WIDTH_40]);
1260 		SET_CMC_TBL_NOMINAL_PKT_PADDING80(skb->data, pads[RTW89_CHANNEL_WIDTH_80]);
1261 		SET_CMC_TBL_NOMINAL_PKT_PADDING160(skb->data, pads[RTW89_CHANNEL_WIDTH_160]);
1262 	}
1263 	if (sta)
1264 		SET_CMC_TBL_BSR_QUEUE_SIZE_FORMAT(skb->data,
1265 						  sta->deflink.he_cap.has_he);
1266 	if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE)
1267 		SET_CMC_TBL_DATA_DCM(skb->data, 0);
1268 
1269 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1270 			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
1271 			      chip->h2c_cctl_func_id, 0, 1,
1272 			      H2C_CMC_TBL_LEN);
1273 
1274 	ret = rtw89_h2c_tx(rtwdev, skb, false);
1275 	if (ret) {
1276 		rtw89_err(rtwdev, "failed to send h2c\n");
1277 		goto fail;
1278 	}
1279 
1280 	return 0;
1281 fail:
1282 	dev_kfree_skb_any(skb);
1283 
1284 	return ret;
1285 }
1286 
1287 int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev,
1288 				 struct rtw89_sta *rtwsta)
1289 {
1290 	const struct rtw89_chip_info *chip = rtwdev->chip;
1291 	struct sk_buff *skb;
1292 	int ret;
1293 
1294 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
1295 	if (!skb) {
1296 		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
1297 		return -ENOMEM;
1298 	}
1299 	skb_put(skb, H2C_CMC_TBL_LEN);
1300 	SET_CTRL_INFO_MACID(skb->data, rtwsta->mac_id);
1301 	SET_CTRL_INFO_OPERATION(skb->data, 1);
1302 	if (rtwsta->cctl_tx_time) {
1303 		SET_CMC_TBL_AMPDU_TIME_SEL(skb->data, 1);
1304 		SET_CMC_TBL_AMPDU_MAX_TIME(skb->data, rtwsta->ampdu_max_time);
1305 	}
1306 	if (rtwsta->cctl_tx_retry_limit) {
1307 		SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 1);
1308 		SET_CMC_TBL_DATA_TX_CNT_LMT(skb->data, rtwsta->data_tx_cnt_lmt);
1309 	}
1310 
1311 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1312 			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
1313 			      chip->h2c_cctl_func_id, 0, 1,
1314 			      H2C_CMC_TBL_LEN);
1315 
1316 	ret = rtw89_h2c_tx(rtwdev, skb, false);
1317 	if (ret) {
1318 		rtw89_err(rtwdev, "failed to send h2c\n");
1319 		goto fail;
1320 	}
1321 
1322 	return 0;
1323 fail:
1324 	dev_kfree_skb_any(skb);
1325 
1326 	return ret;
1327 }
1328 
1329 int rtw89_fw_h2c_txpath_cmac_tbl(struct rtw89_dev *rtwdev,
1330 				 struct rtw89_sta *rtwsta)
1331 {
1332 	const struct rtw89_chip_info *chip = rtwdev->chip;
1333 	struct sk_buff *skb;
1334 	int ret;
1335 
1336 	if (chip->h2c_cctl_func_id != H2C_FUNC_MAC_CCTLINFO_UD)
1337 		return 0;
1338 
1339 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
1340 	if (!skb) {
1341 		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
1342 		return -ENOMEM;
1343 	}
1344 	skb_put(skb, H2C_CMC_TBL_LEN);
1345 	SET_CTRL_INFO_MACID(skb->data, rtwsta->mac_id);
1346 	SET_CTRL_INFO_OPERATION(skb->data, 1);
1347 
1348 	__rtw89_fw_h2c_set_tx_path(rtwdev, skb);
1349 
1350 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1351 			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
1352 			      H2C_FUNC_MAC_CCTLINFO_UD, 0, 1,
1353 			      H2C_CMC_TBL_LEN);
1354 
1355 	ret = rtw89_h2c_tx(rtwdev, skb, false);
1356 	if (ret) {
1357 		rtw89_err(rtwdev, "failed to send h2c\n");
1358 		goto fail;
1359 	}
1360 
1361 	return 0;
1362 fail:
1363 	dev_kfree_skb_any(skb);
1364 
1365 	return ret;
1366 }
1367 
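/* Fetch the current beacon (with TIM offset) from mac80211 and push it to the
 * firmware together with port/band/rate metadata; 2 GHz links beacon at CCK1,
 * everything else (including P2P) at OFDM6.
 */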
1368 #define H2C_BCN_BASE_LEN 12
1369 int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
1370 			       struct rtw89_vif *rtwvif)
1371 {
1372 	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
1373 	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
1374 	struct sk_buff *skb;
1375 	struct sk_buff *skb_beacon;
1376 	u16 tim_offset;
1377 	int bcn_total_len;
1378 	u16 beacon_rate;
1379 	int ret;
1380 
1381 	if (vif->p2p)
1382 		beacon_rate = RTW89_HW_RATE_OFDM6;
1383 	else if (chan->band_type == RTW89_BAND_2G)
1384 		beacon_rate = RTW89_HW_RATE_CCK1;
1385 	else
1386 		beacon_rate = RTW89_HW_RATE_OFDM6;
1387 
1388 	skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset,
1389 					      NULL, 0);
1390 	if (!skb_beacon) {
1391 		rtw89_err(rtwdev, "failed to get beacon skb\n");
1392 		return -ENOMEM;
1393 	}
1394 
1395 	bcn_total_len = H2C_BCN_BASE_LEN + skb_beacon->len;
1396 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len);
1397 	if (!skb) {
1398 		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
1399 		dev_kfree_skb_any(skb_beacon);
1400 		return -ENOMEM;
1401 	}
1402 	skb_put(skb, H2C_BCN_BASE_LEN);
1403 
1404 	SET_BCN_UPD_PORT(skb->data, rtwvif->port);
1405 	SET_BCN_UPD_MBSSID(skb->data, 0);
1406 	SET_BCN_UPD_BAND(skb->data, rtwvif->mac_idx);
1407 	SET_BCN_UPD_GRP_IE_OFST(skb->data, tim_offset);
1408 	SET_BCN_UPD_MACID(skb->data, rtwvif->mac_id);
1409 	SET_BCN_UPD_SSN_SEL(skb->data, RTW89_MGMT_HW_SSN_SEL);
1410 	SET_BCN_UPD_SSN_MODE(skb->data, RTW89_MGMT_HW_SEQ_MODE);
1411 	SET_BCN_UPD_RATE(skb->data, beacon_rate);
1412 
1413 	skb_put_data(skb, skb_beacon->data, skb_beacon->len);
1414 	dev_kfree_skb_any(skb_beacon);
1415 
1416 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1417 			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
1418 			      H2C_FUNC_MAC_BCN_UPD, 0, 1,
1419 			      bcn_total_len);
1420 
1421 	ret = rtw89_h2c_tx(rtwdev, skb, false);
1422 	if (ret) {
1423 		rtw89_err(rtwdev, "failed to send h2c\n");
1424 		dev_kfree_skb_any(skb);
1425 		return ret;
1426 	}
1427 
1428 	return 0;
1429 }
1430 
1431 #define H2C_ROLE_MAINTAIN_LEN 4
1432 int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev,
1433 			       struct rtw89_vif *rtwvif,
1434 			       struct rtw89_sta *rtwsta,
1435 			       enum rtw89_upd_mode upd_mode)
1436 {
1437 	struct sk_buff *skb;
1438 	u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
1439 	u8 self_role;
1440 	int ret;
1441 
1442 	if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) {
1443 		if (rtwsta)
1444 			self_role = RTW89_SELF_ROLE_AP_CLIENT;
1445 		else
1446 			self_role = rtwvif->self_role;
1447 	} else {
1448 		self_role = rtwvif->self_role;
1449 	}
1450 
1451 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ROLE_MAINTAIN_LEN);
1452 	if (!skb) {
1453 		rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
1454 		return -ENOMEM;
1455 	}
1456 	skb_put(skb, H2C_ROLE_MAINTAIN_LEN);
1457 	SET_FWROLE_MAINTAIN_MACID(skb->data, mac_id);
1458 	SET_FWROLE_MAINTAIN_SELF_ROLE(skb->data, self_role);
1459 	SET_FWROLE_MAINTAIN_UPD_MODE(skb->data, upd_mode);
1460 	SET_FWROLE_MAINTAIN_WIFI_ROLE(skb->data, rtwvif->wifi_role);
1461 
1462 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1463 			      H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
1464 			      H2C_FUNC_MAC_FWROLE_MAINTAIN, 0, 1,
1465 			      H2C_ROLE_MAINTAIN_LEN);
1466 
1467 	ret = rtw89_h2c_tx(rtwdev, skb, false);
1468 	if (ret) {
1469 		rtw89_err(rtwdev, "failed to send h2c\n");
1470 		goto fail;
1471 	}
1472 
1473 	return 0;
1474 fail:
1475 	dev_kfree_skb_any(skb);
1476 
1477 	return ret;
1478 }
1479 
1480 #define H2C_JOIN_INFO_LEN 4
1481 int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
1482 			   struct rtw89_sta *rtwsta, bool dis_conn)
1483 {
1484 	struct sk_buff *skb;
1485 	u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
1486 	u8 self_role = rtwvif->self_role;
1487 	u8 net_type = rtwvif->net_type;
1488 	int ret;
1489 
1490 	if (net_type == RTW89_NET_TYPE_AP_MODE && rtwsta) {
1491 		self_role = RTW89_SELF_ROLE_AP_CLIENT;
1492 		net_type = dis_conn ? RTW89_NET_TYPE_NO_LINK : net_type;
1493 	}
1494 
1495 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_JOIN_INFO_LEN);
1496 	if (!skb) {
1497 		rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
1498 		return -ENOMEM;
1499 	}
1500 	skb_put(skb, H2C_JOIN_INFO_LEN);
1501 	SET_JOININFO_MACID(skb->data, mac_id);
1502 	SET_JOININFO_OP(skb->data, dis_conn);
1503 	SET_JOININFO_BAND(skb->data, rtwvif->mac_idx);
1504 	SET_JOININFO_WMM(skb->data, rtwvif->wmm);
1505 	SET_JOININFO_TGR(skb->data, rtwvif->trigger);
1506 	SET_JOININFO_ISHESTA(skb->data, 0);
1507 	SET_JOININFO_DLBW(skb->data, 0);
1508 	SET_JOININFO_TF_MAC_PAD(skb->data, 0);
1509 	SET_JOININFO_DL_T_PE(skb->data, 0);
1510 	SET_JOININFO_PORT_ID(skb->data, rtwvif->port);
1511 	SET_JOININFO_NET_TYPE(skb->data, net_type);
1512 	SET_JOININFO_WIFI_ROLE(skb->data, rtwvif->wifi_role);
1513 	SET_JOININFO_SELF_ROLE(skb->data, self_role);
1514 
1515 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1516 			      H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
1517 			      H2C_FUNC_MAC_JOININFO, 0, 1,
1518 			      H2C_JOIN_INFO_LEN);
1519 
1520 	ret = rtw89_h2c_tx(rtwdev, skb, false);
1521 	if (ret) {
1522 		rtw89_err(rtwdev, "failed to send h2c\n");
1523 		goto fail;
1524 	}
1525 
1526 	return 0;
1527 fail:
1528 	dev_kfree_skb_any(skb);
1529 
1530 	return ret;
1531 }
1532 
1533 int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp,
1534 			     bool pause)
1535 {
1536 	struct rtw89_fw_macid_pause_grp h2c = {{0}};
1537 	u8 len = sizeof(struct rtw89_fw_macid_pause_grp);
1538 	struct sk_buff *skb;
1539 	int ret;
1540 
1541 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_JOIN_INFO_LEN);
1542 	if (!skb) {
1543 		rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
1544 		return -ENOMEM;
1545 	}
1546 	h2c.mask_grp[grp] = cpu_to_le32(BIT(sh));
1547 	if (pause)
1548 		h2c.pause_grp[grp] = cpu_to_le32(BIT(sh));
1549 	skb_put_data(skb, &h2c, len);
1550 
1551 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1552 			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
1553 			      H2C_FUNC_MAC_MACID_PAUSE, 1, 0,
1554 			      len);
1555 
1556 	ret = rtw89_h2c_tx(rtwdev, skb, false);
1557 	if (ret) {
1558 		rtw89_err(rtwdev, "failed to send h2c\n");
1559 		goto fail;
1560 	}
1561 
1562 	return 0;
1563 fail:
1564 	dev_kfree_skb_any(skb);
1565 
1566 	return ret;
1567 }
1568 
1569 #define H2C_EDCA_LEN 12
1570 int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
1571 			  u8 ac, u32 val)
1572 {
1573 	struct sk_buff *skb;
1574 	int ret;
1575 
1576 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_EDCA_LEN);
1577 	if (!skb) {
1578 		rtw89_err(rtwdev, "failed to alloc skb for h2c edca\n");
1579 		return -ENOMEM;
1580 	}
1581 	skb_put(skb, H2C_EDCA_LEN);
1582 	RTW89_SET_EDCA_SEL(skb->data, 0);
1583 	RTW89_SET_EDCA_BAND(skb->data, rtwvif->mac_idx);
1584 	RTW89_SET_EDCA_WMM(skb->data, 0);
1585 	RTW89_SET_EDCA_AC(skb->data, ac);
1586 	RTW89_SET_EDCA_PARAM(skb->data, val);
1587 
1588 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1589 			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
1590 			      H2C_FUNC_USR_EDCA, 0, 1,
1591 			      H2C_EDCA_LEN);
1592 
1593 	ret = rtw89_h2c_tx(rtwdev, skb, false);
1594 	if (ret) {
1595 		rtw89_err(rtwdev, "failed to send h2c\n");
1596 		goto fail;
1597 	}
1598 
1599 	return 0;
1600 fail:
1601 	dev_kfree_skb_any(skb);
1602 
1603 	return ret;
1604 }
1605 
1606 #define H2C_TSF32_TOGL_LEN 4
1607 int rtw89_fw_h2c_tsf32_toggle(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
1608 			      bool en)
1609 {
1610 	struct sk_buff *skb;
1611 	u16 early_us = en ? 2000 : 0;
1612 	u8 *cmd;
1613 	int ret;
1614 
1615 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_TSF32_TOGL_LEN);
1616 	if (!skb) {
1617 		rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n");
1618 		return -ENOMEM;
1619 	}
1620 	skb_put(skb, H2C_TSF32_TOGL_LEN);
1621 	cmd = skb->data;
1622 
1623 	RTW89_SET_FWCMD_TSF32_TOGL_BAND(cmd, rtwvif->mac_idx);
1624 	RTW89_SET_FWCMD_TSF32_TOGL_EN(cmd, en);
1625 	RTW89_SET_FWCMD_TSF32_TOGL_PORT(cmd, rtwvif->port);
1626 	RTW89_SET_FWCMD_TSF32_TOGL_EARLY(cmd, early_us);
1627 
1628 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1629 			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
1630 			      H2C_FUNC_TSF32_TOGL, 0, 0,
1631 			      H2C_TSF32_TOGL_LEN);
1632 
1633 	ret = rtw89_h2c_tx(rtwdev, skb, false);
1634 	if (ret) {
1635 		rtw89_err(rtwdev, "failed to send h2c\n");
1636 		goto fail;
1637 	}
1638 
1639 	return 0;
1640 fail:
1641 	dev_kfree_skb_any(skb);
1642 
1643 	return ret;
1644 }
1645 
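/* Download the firmware offload configuration. The cfg[] payload below
 * appears to be an opaque, vendor-provided byte blob; the driver does not
 * decode its individual fields and sends it as-is.
 */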
1646 #define H2C_OFLD_CFG_LEN 8
1647 int rtw89_fw_h2c_set_ofld_cfg(struct rtw89_dev *rtwdev)
1648 {
1649 	static const u8 cfg[] = {0x09, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x00, 0x00};
1650 	struct sk_buff *skb;
1651 	int ret;
1652 
1653 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_OFLD_CFG_LEN);
1654 	if (!skb) {
1655 		rtw89_err(rtwdev, "failed to alloc skb for h2c ofld\n");
1656 		return -ENOMEM;
1657 	}
1658 	skb_put_data(skb, cfg, H2C_OFLD_CFG_LEN);
1659 
1660 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1661 			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
1662 			      H2C_FUNC_OFLD_CFG, 0, 1,
1663 			      H2C_OFLD_CFG_LEN);
1664 
1665 	ret = rtw89_h2c_tx(rtwdev, skb, false);
1666 	if (ret) {
1667 		rtw89_err(rtwdev, "failed to send h2c\n");
1668 		goto fail;
1669 	}
1670 
1671 	return 0;
1672 fail:
1673 	dev_kfree_skb_any(skb);
1674 
1675 	return ret;
1676 }
1677 
1678 #define H2C_RA_LEN 16
1679 int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi)
1680 {
1681 	struct sk_buff *skb;
1682 	u8 *cmd;
1683 	int ret;
1684 
1685 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_RA_LEN);
1686 	if (!skb) {
1687 		rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
1688 		return -ENOMEM;
1689 	}
1690 	skb_put(skb, H2C_RA_LEN);
1691 	cmd = skb->data;
1692 	rtw89_debug(rtwdev, RTW89_DBG_RA,
1693 		    "ra cmd msk: %llx ", ra->ra_mask);
1694 
1695 	RTW89_SET_FWCMD_RA_MODE(cmd, ra->mode_ctrl);
1696 	RTW89_SET_FWCMD_RA_BW_CAP(cmd, ra->bw_cap);
1697 	RTW89_SET_FWCMD_RA_MACID(cmd, ra->macid);
1698 	RTW89_SET_FWCMD_RA_DCM(cmd, ra->dcm_cap);
1699 	RTW89_SET_FWCMD_RA_ER(cmd, ra->er_cap);
1700 	RTW89_SET_FWCMD_RA_INIT_RATE_LV(cmd, ra->init_rate_lv);
1701 	RTW89_SET_FWCMD_RA_UPD_ALL(cmd, ra->upd_all);
1702 	RTW89_SET_FWCMD_RA_SGI(cmd, ra->en_sgi);
1703 	RTW89_SET_FWCMD_RA_LDPC(cmd, ra->ldpc_cap);
1704 	RTW89_SET_FWCMD_RA_STBC(cmd, ra->stbc_cap);
1705 	RTW89_SET_FWCMD_RA_SS_NUM(cmd, ra->ss_num);
1706 	RTW89_SET_FWCMD_RA_GILTF(cmd, ra->giltf);
1707 	RTW89_SET_FWCMD_RA_UPD_BW_NSS_MASK(cmd, ra->upd_bw_nss_mask);
1708 	RTW89_SET_FWCMD_RA_UPD_MASK(cmd, ra->upd_mask);
1709 	RTW89_SET_FWCMD_RA_MASK_0(cmd, FIELD_GET(MASKBYTE0, ra->ra_mask));
1710 	RTW89_SET_FWCMD_RA_MASK_1(cmd, FIELD_GET(MASKBYTE1, ra->ra_mask));
1711 	RTW89_SET_FWCMD_RA_MASK_2(cmd, FIELD_GET(MASKBYTE2, ra->ra_mask));
1712 	RTW89_SET_FWCMD_RA_MASK_3(cmd, FIELD_GET(MASKBYTE3, ra->ra_mask));
1713 	RTW89_SET_FWCMD_RA_MASK_4(cmd, FIELD_GET(MASKBYTE4, ra->ra_mask));
1714 	RTW89_SET_FWCMD_RA_FIX_GILTF_EN(cmd, ra->fix_giltf_en);
1715 	RTW89_SET_FWCMD_RA_FIX_GILTF(cmd, ra->fix_giltf);
1716 
1717 	if (csi) {
1718 		RTW89_SET_FWCMD_RA_BFEE_CSI_CTL(cmd, 1);
1719 		RTW89_SET_FWCMD_RA_BAND_NUM(cmd, ra->band_num);
1720 		RTW89_SET_FWCMD_RA_CR_TBL_SEL(cmd, ra->cr_tbl_sel);
1721 		RTW89_SET_FWCMD_RA_FIXED_CSI_RATE_EN(cmd, ra->fixed_csi_rate_en);
1722 		RTW89_SET_FWCMD_RA_RA_CSI_RATE_EN(cmd, ra->ra_csi_rate_en);
1723 		RTW89_SET_FWCMD_RA_FIXED_CSI_MCS_SS_IDX(cmd, ra->csi_mcs_ss_idx);
1724 		RTW89_SET_FWCMD_RA_FIXED_CSI_MODE(cmd, ra->csi_mode);
1725 		RTW89_SET_FWCMD_RA_FIXED_CSI_GI_LTF(cmd, ra->csi_gi_ltf);
1726 		RTW89_SET_FWCMD_RA_FIXED_CSI_BW(cmd, ra->csi_bw);
1727 	}
1728 
1729 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1730 			      H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RA,
1731 			      H2C_FUNC_OUTSRC_RA_MACIDCFG, 0, 0,
1732 			      H2C_RA_LEN);
1733 
1734 	ret = rtw89_h2c_tx(rtwdev, skb, false);
1735 	if (ret) {
1736 		rtw89_err(rtwdev, "failed to send h2c\n");
1737 		goto fail;
1738 	}
1739 
1740 	return 0;
1741 fail:
1742 	dev_kfree_skb_any(skb);
1743 
1744 	return ret;
1745 }
1746 
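/* BT-coexistence driver-info reports: the CXDRVINFO_* H2C messages below
 * mirror driver-side coex state (init/module info and per-port WiFi role
 * info) to the firmware coex mechanism. Each payload starts with a 2-byte
 * header carrying the type and the length excluding the header itself.
 */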
1747 #define H2C_LEN_CXDRVHDR 2
1748 #define H2C_LEN_CXDRVINFO_INIT (12 + H2C_LEN_CXDRVHDR)
1749 int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev)
1750 {
1751 	struct rtw89_btc *btc = &rtwdev->btc;
1752 	struct rtw89_btc_dm *dm = &btc->dm;
1753 	struct rtw89_btc_init_info *init_info = &dm->init_info;
1754 	struct rtw89_btc_module *module = &init_info->module;
1755 	struct rtw89_btc_ant_info *ant = &module->ant;
1756 	struct sk_buff *skb;
1757 	u8 *cmd;
1758 	int ret;
1759 
1760 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_INIT);
1761 	if (!skb) {
1762 		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init\n");
1763 		return -ENOMEM;
1764 	}
1765 	skb_put(skb, H2C_LEN_CXDRVINFO_INIT);
1766 	cmd = skb->data;
1767 
1768 	RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_INIT);
1769 	RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_INIT - H2C_LEN_CXDRVHDR);
1770 
1771 	RTW89_SET_FWCMD_CXINIT_ANT_TYPE(cmd, ant->type);
1772 	RTW89_SET_FWCMD_CXINIT_ANT_NUM(cmd, ant->num);
1773 	RTW89_SET_FWCMD_CXINIT_ANT_ISO(cmd, ant->isolation);
1774 	RTW89_SET_FWCMD_CXINIT_ANT_POS(cmd, ant->single_pos);
1775 	RTW89_SET_FWCMD_CXINIT_ANT_DIVERSITY(cmd, ant->diversity);
1776 
1777 	RTW89_SET_FWCMD_CXINIT_MOD_RFE(cmd, module->rfe_type);
1778 	RTW89_SET_FWCMD_CXINIT_MOD_CV(cmd, module->cv);
1779 	RTW89_SET_FWCMD_CXINIT_MOD_BT_SOLO(cmd, module->bt_solo);
1780 	RTW89_SET_FWCMD_CXINIT_MOD_BT_POS(cmd, module->bt_pos);
1781 	RTW89_SET_FWCMD_CXINIT_MOD_SW_TYPE(cmd, module->switch_type);
1782 
1783 	RTW89_SET_FWCMD_CXINIT_WL_GCH(cmd, init_info->wl_guard_ch);
1784 	RTW89_SET_FWCMD_CXINIT_WL_ONLY(cmd, init_info->wl_only);
1785 	RTW89_SET_FWCMD_CXINIT_WL_INITOK(cmd, init_info->wl_init_ok);
1786 	RTW89_SET_FWCMD_CXINIT_DBCC_EN(cmd, init_info->dbcc_en);
1787 	RTW89_SET_FWCMD_CXINIT_CX_OTHER(cmd, init_info->cx_other);
1788 	RTW89_SET_FWCMD_CXINIT_BT_ONLY(cmd, init_info->bt_only);
1789 
1790 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1791 			      H2C_CAT_OUTSRC, BTFC_SET,
1792 			      SET_DRV_INFO, 0, 0,
1793 			      H2C_LEN_CXDRVINFO_INIT);
1794 
1795 	ret = rtw89_h2c_tx(rtwdev, skb, false);
1796 	if (ret) {
1797 		rtw89_err(rtwdev, "failed to send h2c\n");
1798 		goto fail;
1799 	}
1800 
1801 	return 0;
1802 fail:
1803 	dev_kfree_skb_any(skb);
1804 
1805 	return ret;
1806 }
1807 
1808 #define PORT_DATA_OFFSET 4
1809 #define H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN 12
1810 #define H2C_LEN_CXDRVINFO_ROLE (4 + 12 * RTW89_PORT_NUM + H2C_LEN_CXDRVHDR)
1811 #define H2C_LEN_CXDRVINFO_ROLE_V1 (4 + 16 * RTW89_PORT_NUM + \
1812 				   H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + \
1813 				   H2C_LEN_CXDRVHDR)
1814 int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev)
1815 {
1816 	struct rtw89_btc *btc = &rtwdev->btc;
1817 	struct rtw89_btc_wl_info *wl = &btc->cx.wl;
1818 	struct rtw89_btc_wl_role_info *role_info = &wl->role_info;
1819 	struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role;
1820 	struct rtw89_btc_wl_active_role *active = role_info->active_role;
1821 	struct sk_buff *skb;
1822 	u8 offset = 0;
1823 	u8 *cmd;
1824 	int ret;
1825 	int i;
1826 
1827 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_ROLE);
1828 	if (!skb) {
1829 		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n");
1830 		return -ENOMEM;
1831 	}
1832 	skb_put(skb, H2C_LEN_CXDRVINFO_ROLE);
1833 	cmd = skb->data;
1834 
1835 	RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE);
1836 	RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_ROLE - H2C_LEN_CXDRVHDR);
1837 
1838 	RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt);
1839 	RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode);
1840 
1841 	RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none);
1842 	RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station);
1843 	RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap);
1844 	RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap);
1845 	RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc);
1846 	RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master);
1847 	RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh);
1848 	RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter);
1849 	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device);
1850 	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc);
1851 	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go);
1852 	RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan);
1853 
1854 	for (i = 0; i < RTW89_PORT_NUM; i++, active++) {
1855 		RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset);
1856 		RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset);
1857 		RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset);
1858 		RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset);
1859 		RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset);
1860 		RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset);
1861 		RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset);
1862 		RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset);
1863 		RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset);
1864 		RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset);
1865 		RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset);
1866 		RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset);
1867 		RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset);
1868 	}
1869 
1870 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1871 			      H2C_CAT_OUTSRC, BTFC_SET,
1872 			      SET_DRV_INFO, 0, 0,
1873 			      H2C_LEN_CXDRVINFO_ROLE);
1874 
1875 	ret = rtw89_h2c_tx(rtwdev, skb, false);
1876 	if (ret) {
1877 		rtw89_err(rtwdev, "failed to send h2c\n");
1878 		goto fail;
1879 	}
1880 
1881 	return 0;
1882 fail:
1883 	dev_kfree_skb_any(skb);
1884 
1885 	return ret;
1886 }
1887 
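/* Like rtw89_fw_h2c_cxdrv_role(), but uses the v1 payload layout: each
 * per-port active-role record is PORT_DATA_OFFSET bytes wider (it also
 * carries the NoA duration) and a DBCC/multi-role block is appended after
 * the last port entry.
 */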
1888 int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev)
1889 {
1890 	struct rtw89_btc *btc = &rtwdev->btc;
1891 	struct rtw89_btc_wl_info *wl = &btc->cx.wl;
1892 	struct rtw89_btc_wl_role_info_v1 *role_info = &wl->role_info_v1;
1893 	struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role;
1894 	struct rtw89_btc_wl_active_role_v1 *active = role_info->active_role_v1;
1895 	struct sk_buff *skb;
1896 	u8 *cmd, offset;
1897 	int ret;
1898 	int i;
1899 
1900 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_ROLE_V1);
1901 	if (!skb) {
1902 		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n");
1903 		return -ENOMEM;
1904 	}
1905 	skb_put(skb, H2C_LEN_CXDRVINFO_ROLE_V1);
1906 	cmd = skb->data;
1907 
1908 	RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE);
1909 	RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_ROLE_V1 - H2C_LEN_CXDRVHDR);
1910 
1911 	RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt);
1912 	RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode);
1913 
1914 	RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none);
1915 	RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station);
1916 	RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap);
1917 	RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap);
1918 	RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc);
1919 	RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master);
1920 	RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh);
1921 	RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter);
1922 	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device);
1923 	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc);
1924 	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go);
1925 	RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan);
1926 
1927 	offset = PORT_DATA_OFFSET;
1928 	for (i = 0; i < RTW89_PORT_NUM; i++, active++) {
1929 		RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset);
1930 		RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset);
1931 		RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset);
1932 		RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset);
1933 		RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset);
1934 		RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset);
1935 		RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset);
1936 		RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset);
1937 		RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset);
1938 		RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset);
1939 		RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset);
1940 		RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset);
1941 		RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset);
1942 		RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR(cmd, active->noa_duration, i, offset);
1943 	}
1944 
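	/* the DBCC/multi-role block sits at the tail of the v1 payload */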
1945 	offset = H2C_LEN_CXDRVINFO_ROLE_V1 - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN;
1946 	RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset);
1947 	RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset);
1948 	RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset);
1949 	RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset);
1950 	RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset);
1951 	RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset);
1952 
1953 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1954 			      H2C_CAT_OUTSRC, BTFC_SET,
1955 			      SET_DRV_INFO, 0, 0,
1956 			      H2C_LEN_CXDRVINFO_ROLE_V1);
1957 
1958 	ret = rtw89_h2c_tx(rtwdev, skb, false);
1959 	if (ret) {
1960 		rtw89_err(rtwdev, "failed to send h2c\n");
1961 		goto fail;
1962 	}
1963 
1964 	return 0;
1965 fail:
1966 	dev_kfree_skb_any(skb);
1967 
1968 	return ret;
1969 }
1970 
1971 #define H2C_LEN_CXDRVINFO_CTRL (4 + H2C_LEN_CXDRVHDR)
1972 int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev)
1973 {
1974 	const struct rtw89_chip_info *chip = rtwdev->chip;
1975 	struct rtw89_btc *btc = &rtwdev->btc;
1976 	struct rtw89_btc_ctrl *ctrl = &btc->ctrl;
1977 	struct sk_buff *skb;
1978 	u8 *cmd;
1979 	int ret;
1980 
1981 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_CTRL);
1982 	if (!skb) {
1983 		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n");
1984 		return -ENOMEM;
1985 	}
1986 	skb_put(skb, H2C_LEN_CXDRVINFO_CTRL);
1987 	cmd = skb->data;
1988 
1989 	RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_CTRL);
1990 	RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_CTRL - H2C_LEN_CXDRVHDR);
1991 
1992 	RTW89_SET_FWCMD_CXCTRL_MANUAL(cmd, ctrl->manual);
1993 	RTW89_SET_FWCMD_CXCTRL_IGNORE_BT(cmd, ctrl->igno_bt);
1994 	RTW89_SET_FWCMD_CXCTRL_ALWAYS_FREERUN(cmd, ctrl->always_freerun);
1995 	if (chip->chip_id == RTL8852A)
1996 		RTW89_SET_FWCMD_CXCTRL_TRACE_STEP(cmd, ctrl->trace_step);
1997 
1998 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1999 			      H2C_CAT_OUTSRC, BTFC_SET,
2000 			      SET_DRV_INFO, 0, 0,
2001 			      H2C_LEN_CXDRVINFO_CTRL);
2002 
2003 	ret = rtw89_h2c_tx(rtwdev, skb, false);
2004 	if (ret) {
2005 		rtw89_err(rtwdev, "failed to send h2c\n");
2006 		goto fail;
2007 	}
2008 
2009 	return 0;
2010 fail:
2011 	dev_kfree_skb_any(skb);
2012 
2013 	return ret;
2014 }
2015 
2016 #define H2C_LEN_CXDRVINFO_RFK (4 + H2C_LEN_CXDRVHDR)
2017 int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev)
2018 {
2019 	struct rtw89_btc *btc = &rtwdev->btc;
2020 	struct rtw89_btc_wl_info *wl = &btc->cx.wl;
2021 	struct rtw89_btc_wl_rfk_info *rfk_info = &wl->rfk_info;
2022 	struct sk_buff *skb;
2023 	u8 *cmd;
2024 	int ret;
2025 
2026 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_RFK);
2027 	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_rfk\n");
2029 		return -ENOMEM;
2030 	}
2031 	skb_put(skb, H2C_LEN_CXDRVINFO_RFK);
2032 	cmd = skb->data;
2033 
2034 	RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_RFK);
2035 	RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_RFK - H2C_LEN_CXDRVHDR);
2036 
2037 	RTW89_SET_FWCMD_CXRFK_STATE(cmd, rfk_info->state);
2038 	RTW89_SET_FWCMD_CXRFK_PATH_MAP(cmd, rfk_info->path_map);
2039 	RTW89_SET_FWCMD_CXRFK_PHY_MAP(cmd, rfk_info->phy_map);
2040 	RTW89_SET_FWCMD_CXRFK_BAND(cmd, rfk_info->band);
2041 	RTW89_SET_FWCMD_CXRFK_TYPE(cmd, rfk_info->type);
2042 
2043 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2044 			      H2C_CAT_OUTSRC, BTFC_SET,
2045 			      SET_DRV_INFO, 0, 0,
2046 			      H2C_LEN_CXDRVINFO_RFK);
2047 
2048 	ret = rtw89_h2c_tx(rtwdev, skb, false);
2049 	if (ret) {
2050 		rtw89_err(rtwdev, "failed to send h2c\n");
2051 		goto fail;
2052 	}
2053 
2054 	return 0;
2055 fail:
2056 	dev_kfree_skb_any(skb);
2057 
2058 	return ret;
2059 }
2060 
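/* Ask firmware to drop a previously offloaded packet identified by @id. */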
2061 #define H2C_LEN_PKT_OFLD 4
2062 int rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev *rtwdev, u8 id)
2063 {
2064 	struct sk_buff *skb;
2065 	u8 *cmd;
2066 	int ret;
2067 
2068 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD);
2069 	if (!skb) {
2070 		rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n");
2071 		return -ENOMEM;
2072 	}
2073 	skb_put(skb, H2C_LEN_PKT_OFLD);
2074 	cmd = skb->data;
2075 
2076 	RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, id);
2077 	RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_DEL);
2078 
2079 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2080 			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
2081 			      H2C_FUNC_PACKET_OFLD, 1, 1,
2082 			      H2C_LEN_PKT_OFLD);
2083 
2084 	ret = rtw89_h2c_tx(rtwdev, skb, false);
2085 	if (ret) {
2086 		rtw89_err(rtwdev, "failed to send h2c\n");
2087 		goto fail;
2088 	}
2089 
2090 	return 0;
2091 fail:
2092 	dev_kfree_skb_any(skb);
2093 
2094 	return ret;
2095 }
2096 
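/* Acquire a free index from the pkt_offload bitmap and upload the contents
 * of @skb_ofld to firmware after the offload header. The allocated index is
 * returned via @id; the caller keeps ownership of @skb_ofld since its data
 * is copied into the H2C.
 */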
2097 int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id,
2098 				 struct sk_buff *skb_ofld)
2099 {
2100 	struct sk_buff *skb;
2101 	u8 *cmd;
2102 	u8 alloc_id;
2103 	int ret;
2104 
2105 	alloc_id = rtw89_core_acquire_bit_map(rtwdev->pkt_offload,
2106 					      RTW89_MAX_PKT_OFLD_NUM);
2107 	if (alloc_id == RTW89_MAX_PKT_OFLD_NUM)
2108 		return -ENOSPC;
2109 
2110 	*id = alloc_id;
2111 
2112 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD + skb_ofld->len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n");
		rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id);
		return -ENOMEM;
	}
2117 	skb_put(skb, H2C_LEN_PKT_OFLD);
2118 	cmd = skb->data;
2119 
2120 	RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, alloc_id);
2121 	RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_ADD);
2122 	RTW89_SET_FWCMD_PACKET_OFLD_PKT_LENGTH(cmd, skb_ofld->len);
2123 	skb_put_data(skb, skb_ofld->data, skb_ofld->len);
2124 
2125 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2126 			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
2127 			      H2C_FUNC_PACKET_OFLD, 1, 1,
2128 			      H2C_LEN_PKT_OFLD + skb_ofld->len);
2129 
2130 	ret = rtw89_h2c_tx(rtwdev, skb, false);
2131 	if (ret) {
2132 		rtw89_err(rtwdev, "failed to send h2c\n");
2133 		goto fail;
2134 	}
2135 
2136 	return 0;
2137 fail:
	dev_kfree_skb_any(skb);
	rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id);
2139 
2140 	return ret;
2141 }
2142 
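/* Upload the scan channel list: a short header holding the entry count and
 * per-entry size (in 4-byte units), followed by one RTW89_MAC_CHINFO_SIZE
 * record for each channel in @chan_list.
 */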
2143 #define H2C_LEN_SCAN_LIST_OFFLOAD 4
2144 int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int len,
2145 				   struct list_head *chan_list)
2146 {
2147 	struct rtw89_mac_chinfo *ch_info;
2148 	struct sk_buff *skb;
2149 	int skb_len = H2C_LEN_SCAN_LIST_OFFLOAD + len * RTW89_MAC_CHINFO_SIZE;
2150 	u8 *cmd;
2151 	int ret;
2152 
2153 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len);
2154 	if (!skb) {
2155 		rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n");
2156 		return -ENOMEM;
2157 	}
2158 	skb_put(skb, H2C_LEN_SCAN_LIST_OFFLOAD);
2159 	cmd = skb->data;
2160 
2161 	RTW89_SET_FWCMD_SCANOFLD_CH_NUM(cmd, len);
2162 	/* in unit of 4 bytes */
2163 	RTW89_SET_FWCMD_SCANOFLD_CH_SIZE(cmd, RTW89_MAC_CHINFO_SIZE / 4);
2164 
2165 	list_for_each_entry(ch_info, chan_list, list) {
2166 		cmd = skb_put(skb, RTW89_MAC_CHINFO_SIZE);
2167 
2168 		RTW89_SET_FWCMD_CHINFO_PERIOD(cmd, ch_info->period);
2169 		RTW89_SET_FWCMD_CHINFO_DWELL(cmd, ch_info->dwell_time);
2170 		RTW89_SET_FWCMD_CHINFO_CENTER_CH(cmd, ch_info->central_ch);
2171 		RTW89_SET_FWCMD_CHINFO_PRI_CH(cmd, ch_info->pri_ch);
2172 		RTW89_SET_FWCMD_CHINFO_BW(cmd, ch_info->bw);
2173 		RTW89_SET_FWCMD_CHINFO_ACTION(cmd, ch_info->notify_action);
2174 		RTW89_SET_FWCMD_CHINFO_NUM_PKT(cmd, ch_info->num_pkt);
2175 		RTW89_SET_FWCMD_CHINFO_TX(cmd, ch_info->tx_pkt);
2176 		RTW89_SET_FWCMD_CHINFO_PAUSE_DATA(cmd, ch_info->pause_data);
2177 		RTW89_SET_FWCMD_CHINFO_BAND(cmd, ch_info->ch_band);
2178 		RTW89_SET_FWCMD_CHINFO_PKT_ID(cmd, ch_info->probe_id);
2179 		RTW89_SET_FWCMD_CHINFO_DFS(cmd, ch_info->dfs_ch);
2180 		RTW89_SET_FWCMD_CHINFO_TX_NULL(cmd, ch_info->tx_null);
2181 		RTW89_SET_FWCMD_CHINFO_RANDOM(cmd, ch_info->rand_seq_num);
2182 		RTW89_SET_FWCMD_CHINFO_PKT0(cmd, ch_info->pkt_id[0]);
2183 		RTW89_SET_FWCMD_CHINFO_PKT1(cmd, ch_info->pkt_id[1]);
2184 		RTW89_SET_FWCMD_CHINFO_PKT2(cmd, ch_info->pkt_id[2]);
2185 		RTW89_SET_FWCMD_CHINFO_PKT3(cmd, ch_info->pkt_id[3]);
2186 		RTW89_SET_FWCMD_CHINFO_PKT4(cmd, ch_info->pkt_id[4]);
2187 		RTW89_SET_FWCMD_CHINFO_PKT5(cmd, ch_info->pkt_id[5]);
2188 		RTW89_SET_FWCMD_CHINFO_PKT6(cmd, ch_info->pkt_id[6]);
2189 		RTW89_SET_FWCMD_CHINFO_PKT7(cmd, ch_info->pkt_id[7]);
2190 	}
2191 
2192 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2193 			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
2194 			      H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len);
2195 
2196 	ret = rtw89_h2c_tx(rtwdev, skb, false);
2197 	if (ret) {
2198 		rtw89_err(rtwdev, "failed to send h2c\n");
2199 		goto fail;
2200 	}
2201 
2202 	return 0;
2203 fail:
2204 	dev_kfree_skb_any(skb);
2205 
2206 	return ret;
2207 }
2208 
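/* Start or stop firmware scan offload for @rtwvif. When target_ch_mode is
 * set, the stored operating channel (band/bw/primary/central) is included
 * as the target channel.
 */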
2209 #define H2C_LEN_SCAN_OFFLOAD 28
2210 int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev,
2211 			      struct rtw89_scan_option *option,
2212 			      struct rtw89_vif *rtwvif)
2213 {
2214 	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
2215 	struct sk_buff *skb;
2216 	u8 *cmd;
2217 	int ret;
2218 
2219 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_SCAN_OFFLOAD);
2220 	if (!skb) {
2221 		rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n");
2222 		return -ENOMEM;
2223 	}
2224 	skb_put(skb, H2C_LEN_SCAN_OFFLOAD);
2225 	cmd = skb->data;
2226 
2227 	RTW89_SET_FWCMD_SCANOFLD_MACID(cmd, rtwvif->mac_id);
2228 	RTW89_SET_FWCMD_SCANOFLD_PORT_ID(cmd, rtwvif->port);
2229 	RTW89_SET_FWCMD_SCANOFLD_BAND(cmd, RTW89_PHY_0);
2230 	RTW89_SET_FWCMD_SCANOFLD_OPERATION(cmd, option->enable);
2231 	RTW89_SET_FWCMD_SCANOFLD_NOTIFY_END(cmd, true);
2232 	RTW89_SET_FWCMD_SCANOFLD_TARGET_CH_MODE(cmd, option->target_ch_mode);
2233 	RTW89_SET_FWCMD_SCANOFLD_START_MODE(cmd, RTW89_SCAN_IMMEDIATE);
2234 	RTW89_SET_FWCMD_SCANOFLD_SCAN_TYPE(cmd, RTW89_SCAN_ONCE);
2235 	if (option->target_ch_mode) {
2236 		RTW89_SET_FWCMD_SCANOFLD_TARGET_CH_BW(cmd, scan_info->op_bw);
2237 		RTW89_SET_FWCMD_SCANOFLD_TARGET_PRI_CH(cmd,
2238 						       scan_info->op_pri_ch);
2239 		RTW89_SET_FWCMD_SCANOFLD_TARGET_CENTRAL_CH(cmd,
2240 							   scan_info->op_chan);
2241 		RTW89_SET_FWCMD_SCANOFLD_TARGET_CH_BAND(cmd,
2242 							scan_info->op_band);
2243 	}
2244 
2245 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2246 			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
2247 			      H2C_FUNC_SCANOFLD, 1, 1,
2248 			      H2C_LEN_SCAN_OFFLOAD);
2249 
2250 	ret = rtw89_h2c_tx(rtwdev, skb, false);
2251 	if (ret) {
2252 		rtw89_err(rtwdev, "failed to send h2c\n");
2253 		goto fail;
2254 	}
2255 
2256 	return 0;
2257 fail:
2258 	dev_kfree_skb_any(skb);
2259 
2260 	return ret;
2261 }
2262 
2263 int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev,
2264 			struct rtw89_fw_h2c_rf_reg_info *info,
2265 			u16 len, u8 page)
2266 {
2267 	struct sk_buff *skb;
2268 	u8 class = info->rf_path == RF_PATH_A ?
2269 		   H2C_CL_OUTSRC_RF_REG_A : H2C_CL_OUTSRC_RF_REG_B;
2270 	int ret;
2271 
2272 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2273 	if (!skb) {
2274 		rtw89_err(rtwdev, "failed to alloc skb for h2c rf reg\n");
2275 		return -ENOMEM;
2276 	}
2277 	skb_put_data(skb, info->rtw89_phy_config_rf_h2c[page], len);
2278 
2279 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2280 			      H2C_CAT_OUTSRC, class, page, 0, 0,
2281 			      len);
2282 
2283 	ret = rtw89_h2c_tx(rtwdev, skb, false);
2284 	if (ret) {
2285 		rtw89_err(rtwdev, "failed to send h2c\n");
2286 		goto fail;
2287 	}
2288 
2289 	return 0;
2290 fail:
2291 	dev_kfree_skb_any(skb);
2292 
2293 	return ret;
2294 }
2295 
2296 int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev)
2297 {
2298 	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
2299 	struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc;
2300 	struct rtw89_fw_h2c_rf_get_mccch *mccch;
2301 	struct sk_buff *skb;
2302 	int ret;
2303 
2304 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, sizeof(*mccch));
2305 	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c rf mcc notify\n");
2307 		return -ENOMEM;
2308 	}
2309 	skb_put(skb, sizeof(*mccch));
2310 	mccch = (struct rtw89_fw_h2c_rf_get_mccch *)skb->data;
2311 
2312 	mccch->ch_0 = cpu_to_le32(rfk_mcc->ch[0]);
2313 	mccch->ch_1 = cpu_to_le32(rfk_mcc->ch[1]);
2314 	mccch->band_0 = cpu_to_le32(rfk_mcc->band[0]);
2315 	mccch->band_1 = cpu_to_le32(rfk_mcc->band[1]);
2316 	mccch->current_channel = cpu_to_le32(chan->channel);
2317 	mccch->current_band_type = cpu_to_le32(chan->band_type);
2318 
2319 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2320 			      H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_NOTIFY,
2321 			      H2C_FUNC_OUTSRC_RF_GET_MCCCH, 0, 0,
2322 			      sizeof(*mccch));
2323 
2324 	ret = rtw89_h2c_tx(rtwdev, skb, false);
2325 	if (ret) {
2326 		rtw89_err(rtwdev, "failed to send h2c\n");
2327 		goto fail;
2328 	}
2329 
2330 	return 0;
2331 fail:
2332 	dev_kfree_skb_any(skb);
2333 
2334 	return ret;
2335 }
2336 EXPORT_SYMBOL(rtw89_fw_h2c_rf_ntfy_mcc);
2337 
2338 int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev,
2339 			      u8 h2c_class, u8 h2c_func, u8 *buf, u16 len,
2340 			      bool rack, bool dack)
2341 {
2342 	struct sk_buff *skb;
2343 	int ret;
2344 
2345 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2346 	if (!skb) {
2347 		rtw89_err(rtwdev, "failed to alloc skb for raw with hdr\n");
2348 		return -ENOMEM;
2349 	}
2350 	skb_put_data(skb, buf, len);
2351 
2352 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2353 			      H2C_CAT_OUTSRC, h2c_class, h2c_func, rack, dack,
2354 			      len);
2355 
2356 	ret = rtw89_h2c_tx(rtwdev, skb, false);
2357 	if (ret) {
2358 		rtw89_err(rtwdev, "failed to send h2c\n");
2359 		goto fail;
2360 	}
2361 
2362 	return 0;
2363 fail:
2364 	dev_kfree_skb_any(skb);
2365 
2366 	return ret;
2367 }
2368 
2369 int rtw89_fw_h2c_raw(struct rtw89_dev *rtwdev, const u8 *buf, u16 len)
2370 {
2371 	struct sk_buff *skb;
2372 	int ret;
2373 
2374 	skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, len);
2375 	if (!skb) {
2376 		rtw89_err(rtwdev, "failed to alloc skb for h2c raw\n");
2377 		return -ENOMEM;
2378 	}
2379 	skb_put_data(skb, buf, len);
2380 
2381 	ret = rtw89_h2c_tx(rtwdev, skb, false);
2382 	if (ret) {
2383 		rtw89_err(rtwdev, "failed to send h2c\n");
2384 		goto fail;
2385 	}
2386 
2387 	return 0;
2388 fail:
2389 	dev_kfree_skb_any(skb);
2390 
2391 	return ret;
2392 }
2393 
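/* Replay all queued early H2Cs as raw commands; caller must hold rtwdev->mutex. */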
2394 void rtw89_fw_send_all_early_h2c(struct rtw89_dev *rtwdev)
2395 {
2396 	struct rtw89_early_h2c *early_h2c;
2397 
2398 	lockdep_assert_held(&rtwdev->mutex);
2399 
2400 	list_for_each_entry(early_h2c, &rtwdev->early_h2c_list, list) {
2401 		rtw89_fw_h2c_raw(rtwdev, early_h2c->h2c, early_h2c->h2c_len);
2402 	}
2403 }
2404 
2405 void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev)
2406 {
2407 	struct rtw89_early_h2c *early_h2c, *tmp;
2408 
2409 	mutex_lock(&rtwdev->mutex);
2410 	list_for_each_entry_safe(early_h2c, tmp, &rtwdev->early_h2c_list, list) {
2411 		list_del(&early_h2c->list);
2412 		kfree(early_h2c->h2c);
2413 		kfree(early_h2c);
2414 	}
2415 	mutex_unlock(&rtwdev->mutex);
2416 }
2417 
2418 static void rtw89_fw_c2h_parse_attr(struct sk_buff *c2h)
2419 {
2420 	struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h);
2421 
2422 	attr->category = RTW89_GET_C2H_CATEGORY(c2h->data);
2423 	attr->class = RTW89_GET_C2H_CLASS(c2h->data);
2424 	attr->func = RTW89_GET_C2H_FUNC(c2h->data);
2425 	attr->len = RTW89_GET_C2H_LEN(c2h->data);
2426 }
2427 
2428 static bool rtw89_fw_c2h_chk_atomic(struct rtw89_dev *rtwdev,
2429 				    struct sk_buff *c2h)
2430 {
2431 	struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h);
2432 	u8 category = attr->category;
2433 	u8 class = attr->class;
2434 	u8 func = attr->func;
2435 
2436 	switch (category) {
2437 	default:
2438 		return false;
2439 	case RTW89_C2H_CAT_MAC:
2440 		return rtw89_mac_c2h_chk_atomic(rtwdev, class, func);
2441 	}
2442 }
2443 
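/* Entry point for C2H events from the interrupt path: events that are safe
 * to handle atomically are processed (and freed) right here, everything else
 * is queued to c2h_work for handling under rtwdev->mutex.
 */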
2444 void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h)
2445 {
2446 	rtw89_fw_c2h_parse_attr(c2h);
2447 	if (!rtw89_fw_c2h_chk_atomic(rtwdev, c2h))
2448 		goto enqueue;
2449 
2450 	rtw89_fw_c2h_cmd_handle(rtwdev, c2h);
2451 	dev_kfree_skb_any(c2h);
2452 	return;
2453 
2454 enqueue:
2455 	skb_queue_tail(&rtwdev->c2h_queue, c2h);
2456 	ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work);
2457 }
2458 
2459 static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev,
2460 				    struct sk_buff *skb)
2461 {
2462 	struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(skb);
2463 	u8 category = attr->category;
2464 	u8 class = attr->class;
2465 	u8 func = attr->func;
2466 	u16 len = attr->len;
2467 	bool dump = true;
2468 
2469 	if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags))
2470 		return;
2471 
2472 	switch (category) {
2473 	case RTW89_C2H_CAT_TEST:
2474 		break;
2475 	case RTW89_C2H_CAT_MAC:
2476 		rtw89_mac_c2h_handle(rtwdev, skb, len, class, func);
2477 		if (class == RTW89_MAC_C2H_CLASS_INFO &&
2478 		    func == RTW89_MAC_C2H_FUNC_C2H_LOG)
2479 			dump = false;
2480 		break;
2481 	case RTW89_C2H_CAT_OUTSRC:
2482 		if (class >= RTW89_PHY_C2H_CLASS_BTC_MIN &&
2483 		    class <= RTW89_PHY_C2H_CLASS_BTC_MAX)
2484 			rtw89_btc_c2h_handle(rtwdev, skb, len, class, func);
2485 		else
2486 			rtw89_phy_c2h_handle(rtwdev, skb, len, class, func);
2487 		break;
2488 	}
2489 
2490 	if (dump)
2491 		rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "C2H: ", skb->data, skb->len);
2492 }
2493 
2494 void rtw89_fw_c2h_work(struct work_struct *work)
2495 {
2496 	struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
2497 						c2h_work);
2498 	struct sk_buff *skb, *tmp;
2499 
2500 	skb_queue_walk_safe(&rtwdev->c2h_queue, skb, tmp) {
2501 		skb_unlink(skb, &rtwdev->c2h_queue);
2502 		mutex_lock(&rtwdev->mutex);
2503 		rtw89_fw_c2h_cmd_handle(rtwdev, skb);
2504 		mutex_unlock(&rtwdev->mutex);
2505 		dev_kfree_skb_any(skb);
2506 	}
2507 }
2508 
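/* Send an H2C message through the h2c registers: wait until firmware has
 * consumed the previous message (ctrl reg reads 0), write the header and
 * payload words, then kick B_AX_H2CREG_TRIGGER.
 */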
2509 static int rtw89_fw_write_h2c_reg(struct rtw89_dev *rtwdev,
2510 				  struct rtw89_mac_h2c_info *info)
2511 {
2512 	const struct rtw89_chip_info *chip = rtwdev->chip;
2513 	const u32 *h2c_reg = chip->h2c_regs;
2514 	u8 i, val, len;
2515 	int ret;
2516 
2517 	ret = read_poll_timeout(rtw89_read8, val, val == 0, 1000, 5000, false,
2518 				rtwdev, chip->h2c_ctrl_reg);
2519 	if (ret) {
2520 		rtw89_warn(rtwdev, "FW does not process h2c registers\n");
2521 		return ret;
2522 	}
2523 
2524 	len = DIV_ROUND_UP(info->content_len + RTW89_H2CREG_HDR_LEN,
2525 			   sizeof(info->h2creg[0]));
2526 
2527 	RTW89_SET_H2CREG_HDR_FUNC(&info->h2creg[0], info->id);
2528 	RTW89_SET_H2CREG_HDR_LEN(&info->h2creg[0], len);
2529 	for (i = 0; i < RTW89_H2CREG_MAX; i++)
2530 		rtw89_write32(rtwdev, h2c_reg[i], info->h2creg[i]);
2531 
2532 	rtw89_write8(rtwdev, chip->h2c_ctrl_reg, B_AX_H2CREG_TRIGGER);
2533 
2534 	return 0;
2535 }
2536 
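/* Read a C2H message from the c2h registers: poll until the ctrl reg is
 * non-zero, copy the register words, ack by clearing the ctrl reg, then
 * parse the function id and content length from the header word.
 */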
2537 static int rtw89_fw_read_c2h_reg(struct rtw89_dev *rtwdev,
2538 				 struct rtw89_mac_c2h_info *info)
2539 {
2540 	const struct rtw89_chip_info *chip = rtwdev->chip;
2541 	const u32 *c2h_reg = chip->c2h_regs;
	int ret;
2543 	u8 i, val;
2544 
2545 	info->id = RTW89_FWCMD_C2HREG_FUNC_NULL;
2546 
2547 	ret = read_poll_timeout_atomic(rtw89_read8, val, val, 1,
2548 				       RTW89_C2H_TIMEOUT, false, rtwdev,
2549 				       chip->c2h_ctrl_reg);
2550 	if (ret) {
2551 		rtw89_warn(rtwdev, "c2h reg timeout\n");
2552 		return ret;
2553 	}
2554 
2555 	for (i = 0; i < RTW89_C2HREG_MAX; i++)
2556 		info->c2hreg[i] = rtw89_read32(rtwdev, c2h_reg[i]);
2557 
2558 	rtw89_write8(rtwdev, chip->c2h_ctrl_reg, 0);
2559 
2560 	info->id = RTW89_GET_C2H_HDR_FUNC(*info->c2hreg);
2561 	info->content_len = (RTW89_GET_C2H_HDR_LEN(*info->c2hreg) << 2) -
2562 				RTW89_C2HREG_HDR_LEN;
2563 
2564 	return 0;
2565 }
2566 
2567 int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev,
2568 		     struct rtw89_mac_h2c_info *h2c_info,
2569 		     struct rtw89_mac_c2h_info *c2h_info)
2570 {
	int ret;
2572 
2573 	if (h2c_info && h2c_info->id != RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE)
2574 		lockdep_assert_held(&rtwdev->mutex);
2575 
2576 	if (!h2c_info && !c2h_info)
2577 		return -EINVAL;
2578 
2579 	if (!h2c_info)
2580 		goto recv_c2h;
2581 
2582 	ret = rtw89_fw_write_h2c_reg(rtwdev, h2c_info);
2583 	if (ret)
2584 		return ret;
2585 
2586 recv_c2h:
2587 	if (!c2h_info)
2588 		return 0;
2589 
2590 	ret = rtw89_fw_read_c2h_reg(rtwdev, c2h_info);
2591 	if (ret)
2592 		return ret;
2593 
2594 	return 0;
2595 }
2596 
2597 void rtw89_fw_st_dbg_dump(struct rtw89_dev *rtwdev)
2598 {
2599 	if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) {
2600 		rtw89_err(rtwdev, "[ERR]pwr is off\n");
2601 		return;
2602 	}
2603 
2604 	rtw89_info(rtwdev, "FW status = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM0));
2605 	rtw89_info(rtwdev, "FW BADADDR = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM1));
2606 	rtw89_info(rtwdev, "FW EPC/RA = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM2));
2607 	rtw89_info(rtwdev, "FW MISC = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM3));
2608 	rtw89_info(rtwdev, "R_AX_HALT_C2H = 0x%x\n",
2609 		   rtw89_read32(rtwdev, R_AX_HALT_C2H));
2610 	rtw89_info(rtwdev, "R_AX_SER_DBG_INFO = 0x%x\n",
2611 		   rtw89_read32(rtwdev, R_AX_SER_DBG_INFO));
2612 
2613 	rtw89_fw_prog_cnt_dump(rtwdev);
2614 }
2615 
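/* Drop every offloaded probe request: tell firmware to delete each packet,
 * release its bitmap id and free the bookkeeping entry, per supported band.
 */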
2616 static void rtw89_release_pkt_list(struct rtw89_dev *rtwdev)
2617 {
2618 	struct list_head *pkt_list = rtwdev->scan_info.pkt_list;
2619 	struct rtw89_pktofld_info *info, *tmp;
2620 	u8 idx;
2621 
2622 	for (idx = NL80211_BAND_2GHZ; idx < NUM_NL80211_BANDS; idx++) {
2623 		if (!(rtwdev->chip->support_bands & BIT(idx)))
2624 			continue;
2625 
2626 		list_for_each_entry_safe(info, tmp, &pkt_list[idx], list) {
2627 			rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id);
2628 			rtw89_core_release_bit_map(rtwdev->pkt_offload,
2629 						   info->id);
2630 			list_del(&info->list);
2631 			kfree(info);
2632 		}
2633 	}
2634 }
2635 
2636 static int rtw89_append_probe_req_ie(struct rtw89_dev *rtwdev,
2637 				     struct rtw89_vif *rtwvif,
2638 				     struct sk_buff *skb)
2639 {
2640 	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
2641 	struct ieee80211_scan_ies *ies = rtwvif->scan_ies;
2642 	struct rtw89_pktofld_info *info;
2643 	struct sk_buff *new;
2644 	int ret = 0;
2645 	u8 band;
2646 
2647 	for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
2648 		if (!(rtwdev->chip->support_bands & BIT(band)))
2649 			continue;
2650 
2651 		new = skb_copy(skb, GFP_KERNEL);
2652 		if (!new) {
2653 			ret = -ENOMEM;
2654 			goto out;
2655 		}
2656 		skb_put_data(new, ies->ies[band], ies->len[band]);
2657 		skb_put_data(new, ies->common_ies, ies->common_ie_len);
2658 
2659 		info = kzalloc(sizeof(*info), GFP_KERNEL);
2660 		if (!info) {
2661 			ret = -ENOMEM;
2662 			kfree_skb(new);
2663 			goto out;
2664 		}
2665 
		ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, new);
		if (ret) {
			kfree_skb(new);
			kfree(info);
			goto out;
		}

		list_add_tail(&info->list, &scan_info->pkt_list[band]);
		kfree_skb(new);
2672 	}
2673 out:
2674 	return ret;
2675 }
2676 
2677 static int rtw89_hw_scan_update_probe_req(struct rtw89_dev *rtwdev,
2678 					  struct rtw89_vif *rtwvif)
2679 {
2680 	struct cfg80211_scan_request *req = rtwvif->scan_req;
2681 	struct sk_buff *skb;
2682 	u8 num = req->n_ssids, i;
2683 	int ret;
2684 
2685 	for (i = 0; i < num; i++) {
2686 		skb = ieee80211_probereq_get(rtwdev->hw, rtwvif->mac_addr,
2687 					     req->ssids[i].ssid,
2688 					     req->ssids[i].ssid_len,
2689 					     req->ie_len);
2690 		if (!skb)
2691 			return -ENOMEM;
2692 
2693 		ret = rtw89_append_probe_req_ie(rtwdev, rtwvif, skb);
2694 		kfree_skb(skb);
2695 
2696 		if (ret)
2697 			return ret;
2698 	}
2699 
2700 	return 0;
2701 }
2702 
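/* Fill the per-channel scan parameters for one entry. OPERATE entries point
 * back at the stored operating channel, send a NULL frame and carry no
 * probes; DFS entries get a longer period and a fixed dwell time; a single
 * wildcard SSID on 6 GHz suppresses probe transmission.
 */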
2703 static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type,
2704 				   int ssid_num,
2705 				   struct rtw89_mac_chinfo *ch_info)
2706 {
2707 	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
2708 	struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif;
2709 	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
2710 	struct cfg80211_scan_request *req = rtwvif->scan_req;
2711 	struct rtw89_pktofld_info *info;
2712 	u8 band, probe_count = 0;
2713 
2714 	ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK;
2715 	ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS;
2716 	ch_info->bw = RTW89_SCAN_WIDTH;
2717 	ch_info->tx_pkt = true;
2718 	ch_info->cfg_tx_pwr = false;
2719 	ch_info->tx_pwr_idx = 0;
2720 	ch_info->tx_null = false;
2721 	ch_info->pause_data = false;
2722 	ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE;
2723 
2724 	if (ssid_num) {
2725 		ch_info->num_pkt = ssid_num;
2726 		band = rtw89_hw_to_nl80211_band(ch_info->ch_band);
2727 
2728 		list_for_each_entry(info, &scan_info->pkt_list[band], list) {
2729 			ch_info->pkt_id[probe_count] = info->id;
2730 			if (++probe_count >= ssid_num)
2731 				break;
2732 		}
2733 		if (probe_count != ssid_num)
2734 			rtw89_err(rtwdev, "SSID num differs from list len\n");
2735 	}
2736 
2737 	if (ch_info->ch_band == RTW89_BAND_6G) {
2738 		if (ssid_num == 1 && req->ssids[0].ssid_len == 0) {
2739 			ch_info->tx_pkt = false;
2740 			if (!req->duration_mandatory)
2741 				ch_info->period -= RTW89_DWELL_TIME;
2742 		}
2743 	}
2744 
2745 	switch (chan_type) {
2746 	case RTW89_CHAN_OPERATE:
2747 		ch_info->central_ch = scan_info->op_chan;
2748 		ch_info->pri_ch = scan_info->op_pri_ch;
2749 		ch_info->ch_band = scan_info->op_band;
2750 		ch_info->bw = scan_info->op_bw;
2751 		ch_info->tx_null = true;
2752 		ch_info->num_pkt = 0;
2753 		break;
2754 	case RTW89_CHAN_DFS:
2755 		if (ch_info->ch_band != RTW89_BAND_6G)
2756 			ch_info->period = max_t(u8, ch_info->period,
2757 						RTW89_DFS_CHAN_TIME);
2758 		ch_info->dwell_time = RTW89_DWELL_TIME;
2759 		break;
2760 	case RTW89_CHAN_ACTIVE:
2761 		break;
2762 	default:
		rtw89_err(rtwdev, "Channel type out of bounds\n");
2764 	}
2765 }
2766 
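/* Build the next batch of scan entries (at most RTW89_SCAN_LIST_LIMIT),
 * starting from last_chan_idx. While connected, an operating-channel entry
 * is interleaved whenever the accumulated off-channel time would exceed
 * RTW89_OFF_CHAN_TIME. The list is uploaded via
 * rtw89_fw_h2c_scan_list_offload() and last_chan_idx records where the next
 * batch should resume.
 */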
2767 static int rtw89_hw_scan_add_chan_list(struct rtw89_dev *rtwdev,
2768 				       struct rtw89_vif *rtwvif)
2769 {
2770 	struct cfg80211_scan_request *req = rtwvif->scan_req;
2771 	struct rtw89_mac_chinfo	*ch_info, *tmp;
2772 	struct ieee80211_channel *channel;
2773 	struct list_head chan_list;
2774 	bool random_seq = req->flags & NL80211_SCAN_FLAG_RANDOM_SN;
2775 	int list_len, off_chan_time = 0;
2776 	enum rtw89_chan_type type;
2777 	int ret = 0;
2778 	u32 idx;
2779 
2780 	INIT_LIST_HEAD(&chan_list);
2781 	for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0;
2782 	     idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT;
2783 	     idx++, list_len++) {
2784 		channel = req->channels[idx];
2785 		ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL);
2786 		if (!ch_info) {
2787 			ret = -ENOMEM;
2788 			goto out;
2789 		}
2790 
2791 		if (req->duration_mandatory)
2792 			ch_info->period = req->duration;
2793 		else if (channel->band == NL80211_BAND_6GHZ)
2794 			ch_info->period = RTW89_CHANNEL_TIME_6G + RTW89_DWELL_TIME;
2795 		else
2796 			ch_info->period = RTW89_CHANNEL_TIME;
2797 
2798 		ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band);
2799 		ch_info->central_ch = channel->hw_value;
2800 		ch_info->pri_ch = channel->hw_value;
2801 		ch_info->rand_seq_num = random_seq;
2802 
2803 		if (channel->flags &
2804 		    (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR))
2805 			type = RTW89_CHAN_DFS;
2806 		else
2807 			type = RTW89_CHAN_ACTIVE;
2808 		rtw89_hw_scan_add_chan(rtwdev, type, req->n_ssids, ch_info);
2809 
2810 		if (rtwvif->net_type != RTW89_NET_TYPE_NO_LINK &&
2811 		    off_chan_time + ch_info->period > RTW89_OFF_CHAN_TIME) {
2812 			tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
2813 			if (!tmp) {
2814 				ret = -ENOMEM;
2815 				kfree(ch_info);
2816 				goto out;
2817 			}
2818 
2819 			type = RTW89_CHAN_OPERATE;
2820 			tmp->period = req->duration_mandatory ?
2821 				      req->duration : RTW89_CHANNEL_TIME;
2822 			rtw89_hw_scan_add_chan(rtwdev, type, 0, tmp);
2823 			list_add_tail(&tmp->list, &chan_list);
2824 			off_chan_time = 0;
2825 			list_len++;
2826 		}
2827 		list_add_tail(&ch_info->list, &chan_list);
2828 		off_chan_time += ch_info->period;
2829 	}
2830 	rtwdev->scan_info.last_chan_idx = idx;
2831 	ret = rtw89_fw_h2c_scan_list_offload(rtwdev, list_len, &chan_list);
2832 
2833 out:
2834 	list_for_each_entry_safe(ch_info, tmp, &chan_list, list) {
2835 		list_del(&ch_info->list);
2836 		kfree(ch_info);
2837 	}
2838 
2839 	return ret;
2840 }
2841 
2842 static int rtw89_hw_scan_prehandle(struct rtw89_dev *rtwdev,
2843 				   struct rtw89_vif *rtwvif)
2844 {
2845 	int ret;
2846 
2847 	ret = rtw89_hw_scan_update_probe_req(rtwdev, rtwvif);
2848 	if (ret) {
2849 		rtw89_err(rtwdev, "Update probe request failed\n");
2850 		goto out;
2851 	}
2852 	ret = rtw89_hw_scan_add_chan_list(rtwdev, rtwvif);
2853 out:
2854 	return ret;
2855 }
2856 
2857 void rtw89_hw_scan_start(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
2858 			 struct ieee80211_scan_request *scan_req)
2859 {
2860 	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
2861 	struct cfg80211_scan_request *req = &scan_req->req;
2862 	u32 rx_fltr = rtwdev->hal.rx_fltr;
2863 	u8 mac_addr[ETH_ALEN];
2864 
2865 	rtwdev->scan_info.scanning_vif = vif;
2866 	rtwdev->scan_info.last_chan_idx = 0;
2867 	rtwvif->scan_ies = &scan_req->ies;
2868 	rtwvif->scan_req = req;
2869 	ieee80211_stop_queues(rtwdev->hw);
2870 
2871 	if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR)
2872 		get_random_mask_addr(mac_addr, req->mac_addr,
2873 				     req->mac_addr_mask);
2874 	else
2875 		ether_addr_copy(mac_addr, vif->addr);
2876 	rtw89_core_scan_start(rtwdev, rtwvif, mac_addr, true);
2877 
2878 	rx_fltr &= ~B_AX_A_BCN_CHK_EN;
2879 	rx_fltr &= ~B_AX_A_BC;
2880 	rx_fltr &= ~B_AX_A_A1_MATCH;
2881 	rtw89_write32_mask(rtwdev,
2882 			   rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, RTW89_MAC_0),
2883 			   B_AX_RX_FLTR_CFG_MASK,
2884 			   rx_fltr);
2885 }
2886 
2887 void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
2888 			    bool aborted)
2889 {
2890 	struct cfg80211_scan_info info = {
2891 		.aborted = aborted,
2892 	};
2893 	struct rtw89_vif *rtwvif;
2894 
2895 	if (!vif)
2896 		return;
2897 
2898 	rtw89_write32_mask(rtwdev,
2899 			   rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, RTW89_MAC_0),
2900 			   B_AX_RX_FLTR_CFG_MASK,
2901 			   rtwdev->hal.rx_fltr);
2902 
2903 	rtw89_core_scan_complete(rtwdev, vif, true);
2904 	ieee80211_scan_completed(rtwdev->hw, &info);
2905 	ieee80211_wake_queues(rtwdev->hw);
2906 
2907 	rtw89_release_pkt_list(rtwdev);
2908 	rtwvif = (struct rtw89_vif *)vif->drv_priv;
2909 	rtwvif->scan_req = NULL;
2910 	rtwvif->scan_ies = NULL;
2911 	rtwdev->scan_info.last_chan_idx = 0;
2912 	rtwdev->scan_info.scanning_vif = NULL;
2913 
2914 	if (rtwvif->net_type != RTW89_NET_TYPE_NO_LINK)
2915 		rtw89_store_op_chan(rtwdev, false);
2916 	rtw89_set_channel(rtwdev);
2917 }
2918 
2919 void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif)
2920 {
2921 	rtw89_hw_scan_offload(rtwdev, vif, false);
2922 	rtw89_hw_scan_complete(rtwdev, vif, true);
2923 }
2924 
2925 int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
2926 			  bool enable)
2927 {
2928 	struct rtw89_scan_option opt = {0};
2929 	struct rtw89_vif *rtwvif;
2930 	int ret = 0;
2931 
2932 	rtwvif = vif ? (struct rtw89_vif *)vif->drv_priv : NULL;
2933 	if (!rtwvif)
2934 		return -EINVAL;
2935 
2936 	opt.enable = enable;
2937 	opt.target_ch_mode = rtwvif->net_type != RTW89_NET_TYPE_NO_LINK;
2938 	if (enable) {
2939 		ret = rtw89_hw_scan_prehandle(rtwdev, rtwvif);
2940 		if (ret)
2941 			goto out;
2942 	}
2943 	ret = rtw89_fw_h2c_scan_offload(rtwdev, &opt, rtwvif);
2944 out:
2945 	return ret;
2946 }
2947 
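/* backup == true snapshots the current channel of RTW89_SUB_ENTITY_0 as the
 * operating channel for scan offload; backup == false rebuilds that channel
 * from the snapshot and reassigns it.
 */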
2948 void rtw89_store_op_chan(struct rtw89_dev *rtwdev, bool backup)
2949 {
2950 	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
2951 	const struct rtw89_chan *cur = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
2952 	struct rtw89_chan new;
2953 
2954 	if (backup) {
2955 		scan_info->op_pri_ch = cur->primary_channel;
2956 		scan_info->op_chan = cur->channel;
2957 		scan_info->op_bw = cur->band_width;
2958 		scan_info->op_band = cur->band_type;
2959 	} else {
2960 		rtw89_chan_create(&new, scan_info->op_chan, scan_info->op_pri_ch,
2961 				  scan_info->op_band, scan_info->op_bw);
2962 		rtw89_assign_entity_chan(rtwdev, RTW89_SUB_ENTITY_0, &new);
2963 	}
2964 }
2965 
2966 #define H2C_FW_CPU_EXCEPTION_LEN 4
2967 #define H2C_FW_CPU_EXCEPTION_TYPE_DEF 0x5566
2968 int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev)
2969 {
2970 	struct sk_buff *skb;
2971 	int ret;
2972 
2973 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_FW_CPU_EXCEPTION_LEN);
2974 	if (!skb) {
2975 		rtw89_err(rtwdev,
2976 			  "failed to alloc skb for fw cpu exception\n");
2977 		return -ENOMEM;
2978 	}
2979 
2980 	skb_put(skb, H2C_FW_CPU_EXCEPTION_LEN);
2981 	RTW89_SET_FWCMD_CPU_EXCEPTION_TYPE(skb->data,
2982 					   H2C_FW_CPU_EXCEPTION_TYPE_DEF);
2983 
2984 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2985 			      H2C_CAT_TEST,
2986 			      H2C_CL_FW_STATUS_TEST,
2987 			      H2C_FUNC_CPU_EXCEPTION, 0, 0,
2988 			      H2C_FW_CPU_EXCEPTION_LEN);
2989 
2990 	ret = rtw89_h2c_tx(rtwdev, skb, false);
2991 	if (ret) {
2992 		rtw89_err(rtwdev, "failed to send h2c\n");
2993 		goto fail;
2994 	}
2995 
2996 	return 0;
2997 
2998 fail:
2999 	dev_kfree_skb_any(skb);
3000 	return ret;
3001 }
3002 
3003 #define H2C_PKT_DROP_LEN 24
3004 int rtw89_fw_h2c_pkt_drop(struct rtw89_dev *rtwdev,
3005 			  const struct rtw89_pkt_drop_params *params)
3006 {
3007 	struct sk_buff *skb;
3008 	int ret;
3009 
3010 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_PKT_DROP_LEN);
3011 	if (!skb) {
3012 		rtw89_err(rtwdev,
3013 			  "failed to alloc skb for packet drop\n");
3014 		return -ENOMEM;
3015 	}
3016 
3017 	switch (params->sel) {
3018 	case RTW89_PKT_DROP_SEL_MACID_BE_ONCE:
3019 	case RTW89_PKT_DROP_SEL_MACID_BK_ONCE:
3020 	case RTW89_PKT_DROP_SEL_MACID_VI_ONCE:
3021 	case RTW89_PKT_DROP_SEL_MACID_VO_ONCE:
3022 	case RTW89_PKT_DROP_SEL_BAND_ONCE:
3023 		break;
3024 	default:
3025 		rtw89_debug(rtwdev, RTW89_DBG_FW,
3026 			    "H2C of pkt drop might not fully support sel: %d yet\n",
3027 			    params->sel);
3028 		break;
3029 	}
3030 
3031 	skb_put(skb, H2C_PKT_DROP_LEN);
3032 	RTW89_SET_FWCMD_PKT_DROP_SEL(skb->data, params->sel);
3033 	RTW89_SET_FWCMD_PKT_DROP_MACID(skb->data, params->macid);
3034 	RTW89_SET_FWCMD_PKT_DROP_BAND(skb->data, params->mac_band);
3035 	RTW89_SET_FWCMD_PKT_DROP_PORT(skb->data, params->port);
3036 	RTW89_SET_FWCMD_PKT_DROP_MBSSID(skb->data, params->mbssid);
3037 	RTW89_SET_FWCMD_PKT_DROP_ROLE_A_INFO_TF_TRS(skb->data, params->tf_trs);
3038 	RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_0(skb->data,
3039 						  params->macid_band_sel[0]);
3040 	RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_1(skb->data,
3041 						  params->macid_band_sel[1]);
3042 	RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_2(skb->data,
3043 						  params->macid_band_sel[2]);
3044 	RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_3(skb->data,
3045 						  params->macid_band_sel[3]);
3046 
3047 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3048 			      H2C_CAT_MAC,
3049 			      H2C_CL_MAC_FW_OFLD,
3050 			      H2C_FUNC_PKT_DROP, 0, 0,
3051 			      H2C_PKT_DROP_LEN);
3052 
3053 	ret = rtw89_h2c_tx(rtwdev, skb, false);
3054 	if (ret) {
3055 		rtw89_err(rtwdev, "failed to send h2c\n");
3056 		goto fail;
3057 	}
3058 
3059 	return 0;
3060 
3061 fail:
3062 	dev_kfree_skb_any(skb);
3063 	return ret;
3064 }
3065 
3066 #define H2C_KEEP_ALIVE_LEN 4
3067 int rtw89_fw_h2c_keep_alive(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
3068 			    bool enable)
3069 {
3070 	struct sk_buff *skb;
3071 	u8 pkt_id = 0;
3072 	int ret;
3073 
3074 	if (enable) {
3075 		ret = rtw89_fw_h2c_add_wow_fw_ofld(rtwdev, rtwvif,
3076 						   RTW89_PKT_OFLD_TYPE_NULL_DATA, &pkt_id);
3077 		if (ret)
3078 			return -EPERM;
3079 	}
3080 
3081 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_KEEP_ALIVE_LEN);
3082 	if (!skb) {
3083 		rtw89_err(rtwdev, "failed to alloc skb for keep alive\n");
3084 		return -ENOMEM;
3085 	}
3086 
3087 	skb_put(skb, H2C_KEEP_ALIVE_LEN);
3088 
3089 	RTW89_SET_KEEP_ALIVE_ENABLE(skb->data, enable);
3090 	RTW89_SET_KEEP_ALIVE_PKT_NULL_ID(skb->data, pkt_id);
3091 	RTW89_SET_KEEP_ALIVE_PERIOD(skb->data, 5);
3092 	RTW89_SET_KEEP_ALIVE_MACID(skb->data, rtwvif->mac_id);
3093 
3094 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3095 			      H2C_CAT_MAC,
3096 			      H2C_CL_MAC_WOW,
3097 			      H2C_FUNC_KEEP_ALIVE, 0, 1,
3098 			      H2C_KEEP_ALIVE_LEN);
3099 
3100 	ret = rtw89_h2c_tx(rtwdev, skb, false);
3101 	if (ret) {
3102 		rtw89_err(rtwdev, "failed to send h2c\n");
3103 		goto fail;
3104 	}
3105 
3106 	return 0;
3107 
3108 fail:
3109 	dev_kfree_skb_any(skb);
3110 
3111 	return ret;
3112 }
3113 
3114 #define H2C_DISCONNECT_DETECT_LEN 8
3115 int rtw89_fw_h2c_disconnect_detect(struct rtw89_dev *rtwdev,
3116 				   struct rtw89_vif *rtwvif, bool enable)
3117 {
3118 	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
3119 	struct sk_buff *skb;
3120 	u8 macid = rtwvif->mac_id;
3121 	int ret;
3122 
3123 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DISCONNECT_DETECT_LEN);
3124 	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for disconnect detect\n");
3126 		return -ENOMEM;
3127 	}
3128 
3129 	skb_put(skb, H2C_DISCONNECT_DETECT_LEN);
3130 
3131 	if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) {
3132 		RTW89_SET_DISCONNECT_DETECT_ENABLE(skb->data, enable);
3133 		RTW89_SET_DISCONNECT_DETECT_DISCONNECT(skb->data, !enable);
3134 		RTW89_SET_DISCONNECT_DETECT_MAC_ID(skb->data, macid);
3135 		RTW89_SET_DISCONNECT_DETECT_CHECK_PERIOD(skb->data, 100);
3136 		RTW89_SET_DISCONNECT_DETECT_TRY_PKT_COUNT(skb->data, 5);
3137 	}
3138 
3139 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3140 			      H2C_CAT_MAC,
3141 			      H2C_CL_MAC_WOW,
3142 			      H2C_FUNC_DISCONNECT_DETECT, 0, 1,
3143 			      H2C_DISCONNECT_DETECT_LEN);
3144 
3145 	ret = rtw89_h2c_tx(rtwdev, skb, false);
3146 	if (ret) {
3147 		rtw89_err(rtwdev, "failed to send h2c\n");
3148 		goto fail;
3149 	}
3150 
3151 	return 0;
3152 
3153 fail:
3154 	dev_kfree_skb_any(skb);
3155 
3156 	return ret;
3157 }
3158 
3159 #define H2C_WOW_GLOBAL_LEN 8
3160 int rtw89_fw_h2c_wow_global(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
3161 			    bool enable)
3162 {
3163 	struct sk_buff *skb;
3164 	u8 macid = rtwvif->mac_id;
3165 	int ret;
3166 
3167 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WOW_GLOBAL_LEN);
3168 	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for wow global\n");
3170 		return -ENOMEM;
3171 	}
3172 
3173 	skb_put(skb, H2C_WOW_GLOBAL_LEN);
3174 
3175 	RTW89_SET_WOW_GLOBAL_ENABLE(skb->data, enable);
3176 	RTW89_SET_WOW_GLOBAL_MAC_ID(skb->data, macid);
3177 
3178 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3179 			      H2C_CAT_MAC,
3180 			      H2C_CL_MAC_WOW,
3181 			      H2C_FUNC_WOW_GLOBAL, 0, 1,
3182 			      H2C_WOW_GLOBAL_LEN);
3183 
3184 	ret = rtw89_h2c_tx(rtwdev, skb, false);
3185 	if (ret) {
3186 		rtw89_err(rtwdev, "failed to send h2c\n");
3187 		goto fail;
3188 	}
3189 
3190 	return 0;
3191 
3192 fail:
3193 	dev_kfree_skb_any(skb);
3194 
3195 	return ret;
3196 }
3197 
3198 #define H2C_WAKEUP_CTRL_LEN 4
3199 int rtw89_fw_h2c_wow_wakeup_ctrl(struct rtw89_dev *rtwdev,
3200 				 struct rtw89_vif *rtwvif,
3201 				 bool enable)
3202 {
3203 	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
3204 	struct sk_buff *skb;
3205 	u8 macid = rtwvif->mac_id;
3206 	int ret;
3207 
3208 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WAKEUP_CTRL_LEN);
3209 	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for wakeup ctrl\n");
3211 		return -ENOMEM;
3212 	}
3213 
3214 	skb_put(skb, H2C_WAKEUP_CTRL_LEN);
3215 
3216 	if (rtw_wow->pattern_cnt)
3217 		RTW89_SET_WOW_WAKEUP_CTRL_PATTERN_MATCH_ENABLE(skb->data, enable);
3218 	if (test_bit(RTW89_WOW_FLAG_EN_MAGIC_PKT, rtw_wow->flags))
3219 		RTW89_SET_WOW_WAKEUP_CTRL_MAGIC_ENABLE(skb->data, enable);
3220 	if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags))
3221 		RTW89_SET_WOW_WAKEUP_CTRL_DEAUTH_ENABLE(skb->data, enable);
3222 
3223 	RTW89_SET_WOW_WAKEUP_CTRL_MAC_ID(skb->data, macid);
3224 
3225 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3226 			      H2C_CAT_MAC,
3227 			      H2C_CL_MAC_WOW,
3228 			      H2C_FUNC_WAKEUP_CTRL, 0, 1,
3229 			      H2C_WAKEUP_CTRL_LEN);
3230 
3231 	ret = rtw89_h2c_tx(rtwdev, skb, false);
3232 	if (ret) {
3233 		rtw89_err(rtwdev, "failed to send h2c\n");
3234 		goto fail;
3235 	}
3236 
3237 	return 0;
3238 
3239 fail:
3240 	dev_kfree_skb_any(skb);
3241 
3242 	return ret;
3243 }
3244 
3245 #define H2C_WOW_CAM_UPD_LEN 24
3246 int rtw89_fw_wow_cam_update(struct rtw89_dev *rtwdev,
3247 			    struct rtw89_wow_cam_info *cam_info)
3248 {
3249 	struct sk_buff *skb;
3250 	int ret;
3251 
3252 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WOW_CAM_UPD_LEN);
3253 	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for wow cam update\n");
3255 		return -ENOMEM;
3256 	}
3257 
3258 	skb_put(skb, H2C_WOW_CAM_UPD_LEN);
3259 
3260 	RTW89_SET_WOW_CAM_UPD_R_W(skb->data, cam_info->r_w);
3261 	RTW89_SET_WOW_CAM_UPD_IDX(skb->data, cam_info->idx);
3262 	if (cam_info->valid) {
3263 		RTW89_SET_WOW_CAM_UPD_WKFM1(skb->data, cam_info->mask[0]);
3264 		RTW89_SET_WOW_CAM_UPD_WKFM2(skb->data, cam_info->mask[1]);
3265 		RTW89_SET_WOW_CAM_UPD_WKFM3(skb->data, cam_info->mask[2]);
3266 		RTW89_SET_WOW_CAM_UPD_WKFM4(skb->data, cam_info->mask[3]);
3267 		RTW89_SET_WOW_CAM_UPD_CRC(skb->data, cam_info->crc);
3268 		RTW89_SET_WOW_CAM_UPD_NEGATIVE_PATTERN_MATCH(skb->data,
3269 							     cam_info->negative_pattern_match);
3270 		RTW89_SET_WOW_CAM_UPD_SKIP_MAC_HDR(skb->data,
3271 						   cam_info->skip_mac_hdr);
3272 		RTW89_SET_WOW_CAM_UPD_UC(skb->data, cam_info->uc);
3273 		RTW89_SET_WOW_CAM_UPD_MC(skb->data, cam_info->mc);
3274 		RTW89_SET_WOW_CAM_UPD_BC(skb->data, cam_info->bc);
3275 	}
3276 	RTW89_SET_WOW_CAM_UPD_VALID(skb->data, cam_info->valid);
3277 
3278 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3279 			      H2C_CAT_MAC,
3280 			      H2C_CL_MAC_WOW,
3281 			      H2C_FUNC_WOW_CAM_UPD, 0, 1,
3282 			      H2C_WOW_CAM_UPD_LEN);
3283 
3284 	ret = rtw89_h2c_tx(rtwdev, skb, false);
3285 	if (ret) {
3286 		rtw89_err(rtwdev, "failed to send h2c\n");
3287 		goto fail;
3288 	}
3289 
3290 	return 0;
3291 fail:
3292 	dev_kfree_skb_any(skb);
3293 
3294 	return ret;
3295 }
3296 
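/* Send an H2C and block until the matching completion (@cond) is signalled
 * by the corresponding C2H report; used by the MCC commands below.
 */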
3297 static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb,
3298 				 struct rtw89_wait_info *wait, unsigned int cond)
3299 {
3300 	int ret;
3301 
3302 	ret = rtw89_h2c_tx(rtwdev, skb, false);
3303 	if (ret) {
3304 		rtw89_err(rtwdev, "failed to send h2c\n");
3305 		dev_kfree_skb_any(skb);
3306 		return -EBUSY;
3307 	}
3308 
3309 	return rtw89_wait_for_cond(wait, cond);
3310 }
3311 
3312 #define H2C_ADD_MCC_LEN 16
3313 int rtw89_fw_h2c_add_mcc(struct rtw89_dev *rtwdev,
3314 			 const struct rtw89_fw_mcc_add_req *p)
3315 {
3316 	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
3317 	struct sk_buff *skb;
3318 	unsigned int cond;
3319 
3320 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ADD_MCC_LEN);
3321 	if (!skb) {
3322 		rtw89_err(rtwdev,
3323 			  "failed to alloc skb for add mcc\n");
3324 		return -ENOMEM;
3325 	}
3326 
3327 	skb_put(skb, H2C_ADD_MCC_LEN);
3328 	RTW89_SET_FWCMD_ADD_MCC_MACID(skb->data, p->macid);
3329 	RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG0(skb->data, p->central_ch_seg0);
3330 	RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG1(skb->data, p->central_ch_seg1);
3331 	RTW89_SET_FWCMD_ADD_MCC_PRIMARY_CH(skb->data, p->primary_ch);
3332 	RTW89_SET_FWCMD_ADD_MCC_BANDWIDTH(skb->data, p->bandwidth);
3333 	RTW89_SET_FWCMD_ADD_MCC_GROUP(skb->data, p->group);
3334 	RTW89_SET_FWCMD_ADD_MCC_C2H_RPT(skb->data, p->c2h_rpt);
3335 	RTW89_SET_FWCMD_ADD_MCC_DIS_TX_NULL(skb->data, p->dis_tx_null);
3336 	RTW89_SET_FWCMD_ADD_MCC_DIS_SW_RETRY(skb->data, p->dis_sw_retry);
3337 	RTW89_SET_FWCMD_ADD_MCC_IN_CURR_CH(skb->data, p->in_curr_ch);
3338 	RTW89_SET_FWCMD_ADD_MCC_SW_RETRY_COUNT(skb->data, p->sw_retry_count);
3339 	RTW89_SET_FWCMD_ADD_MCC_TX_NULL_EARLY(skb->data, p->tx_null_early);
3340 	RTW89_SET_FWCMD_ADD_MCC_BTC_IN_2G(skb->data, p->btc_in_2g);
3341 	RTW89_SET_FWCMD_ADD_MCC_PTA_EN(skb->data, p->pta_en);
3342 	RTW89_SET_FWCMD_ADD_MCC_RFK_BY_PASS(skb->data, p->rfk_by_pass);
3343 	RTW89_SET_FWCMD_ADD_MCC_CH_BAND_TYPE(skb->data, p->ch_band_type);
3344 	RTW89_SET_FWCMD_ADD_MCC_DURATION(skb->data, p->duration);
3345 	RTW89_SET_FWCMD_ADD_MCC_COURTESY_EN(skb->data, p->courtesy_en);
3346 	RTW89_SET_FWCMD_ADD_MCC_COURTESY_NUM(skb->data, p->courtesy_num);
3347 	RTW89_SET_FWCMD_ADD_MCC_COURTESY_TARGET(skb->data, p->courtesy_target);
3348 
3349 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3350 			      H2C_CAT_MAC,
3351 			      H2C_CL_MCC,
3352 			      H2C_FUNC_ADD_MCC, 0, 0,
3353 			      H2C_ADD_MCC_LEN);
3354 
3355 	cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_ADD_MCC);
3356 	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
3357 }
3358 
3359 #define H2C_START_MCC_LEN 12
3360 int rtw89_fw_h2c_start_mcc(struct rtw89_dev *rtwdev,
3361 			   const struct rtw89_fw_mcc_start_req *p)
3362 {
3363 	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
3364 	struct sk_buff *skb;
3365 	unsigned int cond;
3366 
3367 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_START_MCC_LEN);
3368 	if (!skb) {
3369 		rtw89_err(rtwdev,
3370 			  "failed to alloc skb for start mcc\n");
3371 		return -ENOMEM;
3372 	}
3373 
3374 	skb_put(skb, H2C_START_MCC_LEN);
3375 	RTW89_SET_FWCMD_START_MCC_GROUP(skb->data, p->group);
3376 	RTW89_SET_FWCMD_START_MCC_BTC_IN_GROUP(skb->data, p->btc_in_group);
3377 	RTW89_SET_FWCMD_START_MCC_OLD_GROUP_ACTION(skb->data, p->old_group_action);
3378 	RTW89_SET_FWCMD_START_MCC_OLD_GROUP(skb->data, p->old_group);
3379 	RTW89_SET_FWCMD_START_MCC_NOTIFY_CNT(skb->data, p->notify_cnt);
3380 	RTW89_SET_FWCMD_START_MCC_NOTIFY_RXDBG_EN(skb->data, p->notify_rxdbg_en);
3381 	RTW89_SET_FWCMD_START_MCC_MACID(skb->data, p->macid);
3382 	RTW89_SET_FWCMD_START_MCC_TSF_LOW(skb->data, p->tsf_low);
3383 	RTW89_SET_FWCMD_START_MCC_TSF_HIGH(skb->data, p->tsf_high);
3384 
3385 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3386 			      H2C_CAT_MAC,
3387 			      H2C_CL_MCC,
3388 			      H2C_FUNC_START_MCC, 0, 0,
3389 			      H2C_START_MCC_LEN);
3390 
3391 	cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_START_MCC);
3392 	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
3393 }
3394 
3395 #define H2C_STOP_MCC_LEN 4
3396 int rtw89_fw_h2c_stop_mcc(struct rtw89_dev *rtwdev, u8 group, u8 macid,
3397 			  bool prev_groups)
3398 {
3399 	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
3400 	struct sk_buff *skb;
3401 	unsigned int cond;
3402 
3403 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_STOP_MCC_LEN);
3404 	if (!skb) {
3405 		rtw89_err(rtwdev,
3406 			  "failed to alloc skb for stop mcc\n");
3407 		return -ENOMEM;
3408 	}
3409 
3410 	skb_put(skb, H2C_STOP_MCC_LEN);
3411 	RTW89_SET_FWCMD_STOP_MCC_MACID(skb->data, macid);
3412 	RTW89_SET_FWCMD_STOP_MCC_GROUP(skb->data, group);
3413 	RTW89_SET_FWCMD_STOP_MCC_PREV_GROUPS(skb->data, prev_groups);
3414 
3415 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3416 			      H2C_CAT_MAC,
3417 			      H2C_CL_MCC,
3418 			      H2C_FUNC_STOP_MCC, 0, 0,
3419 			      H2C_STOP_MCC_LEN);
3420 
3421 	cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_STOP_MCC);
3422 	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
3423 }
3424 
3425 #define H2C_DEL_MCC_GROUP_LEN 4
3426 int rtw89_fw_h2c_del_mcc_group(struct rtw89_dev *rtwdev, u8 group,
3427 			       bool prev_groups)
3428 {
3429 	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
3430 	struct sk_buff *skb;
3431 	unsigned int cond;
3432 
3433 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DEL_MCC_GROUP_LEN);
3434 	if (!skb) {
3435 		rtw89_err(rtwdev,
3436 			  "failed to alloc skb for del mcc group\n");
3437 		return -ENOMEM;
3438 	}
3439 
3440 	skb_put(skb, H2C_DEL_MCC_GROUP_LEN);
3441 	RTW89_SET_FWCMD_DEL_MCC_GROUP_GROUP(skb->data, group);
3442 	RTW89_SET_FWCMD_DEL_MCC_GROUP_PREV_GROUPS(skb->data, prev_groups);
3443 
3444 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3445 			      H2C_CAT_MAC,
3446 			      H2C_CL_MCC,
3447 			      H2C_FUNC_DEL_MCC_GROUP, 0, 0,
3448 			      H2C_DEL_MCC_GROUP_LEN);
3449 
3450 	cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_DEL_MCC_GROUP);
3451 	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
3452 }
3453 
3454 #define H2C_RESET_MCC_GROUP_LEN 4
3455 int rtw89_fw_h2c_reset_mcc_group(struct rtw89_dev *rtwdev, u8 group)
3456 {
3457 	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
3458 	struct sk_buff *skb;
3459 	unsigned int cond;
3460 
3461 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_RESET_MCC_GROUP_LEN);
3462 	if (!skb) {
3463 		rtw89_err(rtwdev,
3464 			  "failed to alloc skb for reset mcc group\n");
3465 		return -ENOMEM;
3466 	}
3467 
3468 	skb_put(skb, H2C_RESET_MCC_GROUP_LEN);
3469 	RTW89_SET_FWCMD_RESET_MCC_GROUP_GROUP(skb->data, group);
3470 
3471 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3472 			      H2C_CAT_MAC,
3473 			      H2C_CL_MCC,
3474 			      H2C_FUNC_RESET_MCC_GROUP, 0, 0,
3475 			      H2C_RESET_MCC_GROUP_LEN);
3476 
3477 	cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_RESET_MCC_GROUP);
3478 	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
3479 }
3480 
3481 #define H2C_MCC_REQ_TSF_LEN 4
3482 int rtw89_fw_h2c_mcc_req_tsf(struct rtw89_dev *rtwdev,
3483 			     const struct rtw89_fw_mcc_tsf_req *req,
3484 			     struct rtw89_mac_mcc_tsf_rpt *rpt)
3485 {
3486 	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
3487 	struct rtw89_mac_mcc_tsf_rpt *tmp;
3488 	struct sk_buff *skb;
3489 	unsigned int cond;
3490 	int ret;
3491 
3492 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_REQ_TSF_LEN);
3493 	if (!skb) {
3494 		rtw89_err(rtwdev,
3495 			  "failed to alloc skb for mcc req tsf\n");
3496 		return -ENOMEM;
3497 	}
3498 
3499 	skb_put(skb, H2C_MCC_REQ_TSF_LEN);
3500 	RTW89_SET_FWCMD_MCC_REQ_TSF_GROUP(skb->data, req->group);
3501 	RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_X(skb->data, req->macid_x);
3502 	RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_Y(skb->data, req->macid_y);
3503 
3504 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3505 			      H2C_CAT_MAC,
3506 			      H2C_CL_MCC,
3507 			      H2C_FUNC_MCC_REQ_TSF, 0, 0,
3508 			      H2C_MCC_REQ_TSF_LEN);
3509 
3510 	cond = RTW89_MCC_WAIT_COND(req->group, H2C_FUNC_MCC_REQ_TSF);
3511 	ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
3512 	if (ret)
3513 		return ret;
3514 
3515 	tmp = (struct rtw89_mac_mcc_tsf_rpt *)wait->data.buf;
3516 	*rpt = *tmp;
3517 
3518 	return 0;
3519 }
3520 
3521 #define H2C_MCC_MACID_BITMAP_DSC_LEN 4
3522 int rtw89_fw_h2c_mcc_macid_bitamp(struct rtw89_dev *rtwdev, u8 group, u8 macid,
3523 				  u8 *bitmap)
3524 {
3525 	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
3526 	struct sk_buff *skb;
3527 	unsigned int cond;
3528 	u8 map_len;
3529 	u8 h2c_len;
3530 
3531 	BUILD_BUG_ON(RTW89_MAX_MAC_ID_NUM % 8);
3532 	map_len = RTW89_MAX_MAC_ID_NUM / 8;
3533 	h2c_len = H2C_MCC_MACID_BITMAP_DSC_LEN + map_len;
3534 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, h2c_len);
3535 	if (!skb) {
3536 		rtw89_err(rtwdev,
3537 			  "failed to alloc skb for mcc macid bitmap\n");
3538 		return -ENOMEM;
3539 	}
3540 
3541 	skb_put(skb, h2c_len);
3542 	RTW89_SET_FWCMD_MCC_MACID_BITMAP_GROUP(skb->data, group);
3543 	RTW89_SET_FWCMD_MCC_MACID_BITMAP_MACID(skb->data, macid);
3544 	RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP_LENGTH(skb->data, map_len);
3545 	RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP(skb->data, bitmap, map_len);
3546 
3547 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3548 			      H2C_CAT_MAC,
3549 			      H2C_CL_MCC,
3550 			      H2C_FUNC_MCC_MACID_BITMAP, 0, 0,
3551 			      h2c_len);
3552 
3553 	cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_MACID_BITMAP);
3554 	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
3555 }
3556 
3557 #define H2C_MCC_SYNC_LEN 4
3558 int rtw89_fw_h2c_mcc_sync(struct rtw89_dev *rtwdev, u8 group, u8 source,
3559 			  u8 target, u8 offset)
3560 {
3561 	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
3562 	struct sk_buff *skb;
3563 	unsigned int cond;
3564 
3565 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SYNC_LEN);
3566 	if (!skb) {
3567 		rtw89_err(rtwdev,
3568 			  "failed to alloc skb for mcc sync\n");
3569 		return -ENOMEM;
3570 	}
3571 
3572 	skb_put(skb, H2C_MCC_SYNC_LEN);
3573 	RTW89_SET_FWCMD_MCC_SYNC_GROUP(skb->data, group);
3574 	RTW89_SET_FWCMD_MCC_SYNC_MACID_SOURCE(skb->data, source);
3575 	RTW89_SET_FWCMD_MCC_SYNC_MACID_TARGET(skb->data, target);
3576 	RTW89_SET_FWCMD_MCC_SYNC_SYNC_OFFSET(skb->data, offset);
3577 
3578 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3579 			      H2C_CAT_MAC,
3580 			      H2C_CL_MCC,
3581 			      H2C_FUNC_MCC_SYNC, 0, 0,
3582 			      H2C_MCC_SYNC_LEN);
3583 
3584 	cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_SYNC);
3585 	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
3586 }
3587 
3588 #define H2C_MCC_SET_DURATION_LEN 20
3589 int rtw89_fw_h2c_mcc_set_duration(struct rtw89_dev *rtwdev,
3590 				  const struct rtw89_fw_mcc_duration *p)
3591 {
3592 	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
3593 	struct sk_buff *skb;
3594 	unsigned int cond;
3595 
3596 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SET_DURATION_LEN);
3597 	if (!skb) {
3598 		rtw89_err(rtwdev,
3599 			  "failed to alloc skb for mcc set duration\n");
3600 		return -ENOMEM;
3601 	}
3602 
3603 	skb_put(skb, H2C_MCC_SET_DURATION_LEN);
3604 	RTW89_SET_FWCMD_MCC_SET_DURATION_GROUP(skb->data, p->group);
3605 	RTW89_SET_FWCMD_MCC_SET_DURATION_BTC_IN_GROUP(skb->data, p->btc_in_group);
3606 	RTW89_SET_FWCMD_MCC_SET_DURATION_START_MACID(skb->data, p->start_macid);
3607 	RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_X(skb->data, p->macid_x);
3608 	RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_Y(skb->data, p->macid_y);
3609 	RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_LOW(skb->data,
3610 						       p->start_tsf_low);
3611 	RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_HIGH(skb->data,
3612 							p->start_tsf_high);
3613 	RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_X(skb->data, p->duration_x);
3614 	RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_Y(skb->data, p->duration_y);
3615 
3616 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3617 			      H2C_CAT_MAC,
3618 			      H2C_CL_MCC,
3619 			      H2C_FUNC_MCC_SET_DURATION, 0, 0,
3620 			      H2C_MCC_SET_DURATION_LEN);
3621 
3622 	cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_MCC_SET_DURATION);
3623 	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
3624 }
3625