// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2019-2020  Realtek Corporation
 */

#include "cam.h"
#include "coex.h"
#include "debug.h"
#include "fw.h"
#include "mac.h"
#include "phy.h"
#include "reg.h"

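/* Allocate an skb for an H2C command. Headroom is reserved for the optional
 * H2C command header plus another 24 bytes, presumably for the TX descriptor
 * prepended later on the HCI path; the exact split is an assumption based on
 * how rtw89_h2c_pkt_set_hdr() and rtw89_h2c_tx() use the buffer.
 */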
static struct sk_buff *rtw89_fw_h2c_alloc_skb(u32 len, bool header)
{
	struct sk_buff *skb;
	u32 header_len = 0;

	if (header)
		header_len = H2C_HEADER_LEN;

	skb = dev_alloc_skb(len + header_len + 24);
	if (!skb)
		return NULL;
	skb_reserve(skb, header_len + 24);
	memset(skb->data, 0, len);

	return skb;
}

struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(u32 len)
{
	return rtw89_fw_h2c_alloc_skb(len, true);
}

struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(u32 len)
{
	return rtw89_fw_h2c_alloc_skb(len, false);
}

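/* _fw_get_rdy() reads the WCPU firmware-download status field;
 * rtw89_fw_check_rdy() polls it until the firmware reports init-ready and
 * maps the failure codes (checksum, security, cut mismatch) to errnos.
 */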
static u8 _fw_get_rdy(struct rtw89_dev *rtwdev)
{
	u8 val = rtw89_read8(rtwdev, R_AX_WCPU_FW_CTRL);

	return FIELD_GET(B_AX_WCPU_FWDL_STS_MASK, val);
}

#define FWDL_WAIT_CNT 400000
int rtw89_fw_check_rdy(struct rtw89_dev *rtwdev)
{
	u8 val;
	int ret;

	ret = read_poll_timeout_atomic(_fw_get_rdy, val,
				       val == RTW89_FWDL_WCPU_FW_INIT_RDY,
				       1, FWDL_WAIT_CNT, false, rtwdev);
	if (ret) {
		switch (val) {
		case RTW89_FWDL_CHECKSUM_FAIL:
			rtw89_err(rtwdev, "fw checksum fail\n");
			return -EINVAL;

		case RTW89_FWDL_SECURITY_FAIL:
			rtw89_err(rtwdev, "fw security fail\n");
			return -EINVAL;

		case RTW89_FWDL_CV_NOT_MATCH:
			rtw89_err(rtwdev, "fw cv not match\n");
			return -EINVAL;

		default:
			return -EBUSY;
		}
	}

	set_bit(RTW89_FLAG_FW_RDY, rtwdev->flags);

	return 0;
}

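/* Parse the firmware image: read the global header, walk the per-section
 * headers to record each section's length, download address and re-download
 * flag, and verify that the summed section lengths match the file size.
 */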
static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
			       struct rtw89_fw_bin_info *info)
{
	struct rtw89_fw_hdr_section_info *section_info;
	const u8 *fw_end = fw + len;
	const u8 *bin;
	u32 i;

	if (!info)
		return -EINVAL;

	info->section_num = GET_FW_HDR_SEC_NUM(fw);
	info->hdr_len = RTW89_FW_HDR_SIZE +
			info->section_num * RTW89_FW_SECTION_HDR_SIZE;
	SET_FW_HDR_PART_SIZE(fw, FWDL_SECTION_PER_PKT_LEN);

	bin = fw + info->hdr_len;

	/* jump to section header */
	fw += RTW89_FW_HDR_SIZE;
	section_info = info->section_info;
	for (i = 0; i < info->section_num; i++) {
		section_info->len = GET_FWSECTION_HDR_SEC_SIZE(fw);
		if (GET_FWSECTION_HDR_CHECKSUM(fw))
			section_info->len += FWDL_SECTION_CHKSUM_LEN;
		section_info->redl = GET_FWSECTION_HDR_REDL(fw);
		section_info->dladdr =
				GET_FWSECTION_HDR_DL_ADDR(fw) & 0x1fffffff;
		section_info->addr = bin;
		bin += section_info->len;
		fw += RTW89_FW_SECTION_HDR_SIZE;
		section_info++;
	}

	if (fw_end != bin) {
		rtw89_err(rtwdev, "[ERR]fw bin size\n");
		return -EINVAL;
	}

	return 0;
}

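/* Locate a firmware suit inside the multi-firmware (MFW) container by
 * matching the chip cut version and firmware type. Images without the MFW
 * signature are treated as legacy single-firmware files, which carry the
 * normal type only.
 */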
static
int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
			struct rtw89_fw_suit *fw_suit)
{
	struct rtw89_fw_info *fw_info = &rtwdev->fw;
	const u8 *mfw = fw_info->firmware->data;
	u32 mfw_len = fw_info->firmware->size;
	const struct rtw89_mfw_hdr *mfw_hdr = (const struct rtw89_mfw_hdr *)mfw;
	const struct rtw89_mfw_info *mfw_info;
	int i;

	if (mfw_hdr->sig != RTW89_MFW_SIG) {
		rtw89_debug(rtwdev, RTW89_DBG_FW, "use legacy firmware\n");
		/* legacy firmware supports the normal type only */
		if (type != RTW89_FW_NORMAL)
			return -EINVAL;
		fw_suit->data = mfw;
		fw_suit->size = mfw_len;
		return 0;
	}

	for (i = 0; i < mfw_hdr->fw_nr; i++) {
		mfw_info = &mfw_hdr->info[i];
		if (mfw_info->cv != rtwdev->hal.cv ||
		    mfw_info->type != type ||
		    mfw_info->mp)
			continue;

		fw_suit->data = mfw + le32_to_cpu(mfw_info->shift);
		fw_suit->size = le32_to_cpu(mfw_info->size);
		return 0;
	}

	rtw89_err(rtwdev, "no suitable firmware found\n");
	return -ENOENT;
}

static void rtw89_fw_update_ver(struct rtw89_dev *rtwdev,
				enum rtw89_fw_type type,
				struct rtw89_fw_suit *fw_suit)
{
	const u8 *hdr = fw_suit->data;

	fw_suit->major_ver = GET_FW_HDR_MAJOR_VERSION(hdr);
	fw_suit->minor_ver = GET_FW_HDR_MINOR_VERSION(hdr);
	fw_suit->sub_ver = GET_FW_HDR_SUBVERSION(hdr);
	fw_suit->sub_idex = GET_FW_HDR_SUBINDEX(hdr);
	fw_suit->build_year = GET_FW_HDR_YEAR(hdr);
	fw_suit->build_mon = GET_FW_HDR_MONTH(hdr);
	fw_suit->build_date = GET_FW_HDR_DATE(hdr);
	fw_suit->build_hour = GET_FW_HDR_HOUR(hdr);
	fw_suit->build_min = GET_FW_HDR_MIN(hdr);
	fw_suit->cmd_ver = GET_FW_HDR_CMD_VERSERION(hdr);

	rtw89_info(rtwdev,
		   "Firmware version %u.%u.%u.%u, cmd version %u, type %u\n",
		   fw_suit->major_ver, fw_suit->minor_ver, fw_suit->sub_ver,
		   fw_suit->sub_idex, fw_suit->cmd_ver, type);
}

static
int __rtw89_fw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type)
{
	struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type);
	int ret;

	ret = rtw89_mfw_recognize(rtwdev, type, fw_suit);
	if (ret)
		return ret;

	rtw89_fw_update_ver(rtwdev, type, fw_suit);

	return 0;
}

static void rtw89_fw_recognize_features(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL);

	if (chip->chip_id == RTL8852A &&
	    RTW89_FW_SUIT_VER_CODE(fw_suit) <= RTW89_FW_VER_CODE(0, 13, 29, 0))
		rtwdev->fw.old_ht_ra_format = true;
}

int rtw89_fw_recognize(struct rtw89_dev *rtwdev)
{
	int ret;

	ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL);
	if (ret)
		return ret;

	/* It still works even if the WoWLAN firmware is missing. */
	__rtw89_fw_recognize(rtwdev, RTW89_FW_WOWLAN);

	rtw89_fw_recognize_features(rtwdev);

	return 0;
}

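/* Prepend the 8-byte firmware command header. Every fourth H2C sequence
 * number forces a receive-ack request, presumably so the driver can detect
 * a stalled firmware without requesting an ack for every single command.
 */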
void rtw89_h2c_pkt_set_hdr(struct rtw89_dev *rtwdev, struct sk_buff *skb,
			   u8 type, u8 cat, u8 class, u8 func,
			   bool rack, bool dack, u32 len)
{
	struct fwcmd_hdr *hdr;

	hdr = (struct fwcmd_hdr *)skb_push(skb, 8);

	if (!(rtwdev->fw.h2c_seq % 4))
		rack = true;
	hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) |
				FIELD_PREP(H2C_HDR_CAT, cat) |
				FIELD_PREP(H2C_HDR_CLASS, class) |
				FIELD_PREP(H2C_HDR_FUNC, func) |
				FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq));

	hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN,
					   len + H2C_HEADER_LEN) |
				(rack ? H2C_HDR_REC_ACK : 0) |
				(dack ? H2C_HDR_DONE_ACK : 0));

	rtwdev->fw.h2c_seq++;
}

static void rtw89_h2c_pkt_set_hdr_fwdl(struct rtw89_dev *rtwdev,
				       struct sk_buff *skb,
				       u8 type, u8 cat, u8 class, u8 func,
				       u32 len)
{
	struct fwcmd_hdr *hdr;

	hdr = (struct fwcmd_hdr *)skb_push(skb, 8);

	hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) |
				FIELD_PREP(H2C_HDR_CAT, cat) |
				FIELD_PREP(H2C_HDR_CLASS, class) |
				FIELD_PREP(H2C_HDR_FUNC, func) |
				FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq));

	hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN,
					   len + H2C_HEADER_LEN));
}

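/* Send the firmware header (global plus section headers) as a single FWDL
 * H2C, then wait for the firmware-download path to become ready before the
 * section payloads are streamed.
 */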
static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 len)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw hdr dl\n");
		return -ENOMEM;
	}

	skb_put_data(skb, fw, len);
	rtw89_h2c_pkt_set_hdr_fwdl(rtwdev, skb, FWCMD_TYPE_H2C,
				   H2C_CAT_MAC, H2C_CL_MAC_FWDL,
				   H2C_FUNC_MAC_FWHDR_DL, len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		ret = -1;
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

static int rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 len)
{
	u8 val;
	int ret;

	ret = __rtw89_fw_download_hdr(rtwdev, fw, len);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]FW header download\n");
		return ret;
	}

	ret = read_poll_timeout_atomic(rtw89_read8, val, val & B_AX_FWDL_PATH_RDY,
				       1, FWDL_WAIT_CNT, false,
				       rtwdev, R_AX_WCPU_FW_CTRL);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]FWDL path ready\n");
		return ret;
	}

	rtw89_write32(rtwdev, R_AX_HALT_H2C_CTRL, 0);
	rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0);

	return 0;
}

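/* Stream one firmware section to the WCPU, split into
 * FWDL_SECTION_PER_PKT_LEN-sized H2C packets without command headers.
 */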
static int __rtw89_fw_download_main(struct rtw89_dev *rtwdev,
				    struct rtw89_fw_hdr_section_info *info)
{
	struct sk_buff *skb;
	const u8 *section = info->addr;
	u32 residue_len = info->len;
	u32 pkt_len;
	int ret;

	while (residue_len) {
		if (residue_len >= FWDL_SECTION_PER_PKT_LEN)
			pkt_len = FWDL_SECTION_PER_PKT_LEN;
		else
			pkt_len = residue_len;

		skb = rtw89_fw_h2c_alloc_skb_no_hdr(pkt_len);
		if (!skb) {
			rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
			return -ENOMEM;
		}
		skb_put_data(skb, section, pkt_len);

		ret = rtw89_h2c_tx(rtwdev, skb, true);
		if (ret) {
			rtw89_err(rtwdev, "failed to send h2c\n");
			ret = -1;
			goto fail;
		}

		section += pkt_len;
		residue_len -= pkt_len;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

static int rtw89_fw_download_main(struct rtw89_dev *rtwdev, const u8 *fw,
				  struct rtw89_fw_bin_info *info)
{
	struct rtw89_fw_hdr_section_info *section_info = info->section_info;
	u8 section_num = info->section_num;
	int ret;

	while (section_num--) {
		ret = __rtw89_fw_download_main(rtwdev, section_info);
		if (ret)
			return ret;
		section_info++;
	}

	mdelay(5);

	ret = rtw89_fw_check_rdy(rtwdev);
	if (ret) {
		rtw89_warn(rtwdev, "download firmware fail\n");
		return ret;
	}

	return 0;
}

static void rtw89_fw_prog_cnt_dump(struct rtw89_dev *rtwdev)
{
	u32 val32;
	u16 index;

	rtw89_write32(rtwdev, R_AX_DBG_CTRL,
		      FIELD_PREP(B_AX_DBG_SEL0, FW_PROG_CNTR_DBG_SEL) |
		      FIELD_PREP(B_AX_DBG_SEL1, FW_PROG_CNTR_DBG_SEL));
	rtw89_write32_mask(rtwdev, R_AX_SYS_STATUS1, B_AX_SEL_0XC0_MASK, MAC_DBG_SEL);

	for (index = 0; index < 15; index++) {
		val32 = rtw89_read32(rtwdev, R_AX_DBG_PORT_SEL);
		rtw89_err(rtwdev, "[ERR]fw PC = 0x%x\n", val32);
		fsleep(10);
	}
}

static void rtw89_fw_dl_fail_dump(struct rtw89_dev *rtwdev)
{
	u32 val32;
	u16 val16;

	val32 = rtw89_read32(rtwdev, R_AX_WCPU_FW_CTRL);
	rtw89_err(rtwdev, "[ERR]fwdl 0x1E0 = 0x%x\n", val32);

	val16 = rtw89_read16(rtwdev, R_AX_BOOT_DBG + 2);
	rtw89_err(rtwdev, "[ERR]fwdl 0x83F2 = 0x%x\n", val16);

	rtw89_fw_prog_cnt_dump(rtwdev);
}

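/* Full firmware download sequence: parse the image, wait for the H2C path,
 * push the header, stream all sections, then reset the H2C/C2H sequence
 * counters. Any failure dumps the FWDL status registers and program counter.
 */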
int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type)
{
	struct rtw89_fw_info *fw_info = &rtwdev->fw;
	struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type);
	struct rtw89_fw_bin_info info;
	const u8 *fw = fw_suit->data;
	u32 len = fw_suit->size;
	u8 val;
	int ret;

	if (!fw || !len) {
		rtw89_err(rtwdev, "fw type %d isn't recognized\n", type);
		return -ENOENT;
	}

	ret = rtw89_fw_hdr_parser(rtwdev, fw, len, &info);
	if (ret) {
		rtw89_err(rtwdev, "parse fw header fail\n");
		goto fwdl_err;
	}

	ret = read_poll_timeout_atomic(rtw89_read8, val, val & B_AX_H2C_PATH_RDY,
				       1, FWDL_WAIT_CNT, false,
				       rtwdev, R_AX_WCPU_FW_CTRL);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]H2C path ready\n");
		goto fwdl_err;
	}

	ret = rtw89_fw_download_hdr(rtwdev, fw, info.hdr_len);
	if (ret) {
		ret = -EBUSY;
		goto fwdl_err;
	}

	ret = rtw89_fw_download_main(rtwdev, fw, &info);
	if (ret) {
		ret = -EBUSY;
		goto fwdl_err;
	}

	fw_info->h2c_seq = 0;
	fw_info->rec_seq = 0;
	rtwdev->mac.rpwm_seq_num = RPWM_SEQ_NUM_MAX;
	rtwdev->mac.cpwm_seq_num = CPWM_SEQ_NUM_MAX;

	return ret;

fwdl_err:
	rtw89_fw_dl_fail_dump(rtwdev);
	return ret;
}

int rtw89_wait_firmware_completion(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_info *fw = &rtwdev->fw;

	wait_for_completion(&fw->completion);
	if (!fw->firmware)
		return -EINVAL;

	return 0;
}

static void rtw89_load_firmware_cb(const struct firmware *firmware, void *context)
{
	struct rtw89_fw_info *fw = context;
	struct rtw89_dev *rtwdev = fw->rtwdev;

	if (!firmware || !firmware->data) {
		rtw89_err(rtwdev, "failed to request firmware\n");
		complete_all(&fw->completion);
		return;
	}

	fw->firmware = firmware;
	complete_all(&fw->completion);
}

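/* Request the firmware image asynchronously; rtw89_load_firmware_cb()
 * completes fw->completion whether or not the request succeeded, and
 * rtw89_wait_firmware_completion() reports failure via a NULL fw->firmware.
 */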
int rtw89_load_firmware(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_info *fw = &rtwdev->fw;
	const char *fw_name = rtwdev->chip->fw_name;
	int ret;

	fw->rtwdev = rtwdev;
	init_completion(&fw->completion);

	ret = request_firmware_nowait(THIS_MODULE, true, fw_name, rtwdev->dev,
				      GFP_KERNEL, fw, rtw89_load_firmware_cb);
	if (ret) {
		rtw89_err(rtwdev, "failed to async firmware request\n");
		return ret;
	}

	return 0;
}

void rtw89_unload_firmware(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_info *fw = &rtwdev->fw;

	rtw89_wait_firmware_completion(rtwdev);

	if (fw->firmware)
		release_firmware(fw->firmware);
}

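/* Push the address CAM and BSSID CAM entries for a vif to the firmware. */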
#define H2C_CAM_LEN 60
int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
	struct sk_buff *skb;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_CAM_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c cam\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_CAM_LEN);
	rtw89_cam_fill_addr_cam_info(rtwdev, rtwvif, skb->data);
	rtw89_cam_fill_bssid_cam_info(rtwdev, rtwvif, skb->data);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_ADDR_CAM_UPDATE,
			      H2C_FUNC_MAC_ADDR_CAM_UPD, 0, 1,
			      H2C_CAM_LEN);

	if (rtw89_h2c_tx(rtwdev, skb, false)) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return -EBUSY;
}

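/* Program a block-ack CAM entry. Buffer sizes above 64 select a larger
 * bitmap format (presumably for HE peers); the SSN is supplied by the
 * driver since the init-request flag, which would let hardware pick the
 * SSN, is left cleared.
 */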
#define H2C_BA_CAM_LEN 4
int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, bool valid, u8 macid,
			struct ieee80211_ampdu_params *params)
{
	struct sk_buff *skb;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_BA_CAM_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_BA_CAM_LEN);
	SET_BA_CAM_MACID(skb->data, macid);
	if (!valid)
		goto end;
	SET_BA_CAM_VALID(skb->data, valid);
	SET_BA_CAM_TID(skb->data, params->tid);
	if (params->buf_size > 64)
		SET_BA_CAM_BMAP_SIZE(skb->data, 4);
	else
		SET_BA_CAM_BMAP_SIZE(skb->data, 0);
	/* If init req is set, hw will set the ssn */
	SET_BA_CAM_INIT_REQ(skb->data, 0);
	SET_BA_CAM_SSN(skb->data, params->ssn);

end:
	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_BA_CAM,
			      H2C_FUNC_MAC_BA_CAM, 0, 1,
			      H2C_BA_CAM_LEN);

	if (rtw89_h2c_tx(rtwdev, skb, false)) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return -EBUSY;
}

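/* Configure firmware logging: when enabled, route the init/task/PS/error
 * components to the host over C2H; when disabled, mask all components.
 */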
#define H2C_LOG_CFG_LEN 12
int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable)
{
	struct sk_buff *skb;
	u32 comp = enable ? BIT(RTW89_FW_LOG_COMP_INIT) | BIT(RTW89_FW_LOG_COMP_TASK) |
			    BIT(RTW89_FW_LOG_COMP_PS) | BIT(RTW89_FW_LOG_COMP_ERROR) : 0;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_LOG_CFG_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw log cfg\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_LOG_CFG_LEN);
	SET_LOG_CFG_LEVEL(skb->data, RTW89_FW_LOG_LEVEL_SER);
	SET_LOG_CFG_PATH(skb->data, BIT(RTW89_FW_LOG_LEVEL_C2H));
	SET_LOG_CFG_COMP(skb->data, comp);
	SET_LOG_CFG_COMP_EXT(skb->data, 0);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_FW_INFO,
			      H2C_FUNC_LOG_CFG, 0, 0,
			      H2C_LOG_CFG_LEN);

	if (rtw89_h2c_tx(rtwdev, skb, false)) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return -EBUSY;
}

#define H2C_GENERAL_PKT_LEN 6
#define H2C_GENERAL_PKT_ID_UND 0xff
int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev, u8 macid)
{
	struct sk_buff *skb;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_GENERAL_PKT_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c general packet\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_GENERAL_PKT_LEN);
	SET_GENERAL_PKT_MACID(skb->data, macid);
	SET_GENERAL_PKT_PROBRSP_ID(skb->data, H2C_GENERAL_PKT_ID_UND);
	SET_GENERAL_PKT_PSPOLL_ID(skb->data, H2C_GENERAL_PKT_ID_UND);
	SET_GENERAL_PKT_NULL_ID(skb->data, H2C_GENERAL_PKT_ID_UND);
	SET_GENERAL_PKT_QOS_NULL_ID(skb->data, H2C_GENERAL_PKT_ID_UND);
	SET_GENERAL_PKT_CTS2SELF_ID(skb->data, H2C_GENERAL_PKT_ID_UND);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_FW_INFO,
			      H2C_FUNC_MAC_GENERAL_PKT, 0, 1,
			      H2C_GENERAL_PKT_LEN);

	if (rtw89_h2c_tx(rtwdev, skb, false)) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return -EBUSY;
}

#define H2C_LPS_PARM_LEN 8
int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev,
			  struct rtw89_lps_parm *lps_param)
{
	struct sk_buff *skb;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_LPS_PARM_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c lps\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_LPS_PARM_LEN);

	SET_LPS_PARM_MACID(skb->data, lps_param->macid);
	SET_LPS_PARM_PSMODE(skb->data, lps_param->psmode);
	SET_LPS_PARM_LASTRPWM(skb->data, lps_param->lastrpwm);
	SET_LPS_PARM_RLBM(skb->data, 1);
	SET_LPS_PARM_SMARTPS(skb->data, 1);
	SET_LPS_PARM_AWAKEINTERVAL(skb->data, 1);
	SET_LPS_PARM_VOUAPSD(skb->data, 0);
	SET_LPS_PARM_VIUAPSD(skb->data, 0);
	SET_LPS_PARM_BEUAPSD(skb->data, 0);
	SET_LPS_PARM_BKUAPSD(skb->data, 0);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_PS,
			      H2C_FUNC_MAC_LPS_PARM, 0, 1,
			      H2C_LPS_PARM_LEN);

	if (rtw89_h2c_tx(rtwdev, skb, false)) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return -EBUSY;
}

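/* Write a default CMAC control table entry for a MAC ID: the NTX path
 * enable and path-B map follow the configured TX antennas, everything else
 * is left at its zero default.
 */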
#define H2C_CMC_TBL_LEN 68
int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev, u8 macid)
{
	struct rtw89_hal *hal = &rtwdev->hal;
	struct sk_buff *skb;
	u8 ntx_path = hal->antenna_tx ? hal->antenna_tx : RF_B;
	u8 map_b = hal->antenna_tx == RF_AB ? 1 : 0;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_CMC_TBL_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c cmac table\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_CMC_TBL_LEN);
	SET_CTRL_INFO_MACID(skb->data, macid);
	SET_CTRL_INFO_OPERATION(skb->data, 1);
	SET_CMC_TBL_TXPWR_MODE(skb->data, 0);
	SET_CMC_TBL_NTX_PATH_EN(skb->data, ntx_path);
	SET_CMC_TBL_PATH_MAP_A(skb->data, 0);
	SET_CMC_TBL_PATH_MAP_B(skb->data, map_b);
	SET_CMC_TBL_PATH_MAP_C(skb->data, 0);
	SET_CMC_TBL_PATH_MAP_D(skb->data, 0);
	SET_CMC_TBL_ANTSEL_A(skb->data, 0);
	SET_CMC_TBL_ANTSEL_B(skb->data, 0);
	SET_CMC_TBL_ANTSEL_C(skb->data, 0);
	SET_CMC_TBL_ANTSEL_D(skb->data, 0);
	SET_CMC_TBL_DOPPLER_CTRL(skb->data, 0);
	SET_CMC_TBL_TXPWR_TOLERENCE(skb->data, 0);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
			      H2C_FUNC_MAC_CCTLINFO_UD, 0, 1,
			      H2C_CMC_TBL_LEN);

	if (rtw89_h2c_tx(rtwdev, skb, false)) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return -EBUSY;
}

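/* Derive the nominal packet padding per bandwidth from the peer's HE PPE
 * thresholds: when no PPE threshold field is present, the PHY capability
 * nominal padding is used for every bandwidth; otherwise each RU index in
 * the bitmap is decoded into its 16us/8us constellation thresholds and
 * mapped to a padding code (0/1/2).
 */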
static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev,
				     struct ieee80211_sta *sta, u8 *pads)
{
	bool ppe_th;
	u8 ppe16, ppe8;
	u8 nss = min(sta->rx_nss, rtwdev->hal.tx_nss) - 1;
	u8 ppe_thres_hdr = sta->he_cap.ppe_thres[0];
	u8 ru_bitmap;
	u8 n, idx, sh;
	u16 ppe;
	int i;

	if (!sta->he_cap.has_he)
		return;

	ppe_th = FIELD_GET(IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
			   sta->he_cap.he_cap_elem.phy_cap_info[6]);
	if (!ppe_th) {
		u8 pad;

		pad = FIELD_GET(IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_MASK,
				sta->he_cap.he_cap_elem.phy_cap_info[9]);

		for (i = 0; i < RTW89_PPE_BW_NUM; i++)
			pads[i] = pad;

		/* No PPE thresholds to parse; keep the capability-based
		 * padding instead of decoding undefined threshold bytes.
		 */
		return;
	}

	ru_bitmap = FIELD_GET(IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK, ppe_thres_hdr);
	n = hweight8(ru_bitmap);
	n = 7 + (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) * nss;

	for (i = 0; i < RTW89_PPE_BW_NUM; i++) {
		if (!(ru_bitmap & BIT(i))) {
			pads[i] = 1;
			continue;
		}

		idx = n >> 3;
		sh = n & 7;
		n += IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2;

		ppe = le16_to_cpu(*((__le16 *)&sta->he_cap.ppe_thres[idx]));
		ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
		sh += IEEE80211_PPE_THRES_INFO_PPET_SIZE;
		ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;

		if (ppe16 != 7 && ppe8 == 7)
			pads[i] = 2;
		else if (ppe8 != 7)
			pads[i] = 1;
		else
			pads[i] = 0;
	}
}

int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
				struct ieee80211_vif *vif,
				struct ieee80211_sta *sta)
{
	struct rtw89_hal *hal = &rtwdev->hal;
	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
	struct sk_buff *skb;
	u8 pads[RTW89_PPE_BW_NUM];

	memset(pads, 0, sizeof(pads));
	__get_sta_he_pkt_padding(rtwdev, sta, pads);

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_CMC_TBL_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c cmac table\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_CMC_TBL_LEN);
	SET_CTRL_INFO_MACID(skb->data, rtwsta->mac_id);
	SET_CTRL_INFO_OPERATION(skb->data, 1);
	SET_CMC_TBL_DISRTSFB(skb->data, 1);
	SET_CMC_TBL_DISDATAFB(skb->data, 1);
	if (hal->current_band_type == RTW89_BAND_2G)
		SET_CMC_TBL_RTS_RTY_LOWEST_RATE(skb->data, RTW89_HW_RATE_CCK1);
	else
		SET_CMC_TBL_RTS_RTY_LOWEST_RATE(skb->data, RTW89_HW_RATE_OFDM6);
	SET_CMC_TBL_RTS_TXCNT_LMT_SEL(skb->data, 0);
	SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 0);
	if (vif->type == NL80211_IFTYPE_STATION)
		SET_CMC_TBL_ULDL(skb->data, 1);
	else
		SET_CMC_TBL_ULDL(skb->data, 0);
	SET_CMC_TBL_MULTI_PORT_ID(skb->data, rtwvif->port);
	SET_CMC_TBL_NOMINAL_PKT_PADDING(skb->data, pads[RTW89_CHANNEL_WIDTH_20]);
	SET_CMC_TBL_NOMINAL_PKT_PADDING40(skb->data, pads[RTW89_CHANNEL_WIDTH_40]);
	SET_CMC_TBL_NOMINAL_PKT_PADDING80(skb->data, pads[RTW89_CHANNEL_WIDTH_80]);
	SET_CMC_TBL_BSR_QUEUE_SIZE_FORMAT(skb->data, sta->he_cap.has_he);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
			      H2C_FUNC_MAC_CCTLINFO_UD, 0, 1,
			      H2C_CMC_TBL_LEN);

	if (rtw89_h2c_tx(rtwdev, skb, false)) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return -EBUSY;
}

int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev,
				 struct rtw89_sta *rtwsta)
{
	struct sk_buff *skb;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_CMC_TBL_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c cmac table\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_CMC_TBL_LEN);
	SET_CTRL_INFO_MACID(skb->data, rtwsta->mac_id);
	SET_CTRL_INFO_OPERATION(skb->data, 1);
	if (rtwsta->cctl_tx_time) {
		SET_CMC_TBL_AMPDU_TIME_SEL(skb->data, 1);
		SET_CMC_TBL_AMPDU_MAX_TIME(skb->data, rtwsta->ampdu_max_time);
	}
	if (rtwsta->cctl_tx_retry_limit) {
		SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 1);
		SET_CMC_TBL_DATA_TX_CNT_LMT(skb->data, rtwsta->data_tx_cnt_lmt);
	}

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
			      H2C_FUNC_MAC_CCTLINFO_UD, 0, 1,
			      H2C_CMC_TBL_LEN);

	if (rtw89_h2c_tx(rtwdev, skb, false)) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return -EBUSY;
}

#define H2C_VIF_MAINTAIN_LEN 4
int rtw89_fw_h2c_vif_maintain(struct rtw89_dev *rtwdev,
			      struct rtw89_vif *rtwvif,
			      enum rtw89_upd_mode upd_mode)
{
	struct sk_buff *skb;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_VIF_MAINTAIN_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c role maintain\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_VIF_MAINTAIN_LEN);
	SET_FWROLE_MAINTAIN_MACID(skb->data, rtwvif->mac_id);
	SET_FWROLE_MAINTAIN_SELF_ROLE(skb->data, rtwvif->self_role);
	SET_FWROLE_MAINTAIN_UPD_MODE(skb->data, upd_mode);
	SET_FWROLE_MAINTAIN_WIFI_ROLE(skb->data, rtwvif->wifi_role);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
			      H2C_FUNC_MAC_FWROLE_MAINTAIN, 0, 1,
			      H2C_VIF_MAINTAIN_LEN);

	if (rtw89_h2c_tx(rtwdev, skb, false)) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return -EBUSY;
}

#define H2C_JOIN_INFO_LEN 4
int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
			   u8 dis_conn)
{
	struct sk_buff *skb;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_JOIN_INFO_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_JOIN_INFO_LEN);
	SET_JOININFO_MACID(skb->data, rtwvif->mac_id);
	SET_JOININFO_OP(skb->data, dis_conn);
	SET_JOININFO_BAND(skb->data, rtwvif->mac_idx);
	SET_JOININFO_WMM(skb->data, rtwvif->wmm);
	SET_JOININFO_TGR(skb->data, rtwvif->trigger);
	SET_JOININFO_ISHESTA(skb->data, 0);
	SET_JOININFO_DLBW(skb->data, 0);
	SET_JOININFO_TF_MAC_PAD(skb->data, 0);
	SET_JOININFO_DL_T_PE(skb->data, 0);
	SET_JOININFO_PORT_ID(skb->data, rtwvif->port);
	SET_JOININFO_NET_TYPE(skb->data, rtwvif->net_type);
	SET_JOININFO_WIFI_ROLE(skb->data, rtwvif->wifi_role);
	SET_JOININFO_SELF_ROLE(skb->data, rtwvif->self_role);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
			      H2C_FUNC_MAC_JOININFO, 0, 1,
			      H2C_JOIN_INFO_LEN);

	if (rtw89_h2c_tx(rtwdev, skb, false)) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return -EBUSY;
}

int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp,
			     bool pause)
{
	struct rtw89_fw_macid_pause_grp h2c = {{0}};
	u8 len = sizeof(struct rtw89_fw_macid_pause_grp);
	struct sk_buff *skb;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c macid pause\n");
		return -ENOMEM;
	}
	h2c.mask_grp[grp] = cpu_to_le32(BIT(sh));
	if (pause)
		h2c.pause_grp[grp] = cpu_to_le32(BIT(sh));
	skb_put_data(skb, &h2c, len);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
			      H2C_FUNC_MAC_MACID_PAUSE, 1, 0,
			      len);

	if (rtw89_h2c_tx(rtwdev, skb, false)) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return -EBUSY;
}

#define H2C_EDCA_LEN 12
int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
			  u8 ac, u32 val)
{
	struct sk_buff *skb;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_EDCA_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c edca\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_EDCA_LEN);
	RTW89_SET_EDCA_SEL(skb->data, 0);
	RTW89_SET_EDCA_BAND(skb->data, rtwvif->mac_idx);
	RTW89_SET_EDCA_WMM(skb->data, 0);
	RTW89_SET_EDCA_AC(skb->data, ac);
	RTW89_SET_EDCA_PARAM(skb->data, val);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
			      H2C_FUNC_USR_EDCA, 0, 1,
			      H2C_EDCA_LEN);

	if (rtw89_h2c_tx(rtwdev, skb, false)) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return -EBUSY;
}

#define H2C_OFLD_CFG_LEN 8
int rtw89_fw_h2c_set_ofld_cfg(struct rtw89_dev *rtwdev)
{
	static const u8 cfg[] = {0x09, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x00, 0x00};
	struct sk_buff *skb;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_OFLD_CFG_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c ofld\n");
		return -ENOMEM;
	}
	skb_put_data(skb, cfg, H2C_OFLD_CFG_LEN);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
			      H2C_FUNC_OFLD_CFG, 0, 1,
			      H2C_OFLD_CFG_LEN);

	if (rtw89_h2c_tx(rtwdev, skb, false)) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return -EBUSY;
}

#define H2C_RA_LEN 16
int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi)
{
	struct sk_buff *skb;
	u8 *cmd;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_RA_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c ra\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_RA_LEN);
	cmd = skb->data;
	rtw89_debug(rtwdev, RTW89_DBG_RA,
		    "ra cmd msk: %llx ", ra->ra_mask);

	RTW89_SET_FWCMD_RA_MODE(cmd, ra->mode_ctrl);
	RTW89_SET_FWCMD_RA_BW_CAP(cmd, ra->bw_cap);
	RTW89_SET_FWCMD_RA_MACID(cmd, ra->macid);
	RTW89_SET_FWCMD_RA_DCM(cmd, ra->dcm_cap);
	RTW89_SET_FWCMD_RA_ER(cmd, ra->er_cap);
	RTW89_SET_FWCMD_RA_INIT_RATE_LV(cmd, ra->init_rate_lv);
	RTW89_SET_FWCMD_RA_UPD_ALL(cmd, ra->upd_all);
	RTW89_SET_FWCMD_RA_SGI(cmd, ra->en_sgi);
	RTW89_SET_FWCMD_RA_LDPC(cmd, ra->ldpc_cap);
	RTW89_SET_FWCMD_RA_STBC(cmd, ra->stbc_cap);
	RTW89_SET_FWCMD_RA_SS_NUM(cmd, ra->ss_num);
	RTW89_SET_FWCMD_RA_GILTF(cmd, ra->giltf);
	RTW89_SET_FWCMD_RA_UPD_BW_NSS_MASK(cmd, ra->upd_bw_nss_mask);
	RTW89_SET_FWCMD_RA_UPD_MASK(cmd, ra->upd_mask);
	RTW89_SET_FWCMD_RA_MASK_0(cmd, FIELD_GET(MASKBYTE0, ra->ra_mask));
	RTW89_SET_FWCMD_RA_MASK_1(cmd, FIELD_GET(MASKBYTE1, ra->ra_mask));
	RTW89_SET_FWCMD_RA_MASK_2(cmd, FIELD_GET(MASKBYTE2, ra->ra_mask));
	RTW89_SET_FWCMD_RA_MASK_3(cmd, FIELD_GET(MASKBYTE3, ra->ra_mask));
	RTW89_SET_FWCMD_RA_MASK_4(cmd, FIELD_GET(MASKBYTE4, ra->ra_mask));

	if (csi) {
		RTW89_SET_FWCMD_RA_BFEE_CSI_CTL(cmd, 1);
		RTW89_SET_FWCMD_RA_BAND_NUM(cmd, ra->band_num);
		RTW89_SET_FWCMD_RA_CR_TBL_SEL(cmd, ra->cr_tbl_sel);
		RTW89_SET_FWCMD_RA_FIXED_CSI_RATE_EN(cmd, ra->fixed_csi_rate_en);
		RTW89_SET_FWCMD_RA_RA_CSI_RATE_EN(cmd, ra->ra_csi_rate_en);
		RTW89_SET_FWCMD_RA_FIXED_CSI_MCS_SS_IDX(cmd, ra->csi_mcs_ss_idx);
		RTW89_SET_FWCMD_RA_FIXED_CSI_MODE(cmd, ra->csi_mode);
		RTW89_SET_FWCMD_RA_FIXED_CSI_GI_LTF(cmd, ra->csi_gi_ltf);
		RTW89_SET_FWCMD_RA_FIXED_CSI_BW(cmd, ra->csi_bw);
	}

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RA,
			      H2C_FUNC_OUTSRC_RA_MACIDCFG, 0, 0,
			      H2C_RA_LEN);

	if (rtw89_h2c_tx(rtwdev, skb, false)) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return -EBUSY;
}

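/* The cxdrv_* helpers below report driver-side BT-coexistence state
 * (module/antenna setup, WiFi role map, manual control and RFK status)
 * to the firmware coex mechanism through SET_DRV_INFO H2Cs.
 */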
#define H2C_LEN_CXDRVHDR 2
#define H2C_LEN_CXDRVINFO_INIT (12 + H2C_LEN_CXDRVHDR)
int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev)
{
	struct rtw89_btc *btc = &rtwdev->btc;
	struct rtw89_btc_dm *dm = &btc->dm;
	struct rtw89_btc_init_info *init_info = &dm->init_info;
	struct rtw89_btc_module *module = &init_info->module;
	struct rtw89_btc_ant_info *ant = &module->ant;
	struct sk_buff *skb;
	u8 *cmd;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_LEN_CXDRVINFO_INIT);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_LEN_CXDRVINFO_INIT);
	cmd = skb->data;

	RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_INIT);
	RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_INIT - H2C_LEN_CXDRVHDR);

	RTW89_SET_FWCMD_CXINIT_ANT_TYPE(cmd, ant->type);
	RTW89_SET_FWCMD_CXINIT_ANT_NUM(cmd, ant->num);
	RTW89_SET_FWCMD_CXINIT_ANT_ISO(cmd, ant->isolation);
	RTW89_SET_FWCMD_CXINIT_ANT_POS(cmd, ant->single_pos);
	RTW89_SET_FWCMD_CXINIT_ANT_DIVERSITY(cmd, ant->diversity);

	RTW89_SET_FWCMD_CXINIT_MOD_RFE(cmd, module->rfe_type);
	RTW89_SET_FWCMD_CXINIT_MOD_CV(cmd, module->cv);
	RTW89_SET_FWCMD_CXINIT_MOD_BT_SOLO(cmd, module->bt_solo);
	RTW89_SET_FWCMD_CXINIT_MOD_BT_POS(cmd, module->bt_pos);
	RTW89_SET_FWCMD_CXINIT_MOD_SW_TYPE(cmd, module->switch_type);

	RTW89_SET_FWCMD_CXINIT_WL_GCH(cmd, init_info->wl_guard_ch);
	RTW89_SET_FWCMD_CXINIT_WL_ONLY(cmd, init_info->wl_only);
	RTW89_SET_FWCMD_CXINIT_WL_INITOK(cmd, init_info->wl_init_ok);
	RTW89_SET_FWCMD_CXINIT_DBCC_EN(cmd, init_info->dbcc_en);
	RTW89_SET_FWCMD_CXINIT_CX_OTHER(cmd, init_info->cx_other);
	RTW89_SET_FWCMD_CXINIT_BT_ONLY(cmd, init_info->bt_only);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, BTFC_SET,
			      SET_DRV_INFO, 0, 0,
			      H2C_LEN_CXDRVINFO_INIT);

	if (rtw89_h2c_tx(rtwdev, skb, false)) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return -EBUSY;
}

#define H2C_LEN_CXDRVINFO_ROLE (4 + 12 * RTW89_MAX_HW_PORT_NUM + H2C_LEN_CXDRVHDR)
int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev)
{
	struct rtw89_btc *btc = &rtwdev->btc;
	struct rtw89_btc_wl_info *wl = &btc->cx.wl;
	struct rtw89_btc_wl_role_info *role_info = &wl->role_info;
	struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role;
	struct rtw89_btc_wl_active_role *active = role_info->active_role;
	struct sk_buff *skb;
	u8 *cmd;
	int i;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_LEN_CXDRVINFO_ROLE);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_LEN_CXDRVINFO_ROLE);
	cmd = skb->data;

	RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE);
	RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_ROLE - H2C_LEN_CXDRVHDR);

	RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt);
	RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode);

	RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none);
	RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station);
	RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap);
	RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap);
	RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc);
	RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master);
	RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh);
	RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter);
	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device);
	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc);
	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go);
	RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan);

	for (i = 0; i < RTW89_MAX_HW_PORT_NUM; i++, active++) {
		RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i);
		RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i);
		RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i);
		RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i);
		RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i);
		RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i);
		RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i);
		RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i);
		RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i);
		RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i);
		RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i);
		RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i);
		RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i);
	}

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, BTFC_SET,
			      SET_DRV_INFO, 0, 0,
			      H2C_LEN_CXDRVINFO_ROLE);

	if (rtw89_h2c_tx(rtwdev, skb, false)) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return -EBUSY;
}

#define H2C_LEN_CXDRVINFO_CTRL (4 + H2C_LEN_CXDRVHDR)
int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev)
{
	struct rtw89_btc *btc = &rtwdev->btc;
	struct rtw89_btc_ctrl *ctrl = &btc->ctrl;
	struct sk_buff *skb;
	u8 *cmd;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_LEN_CXDRVINFO_CTRL);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_LEN_CXDRVINFO_CTRL);
	cmd = skb->data;

	RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_CTRL);
	RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_CTRL - H2C_LEN_CXDRVHDR);

	RTW89_SET_FWCMD_CXCTRL_MANUAL(cmd, ctrl->manual);
	RTW89_SET_FWCMD_CXCTRL_IGNORE_BT(cmd, ctrl->igno_bt);
	RTW89_SET_FWCMD_CXCTRL_ALWAYS_FREERUN(cmd, ctrl->always_freerun);
	RTW89_SET_FWCMD_CXCTRL_TRACE_STEP(cmd, ctrl->trace_step);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, BTFC_SET,
			      SET_DRV_INFO, 0, 0,
			      H2C_LEN_CXDRVINFO_CTRL);

	if (rtw89_h2c_tx(rtwdev, skb, false)) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return -EBUSY;
}

#define H2C_LEN_CXDRVINFO_RFK (4 + H2C_LEN_CXDRVHDR)
int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev)
{
	struct rtw89_btc *btc = &rtwdev->btc;
	struct rtw89_btc_wl_info *wl = &btc->cx.wl;
	struct rtw89_btc_wl_rfk_info *rfk_info = &wl->rfk_info;
	struct sk_buff *skb;
	u8 *cmd;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_LEN_CXDRVINFO_RFK);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_rfk\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_LEN_CXDRVINFO_RFK);
	cmd = skb->data;

	RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_RFK);
	RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_RFK - H2C_LEN_CXDRVHDR);

	RTW89_SET_FWCMD_CXRFK_STATE(cmd, rfk_info->state);
	RTW89_SET_FWCMD_CXRFK_PATH_MAP(cmd, rfk_info->path_map);
	RTW89_SET_FWCMD_CXRFK_PHY_MAP(cmd, rfk_info->phy_map);
	RTW89_SET_FWCMD_CXRFK_BAND(cmd, rfk_info->band);
	RTW89_SET_FWCMD_CXRFK_TYPE(cmd, rfk_info->type);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, BTFC_SET,
			      SET_DRV_INFO, 0, 0,
			      H2C_LEN_CXDRVINFO_RFK);

	if (rtw89_h2c_tx(rtwdev, skb, false)) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return -EBUSY;
}

int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev,
			struct rtw89_fw_h2c_rf_reg_info *info,
			u16 len, u8 page)
{
	struct sk_buff *skb;
	u8 class = info->rf_path == RF_PATH_A ?
		   H2C_CL_OUTSRC_RF_REG_A : H2C_CL_OUTSRC_RF_REG_B;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c rf reg\n");
		return -ENOMEM;
	}
	skb_put_data(skb, info->rtw89_phy_config_rf_h2c[page], len);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, class, page, 0, 0,
			      len);

	if (rtw89_h2c_tx(rtwdev, skb, false)) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return -EBUSY;
}

int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev,
			      u8 h2c_class, u8 h2c_func, u8 *buf, u16 len,
			      bool rack, bool dack)
{
	struct sk_buff *skb;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for raw with hdr\n");
		return -ENOMEM;
	}
	skb_put_data(skb, buf, len);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, h2c_class, h2c_func, rack, dack,
			      len);

	if (rtw89_h2c_tx(rtwdev, skb, false)) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return -EBUSY;
}

int rtw89_fw_h2c_raw(struct rtw89_dev *rtwdev, const u8 *buf, u16 len)
{
	struct sk_buff *skb;

	skb = rtw89_fw_h2c_alloc_skb_no_hdr(len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c raw\n");
		return -ENOMEM;
	}
	skb_put_data(skb, buf, len);

	if (rtw89_h2c_tx(rtwdev, skb, false)) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return -EBUSY;
}

void rtw89_fw_send_all_early_h2c(struct rtw89_dev *rtwdev)
{
	struct rtw89_early_h2c *early_h2c;

	lockdep_assert_held(&rtwdev->mutex);

	list_for_each_entry(early_h2c, &rtwdev->early_h2c_list, list) {
		rtw89_fw_h2c_raw(rtwdev, early_h2c->h2c, early_h2c->h2c_len);
	}
}

void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev)
{
	struct rtw89_early_h2c *early_h2c, *tmp;

	mutex_lock(&rtwdev->mutex);
	list_for_each_entry_safe(early_h2c, tmp, &rtwdev->early_h2c_list, list) {
		list_del(&early_h2c->list);
		kfree(early_h2c->h2c);
		kfree(early_h2c);
	}
	mutex_unlock(&rtwdev->mutex);
}

void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h)
{
	skb_queue_tail(&rtwdev->c2h_queue, c2h);
	ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work);
}

static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev,
				    struct sk_buff *skb)
{
	u8 category = RTW89_GET_C2H_CATEGORY(skb->data);
	u8 class = RTW89_GET_C2H_CLASS(skb->data);
	u8 func = RTW89_GET_C2H_FUNC(skb->data);
	u16 len = RTW89_GET_C2H_LEN(skb->data);
	bool dump = true;

	if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags))
		return;

	switch (category) {
	case RTW89_C2H_CAT_TEST:
		break;
	case RTW89_C2H_CAT_MAC:
		rtw89_mac_c2h_handle(rtwdev, skb, len, class, func);
		if (class == RTW89_MAC_C2H_CLASS_INFO &&
		    func == RTW89_MAC_C2H_FUNC_C2H_LOG)
			dump = false;
		break;
	case RTW89_C2H_CAT_OUTSRC:
		if (class >= RTW89_PHY_C2H_CLASS_BTC_MIN &&
		    class <= RTW89_PHY_C2H_CLASS_BTC_MAX)
			rtw89_btc_c2h_handle(rtwdev, skb, len, class, func);
		else
			rtw89_phy_c2h_handle(rtwdev, skb, len, class, func);
		break;
	}

	if (dump)
		rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "C2H: ", skb->data, skb->len);
}

void rtw89_fw_c2h_work(struct work_struct *work)
{
	struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
						c2h_work);
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&rtwdev->c2h_queue, skb, tmp) {
		skb_unlink(skb, &rtwdev->c2h_queue);
		mutex_lock(&rtwdev->mutex);
		rtw89_fw_c2h_cmd_handle(rtwdev, skb);
		mutex_unlock(&rtwdev->mutex);
		dev_kfree_skb_any(skb);
	}
}

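/* Register-based H2C/C2H exchange: small messages are passed through the
 * H2CREG/C2HREG data registers and handshaked via the *REG_CTRL trigger
 * bits, presumably so the host and firmware can still communicate when the
 * normal TX/RX rings are unavailable (e.g. early init or error recovery).
 */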
static int rtw89_fw_write_h2c_reg(struct rtw89_dev *rtwdev,
				  struct rtw89_mac_h2c_info *info)
{
	static const u32 h2c_reg[RTW89_H2CREG_MAX] = {
		R_AX_H2CREG_DATA0, R_AX_H2CREG_DATA1,
		R_AX_H2CREG_DATA2, R_AX_H2CREG_DATA3
	};
	u8 i, val, len;
	int ret;

	ret = read_poll_timeout(rtw89_read8, val, val == 0, 1000, 5000, false,
				rtwdev, R_AX_H2CREG_CTRL);
	if (ret) {
		rtw89_warn(rtwdev, "FW does not process h2c registers\n");
		return ret;
	}

	len = DIV_ROUND_UP(info->content_len + RTW89_H2CREG_HDR_LEN,
			   sizeof(info->h2creg[0]));

	RTW89_SET_H2CREG_HDR_FUNC(&info->h2creg[0], info->id);
	RTW89_SET_H2CREG_HDR_LEN(&info->h2creg[0], len);
	for (i = 0; i < RTW89_H2CREG_MAX; i++)
		rtw89_write32(rtwdev, h2c_reg[i], info->h2creg[i]);

	rtw89_write8(rtwdev, R_AX_H2CREG_CTRL, B_AX_H2CREG_TRIGGER);

	return 0;
}

static int rtw89_fw_read_c2h_reg(struct rtw89_dev *rtwdev,
				 struct rtw89_mac_c2h_info *info)
{
	static const u32 c2h_reg[RTW89_C2HREG_MAX] = {
		R_AX_C2HREG_DATA0, R_AX_C2HREG_DATA1,
		R_AX_C2HREG_DATA2, R_AX_C2HREG_DATA3
	};
	int ret;
	u8 i, val;

	info->id = RTW89_FWCMD_C2HREG_FUNC_NULL;

	ret = read_poll_timeout_atomic(rtw89_read8, val, val, 1,
				       RTW89_C2H_TIMEOUT, false, rtwdev,
				       R_AX_C2HREG_CTRL);
	if (ret) {
		rtw89_warn(rtwdev, "c2h reg timeout\n");
		return ret;
	}

	for (i = 0; i < RTW89_C2HREG_MAX; i++)
		info->c2hreg[i] = rtw89_read32(rtwdev, c2h_reg[i]);

	rtw89_write8(rtwdev, R_AX_C2HREG_CTRL, 0);

	info->id = RTW89_GET_C2H_HDR_FUNC(*info->c2hreg);
	info->content_len = (RTW89_GET_C2H_HDR_LEN(*info->c2hreg) << 2) -
				RTW89_C2HREG_HDR_LEN;

	return 0;
}

int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev,
		     struct rtw89_mac_h2c_info *h2c_info,
		     struct rtw89_mac_c2h_info *c2h_info)
{
	int ret;

	if (h2c_info && h2c_info->id != RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE)
		lockdep_assert_held(&rtwdev->mutex);

	if (!h2c_info && !c2h_info)
		return -EINVAL;

	if (!h2c_info)
		goto recv_c2h;

	ret = rtw89_fw_write_h2c_reg(rtwdev, h2c_info);
	if (ret)
		return ret;

recv_c2h:
	if (!c2h_info)
		return 0;

	ret = rtw89_fw_read_c2h_reg(rtwdev, c2h_info);
	if (ret)
		return ret;

	return 0;
}

void rtw89_fw_st_dbg_dump(struct rtw89_dev *rtwdev)
{
	if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) {
		rtw89_err(rtwdev, "[ERR]pwr is off\n");
		return;
	}

	rtw89_info(rtwdev, "FW status = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM0));
	rtw89_info(rtwdev, "FW BADADDR = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM1));
	rtw89_info(rtwdev, "FW EPC/RA = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM2));
	rtw89_info(rtwdev, "FW MISC = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM3));
	rtw89_info(rtwdev, "R_AX_HALT_C2H = 0x%x\n",
		   rtw89_read32(rtwdev, R_AX_HALT_C2H));
	rtw89_info(rtwdev, "R_AX_SER_DBG_INFO = 0x%x\n",
		   rtw89_read32(rtwdev, R_AX_SER_DBG_INFO));

	rtw89_fw_prog_cnt_dump(rtwdev);
}