1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright(c) 2019-2020 Realtek Corporation
3 */
4
5 #include "cam.h"
6 #include "chan.h"
7 #include "coex.h"
8 #include "debug.h"
9 #include "fw.h"
10 #include "mac.h"
11 #include "phy.h"
12 #include "ps.h"
13 #include "reg.h"
14 #include "util.h"
15
16 static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev,
17 struct sk_buff *skb);
18 static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb,
19 struct rtw89_wait_info *wait, unsigned int cond);
20
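/* Allocate an skb for an H2C command, reserving headroom for the chip's
 * H2C descriptor and, when @header is true, for the command header that
 * rtw89_h2c_pkt_set_hdr() pushes later.
 */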
21 static struct sk_buff *rtw89_fw_h2c_alloc_skb(struct rtw89_dev *rtwdev, u32 len,
22 bool header)
23 {
24 struct sk_buff *skb;
25 u32 header_len = 0;
26 u32 h2c_desc_size = rtwdev->chip->h2c_desc_size;
27
28 if (header)
29 header_len = H2C_HEADER_LEN;
30
31 skb = dev_alloc_skb(len + header_len + h2c_desc_size);
32 if (!skb)
33 return NULL;
34 skb_reserve(skb, header_len + h2c_desc_size);
35 memset(skb->data, 0, len);
36
37 return skb;
38 }
39
40 struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(struct rtw89_dev *rtwdev, u32 len)
41 {
42 return rtw89_fw_h2c_alloc_skb(rtwdev, len, true);
43 }
44
45 struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(struct rtw89_dev *rtwdev, u32 len)
46 {
47 return rtw89_fw_h2c_alloc_skb(rtwdev, len, false);
48 }
49
50 static u8 _fw_get_rdy(struct rtw89_dev *rtwdev)
51 {
52 u8 val = rtw89_read8(rtwdev, R_AX_WCPU_FW_CTRL);
53
54 return FIELD_GET(B_AX_WCPU_FWDL_STS_MASK, val);
55 }
56
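/* Poll the WCPU firmware-download status for up to FWDL_WAIT_CNT
 * microseconds; on timeout, translate the last status value into a
 * specific download error.
 */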
57 #define FWDL_WAIT_CNT 400000
58 int rtw89_fw_check_rdy(struct rtw89_dev *rtwdev)
59 {
60 u8 val;
61 int ret;
62
63 ret = read_poll_timeout_atomic(_fw_get_rdy, val,
64 val == RTW89_FWDL_WCPU_FW_INIT_RDY,
65 1, FWDL_WAIT_CNT, false, rtwdev);
66 if (ret) {
67 switch (val) {
68 case RTW89_FWDL_CHECKSUM_FAIL:
69 rtw89_err(rtwdev, "fw checksum fail\n");
70 return -EINVAL;
71
72 case RTW89_FWDL_SECURITY_FAIL:
73 rtw89_err(rtwdev, "fw security fail\n");
74 return -EINVAL;
75
76 case RTW89_FWDL_CV_NOT_MATCH:
77 rtw89_err(rtwdev, "fw cv not match\n");
78 return -EINVAL;
79
80 default:
81 return -EBUSY;
82 }
83 }
84
85 set_bit(RTW89_FLAG_FW_RDY, rtwdev->flags);
86
87 return 0;
88 }
89
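/* Parse a v0 firmware header: record each section's type, length
 * (including the checksum trailer when present) and download address,
 * accumulate the MSS signature length of security sections, and check
 * that header, sections and signatures add up to the firmware size.
 */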
90 static int rtw89_fw_hdr_parser_v0(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
91 struct rtw89_fw_bin_info *info)
92 {
93 const struct rtw89_fw_hdr *fw_hdr = (const struct rtw89_fw_hdr *)fw;
94 struct rtw89_fw_hdr_section_info *section_info;
95 const struct rtw89_fw_dynhdr_hdr *fwdynhdr;
96 const struct rtw89_fw_hdr_section *section;
97 const u8 *fw_end = fw + len;
98 const u8 *bin;
99 u32 base_hdr_len;
100 u32 mssc_len = 0;
101 u32 i;
102
103 if (!info)
104 return -EINVAL;
105
106 info->section_num = le32_get_bits(fw_hdr->w6, FW_HDR_W6_SEC_NUM);
107 base_hdr_len = struct_size(fw_hdr, sections, info->section_num);
108 info->dynamic_hdr_en = le32_get_bits(fw_hdr->w7, FW_HDR_W7_DYN_HDR);
109
110 if (info->dynamic_hdr_en) {
111 info->hdr_len = le32_get_bits(fw_hdr->w3, FW_HDR_W3_LEN);
112 info->dynamic_hdr_len = info->hdr_len - base_hdr_len;
113 fwdynhdr = (const struct rtw89_fw_dynhdr_hdr *)(fw + base_hdr_len);
114 if (le32_to_cpu(fwdynhdr->hdr_len) != info->dynamic_hdr_len) {
115 rtw89_err(rtwdev, "[ERR]invalid fw dynamic header len\n");
116 return -EINVAL;
117 }
118 } else {
119 info->hdr_len = base_hdr_len;
120 info->dynamic_hdr_len = 0;
121 }
122
123 bin = fw + info->hdr_len;
124
125 /* jump to section header */
126 section_info = info->section_info;
127 for (i = 0; i < info->section_num; i++) {
128 section = &fw_hdr->sections[i];
129 section_info->type =
130 le32_get_bits(section->w1, FWSECTION_HDR_W1_SECTIONTYPE);
131 if (section_info->type == FWDL_SECURITY_SECTION_TYPE) {
132 section_info->mssc =
133 le32_get_bits(section->w2, FWSECTION_HDR_W2_MSSC);
134 mssc_len += section_info->mssc * FWDL_SECURITY_SIGLEN;
135 } else {
136 section_info->mssc = 0;
137 }
138
139 section_info->len = le32_get_bits(section->w1, FWSECTION_HDR_W1_SEC_SIZE);
140 if (le32_get_bits(section->w1, FWSECTION_HDR_W1_CHECKSUM))
141 section_info->len += FWDL_SECTION_CHKSUM_LEN;
142 section_info->redl = le32_get_bits(section->w1, FWSECTION_HDR_W1_REDL);
143 section_info->dladdr =
144 le32_get_bits(section->w0, FWSECTION_HDR_W0_DL_ADDR) & 0x1fffffff;
145 section_info->addr = bin;
146 bin += section_info->len;
147 section_info++;
148 }
149
150 if (fw_end != bin + mssc_len) {
151 rtw89_err(rtwdev, "[ERR]fw bin size\n");
152 return -EINVAL;
153 }
154
155 return 0;
156 }
157
158 static int rtw89_fw_hdr_parser_v1(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
159 struct rtw89_fw_bin_info *info)
160 {
161 const struct rtw89_fw_hdr_v1 *fw_hdr = (const struct rtw89_fw_hdr_v1 *)fw;
162 struct rtw89_fw_hdr_section_info *section_info;
163 const struct rtw89_fw_dynhdr_hdr *fwdynhdr;
164 const struct rtw89_fw_hdr_section_v1 *section;
165 const u8 *fw_end = fw + len;
166 const u8 *bin;
167 u32 base_hdr_len;
168 u32 mssc_len = 0;
169 u32 i;
170
171 info->section_num = le32_get_bits(fw_hdr->w6, FW_HDR_V1_W6_SEC_NUM);
172 base_hdr_len = struct_size(fw_hdr, sections, info->section_num);
173 info->dynamic_hdr_en = le32_get_bits(fw_hdr->w7, FW_HDR_V1_W7_DYN_HDR);
174
175 if (info->dynamic_hdr_en) {
176 info->hdr_len = le32_get_bits(fw_hdr->w5, FW_HDR_V1_W5_HDR_SIZE);
177 info->dynamic_hdr_len = info->hdr_len - base_hdr_len;
178 fwdynhdr = (const struct rtw89_fw_dynhdr_hdr *)(fw + base_hdr_len);
179 if (le32_to_cpu(fwdynhdr->hdr_len) != info->dynamic_hdr_len) {
180 rtw89_err(rtwdev, "[ERR]invalid fw dynamic header len\n");
181 return -EINVAL;
182 }
183 } else {
184 info->hdr_len = base_hdr_len;
185 info->dynamic_hdr_len = 0;
186 }
187
188 bin = fw + info->hdr_len;
189
190 /* jump to section header */
191 section_info = info->section_info;
192 for (i = 0; i < info->section_num; i++) {
193 section = &fw_hdr->sections[i];
194 section_info->type =
195 le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_SECTIONTYPE);
196 if (section_info->type == FWDL_SECURITY_SECTION_TYPE) {
197 section_info->mssc =
198 le32_get_bits(section->w2, FWSECTION_HDR_V1_W2_MSSC);
199 mssc_len += section_info->mssc * FWDL_SECURITY_SIGLEN;
200 } else {
201 section_info->mssc = 0;
202 }
203
204 section_info->len =
205 le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_SEC_SIZE);
206 if (le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_CHECKSUM))
207 section_info->len += FWDL_SECTION_CHKSUM_LEN;
208 section_info->redl = le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_REDL);
209 section_info->dladdr =
210 le32_get_bits(section->w0, FWSECTION_HDR_V1_W0_DL_ADDR);
211 section_info->addr = bin;
212 bin += section_info->len;
213 section_info++;
214 }
215
216 if (fw_end != bin + mssc_len) {
217 rtw89_err(rtwdev, "[ERR]fw bin size\n");
218 return -EINVAL;
219 }
220
221 return 0;
222 }
223
224 static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev,
225 const struct rtw89_fw_suit *fw_suit,
226 struct rtw89_fw_bin_info *info)
227 {
228 const u8 *fw = fw_suit->data;
229 u32 len = fw_suit->size;
230
231 if (!fw || !len) {
232 rtw89_err(rtwdev, "fw type %d isn't recognized\n", fw_suit->type);
233 return -ENOENT;
234 }
235
236 switch (fw_suit->hdr_ver) {
237 case 0:
238 return rtw89_fw_hdr_parser_v0(rtwdev, fw, len, info);
239 case 1:
240 return rtw89_fw_hdr_parser_v1(rtwdev, fw, len, info);
241 default:
242 return -ENOENT;
243 }
244 }
245
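/* Locate a firmware suit of the given type inside the multi-firmware
 * (MFW) container. Files without the MFW signature are treated as
 * legacy single-firmware images, which support RTW89_FW_NORMAL only.
 */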
246 static
247 int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
248 struct rtw89_fw_suit *fw_suit, bool nowarn)
249 {
250 struct rtw89_fw_info *fw_info = &rtwdev->fw;
251 const struct firmware *firmware = fw_info->req.firmware;
252 const u8 *mfw = firmware->data;
253 u32 mfw_len = firmware->size;
254 const struct rtw89_mfw_hdr *mfw_hdr = (const struct rtw89_mfw_hdr *)mfw;
255 const struct rtw89_mfw_info *mfw_info;
256 int i;
257
258 if (mfw_hdr->sig != RTW89_MFW_SIG) {
259 rtw89_debug(rtwdev, RTW89_DBG_FW, "use legacy firmware\n");
260 /* legacy firmware supports the normal type only */
261 if (type != RTW89_FW_NORMAL)
262 return -EINVAL;
263 fw_suit->data = mfw;
264 fw_suit->size = mfw_len;
265 return 0;
266 }
267
268 for (i = 0; i < mfw_hdr->fw_nr; i++) {
269 mfw_info = &mfw_hdr->info[i];
270 if (mfw_info->type == type) {
271 if (mfw_info->cv == rtwdev->hal.cv && !mfw_info->mp)
272 goto found;
273 if (type == RTW89_FW_LOGFMT)
274 goto found;
275 }
276 }
277
278 if (!nowarn)
279 rtw89_err(rtwdev, "no suitable firmware found\n");
280 return -ENOENT;
281
282 found:
283 fw_suit->data = mfw + le32_to_cpu(mfw_info->shift);
284 fw_suit->size = le32_to_cpu(mfw_info->size);
285 return 0;
286 }
287
288 static u32 rtw89_mfw_get_size(struct rtw89_dev *rtwdev)
289 {
290 struct rtw89_fw_info *fw_info = &rtwdev->fw;
291 const struct firmware *firmware = fw_info->req.firmware;
292 const struct rtw89_mfw_hdr *mfw_hdr =
293 (const struct rtw89_mfw_hdr *)firmware->data;
294 const struct rtw89_mfw_info *mfw_info;
295 u32 size;
296
297 if (mfw_hdr->sig != RTW89_MFW_SIG) {
298 rtw89_warn(rtwdev, "not mfw format\n");
299 return 0;
300 }
301
302 mfw_info = &mfw_hdr->info[mfw_hdr->fw_nr - 1];
303 size = le32_to_cpu(mfw_info->shift) + le32_to_cpu(mfw_info->size);
304
305 return size;
306 }
307
308 static void rtw89_fw_update_ver_v0(struct rtw89_dev *rtwdev,
309 struct rtw89_fw_suit *fw_suit,
310 const struct rtw89_fw_hdr *hdr)
311 {
312 fw_suit->major_ver = le32_get_bits(hdr->w1, FW_HDR_W1_MAJOR_VERSION);
313 fw_suit->minor_ver = le32_get_bits(hdr->w1, FW_HDR_W1_MINOR_VERSION);
314 fw_suit->sub_ver = le32_get_bits(hdr->w1, FW_HDR_W1_SUBVERSION);
315 fw_suit->sub_idex = le32_get_bits(hdr->w1, FW_HDR_W1_SUBINDEX);
316 fw_suit->commitid = le32_get_bits(hdr->w2, FW_HDR_W2_COMMITID);
317 fw_suit->build_year = le32_get_bits(hdr->w5, FW_HDR_W5_YEAR);
318 fw_suit->build_mon = le32_get_bits(hdr->w4, FW_HDR_W4_MONTH);
319 fw_suit->build_date = le32_get_bits(hdr->w4, FW_HDR_W4_DATE);
320 fw_suit->build_hour = le32_get_bits(hdr->w4, FW_HDR_W4_HOUR);
321 fw_suit->build_min = le32_get_bits(hdr->w4, FW_HDR_W4_MIN);
322 fw_suit->cmd_ver = le32_get_bits(hdr->w7, FW_HDR_W7_CMD_VERSERION);
323 }
324
325 static void rtw89_fw_update_ver_v1(struct rtw89_dev *rtwdev,
326 struct rtw89_fw_suit *fw_suit,
327 const struct rtw89_fw_hdr_v1 *hdr)
328 {
329 fw_suit->major_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_MAJOR_VERSION);
330 fw_suit->minor_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_MINOR_VERSION);
331 fw_suit->sub_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_SUBVERSION);
332 fw_suit->sub_idex = le32_get_bits(hdr->w1, FW_HDR_V1_W1_SUBINDEX);
333 fw_suit->commitid = le32_get_bits(hdr->w2, FW_HDR_V1_W2_COMMITID);
334 fw_suit->build_year = le32_get_bits(hdr->w5, FW_HDR_V1_W5_YEAR);
335 fw_suit->build_mon = le32_get_bits(hdr->w4, FW_HDR_V1_W4_MONTH);
336 fw_suit->build_date = le32_get_bits(hdr->w4, FW_HDR_V1_W4_DATE);
337 fw_suit->build_hour = le32_get_bits(hdr->w4, FW_HDR_V1_W4_HOUR);
338 fw_suit->build_min = le32_get_bits(hdr->w4, FW_HDR_V1_W4_MIN);
339 fw_suit->cmd_ver = le32_get_bits(hdr->w7, FW_HDR_V1_W3_CMD_VERSERION);
340 }
341
342 static int rtw89_fw_update_ver(struct rtw89_dev *rtwdev,
343 enum rtw89_fw_type type,
344 struct rtw89_fw_suit *fw_suit)
345 {
346 const struct rtw89_fw_hdr *v0 = (const struct rtw89_fw_hdr *)fw_suit->data;
347 const struct rtw89_fw_hdr_v1 *v1 = (const struct rtw89_fw_hdr_v1 *)fw_suit->data;
348
349 if (type == RTW89_FW_LOGFMT)
350 return 0;
351
352 fw_suit->type = type;
353 fw_suit->hdr_ver = le32_get_bits(v0->w3, FW_HDR_W3_HDR_VER);
354
355 switch (fw_suit->hdr_ver) {
356 case 0:
357 rtw89_fw_update_ver_v0(rtwdev, fw_suit, v0);
358 break;
359 case 1:
360 rtw89_fw_update_ver_v1(rtwdev, fw_suit, v1);
361 break;
362 default:
363 rtw89_err(rtwdev, "Unknown firmware header version %u\n",
364 fw_suit->hdr_ver);
365 return -ENOENT;
366 }
367
368 rtw89_info(rtwdev,
369 "Firmware version %u.%u.%u.%u (%08x), cmd version %u, type %u\n",
370 fw_suit->major_ver, fw_suit->minor_ver, fw_suit->sub_ver,
371 fw_suit->sub_idex, fw_suit->commitid, fw_suit->cmd_ver, type);
372
373 return 0;
374 }
375
376 static
377 int __rtw89_fw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
378 bool nowarn)
379 {
380 struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type);
381 int ret;
382
383 ret = rtw89_mfw_recognize(rtwdev, type, fw_suit, nowarn);
384 if (ret)
385 return ret;
386
387 return rtw89_fw_update_ver(rtwdev, type, fw_suit);
388 }
389
390 static
391 int __rtw89_fw_recognize_from_elm(struct rtw89_dev *rtwdev,
392 const struct rtw89_fw_element_hdr *elm,
393 const void *data)
394 {
395 enum rtw89_fw_type type = (enum rtw89_fw_type)data;
396 struct rtw89_fw_suit *fw_suit;
397
398 fw_suit = rtw89_fw_suit_get(rtwdev, type);
399 fw_suit->data = elm->u.common.contents;
400 fw_suit->size = le32_to_cpu(elm->size);
401
402 return rtw89_fw_update_ver(rtwdev, type, fw_suit);
403 }
404
405 #define __DEF_FW_FEAT_COND(__cond, __op) \
406 static bool __fw_feat_cond_ ## __cond(u32 suit_ver_code, u32 comp_ver_code) \
407 { \
408 return suit_ver_code __op comp_ver_code; \
409 }
410
411 __DEF_FW_FEAT_COND(ge, >=); /* greater or equal */
412 __DEF_FW_FEAT_COND(le, <=); /* less or equal */
413 __DEF_FW_FEAT_COND(lt, <); /* less than */
414
415 struct __fw_feat_cfg {
416 enum rtw89_core_chip_id chip_id;
417 enum rtw89_fw_feature feature;
418 u32 ver_code;
419 bool (*cond)(u32 suit_ver_code, u32 comp_ver_code);
420 };
421
422 #define __CFG_FW_FEAT(_chip, _cond, _maj, _min, _sub, _idx, _feat) \
423 { \
424 .chip_id = _chip, \
425 .feature = RTW89_FW_FEATURE_ ## _feat, \
426 .ver_code = RTW89_FW_VER_CODE(_maj, _min, _sub, _idx), \
427 .cond = __fw_feat_cond_ ## _cond, \
428 }
429
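/* Firmware feature table: a feature is enabled when the loaded firmware
 * for the given chip satisfies the version comparison, e.g. RTL8852A
 * firmware >= 0.13.35.0 supports SCAN_OFFLOAD.
 */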
430 static const struct __fw_feat_cfg fw_feat_tbl[] = {
431 __CFG_FW_FEAT(RTL8851B, ge, 0, 29, 37, 1, TX_WAKE),
432 __CFG_FW_FEAT(RTL8851B, ge, 0, 29, 37, 1, SCAN_OFFLOAD),
433 __CFG_FW_FEAT(RTL8851B, ge, 0, 29, 41, 0, CRASH_TRIGGER),
434 __CFG_FW_FEAT(RTL8852A, le, 0, 13, 29, 0, OLD_HT_RA_FORMAT),
435 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, SCAN_OFFLOAD),
436 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, TX_WAKE),
437 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 36, 0, CRASH_TRIGGER),
438 __CFG_FW_FEAT(RTL8852A, lt, 0, 13, 38, 0, NO_PACKET_DROP),
439 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, NO_LPS_PG),
440 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, TX_WAKE),
441 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, CRASH_TRIGGER),
442 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, SCAN_OFFLOAD),
443 __CFG_FW_FEAT(RTL8852C, le, 0, 27, 33, 0, NO_DEEP_PS),
444 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 34, 0, TX_WAKE),
445 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 36, 0, SCAN_OFFLOAD),
446 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 40, 0, CRASH_TRIGGER),
447 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 56, 10, BEACON_FILTER),
448 };
449
450 static void rtw89_fw_iterate_feature_cfg(struct rtw89_fw_info *fw,
451 const struct rtw89_chip_info *chip,
452 u32 ver_code)
453 {
454 int i;
455
456 for (i = 0; i < ARRAY_SIZE(fw_feat_tbl); i++) {
457 const struct __fw_feat_cfg *ent = &fw_feat_tbl[i];
458
459 if (chip->chip_id != ent->chip_id)
460 continue;
461
462 if (ent->cond(ver_code, ent->ver_code))
463 RTW89_SET_FW_FEATURE(ent->feature, fw);
464 }
465 }
466
467 static void rtw89_fw_recognize_features(struct rtw89_dev *rtwdev)
468 {
469 const struct rtw89_chip_info *chip = rtwdev->chip;
470 const struct rtw89_fw_suit *fw_suit;
471 u32 suit_ver_code;
472
473 fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL);
474 suit_ver_code = RTW89_FW_SUIT_VER_CODE(fw_suit);
475
476 rtw89_fw_iterate_feature_cfg(&rtwdev->fw, chip, suit_ver_code);
477 }
478
479 const struct firmware *
480 rtw89_early_fw_feature_recognize(struct device *device,
481 const struct rtw89_chip_info *chip,
482 struct rtw89_fw_info *early_fw,
483 int *used_fw_format)
484 {
485 const struct firmware *firmware;
486 char fw_name[64];
487 int fw_format;
488 u32 ver_code;
489 int ret;
490
491 for (fw_format = chip->fw_format_max; fw_format >= 0; fw_format--) {
492 rtw89_fw_get_filename(fw_name, sizeof(fw_name),
493 chip->fw_basename, fw_format);
494
495 ret = request_firmware(&firmware, fw_name, device);
496 if (!ret) {
497 dev_info(device, "loaded firmware %s\n", fw_name);
498 *used_fw_format = fw_format;
499 break;
500 }
501 }
502
503 if (ret) {
504 dev_err(device, "failed to early request firmware: %d\n", ret);
505 return NULL;
506 }
507
508 ver_code = rtw89_compat_fw_hdr_ver_code(firmware->data);
509
510 if (!ver_code)
511 goto out;
512
513 rtw89_fw_iterate_feature_cfg(early_fw, chip, ver_code);
514
515 out:
516 return firmware;
517 }
518
519 int rtw89_fw_recognize(struct rtw89_dev *rtwdev)
520 {
521 const struct rtw89_chip_info *chip = rtwdev->chip;
522 int ret;
523
524 if (chip->try_ce_fw) {
525 ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL_CE, true);
526 if (!ret)
527 goto normal_done;
528 }
529
530 ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL, false);
531 if (ret)
532 return ret;
533
534 normal_done:
535 /* It still works even if the WoWLAN firmware doesn't exist. */
536 __rtw89_fw_recognize(rtwdev, RTW89_FW_WOWLAN, false);
537
538 /* It still works even if the log format file doesn't exist. */
539 __rtw89_fw_recognize(rtwdev, RTW89_FW_LOGFMT, true);
540
541 rtw89_fw_recognize_features(rtwdev);
542
543 rtw89_coex_recognize_ver(rtwdev);
544
545 return 0;
546 }
547
548 static
549 int rtw89_build_phy_tbl_from_elm(struct rtw89_dev *rtwdev,
550 const struct rtw89_fw_element_hdr *elm,
551 const void *data)
552 {
553 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
554 struct rtw89_phy_table *tbl;
555 struct rtw89_reg2_def *regs;
556 enum rtw89_rf_path rf_path;
557 u32 n_regs, i;
558 u8 idx;
559
560 tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
561 if (!tbl)
562 return -ENOMEM;
563
564 switch (le32_to_cpu(elm->id)) {
565 case RTW89_FW_ELEMENT_ID_BB_REG:
566 elm_info->bb_tbl = tbl;
567 break;
568 case RTW89_FW_ELEMENT_ID_BB_GAIN:
569 elm_info->bb_gain = tbl;
570 break;
571 case RTW89_FW_ELEMENT_ID_RADIO_A:
572 case RTW89_FW_ELEMENT_ID_RADIO_B:
573 case RTW89_FW_ELEMENT_ID_RADIO_C:
574 case RTW89_FW_ELEMENT_ID_RADIO_D:
575 rf_path = (enum rtw89_rf_path)data;
576 idx = elm->u.reg2.idx;
577
578 elm_info->rf_radio[idx] = tbl;
579 tbl->rf_path = rf_path;
580 tbl->config = rtw89_phy_config_rf_reg_v1;
581 break;
582 case RTW89_FW_ELEMENT_ID_RF_NCTL:
583 elm_info->rf_nctl = tbl;
584 break;
585 default:
586 kfree(tbl);
587 return -ENOENT;
588 }
589
590 n_regs = le32_to_cpu(elm->size) / sizeof(tbl->regs[0]);
591 regs = kcalloc(n_regs, sizeof(tbl->regs[0]), GFP_KERNEL);
592 if (!regs)
593 goto out;
594
595 for (i = 0; i < n_regs; i++) {
596 regs[i].addr = le32_to_cpu(elm->u.reg2.regs[i].addr);
597 regs[i].data = le32_to_cpu(elm->u.reg2.regs[i].data);
598 }
599
600 tbl->n_regs = n_regs;
601 tbl->regs = regs;
602
603 return 0;
604
605 out:
606 kfree(tbl);
607 return -ENOMEM;
608 }
609
610 struct rtw89_fw_element_handler {
611 int (*fn)(struct rtw89_dev *rtwdev,
612 const struct rtw89_fw_element_hdr *elm, const void *data);
613 const void *data;
614 const char *name;
615 };
616
617 static const struct rtw89_fw_element_handler __fw_element_handlers[] = {
618 [RTW89_FW_ELEMENT_ID_BBMCU0] = {__rtw89_fw_recognize_from_elm,
619 (const void *)RTW89_FW_BBMCU0, NULL},
620 [RTW89_FW_ELEMENT_ID_BBMCU1] = {__rtw89_fw_recognize_from_elm,
621 (const void *)RTW89_FW_BBMCU1, NULL},
622 [RTW89_FW_ELEMENT_ID_BB_REG] = {rtw89_build_phy_tbl_from_elm, NULL, "BB"},
623 [RTW89_FW_ELEMENT_ID_BB_GAIN] = {rtw89_build_phy_tbl_from_elm, NULL, NULL},
624 [RTW89_FW_ELEMENT_ID_RADIO_A] = {rtw89_build_phy_tbl_from_elm,
625 (const void *)RF_PATH_A, "radio A"},
626 [RTW89_FW_ELEMENT_ID_RADIO_B] = {rtw89_build_phy_tbl_from_elm,
627 (const void *)RF_PATH_B, NULL},
628 [RTW89_FW_ELEMENT_ID_RADIO_C] = {rtw89_build_phy_tbl_from_elm,
629 (const void *)RF_PATH_C, NULL},
630 [RTW89_FW_ELEMENT_ID_RADIO_D] = {rtw89_build_phy_tbl_from_elm,
631 (const void *)RF_PATH_D, NULL},
632 [RTW89_FW_ELEMENT_ID_RF_NCTL] = {rtw89_build_phy_tbl_from_elm, NULL, "NCTL"},
633 };
634
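/* Walk the element area that follows the MFW container. Each element
 * header selects a handler from __fw_element_handlers, and every element
 * listed in chip->needed_fw_elms must be recognized for success.
 */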
635 int rtw89_fw_recognize_elements(struct rtw89_dev *rtwdev)
636 {
637 struct rtw89_fw_info *fw_info = &rtwdev->fw;
638 const struct firmware *firmware = fw_info->req.firmware;
639 const struct rtw89_chip_info *chip = rtwdev->chip;
640 u32 unrecognized_elements = chip->needed_fw_elms;
641 const struct rtw89_fw_element_handler *handler;
642 const struct rtw89_fw_element_hdr *hdr;
643 u32 elm_size;
644 u32 elem_id;
645 u32 offset;
646 int ret;
647
648 BUILD_BUG_ON(sizeof(chip->needed_fw_elms) * 8 < RTW89_FW_ELEMENT_ID_NUM);
649
650 offset = rtw89_mfw_get_size(rtwdev);
651 offset = ALIGN(offset, RTW89_FW_ELEMENT_ALIGN);
652 if (offset == 0)
653 return -EINVAL;
654
655 while (offset + sizeof(*hdr) < firmware->size) {
656 hdr = (const struct rtw89_fw_element_hdr *)(firmware->data + offset);
657
658 elm_size = le32_to_cpu(hdr->size);
659 if (offset + elm_size >= firmware->size) {
660 rtw89_warn(rtwdev, "firmware element size exceeds\n");
661 break;
662 }
663
664 elem_id = le32_to_cpu(hdr->id);
665 if (elem_id >= ARRAY_SIZE(__fw_element_handlers))
666 goto next;
667
668 handler = &__fw_element_handlers[elem_id];
669 if (!handler->fn)
670 goto next;
671
672 ret = handler->fn(rtwdev, hdr, handler->data);
673 if (ret)
674 return ret;
675
676 if (handler->name)
677 rtw89_info(rtwdev, "Firmware element %s version: %4ph\n",
678 handler->name, hdr->ver);
679
680 unrecognized_elements &= ~BIT(elem_id);
681 next:
682 offset += sizeof(*hdr) + elm_size;
683 offset = ALIGN(offset, RTW89_FW_ELEMENT_ALIGN);
684 }
685
686 if (unrecognized_elements) {
687 rtw89_err(rtwdev, "Firmware elements 0x%08x are unrecognized\n",
688 unrecognized_elements);
689 return -ENOENT;
690 }
691
692 return 0;
693 }
694
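/* Push the 8-byte H2C command header. REC_ACK is forced whenever the
 * sequence number is a multiple of four, presumably to bound how long
 * the driver runs without a firmware acknowledgement.
 */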
695 void rtw89_h2c_pkt_set_hdr(struct rtw89_dev *rtwdev, struct sk_buff *skb,
696 u8 type, u8 cat, u8 class, u8 func,
697 bool rack, bool dack, u32 len)
698 {
699 struct fwcmd_hdr *hdr;
700
701 hdr = (struct fwcmd_hdr *)skb_push(skb, 8);
702
703 if (!(rtwdev->fw.h2c_seq % 4))
704 rack = true;
705 hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) |
706 FIELD_PREP(H2C_HDR_CAT, cat) |
707 FIELD_PREP(H2C_HDR_CLASS, class) |
708 FIELD_PREP(H2C_HDR_FUNC, func) |
709 FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq));
710
711 hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN,
712 len + H2C_HEADER_LEN) |
713 (rack ? H2C_HDR_REC_ACK : 0) |
714 (dack ? H2C_HDR_DONE_ACK : 0));
715
716 rtwdev->fw.h2c_seq++;
717 }
718
719 static void rtw89_h2c_pkt_set_hdr_fwdl(struct rtw89_dev *rtwdev,
720 struct sk_buff *skb,
721 u8 type, u8 cat, u8 class, u8 func,
722 u32 len)
723 {
724 struct fwcmd_hdr *hdr;
725
726 hdr = (struct fwcmd_hdr *)skb_push(skb, 8);
727
728 hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) |
729 FIELD_PREP(H2C_HDR_CAT, cat) |
730 FIELD_PREP(H2C_HDR_CLASS, class) |
731 FIELD_PREP(H2C_HDR_FUNC, func) |
732 FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq));
733
734 hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN,
735 len + H2C_HEADER_LEN));
736 }
737
738 static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 len)
739 {
740 struct sk_buff *skb;
741 u32 ret = 0;
742
743 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
744 if (!skb) {
745 rtw89_err(rtwdev, "failed to alloc skb for fw hdr dl\n");
746 return -ENOMEM;
747 }
748
749 skb_put_data(skb, fw, len);
750 SET_FW_HDR_PART_SIZE(skb->data, FWDL_SECTION_PER_PKT_LEN);
751 rtw89_h2c_pkt_set_hdr_fwdl(rtwdev, skb, FWCMD_TYPE_H2C,
752 H2C_CAT_MAC, H2C_CL_MAC_FWDL,
753 H2C_FUNC_MAC_FWHDR_DL, len);
754
755 ret = rtw89_h2c_tx(rtwdev, skb, false);
756 if (ret) {
757 rtw89_err(rtwdev, "failed to send h2c\n");
758 goto fail;
759 }
760
761 return 0;
762 fail:
763 dev_kfree_skb_any(skb);
764
765 return ret;
766 }
767
768 static int rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 len)
769 {
770 u8 val;
771 int ret;
772
773 ret = __rtw89_fw_download_hdr(rtwdev, fw, len);
774 if (ret) {
775 rtw89_err(rtwdev, "[ERR]FW header download\n");
776 return ret;
777 }
778
779 ret = read_poll_timeout_atomic(rtw89_read8, val, val & B_AX_FWDL_PATH_RDY,
780 1, FWDL_WAIT_CNT, false,
781 rtwdev, R_AX_WCPU_FW_CTRL);
782 if (ret) {
783 rtw89_err(rtwdev, "[ERR]FWDL path ready\n");
784 return ret;
785 }
786
787 rtw89_write32(rtwdev, R_AX_HALT_H2C_CTRL, 0);
788 rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0);
789
790 return 0;
791 }
792
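/* Download one firmware section, splitting it into H2C packets of at
 * most FWDL_SECTION_PER_PKT_LEN bytes each.
 */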
793 static int __rtw89_fw_download_main(struct rtw89_dev *rtwdev,
794 struct rtw89_fw_hdr_section_info *info)
795 {
796 struct sk_buff *skb;
797 const u8 *section = info->addr;
798 u32 residue_len = info->len;
799 u32 pkt_len;
800 int ret;
801
802 while (residue_len) {
803 if (residue_len >= FWDL_SECTION_PER_PKT_LEN)
804 pkt_len = FWDL_SECTION_PER_PKT_LEN;
805 else
806 pkt_len = residue_len;
807
808 skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, pkt_len);
809 if (!skb) {
810 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
811 return -ENOMEM;
812 }
813 skb_put_data(skb, section, pkt_len);
814
815 ret = rtw89_h2c_tx(rtwdev, skb, true);
816 if (ret) {
817 rtw89_err(rtwdev, "failed to send h2c\n");
818 goto fail;
819 }
820
821 section += pkt_len;
822 residue_len -= pkt_len;
823 }
824
825 return 0;
826 fail:
827 dev_kfree_skb_any(skb);
828
829 return ret;
830 }
831
832 static int rtw89_fw_download_main(struct rtw89_dev *rtwdev, const u8 *fw,
833 struct rtw89_fw_bin_info *info)
834 {
835 struct rtw89_fw_hdr_section_info *section_info = info->section_info;
836 u8 section_num = info->section_num;
837 int ret;
838
839 while (section_num--) {
840 ret = __rtw89_fw_download_main(rtwdev, section_info);
841 if (ret)
842 return ret;
843 section_info++;
844 }
845
846 mdelay(5);
847
848 ret = rtw89_fw_check_rdy(rtwdev);
849 if (ret) {
850 rtw89_warn(rtwdev, "download firmware fail\n");
851 return ret;
852 }
853
854 return 0;
855 }
856
857 static void rtw89_fw_prog_cnt_dump(struct rtw89_dev *rtwdev)
858 {
859 u32 val32;
860 u16 index;
861
862 rtw89_write32(rtwdev, R_AX_DBG_CTRL,
863 FIELD_PREP(B_AX_DBG_SEL0, FW_PROG_CNTR_DBG_SEL) |
864 FIELD_PREP(B_AX_DBG_SEL1, FW_PROG_CNTR_DBG_SEL));
865 rtw89_write32_mask(rtwdev, R_AX_SYS_STATUS1, B_AX_SEL_0XC0_MASK, MAC_DBG_SEL);
866
867 for (index = 0; index < 15; index++) {
868 val32 = rtw89_read32(rtwdev, R_AX_DBG_PORT_SEL);
869 rtw89_err(rtwdev, "[ERR]fw PC = 0x%x\n", val32);
870 fsleep(10);
871 }
872 }
873
874 static void rtw89_fw_dl_fail_dump(struct rtw89_dev *rtwdev)
875 {
876 u32 val32;
877 u16 val16;
878
879 val32 = rtw89_read32(rtwdev, R_AX_WCPU_FW_CTRL);
880 rtw89_err(rtwdev, "[ERR]fwdl 0x1E0 = 0x%x\n", val32);
881
882 val16 = rtw89_read16(rtwdev, R_AX_BOOT_DBG + 2);
883 rtw89_err(rtwdev, "[ERR]fwdl 0x83F2 = 0x%x\n", val16);
884
885 rtw89_fw_prog_cnt_dump(rtwdev);
886 }
887
888 int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type)
889 {
890 struct rtw89_fw_info *fw_info = &rtwdev->fw;
891 struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type);
892 struct rtw89_fw_bin_info info;
893 u8 val;
894 int ret;
895
896 rtw89_mac_disable_cpu(rtwdev);
897 ret = rtw89_mac_enable_cpu(rtwdev, 0, true);
898 if (ret)
899 return ret;
900
901 ret = rtw89_fw_hdr_parser(rtwdev, fw_suit, &info);
902 if (ret) {
903 rtw89_err(rtwdev, "parse fw header fail\n");
904 goto fwdl_err;
905 }
906
907 ret = read_poll_timeout_atomic(rtw89_read8, val, val & B_AX_H2C_PATH_RDY,
908 1, FWDL_WAIT_CNT, false,
909 rtwdev, R_AX_WCPU_FW_CTRL);
910 if (ret) {
911 rtw89_err(rtwdev, "[ERR]H2C path ready\n");
912 goto fwdl_err;
913 }
914
915 ret = rtw89_fw_download_hdr(rtwdev, fw_suit->data, info.hdr_len -
916 info.dynamic_hdr_len);
917 if (ret) {
918 ret = -EBUSY;
919 goto fwdl_err;
920 }
921
922 ret = rtw89_fw_download_main(rtwdev, fw_suit->data, &info);
923 if (ret) {
924 ret = -EBUSY;
925 goto fwdl_err;
926 }
927
928 fw_info->h2c_seq = 0;
929 fw_info->rec_seq = 0;
930 fw_info->h2c_counter = 0;
931 fw_info->c2h_counter = 0;
932 rtwdev->mac.rpwm_seq_num = RPWM_SEQ_NUM_MAX;
933 rtwdev->mac.cpwm_seq_num = CPWM_SEQ_NUM_MAX;
934
935 return ret;
936
937 fwdl_err:
938 rtw89_fw_dl_fail_dump(rtwdev);
939 return ret;
940 }
941
942 int rtw89_wait_firmware_completion(struct rtw89_dev *rtwdev)
943 {
944 struct rtw89_fw_info *fw = &rtwdev->fw;
945
946 wait_for_completion(&fw->req.completion);
947 if (!fw->req.firmware)
948 return -EINVAL;
949
950 return 0;
951 }
952
953 static int rtw89_load_firmware_req(struct rtw89_dev *rtwdev,
954 struct rtw89_fw_req_info *req,
955 const char *fw_name, bool nowarn)
956 {
957 int ret;
958
959 if (req->firmware) {
960 rtw89_debug(rtwdev, RTW89_DBG_FW,
961 "full firmware has been early requested\n");
962 complete_all(&req->completion);
963 return 0;
964 }
965
966 if (nowarn)
967 ret = firmware_request_nowarn(&req->firmware, fw_name, rtwdev->dev);
968 else
969 ret = request_firmware(&req->firmware, fw_name, rtwdev->dev);
970
971 complete_all(&req->completion);
972
973 return ret;
974 }
975
976 void rtw89_load_firmware_work(struct work_struct *work)
977 {
978 struct rtw89_dev *rtwdev =
979 container_of(work, struct rtw89_dev, load_firmware_work);
980 const struct rtw89_chip_info *chip = rtwdev->chip;
981 char fw_name[64];
982
983 rtw89_fw_get_filename(fw_name, sizeof(fw_name),
984 chip->fw_basename, rtwdev->fw.fw_format);
985
986 rtw89_load_firmware_req(rtwdev, &rtwdev->fw.req, fw_name, false);
987 }
988
989 static void rtw89_free_phy_tbl_from_elm(struct rtw89_phy_table *tbl)
990 {
991 if (!tbl)
992 return;
993
994 kfree(tbl->regs);
995 kfree(tbl);
996 }
997
998 static void rtw89_unload_firmware_elements(struct rtw89_dev *rtwdev)
999 {
1000 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
1001 int i;
1002
1003 rtw89_free_phy_tbl_from_elm(elm_info->bb_tbl);
1004 rtw89_free_phy_tbl_from_elm(elm_info->bb_gain);
1005 for (i = 0; i < ARRAY_SIZE(elm_info->rf_radio); i++)
1006 rtw89_free_phy_tbl_from_elm(elm_info->rf_radio[i]);
1007 rtw89_free_phy_tbl_from_elm(elm_info->rf_nctl);
1008 }
1009
1010 void rtw89_unload_firmware(struct rtw89_dev *rtwdev)
1011 {
1012 struct rtw89_fw_info *fw = &rtwdev->fw;
1013
1014 cancel_work_sync(&rtwdev->load_firmware_work);
1015
1016 if (fw->req.firmware) {
1017 release_firmware(fw->req.firmware);
1018
1019 /* assign NULL back in case rtw89_free_ieee80211_hw()
1020 * tries to release the same one again.
1021 */
1022 fw->req.firmware = NULL;
1023 }
1024
1025 kfree(fw->log.fmts);
1026 rtw89_unload_firmware_elements(rtwdev);
1027 }
1028
1029 static u32 rtw89_fw_log_get_fmt_idx(struct rtw89_dev *rtwdev, u32 fmt_id)
1030 {
1031 struct rtw89_fw_log *fw_log = &rtwdev->fw.log;
1032 u32 i;
1033
1034 if (fmt_id > fw_log->last_fmt_id)
1035 return 0;
1036
1037 for (i = 0; i < fw_log->fmt_count; i++) {
1038 if (le32_to_cpu(fw_log->fmt_ids[i]) == fmt_id)
1039 return i;
1040 }
1041 return 0;
1042 }
1043
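/* Build the C2H log format dictionary from the LOGFMT suit: a count and
 * an array of format IDs, followed by the matching NUL-separated format
 * strings.
 */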
1044 static int rtw89_fw_log_create_fmts_dict(struct rtw89_dev *rtwdev)
1045 {
1046 struct rtw89_fw_log *log = &rtwdev->fw.log;
1047 const struct rtw89_fw_logsuit_hdr *suit_hdr;
1048 struct rtw89_fw_suit *suit = &log->suit;
1049 const void *fmts_ptr, *fmts_end_ptr;
1050 u32 fmt_count;
1051 int i;
1052
1053 suit_hdr = (const struct rtw89_fw_logsuit_hdr *)suit->data;
1054 fmt_count = le32_to_cpu(suit_hdr->count);
1055 log->fmt_ids = suit_hdr->ids;
1056 fmts_ptr = &suit_hdr->ids[fmt_count];
1057 fmts_end_ptr = suit->data + suit->size;
1058 log->fmts = kcalloc(fmt_count, sizeof(char *), GFP_KERNEL);
1059 if (!log->fmts)
1060 return -ENOMEM;
1061
1062 for (i = 0; i < fmt_count; i++) {
1063 fmts_ptr = memchr_inv(fmts_ptr, 0, fmts_end_ptr - fmts_ptr);
1064 if (!fmts_ptr)
1065 break;
1066
1067 (*log->fmts)[i] = fmts_ptr;
1068 log->last_fmt_id = le32_to_cpu(log->fmt_ids[i]);
1069 log->fmt_count++;
1070 fmts_ptr += strlen(fmts_ptr);
1071 }
1072
1073 return 0;
1074 }
1075
1076 int rtw89_fw_log_prepare(struct rtw89_dev *rtwdev)
1077 {
1078 struct rtw89_fw_log *log = &rtwdev->fw.log;
1079 struct rtw89_fw_suit *suit = &log->suit;
1080
1081 if (!suit || !suit->data) {
1082 rtw89_debug(rtwdev, RTW89_DBG_FW, "no log format file\n");
1083 return -EINVAL;
1084 }
1085 if (log->fmts)
1086 return 0;
1087
1088 return rtw89_fw_log_create_fmts_dict(rtwdev);
1089 }
1090
1091 static void rtw89_fw_log_dump_data(struct rtw89_dev *rtwdev,
1092 const struct rtw89_fw_c2h_log_fmt *log_fmt,
1093 u32 fmt_idx, u8 para_int, bool raw_data)
1094 {
1095 const char *(*fmts)[] = rtwdev->fw.log.fmts;
1096 char str_buf[RTW89_C2H_FW_LOG_STR_BUF_SIZE];
1097 u32 args[RTW89_C2H_FW_LOG_MAX_PARA_NUM] = {0};
1098 int i;
1099
1100 if (log_fmt->argc > RTW89_C2H_FW_LOG_MAX_PARA_NUM) {
1101 rtw89_warn(rtwdev, "C2H log: Arg count is unexpected %d\n",
1102 log_fmt->argc);
1103 return;
1104 }
1105
1106 if (para_int)
1107 for (i = 0 ; i < log_fmt->argc; i++)
1108 args[i] = le32_to_cpu(log_fmt->u.argv[i]);
1109
1110 if (raw_data) {
1111 if (para_int)
1112 snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE,
1113 "fw_enc(%d, %d, %d) %*ph", le32_to_cpu(log_fmt->fmt_id),
1114 para_int, log_fmt->argc, (int)sizeof(args), args);
1115 else
1116 snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE,
1117 "fw_enc(%d, %d, %d, %s)", le32_to_cpu(log_fmt->fmt_id),
1118 para_int, log_fmt->argc, log_fmt->u.raw);
1119 } else {
1120 snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE, (*fmts)[fmt_idx],
1121 args[0x0], args[0x1], args[0x2], args[0x3], args[0x4],
1122 args[0x5], args[0x6], args[0x7], args[0x8], args[0x9],
1123 args[0xa], args[0xb], args[0xc], args[0xd], args[0xe],
1124 args[0xf]);
1125 }
1126
1127 rtw89_info(rtwdev, "C2H log: %s", str_buf);
1128 }
1129
1130 void rtw89_fw_log_dump(struct rtw89_dev *rtwdev, u8 *buf, u32 len)
1131 {
1132 const struct rtw89_fw_c2h_log_fmt *log_fmt;
1133 u8 para_int;
1134 u32 fmt_idx;
1135
1136 if (len < RTW89_C2H_HEADER_LEN) {
1137 rtw89_err(rtwdev, "c2h log length is wrong!\n");
1138 return;
1139 }
1140
1141 buf += RTW89_C2H_HEADER_LEN;
1142 len -= RTW89_C2H_HEADER_LEN;
1143 log_fmt = (const struct rtw89_fw_c2h_log_fmt *)buf;
1144
1145 if (len < RTW89_C2H_FW_FORMATTED_LOG_MIN_LEN)
1146 goto plain_log;
1147
1148 if (log_fmt->signature != cpu_to_le16(RTW89_C2H_FW_LOG_SIGNATURE))
1149 goto plain_log;
1150
1151 if (!rtwdev->fw.log.fmts)
1152 return;
1153
1154 para_int = u8_get_bits(log_fmt->feature, RTW89_C2H_FW_LOG_FEATURE_PARA_INT);
1155 fmt_idx = rtw89_fw_log_get_fmt_idx(rtwdev, le32_to_cpu(log_fmt->fmt_id));
1156
1157 if (!para_int && log_fmt->argc != 0 && fmt_idx != 0)
1158 rtw89_info(rtwdev, "C2H log: %s%s",
1159 (*rtwdev->fw.log.fmts)[fmt_idx], log_fmt->u.raw);
1160 else if (fmt_idx != 0 && para_int)
1161 rtw89_fw_log_dump_data(rtwdev, log_fmt, fmt_idx, para_int, false);
1162 else
1163 rtw89_fw_log_dump_data(rtwdev, log_fmt, fmt_idx, para_int, true);
1164 return;
1165
1166 plain_log:
1167 rtw89_info(rtwdev, "C2H log: %.*s", len, buf);
1168
1169 }
1170
1171 #define H2C_CAM_LEN 60
1172 int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
1173 struct rtw89_sta *rtwsta, const u8 *scan_mac_addr)
1174 {
1175 struct sk_buff *skb;
1176 int ret;
1177
1178 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CAM_LEN);
1179 if (!skb) {
1180 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
1181 return -ENOMEM;
1182 }
1183 skb_put(skb, H2C_CAM_LEN);
1184 rtw89_cam_fill_addr_cam_info(rtwdev, rtwvif, rtwsta, scan_mac_addr, skb->data);
1185 rtw89_cam_fill_bssid_cam_info(rtwdev, rtwvif, rtwsta, skb->data);
1186
1187 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1188 H2C_CAT_MAC,
1189 H2C_CL_MAC_ADDR_CAM_UPDATE,
1190 H2C_FUNC_MAC_ADDR_CAM_UPD, 0, 1,
1191 H2C_CAM_LEN);
1192
1193 ret = rtw89_h2c_tx(rtwdev, skb, false);
1194 if (ret) {
1195 rtw89_err(rtwdev, "failed to send h2c\n");
1196 goto fail;
1197 }
1198
1199 return 0;
1200 fail:
1201 dev_kfree_skb_any(skb);
1202
1203 return ret;
1204 }
1205
1206 #define H2C_DCTL_SEC_CAM_LEN 68
1207 int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev,
1208 struct rtw89_vif *rtwvif,
1209 struct rtw89_sta *rtwsta)
1210 {
1211 struct sk_buff *skb;
1212 int ret;
1213
1214 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DCTL_SEC_CAM_LEN);
1215 if (!skb) {
1216 rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n");
1217 return -ENOMEM;
1218 }
1219 skb_put(skb, H2C_DCTL_SEC_CAM_LEN);
1220
1221 rtw89_cam_fill_dctl_sec_cam_info_v1(rtwdev, rtwvif, rtwsta, skb->data);
1222
1223 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1224 H2C_CAT_MAC,
1225 H2C_CL_MAC_FR_EXCHG,
1226 H2C_FUNC_MAC_DCTLINFO_UD_V1, 0, 0,
1227 H2C_DCTL_SEC_CAM_LEN);
1228
1229 ret = rtw89_h2c_tx(rtwdev, skb, false);
1230 if (ret) {
1231 rtw89_err(rtwdev, "failed to send h2c\n");
1232 goto fail;
1233 }
1234
1235 return 0;
1236 fail:
1237 dev_kfree_skb_any(skb);
1238
1239 return ret;
1240 }
1241 EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v1);
1242
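/* Program a block-ack CAM entry for the given TID. Failing to obtain a
 * static entry is not fatal; see the comment in the error path below.
 */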
1243 #define H2C_BA_CAM_LEN 8
1244 int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
1245 bool valid, struct ieee80211_ampdu_params *params)
1246 {
1247 const struct rtw89_chip_info *chip = rtwdev->chip;
1248 struct rtw89_vif *rtwvif = rtwsta->rtwvif;
1249 u8 macid = rtwsta->mac_id;
1250 struct sk_buff *skb;
1251 u8 entry_idx;
1252 int ret;
1253
1254 ret = valid ?
1255 rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx) :
1256 rtw89_core_release_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx);
1257 if (ret) {
1258 /* it still works even if we don't have a static BA CAM, because the
1259 * hardware can create a dynamic BA CAM automatically.
1260 */
1261 rtw89_debug(rtwdev, RTW89_DBG_TXRX,
1262 "failed to %s entry tid=%d for h2c ba cam\n",
1263 valid ? "alloc" : "free", params->tid);
1264 return 0;
1265 }
1266
1267 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_BA_CAM_LEN);
1268 if (!skb) {
1269 rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n");
1270 return -ENOMEM;
1271 }
1272 skb_put(skb, H2C_BA_CAM_LEN);
1273 SET_BA_CAM_MACID(skb->data, macid);
1274 if (chip->bacam_ver == RTW89_BACAM_V0_EXT)
1275 SET_BA_CAM_ENTRY_IDX_V1(skb->data, entry_idx);
1276 else
1277 SET_BA_CAM_ENTRY_IDX(skb->data, entry_idx);
1278 if (!valid)
1279 goto end;
1280 SET_BA_CAM_VALID(skb->data, valid);
1281 SET_BA_CAM_TID(skb->data, params->tid);
1282 if (params->buf_size > 64)
1283 SET_BA_CAM_BMAP_SIZE(skb->data, 4);
1284 else
1285 SET_BA_CAM_BMAP_SIZE(skb->data, 0);
1286 /* If init req is set, hw will set the ssn */
1287 SET_BA_CAM_INIT_REQ(skb->data, 1);
1288 SET_BA_CAM_SSN(skb->data, params->ssn);
1289
1290 if (chip->bacam_ver == RTW89_BACAM_V0_EXT) {
1291 SET_BA_CAM_STD_EN(skb->data, 1);
1292 SET_BA_CAM_BAND(skb->data, rtwvif->mac_idx);
1293 }
1294
1295 end:
1296 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1297 H2C_CAT_MAC,
1298 H2C_CL_BA_CAM,
1299 H2C_FUNC_MAC_BA_CAM, 0, 1,
1300 H2C_BA_CAM_LEN);
1301
1302 ret = rtw89_h2c_tx(rtwdev, skb, false);
1303 if (ret) {
1304 rtw89_err(rtwdev, "failed to send h2c\n");
1305 goto fail;
1306 }
1307
1308 return 0;
1309 fail:
1310 dev_kfree_skb_any(skb);
1311
1312 return ret;
1313 }
1314
1315 static int rtw89_fw_h2c_init_ba_cam_v0_ext(struct rtw89_dev *rtwdev,
1316 u8 entry_idx, u8 uid)
1317 {
1318 struct sk_buff *skb;
1319 int ret;
1320
1321 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_BA_CAM_LEN);
1322 if (!skb) {
1323 rtw89_err(rtwdev, "failed to alloc skb for dynamic h2c ba cam\n");
1324 return -ENOMEM;
1325 }
1326 skb_put(skb, H2C_BA_CAM_LEN);
1327
1328 SET_BA_CAM_VALID(skb->data, 1);
1329 SET_BA_CAM_ENTRY_IDX_V1(skb->data, entry_idx);
1330 SET_BA_CAM_UID(skb->data, uid);
1331 SET_BA_CAM_BAND(skb->data, 0);
1332 SET_BA_CAM_STD_EN(skb->data, 0);
1333
1334 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1335 H2C_CAT_MAC,
1336 H2C_CL_BA_CAM,
1337 H2C_FUNC_MAC_BA_CAM, 0, 1,
1338 H2C_BA_CAM_LEN);
1339
1340 ret = rtw89_h2c_tx(rtwdev, skb, false);
1341 if (ret) {
1342 rtw89_err(rtwdev, "failed to send h2c\n");
1343 goto fail;
1344 }
1345
1346 return 0;
1347 fail:
1348 dev_kfree_skb_any(skb);
1349
1350 return ret;
1351 }
1352
1353 void rtw89_fw_h2c_init_dynamic_ba_cam_v0_ext(struct rtw89_dev *rtwdev)
1354 {
1355 const struct rtw89_chip_info *chip = rtwdev->chip;
1356 u8 entry_idx = chip->bacam_num;
1357 u8 uid = 0;
1358 int i;
1359
1360 for (i = 0; i < chip->bacam_dynamic_num; i++) {
1361 rtw89_fw_h2c_init_ba_cam_v0_ext(rtwdev, entry_idx, uid);
1362 entry_idx++;
1363 uid++;
1364 }
1365 }
1366
1367 #define H2C_LOG_CFG_LEN 12
1368 int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable)
1369 {
1370 struct sk_buff *skb;
1371 u32 comp = enable ? BIT(RTW89_FW_LOG_COMP_INIT) | BIT(RTW89_FW_LOG_COMP_TASK) |
1372 BIT(RTW89_FW_LOG_COMP_PS) | BIT(RTW89_FW_LOG_COMP_ERROR) : 0;
1373 int ret;
1374
1375 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LOG_CFG_LEN);
1376 if (!skb) {
1377 rtw89_err(rtwdev, "failed to alloc skb for fw log cfg\n");
1378 return -ENOMEM;
1379 }
1380
1381 skb_put(skb, H2C_LOG_CFG_LEN);
1382 SET_LOG_CFG_LEVEL(skb->data, RTW89_FW_LOG_LEVEL_LOUD);
1383 SET_LOG_CFG_PATH(skb->data, BIT(RTW89_FW_LOG_LEVEL_C2H));
1384 SET_LOG_CFG_COMP(skb->data, comp);
1385 SET_LOG_CFG_COMP_EXT(skb->data, 0);
1386
1387 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1388 H2C_CAT_MAC,
1389 H2C_CL_FW_INFO,
1390 H2C_FUNC_LOG_CFG, 0, 0,
1391 H2C_LOG_CFG_LEN);
1392
1393 ret = rtw89_h2c_tx(rtwdev, skb, false);
1394 if (ret) {
1395 rtw89_err(rtwdev, "failed to send h2c\n");
1396 goto fail;
1397 }
1398
1399 return 0;
1400 fail:
1401 dev_kfree_skb_any(skb);
1402
1403 return ret;
1404 }
1405
1406 static int rtw89_fw_h2c_add_general_pkt(struct rtw89_dev *rtwdev,
1407 struct rtw89_vif *rtwvif,
1408 enum rtw89_fw_pkt_ofld_type type,
1409 u8 *id)
1410 {
1411 struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
1412 struct rtw89_pktofld_info *info;
1413 struct sk_buff *skb;
1414 int ret;
1415
1416 info = kzalloc(sizeof(*info), GFP_KERNEL);
1417 if (!info)
1418 return -ENOMEM;
1419
1420 switch (type) {
1421 case RTW89_PKT_OFLD_TYPE_PS_POLL:
1422 skb = ieee80211_pspoll_get(rtwdev->hw, vif);
1423 break;
1424 case RTW89_PKT_OFLD_TYPE_PROBE_RSP:
1425 skb = ieee80211_proberesp_get(rtwdev->hw, vif);
1426 break;
1427 case RTW89_PKT_OFLD_TYPE_NULL_DATA:
1428 skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, false);
1429 break;
1430 case RTW89_PKT_OFLD_TYPE_QOS_NULL:
1431 skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, true);
1432 break;
1433 default:
1434 goto err;
1435 }
1436
1437 if (!skb)
1438 goto err;
1439
1440 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb);
1441 kfree_skb(skb);
1442
1443 if (ret)
1444 goto err;
1445
1446 list_add_tail(&info->list, &rtwvif->general_pkt_list);
1447 *id = info->id;
1448 return 0;
1449
1450 err:
1451 kfree(info);
1452 return -ENOMEM;
1453 }
1454
1455 void rtw89_fw_release_general_pkt_list_vif(struct rtw89_dev *rtwdev,
1456 struct rtw89_vif *rtwvif, bool notify_fw)
1457 {
1458 struct list_head *pkt_list = &rtwvif->general_pkt_list;
1459 struct rtw89_pktofld_info *info, *tmp;
1460
1461 list_for_each_entry_safe(info, tmp, pkt_list, list) {
1462 if (notify_fw)
1463 rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id);
1464 else
1465 rtw89_core_release_bit_map(rtwdev->pkt_offload, info->id);
1466 list_del(&info->list);
1467 kfree(info);
1468 }
1469 }
1470
1471 void rtw89_fw_release_general_pkt_list(struct rtw89_dev *rtwdev, bool notify_fw)
1472 {
1473 struct rtw89_vif *rtwvif;
1474
1475 rtw89_for_each_rtwvif(rtwdev, rtwvif)
1476 rtw89_fw_release_general_pkt_list_vif(rtwdev, rtwvif, notify_fw);
1477 }
1478
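/* Register the ps-poll, null and QoS-null frame templates as packet
 * offloads and report their IDs to the firmware for this macid; unused
 * slots stay at H2C_GENERAL_PKT_ID_UND.
 */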
1479 #define H2C_GENERAL_PKT_LEN 6
1480 #define H2C_GENERAL_PKT_ID_UND 0xff
1481 int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev,
1482 struct rtw89_vif *rtwvif, u8 macid)
1483 {
1484 u8 pkt_id_ps_poll = H2C_GENERAL_PKT_ID_UND;
1485 u8 pkt_id_null = H2C_GENERAL_PKT_ID_UND;
1486 u8 pkt_id_qos_null = H2C_GENERAL_PKT_ID_UND;
1487 struct sk_buff *skb;
1488 int ret;
1489
1490 rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
1491 RTW89_PKT_OFLD_TYPE_PS_POLL, &pkt_id_ps_poll);
1492 rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
1493 RTW89_PKT_OFLD_TYPE_NULL_DATA, &pkt_id_null);
1494 rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
1495 RTW89_PKT_OFLD_TYPE_QOS_NULL, &pkt_id_qos_null);
1496
1497 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_GENERAL_PKT_LEN);
1498 if (!skb) {
1499 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
1500 return -ENOMEM;
1501 }
1502 skb_put(skb, H2C_GENERAL_PKT_LEN);
1503 SET_GENERAL_PKT_MACID(skb->data, macid);
1504 SET_GENERAL_PKT_PROBRSP_ID(skb->data, H2C_GENERAL_PKT_ID_UND);
1505 SET_GENERAL_PKT_PSPOLL_ID(skb->data, pkt_id_ps_poll);
1506 SET_GENERAL_PKT_NULL_ID(skb->data, pkt_id_null);
1507 SET_GENERAL_PKT_QOS_NULL_ID(skb->data, pkt_id_qos_null);
1508 SET_GENERAL_PKT_CTS2SELF_ID(skb->data, H2C_GENERAL_PKT_ID_UND);
1509
1510 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1511 H2C_CAT_MAC,
1512 H2C_CL_FW_INFO,
1513 H2C_FUNC_MAC_GENERAL_PKT, 0, 1,
1514 H2C_GENERAL_PKT_LEN);
1515
1516 ret = rtw89_h2c_tx(rtwdev, skb, false);
1517 if (ret) {
1518 rtw89_err(rtwdev, "failed to send h2c\n");
1519 goto fail;
1520 }
1521
1522 return 0;
1523 fail:
1524 dev_kfree_skb_any(skb);
1525
1526 return ret;
1527 }
1528
1529 #define H2C_LPS_PARM_LEN 8
1530 int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev,
1531 struct rtw89_lps_parm *lps_param)
1532 {
1533 struct sk_buff *skb;
1534 int ret;
1535
1536 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LPS_PARM_LEN);
1537 if (!skb) {
1538 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
1539 return -ENOMEM;
1540 }
1541 skb_put(skb, H2C_LPS_PARM_LEN);
1542
1543 SET_LPS_PARM_MACID(skb->data, lps_param->macid);
1544 SET_LPS_PARM_PSMODE(skb->data, lps_param->psmode);
1545 SET_LPS_PARM_LASTRPWM(skb->data, lps_param->lastrpwm);
1546 SET_LPS_PARM_RLBM(skb->data, 1);
1547 SET_LPS_PARM_SMARTPS(skb->data, 1);
1548 SET_LPS_PARM_AWAKEINTERVAL(skb->data, 1);
1549 SET_LPS_PARM_VOUAPSD(skb->data, 0);
1550 SET_LPS_PARM_VIUAPSD(skb->data, 0);
1551 SET_LPS_PARM_BEUAPSD(skb->data, 0);
1552 SET_LPS_PARM_BKUAPSD(skb->data, 0);
1553
1554 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1555 H2C_CAT_MAC,
1556 H2C_CL_MAC_PS,
1557 H2C_FUNC_MAC_LPS_PARM, 0, 1,
1558 H2C_LPS_PARM_LEN);
1559
1560 ret = rtw89_h2c_tx(rtwdev, skb, false);
1561 if (ret) {
1562 rtw89_err(rtwdev, "failed to send h2c\n");
1563 goto fail;
1564 }
1565
1566 return 0;
1567 fail:
1568 dev_kfree_skb_any(skb);
1569
1570 return ret;
1571 }
1572
1573 #define H2C_P2P_ACT_LEN 20
1574 int rtw89_fw_h2c_p2p_act(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
1575 struct ieee80211_p2p_noa_desc *desc,
1576 u8 act, u8 noa_id)
1577 {
1578 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
1579 bool p2p_type_gc = rtwvif->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT;
1580 u8 ctwindow_oppps = vif->bss_conf.p2p_noa_attr.oppps_ctwindow;
1581 struct sk_buff *skb;
1582 u8 *cmd;
1583 int ret;
1584
1585 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_P2P_ACT_LEN);
1586 if (!skb) {
1587 rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n");
1588 return -ENOMEM;
1589 }
1590 skb_put(skb, H2C_P2P_ACT_LEN);
1591 cmd = skb->data;
1592
1593 RTW89_SET_FWCMD_P2P_MACID(cmd, rtwvif->mac_id);
1594 RTW89_SET_FWCMD_P2P_P2PID(cmd, 0);
1595 RTW89_SET_FWCMD_P2P_NOAID(cmd, noa_id);
1596 RTW89_SET_FWCMD_P2P_ACT(cmd, act);
1597 RTW89_SET_FWCMD_P2P_TYPE(cmd, p2p_type_gc);
1598 RTW89_SET_FWCMD_P2P_ALL_SLEP(cmd, 0);
1599 if (desc) {
1600 RTW89_SET_FWCMD_NOA_START_TIME(cmd, desc->start_time);
1601 RTW89_SET_FWCMD_NOA_INTERVAL(cmd, desc->interval);
1602 RTW89_SET_FWCMD_NOA_DURATION(cmd, desc->duration);
1603 RTW89_SET_FWCMD_NOA_COUNT(cmd, desc->count);
1604 RTW89_SET_FWCMD_NOA_CTWINDOW(cmd, ctwindow_oppps);
1605 }
1606
1607 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1608 H2C_CAT_MAC, H2C_CL_MAC_PS,
1609 H2C_FUNC_P2P_ACT, 0, 0,
1610 H2C_P2P_ACT_LEN);
1611
1612 ret = rtw89_h2c_tx(rtwdev, skb, false);
1613 if (ret) {
1614 rtw89_err(rtwdev, "failed to send h2c\n");
1615 goto fail;
1616 }
1617
1618 return 0;
1619 fail:
1620 dev_kfree_skb_any(skb);
1621
1622 return ret;
1623 }
1624
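/* Fill the CMAC table TX path fields: single-path chips always use
 * path A; otherwise the configured antenna_tx mask (defaulting to RF_B)
 * is used, and path B mapping is set only for the RF_AB case.
 */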
1625 static void __rtw89_fw_h2c_set_tx_path(struct rtw89_dev *rtwdev,
1626 struct sk_buff *skb)
1627 {
1628 const struct rtw89_chip_info *chip = rtwdev->chip;
1629 struct rtw89_hal *hal = &rtwdev->hal;
1630 u8 ntx_path;
1631 u8 map_b;
1632
1633 if (chip->rf_path_num == 1) {
1634 ntx_path = RF_A;
1635 map_b = 0;
1636 } else {
1637 ntx_path = hal->antenna_tx ? hal->antenna_tx : RF_B;
1638 map_b = hal->antenna_tx == RF_AB ? 1 : 0;
1639 }
1640
1641 SET_CMC_TBL_NTX_PATH_EN(skb->data, ntx_path);
1642 SET_CMC_TBL_PATH_MAP_A(skb->data, 0);
1643 SET_CMC_TBL_PATH_MAP_B(skb->data, map_b);
1644 SET_CMC_TBL_PATH_MAP_C(skb->data, 0);
1645 SET_CMC_TBL_PATH_MAP_D(skb->data, 0);
1646 }
1647
1648 #define H2C_CMC_TBL_LEN 68
1649 int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev,
1650 struct rtw89_vif *rtwvif)
1651 {
1652 const struct rtw89_chip_info *chip = rtwdev->chip;
1653 struct sk_buff *skb;
1654 u8 macid = rtwvif->mac_id;
1655 int ret;
1656
1657 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
1658 if (!skb) {
1659 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
1660 return -ENOMEM;
1661 }
1662 skb_put(skb, H2C_CMC_TBL_LEN);
1663 SET_CTRL_INFO_MACID(skb->data, macid);
1664 SET_CTRL_INFO_OPERATION(skb->data, 1);
1665 if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) {
1666 SET_CMC_TBL_TXPWR_MODE(skb->data, 0);
1667 __rtw89_fw_h2c_set_tx_path(rtwdev, skb);
1668 SET_CMC_TBL_ANTSEL_A(skb->data, 0);
1669 SET_CMC_TBL_ANTSEL_B(skb->data, 0);
1670 SET_CMC_TBL_ANTSEL_C(skb->data, 0);
1671 SET_CMC_TBL_ANTSEL_D(skb->data, 0);
1672 }
1673 SET_CMC_TBL_DOPPLER_CTRL(skb->data, 0);
1674 SET_CMC_TBL_TXPWR_TOLERENCE(skb->data, 0);
1675 if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE)
1676 SET_CMC_TBL_DATA_DCM(skb->data, 0);
1677
1678 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1679 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
1680 chip->h2c_cctl_func_id, 0, 1,
1681 H2C_CMC_TBL_LEN);
1682
1683 ret = rtw89_h2c_tx(rtwdev, skb, false);
1684 if (ret) {
1685 rtw89_err(rtwdev, "failed to send h2c\n");
1686 goto fail;
1687 }
1688
1689 return 0;
1690 fail:
1691 dev_kfree_skb_any(skb);
1692
1693 return ret;
1694 }
1695
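/* Derive the per-bandwidth nominal packet padding from the peer's HE
 * capabilities: taken directly from PHY capability 9 when no PPE
 * thresholds are present, otherwise decoded from the PPET16/PPET8 pairs
 * for each RU index.
 */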
1696 static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev,
1697 struct ieee80211_sta *sta, u8 *pads)
1698 {
1699 bool ppe_th;
1700 u8 ppe16, ppe8;
1701 u8 nss = min(sta->deflink.rx_nss, rtwdev->hal.tx_nss) - 1;
1702 u8 ppe_thres_hdr = sta->deflink.he_cap.ppe_thres[0];
1703 u8 ru_bitmap;
1704 u8 n, idx, sh;
1705 u16 ppe;
1706 int i;
1707
1708 if (!sta->deflink.he_cap.has_he)
1709 return;
1710
1711 ppe_th = FIELD_GET(IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
1712 sta->deflink.he_cap.he_cap_elem.phy_cap_info[6]);
1713 if (!ppe_th) {
1714 u8 pad;
1715
1716 pad = FIELD_GET(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK,
1717 sta->deflink.he_cap.he_cap_elem.phy_cap_info[9]);
1718
1719 for (i = 0; i < RTW89_PPE_BW_NUM; i++)
1720 pads[i] = pad;
1721
1722 return;
1723 }
1724
1725 ru_bitmap = FIELD_GET(IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK, ppe_thres_hdr);
1726 n = hweight8(ru_bitmap);
1727 n = 7 + (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) * nss;
1728
1729 for (i = 0; i < RTW89_PPE_BW_NUM; i++) {
1730 if (!(ru_bitmap & BIT(i))) {
1731 pads[i] = 1;
1732 continue;
1733 }
1734
1735 idx = n >> 3;
1736 sh = n & 7;
1737 n += IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2;
1738
1739 ppe = le16_to_cpu(*((__le16 *)&sta->deflink.he_cap.ppe_thres[idx]));
1740 ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
1741 sh += IEEE80211_PPE_THRES_INFO_PPET_SIZE;
1742 ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
1743
1744 if (ppe16 != 7 && ppe8 == 7)
1745 pads[i] = 2;
1746 else if (ppe8 != 7)
1747 pads[i] = 1;
1748 else
1749 pads[i] = 0;
1750 }
1751 }
1752
1753 int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
1754 struct ieee80211_vif *vif,
1755 struct ieee80211_sta *sta)
1756 {
1757 const struct rtw89_chip_info *chip = rtwdev->chip;
1758 struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
1759 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
1760 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
1761 rtwvif->sub_entity_idx);
1762 struct sk_buff *skb;
1763 u8 pads[RTW89_PPE_BW_NUM];
1764 u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
1765 u16 lowest_rate;
1766 int ret;
1767
1768 memset(pads, 0, sizeof(pads));
1769 if (sta)
1770 __get_sta_he_pkt_padding(rtwdev, sta, pads);
1771
1772 if (vif->p2p)
1773 lowest_rate = RTW89_HW_RATE_OFDM6;
1774 else if (chan->band_type == RTW89_BAND_2G)
1775 lowest_rate = RTW89_HW_RATE_CCK1;
1776 else
1777 lowest_rate = RTW89_HW_RATE_OFDM6;
1778
1779 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
1780 if (!skb) {
1781 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
1782 return -ENOMEM;
1783 }
1784 skb_put(skb, H2C_CMC_TBL_LEN);
1785 SET_CTRL_INFO_MACID(skb->data, mac_id);
1786 SET_CTRL_INFO_OPERATION(skb->data, 1);
1787 SET_CMC_TBL_DISRTSFB(skb->data, 1);
1788 SET_CMC_TBL_DISDATAFB(skb->data, 1);
1789 SET_CMC_TBL_RTS_RTY_LOWEST_RATE(skb->data, lowest_rate);
1790 SET_CMC_TBL_RTS_TXCNT_LMT_SEL(skb->data, 0);
1791 SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 0);
1792 if (vif->type == NL80211_IFTYPE_STATION)
1793 SET_CMC_TBL_ULDL(skb->data, 1);
1794 else
1795 SET_CMC_TBL_ULDL(skb->data, 0);
1796 SET_CMC_TBL_MULTI_PORT_ID(skb->data, rtwvif->port);
1797 if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD_V1) {
1798 SET_CMC_TBL_NOMINAL_PKT_PADDING_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_20]);
1799 SET_CMC_TBL_NOMINAL_PKT_PADDING40_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_40]);
1800 SET_CMC_TBL_NOMINAL_PKT_PADDING80_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_80]);
1801 SET_CMC_TBL_NOMINAL_PKT_PADDING160_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_160]);
1802 } else if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) {
1803 SET_CMC_TBL_NOMINAL_PKT_PADDING(skb->data, pads[RTW89_CHANNEL_WIDTH_20]);
1804 SET_CMC_TBL_NOMINAL_PKT_PADDING40(skb->data, pads[RTW89_CHANNEL_WIDTH_40]);
1805 SET_CMC_TBL_NOMINAL_PKT_PADDING80(skb->data, pads[RTW89_CHANNEL_WIDTH_80]);
1806 SET_CMC_TBL_NOMINAL_PKT_PADDING160(skb->data, pads[RTW89_CHANNEL_WIDTH_160]);
1807 }
1808 if (sta)
1809 SET_CMC_TBL_BSR_QUEUE_SIZE_FORMAT(skb->data,
1810 sta->deflink.he_cap.has_he);
1811 if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE)
1812 SET_CMC_TBL_DATA_DCM(skb->data, 0);
1813
1814 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1815 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
1816 chip->h2c_cctl_func_id, 0, 1,
1817 H2C_CMC_TBL_LEN);
1818
1819 ret = rtw89_h2c_tx(rtwdev, skb, false);
1820 if (ret) {
1821 rtw89_err(rtwdev, "failed to send h2c\n");
1822 goto fail;
1823 }
1824
1825 return 0;
1826 fail:
1827 dev_kfree_skb_any(skb);
1828
1829 return ret;
1830 }
1831
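/* Update the per-STA CMAC table with the A-MPDU max TX time and/or the data
 * TX count limit, when rtwsta->cctl_tx_time or rtwsta->cctl_tx_retry_limit
 * is set.
 */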
1832 int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev,
1833 struct rtw89_sta *rtwsta)
1834 {
1835 const struct rtw89_chip_info *chip = rtwdev->chip;
1836 struct sk_buff *skb;
1837 int ret;
1838
1839 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
1840 if (!skb) {
1841 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
1842 return -ENOMEM;
1843 }
1844 skb_put(skb, H2C_CMC_TBL_LEN);
1845 SET_CTRL_INFO_MACID(skb->data, rtwsta->mac_id);
1846 SET_CTRL_INFO_OPERATION(skb->data, 1);
1847 if (rtwsta->cctl_tx_time) {
1848 SET_CMC_TBL_AMPDU_TIME_SEL(skb->data, 1);
1849 SET_CMC_TBL_AMPDU_MAX_TIME(skb->data, rtwsta->ampdu_max_time);
1850 }
1851 if (rtwsta->cctl_tx_retry_limit) {
1852 SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 1);
1853 SET_CMC_TBL_DATA_TX_CNT_LMT(skb->data, rtwsta->data_tx_cnt_lmt);
1854 }
1855
1856 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1857 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
1858 chip->h2c_cctl_func_id, 0, 1,
1859 H2C_CMC_TBL_LEN);
1860
1861 ret = rtw89_h2c_tx(rtwdev, skb, false);
1862 if (ret) {
1863 rtw89_err(rtwdev, "failed to send h2c\n");
1864 goto fail;
1865 }
1866
1867 return 0;
1868 fail:
1869 dev_kfree_skb_any(skb);
1870
1871 return ret;
1872 }
1873
1874 int rtw89_fw_h2c_txpath_cmac_tbl(struct rtw89_dev *rtwdev,
1875 struct rtw89_sta *rtwsta)
1876 {
1877 const struct rtw89_chip_info *chip = rtwdev->chip;
1878 struct sk_buff *skb;
1879 int ret;
1880
1881 if (chip->h2c_cctl_func_id != H2C_FUNC_MAC_CCTLINFO_UD)
1882 return 0;
1883
1884 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
1885 if (!skb) {
1886 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
1887 return -ENOMEM;
1888 }
1889 skb_put(skb, H2C_CMC_TBL_LEN);
1890 SET_CTRL_INFO_MACID(skb->data, rtwsta->mac_id);
1891 SET_CTRL_INFO_OPERATION(skb->data, 1);
1892
1893 __rtw89_fw_h2c_set_tx_path(rtwdev, skb);
1894
1895 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1896 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
1897 H2C_FUNC_MAC_CCTLINFO_UD, 0, 1,
1898 H2C_CMC_TBL_LEN);
1899
1900 ret = rtw89_h2c_tx(rtwdev, skb, false);
1901 if (ret) {
1902 rtw89_err(rtwdev, "failed to send h2c\n");
1903 goto fail;
1904 }
1905
1906 return 0;
1907 fail:
1908 dev_kfree_skb_any(skb);
1909
1910 return ret;
1911 }
1912
1913 #define H2C_BCN_BASE_LEN 12
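/* Push an updated beacon to firmware for the given vif: fetch the beacon from
 * mac80211, append P2P NoA attributes if present, and prepend the H2C
 * beacon-update descriptor (port, band, TIM offset, MAC ID, SSN mode, beacon
 * rate) before the frame payload.
 */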
1914 int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
1915 struct rtw89_vif *rtwvif)
1916 {
1917 struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
1918 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
1919 rtwvif->sub_entity_idx);
1920 struct sk_buff *skb;
1921 struct sk_buff *skb_beacon;
1922 u16 tim_offset;
1923 int bcn_total_len;
1924 u16 beacon_rate;
1925 void *noa_data;
1926 u8 noa_len;
1927 int ret;
1928
1929 if (vif->p2p)
1930 beacon_rate = RTW89_HW_RATE_OFDM6;
1931 else if (chan->band_type == RTW89_BAND_2G)
1932 beacon_rate = RTW89_HW_RATE_CCK1;
1933 else
1934 beacon_rate = RTW89_HW_RATE_OFDM6;
1935
1936 skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset,
1937 NULL, 0);
1938 if (!skb_beacon) {
1939 rtw89_err(rtwdev, "failed to get beacon skb\n");
1940 return -ENOMEM;
1941 }
1942
1943 noa_len = rtw89_p2p_noa_fetch(rtwvif, &noa_data);
1944 if (noa_len &&
1945 (noa_len <= skb_tailroom(skb_beacon) ||
1946 pskb_expand_head(skb_beacon, 0, noa_len, GFP_KERNEL) == 0)) {
1947 skb_put_data(skb_beacon, noa_data, noa_len);
1948 }
1949
1950 bcn_total_len = H2C_BCN_BASE_LEN + skb_beacon->len;
1951 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len);
1952 if (!skb) {
1953 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
1954 dev_kfree_skb_any(skb_beacon);
1955 return -ENOMEM;
1956 }
1957 skb_put(skb, H2C_BCN_BASE_LEN);
1958
1959 SET_BCN_UPD_PORT(skb->data, rtwvif->port);
1960 SET_BCN_UPD_MBSSID(skb->data, 0);
1961 SET_BCN_UPD_BAND(skb->data, rtwvif->mac_idx);
1962 SET_BCN_UPD_GRP_IE_OFST(skb->data, tim_offset);
1963 SET_BCN_UPD_MACID(skb->data, rtwvif->mac_id);
1964 SET_BCN_UPD_SSN_SEL(skb->data, RTW89_MGMT_HW_SSN_SEL);
1965 SET_BCN_UPD_SSN_MODE(skb->data, RTW89_MGMT_HW_SEQ_MODE);
1966 SET_BCN_UPD_RATE(skb->data, beacon_rate);
1967
1968 skb_put_data(skb, skb_beacon->data, skb_beacon->len);
1969 dev_kfree_skb_any(skb_beacon);
1970
1971 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1972 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
1973 H2C_FUNC_MAC_BCN_UPD, 0, 1,
1974 bcn_total_len);
1975
1976 ret = rtw89_h2c_tx(rtwdev, skb, false);
1977 if (ret) {
1978 rtw89_err(rtwdev, "failed to send h2c\n");
1979 dev_kfree_skb_any(skb);
1980 return ret;
1981 }
1982
1983 return 0;
1984 }
1985
1986 #define H2C_ROLE_MAINTAIN_LEN 4
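/* Report a role update (upd_mode) for the given MAC ID to firmware. For an
 * AP-mode vif, a station entry is reported as RTW89_SELF_ROLE_AP_CLIENT.
 */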
1987 int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev,
1988 struct rtw89_vif *rtwvif,
1989 struct rtw89_sta *rtwsta,
1990 enum rtw89_upd_mode upd_mode)
1991 {
1992 struct sk_buff *skb;
1993 u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
1994 u8 self_role;
1995 int ret;
1996
1997 if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) {
1998 if (rtwsta)
1999 self_role = RTW89_SELF_ROLE_AP_CLIENT;
2000 else
2001 self_role = rtwvif->self_role;
2002 } else {
2003 self_role = rtwvif->self_role;
2004 }
2005
2006 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ROLE_MAINTAIN_LEN);
2007 if (!skb) {
2008 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
2009 return -ENOMEM;
2010 }
2011 skb_put(skb, H2C_ROLE_MAINTAIN_LEN);
2012 SET_FWROLE_MAINTAIN_MACID(skb->data, mac_id);
2013 SET_FWROLE_MAINTAIN_SELF_ROLE(skb->data, self_role);
2014 SET_FWROLE_MAINTAIN_UPD_MODE(skb->data, upd_mode);
2015 SET_FWROLE_MAINTAIN_WIFI_ROLE(skb->data, rtwvif->wifi_role);
2016
2017 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2018 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
2019 H2C_FUNC_MAC_FWROLE_MAINTAIN, 0, 1,
2020 H2C_ROLE_MAINTAIN_LEN);
2021
2022 ret = rtw89_h2c_tx(rtwdev, skb, false);
2023 if (ret) {
2024 rtw89_err(rtwdev, "failed to send h2c\n");
2025 goto fail;
2026 }
2027
2028 return 0;
2029 fail:
2030 dev_kfree_skb_any(skb);
2031
2032 return ret;
2033 }
2034
2035 #define H2C_JOIN_INFO_LEN 4
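/* Tell firmware that a MAC ID joined or left (dis_conn) the BSS, along with
 * the band, WMM set, trigger capability, port, net type and self role used
 * for that connection.
 */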
2036 int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
2037 struct rtw89_sta *rtwsta, bool dis_conn)
2038 {
2039 struct sk_buff *skb;
2040 u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
2041 u8 self_role = rtwvif->self_role;
2042 u8 net_type = rtwvif->net_type;
2043 int ret;
2044
2045 if (net_type == RTW89_NET_TYPE_AP_MODE && rtwsta) {
2046 self_role = RTW89_SELF_ROLE_AP_CLIENT;
2047 net_type = dis_conn ? RTW89_NET_TYPE_NO_LINK : net_type;
2048 }
2049
2050 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_JOIN_INFO_LEN);
2051 if (!skb) {
2052 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
2053 return -ENOMEM;
2054 }
2055 skb_put(skb, H2C_JOIN_INFO_LEN);
2056 SET_JOININFO_MACID(skb->data, mac_id);
2057 SET_JOININFO_OP(skb->data, dis_conn);
2058 SET_JOININFO_BAND(skb->data, rtwvif->mac_idx);
2059 SET_JOININFO_WMM(skb->data, rtwvif->wmm);
2060 SET_JOININFO_TGR(skb->data, rtwvif->trigger);
2061 SET_JOININFO_ISHESTA(skb->data, 0);
2062 SET_JOININFO_DLBW(skb->data, 0);
2063 SET_JOININFO_TF_MAC_PAD(skb->data, 0);
2064 SET_JOININFO_DL_T_PE(skb->data, 0);
2065 SET_JOININFO_PORT_ID(skb->data, rtwvif->port);
2066 SET_JOININFO_NET_TYPE(skb->data, net_type);
2067 SET_JOININFO_WIFI_ROLE(skb->data, rtwvif->wifi_role);
2068 SET_JOININFO_SELF_ROLE(skb->data, self_role);
2069
2070 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2071 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
2072 H2C_FUNC_MAC_JOININFO, 0, 1,
2073 H2C_JOIN_INFO_LEN);
2074
2075 ret = rtw89_h2c_tx(rtwdev, skb, false);
2076 if (ret) {
2077 rtw89_err(rtwdev, "failed to send h2c\n");
2078 goto fail;
2079 }
2080
2081 return 0;
2082 fail:
2083 dev_kfree_skb_any(skb);
2084
2085 return ret;
2086 }
2087
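/* Pause or resume TX for a single MAC ID. The mask/pause bitmaps are grouped
 * into 32-bit words: 'grp' selects the word and 'sh' the bit within it.
 */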
2088 int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp,
2089 bool pause)
2090 {
2091 struct rtw89_fw_macid_pause_grp h2c = {{0}};
2092 u8 len = sizeof(struct rtw89_fw_macid_pause_grp);
2093 struct sk_buff *skb;
2094 int ret;
2095
2096 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2097 if (!skb) {
2098 rtw89_err(rtwdev, "failed to alloc skb for h2c macid pause\n");
2099 return -ENOMEM;
2100 }
2101 h2c.mask_grp[grp] = cpu_to_le32(BIT(sh));
2102 if (pause)
2103 h2c.pause_grp[grp] = cpu_to_le32(BIT(sh));
2104 skb_put_data(skb, &h2c, len);
2105
2106 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2107 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
2108 H2C_FUNC_MAC_MACID_PAUSE, 1, 0,
2109 len);
2110
2111 ret = rtw89_h2c_tx(rtwdev, skb, false);
2112 if (ret) {
2113 rtw89_err(rtwdev, "failed to send h2c\n");
2114 goto fail;
2115 }
2116
2117 return 0;
2118 fail:
2119 dev_kfree_skb_any(skb);
2120
2121 return ret;
2122 }
2123
2124 #define H2C_EDCA_LEN 12
2125 int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
2126 u8 ac, u32 val)
2127 {
2128 struct sk_buff *skb;
2129 int ret;
2130
2131 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_EDCA_LEN);
2132 if (!skb) {
2133 rtw89_err(rtwdev, "failed to alloc skb for h2c edca\n");
2134 return -ENOMEM;
2135 }
2136 skb_put(skb, H2C_EDCA_LEN);
2137 RTW89_SET_EDCA_SEL(skb->data, 0);
2138 RTW89_SET_EDCA_BAND(skb->data, rtwvif->mac_idx);
2139 RTW89_SET_EDCA_WMM(skb->data, 0);
2140 RTW89_SET_EDCA_AC(skb->data, ac);
2141 RTW89_SET_EDCA_PARAM(skb->data, val);
2142
2143 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2144 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
2145 H2C_FUNC_USR_EDCA, 0, 1,
2146 H2C_EDCA_LEN);
2147
2148 ret = rtw89_h2c_tx(rtwdev, skb, false);
2149 if (ret) {
2150 rtw89_err(rtwdev, "failed to send h2c\n");
2151 goto fail;
2152 }
2153
2154 return 0;
2155 fail:
2156 dev_kfree_skb_any(skb);
2157
2158 return ret;
2159 }
2160
2161 #define H2C_TSF32_TOGL_LEN 4
2162 int rtw89_fw_h2c_tsf32_toggle(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
2163 bool en)
2164 {
2165 struct sk_buff *skb;
2166 u16 early_us = en ? 2000 : 0;
2167 u8 *cmd;
2168 int ret;
2169
2170 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_TSF32_TOGL_LEN);
2171 if (!skb) {
2172 rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n");
2173 return -ENOMEM;
2174 }
2175 skb_put(skb, H2C_TSF32_TOGL_LEN);
2176 cmd = skb->data;
2177
2178 RTW89_SET_FWCMD_TSF32_TOGL_BAND(cmd, rtwvif->mac_idx);
2179 RTW89_SET_FWCMD_TSF32_TOGL_EN(cmd, en);
2180 RTW89_SET_FWCMD_TSF32_TOGL_PORT(cmd, rtwvif->port);
2181 RTW89_SET_FWCMD_TSF32_TOGL_EARLY(cmd, early_us);
2182
2183 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2184 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
2185 H2C_FUNC_TSF32_TOGL, 0, 0,
2186 H2C_TSF32_TOGL_LEN);
2187
2188 ret = rtw89_h2c_tx(rtwdev, skb, false);
2189 if (ret) {
2190 rtw89_err(rtwdev, "failed to send h2c\n");
2191 goto fail;
2192 }
2193
2194 return 0;
2195 fail:
2196 dev_kfree_skb_any(skb);
2197
2198 return ret;
2199 }
2200
2201 #define H2C_OFLD_CFG_LEN 8
2202 int rtw89_fw_h2c_set_ofld_cfg(struct rtw89_dev *rtwdev)
2203 {
2204 static const u8 cfg[] = {0x09, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x00, 0x00};
2205 struct sk_buff *skb;
2206 int ret;
2207
2208 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_OFLD_CFG_LEN);
2209 if (!skb) {
2210 rtw89_err(rtwdev, "failed to alloc skb for h2c ofld\n");
2211 return -ENOMEM;
2212 }
2213 skb_put_data(skb, cfg, H2C_OFLD_CFG_LEN);
2214
2215 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2216 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
2217 H2C_FUNC_OFLD_CFG, 0, 1,
2218 H2C_OFLD_CFG_LEN);
2219
2220 ret = rtw89_h2c_tx(rtwdev, skb, false);
2221 if (ret) {
2222 rtw89_err(rtwdev, "failed to send h2c\n");
2223 goto fail;
2224 }
2225
2226 return 0;
2227 fail:
2228 dev_kfree_skb_any(skb);
2229
2230 return ret;
2231 }
2232
2233 int rtw89_fw_h2c_set_bcn_fltr_cfg(struct rtw89_dev *rtwdev,
2234 struct ieee80211_vif *vif,
2235 bool connect)
2236 {
2237 struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif);
2238 struct ieee80211_bss_conf *bss_conf = vif ? &vif->bss_conf : NULL;
2239 struct rtw89_h2c_bcnfltr *h2c;
2240 u32 len = sizeof(*h2c);
2241 struct sk_buff *skb;
2242 int ret;
2243
2244 if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw))
2245 return -EINVAL;
2246
2247 if (!rtwvif || !bss_conf || rtwvif->net_type != RTW89_NET_TYPE_INFRA)
2248 return -EINVAL;
2249
2250 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2251 if (!skb) {
2252 rtw89_err(rtwdev, "failed to alloc skb for h2c bcn filter\n");
2253 return -ENOMEM;
2254 }
2255
2256 skb_put(skb, len);
2257 h2c = (struct rtw89_h2c_bcnfltr *)skb->data;
2258
2259 h2c->w0 = le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_RSSI) |
2260 le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_BCN) |
2261 le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_EN) |
2262 le32_encode_bits(RTW89_BCN_FLTR_OFFLOAD_MODE_DEFAULT,
2263 RTW89_H2C_BCNFLTR_W0_MODE) |
2264 le32_encode_bits(RTW89_BCN_LOSS_CNT, RTW89_H2C_BCNFLTR_W0_BCN_LOSS_CNT) |
2265 le32_encode_bits(bss_conf->cqm_rssi_hyst, RTW89_H2C_BCNFLTR_W0_RSSI_HYST) |
2266 le32_encode_bits(bss_conf->cqm_rssi_thold + MAX_RSSI,
2267 RTW89_H2C_BCNFLTR_W0_RSSI_THRESHOLD) |
2268 le32_encode_bits(rtwvif->mac_id, RTW89_H2C_BCNFLTR_W0_MAC_ID);
2269
2270 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2271 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
2272 H2C_FUNC_CFG_BCNFLTR, 0, 1, len);
2273
2274 ret = rtw89_h2c_tx(rtwdev, skb, false);
2275 if (ret) {
2276 rtw89_err(rtwdev, "failed to send h2c\n");
2277 goto fail;
2278 }
2279
2280 return 0;
2281 fail:
2282 dev_kfree_skb_any(skb);
2283
2284 return ret;
2285 }
2286
2287 int rtw89_fw_h2c_rssi_offload(struct rtw89_dev *rtwdev,
2288 struct rtw89_rx_phy_ppdu *phy_ppdu)
2289 {
2290 struct rtw89_h2c_ofld_rssi *h2c;
2291 u32 len = sizeof(*h2c);
2292 struct sk_buff *skb;
2293 s8 rssi;
2294 int ret;
2295
2296 if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw))
2297 return -EINVAL;
2298
2299 if (!phy_ppdu)
2300 return -EINVAL;
2301
2302 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2303 if (!skb) {
2304 rtw89_err(rtwdev, "failed to alloc skb for h2c rssi\n");
2305 return -ENOMEM;
2306 }
2307
2308 rssi = phy_ppdu->rssi_avg >> RSSI_FACTOR;
2309 skb_put(skb, len);
2310 h2c = (struct rtw89_h2c_ofld_rssi *)skb->data;
2311
2312 h2c->w0 = le32_encode_bits(phy_ppdu->mac_id, RTW89_H2C_OFLD_RSSI_W0_MACID) |
2313 le32_encode_bits(1, RTW89_H2C_OFLD_RSSI_W0_NUM);
2314 h2c->w1 = le32_encode_bits(rssi, RTW89_H2C_OFLD_RSSI_W1_VAL);
2315
2316 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2317 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
2318 H2C_FUNC_OFLD_RSSI, 0, 1, len);
2319
2320 ret = rtw89_h2c_tx(rtwdev, skb, false);
2321 if (ret) {
2322 rtw89_err(rtwdev, "failed to send h2c\n");
2323 goto fail;
2324 }
2325
2326 return 0;
2327 fail:
2328 dev_kfree_skb_any(skb);
2329
2330 return ret;
2331 }
2332
2333 int rtw89_fw_h2c_tp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
2334 {
2335 struct rtw89_traffic_stats *stats = &rtwvif->stats;
2336 struct rtw89_h2c_ofld *h2c;
2337 u32 len = sizeof(*h2c);
2338 struct sk_buff *skb;
2339 int ret;
2340
2341 if (rtwvif->net_type != RTW89_NET_TYPE_INFRA)
2342 return -EINVAL;
2343
2344 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2345 if (!skb) {
2346 rtw89_err(rtwdev, "failed to alloc skb for h2c tp\n");
2347 return -ENOMEM;
2348 }
2349
2350 skb_put(skb, len);
2351 h2c = (struct rtw89_h2c_ofld *)skb->data;
2352
2353 h2c->w0 = le32_encode_bits(rtwvif->mac_id, RTW89_H2C_OFLD_W0_MAC_ID) |
2354 le32_encode_bits(stats->tx_throughput, RTW89_H2C_OFLD_W0_TX_TP) |
2355 le32_encode_bits(stats->rx_throughput, RTW89_H2C_OFLD_W0_RX_TP);
2356
2357 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2358 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
2359 H2C_FUNC_OFLD_TP, 0, 1, len);
2360
2361 ret = rtw89_h2c_tx(rtwdev, skb, false);
2362 if (ret) {
2363 rtw89_err(rtwdev, "failed to send h2c\n");
2364 goto fail;
2365 }
2366
2367 return 0;
2368 fail:
2369 dev_kfree_skb_any(skb);
2370
2371 return ret;
2372 }
2373
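/* Program the firmware rate-adaptation engine for one MAC ID: mode, bandwidth
 * capability, rate mask, SGI/LDPC/STBC capabilities and fixed GI/LTF. When
 * 'csi' is set, the fixed CSI rate fields are filled as well; BE-generation
 * chips use the v1 layout with extra EHT mode/bandwidth fields.
 */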
2374 int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi)
2375 {
2376 const struct rtw89_chip_info *chip = rtwdev->chip;
2377 struct rtw89_h2c_ra_v1 *h2c_v1;
2378 struct rtw89_h2c_ra *h2c;
2379 u32 len = sizeof(*h2c);
2380 bool format_v1 = false;
2381 struct sk_buff *skb;
2382 int ret;
2383
2384 if (chip->chip_gen == RTW89_CHIP_BE) {
2385 len = sizeof(*h2c_v1);
2386 format_v1 = true;
2387 }
2388
2389 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2390 if (!skb) {
2391 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
2392 return -ENOMEM;
2393 }
2394 skb_put(skb, len);
2395 h2c = (struct rtw89_h2c_ra *)skb->data;
2396 rtw89_debug(rtwdev, RTW89_DBG_RA,
2397 "ra cmd msk: %llx ", ra->ra_mask);
2398
2399 h2c->w0 = le32_encode_bits(ra->mode_ctrl, RTW89_H2C_RA_W0_MODE) |
2400 le32_encode_bits(ra->bw_cap, RTW89_H2C_RA_W0_BW_CAP) |
2401 le32_encode_bits(ra->macid, RTW89_H2C_RA_W0_MACID) |
2402 le32_encode_bits(ra->dcm_cap, RTW89_H2C_RA_W0_DCM) |
2403 le32_encode_bits(ra->er_cap, RTW89_H2C_RA_W0_ER) |
2404 le32_encode_bits(ra->init_rate_lv, RTW89_H2C_RA_W0_INIT_RATE_LV) |
2405 le32_encode_bits(ra->upd_all, RTW89_H2C_RA_W0_UPD_ALL) |
2406 le32_encode_bits(ra->en_sgi, RTW89_H2C_RA_W0_SGI) |
2407 le32_encode_bits(ra->ldpc_cap, RTW89_H2C_RA_W0_LDPC) |
2408 le32_encode_bits(ra->stbc_cap, RTW89_H2C_RA_W0_STBC) |
2409 le32_encode_bits(ra->ss_num, RTW89_H2C_RA_W0_SS_NUM) |
2410 le32_encode_bits(ra->giltf, RTW89_H2C_RA_W0_GILTF) |
2411 le32_encode_bits(ra->upd_bw_nss_mask, RTW89_H2C_RA_W0_UPD_BW_NSS_MASK) |
2412 le32_encode_bits(ra->upd_mask, RTW89_H2C_RA_W0_UPD_MASK);
2413 h2c->w1 = le32_encode_bits(ra->ra_mask, RTW89_H2C_RA_W1_RAMASK_LO32);
2414 h2c->w2 = le32_encode_bits(ra->ra_mask >> 32, RTW89_H2C_RA_W2_RAMASK_HI32);
2415 h2c->w3 = le32_encode_bits(ra->fix_giltf_en, RTW89_H2C_RA_W3_FIX_GILTF_EN) |
2416 le32_encode_bits(ra->fix_giltf, RTW89_H2C_RA_W3_FIX_GILTF);
2417
2418 if (!format_v1)
2419 goto csi;
2420
2421 h2c_v1 = (struct rtw89_h2c_ra_v1 *)h2c;
2422 h2c_v1->w4 = le32_encode_bits(ra->mode_ctrl, RTW89_H2C_RA_V1_W4_MODE_EHT) |
2423 le32_encode_bits(ra->bw_cap, RTW89_H2C_RA_V1_W4_BW_EHT);
2424
2425 csi:
2426 if (!csi)
2427 goto done;
2428
2429 h2c->w2 |= le32_encode_bits(1, RTW89_H2C_RA_W2_BFEE_CSI_CTL);
2430 h2c->w3 |= le32_encode_bits(ra->band_num, RTW89_H2C_RA_W3_BAND_NUM) |
2431 le32_encode_bits(ra->cr_tbl_sel, RTW89_H2C_RA_W3_CR_TBL_SEL) |
2432 le32_encode_bits(ra->fixed_csi_rate_en, RTW89_H2C_RA_W3_FIXED_CSI_RATE_EN) |
2433 le32_encode_bits(ra->ra_csi_rate_en, RTW89_H2C_RA_W3_RA_CSI_RATE_EN) |
2434 le32_encode_bits(ra->csi_mcs_ss_idx, RTW89_H2C_RA_W3_FIXED_CSI_MCS_SS_IDX) |
2435 le32_encode_bits(ra->csi_mode, RTW89_H2C_RA_W3_FIXED_CSI_MODE) |
2436 le32_encode_bits(ra->csi_gi_ltf, RTW89_H2C_RA_W3_FIXED_CSI_GI_LTF) |
2437 le32_encode_bits(ra->csi_bw, RTW89_H2C_RA_W3_FIXED_CSI_BW);
2438
2439 done:
2440 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2441 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RA,
2442 H2C_FUNC_OUTSRC_RA_MACIDCFG, 0, 0,
2443 len);
2444
2445 ret = rtw89_h2c_tx(rtwdev, skb, false);
2446 if (ret) {
2447 rtw89_err(rtwdev, "failed to send h2c\n");
2448 goto fail;
2449 }
2450
2451 return 0;
2452 fail:
2453 dev_kfree_skb_any(skb);
2454
2455 return ret;
2456 }
2457
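/* Send the BT-coexistence driver init info (CXDRVINFO_INIT) to firmware:
 * antenna layout, module/RFE description and the WL/BT bring-up state.
 */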
2458 int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev)
2459 {
2460 struct rtw89_btc *btc = &rtwdev->btc;
2461 struct rtw89_btc_dm *dm = &btc->dm;
2462 struct rtw89_btc_init_info *init_info = &dm->init_info;
2463 struct rtw89_btc_module *module = &init_info->module;
2464 struct rtw89_btc_ant_info *ant = &module->ant;
2465 struct rtw89_h2c_cxinit *h2c;
2466 u32 len = sizeof(*h2c);
2467 struct sk_buff *skb;
2468 int ret;
2469
2470 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2471 if (!skb) {
2472 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init\n");
2473 return -ENOMEM;
2474 }
2475 skb_put(skb, len);
2476 h2c = (struct rtw89_h2c_cxinit *)skb->data;
2477
2478 h2c->hdr.type = CXDRVINFO_INIT;
2479 h2c->hdr.len = len - H2C_LEN_CXDRVHDR;
2480
2481 h2c->ant_type = ant->type;
2482 h2c->ant_num = ant->num;
2483 h2c->ant_iso = ant->isolation;
2484 h2c->ant_info =
2485 u8_encode_bits(ant->single_pos, RTW89_H2C_CXINIT_ANT_INFO_POS) |
2486 u8_encode_bits(ant->diversity, RTW89_H2C_CXINIT_ANT_INFO_DIVERSITY) |
2487 u8_encode_bits(ant->btg_pos, RTW89_H2C_CXINIT_ANT_INFO_BTG_POS) |
2488 u8_encode_bits(ant->stream_cnt, RTW89_H2C_CXINIT_ANT_INFO_STREAM_CNT);
2489
2490 h2c->mod_rfe = module->rfe_type;
2491 h2c->mod_cv = module->cv;
2492 h2c->mod_info =
2493 u8_encode_bits(module->bt_solo, RTW89_H2C_CXINIT_MOD_INFO_BT_SOLO) |
2494 u8_encode_bits(module->bt_pos, RTW89_H2C_CXINIT_MOD_INFO_BT_POS) |
2495 u8_encode_bits(module->switch_type, RTW89_H2C_CXINIT_MOD_INFO_SW_TYPE) |
2496 u8_encode_bits(module->wa_type, RTW89_H2C_CXINIT_MOD_INFO_WA_TYPE);
2497 h2c->mod_adie_kt = module->kt_ver_adie;
2498 h2c->wl_gch = init_info->wl_guard_ch;
2499
2500 h2c->info =
2501 u8_encode_bits(init_info->wl_only, RTW89_H2C_CXINIT_INFO_WL_ONLY) |
2502 u8_encode_bits(init_info->wl_init_ok, RTW89_H2C_CXINIT_INFO_WL_INITOK) |
2503 u8_encode_bits(init_info->dbcc_en, RTW89_H2C_CXINIT_INFO_DBCC_EN) |
2504 u8_encode_bits(init_info->cx_other, RTW89_H2C_CXINIT_INFO_CX_OTHER) |
2505 u8_encode_bits(init_info->bt_only, RTW89_H2C_CXINIT_INFO_BT_ONLY);
2506
2507 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2508 H2C_CAT_OUTSRC, BTFC_SET,
2509 SET_DRV_INFO, 0, 0,
2510 len);
2511
2512 ret = rtw89_h2c_tx(rtwdev, skb, false);
2513 if (ret) {
2514 rtw89_err(rtwdev, "failed to send h2c\n");
2515 goto fail;
2516 }
2517
2518 return 0;
2519 fail:
2520 dev_kfree_skb_any(skb);
2521
2522 return ret;
2523 }
2524
2525 #define PORT_DATA_OFFSET 4
2526 #define H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN 12
2527 #define H2C_LEN_CXDRVINFO_ROLE_SIZE(max_role_num) \
2528 (4 + 12 * (max_role_num) + H2C_LEN_CXDRVHDR)
2529
2530 int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev)
2531 {
2532 struct rtw89_btc *btc = &rtwdev->btc;
2533 const struct rtw89_btc_ver *ver = btc->ver;
2534 struct rtw89_btc_wl_info *wl = &btc->cx.wl;
2535 struct rtw89_btc_wl_role_info *role_info = &wl->role_info;
2536 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role;
2537 struct rtw89_btc_wl_active_role *active = role_info->active_role;
2538 struct sk_buff *skb;
2539 u32 len;
2540 u8 offset = 0;
2541 u8 *cmd;
2542 int ret;
2543 int i;
2544
2545 len = H2C_LEN_CXDRVINFO_ROLE_SIZE(ver->max_role_num);
2546
2547 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2548 if (!skb) {
2549 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n");
2550 return -ENOMEM;
2551 }
2552 skb_put(skb, len);
2553 cmd = skb->data;
2554
2555 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE);
2556 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR);
2557
2558 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt);
2559 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode);
2560
2561 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none);
2562 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station);
2563 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap);
2564 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap);
2565 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc);
2566 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master);
2567 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh);
2568 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter);
2569 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device);
2570 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc);
2571 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go);
2572 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan);
2573
2574 for (i = 0; i < RTW89_PORT_NUM; i++, active++) {
2575 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset);
2576 RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset);
2577 RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset);
2578 RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset);
2579 RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset);
2580 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset);
2581 RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset);
2582 RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset);
2583 RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset);
2584 RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset);
2585 RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset);
2586 RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset);
2587 RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset);
2588 }
2589
2590 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2591 H2C_CAT_OUTSRC, BTFC_SET,
2592 SET_DRV_INFO, 0, 0,
2593 len);
2594
2595 ret = rtw89_h2c_tx(rtwdev, skb, false);
2596 if (ret) {
2597 rtw89_err(rtwdev, "failed to send h2c\n");
2598 goto fail;
2599 }
2600
2601 return 0;
2602 fail:
2603 dev_kfree_skb_any(skb);
2604
2605 return ret;
2606 }
2607
2608 #define H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(max_role_num) \
2609 (4 + 16 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR)
2610
2611 int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev)
2612 {
2613 struct rtw89_btc *btc = &rtwdev->btc;
2614 const struct rtw89_btc_ver *ver = btc->ver;
2615 struct rtw89_btc_wl_info *wl = &btc->cx.wl;
2616 struct rtw89_btc_wl_role_info_v1 *role_info = &wl->role_info_v1;
2617 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role;
2618 struct rtw89_btc_wl_active_role_v1 *active = role_info->active_role_v1;
2619 struct sk_buff *skb;
2620 u32 len;
2621 u8 *cmd, offset;
2622 int ret;
2623 int i;
2624
2625 len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(ver->max_role_num);
2626
2627 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2628 if (!skb) {
2629 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n");
2630 return -ENOMEM;
2631 }
2632 skb_put(skb, len);
2633 cmd = skb->data;
2634
2635 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE);
2636 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR);
2637
2638 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt);
2639 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode);
2640
2641 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none);
2642 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station);
2643 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap);
2644 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap);
2645 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc);
2646 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master);
2647 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh);
2648 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter);
2649 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device);
2650 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc);
2651 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go);
2652 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan);
2653
2654 offset = PORT_DATA_OFFSET;
2655 for (i = 0; i < RTW89_PORT_NUM; i++, active++) {
2656 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset);
2657 RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset);
2658 RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset);
2659 RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset);
2660 RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset);
2661 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset);
2662 RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset);
2663 RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset);
2664 RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset);
2665 RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset);
2666 RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset);
2667 RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset);
2668 RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset);
2669 RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR(cmd, active->noa_duration, i, offset);
2670 }
2671
2672 offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN;
2673 RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset);
2674 RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset);
2675 RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset);
2676 RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset);
2677 RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset);
2678 RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset);
2679
2680 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2681 H2C_CAT_OUTSRC, BTFC_SET,
2682 SET_DRV_INFO, 0, 0,
2683 len);
2684
2685 ret = rtw89_h2c_tx(rtwdev, skb, false);
2686 if (ret) {
2687 rtw89_err(rtwdev, "failed to send h2c\n");
2688 goto fail;
2689 }
2690
2691 return 0;
2692 fail:
2693 dev_kfree_skb_any(skb);
2694
2695 return ret;
2696 }
2697
2698 #define H2C_LEN_CXDRVINFO_ROLE_SIZE_V2(max_role_num) \
2699 (4 + 8 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR)
2700
2701 int rtw89_fw_h2c_cxdrv_role_v2(struct rtw89_dev *rtwdev)
2702 {
2703 struct rtw89_btc *btc = &rtwdev->btc;
2704 const struct rtw89_btc_ver *ver = btc->ver;
2705 struct rtw89_btc_wl_info *wl = &btc->cx.wl;
2706 struct rtw89_btc_wl_role_info_v2 *role_info = &wl->role_info_v2;
2707 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role;
2708 struct rtw89_btc_wl_active_role_v2 *active = role_info->active_role_v2;
2709 struct sk_buff *skb;
2710 u32 len;
2711 u8 *cmd, offset;
2712 int ret;
2713 int i;
2714
2715 len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V2(ver->max_role_num);
2716
2717 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2718 if (!skb) {
2719 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n");
2720 return -ENOMEM;
2721 }
2722 skb_put(skb, len);
2723 cmd = skb->data;
2724
2725 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE);
2726 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR);
2727
2728 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt);
2729 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode);
2730
2731 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none);
2732 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station);
2733 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap);
2734 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap);
2735 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc);
2736 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master);
2737 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh);
2738 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter);
2739 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device);
2740 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc);
2741 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go);
2742 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan);
2743
2744 offset = PORT_DATA_OFFSET;
2745 for (i = 0; i < RTW89_PORT_NUM; i++, active++) {
2746 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED_V2(cmd, active->connected, i, offset);
2747 RTW89_SET_FWCMD_CXROLE_ACT_PID_V2(cmd, active->pid, i, offset);
2748 RTW89_SET_FWCMD_CXROLE_ACT_PHY_V2(cmd, active->phy, i, offset);
2749 RTW89_SET_FWCMD_CXROLE_ACT_NOA_V2(cmd, active->noa, i, offset);
2750 RTW89_SET_FWCMD_CXROLE_ACT_BAND_V2(cmd, active->band, i, offset);
2751 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS_V2(cmd, active->client_ps, i, offset);
2752 RTW89_SET_FWCMD_CXROLE_ACT_BW_V2(cmd, active->bw, i, offset);
2753 RTW89_SET_FWCMD_CXROLE_ACT_ROLE_V2(cmd, active->role, i, offset);
2754 RTW89_SET_FWCMD_CXROLE_ACT_CH_V2(cmd, active->ch, i, offset);
2755 RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR_V2(cmd, active->noa_duration, i, offset);
2756 }
2757
2758 offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN;
2759 RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset);
2760 RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset);
2761 RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset);
2762 RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset);
2763 RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset);
2764 RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset);
2765
2766 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2767 H2C_CAT_OUTSRC, BTFC_SET,
2768 SET_DRV_INFO, 0, 0,
2769 len);
2770
2771 ret = rtw89_h2c_tx(rtwdev, skb, false);
2772 if (ret) {
2773 rtw89_err(rtwdev, "failed to send h2c\n");
2774 goto fail;
2775 }
2776
2777 return 0;
2778 fail:
2779 dev_kfree_skb_any(skb);
2780
2781 return ret;
2782 }
2783
2784 #define H2C_LEN_CXDRVINFO_CTRL (4 + H2C_LEN_CXDRVHDR)
2785 int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev)
2786 {
2787 struct rtw89_btc *btc = &rtwdev->btc;
2788 const struct rtw89_btc_ver *ver = btc->ver;
2789 struct rtw89_btc_ctrl *ctrl = &btc->ctrl;
2790 struct sk_buff *skb;
2791 u8 *cmd;
2792 int ret;
2793
2794 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_CTRL);
2795 if (!skb) {
2796 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n");
2797 return -ENOMEM;
2798 }
2799 skb_put(skb, H2C_LEN_CXDRVINFO_CTRL);
2800 cmd = skb->data;
2801
2802 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_CTRL);
2803 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_CTRL - H2C_LEN_CXDRVHDR);
2804
2805 RTW89_SET_FWCMD_CXCTRL_MANUAL(cmd, ctrl->manual);
2806 RTW89_SET_FWCMD_CXCTRL_IGNORE_BT(cmd, ctrl->igno_bt);
2807 RTW89_SET_FWCMD_CXCTRL_ALWAYS_FREERUN(cmd, ctrl->always_freerun);
2808 if (ver->fcxctrl == 0)
2809 RTW89_SET_FWCMD_CXCTRL_TRACE_STEP(cmd, ctrl->trace_step);
2810
2811 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2812 H2C_CAT_OUTSRC, BTFC_SET,
2813 SET_DRV_INFO, 0, 0,
2814 H2C_LEN_CXDRVINFO_CTRL);
2815
2816 ret = rtw89_h2c_tx(rtwdev, skb, false);
2817 if (ret) {
2818 rtw89_err(rtwdev, "failed to send h2c\n");
2819 goto fail;
2820 }
2821
2822 return 0;
2823 fail:
2824 dev_kfree_skb_any(skb);
2825
2826 return ret;
2827 }
2828
2829 #define H2C_LEN_CXDRVINFO_TRX (28 + H2C_LEN_CXDRVHDR)
2830 int rtw89_fw_h2c_cxdrv_trx(struct rtw89_dev *rtwdev)
2831 {
2832 struct rtw89_btc *btc = &rtwdev->btc;
2833 struct rtw89_btc_trx_info *trx = &btc->dm.trx_info;
2834 struct sk_buff *skb;
2835 u8 *cmd;
2836 int ret;
2837
2838 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_TRX);
2839 if (!skb) {
2840 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_trx\n");
2841 return -ENOMEM;
2842 }
2843 skb_put(skb, H2C_LEN_CXDRVINFO_TRX);
2844 cmd = skb->data;
2845
2846 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_TRX);
2847 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_TRX - H2C_LEN_CXDRVHDR);
2848
2849 RTW89_SET_FWCMD_CXTRX_TXLV(cmd, trx->tx_lvl);
2850 RTW89_SET_FWCMD_CXTRX_RXLV(cmd, trx->rx_lvl);
2851 RTW89_SET_FWCMD_CXTRX_WLRSSI(cmd, trx->wl_rssi);
2852 RTW89_SET_FWCMD_CXTRX_BTRSSI(cmd, trx->bt_rssi);
2853 RTW89_SET_FWCMD_CXTRX_TXPWR(cmd, trx->tx_power);
2854 RTW89_SET_FWCMD_CXTRX_RXGAIN(cmd, trx->rx_gain);
2855 RTW89_SET_FWCMD_CXTRX_BTTXPWR(cmd, trx->bt_tx_power);
2856 RTW89_SET_FWCMD_CXTRX_BTRXGAIN(cmd, trx->bt_rx_gain);
2857 RTW89_SET_FWCMD_CXTRX_CN(cmd, trx->cn);
2858 RTW89_SET_FWCMD_CXTRX_NHM(cmd, trx->nhm);
2859 RTW89_SET_FWCMD_CXTRX_BTPROFILE(cmd, trx->bt_profile);
2860 RTW89_SET_FWCMD_CXTRX_RSVD2(cmd, trx->rsvd2);
2861 RTW89_SET_FWCMD_CXTRX_TXRATE(cmd, trx->tx_rate);
2862 RTW89_SET_FWCMD_CXTRX_RXRATE(cmd, trx->rx_rate);
2863 RTW89_SET_FWCMD_CXTRX_TXTP(cmd, trx->tx_tp);
2864 RTW89_SET_FWCMD_CXTRX_RXTP(cmd, trx->rx_tp);
2865 RTW89_SET_FWCMD_CXTRX_RXERRRA(cmd, trx->rx_err_ratio);
2866
2867 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2868 H2C_CAT_OUTSRC, BTFC_SET,
2869 SET_DRV_INFO, 0, 0,
2870 H2C_LEN_CXDRVINFO_TRX);
2871
2872 ret = rtw89_h2c_tx(rtwdev, skb, false);
2873 if (ret) {
2874 rtw89_err(rtwdev, "failed to send h2c\n");
2875 goto fail;
2876 }
2877
2878 return 0;
2879 fail:
2880 dev_kfree_skb_any(skb);
2881
2882 return ret;
2883 }
2884
2885 #define H2C_LEN_CXDRVINFO_RFK (4 + H2C_LEN_CXDRVHDR)
2886 int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev)
2887 {
2888 struct rtw89_btc *btc = &rtwdev->btc;
2889 struct rtw89_btc_wl_info *wl = &btc->cx.wl;
2890 struct rtw89_btc_wl_rfk_info *rfk_info = &wl->rfk_info;
2891 struct sk_buff *skb;
2892 u8 *cmd;
2893 int ret;
2894
2895 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_RFK);
2896 if (!skb) {
2897 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_rfk\n");
2898 return -ENOMEM;
2899 }
2900 skb_put(skb, H2C_LEN_CXDRVINFO_RFK);
2901 cmd = skb->data;
2902
2903 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_RFK);
2904 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_RFK - H2C_LEN_CXDRVHDR);
2905
2906 RTW89_SET_FWCMD_CXRFK_STATE(cmd, rfk_info->state);
2907 RTW89_SET_FWCMD_CXRFK_PATH_MAP(cmd, rfk_info->path_map);
2908 RTW89_SET_FWCMD_CXRFK_PHY_MAP(cmd, rfk_info->phy_map);
2909 RTW89_SET_FWCMD_CXRFK_BAND(cmd, rfk_info->band);
2910 RTW89_SET_FWCMD_CXRFK_TYPE(cmd, rfk_info->type);
2911
2912 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2913 H2C_CAT_OUTSRC, BTFC_SET,
2914 SET_DRV_INFO, 0, 0,
2915 H2C_LEN_CXDRVINFO_RFK);
2916
2917 ret = rtw89_h2c_tx(rtwdev, skb, false);
2918 if (ret) {
2919 rtw89_err(rtwdev, "failed to send h2c\n");
2920 goto fail;
2921 }
2922
2923 return 0;
2924 fail:
2925 dev_kfree_skb_any(skb);
2926
2927 return ret;
2928 }
2929
2930 #define H2C_LEN_PKT_OFLD 4
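/* Remove a previously offloaded packet from firmware by index and wait for
 * the completion event before releasing the driver-side ID bitmap entry.
 */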
2931 int rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev *rtwdev, u8 id)
2932 {
2933 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
2934 struct sk_buff *skb;
2935 unsigned int cond;
2936 u8 *cmd;
2937 int ret;
2938
2939 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD);
2940 if (!skb) {
2941 rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n");
2942 return -ENOMEM;
2943 }
2944 skb_put(skb, H2C_LEN_PKT_OFLD);
2945 cmd = skb->data;
2946
2947 RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, id);
2948 RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_DEL);
2949
2950 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2951 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
2952 H2C_FUNC_PACKET_OFLD, 1, 1,
2953 H2C_LEN_PKT_OFLD);
2954
2955 cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(id, RTW89_PKT_OFLD_OP_DEL);
2956
2957 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
2958 if (ret < 0) {
2959 rtw89_debug(rtwdev, RTW89_DBG_FW,
2960 "failed to del pkt ofld: id %d, ret %d\n",
2961 id, ret);
2962 return ret;
2963 }
2964
2965 rtw89_core_release_bit_map(rtwdev->pkt_offload, id);
2966 return 0;
2967 }
2968
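/* Offload a packet template to firmware: allocate an ID from the driver
 * bitmap, copy skb_ofld's payload after the packet-offload descriptor and
 * wait for the firmware acknowledgement. The allocated ID is returned via
 * *id and released again on any failure.
 */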
2969 int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id,
2970 struct sk_buff *skb_ofld)
2971 {
2972 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
2973 struct sk_buff *skb;
2974 unsigned int cond;
2975 u8 *cmd;
2976 u8 alloc_id;
2977 int ret;
2978
2979 alloc_id = rtw89_core_acquire_bit_map(rtwdev->pkt_offload,
2980 RTW89_MAX_PKT_OFLD_NUM);
2981 if (alloc_id == RTW89_MAX_PKT_OFLD_NUM)
2982 return -ENOSPC;
2983
2984 *id = alloc_id;
2985
2986 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD + skb_ofld->len);
2987 if (!skb) {
2988 rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n");
2989 rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id);
2990 return -ENOMEM;
2991 }
2992 skb_put(skb, H2C_LEN_PKT_OFLD);
2993 cmd = skb->data;
2994
2995 RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, alloc_id);
2996 RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_ADD);
2997 RTW89_SET_FWCMD_PACKET_OFLD_PKT_LENGTH(cmd, skb_ofld->len);
2998 skb_put_data(skb, skb_ofld->data, skb_ofld->len);
2999
3000 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3001 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
3002 H2C_FUNC_PACKET_OFLD, 1, 1,
3003 H2C_LEN_PKT_OFLD + skb_ofld->len);
3004
3005 cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(alloc_id, RTW89_PKT_OFLD_OP_ADD);
3006
3007 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
3008 if (ret < 0) {
3009 rtw89_debug(rtwdev, RTW89_DBG_FW,
3010 "failed to add pkt ofld: id %d, ret %d\n",
3011 alloc_id, ret);
3012 rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id);
3013 return ret;
3014 }
3015
3016 return 0;
3017 }
3018
3019 #define H2C_LEN_SCAN_LIST_OFFLOAD 4
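/* Download the scan channel list: one RTW89_MAC_CHINFO_SIZE record per
 * channel (period, bandwidth, primary/central channel, probe packet IDs,
 * DFS and TX-null flags), then wait for the ADD_SCANOFLD_CH completion.
 */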
3020 int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int len,
3021 struct list_head *chan_list)
3022 {
3023 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
3024 struct rtw89_mac_chinfo *ch_info;
3025 struct sk_buff *skb;
3026 int skb_len = H2C_LEN_SCAN_LIST_OFFLOAD + len * RTW89_MAC_CHINFO_SIZE;
3027 unsigned int cond;
3028 u8 *cmd;
3029 int ret;
3030
3031 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len);
3032 if (!skb) {
3033 rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n");
3034 return -ENOMEM;
3035 }
3036 skb_put(skb, H2C_LEN_SCAN_LIST_OFFLOAD);
3037 cmd = skb->data;
3038
3039 RTW89_SET_FWCMD_SCANOFLD_CH_NUM(cmd, len);
3040 /* in unit of 4 bytes */
3041 RTW89_SET_FWCMD_SCANOFLD_CH_SIZE(cmd, RTW89_MAC_CHINFO_SIZE / 4);
3042
3043 list_for_each_entry(ch_info, chan_list, list) {
3044 cmd = skb_put(skb, RTW89_MAC_CHINFO_SIZE);
3045
3046 RTW89_SET_FWCMD_CHINFO_PERIOD(cmd, ch_info->period);
3047 RTW89_SET_FWCMD_CHINFO_DWELL(cmd, ch_info->dwell_time);
3048 RTW89_SET_FWCMD_CHINFO_CENTER_CH(cmd, ch_info->central_ch);
3049 RTW89_SET_FWCMD_CHINFO_PRI_CH(cmd, ch_info->pri_ch);
3050 RTW89_SET_FWCMD_CHINFO_BW(cmd, ch_info->bw);
3051 RTW89_SET_FWCMD_CHINFO_ACTION(cmd, ch_info->notify_action);
3052 RTW89_SET_FWCMD_CHINFO_NUM_PKT(cmd, ch_info->num_pkt);
3053 RTW89_SET_FWCMD_CHINFO_TX(cmd, ch_info->tx_pkt);
3054 RTW89_SET_FWCMD_CHINFO_PAUSE_DATA(cmd, ch_info->pause_data);
3055 RTW89_SET_FWCMD_CHINFO_BAND(cmd, ch_info->ch_band);
3056 RTW89_SET_FWCMD_CHINFO_PKT_ID(cmd, ch_info->probe_id);
3057 RTW89_SET_FWCMD_CHINFO_DFS(cmd, ch_info->dfs_ch);
3058 RTW89_SET_FWCMD_CHINFO_TX_NULL(cmd, ch_info->tx_null);
3059 RTW89_SET_FWCMD_CHINFO_RANDOM(cmd, ch_info->rand_seq_num);
3060 RTW89_SET_FWCMD_CHINFO_PKT0(cmd, ch_info->pkt_id[0]);
3061 RTW89_SET_FWCMD_CHINFO_PKT1(cmd, ch_info->pkt_id[1]);
3062 RTW89_SET_FWCMD_CHINFO_PKT2(cmd, ch_info->pkt_id[2]);
3063 RTW89_SET_FWCMD_CHINFO_PKT3(cmd, ch_info->pkt_id[3]);
3064 RTW89_SET_FWCMD_CHINFO_PKT4(cmd, ch_info->pkt_id[4]);
3065 RTW89_SET_FWCMD_CHINFO_PKT5(cmd, ch_info->pkt_id[5]);
3066 RTW89_SET_FWCMD_CHINFO_PKT6(cmd, ch_info->pkt_id[6]);
3067 RTW89_SET_FWCMD_CHINFO_PKT7(cmd, ch_info->pkt_id[7]);
3068 }
3069
3070 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3071 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
3072 H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len);
3073
3074 cond = RTW89_FW_OFLD_WAIT_COND(0, H2C_FUNC_ADD_SCANOFLD_CH);
3075
3076 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
3077 if (ret) {
3078 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to add scan ofld ch\n");
3079 return ret;
3080 }
3081
3082 return 0;
3083 }
3084
3085 int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev,
3086 struct rtw89_scan_option *option,
3087 struct rtw89_vif *rtwvif)
3088 {
3089 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
3090 struct rtw89_chan *op = &rtwdev->scan_info.op_chan;
3091 struct rtw89_h2c_scanofld *h2c;
3092 u32 len = sizeof(*h2c);
3093 struct sk_buff *skb;
3094 unsigned int cond;
3095 int ret;
3096
3097 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
3098 if (!skb) {
3099 rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n");
3100 return -ENOMEM;
3101 }
3102 skb_put(skb, len);
3103 h2c = (struct rtw89_h2c_scanofld *)skb->data;
3104
3105 h2c->w0 = le32_encode_bits(rtwvif->mac_id, RTW89_H2C_SCANOFLD_W0_MACID) |
3106 le32_encode_bits(rtwvif->port, RTW89_H2C_SCANOFLD_W0_PORT_ID) |
3107 le32_encode_bits(RTW89_PHY_0, RTW89_H2C_SCANOFLD_W0_BAND) |
3108 le32_encode_bits(option->enable, RTW89_H2C_SCANOFLD_W0_OPERATION);
3109
3110 h2c->w1 = le32_encode_bits(true, RTW89_H2C_SCANOFLD_W1_NOTIFY_END) |
3111 le32_encode_bits(option->target_ch_mode,
3112 RTW89_H2C_SCANOFLD_W1_TARGET_CH_MODE) |
3113 le32_encode_bits(RTW89_SCAN_IMMEDIATE,
3114 RTW89_H2C_SCANOFLD_W1_START_MODE) |
3115 le32_encode_bits(RTW89_SCAN_ONCE, RTW89_H2C_SCANOFLD_W1_SCAN_TYPE);
3116
3117 if (option->target_ch_mode) {
3118 h2c->w1 |= le32_encode_bits(op->band_width,
3119 RTW89_H2C_SCANOFLD_W1_TARGET_CH_BW) |
3120 le32_encode_bits(op->primary_channel,
3121 RTW89_H2C_SCANOFLD_W1_TARGET_PRI_CH) |
3122 le32_encode_bits(op->channel,
3123 RTW89_H2C_SCANOFLD_W1_TARGET_CENTRAL_CH);
3124 h2c->w0 |= le32_encode_bits(op->band_type,
3125 RTW89_H2C_SCANOFLD_W0_TARGET_CH_BAND);
3126 }
3127
3128 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3129 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
3130 H2C_FUNC_SCANOFLD, 1, 1,
3131 len);
3132
3133 cond = RTW89_FW_OFLD_WAIT_COND(0, H2C_FUNC_SCANOFLD);
3134
3135 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
3136 if (ret) {
3137 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to scan ofld\n");
3138 return ret;
3139 }
3140
3141 return 0;
3142 }
3143
3144 int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev,
3145 struct rtw89_fw_h2c_rf_reg_info *info,
3146 u16 len, u8 page)
3147 {
3148 struct sk_buff *skb;
3149 u8 class = info->rf_path == RF_PATH_A ?
3150 H2C_CL_OUTSRC_RF_REG_A : H2C_CL_OUTSRC_RF_REG_B;
3151 int ret;
3152
3153 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
3154 if (!skb) {
3155 rtw89_err(rtwdev, "failed to alloc skb for h2c rf reg\n");
3156 return -ENOMEM;
3157 }
3158 skb_put_data(skb, info->rtw89_phy_config_rf_h2c[page], len);
3159
3160 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3161 H2C_CAT_OUTSRC, class, page, 0, 0,
3162 len);
3163
3164 ret = rtw89_h2c_tx(rtwdev, skb, false);
3165 if (ret) {
3166 rtw89_err(rtwdev, "failed to send h2c\n");
3167 goto fail;
3168 }
3169
3170 return 0;
3171 fail:
3172 dev_kfree_skb_any(skb);
3173
3174 return ret;
3175 }
3176
3177 int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev)
3178 {
3179 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
3180 struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc;
3181 struct rtw89_fw_h2c_rf_get_mccch *mccch;
3182 struct sk_buff *skb;
3183 int ret;
3184
3185 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, sizeof(*mccch));
3186 if (!skb) {
3187 rtw89_err(rtwdev, "failed to alloc skb for h2c rf ntfy mcc\n");
3188 return -ENOMEM;
3189 }
3190 skb_put(skb, sizeof(*mccch));
3191 mccch = (struct rtw89_fw_h2c_rf_get_mccch *)skb->data;
3192
3193 mccch->ch_0 = cpu_to_le32(rfk_mcc->ch[0]);
3194 mccch->ch_1 = cpu_to_le32(rfk_mcc->ch[1]);
3195 mccch->band_0 = cpu_to_le32(rfk_mcc->band[0]);
3196 mccch->band_1 = cpu_to_le32(rfk_mcc->band[1]);
3197 mccch->current_channel = cpu_to_le32(chan->channel);
3198 mccch->current_band_type = cpu_to_le32(chan->band_type);
3199
3200 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3201 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_NOTIFY,
3202 H2C_FUNC_OUTSRC_RF_GET_MCCCH, 0, 0,
3203 sizeof(*mccch));
3204
3205 ret = rtw89_h2c_tx(rtwdev, skb, false);
3206 if (ret) {
3207 rtw89_err(rtwdev, "failed to send h2c\n");
3208 goto fail;
3209 }
3210
3211 return 0;
3212 fail:
3213 dev_kfree_skb_any(skb);
3214
3215 return ret;
3216 }
3217 EXPORT_SYMBOL(rtw89_fw_h2c_rf_ntfy_mcc);
3218
3219 int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev,
3220 u8 h2c_class, u8 h2c_func, u8 *buf, u16 len,
3221 bool rack, bool dack)
3222 {
3223 struct sk_buff *skb;
3224 int ret;
3225
3226 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
3227 if (!skb) {
3228 rtw89_err(rtwdev, "failed to alloc skb for raw with hdr\n");
3229 return -ENOMEM;
3230 }
3231 skb_put_data(skb, buf, len);
3232
3233 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3234 H2C_CAT_OUTSRC, h2c_class, h2c_func, rack, dack,
3235 len);
3236
3237 ret = rtw89_h2c_tx(rtwdev, skb, false);
3238 if (ret) {
3239 rtw89_err(rtwdev, "failed to send h2c\n");
3240 goto fail;
3241 }
3242
3243 return 0;
3244 fail:
3245 dev_kfree_skb_any(skb);
3246
3247 return ret;
3248 }
3249
3250 int rtw89_fw_h2c_raw(struct rtw89_dev *rtwdev, const u8 *buf, u16 len)
3251 {
3252 struct sk_buff *skb;
3253 int ret;
3254
3255 skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, len);
3256 if (!skb) {
3257 rtw89_err(rtwdev, "failed to alloc skb for h2c raw\n");
3258 return -ENOMEM;
3259 }
3260 skb_put_data(skb, buf, len);
3261
3262 ret = rtw89_h2c_tx(rtwdev, skb, false);
3263 if (ret) {
3264 rtw89_err(rtwdev, "failed to send h2c\n");
3265 goto fail;
3266 }
3267
3268 return 0;
3269 fail:
3270 dev_kfree_skb_any(skb);
3271
3272 return ret;
3273 }
3274
3275 void rtw89_fw_send_all_early_h2c(struct rtw89_dev *rtwdev)
3276 {
3277 struct rtw89_early_h2c *early_h2c;
3278
3279 lockdep_assert_held(&rtwdev->mutex);
3280
3281 list_for_each_entry(early_h2c, &rtwdev->early_h2c_list, list) {
3282 rtw89_fw_h2c_raw(rtwdev, early_h2c->h2c, early_h2c->h2c_len);
3283 }
3284 }
3285
3286 void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev)
3287 {
3288 struct rtw89_early_h2c *early_h2c, *tmp;
3289
3290 mutex_lock(&rtwdev->mutex);
3291 list_for_each_entry_safe(early_h2c, tmp, &rtwdev->early_h2c_list, list) {
3292 list_del(&early_h2c->list);
3293 kfree(early_h2c->h2c);
3294 kfree(early_h2c);
3295 }
3296 mutex_unlock(&rtwdev->mutex);
3297 }
3298
3299 static void rtw89_fw_c2h_parse_attr(struct sk_buff *c2h)
3300 {
3301 const struct rtw89_c2h_hdr *hdr = (const struct rtw89_c2h_hdr *)c2h->data;
3302 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h);
3303
3304 attr->category = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_CATEGORY);
3305 attr->class = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_CLASS);
3306 attr->func = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_FUNC);
3307 attr->len = le32_get_bits(hdr->w1, RTW89_C2H_HDR_W1_LEN);
3308 }
3309
3310 static bool rtw89_fw_c2h_chk_atomic(struct rtw89_dev *rtwdev,
3311 struct sk_buff *c2h)
3312 {
3313 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h);
3314 u8 category = attr->category;
3315 u8 class = attr->class;
3316 u8 func = attr->func;
3317
3318 switch (category) {
3319 default:
3320 return false;
3321 case RTW89_C2H_CAT_MAC:
3322 return rtw89_mac_c2h_chk_atomic(rtwdev, class, func);
3323 }
3324 }
3325
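/* Entry point for C2H events received in atomic context: parse the header
 * attributes, handle events marked atomic immediately, and defer the rest
 * to the c2h_work queue.
 */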
3326 void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h)
3327 {
3328 rtw89_fw_c2h_parse_attr(c2h);
3329 if (!rtw89_fw_c2h_chk_atomic(rtwdev, c2h))
3330 goto enqueue;
3331
3332 rtw89_fw_c2h_cmd_handle(rtwdev, c2h);
3333 dev_kfree_skb_any(c2h);
3334 return;
3335
3336 enqueue:
3337 skb_queue_tail(&rtwdev->c2h_queue, c2h);
3338 ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work);
3339 }
3340
3341 static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev,
3342 struct sk_buff *skb)
3343 {
3344 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(skb);
3345 u8 category = attr->category;
3346 u8 class = attr->class;
3347 u8 func = attr->func;
3348 u16 len = attr->len;
3349 bool dump = true;
3350
3351 if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags))
3352 return;
3353
3354 switch (category) {
3355 case RTW89_C2H_CAT_TEST:
3356 break;
3357 case RTW89_C2H_CAT_MAC:
3358 rtw89_mac_c2h_handle(rtwdev, skb, len, class, func);
3359 if (class == RTW89_MAC_C2H_CLASS_INFO &&
3360 func == RTW89_MAC_C2H_FUNC_C2H_LOG)
3361 dump = false;
3362 break;
3363 case RTW89_C2H_CAT_OUTSRC:
3364 if (class >= RTW89_PHY_C2H_CLASS_BTC_MIN &&
3365 class <= RTW89_PHY_C2H_CLASS_BTC_MAX)
3366 rtw89_btc_c2h_handle(rtwdev, skb, len, class, func);
3367 else
3368 rtw89_phy_c2h_handle(rtwdev, skb, len, class, func);
3369 break;
3370 }
3371
3372 if (dump)
3373 rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "C2H: ", skb->data, skb->len);
3374 }
3375
3376 void rtw89_fw_c2h_work(struct work_struct *work)
3377 {
3378 struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
3379 c2h_work);
3380 struct sk_buff *skb, *tmp;
3381
3382 skb_queue_walk_safe(&rtwdev->c2h_queue, skb, tmp) {
3383 skb_unlink(skb, &rtwdev->c2h_queue);
3384 mutex_lock(&rtwdev->mutex);
3385 rtw89_fw_c2h_cmd_handle(rtwdev, skb);
3386 mutex_unlock(&rtwdev->mutex);
3387 dev_kfree_skb_any(skb);
3388 }
3389 }
3390
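/* Write an H2C message through the H2C registers: wait for firmware to
 * consume the previous message, fill the function ID and length into the
 * header word, update the H2C counter and trigger the transfer.
 */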
3391 static int rtw89_fw_write_h2c_reg(struct rtw89_dev *rtwdev,
3392 struct rtw89_mac_h2c_info *info)
3393 {
3394 const struct rtw89_chip_info *chip = rtwdev->chip;
3395 struct rtw89_fw_info *fw_info = &rtwdev->fw;
3396 const u32 *h2c_reg = chip->h2c_regs;
3397 u8 i, val, len;
3398 int ret;
3399
3400 ret = read_poll_timeout(rtw89_read8, val, val == 0, 1000, 5000, false,
3401 rtwdev, chip->h2c_ctrl_reg);
3402 if (ret) {
3403 rtw89_warn(rtwdev, "FW does not process h2c registers\n");
3404 return ret;
3405 }
3406
3407 len = DIV_ROUND_UP(info->content_len + RTW89_H2CREG_HDR_LEN,
3408 sizeof(info->u.h2creg[0]));
3409
3410 u32p_replace_bits(&info->u.hdr.w0, info->id, RTW89_H2CREG_HDR_FUNC_MASK);
3411 u32p_replace_bits(&info->u.hdr.w0, len, RTW89_H2CREG_HDR_LEN_MASK);
3412
3413 for (i = 0; i < RTW89_H2CREG_MAX; i++)
3414 rtw89_write32(rtwdev, h2c_reg[i], info->u.h2creg[i]);
3415
3416 fw_info->h2c_counter++;
3417 rtw89_write8_mask(rtwdev, chip->h2c_counter_reg.addr,
3418 chip->h2c_counter_reg.mask, fw_info->h2c_counter);
3419 rtw89_write8(rtwdev, chip->h2c_ctrl_reg, B_AX_H2CREG_TRIGGER);
3420
3421 return 0;
3422 }
3423
3424 static int rtw89_fw_read_c2h_reg(struct rtw89_dev *rtwdev,
3425 struct rtw89_mac_c2h_info *info)
3426 {
3427 const struct rtw89_chip_info *chip = rtwdev->chip;
3428 struct rtw89_fw_info *fw_info = &rtwdev->fw;
3429 const u32 *c2h_reg = chip->c2h_regs;
3430 u32 ret;
3431 u8 i, val;
3432
3433 info->id = RTW89_FWCMD_C2HREG_FUNC_NULL;
3434
3435 ret = read_poll_timeout_atomic(rtw89_read8, val, val, 1,
3436 RTW89_C2H_TIMEOUT, false, rtwdev,
3437 chip->c2h_ctrl_reg);
3438 if (ret) {
3439 rtw89_warn(rtwdev, "c2h reg timeout\n");
3440 return ret;
3441 }
3442
3443 for (i = 0; i < RTW89_C2HREG_MAX; i++)
3444 info->u.c2hreg[i] = rtw89_read32(rtwdev, c2h_reg[i]);
3445
3446 rtw89_write8(rtwdev, chip->c2h_ctrl_reg, 0);
3447
3448 info->id = u32_get_bits(info->u.hdr.w0, RTW89_C2HREG_HDR_FUNC_MASK);
3449 info->content_len =
3450 (u32_get_bits(info->u.hdr.w0, RTW89_C2HREG_HDR_LEN_MASK) << 2) -
3451 RTW89_C2HREG_HDR_LEN;
3452
3453 fw_info->c2h_counter++;
3454 rtw89_write8_mask(rtwdev, chip->c2h_counter_reg.addr,
3455 chip->c2h_counter_reg.mask, fw_info->c2h_counter);
3456
3457 return 0;
3458 }
3459
3460 int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev,
3461 struct rtw89_mac_h2c_info *h2c_info,
3462 struct rtw89_mac_c2h_info *c2h_info)
3463 {
3464 u32 ret;
3465
3466 if (h2c_info && h2c_info->id != RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE)
3467 lockdep_assert_held(&rtwdev->mutex);
3468
3469 if (!h2c_info && !c2h_info)
3470 return -EINVAL;
3471
3472 if (!h2c_info)
3473 goto recv_c2h;
3474
3475 ret = rtw89_fw_write_h2c_reg(rtwdev, h2c_info);
3476 if (ret)
3477 return ret;
3478
3479 recv_c2h:
3480 if (!c2h_info)
3481 return 0;
3482
3483 ret = rtw89_fw_read_c2h_reg(rtwdev, c2h_info);
3484 if (ret)
3485 return ret;
3486
3487 return 0;
3488 }
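/* A minimal caller sketch for this register-based message path (illustrative
 * only; the chosen function ID and zero content length are assumptions, not a
 * real call site in this file):
 *
 *	struct rtw89_mac_h2c_info h2c = {
 *		.id = RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE,
 *		.content_len = 0,
 *	};
 *	struct rtw89_mac_c2h_info c2h = {};
 *	int ret = rtw89_fw_msg_reg(rtwdev, &h2c, &c2h);
 *
 * Passing only h2c_info writes the H2C registers without reading a reply;
 * passing only c2h_info just polls the C2H registers for a pending message.
 */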
3489
3490 void rtw89_fw_st_dbg_dump(struct rtw89_dev *rtwdev)
3491 {
3492 if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) {
3493 rtw89_err(rtwdev, "[ERR]pwr is off\n");
3494 return;
3495 }
3496
3497 rtw89_info(rtwdev, "FW status = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM0));
3498 rtw89_info(rtwdev, "FW BADADDR = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM1));
3499 rtw89_info(rtwdev, "FW EPC/RA = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM2));
3500 rtw89_info(rtwdev, "FW MISC = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM3));
3501 rtw89_info(rtwdev, "R_AX_HALT_C2H = 0x%x\n",
3502 rtw89_read32(rtwdev, R_AX_HALT_C2H));
3503 rtw89_info(rtwdev, "R_AX_SER_DBG_INFO = 0x%x\n",
3504 rtw89_read32(rtwdev, R_AX_SER_DBG_INFO));
3505
3506 rtw89_fw_prog_cnt_dump(rtwdev);
3507 }
3508
3509 static void rtw89_release_pkt_list(struct rtw89_dev *rtwdev)
3510 {
3511 struct list_head *pkt_list = rtwdev->scan_info.pkt_list;
3512 struct rtw89_pktofld_info *info, *tmp;
3513 u8 idx;
3514
3515 for (idx = NL80211_BAND_2GHZ; idx < NUM_NL80211_BANDS; idx++) {
3516 if (!(rtwdev->chip->support_bands & BIT(idx)))
3517 continue;
3518
3519 list_for_each_entry_safe(info, tmp, &pkt_list[idx], list) {
3520 if (test_bit(info->id, rtwdev->pkt_offload))
3521 rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id);
3522 list_del(&info->list);
3523 kfree(info);
3524 }
3525 }
3526 }
3527
3528 static bool rtw89_is_6ghz_wildcard_probe_req(struct rtw89_dev *rtwdev,
3529 struct rtw89_vif *rtwvif,
3530 struct rtw89_pktofld_info *info,
3531 enum nl80211_band band, u8 ssid_idx)
3532 {
3533 struct cfg80211_scan_request *req = rtwvif->scan_req;
3534
3535 if (band != NL80211_BAND_6GHZ)
3536 return false;
3537
3538 if (req->ssids[ssid_idx].ssid_len) {
3539 memcpy(info->ssid, req->ssids[ssid_idx].ssid,
3540 req->ssids[ssid_idx].ssid_len);
3541 info->ssid_len = req->ssids[ssid_idx].ssid_len;
3542 return false;
3543 } else {
3544 return true;
3545 }
3546 }
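/* This helper returns true only for a 6 GHz entry with an empty SSID, i.e. a
 * wildcard probe request; rtw89_append_probe_req_ie() then skips offloading
 * that frame. For a non-empty SSID the SSID is stashed in the pktofld info
 * and false is returned so the frame is offloaded as usual.
 */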
3547
3548 static int rtw89_append_probe_req_ie(struct rtw89_dev *rtwdev,
3549 struct rtw89_vif *rtwvif,
3550 struct sk_buff *skb, u8 ssid_idx)
3551 {
3552 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
3553 struct ieee80211_scan_ies *ies = rtwvif->scan_ies;
3554 struct rtw89_pktofld_info *info;
3555 struct sk_buff *new;
3556 int ret = 0;
3557 u8 band;
3558
3559 for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
3560 if (!(rtwdev->chip->support_bands & BIT(band)))
3561 continue;
3562
3563 new = skb_copy(skb, GFP_KERNEL);
3564 if (!new) {
3565 ret = -ENOMEM;
3566 goto out;
3567 }
3568 skb_put_data(new, ies->ies[band], ies->len[band]);
3569 skb_put_data(new, ies->common_ies, ies->common_ie_len);
3570
3571 info = kzalloc(sizeof(*info), GFP_KERNEL);
3572 if (!info) {
3573 ret = -ENOMEM;
3574 kfree_skb(new);
3575 goto out;
3576 }
3577
3578 if (rtw89_is_6ghz_wildcard_probe_req(rtwdev, rtwvif, info, band,
3579 ssid_idx)) {
3580 kfree_skb(new);
3581 kfree(info);
3582 goto out;
3583 }
3584
3585 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, new);
3586 if (ret) {
3587 kfree_skb(new);
3588 kfree(info);
3589 goto out;
3590 }
3591
3592 list_add_tail(&info->list, &scan_info->pkt_list[band]);
3593 kfree_skb(new);
3594 }
3595 out:
3596 return ret;
3597 }
3598
3599 static int rtw89_hw_scan_update_probe_req(struct rtw89_dev *rtwdev,
3600 struct rtw89_vif *rtwvif)
3601 {
3602 struct cfg80211_scan_request *req = rtwvif->scan_req;
3603 struct sk_buff *skb;
3604 u8 num = req->n_ssids, i;
3605 int ret;
3606
3607 for (i = 0; i < num; i++) {
3608 skb = ieee80211_probereq_get(rtwdev->hw, rtwvif->mac_addr,
3609 req->ssids[i].ssid,
3610 req->ssids[i].ssid_len,
3611 req->ie_len);
3612 if (!skb)
3613 return -ENOMEM;
3614
3615 ret = rtw89_append_probe_req_ie(rtwdev, rtwvif, skb, i);
3616 kfree_skb(skb);
3617
3618 if (ret)
3619 return ret;
3620 }
3621
3622 return 0;
3623 }
3624
3625 static int rtw89_update_6ghz_rnr_chan(struct rtw89_dev *rtwdev,
3626 struct cfg80211_scan_request *req,
3627 struct rtw89_mac_chinfo *ch_info)
3628 {
3629 struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif;
3630 struct list_head *pkt_list = rtwdev->scan_info.pkt_list;
3631 struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif);
3632 struct ieee80211_scan_ies *ies = rtwvif->scan_ies;
3633 struct cfg80211_scan_6ghz_params *params;
3634 struct rtw89_pktofld_info *info, *tmp;
3635 struct ieee80211_hdr *hdr;
3636 struct sk_buff *skb;
3637 bool found;
3638 int ret = 0;
3639 u8 i;
3640
3641 if (!req->n_6ghz_params)
3642 return 0;
3643
3644 for (i = 0; i < req->n_6ghz_params; i++) {
3645 params = &req->scan_6ghz_params[i];
3646
3647 if (req->channels[params->channel_idx]->hw_value !=
3648 ch_info->pri_ch)
3649 continue;
3650
3651 found = false;
3652 list_for_each_entry(tmp, &pkt_list[NL80211_BAND_6GHZ], list) {
3653 if (ether_addr_equal(tmp->bssid, params->bssid)) {
3654 found = true;
3655 break;
3656 }
3657 }
3658 if (found)
3659 continue;
3660
3661 skb = ieee80211_probereq_get(rtwdev->hw, rtwvif->mac_addr,
3662 NULL, 0, req->ie_len);
3663 if (!skb)
3664 return -ENOMEM;
3665
3666 skb_put_data(skb, ies->ies[NL80211_BAND_6GHZ], ies->len[NL80211_BAND_6GHZ]);
3667 skb_put_data(skb, ies->common_ies, ies->common_ie_len);
3668 hdr = (struct ieee80211_hdr *)skb->data;
3669 ether_addr_copy(hdr->addr3, params->bssid);
3670
3671 info = kzalloc(sizeof(*info), GFP_KERNEL);
3672 if (!info) {
3673 ret = -ENOMEM;
3674 kfree_skb(skb);
3675 goto out;
3676 }
3677
3678 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb);
3679 if (ret) {
3680 kfree_skb(skb);
3681 kfree(info);
3682 goto out;
3683 }
3684
3685 ether_addr_copy(info->bssid, params->bssid);
3686 info->channel_6ghz = req->channels[params->channel_idx]->hw_value;
3687 list_add_tail(&info->list, &rtwdev->scan_info.pkt_list[NL80211_BAND_6GHZ]);
3688
3689 ch_info->tx_pkt = true;
3690 ch_info->period = RTW89_CHANNEL_TIME_6G + RTW89_DWELL_TIME_6G;
3691
3692 kfree_skb(skb);
3693 }
3694
3695 out:
3696 return ret;
3697 }
3698
3699 static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type,
3700 int ssid_num,
3701 struct rtw89_mac_chinfo *ch_info)
3702 {
3703 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
3704 struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif;
3705 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
3706 struct cfg80211_scan_request *req = rtwvif->scan_req;
3707 struct rtw89_chan *op = &rtwdev->scan_info.op_chan;
3708 struct rtw89_pktofld_info *info;
3709 u8 band, probe_count = 0;
3710 int ret;
3711
3712 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK;
3713 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS;
3714 ch_info->bw = RTW89_SCAN_WIDTH;
3715 ch_info->tx_pkt = true;
3716 ch_info->cfg_tx_pwr = false;
3717 ch_info->tx_pwr_idx = 0;
3718 ch_info->tx_null = false;
3719 ch_info->pause_data = false;
3720 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE;
3721
3722 if (ch_info->ch_band == RTW89_BAND_6G) {
3723 if ((ssid_num == 1 && req->ssids[0].ssid_len == 0) ||
3724 !ch_info->is_psc) {
3725 ch_info->tx_pkt = false;
3726 if (!req->duration_mandatory)
3727 ch_info->period -= RTW89_DWELL_TIME_6G;
3728 }
3729 }
3730
3731 ret = rtw89_update_6ghz_rnr_chan(rtwdev, req, ch_info);
3732 if (ret)
3733 rtw89_warn(rtwdev, "RNR update failed: %d\n", ret);
3734
3735 if (ssid_num) {
3736 band = rtw89_hw_to_nl80211_band(ch_info->ch_band);
3737
3738 list_for_each_entry(info, &scan_info->pkt_list[band], list) {
3739 if (info->channel_6ghz &&
3740 ch_info->pri_ch != info->channel_6ghz)
3741 continue;
3742 ch_info->pkt_id[probe_count++] = info->id;
3743 if (probe_count >= RTW89_SCANOFLD_MAX_SSID)
3744 break;
3745 }
3746 ch_info->num_pkt = probe_count;
3747 }
3748
3749 switch (chan_type) {
3750 case RTW89_CHAN_OPERATE:
3751 ch_info->central_ch = op->channel;
3752 ch_info->pri_ch = op->primary_channel;
3753 ch_info->ch_band = op->band_type;
3754 ch_info->bw = op->band_width;
3755 ch_info->tx_null = true;
3756 ch_info->num_pkt = 0;
3757 break;
3758 case RTW89_CHAN_DFS:
3759 if (ch_info->ch_band != RTW89_BAND_6G)
3760 ch_info->period = max_t(u8, ch_info->period,
3761 RTW89_DFS_CHAN_TIME);
3762 ch_info->dwell_time = RTW89_DWELL_TIME;
3763 break;
3764 case RTW89_CHAN_ACTIVE:
3765 break;
3766 default:
3767 rtw89_err(rtwdev, "Channel type out of bounds\n");
3768 }
3769 }
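/* Rough summary of the chan_type handling above: RTW89_CHAN_OPERATE entries
 * switch back to the operating channel and request a NULL-data notification
 * instead of probe frames (tx_null, num_pkt = 0); RTW89_CHAN_DFS entries get
 * an extended period (outside 6 GHz) plus a dwell time; RTW89_CHAN_ACTIVE
 * keeps the defaults set at the top of the function.
 */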
3770
3771 static int rtw89_hw_scan_add_chan_list(struct rtw89_dev *rtwdev,
3772 struct rtw89_vif *rtwvif, bool connected)
3773 {
3774 struct cfg80211_scan_request *req = rtwvif->scan_req;
3775 struct rtw89_mac_chinfo *ch_info, *tmp;
3776 struct ieee80211_channel *channel;
3777 struct list_head chan_list;
3778 bool random_seq = req->flags & NL80211_SCAN_FLAG_RANDOM_SN;
3779 int list_len, off_chan_time = 0;
3780 enum rtw89_chan_type type;
3781 int ret = 0;
3782 u32 idx;
3783
3784 INIT_LIST_HEAD(&chan_list);
3785 for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0;
3786 idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT;
3787 idx++, list_len++) {
3788 channel = req->channels[idx];
3789 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL);
3790 if (!ch_info) {
3791 ret = -ENOMEM;
3792 goto out;
3793 }
3794
3795 if (req->duration_mandatory)
3796 ch_info->period = req->duration;
3797 else if (channel->band == NL80211_BAND_6GHZ)
3798 ch_info->period = RTW89_CHANNEL_TIME_6G +
3799 RTW89_DWELL_TIME_6G;
3800 else
3801 ch_info->period = RTW89_CHANNEL_TIME;
3802
3803 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band);
3804 ch_info->central_ch = channel->hw_value;
3805 ch_info->pri_ch = channel->hw_value;
3806 ch_info->rand_seq_num = random_seq;
3807 ch_info->is_psc = cfg80211_channel_is_psc(channel);
3808
3809 if (channel->flags &
3810 (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR))
3811 type = RTW89_CHAN_DFS;
3812 else
3813 type = RTW89_CHAN_ACTIVE;
3814 rtw89_hw_scan_add_chan(rtwdev, type, req->n_ssids, ch_info);
3815
3816 if (connected &&
3817 off_chan_time + ch_info->period > RTW89_OFF_CHAN_TIME) {
3818 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
3819 if (!tmp) {
3820 ret = -ENOMEM;
3821 kfree(ch_info);
3822 goto out;
3823 }
3824
3825 type = RTW89_CHAN_OPERATE;
3826 tmp->period = req->duration_mandatory ?
3827 req->duration : RTW89_CHANNEL_TIME;
3828 rtw89_hw_scan_add_chan(rtwdev, type, 0, tmp);
3829 list_add_tail(&tmp->list, &chan_list);
3830 off_chan_time = 0;
3831 list_len++;
3832 }
3833 list_add_tail(&ch_info->list, &chan_list);
3834 off_chan_time += ch_info->period;
3835 }
3836 rtwdev->scan_info.last_chan_idx = idx;
3837 ret = rtw89_fw_h2c_scan_list_offload(rtwdev, list_len, &chan_list);
3838
3839 out:
3840 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) {
3841 list_del(&ch_info->list);
3842 kfree(ch_info);
3843 }
3844
3845 return ret;
3846 }
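/* Note on the connected case above: off_chan_time accumulates the period of
 * each scan entry, and once the next channel would push it past
 * RTW89_OFF_CHAN_TIME, an extra RTW89_CHAN_OPERATE entry is inserted so the
 * firmware periodically returns to the operating channel during the scan.
 */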
3847
3848 static int rtw89_hw_scan_prehandle(struct rtw89_dev *rtwdev,
3849 struct rtw89_vif *rtwvif, bool connected)
3850 {
3851 int ret;
3852
3853 ret = rtw89_hw_scan_update_probe_req(rtwdev, rtwvif);
3854 if (ret) {
3855 rtw89_err(rtwdev, "Update probe request failed\n");
3856 goto out;
3857 }
3858 ret = rtw89_hw_scan_add_chan_list(rtwdev, rtwvif, connected);
3859 out:
3860 return ret;
3861 }
3862
3863 void rtw89_hw_scan_start(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
3864 struct ieee80211_scan_request *scan_req)
3865 {
3866 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
3867 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
3868 struct cfg80211_scan_request *req = &scan_req->req;
3869 u32 rx_fltr = rtwdev->hal.rx_fltr;
3870 u8 mac_addr[ETH_ALEN];
3871
3872 rtw89_get_channel(rtwdev, rtwvif, &rtwdev->scan_info.op_chan);
3873 rtwdev->scan_info.scanning_vif = vif;
3874 rtwdev->scan_info.last_chan_idx = 0;
3875 rtwvif->scan_ies = &scan_req->ies;
3876 rtwvif->scan_req = req;
3877 ieee80211_stop_queues(rtwdev->hw);
3878
3879 if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR)
3880 get_random_mask_addr(mac_addr, req->mac_addr,
3881 req->mac_addr_mask);
3882 else
3883 ether_addr_copy(mac_addr, vif->addr);
3884 rtw89_core_scan_start(rtwdev, rtwvif, mac_addr, true);
3885
3886 rx_fltr &= ~B_AX_A_BCN_CHK_EN;
3887 rx_fltr &= ~B_AX_A_BC;
3888 rx_fltr &= ~B_AX_A_A1_MATCH;
3889 rtw89_write32_mask(rtwdev,
3890 rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, RTW89_MAC_0),
3891 B_AX_RX_FLTR_CFG_MASK,
3892 rx_fltr);
3893 }
3894
3895 void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
3896 bool aborted)
3897 {
3898 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
3899 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
3900 struct cfg80211_scan_info info = {
3901 .aborted = aborted,
3902 };
3903 struct rtw89_vif *rtwvif;
3904
3905 if (!vif)
3906 return;
3907
3908 rtw89_write32_mask(rtwdev,
3909 rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, RTW89_MAC_0),
3910 B_AX_RX_FLTR_CFG_MASK,
3911 rtwdev->hal.rx_fltr);
3912
3913 rtw89_core_scan_complete(rtwdev, vif, true);
3914 ieee80211_scan_completed(rtwdev->hw, &info);
3915 ieee80211_wake_queues(rtwdev->hw);
3916 rtw89_mac_enable_beacon_for_ap_vifs(rtwdev, true);
3917
3918 rtw89_release_pkt_list(rtwdev);
3919 rtwvif = (struct rtw89_vif *)vif->drv_priv;
3920 rtwvif->scan_req = NULL;
3921 rtwvif->scan_ies = NULL;
3922 scan_info->last_chan_idx = 0;
3923 scan_info->scanning_vif = NULL;
3924
3925 rtw89_set_channel(rtwdev);
3926 }
3927
3928 void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif)
3929 {
3930 rtw89_hw_scan_offload(rtwdev, vif, false);
3931 rtw89_hw_scan_complete(rtwdev, vif, true);
3932 }
3933
3934 static bool rtw89_is_any_vif_connected_or_connecting(struct rtw89_dev *rtwdev)
3935 {
3936 struct rtw89_vif *rtwvif;
3937
3938 rtw89_for_each_rtwvif(rtwdev, rtwvif) {
3939 /* A non-zero bssid implies connected or attempting to connect */
3940 if (!is_zero_ether_addr(rtwvif->bssid))
3941 return true;
3942 }
3943
3944 return false;
3945 }
3946
3947 int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
3948 bool enable)
3949 {
3950 struct rtw89_scan_option opt = {0};
3951 struct rtw89_vif *rtwvif;
3952 bool connected;
3953 int ret = 0;
3954
3955 rtwvif = vif ? (struct rtw89_vif *)vif->drv_priv : NULL;
3956 if (!rtwvif)
3957 return -EINVAL;
3958
3959 connected = rtw89_is_any_vif_connected_or_connecting(rtwdev);
3960 opt.enable = enable;
3961 opt.target_ch_mode = connected;
3962 if (enable) {
3963 ret = rtw89_hw_scan_prehandle(rtwdev, rtwvif, connected);
3964 if (ret)
3965 goto out;
3966 }
3967 ret = rtw89_fw_h2c_scan_offload(rtwdev, &opt, rtwvif);
3968 out:
3969 return ret;
3970 }
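/* When enabling, the probe request and channel lists are built first via
 * rtw89_hw_scan_prehandle() and then the scan-offload H2C is issued; when
 * disabling, only the H2C is sent, which is how rtw89_hw_scan_abort() stops a
 * running scan. opt.target_ch_mode follows the connected state, presumably so
 * the firmware keeps revisiting the operating channel while a vif is
 * connected or connecting.
 */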
3971
3972 #define H2C_FW_CPU_EXCEPTION_LEN 4
3973 #define H2C_FW_CPU_EXCEPTION_TYPE_DEF 0x5566
3974 int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev)
3975 {
3976 struct sk_buff *skb;
3977 int ret;
3978
3979 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_FW_CPU_EXCEPTION_LEN);
3980 if (!skb) {
3981 rtw89_err(rtwdev,
3982 "failed to alloc skb for fw cpu exception\n");
3983 return -ENOMEM;
3984 }
3985
3986 skb_put(skb, H2C_FW_CPU_EXCEPTION_LEN);
3987 RTW89_SET_FWCMD_CPU_EXCEPTION_TYPE(skb->data,
3988 H2C_FW_CPU_EXCEPTION_TYPE_DEF);
3989
3990 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3991 H2C_CAT_TEST,
3992 H2C_CL_FW_STATUS_TEST,
3993 H2C_FUNC_CPU_EXCEPTION, 0, 0,
3994 H2C_FW_CPU_EXCEPTION_LEN);
3995
3996 ret = rtw89_h2c_tx(rtwdev, skb, false);
3997 if (ret) {
3998 rtw89_err(rtwdev, "failed to send h2c\n");
3999 goto fail;
4000 }
4001
4002 return 0;
4003
4004 fail:
4005 dev_kfree_skb_any(skb);
4006 return ret;
4007 }
4008
4009 #define H2C_PKT_DROP_LEN 24
4010 int rtw89_fw_h2c_pkt_drop(struct rtw89_dev *rtwdev,
4011 const struct rtw89_pkt_drop_params *params)
4012 {
4013 struct sk_buff *skb;
4014 int ret;
4015
4016 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_PKT_DROP_LEN);
4017 if (!skb) {
4018 rtw89_err(rtwdev,
4019 "failed to alloc skb for packet drop\n");
4020 return -ENOMEM;
4021 }
4022
4023 switch (params->sel) {
4024 case RTW89_PKT_DROP_SEL_MACID_BE_ONCE:
4025 case RTW89_PKT_DROP_SEL_MACID_BK_ONCE:
4026 case RTW89_PKT_DROP_SEL_MACID_VI_ONCE:
4027 case RTW89_PKT_DROP_SEL_MACID_VO_ONCE:
4028 case RTW89_PKT_DROP_SEL_BAND_ONCE:
4029 break;
4030 default:
4031 rtw89_debug(rtwdev, RTW89_DBG_FW,
4032 "H2C of pkt drop might not fully support sel: %d yet\n",
4033 params->sel);
4034 break;
4035 }
4036
4037 skb_put(skb, H2C_PKT_DROP_LEN);
4038 RTW89_SET_FWCMD_PKT_DROP_SEL(skb->data, params->sel);
4039 RTW89_SET_FWCMD_PKT_DROP_MACID(skb->data, params->macid);
4040 RTW89_SET_FWCMD_PKT_DROP_BAND(skb->data, params->mac_band);
4041 RTW89_SET_FWCMD_PKT_DROP_PORT(skb->data, params->port);
4042 RTW89_SET_FWCMD_PKT_DROP_MBSSID(skb->data, params->mbssid);
4043 RTW89_SET_FWCMD_PKT_DROP_ROLE_A_INFO_TF_TRS(skb->data, params->tf_trs);
4044 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_0(skb->data,
4045 params->macid_band_sel[0]);
4046 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_1(skb->data,
4047 params->macid_band_sel[1]);
4048 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_2(skb->data,
4049 params->macid_band_sel[2]);
4050 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_3(skb->data,
4051 params->macid_band_sel[3]);
4052
4053 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4054 H2C_CAT_MAC,
4055 H2C_CL_MAC_FW_OFLD,
4056 H2C_FUNC_PKT_DROP, 0, 0,
4057 H2C_PKT_DROP_LEN);
4058
4059 ret = rtw89_h2c_tx(rtwdev, skb, false);
4060 if (ret) {
4061 rtw89_err(rtwdev, "failed to send h2c\n");
4062 goto fail;
4063 }
4064
4065 return 0;
4066
4067 fail:
4068 dev_kfree_skb_any(skb);
4069 return ret;
4070 }
4071
4072 #define H2C_KEEP_ALIVE_LEN 4
4073 int rtw89_fw_h2c_keep_alive(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
4074 bool enable)
4075 {
4076 struct sk_buff *skb;
4077 u8 pkt_id = 0;
4078 int ret;
4079
4080 if (enable) {
4081 ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
4082 RTW89_PKT_OFLD_TYPE_NULL_DATA,
4083 &pkt_id);
4084 if (ret)
4085 return -EPERM;
4086 }
4087
4088 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_KEEP_ALIVE_LEN);
4089 if (!skb) {
4090 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n");
4091 return -ENOMEM;
4092 }
4093
4094 skb_put(skb, H2C_KEEP_ALIVE_LEN);
4095
4096 RTW89_SET_KEEP_ALIVE_ENABLE(skb->data, enable);
4097 RTW89_SET_KEEP_ALIVE_PKT_NULL_ID(skb->data, pkt_id);
4098 RTW89_SET_KEEP_ALIVE_PERIOD(skb->data, 5);
4099 RTW89_SET_KEEP_ALIVE_MACID(skb->data, rtwvif->mac_id);
4100
4101 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4102 H2C_CAT_MAC,
4103 H2C_CL_MAC_WOW,
4104 H2C_FUNC_KEEP_ALIVE, 0, 1,
4105 H2C_KEEP_ALIVE_LEN);
4106
4107 ret = rtw89_h2c_tx(rtwdev, skb, false);
4108 if (ret) {
4109 rtw89_err(rtwdev, "failed to send h2c\n");
4110 goto fail;
4111 }
4112
4113 return 0;
4114
4115 fail:
4116 dev_kfree_skb_any(skb);
4117
4118 return ret;
4119 }
4120
4121 #define H2C_DISCONNECT_DETECT_LEN 8
4122 int rtw89_fw_h2c_disconnect_detect(struct rtw89_dev *rtwdev,
4123 struct rtw89_vif *rtwvif, bool enable)
4124 {
4125 struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
4126 struct sk_buff *skb;
4127 u8 macid = rtwvif->mac_id;
4128 int ret;
4129
4130 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DISCONNECT_DETECT_LEN);
4131 if (!skb) {
4132 rtw89_err(rtwdev, "failed to alloc skb for disconnect detect\n");
4133 return -ENOMEM;
4134 }
4135
4136 skb_put(skb, H2C_DISCONNECT_DETECT_LEN);
4137
4138 if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) {
4139 RTW89_SET_DISCONNECT_DETECT_ENABLE(skb->data, enable);
4140 RTW89_SET_DISCONNECT_DETECT_DISCONNECT(skb->data, !enable);
4141 RTW89_SET_DISCONNECT_DETECT_MAC_ID(skb->data, macid);
4142 RTW89_SET_DISCONNECT_DETECT_CHECK_PERIOD(skb->data, 100);
4143 RTW89_SET_DISCONNECT_DETECT_TRY_PKT_COUNT(skb->data, 5);
4144 }
4145
4146 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4147 H2C_CAT_MAC,
4148 H2C_CL_MAC_WOW,
4149 H2C_FUNC_DISCONNECT_DETECT, 0, 1,
4150 H2C_DISCONNECT_DETECT_LEN);
4151
4152 ret = rtw89_h2c_tx(rtwdev, skb, false);
4153 if (ret) {
4154 rtw89_err(rtwdev, "failed to send h2c\n");
4155 goto fail;
4156 }
4157
4158 return 0;
4159
4160 fail:
4161 dev_kfree_skb_any(skb);
4162
4163 return ret;
4164 }
4165
4166 #define H2C_WOW_GLOBAL_LEN 8
4167 int rtw89_fw_h2c_wow_global(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
4168 bool enable)
4169 {
4170 struct sk_buff *skb;
4171 u8 macid = rtwvif->mac_id;
4172 int ret;
4173
4174 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WOW_GLOBAL_LEN);
4175 if (!skb) {
4176 rtw89_err(rtwdev, "failed to alloc skb for wow global\n");
4177 return -ENOMEM;
4178 }
4179
4180 skb_put(skb, H2C_WOW_GLOBAL_LEN);
4181
4182 RTW89_SET_WOW_GLOBAL_ENABLE(skb->data, enable);
4183 RTW89_SET_WOW_GLOBAL_MAC_ID(skb->data, macid);
4184
4185 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4186 H2C_CAT_MAC,
4187 H2C_CL_MAC_WOW,
4188 H2C_FUNC_WOW_GLOBAL, 0, 1,
4189 H2C_WOW_GLOBAL_LEN);
4190
4191 ret = rtw89_h2c_tx(rtwdev, skb, false);
4192 if (ret) {
4193 rtw89_err(rtwdev, "failed to send h2c\n");
4194 goto fail;
4195 }
4196
4197 return 0;
4198
4199 fail:
4200 dev_kfree_skb_any(skb);
4201
4202 return ret;
4203 }
4204
4205 #define H2C_WAKEUP_CTRL_LEN 4
4206 int rtw89_fw_h2c_wow_wakeup_ctrl(struct rtw89_dev *rtwdev,
4207 struct rtw89_vif *rtwvif,
4208 bool enable)
4209 {
4210 struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
4211 struct sk_buff *skb;
4212 u8 macid = rtwvif->mac_id;
4213 int ret;
4214
4215 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WAKEUP_CTRL_LEN);
4216 if (!skb) {
4217 rtw89_err(rtwdev, "failed to alloc skb for wow wakeup ctrl\n");
4218 return -ENOMEM;
4219 }
4220
4221 skb_put(skb, H2C_WAKEUP_CTRL_LEN);
4222
4223 if (rtw_wow->pattern_cnt)
4224 RTW89_SET_WOW_WAKEUP_CTRL_PATTERN_MATCH_ENABLE(skb->data, enable);
4225 if (test_bit(RTW89_WOW_FLAG_EN_MAGIC_PKT, rtw_wow->flags))
4226 RTW89_SET_WOW_WAKEUP_CTRL_MAGIC_ENABLE(skb->data, enable);
4227 if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags))
4228 RTW89_SET_WOW_WAKEUP_CTRL_DEAUTH_ENABLE(skb->data, enable);
4229
4230 RTW89_SET_WOW_WAKEUP_CTRL_MAC_ID(skb->data, macid);
4231
4232 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4233 H2C_CAT_MAC,
4234 H2C_CL_MAC_WOW,
4235 H2C_FUNC_WAKEUP_CTRL, 0, 1,
4236 H2C_WAKEUP_CTRL_LEN);
4237
4238 ret = rtw89_h2c_tx(rtwdev, skb, false);
4239 if (ret) {
4240 rtw89_err(rtwdev, "failed to send h2c\n");
4241 goto fail;
4242 }
4243
4244 return 0;
4245
4246 fail:
4247 dev_kfree_skb_any(skb);
4248
4249 return ret;
4250 }
4251
4252 #define H2C_WOW_CAM_UPD_LEN 24
4253 int rtw89_fw_wow_cam_update(struct rtw89_dev *rtwdev,
4254 struct rtw89_wow_cam_info *cam_info)
4255 {
4256 struct sk_buff *skb;
4257 int ret;
4258
4259 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WOW_CAM_UPD_LEN);
4260 if (!skb) {
4261 rtw89_err(rtwdev, "failed to alloc skb for wow cam update\n");
4262 return -ENOMEM;
4263 }
4264
4265 skb_put(skb, H2C_WOW_CAM_UPD_LEN);
4266
4267 RTW89_SET_WOW_CAM_UPD_R_W(skb->data, cam_info->r_w);
4268 RTW89_SET_WOW_CAM_UPD_IDX(skb->data, cam_info->idx);
4269 if (cam_info->valid) {
4270 RTW89_SET_WOW_CAM_UPD_WKFM1(skb->data, cam_info->mask[0]);
4271 RTW89_SET_WOW_CAM_UPD_WKFM2(skb->data, cam_info->mask[1]);
4272 RTW89_SET_WOW_CAM_UPD_WKFM3(skb->data, cam_info->mask[2]);
4273 RTW89_SET_WOW_CAM_UPD_WKFM4(skb->data, cam_info->mask[3]);
4274 RTW89_SET_WOW_CAM_UPD_CRC(skb->data, cam_info->crc);
4275 RTW89_SET_WOW_CAM_UPD_NEGATIVE_PATTERN_MATCH(skb->data,
4276 cam_info->negative_pattern_match);
4277 RTW89_SET_WOW_CAM_UPD_SKIP_MAC_HDR(skb->data,
4278 cam_info->skip_mac_hdr);
4279 RTW89_SET_WOW_CAM_UPD_UC(skb->data, cam_info->uc);
4280 RTW89_SET_WOW_CAM_UPD_MC(skb->data, cam_info->mc);
4281 RTW89_SET_WOW_CAM_UPD_BC(skb->data, cam_info->bc);
4282 }
4283 RTW89_SET_WOW_CAM_UPD_VALID(skb->data, cam_info->valid);
4284
4285 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4286 H2C_CAT_MAC,
4287 H2C_CL_MAC_WOW,
4288 H2C_FUNC_WOW_CAM_UPD, 0, 1,
4289 H2C_WOW_CAM_UPD_LEN);
4290
4291 ret = rtw89_h2c_tx(rtwdev, skb, false);
4292 if (ret) {
4293 rtw89_err(rtwdev, "failed to send h2c\n");
4294 goto fail;
4295 }
4296
4297 return 0;
4298 fail:
4299 dev_kfree_skb_any(skb);
4300
4301 return ret;
4302 }
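/* A minimal sketch of programming one wake pattern entry (illustrative only;
 * the field values, and a "crc" computed by the caller, are assumptions):
 *
 *	struct rtw89_wow_cam_info cam = {
 *		.r_w = true,
 *		.idx = 0,
 *		.valid = true,
 *		.mask = {0x3f, 0x0, 0x0, 0x0},
 *		.crc = crc,
 *		.uc = true,
 *	};
 *	ret = rtw89_fw_wow_cam_update(rtwdev, &cam);
 */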
4303
4304 /* Return < 0 if a failure happens while waiting for the condition.
4305 * Return 0 when waiting for the condition succeeds.
4306 * Return > 0 if the wait is considered unreachable due to driver/FW design,
4307 * where 1 means the wait is skipped because SER is in progress.
4308 */
4309 static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb,
4310 struct rtw89_wait_info *wait, unsigned int cond)
4311 {
4312 int ret;
4313
4314 ret = rtw89_h2c_tx(rtwdev, skb, false);
4315 if (ret) {
4316 rtw89_err(rtwdev, "failed to send h2c\n");
4317 dev_kfree_skb_any(skb);
4318 return -EBUSY;
4319 }
4320
4321 if (test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags))
4322 return 1;
4323
4324 return rtw89_wait_for_cond(wait, cond);
4325 }
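/* The MCC helpers below all follow the same pattern: build the H2C, derive a
 * wait condition from the group and H2C function, and block on it, e.g.
 *
 *	cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_ADD_MCC);
 *	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
 *
 * A positive return therefore means the wait was skipped (SER in progress)
 * rather than a TX or wait failure.
 */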
4326
4327 #define H2C_ADD_MCC_LEN 16
4328 int rtw89_fw_h2c_add_mcc(struct rtw89_dev *rtwdev,
4329 const struct rtw89_fw_mcc_add_req *p)
4330 {
4331 struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
4332 struct sk_buff *skb;
4333 unsigned int cond;
4334
4335 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ADD_MCC_LEN);
4336 if (!skb) {
4337 rtw89_err(rtwdev,
4338 "failed to alloc skb for add mcc\n");
4339 return -ENOMEM;
4340 }
4341
4342 skb_put(skb, H2C_ADD_MCC_LEN);
4343 RTW89_SET_FWCMD_ADD_MCC_MACID(skb->data, p->macid);
4344 RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG0(skb->data, p->central_ch_seg0);
4345 RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG1(skb->data, p->central_ch_seg1);
4346 RTW89_SET_FWCMD_ADD_MCC_PRIMARY_CH(skb->data, p->primary_ch);
4347 RTW89_SET_FWCMD_ADD_MCC_BANDWIDTH(skb->data, p->bandwidth);
4348 RTW89_SET_FWCMD_ADD_MCC_GROUP(skb->data, p->group);
4349 RTW89_SET_FWCMD_ADD_MCC_C2H_RPT(skb->data, p->c2h_rpt);
4350 RTW89_SET_FWCMD_ADD_MCC_DIS_TX_NULL(skb->data, p->dis_tx_null);
4351 RTW89_SET_FWCMD_ADD_MCC_DIS_SW_RETRY(skb->data, p->dis_sw_retry);
4352 RTW89_SET_FWCMD_ADD_MCC_IN_CURR_CH(skb->data, p->in_curr_ch);
4353 RTW89_SET_FWCMD_ADD_MCC_SW_RETRY_COUNT(skb->data, p->sw_retry_count);
4354 RTW89_SET_FWCMD_ADD_MCC_TX_NULL_EARLY(skb->data, p->tx_null_early);
4355 RTW89_SET_FWCMD_ADD_MCC_BTC_IN_2G(skb->data, p->btc_in_2g);
4356 RTW89_SET_FWCMD_ADD_MCC_PTA_EN(skb->data, p->pta_en);
4357 RTW89_SET_FWCMD_ADD_MCC_RFK_BY_PASS(skb->data, p->rfk_by_pass);
4358 RTW89_SET_FWCMD_ADD_MCC_CH_BAND_TYPE(skb->data, p->ch_band_type);
4359 RTW89_SET_FWCMD_ADD_MCC_DURATION(skb->data, p->duration);
4360 RTW89_SET_FWCMD_ADD_MCC_COURTESY_EN(skb->data, p->courtesy_en);
4361 RTW89_SET_FWCMD_ADD_MCC_COURTESY_NUM(skb->data, p->courtesy_num);
4362 RTW89_SET_FWCMD_ADD_MCC_COURTESY_TARGET(skb->data, p->courtesy_target);
4363
4364 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4365 H2C_CAT_MAC,
4366 H2C_CL_MCC,
4367 H2C_FUNC_ADD_MCC, 0, 0,
4368 H2C_ADD_MCC_LEN);
4369
4370 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_ADD_MCC);
4371 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
4372 }
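/* A minimal request-filling sketch (illustrative only; "chan", "group" and
 * "duration" are assumed caller-side values, and unlisted fields keep their
 * zero defaults):
 *
 *	struct rtw89_fw_mcc_add_req req = {
 *		.macid = rtwvif->mac_id,
 *		.group = group,
 *		.primary_ch = chan->primary_channel,
 *		.central_ch_seg0 = chan->channel,
 *		.bandwidth = chan->band_width,
 *		.ch_band_type = chan->band_type,
 *		.duration = duration,
 *	};
 *	ret = rtw89_fw_h2c_add_mcc(rtwdev, &req);
 */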
4373
4374 #define H2C_START_MCC_LEN 12
4375 int rtw89_fw_h2c_start_mcc(struct rtw89_dev *rtwdev,
4376 const struct rtw89_fw_mcc_start_req *p)
4377 {
4378 struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
4379 struct sk_buff *skb;
4380 unsigned int cond;
4381
4382 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_START_MCC_LEN);
4383 if (!skb) {
4384 rtw89_err(rtwdev,
4385 "failed to alloc skb for start mcc\n");
4386 return -ENOMEM;
4387 }
4388
4389 skb_put(skb, H2C_START_MCC_LEN);
4390 RTW89_SET_FWCMD_START_MCC_GROUP(skb->data, p->group);
4391 RTW89_SET_FWCMD_START_MCC_BTC_IN_GROUP(skb->data, p->btc_in_group);
4392 RTW89_SET_FWCMD_START_MCC_OLD_GROUP_ACTION(skb->data, p->old_group_action);
4393 RTW89_SET_FWCMD_START_MCC_OLD_GROUP(skb->data, p->old_group);
4394 RTW89_SET_FWCMD_START_MCC_NOTIFY_CNT(skb->data, p->notify_cnt);
4395 RTW89_SET_FWCMD_START_MCC_NOTIFY_RXDBG_EN(skb->data, p->notify_rxdbg_en);
4396 RTW89_SET_FWCMD_START_MCC_MACID(skb->data, p->macid);
4397 RTW89_SET_FWCMD_START_MCC_TSF_LOW(skb->data, p->tsf_low);
4398 RTW89_SET_FWCMD_START_MCC_TSF_HIGH(skb->data, p->tsf_high);
4399
4400 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4401 H2C_CAT_MAC,
4402 H2C_CL_MCC,
4403 H2C_FUNC_START_MCC, 0, 0,
4404 H2C_START_MCC_LEN);
4405
4406 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_START_MCC);
4407 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
4408 }
4409
4410 #define H2C_STOP_MCC_LEN 4
4411 int rtw89_fw_h2c_stop_mcc(struct rtw89_dev *rtwdev, u8 group, u8 macid,
4412 bool prev_groups)
4413 {
4414 struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
4415 struct sk_buff *skb;
4416 unsigned int cond;
4417
4418 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_STOP_MCC_LEN);
4419 if (!skb) {
4420 rtw89_err(rtwdev,
4421 "failed to alloc skb for stop mcc\n");
4422 return -ENOMEM;
4423 }
4424
4425 skb_put(skb, H2C_STOP_MCC_LEN);
4426 RTW89_SET_FWCMD_STOP_MCC_MACID(skb->data, macid);
4427 RTW89_SET_FWCMD_STOP_MCC_GROUP(skb->data, group);
4428 RTW89_SET_FWCMD_STOP_MCC_PREV_GROUPS(skb->data, prev_groups);
4429
4430 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4431 H2C_CAT_MAC,
4432 H2C_CL_MCC,
4433 H2C_FUNC_STOP_MCC, 0, 0,
4434 H2C_STOP_MCC_LEN);
4435
4436 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_STOP_MCC);
4437 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
4438 }
4439
4440 #define H2C_DEL_MCC_GROUP_LEN 4
4441 int rtw89_fw_h2c_del_mcc_group(struct rtw89_dev *rtwdev, u8 group,
4442 bool prev_groups)
4443 {
4444 struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
4445 struct sk_buff *skb;
4446 unsigned int cond;
4447
4448 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DEL_MCC_GROUP_LEN);
4449 if (!skb) {
4450 rtw89_err(rtwdev,
4451 "failed to alloc skb for del mcc group\n");
4452 return -ENOMEM;
4453 }
4454
4455 skb_put(skb, H2C_DEL_MCC_GROUP_LEN);
4456 RTW89_SET_FWCMD_DEL_MCC_GROUP_GROUP(skb->data, group);
4457 RTW89_SET_FWCMD_DEL_MCC_GROUP_PREV_GROUPS(skb->data, prev_groups);
4458
4459 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4460 H2C_CAT_MAC,
4461 H2C_CL_MCC,
4462 H2C_FUNC_DEL_MCC_GROUP, 0, 0,
4463 H2C_DEL_MCC_GROUP_LEN);
4464
4465 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_DEL_MCC_GROUP);
4466 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
4467 }
4468
4469 #define H2C_RESET_MCC_GROUP_LEN 4
4470 int rtw89_fw_h2c_reset_mcc_group(struct rtw89_dev *rtwdev, u8 group)
4471 {
4472 struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
4473 struct sk_buff *skb;
4474 unsigned int cond;
4475
4476 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_RESET_MCC_GROUP_LEN);
4477 if (!skb) {
4478 rtw89_err(rtwdev,
4479 "failed to alloc skb for reset mcc group\n");
4480 return -ENOMEM;
4481 }
4482
4483 skb_put(skb, H2C_RESET_MCC_GROUP_LEN);
4484 RTW89_SET_FWCMD_RESET_MCC_GROUP_GROUP(skb->data, group);
4485
4486 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4487 H2C_CAT_MAC,
4488 H2C_CL_MCC,
4489 H2C_FUNC_RESET_MCC_GROUP, 0, 0,
4490 H2C_RESET_MCC_GROUP_LEN);
4491
4492 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_RESET_MCC_GROUP);
4493 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
4494 }
4495
4496 #define H2C_MCC_REQ_TSF_LEN 4
4497 int rtw89_fw_h2c_mcc_req_tsf(struct rtw89_dev *rtwdev,
4498 const struct rtw89_fw_mcc_tsf_req *req,
4499 struct rtw89_mac_mcc_tsf_rpt *rpt)
4500 {
4501 struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
4502 struct rtw89_mac_mcc_tsf_rpt *tmp;
4503 struct sk_buff *skb;
4504 unsigned int cond;
4505 int ret;
4506
4507 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_REQ_TSF_LEN);
4508 if (!skb) {
4509 rtw89_err(rtwdev,
4510 "failed to alloc skb for mcc req tsf\n");
4511 return -ENOMEM;
4512 }
4513
4514 skb_put(skb, H2C_MCC_REQ_TSF_LEN);
4515 RTW89_SET_FWCMD_MCC_REQ_TSF_GROUP(skb->data, req->group);
4516 RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_X(skb->data, req->macid_x);
4517 RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_Y(skb->data, req->macid_y);
4518
4519 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4520 H2C_CAT_MAC,
4521 H2C_CL_MCC,
4522 H2C_FUNC_MCC_REQ_TSF, 0, 0,
4523 H2C_MCC_REQ_TSF_LEN);
4524
4525 cond = RTW89_MCC_WAIT_COND(req->group, H2C_FUNC_MCC_REQ_TSF);
4526 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
4527 if (ret)
4528 return ret;
4529
4530 tmp = (struct rtw89_mac_mcc_tsf_rpt *)wait->data.buf;
4531 *rpt = *tmp;
4532
4533 return 0;
4534 }
4535
4536 #define H2C_MCC_MACID_BITMAP_DSC_LEN 4
4537 int rtw89_fw_h2c_mcc_macid_bitamp(struct rtw89_dev *rtwdev, u8 group, u8 macid,
4538 u8 *bitmap)
4539 {
4540 struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
4541 struct sk_buff *skb;
4542 unsigned int cond;
4543 u8 map_len;
4544 u8 h2c_len;
4545
4546 BUILD_BUG_ON(RTW89_MAX_MAC_ID_NUM % 8);
4547 map_len = RTW89_MAX_MAC_ID_NUM / 8;
4548 h2c_len = H2C_MCC_MACID_BITMAP_DSC_LEN + map_len;
4549 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, h2c_len);
4550 if (!skb) {
4551 rtw89_err(rtwdev,
4552 "failed to alloc skb for mcc macid bitmap\n");
4553 return -ENOMEM;
4554 }
4555
4556 skb_put(skb, h2c_len);
4557 RTW89_SET_FWCMD_MCC_MACID_BITMAP_GROUP(skb->data, group);
4558 RTW89_SET_FWCMD_MCC_MACID_BITMAP_MACID(skb->data, macid);
4559 RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP_LENGTH(skb->data, map_len);
4560 RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP(skb->data, bitmap, map_len);
4561
4562 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4563 H2C_CAT_MAC,
4564 H2C_CL_MCC,
4565 H2C_FUNC_MCC_MACID_BITMAP, 0, 0,
4566 h2c_len);
4567
4568 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_MACID_BITMAP);
4569 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
4570 }
4571
4572 #define H2C_MCC_SYNC_LEN 4
4573 int rtw89_fw_h2c_mcc_sync(struct rtw89_dev *rtwdev, u8 group, u8 source,
4574 u8 target, u8 offset)
4575 {
4576 struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
4577 struct sk_buff *skb;
4578 unsigned int cond;
4579
4580 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SYNC_LEN);
4581 if (!skb) {
4582 rtw89_err(rtwdev,
4583 "failed to alloc skb for mcc sync\n");
4584 return -ENOMEM;
4585 }
4586
4587 skb_put(skb, H2C_MCC_SYNC_LEN);
4588 RTW89_SET_FWCMD_MCC_SYNC_GROUP(skb->data, group);
4589 RTW89_SET_FWCMD_MCC_SYNC_MACID_SOURCE(skb->data, source);
4590 RTW89_SET_FWCMD_MCC_SYNC_MACID_TARGET(skb->data, target);
4591 RTW89_SET_FWCMD_MCC_SYNC_SYNC_OFFSET(skb->data, offset);
4592
4593 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4594 H2C_CAT_MAC,
4595 H2C_CL_MCC,
4596 H2C_FUNC_MCC_SYNC, 0, 0,
4597 H2C_MCC_SYNC_LEN);
4598
4599 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_SYNC);
4600 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
4601 }
4602
4603 #define H2C_MCC_SET_DURATION_LEN 20
4604 int rtw89_fw_h2c_mcc_set_duration(struct rtw89_dev *rtwdev,
4605 const struct rtw89_fw_mcc_duration *p)
4606 {
4607 struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
4608 struct sk_buff *skb;
4609 unsigned int cond;
4610
4611 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SET_DURATION_LEN);
4612 if (!skb) {
4613 rtw89_err(rtwdev,
4614 "failed to alloc skb for mcc set duration\n");
4615 return -ENOMEM;
4616 }
4617
4618 skb_put(skb, H2C_MCC_SET_DURATION_LEN);
4619 RTW89_SET_FWCMD_MCC_SET_DURATION_GROUP(skb->data, p->group);
4620 RTW89_SET_FWCMD_MCC_SET_DURATION_BTC_IN_GROUP(skb->data, p->btc_in_group);
4621 RTW89_SET_FWCMD_MCC_SET_DURATION_START_MACID(skb->data, p->start_macid);
4622 RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_X(skb->data, p->macid_x);
4623 RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_Y(skb->data, p->macid_y);
4624 RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_LOW(skb->data,
4625 p->start_tsf_low);
4626 RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_HIGH(skb->data,
4627 p->start_tsf_high);
4628 RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_X(skb->data, p->duration_x);
4629 RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_Y(skb->data, p->duration_y);
4630
4631 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4632 H2C_CAT_MAC,
4633 H2C_CL_MCC,
4634 H2C_FUNC_MCC_SET_DURATION, 0, 0,
4635 H2C_MCC_SET_DURATION_LEN);
4636
4637 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_MCC_SET_DURATION);
4638 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
4639 }
4640