// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019  Realtek Corporation
 */

#include <linux/iopoll.h>

#include "main.h"
#include "coex.h"
#include "fw.h"
#include "tx.h"
#include "reg.h"
#include "sec.h"
#include "debug.h"
#include "util.h"
#include "wow.h"
#include "ps.h"

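/* C2H_HALMAC commands nest a second-level command: the first payload byte
 * selects the sub handler (currently only the CCX TX report is handled).
 */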
static void rtw_fw_c2h_cmd_handle_ext(struct rtw_dev *rtwdev,
				      struct sk_buff *skb)
{
	struct rtw_c2h_cmd *c2h;
	u8 sub_cmd_id;

	c2h = get_c2h_from_skb(skb);
	sub_cmd_id = c2h->payload[0];

	switch (sub_cmd_id) {
	case C2H_CCX_RPT:
		rtw_tx_report_handle(rtwdev, skb, C2H_CCX_RPT);
		break;
	default:
		break;
	}
}

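/* Map the current bit rate to an rc-layer A-MSDU size limit. bit_rate comes
 * from cfg80211_calculate_bitrate(), i.e. in units of 100 kbps, so e.g. 550
 * means 55 Mbps. Returning 1 effectively disables aggregation; 0 leaves it
 * unlimited.
 */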
static u16 get_max_amsdu_len(u32 bit_rate)
{
	/* lower than ofdm, do not aggregate */
	if (bit_rate < 550)
		return 1;

	/* lower than 20M 2ss mcs8, make it small */
	if (bit_rate < 1800)
		return 1200;

	/* lower than 40M 2ss mcs9, make it medium */
	if (bit_rate < 4000)
		return 2600;

	/* not yet 80M 2ss mcs8/9, make it twice regular packet size */
	if (bit_rate < 7000)
		return 3500;

	/* unlimited */
	return 0;
}

struct rtw_fw_iter_ra_data {
	struct rtw_dev *rtwdev;
	u8 *payload;
};

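/* Translate a firmware RA report into the station's rate_info. The iterator
 * runs over all stations and bails out unless the mac_id in the report
 * matches, so one C2H updates exactly one station.
 */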
static void rtw_fw_ra_report_iter(void *data, struct ieee80211_sta *sta)
{
	struct rtw_fw_iter_ra_data *ra_data = data;
	struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
	u8 mac_id, rate, sgi, bw;
	u8 mcs, nss;
	u32 bit_rate;

	mac_id = GET_RA_REPORT_MACID(ra_data->payload);
	if (si->mac_id != mac_id)
		return;

	si->ra_report.txrate.flags = 0;

	rate = GET_RA_REPORT_RATE(ra_data->payload);
	sgi = GET_RA_REPORT_SGI(ra_data->payload);
	bw = GET_RA_REPORT_BW(ra_data->payload);

	if (rate < DESC_RATEMCS0) {
		si->ra_report.txrate.legacy = rtw_desc_to_bitrate(rate);
		goto legacy;
	}

	rtw_desc_to_mcsrate(rate, &mcs, &nss);
	if (rate >= DESC_RATEVHT1SS_MCS0)
		si->ra_report.txrate.flags |= RATE_INFO_FLAGS_VHT_MCS;
	else if (rate >= DESC_RATEMCS0)
		si->ra_report.txrate.flags |= RATE_INFO_FLAGS_MCS;

	if (rate >= DESC_RATEMCS0) {
		si->ra_report.txrate.mcs = mcs;
		si->ra_report.txrate.nss = nss;
	}

	if (sgi)
		si->ra_report.txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;

	if (bw == RTW_CHANNEL_WIDTH_80)
		si->ra_report.txrate.bw = RATE_INFO_BW_80;
	else if (bw == RTW_CHANNEL_WIDTH_40)
		si->ra_report.txrate.bw = RATE_INFO_BW_40;
	else
		si->ra_report.txrate.bw = RATE_INFO_BW_20;

legacy:
	bit_rate = cfg80211_calculate_bitrate(&si->ra_report.txrate);

	si->ra_report.desc_rate = rate;
	si->ra_report.bit_rate = bit_rate;

	sta->max_rc_amsdu_len = get_max_amsdu_len(bit_rate);
}

static void rtw_fw_ra_report_handle(struct rtw_dev *rtwdev, u8 *payload,
				    u8 length)
{
	struct rtw_fw_iter_ra_data ra_data;

	if (WARN(length < 7, "invalid ra report c2h length\n"))
		return;

	rtwdev->dm_info.tx_rate = GET_RA_REPORT_RATE(payload);
	ra_data.rtwdev = rtwdev;
	ra_data.payload = payload;
	rtw_iterate_stas_atomic(rtwdev, rtw_fw_ra_report_iter, &ra_data);
}

struct rtw_beacon_filter_iter_data {
	struct rtw_dev *rtwdev;
	u8 *payload;
};

static void rtw_fw_bcn_filter_notify_vif_iter(void *data, u8 *mac,
					      struct ieee80211_vif *vif)
{
	struct rtw_beacon_filter_iter_data *iter_data = data;
	struct rtw_dev *rtwdev = iter_data->rtwdev;
	u8 *payload = iter_data->payload;
	u8 type = GET_BCN_FILTER_NOTIFY_TYPE(payload);
	u8 event = GET_BCN_FILTER_NOTIFY_EVENT(payload);
	/* the firmware RSSI is biased by +100, matching the threshold
	 * programmed in rtw_fw_beacon_filter_config(); remove the bias here
	 */
	s8 sig = (s8)GET_BCN_FILTER_NOTIFY_RSSI(payload) - 100;

	switch (type) {
	case BCN_FILTER_NOTIFY_SIGNAL_CHANGE:
		event = event ? NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH :
			NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW;
		ieee80211_cqm_rssi_notify(vif, event, sig, GFP_KERNEL);
		break;
	case BCN_FILTER_CONNECTION_LOSS:
		ieee80211_connection_loss(vif);
		break;
	case BCN_FILTER_CONNECTED:
		rtwdev->beacon_loss = false;
		break;
	case BCN_FILTER_NOTIFY_BEACON_LOSS:
		rtwdev->beacon_loss = true;
		rtw_leave_lps(rtwdev);
		break;
	}
}

static void rtw_fw_bcn_filter_notify(struct rtw_dev *rtwdev, u8 *payload,
				     u8 length)
{
	struct rtw_beacon_filter_iter_data dev_iter_data;

	dev_iter_data.rtwdev = rtwdev;
	dev_iter_data.payload = payload;
	rtw_iterate_vifs(rtwdev, rtw_fw_bcn_filter_notify_vif_iter,
			 &dev_iter_data);
}

static void rtw_fw_scan_result(struct rtw_dev *rtwdev, u8 *payload,
			       u8 length)
{
	struct rtw_dm_info *dm_info = &rtwdev->dm_info;

	dm_info->scan_density = payload[0];

	rtw_dbg(rtwdev, RTW_DBG_FW, "scan.density = %x\n",
		dm_info->scan_density);
}

static void rtw_fw_adaptivity_result(struct rtw_dev *rtwdev, u8 *payload,
				     u8 length)
{
	struct rtw_hw_reg_offset *edcca_th = rtwdev->chip->edcca_th;
	struct rtw_c2h_adaptivity *result = (struct rtw_c2h_adaptivity *)payload;

	rtw_dbg(rtwdev, RTW_DBG_ADAPTIVITY,
		"Adaptivity: density %x igi %x l2h_th_init %x l2h %x h2l %x option %x\n",
		result->density, result->igi, result->l2h_th_init, result->l2h,
		result->h2l, result->option);

	rtw_dbg(rtwdev, RTW_DBG_ADAPTIVITY, "Reg Setting: L2H %x H2L %x\n",
		rtw_read32_mask(rtwdev, edcca_th[EDCCA_TH_L2H_IDX].hw_reg.addr,
				edcca_th[EDCCA_TH_L2H_IDX].hw_reg.mask),
		rtw_read32_mask(rtwdev, edcca_th[EDCCA_TH_H2L_IDX].hw_reg.addr,
				edcca_th[EDCCA_TH_H2L_IDX].hw_reg.mask));

	rtw_dbg(rtwdev, RTW_DBG_ADAPTIVITY, "EDCCA Flag %s\n",
		rtw_read32_mask(rtwdev, REG_EDCCA_REPORT, BIT_EDCCA_FLAG) ?
		"Set" : "Unset");
}

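/* Process C2H commands that were queued from the receive path. This runs
 * from c2h_work in process context and takes rtwdev->mutex, so the handlers
 * called here may sleep.
 */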
void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb)
{
	struct rtw_c2h_cmd *c2h;
	u32 pkt_offset;
	u8 len;

	pkt_offset = *((u32 *)skb->cb);
	c2h = (struct rtw_c2h_cmd *)(skb->data + pkt_offset);
	len = skb->len - pkt_offset - 2;

	mutex_lock(&rtwdev->mutex);

	if (!test_bit(RTW_FLAG_RUNNING, rtwdev->flags))
		goto unlock;

	switch (c2h->id) {
	case C2H_CCX_TX_RPT:
		rtw_tx_report_handle(rtwdev, skb, C2H_CCX_TX_RPT);
		break;
	case C2H_BT_INFO:
		rtw_coex_bt_info_notify(rtwdev, c2h->payload, len);
		break;
	case C2H_WLAN_INFO:
		rtw_coex_wl_fwdbginfo_notify(rtwdev, c2h->payload, len);
		break;
	case C2H_BCN_FILTER_NOTIFY:
		rtw_fw_bcn_filter_notify(rtwdev, c2h->payload, len);
		break;
	case C2H_HALMAC:
		rtw_fw_c2h_cmd_handle_ext(rtwdev, skb);
		break;
	case C2H_RA_RPT:
		rtw_fw_ra_report_handle(rtwdev, c2h->payload, len);
		break;
	default:
		rtw_dbg(rtwdev, RTW_DBG_FW, "C2H 0x%x isn't handled\n", c2h->id);
		break;
	}

unlock:
	mutex_unlock(&rtwdev->mutex);
}

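/* Entry point for C2H skbs from the interrupt path. Completion-style events
 * (coex info, RF-on, scan result, adaptivity) are consumed immediately;
 * everything else is queued to c2h_work and handled later by
 * rtw_fw_c2h_cmd_handle() in process context.
 */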
void rtw_fw_c2h_cmd_rx_irqsafe(struct rtw_dev *rtwdev, u32 pkt_offset,
			       struct sk_buff *skb)
{
	struct rtw_c2h_cmd *c2h;
	u8 len;

	c2h = (struct rtw_c2h_cmd *)(skb->data + pkt_offset);
	len = skb->len - pkt_offset - 2;
	*((u32 *)skb->cb) = pkt_offset;

	rtw_dbg(rtwdev, RTW_DBG_FW, "recv C2H, id=0x%02x, seq=0x%02x, len=%d\n",
		c2h->id, c2h->seq, len);

	switch (c2h->id) {
	case C2H_BT_MP_INFO:
		rtw_coex_info_response(rtwdev, skb);
		break;
	case C2H_WLAN_RFON:
		complete(&rtwdev->lps_leave_check);
		dev_kfree_skb_any(skb);
		break;
	case C2H_SCAN_RESULT:
		complete(&rtwdev->fw_scan_density);
		rtw_fw_scan_result(rtwdev, c2h->payload, len);
		dev_kfree_skb_any(skb);
		break;
	case C2H_ADAPTIVITY:
		rtw_fw_adaptivity_result(rtwdev, c2h->payload, len);
		dev_kfree_skb_any(skb);
		break;
	default:
		/* pass offset for further operation */
		*((u32 *)skb->cb) = pkt_offset;
		skb_queue_tail(&rtwdev->c2h_queue, skb);
		ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work);
		break;
	}
}
EXPORT_SYMBOL(rtw_fw_c2h_cmd_rx_irqsafe);

void rtw_fw_c2h_cmd_isr(struct rtw_dev *rtwdev)
{
	if (rtw_read8(rtwdev, REG_MCU_TST_CFG) == VAL_FW_TRIGGER)
		rtw_fw_recovery(rtwdev);
	else
		rtw_warn(rtwdev, "unhandled firmware c2h interrupt\n");
}
EXPORT_SYMBOL(rtw_fw_c2h_cmd_isr);

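/* Push an 8-byte H2C command through one of the four HMEBOX mailboxes.
 * Boxes are used round-robin; REG_HMETFR tells whether the firmware has
 * drained a box, and the poll below waits up to 3 ms per command.
 */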
static void rtw_fw_send_h2c_command(struct rtw_dev *rtwdev,
				    u8 *h2c)
{
	u8 box;
	u8 box_state;
	u32 box_reg, box_ex_reg;
	int idx;
	int ret;

	rtw_dbg(rtwdev, RTW_DBG_FW,
		"send H2C content %02x%02x%02x%02x %02x%02x%02x%02x\n",
		h2c[3], h2c[2], h2c[1], h2c[0],
		h2c[7], h2c[6], h2c[5], h2c[4]);

	spin_lock(&rtwdev->h2c.lock);

	box = rtwdev->h2c.last_box_num;
	switch (box) {
	case 0:
		box_reg = REG_HMEBOX0;
		box_ex_reg = REG_HMEBOX0_EX;
		break;
	case 1:
		box_reg = REG_HMEBOX1;
		box_ex_reg = REG_HMEBOX1_EX;
		break;
	case 2:
		box_reg = REG_HMEBOX2;
		box_ex_reg = REG_HMEBOX2_EX;
		break;
	case 3:
		box_reg = REG_HMEBOX3;
		box_ex_reg = REG_HMEBOX3_EX;
		break;
	default:
		WARN(1, "invalid h2c mail box number\n");
		goto out;
	}

	ret = read_poll_timeout_atomic(rtw_read8, box_state,
				       !((box_state >> box) & 0x1), 100, 3000,
				       false, rtwdev, REG_HMETFR);

	if (ret) {
		rtw_err(rtwdev, "failed to send h2c command\n");
		goto out;
	}

	for (idx = 0; idx < 4; idx++)
		rtw_write8(rtwdev, box_reg + idx, h2c[idx]);
	for (idx = 0; idx < 4; idx++)
		rtw_write8(rtwdev, box_ex_reg + idx, h2c[idx + 4]);

	if (++rtwdev->h2c.last_box_num >= 4)
		rtwdev->h2c.last_box_num = 0;

out:
	spin_unlock(&rtwdev->h2c.lock);
}

void rtw_fw_h2c_cmd_dbg(struct rtw_dev *rtwdev, u8 *h2c)
{
	rtw_fw_send_h2c_command(rtwdev, h2c);
}

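/* Unlike the 8-byte mailbox commands above, offloaded H2C "packets" are
 * written through the HCI data path (rtw_hci_write_data_h2c) and carry a
 * rolling sequence number.
 */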
static void rtw_fw_send_h2c_packet(struct rtw_dev *rtwdev, u8 *h2c_pkt)
{
	int ret;

	spin_lock(&rtwdev->h2c.lock);

	FW_OFFLOAD_H2C_SET_SEQ_NUM(h2c_pkt, rtwdev->h2c.seq);
	ret = rtw_hci_write_data_h2c(rtwdev, h2c_pkt, H2C_PKT_SIZE);
	if (ret)
		rtw_err(rtwdev, "failed to send h2c packet\n");
	rtwdev->h2c.seq++;

	spin_unlock(&rtwdev->h2c.lock);
}

void
rtw_fw_send_general_info(struct rtw_dev *rtwdev)
{
	struct rtw_fifo_conf *fifo = &rtwdev->fifo;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	u16 total_size = H2C_PKT_HDR_SIZE + 4;

	if (rtw_chip_wcpu_11n(rtwdev))
		return;

	rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_GENERAL_INFO);

	SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size);

	GENERAL_INFO_SET_FW_TX_BOUNDARY(h2c_pkt,
					fifo->rsvd_fw_txbuf_addr -
					fifo->rsvd_boundary);

	rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
}

void
rtw_fw_send_phydm_info(struct rtw_dev *rtwdev)
{
	struct rtw_hal *hal = &rtwdev->hal;
	struct rtw_efuse *efuse = &rtwdev->efuse;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	u16 total_size = H2C_PKT_HDR_SIZE + 8;
	u8 fw_rf_type = 0;

	if (rtw_chip_wcpu_11n(rtwdev))
		return;

	if (hal->rf_type == RF_1T1R)
		fw_rf_type = FW_RF_1T1R;
	else if (hal->rf_type == RF_2T2R)
		fw_rf_type = FW_RF_2T2R;

	rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_PHYDM_INFO);

	SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size);
	PHYDM_INFO_SET_REF_TYPE(h2c_pkt, efuse->rfe_option);
	PHYDM_INFO_SET_RF_TYPE(h2c_pkt, fw_rf_type);
	PHYDM_INFO_SET_CUT_VER(h2c_pkt, hal->cut_version);
	PHYDM_INFO_SET_RX_ANT_STATUS(h2c_pkt, hal->antenna_tx);
	PHYDM_INFO_SET_TX_ANT_STATUS(h2c_pkt, hal->antenna_rx);

	rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
}

void rtw_fw_do_iqk(struct rtw_dev *rtwdev, struct rtw_iqk_para *para)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	u16 total_size = H2C_PKT_HDR_SIZE + 1;

	rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_IQK);
	SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size);
	IQK_SET_CLEAR(h2c_pkt, para->clear);
	IQK_SET_SEGMENT_IQK(h2c_pkt, para->segment_iqk);

	rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
}
EXPORT_SYMBOL(rtw_fw_do_iqk);

void rtw_fw_inform_rfk_status(struct rtw_dev *rtwdev, bool start)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_WIFI_CALIBRATION);

	RFK_SET_INFORM_START(h2c_pkt, start);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}
EXPORT_SYMBOL(rtw_fw_inform_rfk_status);

void rtw_fw_query_bt_info(struct rtw_dev *rtwdev)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_QUERY_BT_INFO);

	SET_QUERY_BT_INFO(h2c_pkt, true);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_wl_ch_info(struct rtw_dev *rtwdev, u8 link, u8 ch, u8 bw)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_WL_CH_INFO);

	SET_WL_CH_INFO_LINK(h2c_pkt, link);
	SET_WL_CH_INFO_CHNL(h2c_pkt, ch);
	SET_WL_CH_INFO_BW(h2c_pkt, bw);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_query_bt_mp_info(struct rtw_dev *rtwdev,
			     struct rtw_coex_info_req *req)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_QUERY_BT_MP_INFO);

	SET_BT_MP_INFO_SEQ(h2c_pkt, req->seq);
	SET_BT_MP_INFO_OP_CODE(h2c_pkt, req->op_code);
	SET_BT_MP_INFO_PARA1(h2c_pkt, req->para1);
	SET_BT_MP_INFO_PARA2(h2c_pkt, req->para2);
	SET_BT_MP_INFO_PARA3(h2c_pkt, req->para3);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_force_bt_tx_power(struct rtw_dev *rtwdev, u8 bt_pwr_dec_lvl)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	u8 index = 0 - bt_pwr_dec_lvl;

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_FORCE_BT_TX_POWER);

	SET_BT_TX_POWER_INDEX(h2c_pkt, index);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_bt_ignore_wlan_action(struct rtw_dev *rtwdev, bool enable)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_IGNORE_WLAN_ACTION);

	SET_IGNORE_WLAN_ACTION_EN(h2c_pkt, enable);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_coex_tdma_type(struct rtw_dev *rtwdev,
			   u8 para1, u8 para2, u8 para3, u8 para4, u8 para5)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_COEX_TDMA_TYPE);

	SET_COEX_TDMA_TYPE_PARA1(h2c_pkt, para1);
	SET_COEX_TDMA_TYPE_PARA2(h2c_pkt, para2);
	SET_COEX_TDMA_TYPE_PARA3(h2c_pkt, para3);
	SET_COEX_TDMA_TYPE_PARA4(h2c_pkt, para4);
	SET_COEX_TDMA_TYPE_PARA5(h2c_pkt, para5);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_bt_wifi_control(struct rtw_dev *rtwdev, u8 op_code, u8 *data)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_BT_WIFI_CONTROL);

	SET_BT_WIFI_CONTROL_OP_CODE(h2c_pkt, op_code);

	SET_BT_WIFI_CONTROL_DATA1(h2c_pkt, *data);
	SET_BT_WIFI_CONTROL_DATA2(h2c_pkt, *(data + 1));
	SET_BT_WIFI_CONTROL_DATA3(h2c_pkt, *(data + 2));
	SET_BT_WIFI_CONTROL_DATA4(h2c_pkt, *(data + 3));
	SET_BT_WIFI_CONTROL_DATA5(h2c_pkt, *(data + 4));

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_send_rssi_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	u8 rssi = ewma_rssi_read(&si->avg_rssi);
	bool stbc_en = si->stbc_en ? true : false;

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_RSSI_MONITOR);

	SET_RSSI_INFO_MACID(h2c_pkt, si->mac_id);
	SET_RSSI_INFO_RSSI(h2c_pkt, rssi);
	SET_RSSI_INFO_STBC(h2c_pkt, stbc_en);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	bool no_update = si->updated;
	bool disable_pt = true;

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_RA_INFO);

	SET_RA_INFO_MACID(h2c_pkt, si->mac_id);
	SET_RA_INFO_RATE_ID(h2c_pkt, si->rate_id);
	SET_RA_INFO_INIT_RA_LVL(h2c_pkt, si->init_ra_lv);
	SET_RA_INFO_SGI_EN(h2c_pkt, si->sgi_enable);
	SET_RA_INFO_BW_MODE(h2c_pkt, si->bw_mode);
	SET_RA_INFO_LDPC(h2c_pkt, !!si->ldpc_en);
	SET_RA_INFO_NO_UPDATE(h2c_pkt, no_update);
	SET_RA_INFO_VHT_EN(h2c_pkt, si->vht_enable);
	SET_RA_INFO_DIS_PT(h2c_pkt, disable_pt);
	SET_RA_INFO_RA_MASK0(h2c_pkt, (si->ra_mask & 0xff));
	SET_RA_INFO_RA_MASK1(h2c_pkt, (si->ra_mask & 0xff00) >> 8);
	SET_RA_INFO_RA_MASK2(h2c_pkt, (si->ra_mask & 0xff0000) >> 16);
	SET_RA_INFO_RA_MASK3(h2c_pkt, (si->ra_mask & 0xff000000) >> 24);

	si->init_ra_lv = 0;
	si->updated = true;

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_media_status_report(struct rtw_dev *rtwdev, u8 mac_id, bool connect)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_MEDIA_STATUS_RPT);
	MEDIA_STATUS_RPT_SET_OP_MODE(h2c_pkt, connect);
	MEDIA_STATUS_RPT_SET_MACID(h2c_pkt, mac_id);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_update_wl_phy_info(struct rtw_dev *rtwdev)
{
	struct rtw_traffic_stats *stats = &rtwdev->stats;
	struct rtw_dm_info *dm_info = &rtwdev->dm_info;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_WL_PHY_INFO);
	SET_WL_PHY_INFO_TX_TP(h2c_pkt, stats->tx_throughput);
	SET_WL_PHY_INFO_RX_TP(h2c_pkt, stats->rx_throughput);
	SET_WL_PHY_INFO_TX_RATE_DESC(h2c_pkt, dm_info->tx_rate);
	SET_WL_PHY_INFO_RX_RATE_DESC(h2c_pkt, dm_info->curr_rx_rate);
	SET_WL_PHY_INFO_RX_EVM(h2c_pkt, dm_info->rx_evm_dbm[RF_PATH_A]);
	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

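/* Beacon filter offload is programmed in two stages: P0 tells the firmware
 * which BSSID to track, P1 enables filtering and sets the CQM RSSI
 * threshold. The threshold is biased by +100 so it fits in an unsigned
 * field; the notify handler above subtracts 100 again.
 */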
void rtw_fw_beacon_filter_config(struct rtw_dev *rtwdev, bool connect,
				 struct ieee80211_vif *vif)
{
	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
	struct ieee80211_sta *sta = ieee80211_find_sta(vif, bss_conf->bssid);
	static const u8 rssi_min = 0, rssi_max = 100, rssi_offset = 100;
	struct rtw_sta_info *si =
		sta ? (struct rtw_sta_info *)sta->drv_priv : NULL;
	s32 threshold = bss_conf->cqm_rssi_thold + rssi_offset;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_BCN_FILTER) || !si)
		return;

	if (!connect) {
		SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_BCN_FILTER_OFFLOAD_P1);
		SET_BCN_FILTER_OFFLOAD_P1_ENABLE(h2c_pkt, connect);
		rtw_fw_send_h2c_command(rtwdev, h2c_pkt);

		return;
	}
	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_BCN_FILTER_OFFLOAD_P0);
	ether_addr_copy(&h2c_pkt[1], bss_conf->bssid);
	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);

	memset(h2c_pkt, 0, sizeof(h2c_pkt));
	threshold = clamp_t(s32, threshold, rssi_min, rssi_max);
	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_BCN_FILTER_OFFLOAD_P1);
	SET_BCN_FILTER_OFFLOAD_P1_ENABLE(h2c_pkt, connect);
	SET_BCN_FILTER_OFFLOAD_P1_OFFLOAD_MODE(h2c_pkt,
					       BCN_FILTER_OFFLOAD_MODE_DEFAULT);
	SET_BCN_FILTER_OFFLOAD_P1_THRESHOLD(h2c_pkt, (u8)threshold);
	SET_BCN_FILTER_OFFLOAD_P1_BCN_LOSS_CNT(h2c_pkt, BCN_LOSS_CNT);
	SET_BCN_FILTER_OFFLOAD_P1_MACID(h2c_pkt, si->mac_id);
	SET_BCN_FILTER_OFFLOAD_P1_HYST(h2c_pkt, bss_conf->cqm_rssi_hyst);
	SET_BCN_FILTER_OFFLOAD_P1_BCN_INTERVAL(h2c_pkt, bss_conf->beacon_int);
	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_set_pwr_mode(struct rtw_dev *rtwdev)
{
	struct rtw_lps_conf *conf = &rtwdev->lps_conf;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_SET_PWR_MODE);

	SET_PWR_MODE_SET_MODE(h2c_pkt, conf->mode);
	SET_PWR_MODE_SET_RLBM(h2c_pkt, conf->rlbm);
	SET_PWR_MODE_SET_SMART_PS(h2c_pkt, conf->smart_ps);
	SET_PWR_MODE_SET_AWAKE_INTERVAL(h2c_pkt, conf->awake_interval);
	SET_PWR_MODE_SET_PORT_ID(h2c_pkt, conf->port_id);
	SET_PWR_MODE_SET_PWR_STATE(h2c_pkt, conf->state);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_set_keep_alive_cmd(struct rtw_dev *rtwdev, bool enable)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	struct rtw_fw_wow_keep_alive_para mode = {
		.adopt = true,
		.pkt_type = KEEP_ALIVE_NULL_PKT,
		.period = 5,
	};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_KEEP_ALIVE);
	SET_KEEP_ALIVE_ENABLE(h2c_pkt, enable);
	SET_KEEP_ALIVE_ADOPT(h2c_pkt, mode.adopt);
	SET_KEEP_ALIVE_PKT_TYPE(h2c_pkt, mode.pkt_type);
	SET_KEEP_ALIVE_CHECK_PERIOD(h2c_pkt, mode.period);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_set_disconnect_decision_cmd(struct rtw_dev *rtwdev, bool enable)
{
	struct rtw_wow_param *rtw_wow = &rtwdev->wow;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	struct rtw_fw_wow_disconnect_para mode = {
		.adopt = true,
		.period = 30,
		.retry_count = 5,
	};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_DISCONNECT_DECISION);

	if (test_bit(RTW_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) {
		SET_DISCONNECT_DECISION_ENABLE(h2c_pkt, enable);
		SET_DISCONNECT_DECISION_ADOPT(h2c_pkt, mode.adopt);
		SET_DISCONNECT_DECISION_CHECK_PERIOD(h2c_pkt, mode.period);
		SET_DISCONNECT_DECISION_TRY_PKT_NUM(h2c_pkt, mode.retry_count);
	}

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_set_wowlan_ctrl_cmd(struct rtw_dev *rtwdev, bool enable)
{
	struct rtw_wow_param *rtw_wow = &rtwdev->wow;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_WOWLAN);

	SET_WOWLAN_FUNC_ENABLE(h2c_pkt, enable);
	if (rtw_wow_mgd_linked(rtwdev)) {
		if (test_bit(RTW_WOW_FLAG_EN_MAGIC_PKT, rtw_wow->flags))
			SET_WOWLAN_MAGIC_PKT_ENABLE(h2c_pkt, enable);
		if (test_bit(RTW_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags))
			SET_WOWLAN_DEAUTH_WAKEUP_ENABLE(h2c_pkt, enable);
		if (test_bit(RTW_WOW_FLAG_EN_REKEY_PKT, rtw_wow->flags))
			SET_WOWLAN_REKEY_WAKEUP_ENABLE(h2c_pkt, enable);
		if (rtw_wow->pattern_cnt)
			SET_WOWLAN_PATTERN_MATCH_ENABLE(h2c_pkt, enable);
	}

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_set_aoac_global_info_cmd(struct rtw_dev *rtwdev,
				     u8 pairwise_key_enc,
				     u8 group_key_enc)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_AOAC_GLOBAL_INFO);

	SET_AOAC_GLOBAL_INFO_PAIRWISE_ENC_ALG(h2c_pkt, pairwise_key_enc);
	SET_AOAC_GLOBAL_INFO_GROUP_ENC_ALG(h2c_pkt, group_key_enc);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_set_remote_wake_ctrl_cmd(struct rtw_dev *rtwdev, bool enable)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_REMOTE_WAKE_CTRL);

	SET_REMOTE_WAKECTRL_ENABLE(h2c_pkt, enable);

	if (rtw_wow_no_link(rtwdev))
		SET_REMOTE_WAKE_CTRL_NLO_OFFLOAD_EN(h2c_pkt, enable);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

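/* Look up the reserved page index of a packet type on the current build
 * list. Returns 0 when the type was never downloaded; callers treat that
 * as "not present" (the first page always holds the beacon, so valid
 * locations are non-zero).
 */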
static u8 rtw_get_rsvd_page_location(struct rtw_dev *rtwdev,
				     enum rtw_rsvd_packet_type type)
{
	struct rtw_rsvd_page *rsvd_pkt;
	u8 location = 0;

	list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) {
		if (type == rsvd_pkt->type)
			location = rsvd_pkt->page;
	}

	return location;
}

void rtw_fw_set_nlo_info(struct rtw_dev *rtwdev, bool enable)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	u8 loc_nlo;

	loc_nlo = rtw_get_rsvd_page_location(rtwdev, RSVD_NLO_INFO);

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_NLO_INFO);

	SET_NLO_FUN_EN(h2c_pkt, enable);
	if (enable) {
		if (rtw_get_lps_deep_mode(rtwdev) != LPS_DEEP_MODE_NONE)
			SET_NLO_PS_32K(h2c_pkt, enable);
		SET_NLO_IGNORE_SECURITY(h2c_pkt, enable);
		SET_NLO_LOC_NLO_INFO(h2c_pkt, loc_nlo);
	}

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_set_pg_info(struct rtw_dev *rtwdev)
{
	struct rtw_lps_conf *conf = &rtwdev->lps_conf;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	u8 loc_pg, loc_dpk;

	loc_pg = rtw_get_rsvd_page_location(rtwdev, RSVD_LPS_PG_INFO);
	loc_dpk = rtw_get_rsvd_page_location(rtwdev, RSVD_LPS_PG_DPK);

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_LPS_PG_INFO);

	LPS_PG_INFO_LOC(h2c_pkt, loc_pg);
	LPS_PG_DPK_LOC(h2c_pkt, loc_dpk);
	LPS_PG_SEC_CAM_EN(h2c_pkt, conf->sec_cam_backup);
	LPS_PG_PATTERN_CAM_EN(h2c_pkt, conf->pattern_cam_backup);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

static u8 rtw_get_rsvd_page_probe_req_location(struct rtw_dev *rtwdev,
					       struct cfg80211_ssid *ssid)
{
	struct rtw_rsvd_page *rsvd_pkt;
	u8 location = 0;

	list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) {
		if (rsvd_pkt->type != RSVD_PROBE_REQ)
			continue;
		if ((!ssid && !rsvd_pkt->ssid) ||
		    rtw_ssid_equal(rsvd_pkt->ssid, ssid))
			location = rsvd_pkt->page;
	}

	return location;
}

static u16 rtw_get_rsvd_page_probe_req_size(struct rtw_dev *rtwdev,
					    struct cfg80211_ssid *ssid)
{
	struct rtw_rsvd_page *rsvd_pkt;
	u16 size = 0;

	list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) {
		if (rsvd_pkt->type != RSVD_PROBE_REQ)
			continue;
		if ((!ssid && !rsvd_pkt->ssid) ||
		    rtw_ssid_equal(rsvd_pkt->ssid, ssid))
			size = rsvd_pkt->probe_req_size;
	}

	return size;
}

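/* The H2C_CMD_RSVD_PAGE payload is positional: bytes 1-4 carry the page
 * locations of the probe response, PS-poll, NULL and QoS NULL frames, in
 * that order.
 */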
void rtw_send_rsvd_page_h2c(struct rtw_dev *rtwdev)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	u8 location = 0;

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_RSVD_PAGE);

	location = rtw_get_rsvd_page_location(rtwdev, RSVD_PROBE_RESP);
	*(h2c_pkt + 1) = location;
	rtw_dbg(rtwdev, RTW_DBG_FW, "RSVD_PROBE_RESP loc: %d\n", location);

	location = rtw_get_rsvd_page_location(rtwdev, RSVD_PS_POLL);
	*(h2c_pkt + 2) = location;
	rtw_dbg(rtwdev, RTW_DBG_FW, "RSVD_PS_POLL loc: %d\n", location);

	location = rtw_get_rsvd_page_location(rtwdev, RSVD_NULL);
	*(h2c_pkt + 3) = location;
	rtw_dbg(rtwdev, RTW_DBG_FW, "RSVD_NULL loc: %d\n", location);

	location = rtw_get_rsvd_page_location(rtwdev, RSVD_QOS_NULL);
	*(h2c_pkt + 4) = location;
	rtw_dbg(rtwdev, RTW_DBG_FW, "RSVD_QOS_NULL loc: %d\n", location);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

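/* Build the net-detect (NLO) reserved page: a header carrying per-SSID
 * lengths and probe request locations, followed by the SSIDs themselves,
 * each padded to IEEE80211_MAX_SSID_LEN.
 */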
static struct sk_buff *rtw_nlo_info_get(struct ieee80211_hw *hw)
{
	struct rtw_dev *rtwdev = hw->priv;
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_pno_request *pno_req = &rtwdev->wow.pno_req;
	struct rtw_nlo_info_hdr *nlo_hdr;
	struct cfg80211_ssid *ssid;
	struct sk_buff *skb;
	u8 *pos, loc;
	u32 size;
	int i;

	if (!pno_req->inited || !pno_req->match_set_cnt)
		return NULL;

	size = sizeof(struct rtw_nlo_info_hdr) + pno_req->match_set_cnt *
		      IEEE80211_MAX_SSID_LEN + chip->tx_pkt_desc_sz;

	skb = alloc_skb(size, GFP_KERNEL);
	if (!skb)
		return NULL;

	skb_reserve(skb, chip->tx_pkt_desc_sz);

	nlo_hdr = skb_put_zero(skb, sizeof(struct rtw_nlo_info_hdr));

	nlo_hdr->nlo_count = pno_req->match_set_cnt;
	nlo_hdr->hidden_ap_count = pno_req->match_set_cnt;

	/* pattern check for firmware */
	memset(nlo_hdr->pattern_check, 0xA5, FW_NLO_INFO_CHECK_SIZE);

	for (i = 0; i < pno_req->match_set_cnt; i++)
		nlo_hdr->ssid_len[i] = pno_req->match_sets[i].ssid.ssid_len;

	for (i = 0; i < pno_req->match_set_cnt; i++) {
		ssid = &pno_req->match_sets[i].ssid;
		loc = rtw_get_rsvd_page_probe_req_location(rtwdev, ssid);
		if (!loc) {
			rtw_err(rtwdev, "failed to get probe req rsvd loc\n");
			kfree_skb(skb);
			return NULL;
		}
		nlo_hdr->location[i] = loc;
	}

	for (i = 0; i < pno_req->match_set_cnt; i++) {
		pos = skb_put_zero(skb, IEEE80211_MAX_SSID_LEN);
		memcpy(pos, pno_req->match_sets[i].ssid.ssid,
		       pno_req->match_sets[i].ssid.ssid_len);
	}

	return skb;
}

static struct sk_buff *rtw_cs_channel_info_get(struct ieee80211_hw *hw)
{
	struct rtw_dev *rtwdev = hw->priv;
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_pno_request *pno_req = &rtwdev->wow.pno_req;
	struct ieee80211_channel *channels = pno_req->channels;
	struct sk_buff *skb;
	int count = pno_req->channel_cnt;
	u8 *pos;
	int i = 0;

	skb = alloc_skb(4 * count + chip->tx_pkt_desc_sz, GFP_KERNEL);
	if (!skb)
		return NULL;

	skb_reserve(skb, chip->tx_pkt_desc_sz);

	for (i = 0; i < count; i++) {
		pos = skb_put_zero(skb, 4);

		CHSW_INFO_SET_CH(pos, channels[i].hw_value);

		if (channels[i].flags & IEEE80211_CHAN_RADAR)
			CHSW_INFO_SET_ACTION_ID(pos, 0);
		else
			CHSW_INFO_SET_ACTION_ID(pos, 1);
		CHSW_INFO_SET_TIMEOUT(pos, 1);
		CHSW_INFO_SET_PRI_CH_IDX(pos, 1);
		CHSW_INFO_SET_BW(pos, 0);
	}

	return skb;
}

static struct sk_buff *rtw_lps_pg_dpk_get(struct ieee80211_hw *hw)
{
	struct rtw_dev *rtwdev = hw->priv;
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_dpk_info *dpk_info = &rtwdev->dm_info.dpk_info;
	struct rtw_lps_pg_dpk_hdr *dpk_hdr;
	struct sk_buff *skb;
	u32 size;

	size = chip->tx_pkt_desc_sz + sizeof(*dpk_hdr);
	skb = alloc_skb(size, GFP_KERNEL);
	if (!skb)
		return NULL;

	skb_reserve(skb, chip->tx_pkt_desc_sz);
	dpk_hdr = skb_put_zero(skb, sizeof(*dpk_hdr));
	dpk_hdr->dpk_ch = dpk_info->dpk_ch;
	dpk_hdr->dpk_path_ok = dpk_info->dpk_path_ok[0];
	memcpy(dpk_hdr->dpk_txagc, dpk_info->dpk_txagc, 2);
	memcpy(dpk_hdr->dpk_gs, dpk_info->dpk_gs, 4);
	memcpy(dpk_hdr->coef, dpk_info->coef, 160);

	return skb;
}

static struct sk_buff *rtw_lps_pg_info_get(struct ieee80211_hw *hw)
{
	struct rtw_dev *rtwdev = hw->priv;
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_lps_conf *conf = &rtwdev->lps_conf;
	struct rtw_lps_pg_info_hdr *pg_info_hdr;
	struct rtw_wow_param *rtw_wow = &rtwdev->wow;
	struct sk_buff *skb;
	u32 size;

	size = chip->tx_pkt_desc_sz + sizeof(*pg_info_hdr);
	skb = alloc_skb(size, GFP_KERNEL);
	if (!skb)
		return NULL;

	skb_reserve(skb, chip->tx_pkt_desc_sz);
	pg_info_hdr = skb_put_zero(skb, sizeof(*pg_info_hdr));
	pg_info_hdr->tx_bu_page_count = rtwdev->fifo.rsvd_drv_pg_num;
	pg_info_hdr->macid = find_first_bit(rtwdev->mac_id_map, RTW_MAX_MAC_ID_NUM);
	pg_info_hdr->sec_cam_count =
		rtw_sec_cam_pg_backup(rtwdev, pg_info_hdr->sec_cam);
	pg_info_hdr->pattern_count = rtw_wow->pattern_cnt;

	conf->sec_cam_backup = pg_info_hdr->sec_cam_count != 0;
	conf->pattern_cam_backup = rtw_wow->pattern_cnt != 0;

	return skb;
}

static struct sk_buff *rtw_get_rsvd_page_skb(struct ieee80211_hw *hw,
					     struct rtw_rsvd_page *rsvd_pkt)
{
	struct ieee80211_vif *vif;
	struct rtw_vif *rtwvif;
	struct sk_buff *skb_new;
	struct cfg80211_ssid *ssid;

	if (rsvd_pkt->type == RSVD_DUMMY) {
		skb_new = alloc_skb(1, GFP_KERNEL);
		if (!skb_new)
			return NULL;

		skb_put(skb_new, 1);
		return skb_new;
	}

	rtwvif = rsvd_pkt->rtwvif;
	if (!rtwvif)
		return NULL;

	vif = rtwvif_to_vif(rtwvif);

	switch (rsvd_pkt->type) {
	case RSVD_BEACON:
		skb_new = ieee80211_beacon_get(hw, vif);
		break;
	case RSVD_PS_POLL:
		skb_new = ieee80211_pspoll_get(hw, vif);
		break;
	case RSVD_PROBE_RESP:
		skb_new = ieee80211_proberesp_get(hw, vif);
		break;
	case RSVD_NULL:
		skb_new = ieee80211_nullfunc_get(hw, vif, false);
		break;
	case RSVD_QOS_NULL:
		skb_new = ieee80211_nullfunc_get(hw, vif, true);
		break;
	case RSVD_LPS_PG_DPK:
		skb_new = rtw_lps_pg_dpk_get(hw);
		break;
	case RSVD_LPS_PG_INFO:
		skb_new = rtw_lps_pg_info_get(hw);
		break;
	case RSVD_PROBE_REQ:
		ssid = (struct cfg80211_ssid *)rsvd_pkt->ssid;
		if (ssid)
			skb_new = ieee80211_probereq_get(hw, vif->addr,
							 ssid->ssid,
							 ssid->ssid_len, 0);
		else
			skb_new = ieee80211_probereq_get(hw, vif->addr, NULL, 0, 0);
		if (skb_new)
			rsvd_pkt->probe_req_size = (u16)skb_new->len;
		break;
	case RSVD_NLO_INFO:
		skb_new = rtw_nlo_info_get(hw);
		break;
	case RSVD_CH_INFO:
		skb_new = rtw_cs_channel_info_get(hw);
		break;
	default:
		return NULL;
	}

	if (!skb_new)
		return NULL;

	return skb_new;
}

static void rtw_fill_rsvd_page_desc(struct rtw_dev *rtwdev, struct sk_buff *skb,
				    enum rtw_rsvd_packet_type type)
{
	struct rtw_tx_pkt_info pkt_info = {0};
	struct rtw_chip_info *chip = rtwdev->chip;
	u8 *pkt_desc;

	rtw_tx_rsvd_page_pkt_info_update(rtwdev, &pkt_info, skb, type);
	pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz);
	memset(pkt_desc, 0, chip->tx_pkt_desc_sz);
	rtw_tx_fill_tx_desc(&pkt_info, skb);
}

static inline u8 rtw_len_to_page(unsigned int len, u8 page_size)
{
	return DIV_ROUND_UP(len, page_size);
}

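/* Copy one reserved packet into the flat download buffer. The first packet
 * starts at offset 0; later packets start at page boundaries measured from
 * page_margin, because the TX path prepends a tx_desc to the first page.
 * As a sketch with assumed chip values (page_size 128, tx_desc_sz 40):
 * page_margin is 88, so the packet at page 2 lands at byte 88 + 128.
 */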
static void rtw_rsvd_page_list_to_buf(struct rtw_dev *rtwdev, u8 page_size,
				      u8 page_margin, u32 page, u8 *buf,
				      struct rtw_rsvd_page *rsvd_pkt)
{
	struct sk_buff *skb = rsvd_pkt->skb;

	if (page >= 1)
		memcpy(buf + page_margin + page_size * (page - 1),
		       skb->data, skb->len);
	else
		memcpy(buf, skb->data, skb->len);
}

static struct rtw_rsvd_page *rtw_alloc_rsvd_page(struct rtw_dev *rtwdev,
						 enum rtw_rsvd_packet_type type,
						 bool txdesc)
{
	struct rtw_rsvd_page *rsvd_pkt = NULL;

	rsvd_pkt = kzalloc(sizeof(*rsvd_pkt), GFP_KERNEL);
	if (!rsvd_pkt)
		return NULL;

	INIT_LIST_HEAD(&rsvd_pkt->vif_list);
	INIT_LIST_HEAD(&rsvd_pkt->build_list);
	rsvd_pkt->type = type;
	rsvd_pkt->add_txdesc = txdesc;

	return rsvd_pkt;
}

static void rtw_insert_rsvd_page(struct rtw_dev *rtwdev,
				 struct rtw_vif *rtwvif,
				 struct rtw_rsvd_page *rsvd_pkt)
{
	lockdep_assert_held(&rtwdev->mutex);

	list_add_tail(&rsvd_pkt->vif_list, &rtwvif->rsvd_page_list);
}

static void rtw_add_rsvd_page(struct rtw_dev *rtwdev,
			      struct rtw_vif *rtwvif,
			      enum rtw_rsvd_packet_type type,
			      bool txdesc)
{
	struct rtw_rsvd_page *rsvd_pkt;

	rsvd_pkt = rtw_alloc_rsvd_page(rtwdev, type, txdesc);
	if (!rsvd_pkt) {
		rtw_err(rtwdev, "failed to alloc rsvd page %d\n", type);
		return;
	}

	rsvd_pkt->rtwvif = rtwvif;
	rtw_insert_rsvd_page(rtwdev, rtwvif, rsvd_pkt);
}

static void rtw_add_rsvd_page_probe_req(struct rtw_dev *rtwdev,
					struct rtw_vif *rtwvif,
					struct cfg80211_ssid *ssid)
{
	struct rtw_rsvd_page *rsvd_pkt;

	rsvd_pkt = rtw_alloc_rsvd_page(rtwdev, RSVD_PROBE_REQ, true);
	if (!rsvd_pkt) {
		rtw_err(rtwdev, "failed to alloc probe req rsvd page\n");
		return;
	}

	rsvd_pkt->rtwvif = rtwvif;
	rsvd_pkt->ssid = ssid;
	rtw_insert_rsvd_page(rtwdev, rtwvif, rsvd_pkt);
}

void rtw_remove_rsvd_page(struct rtw_dev *rtwdev,
			  struct rtw_vif *rtwvif)
{
	struct rtw_rsvd_page *rsvd_pkt, *tmp;

	lockdep_assert_held(&rtwdev->mutex);

	/* remove all of the rsvd pages for vif */
	list_for_each_entry_safe(rsvd_pkt, tmp, &rtwvif->rsvd_page_list,
				 vif_list) {
		list_del(&rsvd_pkt->vif_list);
		if (!list_empty(&rsvd_pkt->build_list))
			list_del(&rsvd_pkt->build_list);
		kfree(rsvd_pkt);
	}
}

void rtw_add_rsvd_page_bcn(struct rtw_dev *rtwdev,
			   struct rtw_vif *rtwvif)
{
	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);

	if (vif->type != NL80211_IFTYPE_AP &&
	    vif->type != NL80211_IFTYPE_ADHOC &&
	    vif->type != NL80211_IFTYPE_MESH_POINT) {
		rtw_warn(rtwdev, "Cannot add beacon rsvd page for %d\n",
			 vif->type);
		return;
	}

	rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_BEACON, false);
}

void rtw_add_rsvd_page_pno(struct rtw_dev *rtwdev,
			   struct rtw_vif *rtwvif)
{
	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
	struct rtw_wow_param *rtw_wow = &rtwdev->wow;
	struct rtw_pno_request *rtw_pno_req = &rtw_wow->pno_req;
	struct cfg80211_ssid *ssid;
	int i;

	if (vif->type != NL80211_IFTYPE_STATION) {
		rtw_warn(rtwdev, "Cannot add PNO rsvd page for %d\n",
			 vif->type);
		return;
	}

	for (i = 0; i < rtw_pno_req->match_set_cnt; i++) {
		ssid = &rtw_pno_req->match_sets[i].ssid;
		rtw_add_rsvd_page_probe_req(rtwdev, rtwvif, ssid);
	}

	rtw_add_rsvd_page_probe_req(rtwdev, rtwvif, NULL);
	rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_NLO_INFO, false);
	rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_CH_INFO, true);
}

void rtw_add_rsvd_page_sta(struct rtw_dev *rtwdev,
			   struct rtw_vif *rtwvif)
{
	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);

	if (vif->type != NL80211_IFTYPE_STATION) {
		rtw_warn(rtwdev, "Cannot add sta rsvd page for %d\n",
			 vif->type);
		return;
	}

	rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_PS_POLL, true);
	rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_QOS_NULL, true);
	rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_NULL, true);
	rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_LPS_PG_DPK, true);
	rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_LPS_PG_INFO, true);
}

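/* Download a buffer into the reserved page area of the TX FIFO. The beacon
 * queue is borrowed for the transfer: software beacon download is enabled
 * and the normal beacon-queue download path is held off, then both settings
 * are restored once the BCN_VALID bit confirms the write landed.
 */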
int rtw_fw_write_data_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr,
				u8 *buf, u32 size)
{
	u8 bckp[2];
	u8 val;
	u16 rsvd_pg_head;
	u32 bcn_valid_addr;
	u32 bcn_valid_mask;
	int ret;

	lockdep_assert_held(&rtwdev->mutex);

	if (!size)
		return -EINVAL;

	if (rtw_chip_wcpu_11n(rtwdev)) {
		rtw_write32_set(rtwdev, REG_DWBCN0_CTRL, BIT_BCN_VALID);
	} else {
		pg_addr &= BIT_MASK_BCN_HEAD_1_V1;
		pg_addr |= BIT_BCN_VALID_V1;
		rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2, pg_addr);
	}

	val = rtw_read8(rtwdev, REG_CR + 1);
	bckp[0] = val;
	val |= BIT_ENSWBCN >> 8;
	rtw_write8(rtwdev, REG_CR + 1, val);

	val = rtw_read8(rtwdev, REG_FWHW_TXQ_CTRL + 2);
	bckp[1] = val;
	val &= ~(BIT_EN_BCNQ_DL >> 16);
	rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, val);

	ret = rtw_hci_write_data_rsvd_page(rtwdev, buf, size);
	if (ret) {
		rtw_err(rtwdev, "failed to write data to rsvd page\n");
		goto restore;
	}

	if (rtw_chip_wcpu_11n(rtwdev)) {
		bcn_valid_addr = REG_DWBCN0_CTRL;
		bcn_valid_mask = BIT_BCN_VALID;
	} else {
		bcn_valid_addr = REG_FIFOPAGE_CTRL_2;
		bcn_valid_mask = BIT_BCN_VALID_V1;
	}

	if (!check_hw_ready(rtwdev, bcn_valid_addr, bcn_valid_mask, 1)) {
		rtw_err(rtwdev, "error beacon valid\n");
		ret = -EBUSY;
	}

restore:
	rsvd_pg_head = rtwdev->fifo.rsvd_boundary;
	rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2,
		    rsvd_pg_head | BIT_BCN_VALID_V1);
	rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, bckp[1]);
	rtw_write8(rtwdev, REG_CR + 1, bckp[0]);

	return ret;
}

static int rtw_download_drv_rsvd_page(struct rtw_dev *rtwdev, u8 *buf, u32 size)
{
	u32 pg_size;
	u32 pg_num = 0;
	u16 pg_addr = 0;

	pg_size = rtwdev->chip->page_size;
	pg_num = size / pg_size + ((size & (pg_size - 1)) ? 1 : 0);
	if (pg_num > rtwdev->fifo.rsvd_drv_pg_num)
		return -ENOMEM;

	pg_addr = rtwdev->fifo.rsvd_drv_addr;

	return rtw_fw_write_data_rsvd_page(rtwdev, pg_addr, buf, size);
}

static void __rtw_build_rsvd_page_reset(struct rtw_dev *rtwdev)
{
	struct rtw_rsvd_page *rsvd_pkt, *tmp;

	list_for_each_entry_safe(rsvd_pkt, tmp, &rtwdev->rsvd_page_list,
				 build_list) {
		list_del_init(&rsvd_pkt->build_list);

		/* Only free the dummy rsvd page here; the others are
		 * freed when their vif is removed.
		 */
		if (rsvd_pkt->type == RSVD_DUMMY)
			kfree(rsvd_pkt);
	}
}

static void rtw_build_rsvd_page_iter(void *data, u8 *mac,
				     struct ieee80211_vif *vif)
{
	struct rtw_dev *rtwdev = data;
	struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
	struct rtw_rsvd_page *rsvd_pkt;

	list_for_each_entry(rsvd_pkt, &rtwvif->rsvd_page_list, vif_list) {
		if (rsvd_pkt->type == RSVD_BEACON)
			list_add(&rsvd_pkt->build_list,
				 &rtwdev->rsvd_page_list);
		else
			list_add_tail(&rsvd_pkt->build_list,
				      &rtwdev->rsvd_page_list);
	}
}

static int __rtw_build_rsvd_page_from_vifs(struct rtw_dev *rtwdev)
{
	struct rtw_rsvd_page *rsvd_pkt;

	__rtw_build_rsvd_page_reset(rtwdev);

	/* gather rsvd page from vifs */
	rtw_iterate_vifs_atomic(rtwdev, rtw_build_rsvd_page_iter, rtwdev);

	rsvd_pkt = list_first_entry_or_null(&rtwdev->rsvd_page_list,
					    struct rtw_rsvd_page, build_list);
	if (!rsvd_pkt) {
		WARN(1, "Should not have an empty reserved page\n");
		return -EINVAL;
	}

	/* the first rsvd should be beacon, otherwise add a dummy one */
	if (rsvd_pkt->type != RSVD_BEACON) {
		struct rtw_rsvd_page *dummy_pkt;

		dummy_pkt = rtw_alloc_rsvd_page(rtwdev, RSVD_DUMMY, false);
		if (!dummy_pkt) {
			rtw_err(rtwdev, "failed to alloc dummy rsvd page\n");
			return -ENOMEM;
		}

		list_add(&dummy_pkt->build_list, &rtwdev->rsvd_page_list);
	}

	return 0;
}

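/* Flatten the reserved packets gathered from all vifs into one buffer and
 * assign each packet its page index. The first packet is counted as if a
 * tx_desc were already prepended, since the TX path adds one during
 * download. As a sketch with an assumed page_size of 128 and tx_desc_sz of
 * 40: a 100-byte beacon occupies DIV_ROUND_UP(100 + 40, 128) = 2 pages, so
 * the next packet is placed at page 2.
 */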
static u8 *rtw_build_rsvd_page(struct rtw_dev *rtwdev, u32 *size)
{
	struct ieee80211_hw *hw = rtwdev->hw;
	struct rtw_chip_info *chip = rtwdev->chip;
	struct sk_buff *iter;
	struct rtw_rsvd_page *rsvd_pkt;
	u32 page = 0;
	u8 total_page = 0;
	u8 page_size, page_margin, tx_desc_sz;
	u8 *buf;
	int ret;

	page_size = chip->page_size;
	tx_desc_sz = chip->tx_pkt_desc_sz;
	page_margin = page_size - tx_desc_sz;

	ret = __rtw_build_rsvd_page_from_vifs(rtwdev);
	if (ret) {
		rtw_err(rtwdev,
			"failed to build rsvd page from vifs, ret %d\n", ret);
		return NULL;
	}

	list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) {
		iter = rtw_get_rsvd_page_skb(hw, rsvd_pkt);
		if (!iter) {
			rtw_err(rtwdev, "failed to build rsvd packet\n");
			goto release_skb;
		}

		/* Fill the tx_desc for the rsvd pkt that requires one;
		 * this grows iter->len by tx_desc_sz.
		 */
		if (rsvd_pkt->add_txdesc)
			rtw_fill_rsvd_page_desc(rtwdev, iter, rsvd_pkt->type);

		rsvd_pkt->skb = iter;
		rsvd_pkt->page = total_page;

		/* The reserved page is downloaded via the TX path, which
		 * prepends a tx_desc describing the buffer length. If we
		 * did not count the first rsvd_pkt (usually a beacon; the
		 * firmware by default treats the first page as the beacon
		 * content) with that tx_desc size included, we could
		 * generate a buffer smaller than the actual size of the
		 * whole rsvd_page.
		 */
		if (total_page == 0) {
			if (rsvd_pkt->type != RSVD_BEACON &&
			    rsvd_pkt->type != RSVD_DUMMY) {
				rtw_err(rtwdev, "first page should be a beacon\n");
				goto release_skb;
			}
			total_page += rtw_len_to_page(iter->len + tx_desc_sz,
						      page_size);
		} else {
			total_page += rtw_len_to_page(iter->len, page_size);
		}
	}

	if (total_page > rtwdev->fifo.rsvd_drv_pg_num) {
		rtw_err(rtwdev, "rsvd page over size: %d\n", total_page);
		goto release_skb;
	}

	*size = (total_page - 1) * page_size + page_margin;
	buf = kzalloc(*size, GFP_KERNEL);
	if (!buf)
		goto release_skb;

	/* Copy the content of each rsvd_pkt to the buf, aligned to pages.
	 *
	 * Note that the first rsvd_pkt is a beacon no matter what vif->type.
	 * That rsvd_pkt does not need a tx_desc here, because the TX path
	 * will generate one for it during download.
	 */
	list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) {
		rtw_rsvd_page_list_to_buf(rtwdev, page_size, page_margin,
					  page, buf, rsvd_pkt);
		if (page == 0)
			page += rtw_len_to_page(rsvd_pkt->skb->len +
						tx_desc_sz, page_size);
		else
			page += rtw_len_to_page(rsvd_pkt->skb->len, page_size);

		kfree_skb(rsvd_pkt->skb);
		rsvd_pkt->skb = NULL;
	}

	return buf;

release_skb:
	list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) {
		kfree_skb(rsvd_pkt->skb);
		rsvd_pkt->skb = NULL;
	}

	return NULL;
}

static int rtw_download_beacon(struct rtw_dev *rtwdev)
{
	struct ieee80211_hw *hw = rtwdev->hw;
	struct rtw_rsvd_page *rsvd_pkt;
	struct sk_buff *skb;
	int ret = 0;

	rsvd_pkt = list_first_entry_or_null(&rtwdev->rsvd_page_list,
					    struct rtw_rsvd_page, build_list);
	if (!rsvd_pkt) {
		rtw_err(rtwdev, "failed to get rsvd page from build list\n");
		return -ENOENT;
	}

	if (rsvd_pkt->type != RSVD_BEACON &&
	    rsvd_pkt->type != RSVD_DUMMY) {
		rtw_err(rtwdev, "invalid rsvd page type %d, should be beacon or dummy\n",
			rsvd_pkt->type);
		return -EINVAL;
	}

	skb = rtw_get_rsvd_page_skb(hw, rsvd_pkt);
	if (!skb) {
		rtw_err(rtwdev, "failed to get beacon skb\n");
		return -ENOMEM;
	}

	ret = rtw_download_drv_rsvd_page(rtwdev, skb->data, skb->len);
	if (ret)
		rtw_err(rtwdev, "failed to download drv rsvd page\n");

	dev_kfree_skb(skb);

	return ret;
}

int rtw_fw_download_rsvd_page(struct rtw_dev *rtwdev)
{
	u8 *buf;
	u32 size;
	int ret;

	buf = rtw_build_rsvd_page(rtwdev, &size);
	if (!buf) {
		rtw_err(rtwdev, "failed to build rsvd page pkt\n");
		return -ENOMEM;
	}

	ret = rtw_download_drv_rsvd_page(rtwdev, buf, size);
	if (ret) {
		rtw_err(rtwdev, "failed to download drv rsvd page\n");
		goto free;
	}

	/* Finally, download the *only* beacon again, because the tx_desc
	 * written above describes the whole rsvd page. Downloading the
	 * beacon again replaces that tx_desc header, so the beacon in the
	 * rsvd page ends up with a correct tx_desc of its own.
	 */
	ret = rtw_download_beacon(rtwdev);
	if (ret) {
		rtw_err(rtwdev, "failed to download beacon\n");
		goto free;
	}

free:
	kfree(buf);

	return ret;
}

static void rtw_fw_read_fifo_page(struct rtw_dev *rtwdev, u32 offset, u32 size,
				  u32 *buf, u32 residue, u16 start_pg)
{
	u32 i;
	u16 idx = 0;
	u16 ctl;

	ctl = rtw_read16(rtwdev, REG_PKTBUF_DBG_CTRL) & 0xf000;
	/* disable rx clock gate */
	rtw_write32_set(rtwdev, REG_RCR, BIT_DISGCLK);

	do {
		rtw_write16(rtwdev, REG_PKTBUF_DBG_CTRL, start_pg | ctl);

		for (i = FIFO_DUMP_ADDR + residue;
		     i < FIFO_DUMP_ADDR + FIFO_PAGE_SIZE; i += 4) {
			buf[idx++] = rtw_read32(rtwdev, i);
			size -= 4;
			if (size == 0)
				goto out;
		}

		residue = 0;
		start_pg++;
	} while (size);

out:
	rtw_write16(rtwdev, REG_PKTBUF_DBG_CTRL, ctl);
	/* restore rx clock gate */
	rtw_write32_clr(rtwdev, REG_RCR, BIT_DISGCLK);
}

static void rtw_fw_read_fifo(struct rtw_dev *rtwdev, enum rtw_fw_fifo_sel sel,
			     u32 offset, u32 size, u32 *buf)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	u32 start_pg, residue;

	if (sel >= RTW_FW_FIFO_MAX) {
		rtw_dbg(rtwdev, RTW_DBG_FW, "wrong fw fifo sel\n");
		return;
	}
	if (sel == RTW_FW_FIFO_SEL_RSVD_PAGE)
		offset += rtwdev->fifo.rsvd_boundary << TX_PAGE_SIZE_SHIFT;
	residue = offset & (FIFO_PAGE_SIZE - 1);
	start_pg = (offset >> FIFO_PAGE_SIZE_SHIFT) + chip->fw_fifo_addr[sel];

	rtw_fw_read_fifo_page(rtwdev, offset, size, buf, residue, start_pg);
}

static bool rtw_fw_dump_check_size(struct rtw_dev *rtwdev,
				   enum rtw_fw_fifo_sel sel,
				   u32 start_addr, u32 size)
{
	switch (sel) {
	case RTW_FW_FIFO_SEL_TX:
	case RTW_FW_FIFO_SEL_RX:
		if ((start_addr + size) > rtwdev->chip->fw_fifo_addr[sel])
			return false;
		fallthrough;
	default:
		return true;
	}
}

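/* Read back part of a firmware-visible FIFO for debugging. A minimal usage
 * sketch (the buffer size and selector are illustrative):
 *
 *	u32 buf[64];
 *
 *	if (!rtw_fw_dump_fifo(rtwdev, RTW_FW_FIFO_SEL_RSVD_PAGE, 0,
 *			      sizeof(buf), buf))
 *		print_hex_dump_bytes("fifo: ", DUMP_PREFIX_OFFSET,
 *				     buf, sizeof(buf));
 *
 * size must be a non-zero multiple of 4, as checked below.
 */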
int rtw_fw_dump_fifo(struct rtw_dev *rtwdev, u8 fifo_sel, u32 addr, u32 size,
		     u32 *buffer)
{
	if (!rtwdev->chip->fw_fifo_addr[0]) {
		rtw_dbg(rtwdev, RTW_DBG_FW, "chip not support dump fw fifo\n");
		return -ENOTSUPP;
	}

	if (size == 0 || !buffer)
		return -EINVAL;

	if (size & 0x3) {
		rtw_dbg(rtwdev, RTW_DBG_FW, "not 4byte alignment\n");
		return -EINVAL;
	}

	if (!rtw_fw_dump_check_size(rtwdev, fifo_sel, addr, size)) {
		rtw_dbg(rtwdev, RTW_DBG_FW, "fw fifo dump size overflow\n");
		return -EINVAL;
	}

	rtw_fw_read_fifo(rtwdev, fifo_sel, addr, size, buffer);

	return 0;
}

static void __rtw_fw_update_pkt(struct rtw_dev *rtwdev, u8 pkt_id, u16 size,
				u8 location)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	u16 total_size = H2C_PKT_HDR_SIZE + H2C_PKT_UPDATE_PKT_LEN;

	rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_UPDATE_PKT);

	SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size);
	UPDATE_PKT_SET_PKT_ID(h2c_pkt, pkt_id);
	UPDATE_PKT_SET_LOCATION(h2c_pkt, location);

	/* include txdesc size */
	size += chip->tx_pkt_desc_sz;
	UPDATE_PKT_SET_SIZE(h2c_pkt, size);

	rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
}

void rtw_fw_update_pkt_probe_req(struct rtw_dev *rtwdev,
				 struct cfg80211_ssid *ssid)
{
	u8 loc;
	u16 size;

	loc = rtw_get_rsvd_page_probe_req_location(rtwdev, ssid);
	if (!loc) {
		rtw_err(rtwdev, "failed to get probe_req rsvd loc\n");
		return;
	}

	size = rtw_get_rsvd_page_probe_req_size(rtwdev, ssid);
	if (!size) {
		rtw_err(rtwdev, "failed to get probe_req rsvd size\n");
		return;
	}

	__rtw_fw_update_pkt(rtwdev, RTW_PACKET_PROBE_REQ, size, loc);
}

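/* Program the firmware channel switch used for net-detect. The per-channel
 * entries were packed 4 bytes apiece by rtw_cs_channel_info_get(), which is
 * why INFO_SIZE below is channel_cnt * 4.
 */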
void rtw_fw_channel_switch(struct rtw_dev *rtwdev, bool enable)
{
	struct rtw_pno_request *rtw_pno_req = &rtwdev->wow.pno_req;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	u16 total_size = H2C_PKT_HDR_SIZE + H2C_PKT_CH_SWITCH_LEN;
	u8 loc_ch_info;
	const struct rtw_ch_switch_option cs_option = {
		.dest_ch_en = 1,
		.dest_ch = 1,
		.periodic_option = 2,
		.normal_period = 5,
		.normal_period_sel = 0,
		.normal_cycle = 10,
		.slow_period = 1,
		.slow_period_sel = 1,
	};

	rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_CH_SWITCH);
	SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size);

	CH_SWITCH_SET_START(h2c_pkt, enable);
	CH_SWITCH_SET_DEST_CH_EN(h2c_pkt, cs_option.dest_ch_en);
	CH_SWITCH_SET_DEST_CH(h2c_pkt, cs_option.dest_ch);
	CH_SWITCH_SET_NORMAL_PERIOD(h2c_pkt, cs_option.normal_period);
	CH_SWITCH_SET_NORMAL_PERIOD_SEL(h2c_pkt, cs_option.normal_period_sel);
	CH_SWITCH_SET_SLOW_PERIOD(h2c_pkt, cs_option.slow_period);
	CH_SWITCH_SET_SLOW_PERIOD_SEL(h2c_pkt, cs_option.slow_period_sel);
	CH_SWITCH_SET_NORMAL_CYCLE(h2c_pkt, cs_option.normal_cycle);
	CH_SWITCH_SET_PERIODIC_OPT(h2c_pkt, cs_option.periodic_option);

	CH_SWITCH_SET_CH_NUM(h2c_pkt, rtw_pno_req->channel_cnt);
	CH_SWITCH_SET_INFO_SIZE(h2c_pkt, rtw_pno_req->channel_cnt * 4);

	loc_ch_info = rtw_get_rsvd_page_location(rtwdev, RSVD_CH_INFO);
	CH_SWITCH_SET_INFO_LOC(h2c_pkt, loc_ch_info);

	rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
}

void rtw_fw_adaptivity(struct rtw_dev *rtwdev)
{
	struct rtw_dm_info *dm_info = &rtwdev->dm_info;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	if (!rtw_edcca_enabled) {
		dm_info->edcca_mode = RTW_EDCCA_NORMAL;
		rtw_dbg(rtwdev, RTW_DBG_ADAPTIVITY,
			"EDCCA disabled by debugfs\n");
	}

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_ADAPTIVITY);
	SET_ADAPTIVITY_MODE(h2c_pkt, dm_info->edcca_mode);
	SET_ADAPTIVITY_OPTION(h2c_pkt, 2);
	SET_ADAPTIVITY_IGI(h2c_pkt, dm_info->igi_history[0]);
	SET_ADAPTIVITY_L2H(h2c_pkt, dm_info->l2h_th_ini);
	SET_ADAPTIVITY_DENSITY(h2c_pkt, dm_info->scan_density);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_scan_notify(struct rtw_dev *rtwdev, bool start)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_SCAN);
	SET_SCAN_START(h2c_pkt, start);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}