// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019  Realtek Corporation
 */

#include "main.h"
#include "mac.h"
#include "reg.h"
#include "fw.h"
#include "debug.h"

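/* Configure the MAC for a channel/bandwidth switch: program the 20M/40M TX
 * subcarrier indexes, the RF bandwidth mode, the MAC clock selection and the
 * microsecond time units, and enable the CCK check only on 5 GHz channels.
 */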
void rtw_set_channel_mac(struct rtw_dev *rtwdev, u8 channel, u8 bw,
			 u8 primary_ch_idx)
{
	u8 txsc40 = 0, txsc20 = 0;
	u32 value32;
	u8 value8;

	txsc20 = primary_ch_idx;
	if (bw == RTW_CHANNEL_WIDTH_80) {
		if (txsc20 == RTW_SC_20_UPPER || txsc20 == RTW_SC_20_UPMOST)
			txsc40 = RTW_SC_40_UPPER;
		else
			txsc40 = RTW_SC_40_LOWER;
	}
	rtw_write8(rtwdev, REG_DATA_SC,
		   BIT_TXSC_20M(txsc20) | BIT_TXSC_40M(txsc40));

	value32 = rtw_read32(rtwdev, REG_WMAC_TRXPTCL_CTL);
	value32 &= ~BIT_RFMOD;
	switch (bw) {
	case RTW_CHANNEL_WIDTH_80:
		value32 |= BIT_RFMOD_80M;
		break;
	case RTW_CHANNEL_WIDTH_40:
		value32 |= BIT_RFMOD_40M;
		break;
	case RTW_CHANNEL_WIDTH_20:
	default:
		break;
	}
	rtw_write32(rtwdev, REG_WMAC_TRXPTCL_CTL, value32);

	value32 = rtw_read32(rtwdev, REG_AFE_CTRL1) & ~(BIT_MAC_CLK_SEL);
	value32 |= (MAC_CLK_HW_DEF_80M << BIT_SHIFT_MAC_CLK_SEL);
	rtw_write32(rtwdev, REG_AFE_CTRL1, value32);

	rtw_write8(rtwdev, REG_USTIME_TSF, MAC_CLK_SPEED);
	rtw_write8(rtwdev, REG_USTIME_EDCA, MAC_CLK_SPEED);

	value8 = rtw_read8(rtwdev, REG_CCK_CHECK);
	value8 = value8 & ~BIT_CHECK_CCK_EN;
	if (IS_CH_5G_BAND(channel))
		value8 |= BIT_CHECK_CCK_EN;
	rtw_write8(rtwdev, REG_CCK_CHECK, value8);
}

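/* Prepare the chip before the power-on sequence runs: release REG_RSV_CTRL,
 * enable the BT digital clock on PCIe, select the PAPE/LNAON pin mux, and
 * hold the BB/RF blocks in their disabled state.
 */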
static int rtw_mac_pre_system_cfg(struct rtw_dev *rtwdev)
{
	u32 value32;
	u8 value8;

	rtw_write8(rtwdev, REG_RSV_CTRL, 0);

	switch (rtw_hci_type(rtwdev)) {
	case RTW_HCI_TYPE_PCIE:
		rtw_write32_set(rtwdev, REG_HCI_OPT_CTRL, BIT_BT_DIG_CLK_EN);
		break;
	case RTW_HCI_TYPE_USB:
		break;
	default:
		return -EINVAL;
	}

	/* config PIN Mux */
	value32 = rtw_read32(rtwdev, REG_PAD_CTRL1);
	value32 |= BIT_PAPE_WLBT_SEL | BIT_LNAON_WLBT_SEL;
	rtw_write32(rtwdev, REG_PAD_CTRL1, value32);

	value32 = rtw_read32(rtwdev, REG_LED_CFG);
	value32 &= ~(BIT_PAPE_SEL_EN | BIT_LNAON_SEL_EN);
	rtw_write32(rtwdev, REG_LED_CFG, value32);

	value32 = rtw_read32(rtwdev, REG_GPIO_MUXCFG);
	value32 |= BIT_WLRFE_4_5_EN;
	rtw_write32(rtwdev, REG_GPIO_MUXCFG, value32);

	/* disable BB/RF */
	value8 = rtw_read8(rtwdev, REG_SYS_FUNC_EN);
	value8 &= ~(BIT_FEN_BB_RSTB | BIT_FEN_BB_GLB_RST);
	rtw_write8(rtwdev, REG_SYS_FUNC_EN, value8);

	value8 = rtw_read8(rtwdev, REG_RF_CTRL);
	value8 &= ~(BIT_RF_SDM_RSTB | BIT_RF_RSTB | BIT_RF_EN);
	rtw_write8(rtwdev, REG_RF_CTRL, value8);

	value32 = rtw_read32(rtwdev, REG_WLRF1);
	value32 &= ~BIT_WLRF1_BBRF_EN;
	rtw_write32(rtwdev, REG_WLRF1, value32);

	return 0;
}

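/* Poll one power-sequence register until the masked value matches the
 * expected one. On PCIe the poll is retried once after pulsing BIT(3) of
 * REG_SYS_PW_CTRL, which appears to kick the power state machine again.
 */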
static int rtw_pwr_cmd_polling(struct rtw_dev *rtwdev,
			       const struct rtw_pwr_seq_cmd *cmd)
{
	u8 value;
	u8 flag = 0;
	u32 offset;
	u32 cnt = RTW_PWR_POLLING_CNT;

	if (cmd->base == RTW_PWR_ADDR_SDIO)
		offset = cmd->offset | SDIO_LOCAL_OFFSET;
	else
		offset = cmd->offset;

	do {
		cnt--;
		value = rtw_read8(rtwdev, offset);
		value &= cmd->mask;
		if (value == (cmd->value & cmd->mask))
			return 0;
		if (cnt == 0) {
			if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_PCIE &&
			    flag == 0) {
				value = rtw_read8(rtwdev, REG_SYS_PW_CTRL);
				value |= BIT(3);
				rtw_write8(rtwdev, REG_SYS_PW_CTRL, value);
				value &= ~BIT(3);
				rtw_write8(rtwdev, REG_SYS_PW_CTRL, value);
				cnt = RTW_PWR_POLLING_CNT;
				flag = 1;
			} else {
				return -EBUSY;
			}
		} else {
			udelay(50);
		}
	} while (1);
}

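/* Execute a single power-sequence command table. Entries whose interface or
 * cut mask does not match the running configuration are skipped; WRITE
 * entries are applied as 8-bit read-modify-write, POLLING entries wait for a
 * register value, and DELAY entries busy-wait in us or ms units.
 */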
static int rtw_sub_pwr_seq_parser(struct rtw_dev *rtwdev, u8 intf_mask,
				  u8 cut_mask,
				  const struct rtw_pwr_seq_cmd *cmd)
{
	const struct rtw_pwr_seq_cmd *cur_cmd;
	u32 offset;
	u8 value;

	for (cur_cmd = cmd; cur_cmd->cmd != RTW_PWR_CMD_END; cur_cmd++) {
		if (!(cur_cmd->intf_mask & intf_mask) ||
		    !(cur_cmd->cut_mask & cut_mask))
			continue;

		switch (cur_cmd->cmd) {
		case RTW_PWR_CMD_WRITE:
			offset = cur_cmd->offset;

			if (cur_cmd->base == RTW_PWR_ADDR_SDIO)
				offset |= SDIO_LOCAL_OFFSET;

			value = rtw_read8(rtwdev, offset);
			value &= ~cur_cmd->mask;
			value |= (cur_cmd->value & cur_cmd->mask);
			rtw_write8(rtwdev, offset, value);
			break;
		case RTW_PWR_CMD_POLLING:
			if (rtw_pwr_cmd_polling(rtwdev, cur_cmd))
				return -EBUSY;
			break;
		case RTW_PWR_CMD_DELAY:
			if (cur_cmd->value == RTW_PWR_DELAY_US)
				udelay(cur_cmd->offset);
			else
				mdelay(cur_cmd->offset);
			break;
		case RTW_PWR_CMD_READ:
			break;
		default:
			return -EINVAL;
		}
	}

	return 0;
}

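/* Walk the NULL-terminated array of power-sequence tables, selecting the
 * interface mask expected by the tables (BIT(2) for PCIe, BIT(1) for USB)
 * and the cut mask for the running chip revision.
 */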
static int rtw_pwr_seq_parser(struct rtw_dev *rtwdev,
			      const struct rtw_pwr_seq_cmd **cmd_seq)
{
	u8 cut_mask;
	u8 intf_mask;
	u8 cut;
	u32 idx = 0;
	const struct rtw_pwr_seq_cmd *cmd;
	int ret;

	cut = rtwdev->hal.cut_version;
	cut_mask = cut_version_to_mask(cut);
	switch (rtw_hci_type(rtwdev)) {
	case RTW_HCI_TYPE_PCIE:
		intf_mask = BIT(2);
		break;
	case RTW_HCI_TYPE_USB:
		intf_mask = BIT(1);
		break;
	default:
		return -EINVAL;
	}

	do {
		cmd = cmd_seq[idx];
		if (!cmd)
			break;

		ret = rtw_sub_pwr_seq_parser(rtwdev, intf_mask, cut_mask, cmd);
		if (ret)
			return -EBUSY;

		idx++;
	} while (1);

	return 0;
}

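/* Run the chip's power-on or power-off sequence. The MAC is treated as
 * powered off when REG_CR reads 0xea (and, on USB, when BIT(0) of
 * REG_SYS_STATUS1 + 1 is set); -EALREADY is returned if the MAC is already
 * in the requested state.
 */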
static int rtw_mac_power_switch(struct rtw_dev *rtwdev, bool pwr_on)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	const struct rtw_pwr_seq_cmd **pwr_seq;
	u8 rpwm;
	bool cur_pwr;

	rpwm = rtw_read8(rtwdev, rtwdev->hci.rpwm_addr);

	/* Check if FW still exists */
	if (rtw_read16(rtwdev, REG_MCUFW_CTRL) == 0xC078) {
		rpwm = (rpwm ^ BIT_RPWM_TOGGLE) & BIT_RPWM_TOGGLE;
		rtw_write8(rtwdev, rtwdev->hci.rpwm_addr, rpwm);
	}

	if (rtw_read8(rtwdev, REG_CR) == 0xea)
		cur_pwr = false;
	else if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB &&
		 (rtw_read8(rtwdev, REG_SYS_STATUS1 + 1) & BIT(0)))
		cur_pwr = false;
	else
		cur_pwr = true;

	if (pwr_on == cur_pwr)
		return -EALREADY;

	pwr_seq = pwr_on ? chip->pwr_on_seq : chip->pwr_off_seq;
	if (rtw_pwr_seq_parser(rtwdev, pwr_seq))
		return -EINVAL;

	return 0;
}

static int rtw_mac_init_system_cfg(struct rtw_dev *rtwdev)
{
	u8 sys_func_en = rtwdev->chip->sys_func_en;
	u8 value8;
	u32 value, tmp;

	value = rtw_read32(rtwdev, REG_CPU_DMEM_CON);
	value |= BIT_WL_PLATFORM_RST | BIT_DDMA_EN;
	rtw_write32(rtwdev, REG_CPU_DMEM_CON, value);

	rtw_write8_set(rtwdev, REG_SYS_FUNC_EN + 1, sys_func_en);
	value8 = (rtw_read8(rtwdev, REG_CR_EXT + 3) & 0xF0) | 0x0C;
	rtw_write8(rtwdev, REG_CR_EXT + 3, value8);

	/* disable boot-from-flash for driver's DL FW */
	tmp = rtw_read32(rtwdev, REG_MCUFW_CTRL);
	if (tmp & BIT_BOOT_FSPI_EN) {
		rtw_write32(rtwdev, REG_MCUFW_CTRL, tmp & (~BIT_BOOT_FSPI_EN));
		value = rtw_read32(rtwdev, REG_GPIO_MUXCFG) & (~BIT_FSPI_EN);
		rtw_write32(rtwdev, REG_GPIO_MUXCFG, value);
	}

	return 0;
}

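/* Power on the MAC. If the power sequence reports -EALREADY (e.g. when the
 * firmware kept the MAC powered across a warm reboot), power the MAC off and
 * back on so it starts from a clean state.
 */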
int rtw_mac_power_on(struct rtw_dev *rtwdev)
{
	int ret = 0;

	ret = rtw_mac_pre_system_cfg(rtwdev);
	if (ret)
		goto err;

	ret = rtw_mac_power_switch(rtwdev, true);
	if (ret == -EALREADY) {
		rtw_mac_power_switch(rtwdev, false);
		ret = rtw_mac_power_switch(rtwdev, true);
		if (ret)
			goto err;
	} else if (ret) {
		goto err;
	}

	ret = rtw_mac_init_system_cfg(rtwdev);
	if (ret)
		goto err;

	return 0;

err:
	rtw_err(rtwdev, "mac power on failed");
	return ret;
}

void rtw_mac_power_off(struct rtw_dev *rtwdev)
{
	rtw_mac_power_switch(rtwdev, false);
}

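/* Verify that the firmware blob size matches the size computed from the
 * header: DMEM + IMEM (+ optional EMEM), each followed by a checksum, plus
 * the header itself.
 */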
static bool check_firmware_size(const u8 *data, u32 size)
{
	const struct rtw_fw_hdr *fw_hdr = (const struct rtw_fw_hdr *)data;
	u32 dmem_size;
	u32 imem_size;
	u32 emem_size;
	u32 real_size;

	dmem_size = le32_to_cpu(fw_hdr->dmem_size);
	imem_size = le32_to_cpu(fw_hdr->imem_size);
	emem_size = (fw_hdr->mem_usage & BIT(4)) ?
		    le32_to_cpu(fw_hdr->emem_size) : 0;

	dmem_size += FW_HDR_CHKSUM_SIZE;
	imem_size += FW_HDR_CHKSUM_SIZE;
	emem_size += emem_size ? FW_HDR_CHKSUM_SIZE : 0;
	real_size = FW_HDR_SIZE + dmem_size + imem_size + emem_size;
	if (real_size != size)
		return false;

	return true;
}

static void wlan_cpu_enable(struct rtw_dev *rtwdev, bool enable)
{
	if (enable) {
		/* cpu io interface enable */
		rtw_write8_set(rtwdev, REG_RSV_CTRL + 1, BIT_WLMCU_IOIF);

		/* cpu enable */
		rtw_write8_set(rtwdev, REG_SYS_FUNC_EN + 1, BIT_FEN_CPUEN);
	} else {
		/* cpu disable */
		rtw_write8_clr(rtwdev, REG_SYS_FUNC_EN + 1, BIT_FEN_CPUEN);

		/* cpu io interface disable */
		rtw_write8_clr(rtwdev, REG_RSV_CTRL + 1, BIT_WLMCU_IOIF);
	}
}

#define DLFW_RESTORE_REG_NUM 6

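/* Save the registers that the firmware download path is about to modify
 * (TXDMA queue map, REG_CR, H2CQ CSR, high-queue page info, RQPN control and
 * beacon control), then reconfigure them for the download: map HIQ to the
 * high priority queue, enable only TXDMA, give the high queue 0x200 pages
 * and disable beaconing.
 */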
static void download_firmware_reg_backup(struct rtw_dev *rtwdev,
					 struct rtw_backup_info *bckp)
{
	u8 tmp;
	u8 bckp_idx = 0;

	/* set HIQ to hi priority */
	bckp[bckp_idx].len = 1;
	bckp[bckp_idx].reg = REG_TXDMA_PQ_MAP + 1;
	bckp[bckp_idx].val = rtw_read8(rtwdev, REG_TXDMA_PQ_MAP + 1);
	bckp_idx++;
	tmp = RTW_DMA_MAPPING_HIGH << 6;
	rtw_write8(rtwdev, REG_TXDMA_PQ_MAP + 1, tmp);

	/* DLFW only use HIQ, map HIQ to hi priority */
	bckp[bckp_idx].len = 1;
	bckp[bckp_idx].reg = REG_CR;
	bckp[bckp_idx].val = rtw_read8(rtwdev, REG_CR);
	bckp_idx++;
	bckp[bckp_idx].len = 4;
	bckp[bckp_idx].reg = REG_H2CQ_CSR;
	bckp[bckp_idx].val = BIT_H2CQ_FULL;
	bckp_idx++;
	tmp = BIT_HCI_TXDMA_EN | BIT_TXDMA_EN;
	rtw_write8(rtwdev, REG_CR, tmp);
	rtw_write32(rtwdev, REG_H2CQ_CSR, BIT_H2CQ_FULL);

	/* Config hi priority queue and public priority queue page number */
	bckp[bckp_idx].len = 2;
	bckp[bckp_idx].reg = REG_FIFOPAGE_INFO_1;
	bckp[bckp_idx].val = rtw_read16(rtwdev, REG_FIFOPAGE_INFO_1);
	bckp_idx++;
	bckp[bckp_idx].len = 4;
	bckp[bckp_idx].reg = REG_RQPN_CTRL_2;
	bckp[bckp_idx].val = rtw_read32(rtwdev, REG_RQPN_CTRL_2) | BIT_LD_RQPN;
	bckp_idx++;
	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_1, 0x200);
	rtw_write32(rtwdev, REG_RQPN_CTRL_2, bckp[bckp_idx - 1].val);

	/* Disable beacon related functions */
	tmp = rtw_read8(rtwdev, REG_BCN_CTRL);
	bckp[bckp_idx].len = 1;
	bckp[bckp_idx].reg = REG_BCN_CTRL;
	bckp[bckp_idx].val = tmp;
	bckp_idx++;
	tmp = (u8)((tmp & (~BIT_EN_BCN_FUNCTION)) | BIT_DIS_TSF_UDT);
	rtw_write8(rtwdev, REG_BCN_CTRL, tmp);

	WARN(bckp_idx != DLFW_RESTORE_REG_NUM, "wrong backup number\n");
}

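/* Reset the on-chip platform before the download by clearing and then
 * setting the WLAN platform reset and CPU clock enable bits.
 */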
static void download_firmware_reset_platform(struct rtw_dev *rtwdev)
{
	rtw_write8_clr(rtwdev, REG_CPU_DMEM_CON + 2, BIT_WL_PLATFORM_RST >> 16);
	rtw_write8_clr(rtwdev, REG_SYS_CLK_CTRL + 1, BIT_CPU_CLK_EN >> 8);
	rtw_write8_set(rtwdev, REG_CPU_DMEM_CON + 2, BIT_WL_PLATFORM_RST >> 16);
	rtw_write8_set(rtwdev, REG_SYS_CLK_CTRL + 1, BIT_CPU_CLK_EN >> 8);
}

static void download_firmware_reg_restore(struct rtw_dev *rtwdev,
					  struct rtw_backup_info *bckp,
					  u8 bckp_num)
{
	rtw_restore_reg(rtwdev, bckp, bckp_num);
}

#define TX_DESC_SIZE 48

static int send_firmware_pkt_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr,
				       const u8 *data, u32 size)
{
	u8 *buf;
	int ret;

	buf = kmemdup(data, size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = rtw_fw_write_data_rsvd_page(rtwdev, pg_addr, buf, size);
	kfree(buf);
	return ret;
}

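/* Send one firmware chunk through the reserved page mechanism. On USB the
 * length is bumped by one byte whenever payload plus TX descriptor would be
 * a multiple of 512, presumably to avoid a zero-length-packet boundary on
 * the bulk endpoint.
 */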
static int
send_firmware_pkt(struct rtw_dev *rtwdev, u16 pg_addr, const u8 *data, u32 size)
{
	int ret;

	if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB &&
	    !((size + TX_DESC_SIZE) & (512 - 1)))
		size += 1;

	ret = send_firmware_pkt_rsvd_page(rtwdev, pg_addr, data, size);
	if (ret)
		rtw_err(rtwdev, "failed to download rsvd page\n");

	return ret;
}

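/* Program DDMA channel 0 with the source, destination and control word and
 * wait for the hardware to clear the OWN bit, i.e. for the copy to finish.
 */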
static int
iddma_enable(struct rtw_dev *rtwdev, u32 src, u32 dst, u32 ctrl)
{
	rtw_write32(rtwdev, REG_DDMA_CH0SA, src);
	rtw_write32(rtwdev, REG_DDMA_CH0DA, dst);
	rtw_write32(rtwdev, REG_DDMA_CH0CTRL, ctrl);

	if (!check_hw_ready(rtwdev, REG_DDMA_CH0CTRL, BIT_DDMACH0_OWN, 0))
		return -EBUSY;

	return 0;
}

static int iddma_download_firmware(struct rtw_dev *rtwdev, u32 src, u32 dst,
				   u32 len, u8 first)
{
	u32 ch0_ctrl = BIT_DDMACH0_CHKSUM_EN | BIT_DDMACH0_OWN;

	if (!check_hw_ready(rtwdev, REG_DDMA_CH0CTRL, BIT_DDMACH0_OWN, 0))
		return -EBUSY;

	ch0_ctrl |= len & BIT_MASK_DDMACH0_DLEN;
	if (!first)
		ch0_ctrl |= BIT_DDMACH0_CHKSUM_CONT;

	if (iddma_enable(rtwdev, src, dst, ch0_ctrl))
		return -EBUSY;

	return 0;
}

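/* Read back the DDMA checksum status and record the result in
 * REG_MCUFW_CTRL: addresses below OCPBASE_DMEM_88XX are accounted as IMEM,
 * the rest as DMEM. Returns false if the accumulated checksum failed.
 */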
static bool
check_fw_checksum(struct rtw_dev *rtwdev, u32 addr)
{
	u8 fw_ctrl;

	fw_ctrl = rtw_read8(rtwdev, REG_MCUFW_CTRL);

	if (rtw_read32(rtwdev, REG_DDMA_CH0CTRL) & BIT_DDMACH0_CHKSUM_STS) {
		if (addr < OCPBASE_DMEM_88XX) {
			fw_ctrl |= BIT_IMEM_DW_OK;
			fw_ctrl &= ~BIT_IMEM_CHKSUM_OK;
			rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
		} else {
			fw_ctrl |= BIT_DMEM_DW_OK;
			fw_ctrl &= ~BIT_DMEM_CHKSUM_OK;
			rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
		}

		rtw_err(rtwdev, "invalid fw checksum\n");

		return false;
	}

	if (addr < OCPBASE_DMEM_88XX) {
		fw_ctrl |= (BIT_IMEM_DW_OK | BIT_IMEM_CHKSUM_OK);
		rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
	} else {
		fw_ctrl |= (BIT_DMEM_DW_OK | BIT_DMEM_CHKSUM_OK);
		rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
	}

	return true;
}

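/* Copy one firmware section to MCU memory. The DDMA checksum is reset first,
 * then the section is moved in chunks of at most 4 KB: each chunk is written
 * into the TX buffer through a reserved page and handed to DDMA channel 0,
 * and the accumulated checksum is verified at the end.
 */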
static int
download_firmware_to_mem(struct rtw_dev *rtwdev, const u8 *data,
			 u32 src, u32 dst, u32 size)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	u32 desc_size = chip->tx_pkt_desc_sz;
	u8 first_part;
	u32 mem_offset;
	u32 residue_size;
	u32 pkt_size;
	u32 max_size = 0x1000;
	u32 val;
	int ret;

	mem_offset = 0;
	first_part = 1;
	residue_size = size;

	val = rtw_read32(rtwdev, REG_DDMA_CH0CTRL);
	val |= BIT_DDMACH0_RESET_CHKSUM_STS;
	rtw_write32(rtwdev, REG_DDMA_CH0CTRL, val);

	while (residue_size) {
		if (residue_size >= max_size)
			pkt_size = max_size;
		else
			pkt_size = residue_size;

		ret = send_firmware_pkt(rtwdev, (u16)(src >> 7),
					data + mem_offset, pkt_size);
		if (ret)
			return ret;

		ret = iddma_download_firmware(rtwdev, OCPBASE_TXBUF_88XX +
					      src + desc_size,
					      dst + mem_offset, pkt_size,
					      first_part);
		if (ret)
			return ret;

		first_part = 0;
		mem_offset += pkt_size;
		residue_size -= pkt_size;
	}

	if (!check_fw_checksum(rtwdev, dst))
		return -EINVAL;

	return 0;
}

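/* Parse the section sizes from the firmware header, enable MCU firmware
 * download, and copy DMEM, IMEM and (if the header flags it) EMEM to the
 * addresses given in the header, with bit 31 of each address masked off.
 */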
static int
start_download_firmware(struct rtw_dev *rtwdev, const u8 *data, u32 size)
{
	const struct rtw_fw_hdr *fw_hdr = (const struct rtw_fw_hdr *)data;
	const u8 *cur_fw;
	u16 val;
	u32 imem_size;
	u32 dmem_size;
	u32 emem_size;
	u32 addr;
	int ret;

	dmem_size = le32_to_cpu(fw_hdr->dmem_size);
	imem_size = le32_to_cpu(fw_hdr->imem_size);
	emem_size = (fw_hdr->mem_usage & BIT(4)) ?
		    le32_to_cpu(fw_hdr->emem_size) : 0;
	dmem_size += FW_HDR_CHKSUM_SIZE;
	imem_size += FW_HDR_CHKSUM_SIZE;
	emem_size += emem_size ? FW_HDR_CHKSUM_SIZE : 0;

	val = (u16)(rtw_read16(rtwdev, REG_MCUFW_CTRL) & 0x3800);
	val |= BIT_MCUFWDL_EN;
	rtw_write16(rtwdev, REG_MCUFW_CTRL, val);

	cur_fw = data + FW_HDR_SIZE;
	addr = le32_to_cpu(fw_hdr->dmem_addr);
	addr &= ~BIT(31);
	ret = download_firmware_to_mem(rtwdev, cur_fw, 0, addr, dmem_size);
	if (ret)
		return ret;

	cur_fw = data + FW_HDR_SIZE + dmem_size;
	addr = le32_to_cpu(fw_hdr->imem_addr);
	addr &= ~BIT(31);
	ret = download_firmware_to_mem(rtwdev, cur_fw, 0, addr, imem_size);
	if (ret)
		return ret;

	if (emem_size) {
		cur_fw = data + FW_HDR_SIZE + dmem_size + imem_size;
		addr = le32_to_cpu(fw_hdr->emem_addr);
		addr &= ~BIT(31);
		ret = download_firmware_to_mem(rtwdev, cur_fw, 0, addr,
					       emem_size);
		if (ret)
			return ret;
	}

	return 0;
}

static int download_firmware_validate(struct rtw_dev *rtwdev)
{
	u32 fw_key;

	if (!check_hw_ready(rtwdev, REG_MCUFW_CTRL, FW_READY_MASK, FW_READY)) {
		fw_key = rtw_read32(rtwdev, REG_FW_DBG7) & FW_KEY_MASK;
		if (fw_key == ILLEGAL_KEY_GROUP)
			rtw_err(rtwdev, "invalid fw key\n");
		return -EINVAL;
	}

	return 0;
}

static void download_firmware_end_flow(struct rtw_dev *rtwdev)
{
	u16 fw_ctrl;

	rtw_write32(rtwdev, REG_TXDMA_STATUS, BTI_PAGE_OVF);

	/* Check IMEM & DMEM checksum is OK or not */
	fw_ctrl = rtw_read16(rtwdev, REG_MCUFW_CTRL);
	if ((fw_ctrl & BIT_CHECK_SUM_OK) != BIT_CHECK_SUM_OK)
		return;

	fw_ctrl = (fw_ctrl | BIT_FW_DW_RDY) & ~BIT_MCUFWDL_EN;
	rtw_write16(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
}

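/* Download the firmware to the chip: validate the image size, back up the
 * LTE-coex register, stop the WLAN CPU, save and reconfigure the registers
 * used during the download, transfer the image, restore the registers,
 * complete the download handshake, restart the CPU and wait for the
 * firmware-ready indication before resetting the HCI and H2C state.
 */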
int rtw_download_firmware(struct rtw_dev *rtwdev, struct rtw_fw_state *fw)
{
	struct rtw_backup_info bckp[DLFW_RESTORE_REG_NUM];
	const u8 *data = fw->firmware->data;
	u32 size = fw->firmware->size;
	u32 ltecoex_bckp;
	int ret;

	if (!check_firmware_size(data, size))
		return -EINVAL;

	if (!ltecoex_read_reg(rtwdev, 0x38, &ltecoex_bckp))
		return -EBUSY;

	wlan_cpu_enable(rtwdev, false);

	download_firmware_reg_backup(rtwdev, bckp);
	download_firmware_reset_platform(rtwdev);

	ret = start_download_firmware(rtwdev, data, size);
	if (ret)
		goto dlfw_fail;

	download_firmware_reg_restore(rtwdev, bckp, DLFW_RESTORE_REG_NUM);

	download_firmware_end_flow(rtwdev);

	wlan_cpu_enable(rtwdev, true);

	if (!ltecoex_reg_write(rtwdev, 0x38, ltecoex_bckp))
		return -EBUSY;

	ret = download_firmware_validate(rtwdev);
	if (ret)
		goto dlfw_fail;

	/* reset desc and index */
	rtw_hci_setup(rtwdev);

	rtwdev->h2c.last_box_num = 0;
	rtwdev->h2c.seq = 0;

	set_bit(RTW_FLAG_FW_RUNNING, rtwdev->flags);

	return 0;

dlfw_fail:
	/* Disable FWDL_EN */
	rtw_write8_clr(rtwdev, REG_MCUFW_CTRL, BIT_MCUFWDL_EN);
	rtw_write8_set(rtwdev, REG_SYS_FUNC_EN + 1, BIT_FEN_CPUEN);

	return ret;
}

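/* Translate a bitmap of mac80211 AC queues into the corresponding bitmap of
 * DMA priority queues, using the chip's RQPN mapping.
 */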
static u32 get_priority_queues(struct rtw_dev *rtwdev, u32 queues)
{
	const struct rtw_rqpn *rqpn = rtwdev->fifo.rqpn;
	u32 prio_queues = 0;

	if (queues & BIT(IEEE80211_AC_VO))
		prio_queues |= BIT(rqpn->dma_map_vo);
	if (queues & BIT(IEEE80211_AC_VI))
		prio_queues |= BIT(rqpn->dma_map_vi);
	if (queues & BIT(IEEE80211_AC_BE))
		prio_queues |= BIT(rqpn->dma_map_be);
	if (queues & BIT(IEEE80211_AC_BK))
		prio_queues |= BIT(rqpn->dma_map_bk);

	return prio_queues;
}

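/* Wait for a single priority queue to drain by polling its FIFOPAGE_INFO
 * register until the available page count equals the reserved page count.
 */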
static void __rtw_mac_flush_prio_queue(struct rtw_dev *rtwdev,
				       u32 prio_queue, bool drop)
{
	u32 addr;
	u16 avail_page, rsvd_page;
	int i;

	switch (prio_queue) {
	case RTW_DMA_MAPPING_EXTRA:
		addr = REG_FIFOPAGE_INFO_4;
		break;
	case RTW_DMA_MAPPING_LOW:
		addr = REG_FIFOPAGE_INFO_2;
		break;
	case RTW_DMA_MAPPING_NORMAL:
		addr = REG_FIFOPAGE_INFO_3;
		break;
	case RTW_DMA_MAPPING_HIGH:
		addr = REG_FIFOPAGE_INFO_1;
		break;
	default:
		return;
	}

	/* check if all of the reserved pages are available for 100 msecs */
	for (i = 0; i < 5; i++) {
		rsvd_page = rtw_read16(rtwdev, addr);
		avail_page = rtw_read16(rtwdev, addr + 2);
		if (rsvd_page == avail_page)
			return;

		msleep(20);
	}

	/* priority queue is still not empty, throw a warning,
	 *
	 * Note that if we want to flush the tx queue when having a lot of
	 * traffic (ex, 100Mbps up), some of the packets could be dropped.
	 * And it requires like ~2secs to flush the full priority queue.
	 */
	if (!drop)
		rtw_warn(rtwdev, "timed out to flush queue %d\n", prio_queue);
}

static void rtw_mac_flush_prio_queues(struct rtw_dev *rtwdev,
				      u32 prio_queues, bool drop)
{
	u32 q;

	for (q = 0; q < RTW_DMA_MAPPING_MAX; q++)
		if (prio_queues & BIT(q))
			__rtw_mac_flush_prio_queue(rtwdev, q, drop);
}

void rtw_mac_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop)
{
	u32 prio_queues = 0;

	/* If all of the hardware queues are requested to flush,
	 * or the priority queues are not mapped yet,
	 * flush all of the priority queues
	 */
	if (queues == BIT(rtwdev->hw->queues) - 1 || !rtwdev->fifo.rqpn)
		prio_queues = BIT(RTW_DMA_MAPPING_MAX) - 1;
	else
		prio_queues = get_priority_queues(rtwdev, queues);

	rtw_mac_flush_prio_queues(rtwdev, prio_queues, drop);
}

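/* Select the RQPN table for the host interface (indexed by the USB bulk-out
 * endpoint count on USB), program the TXDMA priority queue mapping and
 * re-enable MAC TX/RX.
 */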
static int txdma_queue_mapping(struct rtw_dev *rtwdev)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	const struct rtw_rqpn *rqpn = NULL;
	u16 txdma_pq_map = 0;

	switch (rtw_hci_type(rtwdev)) {
	case RTW_HCI_TYPE_PCIE:
		rqpn = &chip->rqpn_table[1];
		break;
	case RTW_HCI_TYPE_USB:
		if (rtwdev->hci.bulkout_num == 2)
			rqpn = &chip->rqpn_table[2];
		else if (rtwdev->hci.bulkout_num == 3)
			rqpn = &chip->rqpn_table[3];
		else if (rtwdev->hci.bulkout_num == 4)
			rqpn = &chip->rqpn_table[4];
		else
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	rtwdev->fifo.rqpn = rqpn;
	txdma_pq_map |= BIT_TXDMA_HIQ_MAP(rqpn->dma_map_hi);
	txdma_pq_map |= BIT_TXDMA_MGQ_MAP(rqpn->dma_map_mg);
	txdma_pq_map |= BIT_TXDMA_BKQ_MAP(rqpn->dma_map_bk);
	txdma_pq_map |= BIT_TXDMA_BEQ_MAP(rqpn->dma_map_be);
	txdma_pq_map |= BIT_TXDMA_VIQ_MAP(rqpn->dma_map_vi);
	txdma_pq_map |= BIT_TXDMA_VOQ_MAP(rqpn->dma_map_vo);
	rtw_write16(rtwdev, REG_TXDMA_PQ_MAP, txdma_pq_map);

	rtw_write8(rtwdev, REG_CR, 0);
	rtw_write8(rtwdev, REG_CR, MAC_TRX_ENABLE);
	rtw_write32(rtwdev, REG_H2CQ_CSR, BIT_H2CQ_FULL);

	return 0;
}

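/* Split the TX FIFO (in 128-byte pages) between the AC queues and the
 * reserved region, and lay the reserved blocks (CSI buffer, FW TX buffer,
 * CPU instructions, H2CQ, H2C static/extra info, driver pages) out from the
 * top of the FIFO downwards, recording the start page of each block.
 */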
static int set_trx_fifo_info(struct rtw_dev *rtwdev)
{
	struct rtw_fifo_conf *fifo = &rtwdev->fifo;
	struct rtw_chip_info *chip = rtwdev->chip;
	u16 cur_pg_addr;
	u8 csi_buf_pg_num = chip->csi_buf_pg_num;

	/* config rsvd page num */
	fifo->rsvd_drv_pg_num = 8;
	fifo->txff_pg_num = chip->txff_size >> 7;
	fifo->rsvd_pg_num = fifo->rsvd_drv_pg_num +
			   RSVD_PG_H2C_EXTRAINFO_NUM +
			   RSVD_PG_H2C_STATICINFO_NUM +
			   RSVD_PG_H2CQ_NUM +
			   RSVD_PG_CPU_INSTRUCTION_NUM +
			   RSVD_PG_FW_TXBUF_NUM +
			   csi_buf_pg_num;

	if (fifo->rsvd_pg_num > fifo->txff_pg_num)
		return -ENOMEM;

	fifo->acq_pg_num = fifo->txff_pg_num - fifo->rsvd_pg_num;
	fifo->rsvd_boundary = fifo->txff_pg_num - fifo->rsvd_pg_num;

	cur_pg_addr = fifo->txff_pg_num;
	cur_pg_addr -= csi_buf_pg_num;
	fifo->rsvd_csibuf_addr = cur_pg_addr;
	cur_pg_addr -= RSVD_PG_FW_TXBUF_NUM;
	fifo->rsvd_fw_txbuf_addr = cur_pg_addr;
	cur_pg_addr -= RSVD_PG_CPU_INSTRUCTION_NUM;
	fifo->rsvd_cpu_instr_addr = cur_pg_addr;
	cur_pg_addr -= RSVD_PG_H2CQ_NUM;
	fifo->rsvd_h2cq_addr = cur_pg_addr;
	cur_pg_addr -= RSVD_PG_H2C_STATICINFO_NUM;
	fifo->rsvd_h2c_sta_info_addr = cur_pg_addr;
	cur_pg_addr -= RSVD_PG_H2C_EXTRAINFO_NUM;
	fifo->rsvd_h2c_info_addr = cur_pg_addr;
	cur_pg_addr -= fifo->rsvd_drv_pg_num;
	fifo->rsvd_drv_addr = cur_pg_addr;

	if (fifo->rsvd_boundary != fifo->rsvd_drv_addr) {
		rtw_err(rtwdev, "wrong rsvd driver address\n");
		return -EINVAL;
	}

	return 0;
}

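/* Program the page counts for the high/low/normal/extra and public priority
 * queues from the chip page table, set the reserved page and RX FIFO
 * boundaries, and trigger the automatic LLT (link list table) initialization.
 */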
static int priority_queue_cfg(struct rtw_dev *rtwdev)
{
	struct rtw_fifo_conf *fifo = &rtwdev->fifo;
	struct rtw_chip_info *chip = rtwdev->chip;
	const struct rtw_page_table *pg_tbl = NULL;
	u16 pubq_num;
	int ret;

	ret = set_trx_fifo_info(rtwdev);
	if (ret)
		return ret;

	switch (rtw_hci_type(rtwdev)) {
	case RTW_HCI_TYPE_PCIE:
		pg_tbl = &chip->page_table[1];
		break;
	case RTW_HCI_TYPE_USB:
		if (rtwdev->hci.bulkout_num == 2)
			pg_tbl = &chip->page_table[2];
		else if (rtwdev->hci.bulkout_num == 3)
			pg_tbl = &chip->page_table[3];
		else if (rtwdev->hci.bulkout_num == 4)
			pg_tbl = &chip->page_table[4];
		else
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	pubq_num = fifo->acq_pg_num - pg_tbl->hq_num - pg_tbl->lq_num -
		   pg_tbl->nq_num - pg_tbl->exq_num - pg_tbl->gapq_num;
	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_1, pg_tbl->hq_num);
	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_2, pg_tbl->lq_num);
	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_3, pg_tbl->nq_num);
	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_4, pg_tbl->exq_num);
	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_5, pubq_num);
	rtw_write32_set(rtwdev, REG_RQPN_CTRL_2, BIT_LD_RQPN);

	rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2, fifo->rsvd_boundary);
	rtw_write8_set(rtwdev, REG_FWHW_TXQ_CTRL + 2, BIT_EN_WR_FREE_TAIL >> 16);

	rtw_write16(rtwdev, REG_BCNQ_BDNY_V1, fifo->rsvd_boundary);
	rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2 + 2, fifo->rsvd_boundary);
	rtw_write16(rtwdev, REG_BCNQ1_BDNY_V1, fifo->rsvd_boundary);
	rtw_write32(rtwdev, REG_RXFF_BNDY, chip->rxff_size - C2H_PKT_BUF - 1);
	rtw_write8_set(rtwdev, REG_AUTO_LLT_V1, BIT_AUTO_INIT_LLT_V1);

	if (!check_hw_ready(rtwdev, REG_AUTO_LLT_V1, BIT_AUTO_INIT_LLT_V1, 0))
		return -EBUSY;

	rtw_write8(rtwdev, REG_CR + 3, 0);

	return 0;
}

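/* Point the H2C queue head, read and tail pointers at the reserved H2CQ
 * pages and check that the hardware reports the whole queue as free.
 */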
static int init_h2c(struct rtw_dev *rtwdev)
{
	struct rtw_fifo_conf *fifo = &rtwdev->fifo;
	u8 value8;
	u32 value32;
	u32 h2cq_addr;
	u32 h2cq_size;
	u32 h2cq_free;
	u32 wp, rp;

	h2cq_addr = fifo->rsvd_h2cq_addr << TX_PAGE_SIZE_SHIFT;
	h2cq_size = RSVD_PG_H2CQ_NUM << TX_PAGE_SIZE_SHIFT;

	value32 = rtw_read32(rtwdev, REG_H2C_HEAD);
	value32 = (value32 & 0xFFFC0000) | h2cq_addr;
	rtw_write32(rtwdev, REG_H2C_HEAD, value32);

	value32 = rtw_read32(rtwdev, REG_H2C_READ_ADDR);
	value32 = (value32 & 0xFFFC0000) | h2cq_addr;
	rtw_write32(rtwdev, REG_H2C_READ_ADDR, value32);

	value32 = rtw_read32(rtwdev, REG_H2C_TAIL);
	value32 &= 0xFFFC0000;
	value32 |= (h2cq_addr + h2cq_size);
	rtw_write32(rtwdev, REG_H2C_TAIL, value32);

	value8 = rtw_read8(rtwdev, REG_H2C_INFO);
	value8 = (u8)((value8 & 0xFC) | 0x01);
	rtw_write8(rtwdev, REG_H2C_INFO, value8);

	value8 = rtw_read8(rtwdev, REG_H2C_INFO);
	value8 = (u8)((value8 & 0xFB) | 0x04);
	rtw_write8(rtwdev, REG_H2C_INFO, value8);

	value8 = rtw_read8(rtwdev, REG_TXDMA_OFFSET_CHK + 1);
	value8 = (u8)((value8 & 0x7f) | 0x80);
	rtw_write8(rtwdev, REG_TXDMA_OFFSET_CHK + 1, value8);

	wp = rtw_read32(rtwdev, REG_H2C_PKT_WRITEADDR) & 0x3FFFF;
	rp = rtw_read32(rtwdev, REG_H2C_PKT_READADDR) & 0x3FFFF;
	h2cq_free = wp >= rp ? h2cq_size - (wp - rp) : rp - wp;

	if (h2cq_size != h2cq_free) {
		rtw_err(rtwdev, "H2C queue mismatch\n");
		return -EINVAL;
	}

	return 0;
}

static int rtw_init_trx_cfg(struct rtw_dev *rtwdev)
{
	int ret;

	ret = txdma_queue_mapping(rtwdev);
	if (ret)
		return ret;

	ret = priority_queue_cfg(rtwdev);
	if (ret)
		return ret;

	ret = init_h2c(rtwdev);
	if (ret)
		return ret;

	return 0;
}

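/* Configure the size of the driver info (PHY status) area appended to RX
 * descriptors and enable PHY status reporting (BIT_APP_PHYSTS) in REG_RCR.
 */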
static int rtw_drv_info_cfg(struct rtw_dev *rtwdev)
{
	u8 value8;

	rtw_write8(rtwdev, REG_RX_DRVINFO_SZ, PHY_STATUS_SIZE);
	value8 = rtw_read8(rtwdev, REG_TRXFF_BNDY + 1);
	value8 &= 0xF0;
	/* For rxdesc len = 0 issue */
	value8 |= 0xF;
	rtw_write8(rtwdev, REG_TRXFF_BNDY + 1, value8);
	rtw_write32_set(rtwdev, REG_RCR, BIT_APP_PHYSTS);
	rtw_write32_clr(rtwdev, REG_WMAC_OPTION_FUNCTION + 4, BIT(8) | BIT(9));

	return 0;
}

int rtw_mac_init(struct rtw_dev *rtwdev)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	int ret;

	ret = rtw_init_trx_cfg(rtwdev);
	if (ret)
		return ret;

	ret = chip->ops->mac_init(rtwdev);
	if (ret)
		return ret;

	ret = rtw_drv_info_cfg(rtwdev);
	if (ret)
		return ret;

	rtw_hci_interface_cfg(rtwdev);

	return 0;
}