// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019  Realtek Corporation
 */

#include "main.h"
#include "mac.h"
#include "reg.h"
#include "fw.h"
#include "debug.h"

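/* Apply the MAC-side settings for a channel switch: the 20/40 MHz
 * subchannel indexes, the RF bandwidth mode, the MAC clock selection, and
 * the CCK check bit, which is set only when the channel is in the 5 GHz
 * band.
 */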
void rtw_set_channel_mac(struct rtw_dev *rtwdev, u8 channel, u8 bw,
			 u8 primary_ch_idx)
{
	u8 txsc40 = 0, txsc20 = 0;
	u32 value32;
	u8 value8;

	txsc20 = primary_ch_idx;
	if (txsc20 == 1 || txsc20 == 3)
		txsc40 = 9;
	else
		txsc40 = 10;
	rtw_write8(rtwdev, REG_DATA_SC,
		   BIT_TXSC_20M(txsc20) | BIT_TXSC_40M(txsc40));

	value32 = rtw_read32(rtwdev, REG_WMAC_TRXPTCL_CTL);
	value32 &= ~BIT_RFMOD;
	switch (bw) {
	case RTW_CHANNEL_WIDTH_80:
		value32 |= BIT_RFMOD_80M;
		break;
	case RTW_CHANNEL_WIDTH_40:
		value32 |= BIT_RFMOD_40M;
		break;
	case RTW_CHANNEL_WIDTH_20:
	default:
		break;
	}
	rtw_write32(rtwdev, REG_WMAC_TRXPTCL_CTL, value32);

	value32 = rtw_read32(rtwdev, REG_AFE_CTRL1) & ~(BIT_MAC_CLK_SEL);
	value32 |= (MAC_CLK_HW_DEF_80M << BIT_SHIFT_MAC_CLK_SEL);
	rtw_write32(rtwdev, REG_AFE_CTRL1, value32);

	rtw_write8(rtwdev, REG_USTIME_TSF, MAC_CLK_SPEED);
	rtw_write8(rtwdev, REG_USTIME_EDCA, MAC_CLK_SPEED);

	value8 = rtw_read8(rtwdev, REG_CCK_CHECK);
	value8 = value8 & ~BIT_CHECK_CCK_EN;
	if (IS_CH_5G_BAND(channel))
		value8 |= BIT_CHECK_CCK_EN;
	rtw_write8(rtwdev, REG_CCK_CHECK, value8);
}

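/* Prepare the chip before the power-on sequence runs: clear REG_RSV_CTRL,
 * apply HCI-specific settings (the BT digital clock on PCIe), configure the
 * WL/BT pin mux, and keep the BB/RF blocks disabled for now.
 */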
static int rtw_mac_pre_system_cfg(struct rtw_dev *rtwdev)
{
	u32 value32;
	u8 value8;

	rtw_write8(rtwdev, REG_RSV_CTRL, 0);

	switch (rtw_hci_type(rtwdev)) {
	case RTW_HCI_TYPE_PCIE:
		rtw_write32_set(rtwdev, REG_HCI_OPT_CTRL, BIT_BT_DIG_CLK_EN);
		break;
	case RTW_HCI_TYPE_USB:
		break;
	default:
		return -EINVAL;
	}

	/* config PIN Mux */
	value32 = rtw_read32(rtwdev, REG_PAD_CTRL1);
	value32 |= BIT_PAPE_WLBT_SEL | BIT_LNAON_WLBT_SEL;
	rtw_write32(rtwdev, REG_PAD_CTRL1, value32);

	value32 = rtw_read32(rtwdev, REG_LED_CFG);
	value32 &= ~(BIT_PAPE_SEL_EN | BIT_LNAON_SEL_EN);
	rtw_write32(rtwdev, REG_LED_CFG, value32);

	value32 = rtw_read32(rtwdev, REG_GPIO_MUXCFG);
	value32 |= BIT_WLRFE_4_5_EN;
	rtw_write32(rtwdev, REG_GPIO_MUXCFG, value32);

	/* disable BB/RF */
	value8 = rtw_read8(rtwdev, REG_SYS_FUNC_EN);
	value8 &= ~(BIT_FEN_BB_RSTB | BIT_FEN_BB_GLB_RST);
	rtw_write8(rtwdev, REG_SYS_FUNC_EN, value8);

	value8 = rtw_read8(rtwdev, REG_RF_CTRL);
	value8 &= ~(BIT_RF_SDM_RSTB | BIT_RF_RSTB | BIT_RF_EN);
	rtw_write8(rtwdev, REG_RF_CTRL, value8);

	value32 = rtw_read32(rtwdev, REG_WLRF1);
	value32 &= ~BIT_WLRF1_BBRF_EN;
	rtw_write32(rtwdev, REG_WLRF1, value32);

	return 0;
}

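/* Poll a power sequence register until it reaches the expected value.
 * On PCIe, if the first RTW_PWR_POLLING_CNT attempts fail, pulse BIT(3) of
 * REG_SYS_PW_CTRL once and retry before giving up with -EBUSY.
 */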
static int rtw_pwr_cmd_polling(struct rtw_dev *rtwdev,
			       struct rtw_pwr_seq_cmd *cmd)
{
	u8 value;
	u8 flag = 0;
	u32 offset;
	u32 cnt = RTW_PWR_POLLING_CNT;

	if (cmd->base == RTW_PWR_ADDR_SDIO)
		offset = cmd->offset | SDIO_LOCAL_OFFSET;
	else
		offset = cmd->offset;

	do {
		cnt--;
		value = rtw_read8(rtwdev, offset);
		value &= cmd->mask;
		if (value == (cmd->value & cmd->mask))
			return 0;
		if (cnt == 0) {
			if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_PCIE &&
			    flag == 0) {
				value = rtw_read8(rtwdev, REG_SYS_PW_CTRL);
				value |= BIT(3);
				rtw_write8(rtwdev, REG_SYS_PW_CTRL, value);
				value &= ~BIT(3);
				rtw_write8(rtwdev, REG_SYS_PW_CTRL, value);
				cnt = RTW_PWR_POLLING_CNT;
				flag = 1;
			} else {
				return -EBUSY;
			}
		} else {
			udelay(50);
		}
	} while (1);
}

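/* Walk one power sequence command table and execute every entry that
 * matches the current interface and cut masks, until RTW_PWR_CMD_END.
 */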
static int rtw_sub_pwr_seq_parser(struct rtw_dev *rtwdev, u8 intf_mask,
				  u8 cut_mask, struct rtw_pwr_seq_cmd *cmd)
{
	struct rtw_pwr_seq_cmd *cur_cmd;
	u32 offset;
	u8 value;

	for (cur_cmd = cmd; cur_cmd->cmd != RTW_PWR_CMD_END; cur_cmd++) {
		if (!(cur_cmd->intf_mask & intf_mask) ||
		    !(cur_cmd->cut_mask & cut_mask))
			continue;

		switch (cur_cmd->cmd) {
		case RTW_PWR_CMD_WRITE:
			offset = cur_cmd->offset;

			if (cur_cmd->base == RTW_PWR_ADDR_SDIO)
				offset |= SDIO_LOCAL_OFFSET;

			value = rtw_read8(rtwdev, offset);
			value &= ~cur_cmd->mask;
			value |= (cur_cmd->value & cur_cmd->mask);
			rtw_write8(rtwdev, offset, value);
			break;
		case RTW_PWR_CMD_POLLING:
			if (rtw_pwr_cmd_polling(rtwdev, cur_cmd))
				return -EBUSY;
			break;
		case RTW_PWR_CMD_DELAY:
			if (cur_cmd->value == RTW_PWR_DELAY_US)
				udelay(cur_cmd->offset);
			else
				mdelay(cur_cmd->offset);
			break;
		case RTW_PWR_CMD_READ:
			break;
		default:
			return -EINVAL;
		}
	}

	return 0;
}

static int rtw_pwr_seq_parser(struct rtw_dev *rtwdev,
			      struct rtw_pwr_seq_cmd **cmd_seq)
{
	u8 cut_mask;
	u8 intf_mask;
	u8 cut;
	u32 idx = 0;
	struct rtw_pwr_seq_cmd *cmd;
	int ret;

	cut = rtwdev->hal.cut_version;
	cut_mask = cut_version_to_mask(cut);
	switch (rtw_hci_type(rtwdev)) {
	case RTW_HCI_TYPE_PCIE:
		intf_mask = BIT(2);
		break;
	case RTW_HCI_TYPE_USB:
		intf_mask = BIT(1);
		break;
	default:
		return -EINVAL;
	}

	do {
		cmd = cmd_seq[idx];
		if (!cmd)
			break;

		ret = rtw_sub_pwr_seq_parser(rtwdev, intf_mask, cut_mask, cmd);
		if (ret)
			return -EBUSY;

		idx++;
	} while (1);

	return 0;
}

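/* Run the chip's power-on or power-off sequence. The current power state
 * is inferred from REG_CR (a reading of 0xea means the MAC is not powered);
 * powering on an already powered chip returns -EALREADY so the caller can
 * decide to power-cycle it first.
 */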
static int rtw_mac_power_switch(struct rtw_dev *rtwdev, bool pwr_on)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_pwr_seq_cmd **pwr_seq;
	u8 rpwm;
	bool cur_pwr;

	rpwm = rtw_read8(rtwdev, rtwdev->hci.rpwm_addr);

	/* check whether the firmware is still alive; if so, toggle RPWM */
	if (rtw_read16(rtwdev, REG_MCUFW_CTRL) == 0xC078) {
		rpwm = (rpwm ^ BIT_RPWM_TOGGLE) & BIT_RPWM_TOGGLE;
		rtw_write8(rtwdev, rtwdev->hci.rpwm_addr, rpwm);
	}

	if (rtw_read8(rtwdev, REG_CR) == 0xea)
		cur_pwr = false;
	else if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB &&
		 (rtw_read8(rtwdev, REG_SYS_STATUS1 + 1) & BIT(0)))
		cur_pwr = false;
	else
		cur_pwr = true;

	if (pwr_on && cur_pwr)
		return -EALREADY;

	pwr_seq = pwr_on ? chip->pwr_on_seq : chip->pwr_off_seq;
	if (rtw_pwr_seq_parser(rtwdev, pwr_seq))
		return -EINVAL;

	return 0;
}

static int rtw_mac_init_system_cfg(struct rtw_dev *rtwdev)
{
	u8 sys_func_en = rtwdev->chip->sys_func_en;
	u8 value8;
	u32 value, tmp;

	value = rtw_read32(rtwdev, REG_CPU_DMEM_CON);
	value |= BIT_WL_PLATFORM_RST | BIT_DDMA_EN;
	rtw_write32(rtwdev, REG_CPU_DMEM_CON, value);

	rtw_write8_set(rtwdev, REG_SYS_FUNC_EN + 1, sys_func_en);
	value8 = (rtw_read8(rtwdev, REG_CR_EXT + 3) & 0xF0) | 0x0C;
	rtw_write8(rtwdev, REG_CR_EXT + 3, value8);

	/* disable boot-from-flash for driver's DL FW */
	tmp = rtw_read32(rtwdev, REG_MCUFW_CTRL);
	if (tmp & BIT_BOOT_FSPI_EN) {
		rtw_write32(rtwdev, REG_MCUFW_CTRL, tmp & (~BIT_BOOT_FSPI_EN));
		value = rtw_read32(rtwdev, REG_GPIO_MUXCFG) & (~BIT_FSPI_EN);
		rtw_write32(rtwdev, REG_GPIO_MUXCFG, value);
	}

	return 0;
}

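/* Power on the MAC: pre-configure the system, run the power-on sequence
 * (power-cycling once if the chip reports it is already on), then apply
 * the post-power-on system configuration.
 */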
int rtw_mac_power_on(struct rtw_dev *rtwdev)
{
	int ret = 0;

	ret = rtw_mac_pre_system_cfg(rtwdev);
	if (ret)
		goto err;

	ret = rtw_mac_power_switch(rtwdev, true);
	if (ret == -EALREADY) {
		rtw_mac_power_switch(rtwdev, false);
		ret = rtw_mac_power_switch(rtwdev, true);
		if (ret)
			goto err;
	} else if (ret) {
		goto err;
	}

	ret = rtw_mac_init_system_cfg(rtwdev);
	if (ret)
		goto err;

	return 0;

err:
	rtw_err(rtwdev, "mac power on failed");
	return ret;
}

void rtw_mac_power_off(struct rtw_dev *rtwdev)
{
	rtw_mac_power_switch(rtwdev, false);
}

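/* Sanity-check the firmware blob: its total length must equal the header
 * plus the DMEM/IMEM images (and the optional EMEM image), each followed
 * by a checksum of FW_HDR_CHKSUM_SIZE bytes.
 */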
static bool check_firmware_size(const u8 *data, u32 size)
{
	const struct rtw_fw_hdr *fw_hdr = (const struct rtw_fw_hdr *)data;
	u32 dmem_size;
	u32 imem_size;
	u32 emem_size;
	u32 real_size;

	dmem_size = le32_to_cpu(fw_hdr->dmem_size);
	imem_size = le32_to_cpu(fw_hdr->imem_size);
	emem_size = (fw_hdr->mem_usage & BIT(4)) ?
		    le32_to_cpu(fw_hdr->emem_size) : 0;

	dmem_size += FW_HDR_CHKSUM_SIZE;
	imem_size += FW_HDR_CHKSUM_SIZE;
	emem_size += emem_size ? FW_HDR_CHKSUM_SIZE : 0;
	real_size = FW_HDR_SIZE + dmem_size + imem_size + emem_size;
	if (real_size != size)
		return false;

	return true;
}

static void wlan_cpu_enable(struct rtw_dev *rtwdev, bool enable)
{
	if (enable) {
		/* cpu io interface enable */
		rtw_write8_set(rtwdev, REG_RSV_CTRL + 1, BIT_WLMCU_IOIF);

		/* cpu enable */
		rtw_write8_set(rtwdev, REG_SYS_FUNC_EN + 1, BIT_FEN_CPUEN);
	} else {
		/* cpu disable */
		rtw_write8_clr(rtwdev, REG_SYS_FUNC_EN + 1, BIT_FEN_CPUEN);

		/* cpu io interface disable */
		rtw_write8_clr(rtwdev, REG_RSV_CTRL + 1, BIT_WLMCU_IOIF);
	}
}

#define DLFW_RESTORE_REG_NUM 6

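/* Save the DLFW_RESTORE_REG_NUM registers that are touched during firmware
 * download (TXDMA queue mapping, REG_CR, the H2C queue CSR, the hi/public
 * page numbers and beacon control), then reconfigure them so only the high
 * priority queue is used while the image is transferred.
 */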
static void download_firmware_reg_backup(struct rtw_dev *rtwdev,
					 struct rtw_backup_info *bckp)
{
	u8 tmp;
	u8 bckp_idx = 0;

	/* set HIQ to hi priority */
	bckp[bckp_idx].len = 1;
	bckp[bckp_idx].reg = REG_TXDMA_PQ_MAP + 1;
	bckp[bckp_idx].val = rtw_read8(rtwdev, REG_TXDMA_PQ_MAP + 1);
	bckp_idx++;
	tmp = RTW_DMA_MAPPING_HIGH << 6;
	rtw_write8(rtwdev, REG_TXDMA_PQ_MAP + 1, tmp);

	/* DLFW only uses HIQ, map HIQ to hi priority */
	bckp[bckp_idx].len = 1;
	bckp[bckp_idx].reg = REG_CR;
	bckp[bckp_idx].val = rtw_read8(rtwdev, REG_CR);
	bckp_idx++;
	bckp[bckp_idx].len = 4;
	bckp[bckp_idx].reg = REG_H2CQ_CSR;
	bckp[bckp_idx].val = BIT_H2CQ_FULL;
	bckp_idx++;
	tmp = BIT_HCI_TXDMA_EN | BIT_TXDMA_EN;
	rtw_write8(rtwdev, REG_CR, tmp);
	rtw_write32(rtwdev, REG_H2CQ_CSR, BIT_H2CQ_FULL);

	/* Config hi priority queue and public priority queue page number */
	bckp[bckp_idx].len = 2;
	bckp[bckp_idx].reg = REG_FIFOPAGE_INFO_1;
	bckp[bckp_idx].val = rtw_read16(rtwdev, REG_FIFOPAGE_INFO_1);
	bckp_idx++;
	bckp[bckp_idx].len = 4;
	bckp[bckp_idx].reg = REG_RQPN_CTRL_2;
	bckp[bckp_idx].val = rtw_read32(rtwdev, REG_RQPN_CTRL_2) | BIT_LD_RQPN;
	bckp_idx++;
	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_1, 0x200);
	rtw_write32(rtwdev, REG_RQPN_CTRL_2, bckp[bckp_idx - 1].val);

	/* Disable beacon-related functions */
	tmp = rtw_read8(rtwdev, REG_BCN_CTRL);
	bckp[bckp_idx].len = 1;
	bckp[bckp_idx].reg = REG_BCN_CTRL;
	bckp[bckp_idx].val = tmp;
	bckp_idx++;
	tmp = (u8)((tmp & (~BIT_EN_BCN_FUNCTION)) | BIT_DIS_TSF_UDT);
	rtw_write8(rtwdev, REG_BCN_CTRL, tmp);

	WARN(bckp_idx != DLFW_RESTORE_REG_NUM, "wrong backup number\n");
}

static void download_firmware_reset_platform(struct rtw_dev *rtwdev)
{
	rtw_write8_clr(rtwdev, REG_CPU_DMEM_CON + 2, BIT_WL_PLATFORM_RST >> 16);
	rtw_write8_clr(rtwdev, REG_SYS_CLK_CTRL + 1, BIT_CPU_CLK_EN >> 8);
	rtw_write8_set(rtwdev, REG_CPU_DMEM_CON + 2, BIT_WL_PLATFORM_RST >> 16);
	rtw_write8_set(rtwdev, REG_SYS_CLK_CTRL + 1, BIT_CPU_CLK_EN >> 8);
}

static void download_firmware_reg_restore(struct rtw_dev *rtwdev,
					  struct rtw_backup_info *bckp,
					  u8 bckp_num)
{
	rtw_restore_reg(rtwdev, bckp, bckp_num);
}

#define TX_DESC_SIZE 48

static int send_firmware_pkt_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr,
				       const u8 *data, u32 size)
{
	u8 *buf;
	int ret;

	buf = kmemdup(data, size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = rtw_fw_write_data_rsvd_page(rtwdev, pg_addr, buf, size);
	kfree(buf);
	return ret;
}

static int
send_firmware_pkt(struct rtw_dev *rtwdev, u16 pg_addr, const u8 *data, u32 size)
{
	int ret;

	/* On USB, bump the size by one byte when the transfer (including the
	 * TX descriptor) would be an exact multiple of 512, presumably to
	 * avoid a zero-length bulk packet issue on the endpoint.
	 */
	if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB &&
	    !((size + TX_DESC_SIZE) & (512 - 1)))
		size += 1;

	ret = send_firmware_pkt_rsvd_page(rtwdev, pg_addr, data, size);
	if (ret)
		rtw_err(rtwdev, "failed to download rsvd page\n");

	return ret;
}

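/* The firmware image is moved from the TX buffer into MCU memory with DDMA
 * channel 0. Checksum accumulation is enabled on every transfer and is
 * continued across chunks after the first one.
 */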
static int
iddma_enable(struct rtw_dev *rtwdev, u32 src, u32 dst, u32 ctrl)
{
	rtw_write32(rtwdev, REG_DDMA_CH0SA, src);
	rtw_write32(rtwdev, REG_DDMA_CH0DA, dst);
	rtw_write32(rtwdev, REG_DDMA_CH0CTRL, ctrl);

	if (!check_hw_ready(rtwdev, REG_DDMA_CH0CTRL, BIT_DDMACH0_OWN, 0))
		return -EBUSY;

	return 0;
}

static int iddma_download_firmware(struct rtw_dev *rtwdev, u32 src, u32 dst,
				   u32 len, u8 first)
{
	u32 ch0_ctrl = BIT_DDMACH0_CHKSUM_EN | BIT_DDMACH0_OWN;

	if (!check_hw_ready(rtwdev, REG_DDMA_CH0CTRL, BIT_DDMACH0_OWN, 0))
		return -EBUSY;

	ch0_ctrl |= len & BIT_MASK_DDMACH0_DLEN;
	if (!first)
		ch0_ctrl |= BIT_DDMACH0_CHKSUM_CONT;

	if (iddma_enable(rtwdev, src, dst, ch0_ctrl))
		return -EBUSY;

	return 0;
}

static bool
check_fw_checksum(struct rtw_dev *rtwdev, u32 addr)
{
	u8 fw_ctrl;

	fw_ctrl = rtw_read8(rtwdev, REG_MCUFW_CTRL);

	if (rtw_read32(rtwdev, REG_DDMA_CH0CTRL) & BIT_DDMACH0_CHKSUM_STS) {
		if (addr < OCPBASE_DMEM_88XX) {
			fw_ctrl |= BIT_IMEM_DW_OK;
			fw_ctrl &= ~BIT_IMEM_CHKSUM_OK;
			rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
		} else {
			fw_ctrl |= BIT_DMEM_DW_OK;
			fw_ctrl &= ~BIT_DMEM_CHKSUM_OK;
			rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
		}

		rtw_err(rtwdev, "invalid fw checksum\n");

		return false;
	}

	if (addr < OCPBASE_DMEM_88XX) {
		fw_ctrl |= (BIT_IMEM_DW_OK | BIT_IMEM_CHKSUM_OK);
		rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
	} else {
		fw_ctrl |= (BIT_DMEM_DW_OK | BIT_DMEM_CHKSUM_OK);
		rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
	}

	return true;
}

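/* Download one firmware section: split it into chunks of at most 0x1000
 * bytes, stage each chunk in the reserved page area, DDMA-copy it to its
 * destination in MCU memory, and finally verify the accumulated checksum.
 */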
static int
download_firmware_to_mem(struct rtw_dev *rtwdev, const u8 *data,
			 u32 src, u32 dst, u32 size)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	u32 desc_size = chip->tx_pkt_desc_sz;
	u8 first_part;
	u32 mem_offset;
	u32 residue_size;
	u32 pkt_size;
	u32 max_size = 0x1000;
	u32 val;
	int ret;

	mem_offset = 0;
	first_part = 1;
	residue_size = size;

	val = rtw_read32(rtwdev, REG_DDMA_CH0CTRL);
	val |= BIT_DDMACH0_RESET_CHKSUM_STS;
	rtw_write32(rtwdev, REG_DDMA_CH0CTRL, val);

	while (residue_size) {
		if (residue_size >= max_size)
			pkt_size = max_size;
		else
			pkt_size = residue_size;

		ret = send_firmware_pkt(rtwdev, (u16)(src >> 7),
					data + mem_offset, pkt_size);
		if (ret)
			return ret;

		ret = iddma_download_firmware(rtwdev, OCPBASE_TXBUF_88XX +
					      src + desc_size,
					      dst + mem_offset, pkt_size,
					      first_part);
		if (ret)
			return ret;

		first_part = 0;
		mem_offset += pkt_size;
		residue_size -= pkt_size;
	}

	if (!check_fw_checksum(rtwdev, dst))
		return -EINVAL;

	return 0;
}

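/* Parse the firmware header and download the DMEM and IMEM sections (and
 * EMEM, if the header flags it as present). Bit 31 of each destination
 * address is masked off; it appears to be a flag bit rather than part of
 * the MCU address.
 */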
static int
start_download_firmware(struct rtw_dev *rtwdev, const u8 *data, u32 size)
{
	const struct rtw_fw_hdr *fw_hdr = (const struct rtw_fw_hdr *)data;
	const u8 *cur_fw;
	u16 val;
	u32 imem_size;
	u32 dmem_size;
	u32 emem_size;
	u32 addr;
	int ret;

	dmem_size = le32_to_cpu(fw_hdr->dmem_size);
	imem_size = le32_to_cpu(fw_hdr->imem_size);
	emem_size = (fw_hdr->mem_usage & BIT(4)) ?
		    le32_to_cpu(fw_hdr->emem_size) : 0;
	dmem_size += FW_HDR_CHKSUM_SIZE;
	imem_size += FW_HDR_CHKSUM_SIZE;
	emem_size += emem_size ? FW_HDR_CHKSUM_SIZE : 0;

	val = (u16)(rtw_read16(rtwdev, REG_MCUFW_CTRL) & 0x3800);
	val |= BIT_MCUFWDL_EN;
	rtw_write16(rtwdev, REG_MCUFW_CTRL, val);

	cur_fw = data + FW_HDR_SIZE;
	addr = le32_to_cpu(fw_hdr->dmem_addr);
	addr &= ~BIT(31);
	ret = download_firmware_to_mem(rtwdev, cur_fw, 0, addr, dmem_size);
	if (ret)
		return ret;

	cur_fw = data + FW_HDR_SIZE + dmem_size;
	addr = le32_to_cpu(fw_hdr->imem_addr);
	addr &= ~BIT(31);
	ret = download_firmware_to_mem(rtwdev, cur_fw, 0, addr, imem_size);
	if (ret)
		return ret;

	if (emem_size) {
		cur_fw = data + FW_HDR_SIZE + dmem_size + imem_size;
		addr = le32_to_cpu(fw_hdr->emem_addr);
		addr &= ~BIT(31);
		ret = download_firmware_to_mem(rtwdev, cur_fw, 0, addr,
					       emem_size);
		if (ret)
			return ret;
	}

	return 0;
}

static int download_firmware_validate(struct rtw_dev *rtwdev)
{
	u32 fw_key;

	if (!check_hw_ready(rtwdev, REG_MCUFW_CTRL, FW_READY_MASK, FW_READY)) {
		fw_key = rtw_read32(rtwdev, REG_FW_DBG7) & FW_KEY_MASK;
		if (fw_key == ILLEGAL_KEY_GROUP)
			rtw_err(rtwdev, "invalid fw key\n");
		return -EINVAL;
	}

	return 0;
}

static void download_firmware_end_flow(struct rtw_dev *rtwdev)
{
	u16 fw_ctrl;

	rtw_write32(rtwdev, REG_TXDMA_STATUS, BTI_PAGE_OVF);

	/* Check whether the IMEM & DMEM checksums are OK */
	fw_ctrl = rtw_read16(rtwdev, REG_MCUFW_CTRL);
	if ((fw_ctrl & BIT_CHECK_SUM_OK) != BIT_CHECK_SUM_OK)
		return;

	fw_ctrl = (fw_ctrl | BIT_FW_DW_RDY) & ~BIT_MCUFWDL_EN;
	rtw_write16(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
}

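/* Full firmware download flow: back up the LTE coex register and the DLFW
 * registers, stop the WLAN CPU, transfer the image through reserved pages
 * and DDMA, restore the registers, restart the CPU and wait for the
 * firmware ready indication.
 */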
int rtw_download_firmware(struct rtw_dev *rtwdev, struct rtw_fw_state *fw)
{
	struct rtw_backup_info bckp[DLFW_RESTORE_REG_NUM];
	const u8 *data = fw->firmware->data;
	u32 size = fw->firmware->size;
	u32 ltecoex_bckp;
	int ret;

	if (!check_firmware_size(data, size))
		return -EINVAL;

	if (!ltecoex_read_reg(rtwdev, 0x38, &ltecoex_bckp))
		return -EBUSY;

	wlan_cpu_enable(rtwdev, false);

	download_firmware_reg_backup(rtwdev, bckp);
	download_firmware_reset_platform(rtwdev);

	ret = start_download_firmware(rtwdev, data, size);
	if (ret)
		goto dlfw_fail;

	download_firmware_reg_restore(rtwdev, bckp, DLFW_RESTORE_REG_NUM);

	download_firmware_end_flow(rtwdev);

	wlan_cpu_enable(rtwdev, true);

	if (!ltecoex_reg_write(rtwdev, 0x38, ltecoex_bckp))
		return -EBUSY;

	ret = download_firmware_validate(rtwdev);
	if (ret)
		goto dlfw_fail;

	/* reset desc and index */
	rtw_hci_setup(rtwdev);

	rtwdev->h2c.last_box_num = 0;
	rtwdev->h2c.seq = 0;

	set_bit(RTW_FLAG_FW_RUNNING, rtwdev->flags);

	return 0;

dlfw_fail:
	/* Disable FWDL_EN */
	rtw_write8_clr(rtwdev, REG_MCUFW_CTRL, BIT_MCUFWDL_EN);
	rtw_write8_set(rtwdev, REG_SYS_FUNC_EN + 1, BIT_FEN_CPUEN);

	return ret;
}

static u32 get_priority_queues(struct rtw_dev *rtwdev, u32 queues)
{
	struct rtw_rqpn *rqpn = rtwdev->fifo.rqpn;
	u32 prio_queues = 0;

	if (queues & BIT(IEEE80211_AC_VO))
		prio_queues |= BIT(rqpn->dma_map_vo);
	if (queues & BIT(IEEE80211_AC_VI))
		prio_queues |= BIT(rqpn->dma_map_vi);
	if (queues & BIT(IEEE80211_AC_BE))
		prio_queues |= BIT(rqpn->dma_map_be);
	if (queues & BIT(IEEE80211_AC_BK))
		prio_queues |= BIT(rqpn->dma_map_bk);

	return prio_queues;
}

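/* Wait for one priority queue to drain by polling its reserved/available
 * page counters (5 tries, 20 ms apart). Only warn on timeout when the
 * caller did not ask to drop the pending frames.
 */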
static void __rtw_mac_flush_prio_queue(struct rtw_dev *rtwdev,
				       u32 prio_queue, bool drop)
{
	u32 addr;
	u16 avail_page, rsvd_page;
	int i;

	switch (prio_queue) {
	case RTW_DMA_MAPPING_EXTRA:
		addr = REG_FIFOPAGE_INFO_4;
		break;
	case RTW_DMA_MAPPING_LOW:
		addr = REG_FIFOPAGE_INFO_2;
		break;
	case RTW_DMA_MAPPING_NORMAL:
		addr = REG_FIFOPAGE_INFO_3;
		break;
	case RTW_DMA_MAPPING_HIGH:
		addr = REG_FIFOPAGE_INFO_1;
		break;
	default:
		return;
	}

	/* check if all of the reserved pages are available for 100 msecs */
	for (i = 0; i < 5; i++) {
		rsvd_page = rtw_read16(rtwdev, addr);
		avail_page = rtw_read16(rtwdev, addr + 2);
		if (rsvd_page == avail_page)
			return;

		msleep(20);
	}

	/* priority queue is still not empty, throw a warning,
	 *
	 * Note that if we want to flush the tx queue when having a lot of
	 * traffic (ex, 100Mbps up), some of the packets could be dropped.
	 * And it requires like ~2secs to flush the full priority queue.
	 */
	if (!drop)
		rtw_warn(rtwdev, "timed out to flush queue %d\n", prio_queue);
}

static void rtw_mac_flush_prio_queues(struct rtw_dev *rtwdev,
				      u32 prio_queues, bool drop)
{
	u32 q;

	for (q = 0; q < RTW_DMA_MAPPING_MAX; q++)
		if (prio_queues & BIT(q))
			__rtw_mac_flush_prio_queue(rtwdev, q, drop);
}

void rtw_mac_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop)
{
	u32 prio_queues = 0;

	/* If all of the hardware queues are requested to flush,
	 * or the priority queues are not mapped yet,
	 * flush all of the priority queues
	 */
	if (queues == BIT(rtwdev->hw->queues) - 1 || !rtwdev->fifo.rqpn)
		prio_queues = BIT(RTW_DMA_MAPPING_MAX) - 1;
	else
		prio_queues = get_priority_queues(rtwdev, queues);

	rtw_mac_flush_prio_queues(rtwdev, prio_queues, drop);
}

static int txdma_queue_mapping(struct rtw_dev *rtwdev)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_rqpn *rqpn = NULL;
	u16 txdma_pq_map = 0;

	switch (rtw_hci_type(rtwdev)) {
	case RTW_HCI_TYPE_PCIE:
		rqpn = &chip->rqpn_table[1];
		break;
	case RTW_HCI_TYPE_USB:
		if (rtwdev->hci.bulkout_num == 2)
			rqpn = &chip->rqpn_table[2];
		else if (rtwdev->hci.bulkout_num == 3)
			rqpn = &chip->rqpn_table[3];
		else if (rtwdev->hci.bulkout_num == 4)
			rqpn = &chip->rqpn_table[4];
		else
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	rtwdev->fifo.rqpn = rqpn;
	txdma_pq_map |= BIT_TXDMA_HIQ_MAP(rqpn->dma_map_hi);
	txdma_pq_map |= BIT_TXDMA_MGQ_MAP(rqpn->dma_map_mg);
	txdma_pq_map |= BIT_TXDMA_BKQ_MAP(rqpn->dma_map_bk);
	txdma_pq_map |= BIT_TXDMA_BEQ_MAP(rqpn->dma_map_be);
	txdma_pq_map |= BIT_TXDMA_VIQ_MAP(rqpn->dma_map_vi);
	txdma_pq_map |= BIT_TXDMA_VOQ_MAP(rqpn->dma_map_vo);
	rtw_write16(rtwdev, REG_TXDMA_PQ_MAP, txdma_pq_map);

	rtw_write8(rtwdev, REG_CR, 0);
	rtw_write8(rtwdev, REG_CR, MAC_TRX_ENABLE);
	rtw_write32(rtwdev, REG_H2CQ_CSR, BIT_H2CQ_FULL);

	return 0;
}

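/* Compute the reserved page layout: reserved pages are carved from the top
 * of the TX FIFO downwards (CSI buffer, FW TX buffer, CPU instructions,
 * H2C queue, H2C static/extra info, then driver pages), and the pages below
 * the reserved boundary are left for the AC queues.
 */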
static int set_trx_fifo_info(struct rtw_dev *rtwdev)
{
	struct rtw_fifo_conf *fifo = &rtwdev->fifo;
	struct rtw_chip_info *chip = rtwdev->chip;
	u16 cur_pg_addr;
	u8 csi_buf_pg_num = chip->csi_buf_pg_num;

	/* config rsvd page num */
	fifo->rsvd_drv_pg_num = 8;
	fifo->txff_pg_num = chip->txff_size >> 7;
	fifo->rsvd_pg_num = fifo->rsvd_drv_pg_num +
			   RSVD_PG_H2C_EXTRAINFO_NUM +
			   RSVD_PG_H2C_STATICINFO_NUM +
			   RSVD_PG_H2CQ_NUM +
			   RSVD_PG_CPU_INSTRUCTION_NUM +
			   RSVD_PG_FW_TXBUF_NUM +
			   csi_buf_pg_num;

	if (fifo->rsvd_pg_num > fifo->txff_pg_num)
		return -ENOMEM;

	fifo->acq_pg_num = fifo->txff_pg_num - fifo->rsvd_pg_num;
	fifo->rsvd_boundary = fifo->txff_pg_num - fifo->rsvd_pg_num;

	cur_pg_addr = fifo->txff_pg_num;
	cur_pg_addr -= csi_buf_pg_num;
	fifo->rsvd_csibuf_addr = cur_pg_addr;
	cur_pg_addr -= RSVD_PG_FW_TXBUF_NUM;
	fifo->rsvd_fw_txbuf_addr = cur_pg_addr;
	cur_pg_addr -= RSVD_PG_CPU_INSTRUCTION_NUM;
	fifo->rsvd_cpu_instr_addr = cur_pg_addr;
	cur_pg_addr -= RSVD_PG_H2CQ_NUM;
	fifo->rsvd_h2cq_addr = cur_pg_addr;
	cur_pg_addr -= RSVD_PG_H2C_STATICINFO_NUM;
	fifo->rsvd_h2c_sta_info_addr = cur_pg_addr;
	cur_pg_addr -= RSVD_PG_H2C_EXTRAINFO_NUM;
	fifo->rsvd_h2c_info_addr = cur_pg_addr;
	cur_pg_addr -= fifo->rsvd_drv_pg_num;
	fifo->rsvd_drv_addr = cur_pg_addr;

	if (fifo->rsvd_boundary != fifo->rsvd_drv_addr) {
		rtw_err(rtwdev, "wrong rsvd driver address\n");
		return -EINVAL;
	}

	return 0;
}

static int priority_queue_cfg(struct rtw_dev *rtwdev)
{
	struct rtw_fifo_conf *fifo = &rtwdev->fifo;
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_page_table *pg_tbl = NULL;
	u16 pubq_num;
	int ret;

	ret = set_trx_fifo_info(rtwdev);
	if (ret)
		return ret;

	switch (rtw_hci_type(rtwdev)) {
	case RTW_HCI_TYPE_PCIE:
		pg_tbl = &chip->page_table[1];
		break;
	case RTW_HCI_TYPE_USB:
		if (rtwdev->hci.bulkout_num == 2)
			pg_tbl = &chip->page_table[2];
		else if (rtwdev->hci.bulkout_num == 3)
			pg_tbl = &chip->page_table[3];
		else if (rtwdev->hci.bulkout_num == 4)
			pg_tbl = &chip->page_table[4];
		else
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	pubq_num = fifo->acq_pg_num - pg_tbl->hq_num - pg_tbl->lq_num -
		   pg_tbl->nq_num - pg_tbl->exq_num - pg_tbl->gapq_num;
	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_1, pg_tbl->hq_num);
	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_2, pg_tbl->lq_num);
	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_3, pg_tbl->nq_num);
	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_4, pg_tbl->exq_num);
	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_5, pubq_num);
	rtw_write32_set(rtwdev, REG_RQPN_CTRL_2, BIT_LD_RQPN);

	rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2, fifo->rsvd_boundary);
	rtw_write8_set(rtwdev, REG_FWHW_TXQ_CTRL + 2, BIT_EN_WR_FREE_TAIL >> 16);

	rtw_write16(rtwdev, REG_BCNQ_BDNY_V1, fifo->rsvd_boundary);
	rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2 + 2, fifo->rsvd_boundary);
	rtw_write16(rtwdev, REG_BCNQ1_BDNY_V1, fifo->rsvd_boundary);
	rtw_write32(rtwdev, REG_RXFF_BNDY, chip->rxff_size - C2H_PKT_BUF - 1);
	rtw_write8_set(rtwdev, REG_AUTO_LLT_V1, BIT_AUTO_INIT_LLT_V1);

	if (!check_hw_ready(rtwdev, REG_AUTO_LLT_V1, BIT_AUTO_INIT_LLT_V1, 0))
		return -EBUSY;

	rtw_write8(rtwdev, REG_CR + 3, 0);

	return 0;
}

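/* Point the hardware H2C queue at its reserved pages and verify that the
 * queue starts out empty (free space equals the queue size).
 */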
static int init_h2c(struct rtw_dev *rtwdev)
{
	struct rtw_fifo_conf *fifo = &rtwdev->fifo;
	u8 value8;
	u32 value32;
	u32 h2cq_addr;
	u32 h2cq_size;
	u32 h2cq_free;
	u32 wp, rp;

	h2cq_addr = fifo->rsvd_h2cq_addr << TX_PAGE_SIZE_SHIFT;
	h2cq_size = RSVD_PG_H2CQ_NUM << TX_PAGE_SIZE_SHIFT;

	value32 = rtw_read32(rtwdev, REG_H2C_HEAD);
	value32 = (value32 & 0xFFFC0000) | h2cq_addr;
	rtw_write32(rtwdev, REG_H2C_HEAD, value32);

	value32 = rtw_read32(rtwdev, REG_H2C_READ_ADDR);
	value32 = (value32 & 0xFFFC0000) | h2cq_addr;
	rtw_write32(rtwdev, REG_H2C_READ_ADDR, value32);

	value32 = rtw_read32(rtwdev, REG_H2C_TAIL);
	value32 &= 0xFFFC0000;
	value32 |= (h2cq_addr + h2cq_size);
	rtw_write32(rtwdev, REG_H2C_TAIL, value32);

	value8 = rtw_read8(rtwdev, REG_H2C_INFO);
	value8 = (u8)((value8 & 0xFC) | 0x01);
	rtw_write8(rtwdev, REG_H2C_INFO, value8);

	value8 = rtw_read8(rtwdev, REG_H2C_INFO);
	value8 = (u8)((value8 & 0xFB) | 0x04);
	rtw_write8(rtwdev, REG_H2C_INFO, value8);

	value8 = rtw_read8(rtwdev, REG_TXDMA_OFFSET_CHK + 1);
	value8 = (u8)((value8 & 0x7f) | 0x80);
	rtw_write8(rtwdev, REG_TXDMA_OFFSET_CHK + 1, value8);

	wp = rtw_read32(rtwdev, REG_H2C_PKT_WRITEADDR) & 0x3FFFF;
	rp = rtw_read32(rtwdev, REG_H2C_PKT_READADDR) & 0x3FFFF;
	h2cq_free = wp >= rp ? h2cq_size - (wp - rp) : rp - wp;

	if (h2cq_size != h2cq_free) {
		rtw_err(rtwdev, "H2C queue mismatch\n");
		return -EINVAL;
	}

	return 0;
}

static int rtw_init_trx_cfg(struct rtw_dev *rtwdev)
{
	int ret;

	ret = txdma_queue_mapping(rtwdev);
	if (ret)
		return ret;

	ret = priority_queue_cfg(rtwdev);
	if (ret)
		return ret;

	ret = init_h2c(rtwdev);
	if (ret)
		return ret;

	return 0;
}

static int rtw_drv_info_cfg(struct rtw_dev *rtwdev)
{
	u8 value8;

	rtw_write8(rtwdev, REG_RX_DRVINFO_SZ, PHY_STATUS_SIZE);
	value8 = rtw_read8(rtwdev, REG_TRXFF_BNDY + 1);
	value8 &= 0xF0;
	/* For rxdesc len = 0 issue */
	value8 |= 0xF;
	rtw_write8(rtwdev, REG_TRXFF_BNDY + 1, value8);
	rtw_write32_set(rtwdev, REG_RCR, BIT_APP_PHYSTS);
	rtw_write32_clr(rtwdev, REG_WMAC_OPTION_FUNCTION + 4, BIT(8) | BIT(9));

	return 0;
}

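/* MAC initialization after firmware download: TX DMA queue mapping,
 * priority queue and reserved page configuration, H2C queue setup, the
 * chip-specific mac_init ops, and the RX driver info configuration.
 *
 * A rough bring-up order, as a sketch only (the actual call sites live in
 * the core code and may interleave other steps):
 *
 *	ret = rtw_mac_power_on(rtwdev);
 *	if (!ret)
 *		ret = rtw_download_firmware(rtwdev, fw);
 *	if (!ret)
 *		ret = rtw_mac_init(rtwdev);
 */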
int rtw_mac_init(struct rtw_dev *rtwdev)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	int ret;

	ret = rtw_init_trx_cfg(rtwdev);
	if (ret)
		return ret;

	ret = chip->ops->mac_init(rtwdev);
	if (ret)
		return ret;

	ret = rtw_drv_info_cfg(rtwdev);
	if (ret)
		return ret;

	return 0;
}