// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019 Realtek Corporation
 */

#include "main.h"
#include "mac.h"
#include "reg.h"
#include "fw.h"
#include "debug.h"

void rtw_set_channel_mac(struct rtw_dev *rtwdev, u8 channel, u8 bw,
                         u8 primary_ch_idx)
{
        u8 txsc40 = 0, txsc20 = 0;
        u32 value32;
        u8 value8;

        txsc20 = primary_ch_idx;
        if (bw == RTW_CHANNEL_WIDTH_80) {
                if (txsc20 == RTW_SC_20_UPPER || txsc20 == RTW_SC_20_UPMOST)
                        txsc40 = RTW_SC_40_UPPER;
                else
                        txsc40 = RTW_SC_40_LOWER;
        }
        rtw_write8(rtwdev, REG_DATA_SC,
                   BIT_TXSC_20M(txsc20) | BIT_TXSC_40M(txsc40));

        value32 = rtw_read32(rtwdev, REG_WMAC_TRXPTCL_CTL);
        value32 &= ~BIT_RFMOD;
        switch (bw) {
        case RTW_CHANNEL_WIDTH_80:
                value32 |= BIT_RFMOD_80M;
                break;
        case RTW_CHANNEL_WIDTH_40:
                value32 |= BIT_RFMOD_40M;
                break;
        case RTW_CHANNEL_WIDTH_20:
        default:
                break;
        }
        rtw_write32(rtwdev, REG_WMAC_TRXPTCL_CTL, value32);

        if (rtw_chip_wcpu_11n(rtwdev))
                return;

        value32 = rtw_read32(rtwdev, REG_AFE_CTRL1) & ~(BIT_MAC_CLK_SEL);
        value32 |= (MAC_CLK_HW_DEF_80M << BIT_SHIFT_MAC_CLK_SEL);
        rtw_write32(rtwdev, REG_AFE_CTRL1, value32);

        rtw_write8(rtwdev, REG_USTIME_TSF, MAC_CLK_SPEED);
        rtw_write8(rtwdev, REG_USTIME_EDCA, MAC_CLK_SPEED);

        value8 = rtw_read8(rtwdev, REG_CCK_CHECK);
        value8 = value8 & ~BIT_CHECK_CCK_EN;
        if (IS_CH_5G_BAND(channel))
                value8 |= BIT_CHECK_CCK_EN;
        rtw_write8(rtwdev, REG_CCK_CHECK, value8);
}
EXPORT_SYMBOL(rtw_set_channel_mac);

static int rtw_mac_pre_system_cfg(struct rtw_dev *rtwdev)
{
        u32 value32;
        u8 value8;

        rtw_write8(rtwdev, REG_RSV_CTRL, 0);

        if (rtw_chip_wcpu_11n(rtwdev)) {
                if (rtw_read32(rtwdev, REG_SYS_CFG1) & BIT_LDO)
                        rtw_write8(rtwdev, REG_LDO_SWR_CTRL, LDO_SEL);
                else
                        rtw_write8(rtwdev, REG_LDO_SWR_CTRL, SPS_SEL);
                return 0;
        }

        switch (rtw_hci_type(rtwdev)) {
        case RTW_HCI_TYPE_PCIE:
                rtw_write32_set(rtwdev, REG_HCI_OPT_CTRL, BIT_BT_DIG_CLK_EN);
                break;
        case RTW_HCI_TYPE_USB:
                break;
        default:
                return -EINVAL;
        }

        /* config PIN Mux */
        value32 = rtw_read32(rtwdev, REG_PAD_CTRL1);
        value32 |= BIT_PAPE_WLBT_SEL | BIT_LNAON_WLBT_SEL;
        rtw_write32(rtwdev, REG_PAD_CTRL1, value32);

        value32 = rtw_read32(rtwdev, REG_LED_CFG);
        value32 &= ~(BIT_PAPE_SEL_EN | BIT_LNAON_SEL_EN);
        rtw_write32(rtwdev, REG_LED_CFG, value32);

        value32 = rtw_read32(rtwdev, REG_GPIO_MUXCFG);
        value32 |= BIT_WLRFE_4_5_EN;
        rtw_write32(rtwdev, REG_GPIO_MUXCFG, value32);

        /* disable BB/RF */
        value8 = rtw_read8(rtwdev, REG_SYS_FUNC_EN);
        value8 &= ~(BIT_FEN_BB_RSTB | BIT_FEN_BB_GLB_RST);
        rtw_write8(rtwdev, REG_SYS_FUNC_EN, value8);

        value8 = rtw_read8(rtwdev, REG_RF_CTRL);
        value8 &= ~(BIT_RF_SDM_RSTB | BIT_RF_RSTB | BIT_RF_EN);
        rtw_write8(rtwdev, REG_RF_CTRL, value8);

        value32 = rtw_read32(rtwdev, REG_WLRF1);
        value32 &= ~BIT_WLRF1_BBRF_EN;
        rtw_write32(rtwdev, REG_WLRF1, value32);

        return 0;
}

static bool do_pwr_poll_cmd(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 target)
{
        u32 val;

        target &= mask;

        return read_poll_timeout_atomic(rtw_read8, val, (val & mask) == target,
                                        50, 50 * RTW_PWR_POLLING_CNT, false,
                                        rtwdev, addr) == 0;
}

static int rtw_pwr_cmd_polling(struct rtw_dev *rtwdev,
                               const struct rtw_pwr_seq_cmd *cmd)
{
        u8 value;
        u32 offset;

        if (cmd->base == RTW_PWR_ADDR_SDIO)
                offset = cmd->offset | SDIO_LOCAL_OFFSET;
        else
                offset = cmd->offset;

        if (do_pwr_poll_cmd(rtwdev, offset, cmd->mask, cmd->value))
                return 0;

        if (rtw_hci_type(rtwdev) != RTW_HCI_TYPE_PCIE)
                goto err;

        /* if PCIE, toggle BIT_PFM_WOWL and try again */
        value = rtw_read8(rtwdev, REG_SYS_PW_CTRL);
        if (rtwdev->chip->id == RTW_CHIP_TYPE_8723D)
                rtw_write8(rtwdev, REG_SYS_PW_CTRL, value & ~BIT_PFM_WOWL);
        rtw_write8(rtwdev, REG_SYS_PW_CTRL, value | BIT_PFM_WOWL);
        rtw_write8(rtwdev, REG_SYS_PW_CTRL, value & ~BIT_PFM_WOWL);
        if (rtwdev->chip->id == RTW_CHIP_TYPE_8723D)
                rtw_write8(rtwdev, REG_SYS_PW_CTRL, value | BIT_PFM_WOWL);

        if (do_pwr_poll_cmd(rtwdev, offset, cmd->mask, cmd->value))
                return 0;

err:
        rtw_err(rtwdev, "failed to poll offset=0x%x mask=0x%x value=0x%x\n",
                offset, cmd->mask, cmd->value);
        return -EBUSY;
}

static int rtw_sub_pwr_seq_parser(struct rtw_dev *rtwdev, u8 intf_mask,
                                  u8 cut_mask,
                                  const struct rtw_pwr_seq_cmd *cmd)
{
        const struct rtw_pwr_seq_cmd *cur_cmd;
        u32 offset;
        u8 value;

        for (cur_cmd = cmd; cur_cmd->cmd != RTW_PWR_CMD_END; cur_cmd++) {
                if (!(cur_cmd->intf_mask & intf_mask) ||
                    !(cur_cmd->cut_mask & cut_mask))
                        continue;

                switch (cur_cmd->cmd) {
                case RTW_PWR_CMD_WRITE:
                        offset = cur_cmd->offset;

                        if (cur_cmd->base == RTW_PWR_ADDR_SDIO)
                                offset |= SDIO_LOCAL_OFFSET;

                        value = rtw_read8(rtwdev, offset);
                        value &= ~cur_cmd->mask;
                        value |= (cur_cmd->value & cur_cmd->mask);
                        rtw_write8(rtwdev, offset, value);
                        break;
                case RTW_PWR_CMD_POLLING:
                        if (rtw_pwr_cmd_polling(rtwdev, cur_cmd))
                                return -EBUSY;
                        break;
                case RTW_PWR_CMD_DELAY:
                        if (cur_cmd->value == RTW_PWR_DELAY_US)
                                udelay(cur_cmd->offset);
                        else
                                mdelay(cur_cmd->offset);
                        break;
                case RTW_PWR_CMD_READ:
                        break;
                default:
                        return -EINVAL;
                }
        }

        return 0;
}

static int rtw_pwr_seq_parser(struct rtw_dev *rtwdev,
                              const struct rtw_pwr_seq_cmd **cmd_seq)
{
        u8 cut_mask;
        u8 intf_mask;
        u8 cut;
        u32 idx = 0;
        const struct rtw_pwr_seq_cmd *cmd;
        int ret;

        cut = rtwdev->hal.cut_version;
        cut_mask = cut_version_to_mask(cut);
        switch (rtw_hci_type(rtwdev)) {
        case RTW_HCI_TYPE_PCIE:
                intf_mask = BIT(2);
                break;
        case RTW_HCI_TYPE_USB:
                intf_mask = BIT(1);
                break;
        default:
                return -EINVAL;
        }

        do {
                cmd = cmd_seq[idx];
                if (!cmd)
                        break;

                ret = rtw_sub_pwr_seq_parser(rtwdev, intf_mask, cut_mask, cmd);
                if (ret)
                        return -EBUSY;

                idx++;
        } while (1);

        return 0;
}
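
/* Editor's note (illustrative, not from the upstream file): the parsers
 * above walk chip-provided arrays of struct rtw_pwr_seq_cmd, terminated by
 * an RTW_PWR_CMD_END entry. Based only on the fields referenced above
 * (cmd, base, offset, mask, value, intf_mask, cut_mask), a hypothetical
 * table entry might look roughly like:
 *
 *      { .offset = 0x0005, .cmd = RTW_PWR_CMD_WRITE,
 *        .base = RTW_PWR_ADDR_MAC, .intf_mask = 0xff, .cut_mask = 0xff,
 *        .mask = BIT(7), .value = 0 },
 *
 * RTW_PWR_ADDR_MAC and the literal masks are assumptions here; the real
 * tables live in the per-chip sources.
 */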

static int rtw_mac_power_switch(struct rtw_dev *rtwdev, bool pwr_on)
{
        struct rtw_chip_info *chip = rtwdev->chip;
        const struct rtw_pwr_seq_cmd **pwr_seq;
        u8 rpwm;
        bool cur_pwr;

        if (rtw_chip_wcpu_11ac(rtwdev)) {
                rpwm = rtw_read8(rtwdev, rtwdev->hci.rpwm_addr);

                /* Check FW still exist or not */
                if (rtw_read16(rtwdev, REG_MCUFW_CTRL) == 0xC078) {
                        rpwm = (rpwm ^ BIT_RPWM_TOGGLE) & BIT_RPWM_TOGGLE;
                        rtw_write8(rtwdev, rtwdev->hci.rpwm_addr, rpwm);
                }
        }

        if (rtw_read8(rtwdev, REG_CR) == 0xea)
                cur_pwr = false;
        else if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB &&
                 (rtw_read8(rtwdev, REG_SYS_STATUS1 + 1) & BIT(0)))
                cur_pwr = false;
        else
                cur_pwr = true;

        if (pwr_on == cur_pwr)
                return -EALREADY;

        pwr_seq = pwr_on ? chip->pwr_on_seq : chip->pwr_off_seq;
        if (rtw_pwr_seq_parser(rtwdev, pwr_seq))
                return -EINVAL;

        return 0;
}

static int __rtw_mac_init_system_cfg(struct rtw_dev *rtwdev)
{
        u8 sys_func_en = rtwdev->chip->sys_func_en;
        u8 value8;
        u32 value, tmp;

        value = rtw_read32(rtwdev, REG_CPU_DMEM_CON);
        value |= BIT_WL_PLATFORM_RST | BIT_DDMA_EN;
        rtw_write32(rtwdev, REG_CPU_DMEM_CON, value);

        rtw_write8_set(rtwdev, REG_SYS_FUNC_EN + 1, sys_func_en);
        value8 = (rtw_read8(rtwdev, REG_CR_EXT + 3) & 0xF0) | 0x0C;
        rtw_write8(rtwdev, REG_CR_EXT + 3, value8);

        /* disable boot-from-flash for driver's DL FW */
        tmp = rtw_read32(rtwdev, REG_MCUFW_CTRL);
        if (tmp & BIT_BOOT_FSPI_EN) {
                rtw_write32(rtwdev, REG_MCUFW_CTRL, tmp & (~BIT_BOOT_FSPI_EN));
                value = rtw_read32(rtwdev, REG_GPIO_MUXCFG) & (~BIT_FSPI_EN);
                rtw_write32(rtwdev, REG_GPIO_MUXCFG, value);
        }

        return 0;
}

static int __rtw_mac_init_system_cfg_legacy(struct rtw_dev *rtwdev)
{
        rtw_write8(rtwdev, REG_CR, 0xff);
        mdelay(2);
        rtw_write8(rtwdev, REG_HWSEQ_CTRL, 0x7f);
        mdelay(2);

        rtw_write8_set(rtwdev, REG_SYS_CLKR, BIT_WAKEPAD_EN);
        rtw_write16_clr(rtwdev, REG_GPIO_MUXCFG, BIT_EN_SIC);

        rtw_write16(rtwdev, REG_CR, 0x2ff);

        return 0;
}

static int rtw_mac_init_system_cfg(struct rtw_dev *rtwdev)
{
        if (rtw_chip_wcpu_11n(rtwdev))
                return __rtw_mac_init_system_cfg_legacy(rtwdev);

        return __rtw_mac_init_system_cfg(rtwdev);
}

int rtw_mac_power_on(struct rtw_dev *rtwdev)
{
        int ret = 0;

        ret = rtw_mac_pre_system_cfg(rtwdev);
        if (ret)
                goto err;

        ret = rtw_mac_power_switch(rtwdev, true);
        if (ret == -EALREADY) {
                rtw_mac_power_switch(rtwdev, false);
                ret = rtw_mac_power_switch(rtwdev, true);
                if (ret)
                        goto err;
        } else if (ret) {
                goto err;
        }

        ret = rtw_mac_init_system_cfg(rtwdev);
        if (ret)
                goto err;

        return 0;

err:
        rtw_err(rtwdev, "mac power on failed");
        return ret;
}

void rtw_mac_power_off(struct rtw_dev *rtwdev)
{
        rtw_mac_power_switch(rtwdev, false);
}
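
/* Editor's note (layout sketch): check_firmware_size() below assumes the
 * non-legacy firmware image is a header followed by DMEM, IMEM and an
 * optional EMEM section, each trailed by a checksum:
 *
 *      | FW_HDR_SIZE | dmem + chksum | imem + chksum | [emem + chksum] |
 *
 * so the expected length is FW_HDR_SIZE + dmem_size + imem_size
 * (+ emem_size), with FW_HDR_CHKSUM_SIZE added per present section.
 */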

static bool check_firmware_size(const u8 *data, u32 size)
{
        const struct rtw_fw_hdr *fw_hdr = (const struct rtw_fw_hdr *)data;
        u32 dmem_size;
        u32 imem_size;
        u32 emem_size;
        u32 real_size;

        dmem_size = le32_to_cpu(fw_hdr->dmem_size);
        imem_size = le32_to_cpu(fw_hdr->imem_size);
        emem_size = (fw_hdr->mem_usage & BIT(4)) ?
                    le32_to_cpu(fw_hdr->emem_size) : 0;

        dmem_size += FW_HDR_CHKSUM_SIZE;
        imem_size += FW_HDR_CHKSUM_SIZE;
        emem_size += emem_size ? FW_HDR_CHKSUM_SIZE : 0;
        real_size = FW_HDR_SIZE + dmem_size + imem_size + emem_size;
        if (real_size != size)
                return false;

        return true;
}

static void wlan_cpu_enable(struct rtw_dev *rtwdev, bool enable)
{
        if (enable) {
                /* cpu io interface enable */
                rtw_write8_set(rtwdev, REG_RSV_CTRL + 1, BIT_WLMCU_IOIF);

                /* cpu enable */
                rtw_write8_set(rtwdev, REG_SYS_FUNC_EN + 1, BIT_FEN_CPUEN);
        } else {
                /* cpu io interface disable */
                rtw_write8_clr(rtwdev, REG_SYS_FUNC_EN + 1, BIT_FEN_CPUEN);

                /* cpu disable */
                rtw_write8_clr(rtwdev, REG_RSV_CTRL + 1, BIT_WLMCU_IOIF);
        }
}

#define DLFW_RESTORE_REG_NUM 6

static void download_firmware_reg_backup(struct rtw_dev *rtwdev,
                                         struct rtw_backup_info *bckp)
{
        u8 tmp;
        u8 bckp_idx = 0;

        /* set HIQ to hi priority */
        bckp[bckp_idx].len = 1;
        bckp[bckp_idx].reg = REG_TXDMA_PQ_MAP + 1;
        bckp[bckp_idx].val = rtw_read8(rtwdev, REG_TXDMA_PQ_MAP + 1);
        bckp_idx++;
        tmp = RTW_DMA_MAPPING_HIGH << 6;
        rtw_write8(rtwdev, REG_TXDMA_PQ_MAP + 1, tmp);

        /* DLFW only use HIQ, map HIQ to hi priority */
        bckp[bckp_idx].len = 1;
        bckp[bckp_idx].reg = REG_CR;
        bckp[bckp_idx].val = rtw_read8(rtwdev, REG_CR);
        bckp_idx++;
        bckp[bckp_idx].len = 4;
        bckp[bckp_idx].reg = REG_H2CQ_CSR;
        bckp[bckp_idx].val = BIT_H2CQ_FULL;
        bckp_idx++;
        tmp = BIT_HCI_TXDMA_EN | BIT_TXDMA_EN;
        rtw_write8(rtwdev, REG_CR, tmp);
        rtw_write32(rtwdev, REG_H2CQ_CSR, BIT_H2CQ_FULL);

        /* Config hi priority queue and public priority queue page number */
        bckp[bckp_idx].len = 2;
        bckp[bckp_idx].reg = REG_FIFOPAGE_INFO_1;
        bckp[bckp_idx].val = rtw_read16(rtwdev, REG_FIFOPAGE_INFO_1);
        bckp_idx++;
        bckp[bckp_idx].len = 4;
        bckp[bckp_idx].reg = REG_RQPN_CTRL_2;
        bckp[bckp_idx].val = rtw_read32(rtwdev, REG_RQPN_CTRL_2) | BIT_LD_RQPN;
        bckp_idx++;
        rtw_write16(rtwdev, REG_FIFOPAGE_INFO_1, 0x200);
        rtw_write32(rtwdev, REG_RQPN_CTRL_2, bckp[bckp_idx - 1].val);

        /* Disable beacon related functions */
        tmp = rtw_read8(rtwdev, REG_BCN_CTRL);
        bckp[bckp_idx].len = 1;
        bckp[bckp_idx].reg = REG_BCN_CTRL;
        bckp[bckp_idx].val = tmp;
        bckp_idx++;
        tmp = (u8)((tmp & (~BIT_EN_BCN_FUNCTION)) | BIT_DIS_TSF_UDT);
        rtw_write8(rtwdev, REG_BCN_CTRL, tmp);

        WARN(bckp_idx != DLFW_RESTORE_REG_NUM, "wrong backup number\n");
}

static void download_firmware_reset_platform(struct rtw_dev *rtwdev)
{
        rtw_write8_clr(rtwdev, REG_CPU_DMEM_CON + 2, BIT_WL_PLATFORM_RST >> 16);
        rtw_write8_clr(rtwdev, REG_SYS_CLK_CTRL + 1, BIT_CPU_CLK_EN >> 8);
        rtw_write8_set(rtwdev, REG_CPU_DMEM_CON + 2, BIT_WL_PLATFORM_RST >> 16);
        rtw_write8_set(rtwdev, REG_SYS_CLK_CTRL + 1, BIT_CPU_CLK_EN >> 8);
}

static void download_firmware_reg_restore(struct rtw_dev *rtwdev,
                                          struct rtw_backup_info *bckp,
                                          u8 bckp_num)
{
        rtw_restore_reg(rtwdev, bckp, bckp_num);
}

#define TX_DESC_SIZE 48

static int send_firmware_pkt_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr,
                                       const u8 *data, u32 size)
{
        u8 *buf;
        int ret;

        buf = kmemdup(data, size, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        ret = rtw_fw_write_data_rsvd_page(rtwdev, pg_addr, buf, size);
        kfree(buf);
        return ret;
}

static int
send_firmware_pkt(struct rtw_dev *rtwdev, u16 pg_addr, const u8 *data, u32 size)
{
        int ret;

        if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB &&
            !((size + TX_DESC_SIZE) & (512 - 1)))
                size += 1;

        ret = send_firmware_pkt_rsvd_page(rtwdev, pg_addr, data, size);
        if (ret)
                rtw_err(rtwdev, "failed to download rsvd page\n");

        return ret;
}

static int
iddma_enable(struct rtw_dev *rtwdev, u32 src, u32 dst, u32 ctrl)
{
        rtw_write32(rtwdev, REG_DDMA_CH0SA, src);
        rtw_write32(rtwdev, REG_DDMA_CH0DA, dst);
        rtw_write32(rtwdev, REG_DDMA_CH0CTRL, ctrl);

        if (!check_hw_ready(rtwdev, REG_DDMA_CH0CTRL, BIT_DDMACH0_OWN, 0))
                return -EBUSY;

        return 0;
}

static int iddma_download_firmware(struct rtw_dev *rtwdev, u32 src, u32 dst,
                                   u32 len, u8 first)
{
        u32 ch0_ctrl = BIT_DDMACH0_CHKSUM_EN | BIT_DDMACH0_OWN;

        if (!check_hw_ready(rtwdev, REG_DDMA_CH0CTRL, BIT_DDMACH0_OWN, 0))
                return -EBUSY;

        ch0_ctrl |= len & BIT_MASK_DDMACH0_DLEN;
        if (!first)
                ch0_ctrl |= BIT_DDMACH0_CHKSUM_CONT;

        if (iddma_enable(rtwdev, src, dst, ch0_ctrl))
                return -EBUSY;

        return 0;
}

static bool
check_fw_checksum(struct rtw_dev *rtwdev, u32 addr)
{
        u8 fw_ctrl;

        fw_ctrl = rtw_read8(rtwdev, REG_MCUFW_CTRL);

        if (rtw_read32(rtwdev, REG_DDMA_CH0CTRL) & BIT_DDMACH0_CHKSUM_STS) {
                if (addr < OCPBASE_DMEM_88XX) {
                        fw_ctrl |= BIT_IMEM_DW_OK;
                        fw_ctrl &= ~BIT_IMEM_CHKSUM_OK;
                        rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
                } else {
                        fw_ctrl |= BIT_DMEM_DW_OK;
                        fw_ctrl &= ~BIT_DMEM_CHKSUM_OK;
                        rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
                }

                rtw_err(rtwdev, "invalid fw checksum\n");

                return false;
        }

        if (addr < OCPBASE_DMEM_88XX) {
                fw_ctrl |= (BIT_IMEM_DW_OK | BIT_IMEM_CHKSUM_OK);
                rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
        } else {
                fw_ctrl |= (BIT_DMEM_DW_OK | BIT_DMEM_CHKSUM_OK);
                rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
        }

        return true;
}

static int
download_firmware_to_mem(struct rtw_dev *rtwdev, const u8 *data,
                         u32 src, u32 dst, u32 size)
{
        struct rtw_chip_info *chip = rtwdev->chip;
        u32 desc_size = chip->tx_pkt_desc_sz;
        u8 first_part;
        u32 mem_offset;
        u32 residue_size;
        u32 pkt_size;
        u32 max_size = 0x1000;
        u32 val;
        int ret;

        mem_offset = 0;
        first_part = 1;
        residue_size = size;

        val = rtw_read32(rtwdev, REG_DDMA_CH0CTRL);
        val |= BIT_DDMACH0_RESET_CHKSUM_STS;
        rtw_write32(rtwdev, REG_DDMA_CH0CTRL, val);

        while (residue_size) {
                if (residue_size >= max_size)
                        pkt_size = max_size;
                else
                        pkt_size = residue_size;

                ret = send_firmware_pkt(rtwdev, (u16)(src >> 7),
                                        data + mem_offset, pkt_size);
                if (ret)
                        return ret;

                ret = iddma_download_firmware(rtwdev, OCPBASE_TXBUF_88XX +
                                              src + desc_size,
                                              dst + mem_offset, pkt_size,
                                              first_part);
                if (ret)
                        return ret;

                first_part = 0;
                mem_offset += pkt_size;
                residue_size -= pkt_size;
        }

        if (!check_fw_checksum(rtwdev, dst))
                return -EINVAL;

        return 0;
}
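
/* Editor's note (flow sketch): start_download_firmware() below feeds each
 * firmware section through download_firmware_to_mem(), which pushes the
 * image in chunks of at most 0x1000 bytes into the reserved-page TX buffer
 * and then DDMAs each chunk from OCPBASE_TXBUF_88XX to its IMEM/DMEM/EMEM
 * destination, accumulating a hardware checksum that check_fw_checksum()
 * verifies once per section.
 */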

static int
start_download_firmware(struct rtw_dev *rtwdev, const u8 *data, u32 size)
{
        const struct rtw_fw_hdr *fw_hdr = (const struct rtw_fw_hdr *)data;
        const u8 *cur_fw;
        u16 val;
        u32 imem_size;
        u32 dmem_size;
        u32 emem_size;
        u32 addr;
        int ret;

        dmem_size = le32_to_cpu(fw_hdr->dmem_size);
        imem_size = le32_to_cpu(fw_hdr->imem_size);
        emem_size = (fw_hdr->mem_usage & BIT(4)) ?
                    le32_to_cpu(fw_hdr->emem_size) : 0;
        dmem_size += FW_HDR_CHKSUM_SIZE;
        imem_size += FW_HDR_CHKSUM_SIZE;
        emem_size += emem_size ? FW_HDR_CHKSUM_SIZE : 0;

        val = (u16)(rtw_read16(rtwdev, REG_MCUFW_CTRL) & 0x3800);
        val |= BIT_MCUFWDL_EN;
        rtw_write16(rtwdev, REG_MCUFW_CTRL, val);

        cur_fw = data + FW_HDR_SIZE;
        addr = le32_to_cpu(fw_hdr->dmem_addr);
        addr &= ~BIT(31);
        ret = download_firmware_to_mem(rtwdev, cur_fw, 0, addr, dmem_size);
        if (ret)
                return ret;

        cur_fw = data + FW_HDR_SIZE + dmem_size;
        addr = le32_to_cpu(fw_hdr->imem_addr);
        addr &= ~BIT(31);
        ret = download_firmware_to_mem(rtwdev, cur_fw, 0, addr, imem_size);
        if (ret)
                return ret;

        if (emem_size) {
                cur_fw = data + FW_HDR_SIZE + dmem_size + imem_size;
                addr = le32_to_cpu(fw_hdr->emem_addr);
                addr &= ~BIT(31);
                ret = download_firmware_to_mem(rtwdev, cur_fw, 0, addr,
                                               emem_size);
                if (ret)
                        return ret;
        }

        return 0;
}

static int download_firmware_validate(struct rtw_dev *rtwdev)
{
        u32 fw_key;

        if (!check_hw_ready(rtwdev, REG_MCUFW_CTRL, FW_READY_MASK, FW_READY)) {
                fw_key = rtw_read32(rtwdev, REG_FW_DBG7) & FW_KEY_MASK;
                if (fw_key == ILLEGAL_KEY_GROUP)
                        rtw_err(rtwdev, "invalid fw key\n");
                return -EINVAL;
        }

        return 0;
}

static void download_firmware_end_flow(struct rtw_dev *rtwdev)
{
        u16 fw_ctrl;

        rtw_write32(rtwdev, REG_TXDMA_STATUS, BTI_PAGE_OVF);

        /* Check IMEM & DMEM checksum is OK or not */
        fw_ctrl = rtw_read16(rtwdev, REG_MCUFW_CTRL);
        if ((fw_ctrl & BIT_CHECK_SUM_OK) != BIT_CHECK_SUM_OK)
                return;

        fw_ctrl = (fw_ctrl | BIT_FW_DW_RDY) & ~BIT_MCUFWDL_EN;
        rtw_write16(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
}

static int __rtw_download_firmware(struct rtw_dev *rtwdev,
                                   struct rtw_fw_state *fw)
{
        struct rtw_backup_info bckp[DLFW_RESTORE_REG_NUM];
        const u8 *data = fw->firmware->data;
        u32 size = fw->firmware->size;
        u32 ltecoex_bckp;
        int ret;

        if (!check_firmware_size(data, size))
                return -EINVAL;

        if (!ltecoex_read_reg(rtwdev, 0x38, &ltecoex_bckp))
                return -EBUSY;

        wlan_cpu_enable(rtwdev, false);

        download_firmware_reg_backup(rtwdev, bckp);
        download_firmware_reset_platform(rtwdev);

        ret = start_download_firmware(rtwdev, data, size);
        if (ret)
                goto dlfw_fail;

        download_firmware_reg_restore(rtwdev, bckp, DLFW_RESTORE_REG_NUM);

        download_firmware_end_flow(rtwdev);

        wlan_cpu_enable(rtwdev, true);

        if (!ltecoex_reg_write(rtwdev, 0x38, ltecoex_bckp))
                return -EBUSY;

        ret = download_firmware_validate(rtwdev);
        if (ret)
                goto dlfw_fail;

        /* reset desc and index */
        rtw_hci_setup(rtwdev);

        rtwdev->h2c.last_box_num = 0;
        rtwdev->h2c.seq = 0;

        set_bit(RTW_FLAG_FW_RUNNING, rtwdev->flags);

        return 0;

dlfw_fail:
        /* Disable FWDL_EN */
        rtw_write8_clr(rtwdev, REG_MCUFW_CTRL, BIT_MCUFWDL_EN);
        rtw_write8_set(rtwdev, REG_SYS_FUNC_EN + 1, BIT_FEN_CPUEN);

        return ret;
}

static void en_download_firmware_legacy(struct rtw_dev *rtwdev, bool en)
{
        int try;

        if (en) {
                wlan_cpu_enable(rtwdev, false);
                wlan_cpu_enable(rtwdev, true);

                rtw_write8_set(rtwdev, REG_MCUFW_CTRL, BIT_MCUFWDL_EN);

                for (try = 0; try < 10; try++) {
                        if (rtw_read8(rtwdev, REG_MCUFW_CTRL) & BIT_MCUFWDL_EN)
                                goto fwdl_ready;
                        rtw_write8_set(rtwdev, REG_MCUFW_CTRL, BIT_MCUFWDL_EN);
                        msleep(20);
                }
                rtw_err(rtwdev, "failed to check fw download ready\n");

fwdl_ready:
                rtw_write32_clr(rtwdev, REG_MCUFW_CTRL, BIT_ROM_DLEN);
        } else {
                rtw_write8_clr(rtwdev, REG_MCUFW_CTRL, BIT_MCUFWDL_EN);
        }
}

static void
write_firmware_page(struct rtw_dev *rtwdev, u32 page, const u8 *data, u32 size)
{
        u32 val32;
        u32 block_nr;
        u32 remain_size;
        u32 write_addr = FW_START_ADDR_LEGACY;
        const __le32 *ptr = (const __le32 *)data;
        u32 block;
        __le32 remain_data = 0;

        block_nr = size >> DLFW_BLK_SIZE_SHIFT_LEGACY;
        remain_size = size & (DLFW_BLK_SIZE_LEGACY - 1);

        val32 = rtw_read32(rtwdev, REG_MCUFW_CTRL);
        val32 &= ~BIT_ROM_PGE;
        val32 |= (page << BIT_SHIFT_ROM_PGE) & BIT_ROM_PGE;
        rtw_write32(rtwdev, REG_MCUFW_CTRL, val32);

        for (block = 0; block < block_nr; block++) {
                rtw_write32(rtwdev, write_addr, le32_to_cpu(*ptr));

                write_addr += DLFW_BLK_SIZE_LEGACY;
                ptr++;
        }

        if (remain_size) {
                memcpy(&remain_data, ptr, remain_size);
                rtw_write32(rtwdev, write_addr, le32_to_cpu(remain_data));
        }
}

static int
download_firmware_legacy(struct rtw_dev *rtwdev, const u8 *data, u32 size)
{
        u32 page;
        u32 total_page;
        u32 last_page_size;

        data += sizeof(struct rtw_fw_hdr_legacy);
        size -= sizeof(struct rtw_fw_hdr_legacy);

        total_page = size >> DLFW_PAGE_SIZE_SHIFT_LEGACY;
        last_page_size = size & (DLFW_PAGE_SIZE_LEGACY - 1);

        rtw_write8_set(rtwdev, REG_MCUFW_CTRL, BIT_FWDL_CHK_RPT);

        for (page = 0; page < total_page; page++) {
                write_firmware_page(rtwdev, page, data, DLFW_PAGE_SIZE_LEGACY);
                data += DLFW_PAGE_SIZE_LEGACY;
        }
        if (last_page_size)
                write_firmware_page(rtwdev, page, data, last_page_size);

        if (!check_hw_ready(rtwdev, REG_MCUFW_CTRL, BIT_FWDL_CHK_RPT, 1)) {
                rtw_err(rtwdev, "failed to check download firmware report\n");
                return -EINVAL;
        }

        return 0;
}

static int download_firmware_validate_legacy(struct rtw_dev *rtwdev)
{
        u32 val32;
        int try;

        val32 = rtw_read32(rtwdev, REG_MCUFW_CTRL);
        val32 |= BIT_MCUFWDL_RDY;
        val32 &= ~BIT_WINTINI_RDY;
        rtw_write32(rtwdev, REG_MCUFW_CTRL, val32);

        wlan_cpu_enable(rtwdev, false);
        wlan_cpu_enable(rtwdev, true);

        for (try = 0; try < 10; try++) {
                val32 = rtw_read32(rtwdev, REG_MCUFW_CTRL);
                if ((val32 & FW_READY_LEGACY) == FW_READY_LEGACY)
                        return 0;
                msleep(20);
        }

        rtw_err(rtwdev, "failed to validate firmware\n");
        return -EINVAL;
}

static int __rtw_download_firmware_legacy(struct rtw_dev *rtwdev,
                                          struct rtw_fw_state *fw)
{
        int ret = 0;

        en_download_firmware_legacy(rtwdev, true);
        ret = download_firmware_legacy(rtwdev, fw->firmware->data, fw->firmware->size);
        en_download_firmware_legacy(rtwdev, false);
        if (ret)
                goto out;

        ret = download_firmware_validate_legacy(rtwdev);
        if (ret)
                goto out;

        /* reset desc and index */
        rtw_hci_setup(rtwdev);

        rtwdev->h2c.last_box_num = 0;
        rtwdev->h2c.seq = 0;

        set_bit(RTW_FLAG_FW_RUNNING, rtwdev->flags);

out:
        return ret;
}

int rtw_download_firmware(struct rtw_dev *rtwdev, struct rtw_fw_state *fw)
{
        if (rtw_chip_wcpu_11n(rtwdev))
                return __rtw_download_firmware_legacy(rtwdev, fw);

        return __rtw_download_firmware(rtwdev, fw);
}

static u32 get_priority_queues(struct rtw_dev *rtwdev, u32 queues)
{
        const struct rtw_rqpn *rqpn = rtwdev->fifo.rqpn;
        u32 prio_queues = 0;

        if (queues & BIT(IEEE80211_AC_VO))
                prio_queues |= BIT(rqpn->dma_map_vo);
        if (queues & BIT(IEEE80211_AC_VI))
                prio_queues |= BIT(rqpn->dma_map_vi);
        if (queues & BIT(IEEE80211_AC_BE))
                prio_queues |= BIT(rqpn->dma_map_be);
        if (queues & BIT(IEEE80211_AC_BK))
                prio_queues |= BIT(rqpn->dma_map_bk);

        return prio_queues;
}

static void __rtw_mac_flush_prio_queue(struct rtw_dev *rtwdev,
                                       u32 prio_queue, bool drop)
{
        struct rtw_chip_info *chip = rtwdev->chip;
        const struct rtw_prioq_addr *addr;
        bool wsize;
        u16 avail_page, rsvd_page;
        int i;

        if (prio_queue >= RTW_DMA_MAPPING_MAX)
                return;

        addr = &chip->prioq_addrs->prio[prio_queue];
        wsize = chip->prioq_addrs->wsize;

        /* check if all of the reserved pages are available for 100 msecs */
        for (i = 0; i < 5; i++) {
                rsvd_page = wsize ? rtw_read16(rtwdev, addr->rsvd) :
                                    rtw_read8(rtwdev, addr->rsvd);
                avail_page = wsize ? rtw_read16(rtwdev, addr->avail) :
                                     rtw_read8(rtwdev, addr->avail);
                if (rsvd_page == avail_page)
                        return;

                msleep(20);
        }

        /* priority queue is still not empty, throw a warning,
         *
         * Note that if we want to flush the tx queue when having a lot of
         * traffic (ex, 100Mbps up), some of the packets could be dropped.
         * And it requires like ~2secs to flush the full priority queue.
         */
        if (!drop)
                rtw_warn(rtwdev, "timed out to flush queue %d\n", prio_queue);
}

static void rtw_mac_flush_prio_queues(struct rtw_dev *rtwdev,
                                      u32 prio_queues, bool drop)
{
        u32 q;

        for (q = 0; q < RTW_DMA_MAPPING_MAX; q++)
                if (prio_queues & BIT(q))
                        __rtw_mac_flush_prio_queue(rtwdev, q, drop);
}

void rtw_mac_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop)
{
        u32 prio_queues = 0;

        /* If all of the hardware queues are requested to flush,
         * or the priority queues are not mapped yet,
         * flush all of the priority queues
         */
        if (queues == BIT(rtwdev->hw->queues) - 1 || !rtwdev->fifo.rqpn)
                prio_queues = BIT(RTW_DMA_MAPPING_MAX) - 1;
        else
                prio_queues = get_priority_queues(rtwdev, queues);

        rtw_mac_flush_prio_queues(rtwdev, prio_queues, drop);
}
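
/* Editor's note (assumed usage): rtw_mac_flush_queues() is expected to be
 * driven from the mac80211 .flush callback in mac80211.c, roughly as
 *
 *      rtw_mac_flush_queues(rtwdev, queues, drop);
 *
 * with the queue bitmap coming straight from mac80211; this is only a
 * sketch of the caller, not part of this file.
 */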

static int txdma_queue_mapping(struct rtw_dev *rtwdev)
{
        struct rtw_chip_info *chip = rtwdev->chip;
        const struct rtw_rqpn *rqpn = NULL;
        u16 txdma_pq_map = 0;

        switch (rtw_hci_type(rtwdev)) {
        case RTW_HCI_TYPE_PCIE:
                rqpn = &chip->rqpn_table[1];
                break;
        case RTW_HCI_TYPE_USB:
                if (rtwdev->hci.bulkout_num == 2)
                        rqpn = &chip->rqpn_table[2];
                else if (rtwdev->hci.bulkout_num == 3)
                        rqpn = &chip->rqpn_table[3];
                else if (rtwdev->hci.bulkout_num == 4)
                        rqpn = &chip->rqpn_table[4];
                else
                        return -EINVAL;
                break;
        default:
                return -EINVAL;
        }

        rtwdev->fifo.rqpn = rqpn;
        txdma_pq_map |= BIT_TXDMA_HIQ_MAP(rqpn->dma_map_hi);
        txdma_pq_map |= BIT_TXDMA_MGQ_MAP(rqpn->dma_map_mg);
        txdma_pq_map |= BIT_TXDMA_BKQ_MAP(rqpn->dma_map_bk);
        txdma_pq_map |= BIT_TXDMA_BEQ_MAP(rqpn->dma_map_be);
        txdma_pq_map |= BIT_TXDMA_VIQ_MAP(rqpn->dma_map_vi);
        txdma_pq_map |= BIT_TXDMA_VOQ_MAP(rqpn->dma_map_vo);
        rtw_write16(rtwdev, REG_TXDMA_PQ_MAP, txdma_pq_map);

        rtw_write8(rtwdev, REG_CR, 0);
        rtw_write8(rtwdev, REG_CR, MAC_TRX_ENABLE);
        if (rtw_chip_wcpu_11ac(rtwdev))
                rtw_write32(rtwdev, REG_H2CQ_CSR, BIT_H2CQ_FULL);

        return 0;
}

static int set_trx_fifo_info(struct rtw_dev *rtwdev)
{
        struct rtw_fifo_conf *fifo = &rtwdev->fifo;
        struct rtw_chip_info *chip = rtwdev->chip;
        u16 cur_pg_addr;
        u8 csi_buf_pg_num = chip->csi_buf_pg_num;

        /* config rsvd page num */
        fifo->rsvd_drv_pg_num = 8;
        fifo->txff_pg_num = chip->txff_size >> 7;
        if (rtw_chip_wcpu_11n(rtwdev))
                fifo->rsvd_pg_num = fifo->rsvd_drv_pg_num;
        else
                fifo->rsvd_pg_num = fifo->rsvd_drv_pg_num +
                                    RSVD_PG_H2C_EXTRAINFO_NUM +
                                    RSVD_PG_H2C_STATICINFO_NUM +
                                    RSVD_PG_H2CQ_NUM +
                                    RSVD_PG_CPU_INSTRUCTION_NUM +
                                    RSVD_PG_FW_TXBUF_NUM +
                                    csi_buf_pg_num;

        if (fifo->rsvd_pg_num > fifo->txff_pg_num)
                return -ENOMEM;

        fifo->acq_pg_num = fifo->txff_pg_num - fifo->rsvd_pg_num;
        fifo->rsvd_boundary = fifo->txff_pg_num - fifo->rsvd_pg_num;

        cur_pg_addr = fifo->txff_pg_num;
        if (rtw_chip_wcpu_11ac(rtwdev)) {
                cur_pg_addr -= csi_buf_pg_num;
                fifo->rsvd_csibuf_addr = cur_pg_addr;
                cur_pg_addr -= RSVD_PG_FW_TXBUF_NUM;
                fifo->rsvd_fw_txbuf_addr = cur_pg_addr;
                cur_pg_addr -= RSVD_PG_CPU_INSTRUCTION_NUM;
                fifo->rsvd_cpu_instr_addr = cur_pg_addr;
                cur_pg_addr -= RSVD_PG_H2CQ_NUM;
                fifo->rsvd_h2cq_addr = cur_pg_addr;
                cur_pg_addr -= RSVD_PG_H2C_STATICINFO_NUM;
                fifo->rsvd_h2c_sta_info_addr = cur_pg_addr;
                cur_pg_addr -= RSVD_PG_H2C_EXTRAINFO_NUM;
                fifo->rsvd_h2c_info_addr = cur_pg_addr;
        }
        cur_pg_addr -= fifo->rsvd_drv_pg_num;
        fifo->rsvd_drv_addr = cur_pg_addr;

        if (fifo->rsvd_boundary != fifo->rsvd_drv_addr) {
                rtw_err(rtwdev, "wrong rsvd driver address\n");
                return -EINVAL;
        }

        return 0;
}

static int __priority_queue_cfg(struct rtw_dev *rtwdev,
                                const struct rtw_page_table *pg_tbl,
                                u16 pubq_num)
{
        struct rtw_fifo_conf *fifo = &rtwdev->fifo;
        struct rtw_chip_info *chip = rtwdev->chip;

        rtw_write16(rtwdev, REG_FIFOPAGE_INFO_1, pg_tbl->hq_num);
        rtw_write16(rtwdev, REG_FIFOPAGE_INFO_2, pg_tbl->lq_num);
        rtw_write16(rtwdev, REG_FIFOPAGE_INFO_3, pg_tbl->nq_num);
        rtw_write16(rtwdev, REG_FIFOPAGE_INFO_4, pg_tbl->exq_num);
        rtw_write16(rtwdev, REG_FIFOPAGE_INFO_5, pubq_num);
        rtw_write32_set(rtwdev, REG_RQPN_CTRL_2, BIT_LD_RQPN);

        rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2, fifo->rsvd_boundary);
        rtw_write8_set(rtwdev, REG_FWHW_TXQ_CTRL + 2, BIT_EN_WR_FREE_TAIL >> 16);

        rtw_write16(rtwdev, REG_BCNQ_BDNY_V1, fifo->rsvd_boundary);
        rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2 + 2, fifo->rsvd_boundary);
        rtw_write16(rtwdev, REG_BCNQ1_BDNY_V1, fifo->rsvd_boundary);
        rtw_write32(rtwdev, REG_RXFF_BNDY, chip->rxff_size - C2H_PKT_BUF - 1);
        rtw_write8_set(rtwdev, REG_AUTO_LLT_V1, BIT_AUTO_INIT_LLT_V1);

        if (!check_hw_ready(rtwdev, REG_AUTO_LLT_V1, BIT_AUTO_INIT_LLT_V1, 0))
                return -EBUSY;

        rtw_write8(rtwdev, REG_CR + 3, 0);

        return 0;
}

static int __priority_queue_cfg_legacy(struct rtw_dev *rtwdev,
                                       const struct rtw_page_table *pg_tbl,
                                       u16 pubq_num)
{
        struct rtw_fifo_conf *fifo = &rtwdev->fifo;
        struct rtw_chip_info *chip = rtwdev->chip;
        u32 val32;

        val32 = BIT_RQPN_NE(pg_tbl->nq_num, pg_tbl->exq_num);
        rtw_write32(rtwdev, REG_RQPN_NPQ, val32);
        val32 = BIT_RQPN_HLP(pg_tbl->hq_num, pg_tbl->lq_num, pubq_num);
        rtw_write32(rtwdev, REG_RQPN, val32);

        rtw_write8(rtwdev, REG_TRXFF_BNDY, fifo->rsvd_boundary);
        rtw_write16(rtwdev, REG_TRXFF_BNDY + 2, chip->rxff_size - REPORT_BUF - 1);
        rtw_write8(rtwdev, REG_DWBCN0_CTRL + 1, fifo->rsvd_boundary);
        rtw_write8(rtwdev, REG_BCNQ_BDNY, fifo->rsvd_boundary);
        rtw_write8(rtwdev, REG_MGQ_BDNY, fifo->rsvd_boundary);
        rtw_write8(rtwdev, REG_WMAC_LBK_BF_HD, fifo->rsvd_boundary);

        rtw_write32_set(rtwdev, REG_AUTO_LLT, BIT_AUTO_INIT_LLT);

        if (!check_hw_ready(rtwdev, REG_AUTO_LLT, BIT_AUTO_INIT_LLT, 0))
                return -EBUSY;

        return 0;
}

static int priority_queue_cfg(struct rtw_dev *rtwdev)
{
        struct rtw_fifo_conf *fifo = &rtwdev->fifo;
        struct rtw_chip_info *chip = rtwdev->chip;
        const struct rtw_page_table *pg_tbl = NULL;
        u16 pubq_num;
        int ret;

        ret = set_trx_fifo_info(rtwdev);
        if (ret)
                return ret;

        switch (rtw_hci_type(rtwdev)) {
        case RTW_HCI_TYPE_PCIE:
                pg_tbl = &chip->page_table[1];
                break;
        case RTW_HCI_TYPE_USB:
                if (rtwdev->hci.bulkout_num == 2)
                        pg_tbl = &chip->page_table[2];
                else if (rtwdev->hci.bulkout_num == 3)
                        pg_tbl = &chip->page_table[3];
                else if (rtwdev->hci.bulkout_num == 4)
                        pg_tbl = &chip->page_table[4];
                else
                        return -EINVAL;
                break;
        default:
                return -EINVAL;
        }

        pubq_num = fifo->acq_pg_num - pg_tbl->hq_num - pg_tbl->lq_num -
                   pg_tbl->nq_num - pg_tbl->exq_num - pg_tbl->gapq_num;
        if (rtw_chip_wcpu_11n(rtwdev))
                return __priority_queue_cfg_legacy(rtwdev, pg_tbl, pubq_num);
        else
                return __priority_queue_cfg(rtwdev, pg_tbl, pubq_num);
}
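
/* Editor's note (page accounting sketch): with the values programmed above,
 * the TX FIFO is carved up as
 *
 *      txff_pg_num   = txff_size >> 7        (128-byte pages)
 *      rsvd_boundary = txff_pg_num - rsvd_pg_num
 *      pubq_num      = acq_pg_num - hq - lq - nq - exq - gapq
 *
 * i.e. the reserved pages sit at the top of the FIFO, and whatever remains
 * of the AC quota after the per-queue reservations becomes the public pool.
 */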

static int init_h2c(struct rtw_dev *rtwdev)
{
        struct rtw_fifo_conf *fifo = &rtwdev->fifo;
        u8 value8;
        u32 value32;
        u32 h2cq_addr;
        u32 h2cq_size;
        u32 h2cq_free;
        u32 wp, rp;

        if (rtw_chip_wcpu_11n(rtwdev))
                return 0;

        h2cq_addr = fifo->rsvd_h2cq_addr << TX_PAGE_SIZE_SHIFT;
        h2cq_size = RSVD_PG_H2CQ_NUM << TX_PAGE_SIZE_SHIFT;

        value32 = rtw_read32(rtwdev, REG_H2C_HEAD);
        value32 = (value32 & 0xFFFC0000) | h2cq_addr;
        rtw_write32(rtwdev, REG_H2C_HEAD, value32);

        value32 = rtw_read32(rtwdev, REG_H2C_READ_ADDR);
        value32 = (value32 & 0xFFFC0000) | h2cq_addr;
        rtw_write32(rtwdev, REG_H2C_READ_ADDR, value32);

        value32 = rtw_read32(rtwdev, REG_H2C_TAIL);
        value32 &= 0xFFFC0000;
        value32 |= (h2cq_addr + h2cq_size);
        rtw_write32(rtwdev, REG_H2C_TAIL, value32);

        value8 = rtw_read8(rtwdev, REG_H2C_INFO);
        value8 = (u8)((value8 & 0xFC) | 0x01);
        rtw_write8(rtwdev, REG_H2C_INFO, value8);

        value8 = rtw_read8(rtwdev, REG_H2C_INFO);
        value8 = (u8)((value8 & 0xFB) | 0x04);
        rtw_write8(rtwdev, REG_H2C_INFO, value8);

        value8 = rtw_read8(rtwdev, REG_TXDMA_OFFSET_CHK + 1);
        value8 = (u8)((value8 & 0x7f) | 0x80);
        rtw_write8(rtwdev, REG_TXDMA_OFFSET_CHK + 1, value8);

        wp = rtw_read32(rtwdev, REG_H2C_PKT_WRITEADDR) & 0x3FFFF;
        rp = rtw_read32(rtwdev, REG_H2C_PKT_READADDR) & 0x3FFFF;
        h2cq_free = wp >= rp ? h2cq_size - (wp - rp) : rp - wp;

        if (h2cq_size != h2cq_free) {
                rtw_err(rtwdev, "H2C queue mismatch\n");
                return -EINVAL;
        }

        return 0;
}

static int rtw_init_trx_cfg(struct rtw_dev *rtwdev)
{
        int ret;

        ret = txdma_queue_mapping(rtwdev);
        if (ret)
                return ret;

        ret = priority_queue_cfg(rtwdev);
        if (ret)
                return ret;

        ret = init_h2c(rtwdev);
        if (ret)
                return ret;

        return 0;
}

static int rtw_drv_info_cfg(struct rtw_dev *rtwdev)
{
        u8 value8;

        rtw_write8(rtwdev, REG_RX_DRVINFO_SZ, PHY_STATUS_SIZE);
        if (rtw_chip_wcpu_11ac(rtwdev)) {
                value8 = rtw_read8(rtwdev, REG_TRXFF_BNDY + 1);
                value8 &= 0xF0;
                /* For rxdesc len = 0 issue */
                value8 |= 0xF;
                rtw_write8(rtwdev, REG_TRXFF_BNDY + 1, value8);
        }
        rtw_write32_set(rtwdev, REG_RCR, BIT_APP_PHYSTS);
        rtw_write32_clr(rtwdev, REG_WMAC_OPTION_FUNCTION + 4, BIT(8) | BIT(9));

        return 0;
}

int rtw_mac_init(struct rtw_dev *rtwdev)
{
        struct rtw_chip_info *chip = rtwdev->chip;
        int ret;

        ret = rtw_init_trx_cfg(rtwdev);
        if (ret)
                return ret;

        ret = chip->ops->mac_init(rtwdev);
        if (ret)
                return ret;

        ret = rtw_drv_info_cfg(rtwdev);
        if (ret)
                return ret;

        rtw_hci_interface_cfg(rtwdev);

        return 0;
}
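
/* Editor's note (assumed call order): the core in main.c is expected to
 * drive the non-static helpers in this file roughly as
 *
 *      rtw_mac_power_on(rtwdev);
 *      rtw_download_firmware(rtwdev, fw);
 *      rtw_mac_init(rtwdev);
 *      ...
 *      rtw_mac_power_off(rtwdev);
 *
 * see rtw_power_on() / rtw_core_start() there for the authoritative
 * sequence; this note is only a sketch.
 */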