// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019  Realtek Corporation
 */

#include "main.h"
#include "mac.h"
#include "reg.h"
#include "fw.h"
#include "debug.h"
#include "sdio.h"

void rtw_set_channel_mac(struct rtw_dev *rtwdev, u8 channel, u8 bw,
			 u8 primary_ch_idx)
{
	u8 txsc40 = 0, txsc20 = 0;
	u32 value32;
	u8 value8;

	txsc20 = primary_ch_idx;
	if (bw == RTW_CHANNEL_WIDTH_80) {
		if (txsc20 == RTW_SC_20_UPPER || txsc20 == RTW_SC_20_UPMOST)
			txsc40 = RTW_SC_40_UPPER;
		else
			txsc40 = RTW_SC_40_LOWER;
	}
	rtw_write8(rtwdev, REG_DATA_SC,
		   BIT_TXSC_20M(txsc20) | BIT_TXSC_40M(txsc40));

	value32 = rtw_read32(rtwdev, REG_WMAC_TRXPTCL_CTL);
	value32 &= ~BIT_RFMOD;
	switch (bw) {
	case RTW_CHANNEL_WIDTH_80:
		value32 |= BIT_RFMOD_80M;
		break;
	case RTW_CHANNEL_WIDTH_40:
		value32 |= BIT_RFMOD_40M;
		break;
	case RTW_CHANNEL_WIDTH_20:
	default:
		break;
	}
	rtw_write32(rtwdev, REG_WMAC_TRXPTCL_CTL, value32);

	if (rtw_chip_wcpu_11n(rtwdev))
		return;

	value32 = rtw_read32(rtwdev, REG_AFE_CTRL1) & ~(BIT_MAC_CLK_SEL);
	value32 |= (MAC_CLK_HW_DEF_80M << BIT_SHIFT_MAC_CLK_SEL);
	rtw_write32(rtwdev, REG_AFE_CTRL1, value32);

	rtw_write8(rtwdev, REG_USTIME_TSF, MAC_CLK_SPEED);
	rtw_write8(rtwdev, REG_USTIME_EDCA, MAC_CLK_SPEED);

	value8 = rtw_read8(rtwdev, REG_CCK_CHECK);
	value8 &= ~BIT_CHECK_CCK_EN;
	if (IS_CH_5G_BAND(channel))
		value8 |= BIT_CHECK_CCK_EN;
	rtw_write8(rtwdev, REG_CCK_CHECK, value8);
}
EXPORT_SYMBOL(rtw_set_channel_mac);

static int rtw_mac_pre_system_cfg(struct rtw_dev *rtwdev)
{
	unsigned int retry;
	u32 value32;
	u8 value8;

	rtw_write8(rtwdev, REG_RSV_CTRL, 0);

	if (rtw_chip_wcpu_11n(rtwdev)) {
		if (rtw_read32(rtwdev, REG_SYS_CFG1) & BIT_LDO)
			rtw_write8(rtwdev, REG_LDO_SWR_CTRL, LDO_SEL);
		else
			rtw_write8(rtwdev, REG_LDO_SWR_CTRL, SPS_SEL);
		return 0;
	}

	switch (rtw_hci_type(rtwdev)) {
	case RTW_HCI_TYPE_PCIE:
		rtw_write32_set(rtwdev, REG_HCI_OPT_CTRL, BIT_USB_SUS_DIS);
		break;
	case RTW_HCI_TYPE_SDIO:
		rtw_write8_clr(rtwdev, REG_SDIO_HSUS_CTRL, BIT_HCI_SUS_REQ);

		for (retry = 0; retry < RTW_PWR_POLLING_CNT; retry++) {
			if (rtw_read8(rtwdev, REG_SDIO_HSUS_CTRL) & BIT_HCI_RESUME_RDY)
				break;

			usleep_range(10, 50);
		}

		if (retry == RTW_PWR_POLLING_CNT) {
			rtw_err(rtwdev, "failed to poll REG_SDIO_HSUS_CTRL[1]");
			return -ETIMEDOUT;
		}

		if (rtw_sdio_is_sdio30_supported(rtwdev))
			rtw_write8_set(rtwdev, REG_HCI_OPT_CTRL + 2,
				       BIT_SDIO_PAD_E5 >> 16);
		else
			rtw_write8_clr(rtwdev, REG_HCI_OPT_CTRL + 2,
				       BIT_SDIO_PAD_E5 >> 16);
		break;
	case RTW_HCI_TYPE_USB:
		break;
	default:
		return -EINVAL;
	}

	/* config PIN Mux */
	value32 = rtw_read32(rtwdev, REG_PAD_CTRL1);
	value32 |= BIT_PAPE_WLBT_SEL | BIT_LNAON_WLBT_SEL;
	rtw_write32(rtwdev, REG_PAD_CTRL1, value32);

	value32 = rtw_read32(rtwdev, REG_LED_CFG);
	value32 &= ~(BIT_PAPE_SEL_EN | BIT_LNAON_SEL_EN);
	rtw_write32(rtwdev, REG_LED_CFG, value32);

	value32 = rtw_read32(rtwdev, REG_GPIO_MUXCFG);
	value32 |= BIT_WLRFE_4_5_EN;
	rtw_write32(rtwdev, REG_GPIO_MUXCFG, value32);

	/* disable BB/RF */
	value8 = rtw_read8(rtwdev, REG_SYS_FUNC_EN);
	value8 &= ~(BIT_FEN_BB_RSTB | BIT_FEN_BB_GLB_RST);
	rtw_write8(rtwdev, REG_SYS_FUNC_EN, value8);

	value8 = rtw_read8(rtwdev, REG_RF_CTRL);
	value8 &= ~(BIT_RF_SDM_RSTB | BIT_RF_RSTB | BIT_RF_EN);
	rtw_write8(rtwdev, REG_RF_CTRL, value8);

	value32 = rtw_read32(rtwdev, REG_WLRF1);
	value32 &= ~BIT_WLRF1_BBRF_EN;
	rtw_write32(rtwdev, REG_WLRF1, value32);

	return 0;
}

static bool do_pwr_poll_cmd(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 target)
{
	u32 val;

	target &= mask;

	return read_poll_timeout_atomic(rtw_read8, val, (val & mask) == target,
					50, 50 * RTW_PWR_POLLING_CNT, false,
					rtwdev, addr) == 0;
}

static int rtw_pwr_cmd_polling(struct rtw_dev *rtwdev,
			       const struct rtw_pwr_seq_cmd *cmd)
{
	u8 value;
	u32 offset;

	if (cmd->base == RTW_PWR_ADDR_SDIO)
		offset = cmd->offset | SDIO_LOCAL_OFFSET;
	else
		offset = cmd->offset;

	if (do_pwr_poll_cmd(rtwdev, offset, cmd->mask, cmd->value))
		return 0;

	if (rtw_hci_type(rtwdev) != RTW_HCI_TYPE_PCIE)
		goto err;

	/* if PCIE, toggle BIT_PFM_WOWL and try again */
	value = rtw_read8(rtwdev, REG_SYS_PW_CTRL);
	if (rtwdev->chip->id == RTW_CHIP_TYPE_8723D)
		rtw_write8(rtwdev, REG_SYS_PW_CTRL, value & ~BIT_PFM_WOWL);
	rtw_write8(rtwdev, REG_SYS_PW_CTRL, value | BIT_PFM_WOWL);
	rtw_write8(rtwdev, REG_SYS_PW_CTRL, value & ~BIT_PFM_WOWL);
	if (rtwdev->chip->id == RTW_CHIP_TYPE_8723D)
		rtw_write8(rtwdev, REG_SYS_PW_CTRL, value | BIT_PFM_WOWL);

	if (do_pwr_poll_cmd(rtwdev, offset, cmd->mask, cmd->value))
		return 0;

err:
	rtw_err(rtwdev, "failed to poll offset=0x%x mask=0x%x value=0x%x\n",
		offset, cmd->mask, cmd->value);
	return -EBUSY;
}

static int rtw_sub_pwr_seq_parser(struct rtw_dev *rtwdev, u8 intf_mask,
				  u8 cut_mask,
				  const struct rtw_pwr_seq_cmd *cmd)
{
	const struct rtw_pwr_seq_cmd *cur_cmd;
	u32 offset;
	u8 value;

	for (cur_cmd = cmd; cur_cmd->cmd != RTW_PWR_CMD_END; cur_cmd++) {
		if (!(cur_cmd->intf_mask & intf_mask) ||
		    !(cur_cmd->cut_mask & cut_mask))
			continue;

		switch (cur_cmd->cmd) {
		case RTW_PWR_CMD_WRITE:
			offset = cur_cmd->offset;

			if (cur_cmd->base == RTW_PWR_ADDR_SDIO)
				offset |= SDIO_LOCAL_OFFSET;

			value = rtw_read8(rtwdev, offset);
			value &= ~cur_cmd->mask;
			value |= (cur_cmd->value & cur_cmd->mask);
			rtw_write8(rtwdev, offset, value);
			break;
		case RTW_PWR_CMD_POLLING:
			if (rtw_pwr_cmd_polling(rtwdev, cur_cmd))
				return -EBUSY;
			break;
		case RTW_PWR_CMD_DELAY:
			if (cur_cmd->value == RTW_PWR_DELAY_US)
				udelay(cur_cmd->offset);
			else
				mdelay(cur_cmd->offset);
			break;
		case RTW_PWR_CMD_READ:
			break;
		default:
			return -EINVAL;
		}
	}

	return 0;
}

static int rtw_pwr_seq_parser(struct rtw_dev *rtwdev,
			      const struct rtw_pwr_seq_cmd **cmd_seq)
{
	u8 cut_mask;
	u8 intf_mask;
	u8 cut;
	u32 idx = 0;
	const struct rtw_pwr_seq_cmd *cmd;
	int ret;

	cut = rtwdev->hal.cut_version;
	cut_mask = cut_version_to_mask(cut);
	switch (rtw_hci_type(rtwdev)) {
	case RTW_HCI_TYPE_PCIE:
		intf_mask = RTW_PWR_INTF_PCI_MSK;
		break;
	case RTW_HCI_TYPE_USB:
		intf_mask = RTW_PWR_INTF_USB_MSK;
		break;
	case RTW_HCI_TYPE_SDIO:
		intf_mask = RTW_PWR_INTF_SDIO_MSK;
		break;
	default:
		return -EINVAL;
	}

	do {
		cmd = cmd_seq[idx];
		if (!cmd)
			break;

		ret = rtw_sub_pwr_seq_parser(rtwdev, intf_mask, cut_mask, cmd);
		if (ret)
			return ret;

		idx++;
	} while (1);

	return 0;
}
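
/* Run the chip's power-on or power-off sequence.
 *
 * The current power state is inferred from REG_CR (a readback of 0xea is
 * treated as "MAC not powered"); asking for the state we are already in
 * returns -EALREADY so the caller can recover.  On SDIO the host interrupt
 * mask is saved and cleared around the sequence, and on 11ac-generation
 * chips the RPWM bit is toggled first if firmware from a previous run is
 * still present.
 */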
static int rtw_mac_power_switch(struct rtw_dev *rtwdev, bool pwr_on)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	const struct rtw_pwr_seq_cmd **pwr_seq;
	u32 imr = 0;
	u8 rpwm;
	bool cur_pwr;
	int ret;

	if (rtw_chip_wcpu_11ac(rtwdev)) {
		rpwm = rtw_read8(rtwdev, rtwdev->hci.rpwm_addr);

		/* Check FW still exist or not */
		if (rtw_read16(rtwdev, REG_MCUFW_CTRL) == 0xC078) {
			rpwm = (rpwm ^ BIT_RPWM_TOGGLE) & BIT_RPWM_TOGGLE;
			rtw_write8(rtwdev, rtwdev->hci.rpwm_addr, rpwm);
		}
	}

	if (rtw_read8(rtwdev, REG_CR) == 0xea)
		cur_pwr = false;
	else if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB &&
		 (rtw_read8(rtwdev, REG_SYS_STATUS1 + 1) & BIT(0)))
		cur_pwr = false;
	else
		cur_pwr = true;

	if (pwr_on == cur_pwr)
		return -EALREADY;

	if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_SDIO) {
		imr = rtw_read32(rtwdev, REG_SDIO_HIMR);
		rtw_write32(rtwdev, REG_SDIO_HIMR, 0);
	}

	if (!pwr_on)
		clear_bit(RTW_FLAG_POWERON, rtwdev->flags);

	pwr_seq = pwr_on ? chip->pwr_on_seq : chip->pwr_off_seq;
	ret = rtw_pwr_seq_parser(rtwdev, pwr_seq);

	if (pwr_on && rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB) {
		if (chip->id == RTW_CHIP_TYPE_8822C ||
		    chip->id == RTW_CHIP_TYPE_8822B ||
		    chip->id == RTW_CHIP_TYPE_8821C)
			rtw_write8_clr(rtwdev, REG_SYS_STATUS1 + 1, BIT(0));
	}

	if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_SDIO)
		rtw_write32(rtwdev, REG_SDIO_HIMR, imr);

	if (!ret && pwr_on)
		set_bit(RTW_FLAG_POWERON, rtwdev->flags);

	return ret;
}

static int __rtw_mac_init_system_cfg(struct rtw_dev *rtwdev)
{
	u8 sys_func_en = rtwdev->chip->sys_func_en;
	u8 value8;
	u32 value, tmp;

	value = rtw_read32(rtwdev, REG_CPU_DMEM_CON);
	value |= BIT_WL_PLATFORM_RST | BIT_DDMA_EN;
	rtw_write32(rtwdev, REG_CPU_DMEM_CON, value);

	rtw_write8_set(rtwdev, REG_SYS_FUNC_EN + 1, sys_func_en);
	value8 = (rtw_read8(rtwdev, REG_CR_EXT + 3) & 0xF0) | 0x0C;
	rtw_write8(rtwdev, REG_CR_EXT + 3, value8);

	/* disable boot-from-flash for driver's DL FW */
	tmp = rtw_read32(rtwdev, REG_MCUFW_CTRL);
	if (tmp & BIT_BOOT_FSPI_EN) {
		rtw_write32(rtwdev, REG_MCUFW_CTRL, tmp & (~BIT_BOOT_FSPI_EN));
		value = rtw_read32(rtwdev, REG_GPIO_MUXCFG) & (~BIT_FSPI_EN);
		rtw_write32(rtwdev, REG_GPIO_MUXCFG, value);
	}

	return 0;
}

static int __rtw_mac_init_system_cfg_legacy(struct rtw_dev *rtwdev)
{
	rtw_write8(rtwdev, REG_CR, 0xff);
	mdelay(2);
	rtw_write8(rtwdev, REG_HWSEQ_CTRL, 0x7f);
	mdelay(2);

	rtw_write8_set(rtwdev, REG_SYS_CLKR, BIT_WAKEPAD_EN);
	rtw_write16_clr(rtwdev, REG_GPIO_MUXCFG, BIT_EN_SIC);

	rtw_write16(rtwdev, REG_CR, 0x2ff);

	return 0;
}

static int rtw_mac_init_system_cfg(struct rtw_dev *rtwdev)
{
	if (rtw_chip_wcpu_11n(rtwdev))
		return __rtw_mac_init_system_cfg_legacy(rtwdev);

	return __rtw_mac_init_system_cfg(rtwdev);
}

int rtw_mac_power_on(struct rtw_dev *rtwdev)
{
	int ret = 0;

	ret = rtw_mac_pre_system_cfg(rtwdev);
	if (ret)
		goto err;

	ret = rtw_mac_power_switch(rtwdev, true);
	if (ret == -EALREADY) {
		rtw_mac_power_switch(rtwdev, false);

		ret = rtw_mac_pre_system_cfg(rtwdev);
		if (ret)
			goto err;

		ret = rtw_mac_power_switch(rtwdev, true);
		if (ret)
			goto err;
	} else if (ret) {
		goto err;
	}

	ret = rtw_mac_init_system_cfg(rtwdev);
	if (ret)
		goto err;

	return 0;

err:
	rtw_err(rtwdev, "mac power on failed");
	return ret;
}

void rtw_mac_power_off(struct rtw_dev *rtwdev)
{
	rtw_mac_power_switch(rtwdev, false);
}

static bool check_firmware_size(const u8 *data, u32 size)
{
	const struct rtw_fw_hdr *fw_hdr = (const struct rtw_fw_hdr *)data;
	u32 dmem_size;
	u32 imem_size;
	u32 emem_size;
	u32 real_size;

	dmem_size = le32_to_cpu(fw_hdr->dmem_size);
	imem_size = le32_to_cpu(fw_hdr->imem_size);
	emem_size = (fw_hdr->mem_usage & BIT(4)) ?
		    le32_to_cpu(fw_hdr->emem_size) : 0;

	dmem_size += FW_HDR_CHKSUM_SIZE;
	imem_size += FW_HDR_CHKSUM_SIZE;
	emem_size += emem_size ? FW_HDR_CHKSUM_SIZE : 0;
	real_size = FW_HDR_SIZE + dmem_size + imem_size + emem_size;
	if (real_size != size)
		return false;

	return true;
}

static void wlan_cpu_enable(struct rtw_dev *rtwdev, bool enable)
{
	if (enable) {
		/* cpu io interface enable */
		rtw_write8_set(rtwdev, REG_RSV_CTRL + 1, BIT_WLMCU_IOIF);

		/* cpu enable */
		rtw_write8_set(rtwdev, REG_SYS_FUNC_EN + 1, BIT_FEN_CPUEN);
	} else {
		/* cpu disable */
		rtw_write8_clr(rtwdev, REG_SYS_FUNC_EN + 1, BIT_FEN_CPUEN);

		/* cpu io interface disable */
		rtw_write8_clr(rtwdev, REG_RSV_CTRL + 1, BIT_WLMCU_IOIF);
	}
}

#define DLFW_RESTORE_REG_NUM 6

static void download_firmware_reg_backup(struct rtw_dev *rtwdev,
					 struct rtw_backup_info *bckp)
{
	u8 tmp;
	u8 bckp_idx = 0;

	/* set HIQ to hi priority */
	bckp[bckp_idx].len = 1;
	bckp[bckp_idx].reg = REG_TXDMA_PQ_MAP + 1;
	bckp[bckp_idx].val = rtw_read8(rtwdev, REG_TXDMA_PQ_MAP + 1);
	bckp_idx++;
	tmp = RTW_DMA_MAPPING_HIGH << 6;
	rtw_write8(rtwdev, REG_TXDMA_PQ_MAP + 1, tmp);

	/* DLFW only use HIQ, map HIQ to hi priority */
	bckp[bckp_idx].len = 1;
	bckp[bckp_idx].reg = REG_CR;
	bckp[bckp_idx].val = rtw_read8(rtwdev, REG_CR);
	bckp_idx++;
	bckp[bckp_idx].len = 4;
	bckp[bckp_idx].reg = REG_H2CQ_CSR;
	bckp[bckp_idx].val = BIT_H2CQ_FULL;
	bckp_idx++;
	tmp = BIT_HCI_TXDMA_EN | BIT_TXDMA_EN;
	rtw_write8(rtwdev, REG_CR, tmp);
	rtw_write32(rtwdev, REG_H2CQ_CSR, BIT_H2CQ_FULL);

	/* Config hi priority queue and public priority queue page number */
	bckp[bckp_idx].len = 2;
	bckp[bckp_idx].reg = REG_FIFOPAGE_INFO_1;
	bckp[bckp_idx].val = rtw_read16(rtwdev, REG_FIFOPAGE_INFO_1);
	bckp_idx++;
	bckp[bckp_idx].len = 4;
	bckp[bckp_idx].reg = REG_RQPN_CTRL_2;
	bckp[bckp_idx].val = rtw_read32(rtwdev, REG_RQPN_CTRL_2) | BIT_LD_RQPN;
	bckp_idx++;
	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_1, 0x200);
	rtw_write32(rtwdev, REG_RQPN_CTRL_2, bckp[bckp_idx - 1].val);

	if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_SDIO)
		rtw_read32(rtwdev, REG_SDIO_FREE_TXPG);

	/* Disable beacon related functions */
	tmp = rtw_read8(rtwdev, REG_BCN_CTRL);
	bckp[bckp_idx].len = 1;
	bckp[bckp_idx].reg = REG_BCN_CTRL;
	bckp[bckp_idx].val = tmp;
	bckp_idx++;
	tmp = (u8)((tmp & (~BIT_EN_BCN_FUNCTION)) | BIT_DIS_TSF_UDT);
	rtw_write8(rtwdev, REG_BCN_CTRL, tmp);

	WARN(bckp_idx != DLFW_RESTORE_REG_NUM, "wrong backup number\n");
}

static void download_firmware_reset_platform(struct rtw_dev *rtwdev)
{
	/* hold the WLAN platform in reset and gate the MCU clock */
	rtw_write8_clr(rtwdev, REG_CPU_DMEM_CON + 2, BIT_WL_PLATFORM_RST >> 16);
	rtw_write8_clr(rtwdev, REG_SYS_CLK_CTRL + 1, BIT_CPU_CLK_EN >> 8);
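	/* release the platform reset and ungate the MCU clock again */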
	rtw_write8_set(rtwdev, REG_CPU_DMEM_CON + 2, BIT_WL_PLATFORM_RST >> 16);
	rtw_write8_set(rtwdev, REG_SYS_CLK_CTRL + 1, BIT_CPU_CLK_EN >> 8);
}

static void download_firmware_reg_restore(struct rtw_dev *rtwdev,
					  struct rtw_backup_info *bckp,
					  u8 bckp_num)
{
	rtw_restore_reg(rtwdev, bckp, bckp_num);
}

#define TX_DESC_SIZE 48

static int send_firmware_pkt_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr,
				       const u8 *data, u32 size)
{
	u8 *buf;
	int ret;

	buf = kmemdup(data, size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = rtw_fw_write_data_rsvd_page(rtwdev, pg_addr, buf, size);
	kfree(buf);
	return ret;
}

static int
send_firmware_pkt(struct rtw_dev *rtwdev, u16 pg_addr, const u8 *data, u32 size)
{
	int ret;

	if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB &&
	    !((size + TX_DESC_SIZE) & (512 - 1)))
		size += 1;

	ret = send_firmware_pkt_rsvd_page(rtwdev, pg_addr, data, size);
	if (ret)
		rtw_err(rtwdev, "failed to download rsvd page\n");

	return ret;
}

static int
iddma_enable(struct rtw_dev *rtwdev, u32 src, u32 dst, u32 ctrl)
{
	rtw_write32(rtwdev, REG_DDMA_CH0SA, src);
	rtw_write32(rtwdev, REG_DDMA_CH0DA, dst);
	rtw_write32(rtwdev, REG_DDMA_CH0CTRL, ctrl);

	if (!check_hw_ready(rtwdev, REG_DDMA_CH0CTRL, BIT_DDMACH0_OWN, 0))
		return -EBUSY;

	return 0;
}

static int iddma_download_firmware(struct rtw_dev *rtwdev, u32 src, u32 dst,
				   u32 len, u8 first)
{
	u32 ch0_ctrl = BIT_DDMACH0_CHKSUM_EN | BIT_DDMACH0_OWN;

	if (!check_hw_ready(rtwdev, REG_DDMA_CH0CTRL, BIT_DDMACH0_OWN, 0))
		return -EBUSY;

	ch0_ctrl |= len & BIT_MASK_DDMACH0_DLEN;
	if (!first)
		ch0_ctrl |= BIT_DDMACH0_CHKSUM_CONT;

	if (iddma_enable(rtwdev, src, dst, ch0_ctrl))
		return -EBUSY;

	return 0;
}

int rtw_ddma_to_fw_fifo(struct rtw_dev *rtwdev, u32 ocp_src, u32 size)
{
	u32 ch0_ctrl = BIT_DDMACH0_OWN | BIT_DDMACH0_DDMA_MODE;

	if (!check_hw_ready(rtwdev, REG_DDMA_CH0CTRL, BIT_DDMACH0_OWN, 0)) {
		rtw_dbg(rtwdev, RTW_DBG_FW, "busy to start ddma\n");
		return -EBUSY;
	}

	ch0_ctrl |= size & BIT_MASK_DDMACH0_DLEN;

	if (iddma_enable(rtwdev, ocp_src, OCPBASE_RXBUF_FW_88XX, ch0_ctrl)) {
		rtw_dbg(rtwdev, RTW_DBG_FW, "busy to complete ddma\n");
		return -EBUSY;
	}

	return 0;
}

static bool
check_fw_checksum(struct rtw_dev *rtwdev, u32 addr)
{
	u8 fw_ctrl;

	fw_ctrl = rtw_read8(rtwdev, REG_MCUFW_CTRL);

	if (rtw_read32(rtwdev, REG_DDMA_CH0CTRL) & BIT_DDMACH0_CHKSUM_STS) {
		if (addr < OCPBASE_DMEM_88XX) {
			fw_ctrl |= BIT_IMEM_DW_OK;
			fw_ctrl &= ~BIT_IMEM_CHKSUM_OK;
			rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
		} else {
			fw_ctrl |= BIT_DMEM_DW_OK;
			fw_ctrl &= ~BIT_DMEM_CHKSUM_OK;
			rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
		}

		rtw_err(rtwdev, "invalid fw checksum\n");

		return false;
	}

	if (addr < OCPBASE_DMEM_88XX) {
		fw_ctrl |= (BIT_IMEM_DW_OK | BIT_IMEM_CHKSUM_OK);
		rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
	} else {
		fw_ctrl |= (BIT_DMEM_DW_OK | BIT_DMEM_CHKSUM_OK);
		rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
	}

	return true;
}
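
/* Push one firmware image section into WLAN memory.
 *
 * The image is cut into chunks of at most 4 KiB (max_size below); each
 * chunk is written into the reserved-page area of the TX FIFO and then
 * moved to its destination address with the indirect DDMA engine.  The
 * DDMA checksum is accumulated across chunks and verified once the whole
 * section has been transferred.
 */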
static int
download_firmware_to_mem(struct rtw_dev *rtwdev, const u8 *data,
			 u32 src, u32 dst, u32 size)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	u32 desc_size = chip->tx_pkt_desc_sz;
	u8 first_part;
	u32 mem_offset;
	u32 residue_size;
	u32 pkt_size;
	u32 max_size = 0x1000;
	u32 val;
	int ret;

	mem_offset = 0;
	first_part = 1;
	residue_size = size;

	val = rtw_read32(rtwdev, REG_DDMA_CH0CTRL);
	val |= BIT_DDMACH0_RESET_CHKSUM_STS;
	rtw_write32(rtwdev, REG_DDMA_CH0CTRL, val);

	while (residue_size) {
		if (residue_size >= max_size)
			pkt_size = max_size;
		else
			pkt_size = residue_size;

		ret = send_firmware_pkt(rtwdev, (u16)(src >> 7),
					data + mem_offset, pkt_size);
		if (ret)
			return ret;

		ret = iddma_download_firmware(rtwdev, OCPBASE_TXBUF_88XX +
					      src + desc_size,
					      dst + mem_offset, pkt_size,
					      first_part);
		if (ret)
			return ret;

		first_part = 0;
		mem_offset += pkt_size;
		residue_size -= pkt_size;
	}

	if (!check_fw_checksum(rtwdev, dst))
		return -EINVAL;

	return 0;
}

static int
start_download_firmware(struct rtw_dev *rtwdev, const u8 *data, u32 size)
{
	const struct rtw_fw_hdr *fw_hdr = (const struct rtw_fw_hdr *)data;
	const u8 *cur_fw;
	u16 val;
	u32 imem_size;
	u32 dmem_size;
	u32 emem_size;
	u32 addr;
	int ret;

	dmem_size = le32_to_cpu(fw_hdr->dmem_size);
	imem_size = le32_to_cpu(fw_hdr->imem_size);
	emem_size = (fw_hdr->mem_usage & BIT(4)) ?
		    le32_to_cpu(fw_hdr->emem_size) : 0;
	dmem_size += FW_HDR_CHKSUM_SIZE;
	imem_size += FW_HDR_CHKSUM_SIZE;
	emem_size += emem_size ? FW_HDR_CHKSUM_SIZE : 0;

	val = (u16)(rtw_read16(rtwdev, REG_MCUFW_CTRL) & 0x3800);
	val |= BIT_MCUFWDL_EN;
	rtw_write16(rtwdev, REG_MCUFW_CTRL, val);

	cur_fw = data + FW_HDR_SIZE;
	addr = le32_to_cpu(fw_hdr->dmem_addr);
	addr &= ~BIT(31);
	ret = download_firmware_to_mem(rtwdev, cur_fw, 0, addr, dmem_size);
	if (ret)
		return ret;

	cur_fw = data + FW_HDR_SIZE + dmem_size;
	addr = le32_to_cpu(fw_hdr->imem_addr);
	addr &= ~BIT(31);
	ret = download_firmware_to_mem(rtwdev, cur_fw, 0, addr, imem_size);
	if (ret)
		return ret;

	if (emem_size) {
		cur_fw = data + FW_HDR_SIZE + dmem_size + imem_size;
		addr = le32_to_cpu(fw_hdr->emem_addr);
		addr &= ~BIT(31);
		ret = download_firmware_to_mem(rtwdev, cur_fw, 0, addr,
					       emem_size);
		if (ret)
			return ret;
	}

	return 0;
}

static int download_firmware_validate(struct rtw_dev *rtwdev)
{
	u32 fw_key;

	if (!check_hw_ready(rtwdev, REG_MCUFW_CTRL, FW_READY_MASK, FW_READY)) {
		fw_key = rtw_read32(rtwdev, REG_FW_DBG7) & FW_KEY_MASK;
		if (fw_key == ILLEGAL_KEY_GROUP)
			rtw_err(rtwdev, "invalid fw key\n");
		return -EINVAL;
	}

	return 0;
}

static void download_firmware_end_flow(struct rtw_dev *rtwdev)
{
	u16 fw_ctrl;

	rtw_write32(rtwdev, REG_TXDMA_STATUS, BTI_PAGE_OVF);

	/* Check IMEM & DMEM checksum is OK or not */
	fw_ctrl = rtw_read16(rtwdev, REG_MCUFW_CTRL);
	if ((fw_ctrl & BIT_CHECK_SUM_OK) != BIT_CHECK_SUM_OK)
		return;

	fw_ctrl = (fw_ctrl | BIT_FW_DW_RDY) & ~BIT_MCUFWDL_EN;
	rtw_write16(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
}

static int __rtw_download_firmware(struct rtw_dev *rtwdev,
				   struct rtw_fw_state *fw)
{
	struct rtw_backup_info bckp[DLFW_RESTORE_REG_NUM];
	const u8 *data = fw->firmware->data;
	u32 size = fw->firmware->size;
	u32 ltecoex_bckp;
	int ret;

	if (!check_firmware_size(data, size))
		return -EINVAL;
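
	/* back up LTE coex register 0x38; it is restored after the WLAN CPU
	 * is re-enabled below
	 */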
	if (!ltecoex_read_reg(rtwdev, 0x38, &ltecoex_bckp))
		return -EBUSY;

	wlan_cpu_enable(rtwdev, false);

	download_firmware_reg_backup(rtwdev, bckp);
	download_firmware_reset_platform(rtwdev);

	ret = start_download_firmware(rtwdev, data, size);
	if (ret)
		goto dlfw_fail;

	download_firmware_reg_restore(rtwdev, bckp, DLFW_RESTORE_REG_NUM);

	download_firmware_end_flow(rtwdev);

	wlan_cpu_enable(rtwdev, true);

	if (!ltecoex_reg_write(rtwdev, 0x38, ltecoex_bckp)) {
		ret = -EBUSY;
		goto dlfw_fail;
	}

	ret = download_firmware_validate(rtwdev);
	if (ret)
		goto dlfw_fail;

	/* reset desc and index */
	rtw_hci_setup(rtwdev);

	rtwdev->h2c.last_box_num = 0;
	rtwdev->h2c.seq = 0;

	set_bit(RTW_FLAG_FW_RUNNING, rtwdev->flags);

	return 0;

dlfw_fail:
	/* Disable FWDL_EN */
	rtw_write8_clr(rtwdev, REG_MCUFW_CTRL, BIT_MCUFWDL_EN);
	rtw_write8_set(rtwdev, REG_SYS_FUNC_EN + 1, BIT_FEN_CPUEN);

	return ret;
}

static void en_download_firmware_legacy(struct rtw_dev *rtwdev, bool en)
{
	int try;

	if (en) {
		wlan_cpu_enable(rtwdev, false);
		wlan_cpu_enable(rtwdev, true);

		rtw_write8_set(rtwdev, REG_MCUFW_CTRL, BIT_MCUFWDL_EN);

		for (try = 0; try < 10; try++) {
			if (rtw_read8(rtwdev, REG_MCUFW_CTRL) & BIT_MCUFWDL_EN)
				goto fwdl_ready;
			rtw_write8_set(rtwdev, REG_MCUFW_CTRL, BIT_MCUFWDL_EN);
			msleep(20);
		}
		rtw_err(rtwdev, "failed to check fw download ready\n");
fwdl_ready:
		rtw_write32_clr(rtwdev, REG_MCUFW_CTRL, BIT_ROM_DLEN);
	} else {
		rtw_write8_clr(rtwdev, REG_MCUFW_CTRL, BIT_MCUFWDL_EN);
	}
}

static void
write_firmware_page(struct rtw_dev *rtwdev, u32 page, const u8 *data, u32 size)
{
	u32 val32;
	u32 block_nr;
	u32 remain_size;
	u32 write_addr = FW_START_ADDR_LEGACY;
	const __le32 *ptr = (const __le32 *)data;
	u32 block;
	__le32 remain_data = 0;

	block_nr = size >> DLFW_BLK_SIZE_SHIFT_LEGACY;
	remain_size = size & (DLFW_BLK_SIZE_LEGACY - 1);

	val32 = rtw_read32(rtwdev, REG_MCUFW_CTRL);
	val32 &= ~BIT_ROM_PGE;
	val32 |= (page << BIT_SHIFT_ROM_PGE) & BIT_ROM_PGE;
	rtw_write32(rtwdev, REG_MCUFW_CTRL, val32);

	for (block = 0; block < block_nr; block++) {
		rtw_write32(rtwdev, write_addr, le32_to_cpu(*ptr));

		write_addr += DLFW_BLK_SIZE_LEGACY;
		ptr++;
	}

	if (remain_size) {
		memcpy(&remain_data, ptr, remain_size);
		rtw_write32(rtwdev, write_addr, le32_to_cpu(remain_data));
	}
}

static int
download_firmware_legacy(struct rtw_dev *rtwdev, const u8 *data, u32 size)
{
	u32 page;
	u32 total_page;
	u32 last_page_size;

	data += sizeof(struct rtw_fw_hdr_legacy);
	size -= sizeof(struct rtw_fw_hdr_legacy);

	total_page = size >> DLFW_PAGE_SIZE_SHIFT_LEGACY;
	last_page_size = size & (DLFW_PAGE_SIZE_LEGACY - 1);

	rtw_write8_set(rtwdev, REG_MCUFW_CTRL, BIT_FWDL_CHK_RPT);

	for (page = 0; page < total_page; page++) {
		write_firmware_page(rtwdev, page, data, DLFW_PAGE_SIZE_LEGACY);
		data += DLFW_PAGE_SIZE_LEGACY;
	}
	if (last_page_size)
		write_firmware_page(rtwdev, page, data, last_page_size);

	if (!check_hw_ready(rtwdev, REG_MCUFW_CTRL, BIT_FWDL_CHK_RPT, 1)) {
		rtw_err(rtwdev, "failed to check download firmware report\n");
		return -EINVAL;
	}

	return 0;
}
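
/* Hand control to the firmware on the legacy (11n) parts: flag the
 * download as complete, clear the WINTINI ready bit, restart the WLAN
 * CPU, then poll for the firmware ready indication.
 */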
static int download_firmware_validate_legacy(struct rtw_dev *rtwdev)
{
	u32 val32;
	int try;

	val32 = rtw_read32(rtwdev, REG_MCUFW_CTRL);
	val32 |= BIT_MCUFWDL_RDY;
	val32 &= ~BIT_WINTINI_RDY;
	rtw_write32(rtwdev, REG_MCUFW_CTRL, val32);

	wlan_cpu_enable(rtwdev, false);
	wlan_cpu_enable(rtwdev, true);

	for (try = 0; try < 10; try++) {
		val32 = rtw_read32(rtwdev, REG_MCUFW_CTRL);
		if ((val32 & FW_READY_LEGACY) == FW_READY_LEGACY)
			return 0;
		msleep(20);
	}

	rtw_err(rtwdev, "failed to validate firmware\n");
	return -EINVAL;
}

static int __rtw_download_firmware_legacy(struct rtw_dev *rtwdev,
					  struct rtw_fw_state *fw)
{
	int ret = 0;

	en_download_firmware_legacy(rtwdev, true);
	ret = download_firmware_legacy(rtwdev, fw->firmware->data, fw->firmware->size);
	en_download_firmware_legacy(rtwdev, false);
	if (ret)
		goto out;

	ret = download_firmware_validate_legacy(rtwdev);
	if (ret)
		goto out;

	/* reset desc and index */
	rtw_hci_setup(rtwdev);

	rtwdev->h2c.last_box_num = 0;
	rtwdev->h2c.seq = 0;

	set_bit(RTW_FLAG_FW_RUNNING, rtwdev->flags);

out:
	return ret;
}

static
int _rtw_download_firmware(struct rtw_dev *rtwdev, struct rtw_fw_state *fw)
{
	if (rtw_chip_wcpu_11n(rtwdev))
		return __rtw_download_firmware_legacy(rtwdev, fw);

	return __rtw_download_firmware(rtwdev, fw);
}

int rtw_download_firmware(struct rtw_dev *rtwdev, struct rtw_fw_state *fw)
{
	int ret;

	ret = _rtw_download_firmware(rtwdev, fw);
	if (ret)
		return ret;

	if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_PCIE &&
	    rtwdev->chip->id == RTW_CHIP_TYPE_8821C)
		rtw_fw_set_recover_bt_device(rtwdev);

	return 0;
}

static u32 get_priority_queues(struct rtw_dev *rtwdev, u32 queues)
{
	const struct rtw_rqpn *rqpn = rtwdev->fifo.rqpn;
	u32 prio_queues = 0;

	if (queues & BIT(IEEE80211_AC_VO))
		prio_queues |= BIT(rqpn->dma_map_vo);
	if (queues & BIT(IEEE80211_AC_VI))
		prio_queues |= BIT(rqpn->dma_map_vi);
	if (queues & BIT(IEEE80211_AC_BE))
		prio_queues |= BIT(rqpn->dma_map_be);
	if (queues & BIT(IEEE80211_AC_BK))
		prio_queues |= BIT(rqpn->dma_map_bk);

	return prio_queues;
}

static void __rtw_mac_flush_prio_queue(struct rtw_dev *rtwdev,
				       u32 prio_queue, bool drop)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	const struct rtw_prioq_addr *addr;
	bool wsize;
	u16 avail_page, rsvd_page;
	int i;

	if (prio_queue >= RTW_DMA_MAPPING_MAX)
		return;

	addr = &chip->prioq_addrs->prio[prio_queue];
	wsize = chip->prioq_addrs->wsize;

	/* check if all of the reserved pages are available for 100 msecs */
	for (i = 0; i < 5; i++) {
		rsvd_page = wsize ? rtw_read16(rtwdev, addr->rsvd) :
				    rtw_read8(rtwdev, addr->rsvd);
		avail_page = wsize ? rtw_read16(rtwdev, addr->avail) :
				     rtw_read8(rtwdev, addr->avail);
		if (rsvd_page == avail_page)
			return;

		msleep(20);
	}

	/* priority queue is still not empty, throw a warning
	 *
	 * Note that if we want to flush the tx queue when having a lot of
	 * traffic (ex, 100Mbps up), some of the packets could be dropped.
	 * And it requires like ~2secs to flush the full priority queue.
	 */
	if (!drop)
		rtw_warn(rtwdev, "timed out to flush queue %d\n", prio_queue);
}

static void rtw_mac_flush_prio_queues(struct rtw_dev *rtwdev,
				      u32 prio_queues, bool drop)
{
	u32 q;

	for (q = 0; q < RTW_DMA_MAPPING_MAX; q++)
		if (prio_queues & BIT(q))
			__rtw_mac_flush_prio_queue(rtwdev, q, drop);
}

void rtw_mac_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop)
{
	u32 prio_queues = 0;

	/* If all of the hardware queues are requested to flush,
	 * or the priority queues are not mapped yet,
	 * flush all of the priority queues
	 */
	if (queues == BIT(rtwdev->hw->queues) - 1 || !rtwdev->fifo.rqpn)
		prio_queues = BIT(RTW_DMA_MAPPING_MAX) - 1;
	else
		prio_queues = get_priority_queues(rtwdev, queues);

	rtw_mac_flush_prio_queues(rtwdev, prio_queues, drop);
}

static int txdma_queue_mapping(struct rtw_dev *rtwdev)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	const struct rtw_rqpn *rqpn = NULL;
	u16 txdma_pq_map = 0;

	switch (rtw_hci_type(rtwdev)) {
	case RTW_HCI_TYPE_PCIE:
		rqpn = &chip->rqpn_table[1];
		break;
	case RTW_HCI_TYPE_USB:
		if (rtwdev->hci.bulkout_num == 2)
			rqpn = &chip->rqpn_table[2];
		else if (rtwdev->hci.bulkout_num == 3)
			rqpn = &chip->rqpn_table[3];
		else if (rtwdev->hci.bulkout_num == 4)
			rqpn = &chip->rqpn_table[4];
		else
			return -EINVAL;
		break;
	case RTW_HCI_TYPE_SDIO:
		rqpn = &chip->rqpn_table[0];
		break;
	default:
		return -EINVAL;
	}

	rtwdev->fifo.rqpn = rqpn;
	txdma_pq_map |= BIT_TXDMA_HIQ_MAP(rqpn->dma_map_hi);
	txdma_pq_map |= BIT_TXDMA_MGQ_MAP(rqpn->dma_map_mg);
	txdma_pq_map |= BIT_TXDMA_BKQ_MAP(rqpn->dma_map_bk);
	txdma_pq_map |= BIT_TXDMA_BEQ_MAP(rqpn->dma_map_be);
	txdma_pq_map |= BIT_TXDMA_VIQ_MAP(rqpn->dma_map_vi);
	txdma_pq_map |= BIT_TXDMA_VOQ_MAP(rqpn->dma_map_vo);
	rtw_write16(rtwdev, REG_TXDMA_PQ_MAP, txdma_pq_map);

	rtw_write8(rtwdev, REG_CR, 0);
	rtw_write8(rtwdev, REG_CR, MAC_TRX_ENABLE);
	if (rtw_chip_wcpu_11ac(rtwdev))
		rtw_write32(rtwdev, REG_H2CQ_CSR, BIT_H2CQ_FULL);

	if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_SDIO) {
		rtw_read32(rtwdev, REG_SDIO_FREE_TXPG);
		rtw_write32(rtwdev, REG_SDIO_TX_CTRL, 0);
	} else if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB) {
		rtw_write8_set(rtwdev, REG_TXDMA_PQ_MAP, BIT_RXDMA_ARBBW_EN);
	}

	return 0;
}

static int set_trx_fifo_info(struct rtw_dev *rtwdev)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_fifo_conf *fifo = &rtwdev->fifo;
	u16 cur_pg_addr;
	u8 csi_buf_pg_num = chip->csi_buf_pg_num;

	/* config rsvd page num */
	fifo->rsvd_drv_pg_num = chip->rsvd_drv_pg_num;
	fifo->txff_pg_num = chip->txff_size >> 7;
	if (rtw_chip_wcpu_11n(rtwdev))
		fifo->rsvd_pg_num = fifo->rsvd_drv_pg_num;
	else
		fifo->rsvd_pg_num = fifo->rsvd_drv_pg_num +
				    RSVD_PG_H2C_EXTRAINFO_NUM +
				    RSVD_PG_H2C_STATICINFO_NUM +
				    RSVD_PG_H2CQ_NUM +
				    RSVD_PG_CPU_INSTRUCTION_NUM +
				    RSVD_PG_FW_TXBUF_NUM +
				    csi_buf_pg_num;

	if (fifo->rsvd_pg_num > fifo->txff_pg_num)
		return -ENOMEM;

	fifo->acq_pg_num = fifo->txff_pg_num - fifo->rsvd_pg_num;
	fifo->rsvd_boundary = fifo->txff_pg_num - fifo->rsvd_pg_num;

	cur_pg_addr = fifo->txff_pg_num;
	if (rtw_chip_wcpu_11ac(rtwdev)) {
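		/* carve the reserved blocks out of the TX FIFO from the top
		 * down, mirroring the page counts summed above
		 */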
		cur_pg_addr -= csi_buf_pg_num;
		fifo->rsvd_csibuf_addr = cur_pg_addr;
		cur_pg_addr -= RSVD_PG_FW_TXBUF_NUM;
		fifo->rsvd_fw_txbuf_addr = cur_pg_addr;
		cur_pg_addr -= RSVD_PG_CPU_INSTRUCTION_NUM;
		fifo->rsvd_cpu_instr_addr = cur_pg_addr;
		cur_pg_addr -= RSVD_PG_H2CQ_NUM;
		fifo->rsvd_h2cq_addr = cur_pg_addr;
		cur_pg_addr -= RSVD_PG_H2C_STATICINFO_NUM;
		fifo->rsvd_h2c_sta_info_addr = cur_pg_addr;
		cur_pg_addr -= RSVD_PG_H2C_EXTRAINFO_NUM;
		fifo->rsvd_h2c_info_addr = cur_pg_addr;
	}
	cur_pg_addr -= fifo->rsvd_drv_pg_num;
	fifo->rsvd_drv_addr = cur_pg_addr;

	if (fifo->rsvd_boundary != fifo->rsvd_drv_addr) {
		rtw_err(rtwdev, "wrong rsvd driver address\n");
		return -EINVAL;
	}

	return 0;
}

static int __priority_queue_cfg(struct rtw_dev *rtwdev,
				const struct rtw_page_table *pg_tbl,
				u16 pubq_num)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_fifo_conf *fifo = &rtwdev->fifo;

	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_1, pg_tbl->hq_num);
	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_2, pg_tbl->lq_num);
	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_3, pg_tbl->nq_num);
	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_4, pg_tbl->exq_num);
	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_5, pubq_num);
	rtw_write32_set(rtwdev, REG_RQPN_CTRL_2, BIT_LD_RQPN);

	rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2, fifo->rsvd_boundary);
	rtw_write8_set(rtwdev, REG_FWHW_TXQ_CTRL + 2, BIT_EN_WR_FREE_TAIL >> 16);

	rtw_write16(rtwdev, REG_BCNQ_BDNY_V1, fifo->rsvd_boundary);
	rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2 + 2, fifo->rsvd_boundary);
	rtw_write16(rtwdev, REG_BCNQ1_BDNY_V1, fifo->rsvd_boundary);
	rtw_write32(rtwdev, REG_RXFF_BNDY, chip->rxff_size - C2H_PKT_BUF - 1);
	rtw_write8_set(rtwdev, REG_AUTO_LLT_V1, BIT_AUTO_INIT_LLT_V1);

	if (!check_hw_ready(rtwdev, REG_AUTO_LLT_V1, BIT_AUTO_INIT_LLT_V1, 0))
		return -EBUSY;

	rtw_write8(rtwdev, REG_CR + 3, 0);

	return 0;
}

static int __priority_queue_cfg_legacy(struct rtw_dev *rtwdev,
				       const struct rtw_page_table *pg_tbl,
				       u16 pubq_num)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_fifo_conf *fifo = &rtwdev->fifo;
	u32 val32;

	val32 = BIT_RQPN_NE(pg_tbl->nq_num, pg_tbl->exq_num);
	rtw_write32(rtwdev, REG_RQPN_NPQ, val32);
	val32 = BIT_RQPN_HLP(pg_tbl->hq_num, pg_tbl->lq_num, pubq_num);
	rtw_write32(rtwdev, REG_RQPN, val32);

	rtw_write8(rtwdev, REG_TRXFF_BNDY, fifo->rsvd_boundary);
	rtw_write16(rtwdev, REG_TRXFF_BNDY + 2, chip->rxff_size - REPORT_BUF - 1);
	rtw_write8(rtwdev, REG_DWBCN0_CTRL + 1, fifo->rsvd_boundary);
	rtw_write8(rtwdev, REG_BCNQ_BDNY, fifo->rsvd_boundary);
	rtw_write8(rtwdev, REG_MGQ_BDNY, fifo->rsvd_boundary);
	rtw_write8(rtwdev, REG_WMAC_LBK_BF_HD, fifo->rsvd_boundary);

	rtw_write32_set(rtwdev, REG_AUTO_LLT, BIT_AUTO_INIT_LLT);

	if (!check_hw_ready(rtwdev, REG_AUTO_LLT, BIT_AUTO_INIT_LLT, 0))
		return -EBUSY;

	return 0;
}

static int priority_queue_cfg(struct rtw_dev *rtwdev)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_fifo_conf *fifo = &rtwdev->fifo;
	const struct rtw_page_table *pg_tbl = NULL;
	u16 pubq_num;
	int ret;

	ret = set_trx_fifo_info(rtwdev);
	if (ret)
		return ret;

	switch (rtw_hci_type(rtwdev)) {
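	/* page_table[] is indexed the same way as rqpn_table[] in
	 * txdma_queue_mapping(): 0 = SDIO, 1 = PCIe, 2/3/4 = USB keyed by
	 * the bulk-out endpoint count
	 */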
	case RTW_HCI_TYPE_PCIE:
		pg_tbl = &chip->page_table[1];
		break;
	case RTW_HCI_TYPE_USB:
		if (rtwdev->hci.bulkout_num == 2)
			pg_tbl = &chip->page_table[2];
		else if (rtwdev->hci.bulkout_num == 3)
			pg_tbl = &chip->page_table[3];
		else if (rtwdev->hci.bulkout_num == 4)
			pg_tbl = &chip->page_table[4];
		else
			return -EINVAL;
		break;
	case RTW_HCI_TYPE_SDIO:
		pg_tbl = &chip->page_table[0];
		break;
	default:
		return -EINVAL;
	}

	pubq_num = fifo->acq_pg_num - pg_tbl->hq_num - pg_tbl->lq_num -
		   pg_tbl->nq_num - pg_tbl->exq_num - pg_tbl->gapq_num;
	if (rtw_chip_wcpu_11n(rtwdev))
		return __priority_queue_cfg_legacy(rtwdev, pg_tbl, pubq_num);
	else
		return __priority_queue_cfg(rtwdev, pg_tbl, pubq_num);
}

static int init_h2c(struct rtw_dev *rtwdev)
{
	struct rtw_fifo_conf *fifo = &rtwdev->fifo;
	u8 value8;
	u32 value32;
	u32 h2cq_addr;
	u32 h2cq_size;
	u32 h2cq_free;
	u32 wp, rp;

	if (rtw_chip_wcpu_11n(rtwdev))
		return 0;

	h2cq_addr = fifo->rsvd_h2cq_addr << TX_PAGE_SIZE_SHIFT;
	h2cq_size = RSVD_PG_H2CQ_NUM << TX_PAGE_SIZE_SHIFT;

	value32 = rtw_read32(rtwdev, REG_H2C_HEAD);
	value32 = (value32 & 0xFFFC0000) | h2cq_addr;
	rtw_write32(rtwdev, REG_H2C_HEAD, value32);

	value32 = rtw_read32(rtwdev, REG_H2C_READ_ADDR);
	value32 = (value32 & 0xFFFC0000) | h2cq_addr;
	rtw_write32(rtwdev, REG_H2C_READ_ADDR, value32);

	value32 = rtw_read32(rtwdev, REG_H2C_TAIL);
	value32 &= 0xFFFC0000;
	value32 |= (h2cq_addr + h2cq_size);
	rtw_write32(rtwdev, REG_H2C_TAIL, value32);

	value8 = rtw_read8(rtwdev, REG_H2C_INFO);
	value8 = (u8)((value8 & 0xFC) | 0x01);
	rtw_write8(rtwdev, REG_H2C_INFO, value8);

	value8 = rtw_read8(rtwdev, REG_H2C_INFO);
	value8 = (u8)((value8 & 0xFB) | 0x04);
	rtw_write8(rtwdev, REG_H2C_INFO, value8);

	value8 = rtw_read8(rtwdev, REG_TXDMA_OFFSET_CHK + 1);
	value8 = (u8)((value8 & 0x7f) | 0x80);
	rtw_write8(rtwdev, REG_TXDMA_OFFSET_CHK + 1, value8);

	wp = rtw_read32(rtwdev, REG_H2C_PKT_WRITEADDR) & 0x3FFFF;
	rp = rtw_read32(rtwdev, REG_H2C_PKT_READADDR) & 0x3FFFF;
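	/* free space of the ring derived from the write/read pointers;
	 * right after init it must equal the whole queue size
	 */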
	h2cq_free = wp >= rp ? h2cq_size - (wp - rp) : rp - wp;

	if (h2cq_size != h2cq_free) {
		rtw_err(rtwdev, "H2C queue mismatch\n");
		return -EINVAL;
	}

	return 0;
}

static int rtw_init_trx_cfg(struct rtw_dev *rtwdev)
{
	int ret;

	ret = txdma_queue_mapping(rtwdev);
	if (ret)
		return ret;

	ret = priority_queue_cfg(rtwdev);
	if (ret)
		return ret;

	ret = init_h2c(rtwdev);
	if (ret)
		return ret;

	return 0;
}

static int rtw_drv_info_cfg(struct rtw_dev *rtwdev)
{
	u8 value8;

	rtw_write8(rtwdev, REG_RX_DRVINFO_SZ, PHY_STATUS_SIZE);
	if (rtw_chip_wcpu_11ac(rtwdev)) {
		value8 = rtw_read8(rtwdev, REG_TRXFF_BNDY + 1);
		value8 &= 0xF0;
		/* For rxdesc len = 0 issue */
		value8 |= 0xF;
		rtw_write8(rtwdev, REG_TRXFF_BNDY + 1, value8);
	}
	rtw_write32_set(rtwdev, REG_RCR, BIT_APP_PHYSTS);
	rtw_write32_clr(rtwdev, REG_WMAC_OPTION_FUNCTION + 4, BIT(8) | BIT(9));

	return 0;
}

int rtw_mac_init(struct rtw_dev *rtwdev)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	int ret;

	ret = rtw_init_trx_cfg(rtwdev);
	if (ret)
		return ret;

	ret = chip->ops->mac_init(rtwdev);
	if (ret)
		return ret;

	ret = rtw_drv_info_cfg(rtwdev);
	if (ret)
		return ret;

	rtw_hci_interface_cfg(rtwdev);

	return 0;
}