// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019 Realtek Corporation
 */

#include "main.h"
#include "mac.h"
#include "reg.h"
#include "fw.h"
#include "debug.h"
#include "sdio.h"

void rtw_set_channel_mac(struct rtw_dev *rtwdev, u8 channel, u8 bw,
			 u8 primary_ch_idx)
{
	u8 txsc40 = 0, txsc20 = 0;
	u32 value32;
	u8 value8;

	txsc20 = primary_ch_idx;
	if (bw == RTW_CHANNEL_WIDTH_80) {
		if (txsc20 == RTW_SC_20_UPPER || txsc20 == RTW_SC_20_UPMOST)
			txsc40 = RTW_SC_40_UPPER;
		else
			txsc40 = RTW_SC_40_LOWER;
	}
	rtw_write8(rtwdev, REG_DATA_SC,
		   BIT_TXSC_20M(txsc20) | BIT_TXSC_40M(txsc40));

	value32 = rtw_read32(rtwdev, REG_WMAC_TRXPTCL_CTL);
	value32 &= ~BIT_RFMOD;
	switch (bw) {
	case RTW_CHANNEL_WIDTH_80:
		value32 |= BIT_RFMOD_80M;
		break;
	case RTW_CHANNEL_WIDTH_40:
		value32 |= BIT_RFMOD_40M;
		break;
	case RTW_CHANNEL_WIDTH_20:
	default:
		break;
	}
	rtw_write32(rtwdev, REG_WMAC_TRXPTCL_CTL, value32);

	if (rtw_chip_wcpu_11n(rtwdev))
		return;

	value32 = rtw_read32(rtwdev, REG_AFE_CTRL1) & ~(BIT_MAC_CLK_SEL);
	value32 |= (MAC_CLK_HW_DEF_80M << BIT_SHIFT_MAC_CLK_SEL);
	rtw_write32(rtwdev, REG_AFE_CTRL1, value32);

	rtw_write8(rtwdev, REG_USTIME_TSF, MAC_CLK_SPEED);
	rtw_write8(rtwdev, REG_USTIME_EDCA, MAC_CLK_SPEED);

	value8 = rtw_read8(rtwdev, REG_CCK_CHECK);
	value8 = value8 & ~BIT_CHECK_CCK_EN;
	if (IS_CH_5G_BAND(channel))
		value8 |= BIT_CHECK_CCK_EN;
	rtw_write8(rtwdev, REG_CCK_CHECK, value8);
}
EXPORT_SYMBOL(rtw_set_channel_mac);

static int rtw_mac_pre_system_cfg(struct rtw_dev *rtwdev)
{
	unsigned int retry;
	u32 value32;
	u8 value8;

	rtw_write8(rtwdev, REG_RSV_CTRL, 0);

	if (rtw_chip_wcpu_11n(rtwdev)) {
		if (rtw_read32(rtwdev, REG_SYS_CFG1) & BIT_LDO)
			rtw_write8(rtwdev, REG_LDO_SWR_CTRL, LDO_SEL);
		else
			rtw_write8(rtwdev, REG_LDO_SWR_CTRL, SPS_SEL);
		return 0;
	}

	switch (rtw_hci_type(rtwdev)) {
	case RTW_HCI_TYPE_PCIE:
		rtw_write32_set(rtwdev, REG_HCI_OPT_CTRL, BIT_USB_SUS_DIS);
		break;
	case RTW_HCI_TYPE_SDIO:
		rtw_write8_clr(rtwdev, REG_SDIO_HSUS_CTRL, BIT_HCI_SUS_REQ);

		for (retry = 0; retry < RTW_PWR_POLLING_CNT; retry++) {
			if (rtw_read8(rtwdev, REG_SDIO_HSUS_CTRL) &
			    BIT_HCI_RESUME_RDY)
				break;

			usleep_range(10, 50);
		}

		if (retry == RTW_PWR_POLLING_CNT) {
			rtw_err(rtwdev, "failed to poll REG_SDIO_HSUS_CTRL[1]");
			return -ETIMEDOUT;
		}

		if (rtw_sdio_is_sdio30_supported(rtwdev))
			rtw_write8_set(rtwdev, REG_HCI_OPT_CTRL + 2,
				       BIT_SDIO_PAD_E5 >> 16);
		else
			rtw_write8_clr(rtwdev, REG_HCI_OPT_CTRL + 2,
				       BIT_SDIO_PAD_E5 >> 16);
		break;
	case RTW_HCI_TYPE_USB:
		break;
	default:
		return -EINVAL;
	}

	/* config PIN Mux */
	value32 = rtw_read32(rtwdev, REG_PAD_CTRL1);
	value32 |= BIT_PAPE_WLBT_SEL | BIT_LNAON_WLBT_SEL;
	rtw_write32(rtwdev, REG_PAD_CTRL1, value32);

	value32 = rtw_read32(rtwdev, REG_LED_CFG);
	value32 &= ~(BIT_PAPE_SEL_EN | BIT_LNAON_SEL_EN);
	rtw_write32(rtwdev, REG_LED_CFG, value32);

	value32 = rtw_read32(rtwdev, REG_GPIO_MUXCFG);
	value32 |= BIT_WLRFE_4_5_EN;
	rtw_write32(rtwdev, REG_GPIO_MUXCFG, value32);

	/* disable BB/RF */
	value8 = rtw_read8(rtwdev, REG_SYS_FUNC_EN);
	value8 &= ~(BIT_FEN_BB_RSTB | BIT_FEN_BB_GLB_RST);
	rtw_write8(rtwdev, REG_SYS_FUNC_EN, value8);

	value8 = rtw_read8(rtwdev, REG_RF_CTRL);
	value8 &= ~(BIT_RF_SDM_RSTB | BIT_RF_RSTB | BIT_RF_EN);
	rtw_write8(rtwdev, REG_RF_CTRL, value8);

	value32 = rtw_read32(rtwdev, REG_WLRF1);
	value32 &= ~BIT_WLRF1_BBRF_EN;
	rtw_write32(rtwdev, REG_WLRF1, value32);

	return 0;
}

static bool do_pwr_poll_cmd(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 target)
{
	u32 val;

	target &= mask;

	return read_poll_timeout_atomic(rtw_read8, val, (val & mask) == target,
					50, 50 * RTW_PWR_POLLING_CNT, false,
					rtwdev, addr) == 0;
}

static int rtw_pwr_cmd_polling(struct rtw_dev *rtwdev,
			       const struct rtw_pwr_seq_cmd *cmd)
{
	u8 value;
	u32 offset;

	if (cmd->base == RTW_PWR_ADDR_SDIO)
		offset = cmd->offset | SDIO_LOCAL_OFFSET;
	else
		offset = cmd->offset;

	if (do_pwr_poll_cmd(rtwdev, offset, cmd->mask, cmd->value))
		return 0;

	if (rtw_hci_type(rtwdev) != RTW_HCI_TYPE_PCIE)
		goto err;

	/* if PCIE, toggle BIT_PFM_WOWL and try again */
	value = rtw_read8(rtwdev, REG_SYS_PW_CTRL);
	if (rtwdev->chip->id == RTW_CHIP_TYPE_8723D)
		rtw_write8(rtwdev, REG_SYS_PW_CTRL, value & ~BIT_PFM_WOWL);
	rtw_write8(rtwdev, REG_SYS_PW_CTRL, value | BIT_PFM_WOWL);
	rtw_write8(rtwdev, REG_SYS_PW_CTRL, value & ~BIT_PFM_WOWL);
	if (rtwdev->chip->id == RTW_CHIP_TYPE_8723D)
		rtw_write8(rtwdev, REG_SYS_PW_CTRL, value | BIT_PFM_WOWL);

	if (do_pwr_poll_cmd(rtwdev, offset, cmd->mask, cmd->value))
		return 0;

err:
	rtw_err(rtwdev, "failed to poll offset=0x%x mask=0x%x value=0x%x\n",
		offset, cmd->mask, cmd->value);
	return -EBUSY;
}

static int rtw_sub_pwr_seq_parser(struct rtw_dev *rtwdev, u8 intf_mask,
				  u8 cut_mask,
				  const struct rtw_pwr_seq_cmd *cmd)
{
	const struct rtw_pwr_seq_cmd *cur_cmd;
	u32 offset;
	u8 value;

	for (cur_cmd = cmd; cur_cmd->cmd != RTW_PWR_CMD_END; cur_cmd++) {
		if (!(cur_cmd->intf_mask & intf_mask) ||
		    !(cur_cmd->cut_mask & cut_mask))
			continue;

		switch (cur_cmd->cmd) {
		case RTW_PWR_CMD_WRITE:
			offset = cur_cmd->offset;

			if (cur_cmd->base == RTW_PWR_ADDR_SDIO)
				offset |= SDIO_LOCAL_OFFSET;

			value = rtw_read8(rtwdev, offset);
			value &= ~cur_cmd->mask;
			value |= (cur_cmd->value & cur_cmd->mask);
			rtw_write8(rtwdev, offset, value);
			break;
		case RTW_PWR_CMD_POLLING:
			if (rtw_pwr_cmd_polling(rtwdev, cur_cmd))
				return -EBUSY;
			break;
		case RTW_PWR_CMD_DELAY:
			if (cur_cmd->value == RTW_PWR_DELAY_US)
				udelay(cur_cmd->offset);
			else
				mdelay(cur_cmd->offset);
			break;
		case RTW_PWR_CMD_READ:
			break;
		default:
			return -EINVAL;
		}
	}

	return 0;
}

static int rtw_pwr_seq_parser(struct rtw_dev *rtwdev,
			      const struct rtw_pwr_seq_cmd **cmd_seq)
{
	u8 cut_mask;
	u8 intf_mask;
	u8 cut;
	u32 idx = 0;
	const struct rtw_pwr_seq_cmd *cmd;
	int ret;

	cut = rtwdev->hal.cut_version;
	cut_mask = cut_version_to_mask(cut);
	switch (rtw_hci_type(rtwdev)) {
	case RTW_HCI_TYPE_PCIE:
		intf_mask = RTW_PWR_INTF_PCI_MSK;
		break;
	case RTW_HCI_TYPE_USB:
		intf_mask = RTW_PWR_INTF_USB_MSK;
		break;
	case RTW_HCI_TYPE_SDIO:
		intf_mask = RTW_PWR_INTF_SDIO_MSK;
		break;
	default:
		return -EINVAL;
	}

	do {
		cmd = cmd_seq[idx];
		if (!cmd)
			break;

		ret = rtw_sub_pwr_seq_parser(rtwdev, intf_mask, cut_mask, cmd);
		if (ret)
			return ret;

		idx++;
	} while (1);

	return 0;
}

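/* Run the chip's power-on or power-off sequence.
 *
 * The current power state is deduced from REG_CR (and additionally from
 * REG_SYS_STATUS1 on USB); if the MAC is already in the requested state,
 * -EALREADY is returned so the caller may power-cycle instead. On SDIO
 * the host interrupt mask (REG_SDIO_HIMR) is cleared while the sequence
 * runs and restored afterwards.
 */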
static int rtw_mac_power_switch(struct rtw_dev *rtwdev, bool pwr_on)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	const struct rtw_pwr_seq_cmd **pwr_seq;
	u32 imr = 0;
	u8 rpwm;
	bool cur_pwr;
	int ret;

	if (rtw_chip_wcpu_11ac(rtwdev)) {
		rpwm = rtw_read8(rtwdev, rtwdev->hci.rpwm_addr);

		/* Check FW still exist or not */
		if (rtw_read16(rtwdev, REG_MCUFW_CTRL) == 0xC078) {
			rpwm = (rpwm ^ BIT_RPWM_TOGGLE) & BIT_RPWM_TOGGLE;
			rtw_write8(rtwdev, rtwdev->hci.rpwm_addr, rpwm);
		}
	}

	if (rtw_read8(rtwdev, REG_CR) == 0xea)
		cur_pwr = false;
	else if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB &&
		 (rtw_read8(rtwdev, REG_SYS_STATUS1 + 1) & BIT(0)))
		cur_pwr = false;
	else
		cur_pwr = true;

	if (pwr_on == cur_pwr)
		return -EALREADY;

	if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_SDIO) {
		imr = rtw_read32(rtwdev, REG_SDIO_HIMR);
		rtw_write32(rtwdev, REG_SDIO_HIMR, 0);
	}

	if (!pwr_on)
		clear_bit(RTW_FLAG_POWERON, rtwdev->flags);

	pwr_seq = pwr_on ? chip->pwr_on_seq : chip->pwr_off_seq;
	ret = rtw_pwr_seq_parser(rtwdev, pwr_seq);

	if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_SDIO)
		rtw_write32(rtwdev, REG_SDIO_HIMR, imr);

	if (!ret && pwr_on)
		set_bit(RTW_FLAG_POWERON, rtwdev->flags);

	return ret;
}

static int __rtw_mac_init_system_cfg(struct rtw_dev *rtwdev)
{
	u8 sys_func_en = rtwdev->chip->sys_func_en;
	u8 value8;
	u32 value, tmp;

	value = rtw_read32(rtwdev, REG_CPU_DMEM_CON);
	value |= BIT_WL_PLATFORM_RST | BIT_DDMA_EN;
	rtw_write32(rtwdev, REG_CPU_DMEM_CON, value);

	rtw_write8_set(rtwdev, REG_SYS_FUNC_EN + 1, sys_func_en);
	value8 = (rtw_read8(rtwdev, REG_CR_EXT + 3) & 0xF0) | 0x0C;
	rtw_write8(rtwdev, REG_CR_EXT + 3, value8);

	/* disable boot-from-flash for driver's DL FW */
	tmp = rtw_read32(rtwdev, REG_MCUFW_CTRL);
	if (tmp & BIT_BOOT_FSPI_EN) {
		rtw_write32(rtwdev, REG_MCUFW_CTRL, tmp & (~BIT_BOOT_FSPI_EN));
		value = rtw_read32(rtwdev, REG_GPIO_MUXCFG) & (~BIT_FSPI_EN);
		rtw_write32(rtwdev, REG_GPIO_MUXCFG, value);
	}

	return 0;
}

static int __rtw_mac_init_system_cfg_legacy(struct rtw_dev *rtwdev)
{
	rtw_write8(rtwdev, REG_CR, 0xff);
	mdelay(2);
	rtw_write8(rtwdev, REG_HWSEQ_CTRL, 0x7f);
	mdelay(2);

	rtw_write8_set(rtwdev, REG_SYS_CLKR, BIT_WAKEPAD_EN);
	rtw_write16_clr(rtwdev, REG_GPIO_MUXCFG, BIT_EN_SIC);

	rtw_write16(rtwdev, REG_CR, 0x2ff);

	return 0;
}

static int rtw_mac_init_system_cfg(struct rtw_dev *rtwdev)
{
	if (rtw_chip_wcpu_11n(rtwdev))
		return __rtw_mac_init_system_cfg_legacy(rtwdev);

	return __rtw_mac_init_system_cfg(rtwdev);
}

int rtw_mac_power_on(struct rtw_dev *rtwdev)
{
	int ret = 0;

	ret = rtw_mac_pre_system_cfg(rtwdev);
	if (ret)
		goto err;

	ret = rtw_mac_power_switch(rtwdev, true);
	if (ret == -EALREADY) {
		rtw_mac_power_switch(rtwdev, false);

		ret = rtw_mac_pre_system_cfg(rtwdev);
		if (ret)
			goto err;

		ret = rtw_mac_power_switch(rtwdev, true);
		if (ret)
			goto err;
	} else if (ret) {
		goto err;
	}

	ret = rtw_mac_init_system_cfg(rtwdev);
	if (ret)
		goto err;

	return 0;

err:
	rtw_err(rtwdev, "mac power on failed");
	return ret;
}

void rtw_mac_power_off(struct rtw_dev *rtwdev)
{
	rtw_mac_power_switch(rtwdev, false);
}

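/* Sanity-check the firmware blob against its header: the file must consist
 * of exactly FW_HDR_SIZE bytes plus the declared DMEM, IMEM and (optional)
 * EMEM images, each followed by its checksum.
 */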
static bool check_firmware_size(const u8 *data, u32 size)
{
	const struct rtw_fw_hdr *fw_hdr = (const struct rtw_fw_hdr *)data;
	u32 dmem_size;
	u32 imem_size;
	u32 emem_size;
	u32 real_size;

	dmem_size = le32_to_cpu(fw_hdr->dmem_size);
	imem_size = le32_to_cpu(fw_hdr->imem_size);
	emem_size = (fw_hdr->mem_usage & BIT(4)) ?
		    le32_to_cpu(fw_hdr->emem_size) : 0;

	dmem_size += FW_HDR_CHKSUM_SIZE;
	imem_size += FW_HDR_CHKSUM_SIZE;
	emem_size += emem_size ? FW_HDR_CHKSUM_SIZE : 0;
	real_size = FW_HDR_SIZE + dmem_size + imem_size + emem_size;
	if (real_size != size)
		return false;

	return true;
}

static void wlan_cpu_enable(struct rtw_dev *rtwdev, bool enable)
{
	if (enable) {
		/* cpu io interface enable */
		rtw_write8_set(rtwdev, REG_RSV_CTRL + 1, BIT_WLMCU_IOIF);

		/* cpu enable */
		rtw_write8_set(rtwdev, REG_SYS_FUNC_EN + 1, BIT_FEN_CPUEN);
	} else {
		/* cpu io interface disable */
		rtw_write8_clr(rtwdev, REG_SYS_FUNC_EN + 1, BIT_FEN_CPUEN);

		/* cpu disable */
		rtw_write8_clr(rtwdev, REG_RSV_CTRL + 1, BIT_WLMCU_IOIF);
	}
}

#define DLFW_RESTORE_REG_NUM 6

static void download_firmware_reg_backup(struct rtw_dev *rtwdev,
					 struct rtw_backup_info *bckp)
{
	u8 tmp;
	u8 bckp_idx = 0;

	/* set HIQ to hi priority */
	bckp[bckp_idx].len = 1;
	bckp[bckp_idx].reg = REG_TXDMA_PQ_MAP + 1;
	bckp[bckp_idx].val = rtw_read8(rtwdev, REG_TXDMA_PQ_MAP + 1);
	bckp_idx++;
	tmp = RTW_DMA_MAPPING_HIGH << 6;
	rtw_write8(rtwdev, REG_TXDMA_PQ_MAP + 1, tmp);

	/* DLFW only use HIQ, map HIQ to hi priority */
	bckp[bckp_idx].len = 1;
	bckp[bckp_idx].reg = REG_CR;
	bckp[bckp_idx].val = rtw_read8(rtwdev, REG_CR);
	bckp_idx++;
	bckp[bckp_idx].len = 4;
	bckp[bckp_idx].reg = REG_H2CQ_CSR;
	bckp[bckp_idx].val = BIT_H2CQ_FULL;
	bckp_idx++;
	tmp = BIT_HCI_TXDMA_EN | BIT_TXDMA_EN;
	rtw_write8(rtwdev, REG_CR, tmp);
	rtw_write32(rtwdev, REG_H2CQ_CSR, BIT_H2CQ_FULL);

	/* Config hi priority queue and public priority queue page number */
	bckp[bckp_idx].len = 2;
	bckp[bckp_idx].reg = REG_FIFOPAGE_INFO_1;
	bckp[bckp_idx].val = rtw_read16(rtwdev, REG_FIFOPAGE_INFO_1);
	bckp_idx++;
	bckp[bckp_idx].len = 4;
	bckp[bckp_idx].reg = REG_RQPN_CTRL_2;
	bckp[bckp_idx].val = rtw_read32(rtwdev, REG_RQPN_CTRL_2) | BIT_LD_RQPN;
	bckp_idx++;
	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_1, 0x200);
	rtw_write32(rtwdev, REG_RQPN_CTRL_2, bckp[bckp_idx - 1].val);

	if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_SDIO)
		rtw_read32(rtwdev, REG_SDIO_FREE_TXPG);

	/* Disable beacon related functions */
	tmp = rtw_read8(rtwdev, REG_BCN_CTRL);
	bckp[bckp_idx].len = 1;
	bckp[bckp_idx].reg = REG_BCN_CTRL;
	bckp[bckp_idx].val = tmp;
	bckp_idx++;
	tmp = (u8)((tmp & (~BIT_EN_BCN_FUNCTION)) | BIT_DIS_TSF_UDT);
	rtw_write8(rtwdev, REG_BCN_CTRL, tmp);

	WARN(bckp_idx != DLFW_RESTORE_REG_NUM, "wrong backup number\n");
}

static void download_firmware_reset_platform(struct rtw_dev *rtwdev)
{
	rtw_write8_clr(rtwdev, REG_CPU_DMEM_CON + 2, BIT_WL_PLATFORM_RST >> 16);
	rtw_write8_clr(rtwdev, REG_SYS_CLK_CTRL + 1, BIT_CPU_CLK_EN >> 8);
	rtw_write8_set(rtwdev, REG_CPU_DMEM_CON + 2, BIT_WL_PLATFORM_RST >> 16);
	rtw_write8_set(rtwdev, REG_SYS_CLK_CTRL + 1, BIT_CPU_CLK_EN >> 8);
}

static void download_firmware_reg_restore(struct rtw_dev *rtwdev,
					  struct rtw_backup_info *bckp,
					  u8 bckp_num)
{
	rtw_restore_reg(rtwdev, bckp, bckp_num);
}

#define TX_DESC_SIZE 48

static int send_firmware_pkt_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr,
				       const u8 *data, u32 size)
{
	u8 *buf;
	int ret;

	buf = kmemdup(data, size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = rtw_fw_write_data_rsvd_page(rtwdev, pg_addr, buf, size);
	kfree(buf);
	return ret;
}

static int
send_firmware_pkt(struct rtw_dev *rtwdev, u16 pg_addr, const u8 *data, u32 size)
{
	int ret;

	if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB &&
	    !((size + TX_DESC_SIZE) & (512 - 1)))
		size += 1;

	ret = send_firmware_pkt_rsvd_page(rtwdev, pg_addr, data, size);
	if (ret)
		rtw_err(rtwdev, "failed to download rsvd page\n");

	return ret;
}

static int
iddma_enable(struct rtw_dev *rtwdev, u32 src, u32 dst, u32 ctrl)
{
	rtw_write32(rtwdev, REG_DDMA_CH0SA, src);
	rtw_write32(rtwdev, REG_DDMA_CH0DA, dst);
	rtw_write32(rtwdev, REG_DDMA_CH0CTRL, ctrl);

	if (!check_hw_ready(rtwdev, REG_DDMA_CH0CTRL, BIT_DDMACH0_OWN, 0))
		return -EBUSY;

	return 0;
}

static int iddma_download_firmware(struct rtw_dev *rtwdev, u32 src, u32 dst,
				   u32 len, u8 first)
{
	u32 ch0_ctrl = BIT_DDMACH0_CHKSUM_EN | BIT_DDMACH0_OWN;

	if (!check_hw_ready(rtwdev, REG_DDMA_CH0CTRL, BIT_DDMACH0_OWN, 0))
		return -EBUSY;

	ch0_ctrl |= len & BIT_MASK_DDMACH0_DLEN;
	if (!first)
		ch0_ctrl |= BIT_DDMACH0_CHKSUM_CONT;

	if (iddma_enable(rtwdev, src, dst, ch0_ctrl))
		return -EBUSY;

	return 0;
}

int rtw_ddma_to_fw_fifo(struct rtw_dev *rtwdev, u32 ocp_src, u32 size)
{
	u32 ch0_ctrl = BIT_DDMACH0_OWN | BIT_DDMACH0_DDMA_MODE;

	if (!check_hw_ready(rtwdev, REG_DDMA_CH0CTRL, BIT_DDMACH0_OWN, 0)) {
		rtw_dbg(rtwdev, RTW_DBG_FW, "busy to start ddma\n");
		return -EBUSY;
	}

	ch0_ctrl |= size & BIT_MASK_DDMACH0_DLEN;

	if (iddma_enable(rtwdev, ocp_src, OCPBASE_RXBUF_FW_88XX, ch0_ctrl)) {
		rtw_dbg(rtwdev, RTW_DBG_FW, "busy to complete ddma\n");
		return -EBUSY;
	}

	return 0;
}

static bool
check_fw_checksum(struct rtw_dev *rtwdev, u32 addr)
{
	u8 fw_ctrl;

	fw_ctrl = rtw_read8(rtwdev, REG_MCUFW_CTRL);

	if (rtw_read32(rtwdev, REG_DDMA_CH0CTRL) & BIT_DDMACH0_CHKSUM_STS) {
		if (addr < OCPBASE_DMEM_88XX) {
			fw_ctrl |= BIT_IMEM_DW_OK;
			fw_ctrl &= ~BIT_IMEM_CHKSUM_OK;
			rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
		} else {
			fw_ctrl |= BIT_DMEM_DW_OK;
			fw_ctrl &= ~BIT_DMEM_CHKSUM_OK;
			rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
		}

		rtw_err(rtwdev, "invalid fw checksum\n");

		return false;
	}

	if (addr < OCPBASE_DMEM_88XX) {
		fw_ctrl |= (BIT_IMEM_DW_OK | BIT_IMEM_CHKSUM_OK);
		rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
	} else {
		fw_ctrl |= (BIT_DMEM_DW_OK | BIT_DMEM_CHKSUM_OK);
		rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
	}

	return true;
}

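/* Download one firmware image (DMEM, IMEM or EMEM) to its target address.
 * The image is pushed in chunks of at most 4 KiB: each chunk is written
 * into the reserved-page area of the TX buffer and then copied into OCP
 * memory by DDMA channel 0, which accumulates a checksum over all chunks
 * that is verified at the end by check_fw_checksum().
 */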
static int
download_firmware_to_mem(struct rtw_dev *rtwdev, const u8 *data,
			 u32 src, u32 dst, u32 size)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	u32 desc_size = chip->tx_pkt_desc_sz;
	u8 first_part;
	u32 mem_offset;
	u32 residue_size;
	u32 pkt_size;
	u32 max_size = 0x1000;
	u32 val;
	int ret;

	mem_offset = 0;
	first_part = 1;
	residue_size = size;

	val = rtw_read32(rtwdev, REG_DDMA_CH0CTRL);
	val |= BIT_DDMACH0_RESET_CHKSUM_STS;
	rtw_write32(rtwdev, REG_DDMA_CH0CTRL, val);

	while (residue_size) {
		if (residue_size >= max_size)
			pkt_size = max_size;
		else
			pkt_size = residue_size;

		ret = send_firmware_pkt(rtwdev, (u16)(src >> 7),
					data + mem_offset, pkt_size);
		if (ret)
			return ret;

		ret = iddma_download_firmware(rtwdev, OCPBASE_TXBUF_88XX +
					      src + desc_size,
					      dst + mem_offset, pkt_size,
					      first_part);
		if (ret)
			return ret;

		first_part = 0;
		mem_offset += pkt_size;
		residue_size -= pkt_size;
	}

	if (!check_fw_checksum(rtwdev, dst))
		return -EINVAL;

	return 0;
}

static int
start_download_firmware(struct rtw_dev *rtwdev, const u8 *data, u32 size)
{
	const struct rtw_fw_hdr *fw_hdr = (const struct rtw_fw_hdr *)data;
	const u8 *cur_fw;
	u16 val;
	u32 imem_size;
	u32 dmem_size;
	u32 emem_size;
	u32 addr;
	int ret;

	dmem_size = le32_to_cpu(fw_hdr->dmem_size);
	imem_size = le32_to_cpu(fw_hdr->imem_size);
	emem_size = (fw_hdr->mem_usage & BIT(4)) ?
		    le32_to_cpu(fw_hdr->emem_size) : 0;
	dmem_size += FW_HDR_CHKSUM_SIZE;
	imem_size += FW_HDR_CHKSUM_SIZE;
	emem_size += emem_size ? FW_HDR_CHKSUM_SIZE : 0;

	val = (u16)(rtw_read16(rtwdev, REG_MCUFW_CTRL) & 0x3800);
	val |= BIT_MCUFWDL_EN;
	rtw_write16(rtwdev, REG_MCUFW_CTRL, val);

	cur_fw = data + FW_HDR_SIZE;
	addr = le32_to_cpu(fw_hdr->dmem_addr);
	addr &= ~BIT(31);
	ret = download_firmware_to_mem(rtwdev, cur_fw, 0, addr, dmem_size);
	if (ret)
		return ret;

	cur_fw = data + FW_HDR_SIZE + dmem_size;
	addr = le32_to_cpu(fw_hdr->imem_addr);
	addr &= ~BIT(31);
	ret = download_firmware_to_mem(rtwdev, cur_fw, 0, addr, imem_size);
	if (ret)
		return ret;

	if (emem_size) {
		cur_fw = data + FW_HDR_SIZE + dmem_size + imem_size;
		addr = le32_to_cpu(fw_hdr->emem_addr);
		addr &= ~BIT(31);
		ret = download_firmware_to_mem(rtwdev, cur_fw, 0, addr,
					       emem_size);
		if (ret)
			return ret;
	}

	return 0;
}

static int download_firmware_validate(struct rtw_dev *rtwdev)
{
	u32 fw_key;

	if (!check_hw_ready(rtwdev, REG_MCUFW_CTRL, FW_READY_MASK, FW_READY)) {
		fw_key = rtw_read32(rtwdev, REG_FW_DBG7) & FW_KEY_MASK;
		if (fw_key == ILLEGAL_KEY_GROUP)
			rtw_err(rtwdev, "invalid fw key\n");
		return -EINVAL;
	}

	return 0;
}

static void download_firmware_end_flow(struct rtw_dev *rtwdev)
{
	u16 fw_ctrl;

	rtw_write32(rtwdev, REG_TXDMA_STATUS, BTI_PAGE_OVF);

	/* Check IMEM & DMEM checksum is OK or not */
	fw_ctrl = rtw_read16(rtwdev, REG_MCUFW_CTRL);
	if ((fw_ctrl & BIT_CHECK_SUM_OK) != BIT_CHECK_SUM_OK)
		return;

	fw_ctrl = (fw_ctrl | BIT_FW_DW_RDY) & ~BIT_MCUFWDL_EN;
	rtw_write16(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
}

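/* Full download flow for the non-legacy (11ac) WCPUs: back up the registers
 * the download path touches, halt the WLAN CPU, reset the platform, DDMA
 * the firmware images into place, restore the registers, restart the CPU
 * and wait for the firmware-ready indication. The LTE coexistence register
 * 0x38 is preserved across the whole procedure.
 */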
static int __rtw_download_firmware(struct rtw_dev *rtwdev,
				   struct rtw_fw_state *fw)
{
	struct rtw_backup_info bckp[DLFW_RESTORE_REG_NUM];
	const u8 *data = fw->firmware->data;
	u32 size = fw->firmware->size;
	u32 ltecoex_bckp;
	int ret;

	if (!check_firmware_size(data, size))
		return -EINVAL;

	if (!ltecoex_read_reg(rtwdev, 0x38, &ltecoex_bckp))
		return -EBUSY;

	wlan_cpu_enable(rtwdev, false);

	download_firmware_reg_backup(rtwdev, bckp);
	download_firmware_reset_platform(rtwdev);

	ret = start_download_firmware(rtwdev, data, size);
	if (ret)
		goto dlfw_fail;

	download_firmware_reg_restore(rtwdev, bckp, DLFW_RESTORE_REG_NUM);

	download_firmware_end_flow(rtwdev);

	wlan_cpu_enable(rtwdev, true);

	if (!ltecoex_reg_write(rtwdev, 0x38, ltecoex_bckp))
		return -EBUSY;

	ret = download_firmware_validate(rtwdev);
	if (ret)
		goto dlfw_fail;

	/* reset desc and index */
	rtw_hci_setup(rtwdev);

	rtwdev->h2c.last_box_num = 0;
	rtwdev->h2c.seq = 0;

	set_bit(RTW_FLAG_FW_RUNNING, rtwdev->flags);

	return 0;

dlfw_fail:
	/* Disable FWDL_EN */
	rtw_write8_clr(rtwdev, REG_MCUFW_CTRL, BIT_MCUFWDL_EN);
	rtw_write8_set(rtwdev, REG_SYS_FUNC_EN + 1, BIT_FEN_CPUEN);

	return ret;
}

static void en_download_firmware_legacy(struct rtw_dev *rtwdev, bool en)
{
	int try;

	if (en) {
		wlan_cpu_enable(rtwdev, false);
		wlan_cpu_enable(rtwdev, true);

		rtw_write8_set(rtwdev, REG_MCUFW_CTRL, BIT_MCUFWDL_EN);

		for (try = 0; try < 10; try++) {
			if (rtw_read8(rtwdev, REG_MCUFW_CTRL) & BIT_MCUFWDL_EN)
				goto fwdl_ready;
			rtw_write8_set(rtwdev, REG_MCUFW_CTRL, BIT_MCUFWDL_EN);
			msleep(20);
		}
		rtw_err(rtwdev, "failed to check fw download ready\n");
fwdl_ready:
		rtw_write32_clr(rtwdev, REG_MCUFW_CTRL, BIT_ROM_DLEN);
	} else {
		rtw_write8_clr(rtwdev, REG_MCUFW_CTRL, BIT_MCUFWDL_EN);
	}
}

static void
write_firmware_page(struct rtw_dev *rtwdev, u32 page, const u8 *data, u32 size)
{
	u32 val32;
	u32 block_nr;
	u32 remain_size;
	u32 write_addr = FW_START_ADDR_LEGACY;
	const __le32 *ptr = (const __le32 *)data;
	u32 block;
	__le32 remain_data = 0;

	block_nr = size >> DLFW_BLK_SIZE_SHIFT_LEGACY;
	remain_size = size & (DLFW_BLK_SIZE_LEGACY - 1);

	val32 = rtw_read32(rtwdev, REG_MCUFW_CTRL);
	val32 &= ~BIT_ROM_PGE;
	val32 |= (page << BIT_SHIFT_ROM_PGE) & BIT_ROM_PGE;
	rtw_write32(rtwdev, REG_MCUFW_CTRL, val32);

	for (block = 0; block < block_nr; block++) {
		rtw_write32(rtwdev, write_addr, le32_to_cpu(*ptr));

		write_addr += DLFW_BLK_SIZE_LEGACY;
		ptr++;
	}

	if (remain_size) {
		memcpy(&remain_data, ptr, remain_size);
		rtw_write32(rtwdev, write_addr, le32_to_cpu(remain_data));
	}
}

static int
download_firmware_legacy(struct rtw_dev *rtwdev, const u8 *data, u32 size)
{
	u32 page;
	u32 total_page;
	u32 last_page_size;

	data += sizeof(struct rtw_fw_hdr_legacy);
	size -= sizeof(struct rtw_fw_hdr_legacy);

	total_page = size >> DLFW_PAGE_SIZE_SHIFT_LEGACY;
	last_page_size = size & (DLFW_PAGE_SIZE_LEGACY - 1);

	rtw_write8_set(rtwdev, REG_MCUFW_CTRL, BIT_FWDL_CHK_RPT);

	for (page = 0; page < total_page; page++) {
		write_firmware_page(rtwdev, page, data, DLFW_PAGE_SIZE_LEGACY);
		data += DLFW_PAGE_SIZE_LEGACY;
	}
	if (last_page_size)
		write_firmware_page(rtwdev, page, data, last_page_size);

	if (!check_hw_ready(rtwdev, REG_MCUFW_CTRL, BIT_FWDL_CHK_RPT, 1)) {
		rtw_err(rtwdev, "failed to check download firmware report\n");
		return -EINVAL;
	}

	return 0;
}

static int download_firmware_validate_legacy(struct rtw_dev *rtwdev)
{
	u32 val32;
	int try;

	val32 = rtw_read32(rtwdev, REG_MCUFW_CTRL);
	val32 |= BIT_MCUFWDL_RDY;
	val32 &= ~BIT_WINTINI_RDY;
	rtw_write32(rtwdev, REG_MCUFW_CTRL, val32);

	wlan_cpu_enable(rtwdev, false);
	wlan_cpu_enable(rtwdev, true);

	for (try = 0; try < 10; try++) {
		val32 = rtw_read32(rtwdev, REG_MCUFW_CTRL);
		if ((val32 & FW_READY_LEGACY) == FW_READY_LEGACY)
			return 0;
		msleep(20);
	}

	rtw_err(rtwdev, "failed to validate firmware\n");
	return -EINVAL;
}

static int __rtw_download_firmware_legacy(struct rtw_dev *rtwdev,
					  struct rtw_fw_state *fw)
{
	int ret = 0;

	en_download_firmware_legacy(rtwdev, true);
	ret = download_firmware_legacy(rtwdev, fw->firmware->data,
				       fw->firmware->size);
	en_download_firmware_legacy(rtwdev, false);
	if (ret)
		goto out;

	ret = download_firmware_validate_legacy(rtwdev);
	if (ret)
		goto out;

	/* reset desc and index */
	rtw_hci_setup(rtwdev);

	rtwdev->h2c.last_box_num = 0;
	rtwdev->h2c.seq = 0;

	set_bit(RTW_FLAG_FW_RUNNING, rtwdev->flags);

out:
	return ret;
}

static
int _rtw_download_firmware(struct rtw_dev *rtwdev, struct rtw_fw_state *fw)
{
	if (rtw_chip_wcpu_11n(rtwdev))
		return __rtw_download_firmware_legacy(rtwdev, fw);

	return __rtw_download_firmware(rtwdev, fw);
}

int rtw_download_firmware(struct rtw_dev *rtwdev, struct rtw_fw_state *fw)
{
	int ret;

	ret = _rtw_download_firmware(rtwdev, fw);
	if (ret)
		return ret;

	if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_PCIE &&
	    rtwdev->chip->id == RTW_CHIP_TYPE_8821C)
		rtw_fw_set_recover_bt_device(rtwdev);

	return 0;
}

static u32 get_priority_queues(struct rtw_dev *rtwdev, u32 queues)
{
	const struct rtw_rqpn *rqpn = rtwdev->fifo.rqpn;
	u32 prio_queues = 0;

	if (queues & BIT(IEEE80211_AC_VO))
		prio_queues |= BIT(rqpn->dma_map_vo);
	if (queues & BIT(IEEE80211_AC_VI))
		prio_queues |= BIT(rqpn->dma_map_vi);
	if (queues & BIT(IEEE80211_AC_BE))
		prio_queues |= BIT(rqpn->dma_map_be);
	if (queues & BIT(IEEE80211_AC_BK))
		prio_queues |= BIT(rqpn->dma_map_bk);

	return prio_queues;
}

static void __rtw_mac_flush_prio_queue(struct rtw_dev *rtwdev,
				       u32 prio_queue, bool drop)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	const struct rtw_prioq_addr *addr;
	bool wsize;
	u16 avail_page, rsvd_page;
	int i;

	if (prio_queue >= RTW_DMA_MAPPING_MAX)
		return;

	addr = &chip->prioq_addrs->prio[prio_queue];
	wsize = chip->prioq_addrs->wsize;

	/* check if all of the reserved pages are available for 100 msecs */
	for (i = 0; i < 5; i++) {
		rsvd_page = wsize ? rtw_read16(rtwdev, addr->rsvd) :
				    rtw_read8(rtwdev, addr->rsvd);
		avail_page = wsize ? rtw_read16(rtwdev, addr->avail) :
				     rtw_read8(rtwdev, addr->avail);
		if (rsvd_page == avail_page)
			return;

		msleep(20);
	}

	/* priority queue is still not empty, throw a warning,
	 *
	 * Note that if we want to flush the tx queue when having a lot of
	 * traffic (ex, 100Mbps up), some of the packets could be dropped.
	 * And it requires like ~2secs to flush the full priority queue.
	 */
	if (!drop)
		rtw_warn(rtwdev, "timed out to flush queue %d\n", prio_queue);
}

static void rtw_mac_flush_prio_queues(struct rtw_dev *rtwdev,
				      u32 prio_queues, bool drop)
{
	u32 q;

	for (q = 0; q < RTW_DMA_MAPPING_MAX; q++)
		if (prio_queues & BIT(q))
			__rtw_mac_flush_prio_queue(rtwdev, q, drop);
}

void rtw_mac_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop)
{
	u32 prio_queues = 0;

	/* If all of the hardware queues are requested to flush,
	 * or the priority queues are not mapped yet,
	 * flush all of the priority queues
	 */
	if (queues == BIT(rtwdev->hw->queues) - 1 || !rtwdev->fifo.rqpn)
		prio_queues = BIT(RTW_DMA_MAPPING_MAX) - 1;
	else
		prio_queues = get_priority_queues(rtwdev, queues);

	rtw_mac_flush_prio_queues(rtwdev, prio_queues, drop);
}

static int txdma_queue_mapping(struct rtw_dev *rtwdev)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	const struct rtw_rqpn *rqpn = NULL;
	u16 txdma_pq_map = 0;

	switch (rtw_hci_type(rtwdev)) {
	case RTW_HCI_TYPE_PCIE:
		rqpn = &chip->rqpn_table[1];
		break;
	case RTW_HCI_TYPE_USB:
		if (rtwdev->hci.bulkout_num == 2)
			rqpn = &chip->rqpn_table[2];
		else if (rtwdev->hci.bulkout_num == 3)
			rqpn = &chip->rqpn_table[3];
		else if (rtwdev->hci.bulkout_num == 4)
			rqpn = &chip->rqpn_table[4];
		else
			return -EINVAL;
		break;
	case RTW_HCI_TYPE_SDIO:
		rqpn = &chip->rqpn_table[0];
		break;
	default:
		return -EINVAL;
	}

	rtwdev->fifo.rqpn = rqpn;
	txdma_pq_map |= BIT_TXDMA_HIQ_MAP(rqpn->dma_map_hi);
	txdma_pq_map |= BIT_TXDMA_MGQ_MAP(rqpn->dma_map_mg);
	txdma_pq_map |= BIT_TXDMA_BKQ_MAP(rqpn->dma_map_bk);
	txdma_pq_map |= BIT_TXDMA_BEQ_MAP(rqpn->dma_map_be);
	txdma_pq_map |= BIT_TXDMA_VIQ_MAP(rqpn->dma_map_vi);
	txdma_pq_map |= BIT_TXDMA_VOQ_MAP(rqpn->dma_map_vo);
	rtw_write16(rtwdev, REG_TXDMA_PQ_MAP, txdma_pq_map);

	rtw_write8(rtwdev, REG_CR, 0);
	rtw_write8(rtwdev, REG_CR, MAC_TRX_ENABLE);
	if (rtw_chip_wcpu_11ac(rtwdev))
		rtw_write32(rtwdev, REG_H2CQ_CSR, BIT_H2CQ_FULL);

	if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_SDIO) {
		rtw_read32(rtwdev, REG_SDIO_FREE_TXPG);
		rtw_write32(rtwdev, REG_SDIO_TX_CTRL, 0);
	} else if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB) {
		rtw_write8_set(rtwdev, REG_TXDMA_PQ_MAP, BIT_RXDMA_ARBBW_EN);
	}

	return 0;
}

static int set_trx_fifo_info(struct rtw_dev *rtwdev)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_fifo_conf *fifo = &rtwdev->fifo;
	u16 cur_pg_addr;
	u8 csi_buf_pg_num = chip->csi_buf_pg_num;

	/* config rsvd page num */
	fifo->rsvd_drv_pg_num = chip->rsvd_drv_pg_num;
	fifo->txff_pg_num = chip->txff_size >> 7;
	if (rtw_chip_wcpu_11n(rtwdev))
		fifo->rsvd_pg_num = fifo->rsvd_drv_pg_num;
	else
		fifo->rsvd_pg_num = fifo->rsvd_drv_pg_num +
				    RSVD_PG_H2C_EXTRAINFO_NUM +
				    RSVD_PG_H2C_STATICINFO_NUM +
				    RSVD_PG_H2CQ_NUM +
				    RSVD_PG_CPU_INSTRUCTION_NUM +
				    RSVD_PG_FW_TXBUF_NUM +
				    csi_buf_pg_num;

	if (fifo->rsvd_pg_num > fifo->txff_pg_num)
		return -ENOMEM;

	fifo->acq_pg_num = fifo->txff_pg_num - fifo->rsvd_pg_num;
	fifo->rsvd_boundary = fifo->txff_pg_num - fifo->rsvd_pg_num;

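	/* Lay out the reserved area from the top of the TX FIFO downwards:
	 * CSI buffer, FW TX buffer, CPU instructions, H2C queue, H2C static
	 * info and H2C extra info (11ac WCPUs only), then the driver's own
	 * reserved pages. The walk must end exactly at rsvd_boundary.
	 */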
	cur_pg_addr = fifo->txff_pg_num;
	if (rtw_chip_wcpu_11ac(rtwdev)) {
		cur_pg_addr -= csi_buf_pg_num;
		fifo->rsvd_csibuf_addr = cur_pg_addr;
		cur_pg_addr -= RSVD_PG_FW_TXBUF_NUM;
		fifo->rsvd_fw_txbuf_addr = cur_pg_addr;
		cur_pg_addr -= RSVD_PG_CPU_INSTRUCTION_NUM;
		fifo->rsvd_cpu_instr_addr = cur_pg_addr;
		cur_pg_addr -= RSVD_PG_H2CQ_NUM;
		fifo->rsvd_h2cq_addr = cur_pg_addr;
		cur_pg_addr -= RSVD_PG_H2C_STATICINFO_NUM;
		fifo->rsvd_h2c_sta_info_addr = cur_pg_addr;
		cur_pg_addr -= RSVD_PG_H2C_EXTRAINFO_NUM;
		fifo->rsvd_h2c_info_addr = cur_pg_addr;
	}
	cur_pg_addr -= fifo->rsvd_drv_pg_num;
	fifo->rsvd_drv_addr = cur_pg_addr;

	if (fifo->rsvd_boundary != fifo->rsvd_drv_addr) {
		rtw_err(rtwdev, "wrong rsvd driver address\n");
		return -EINVAL;
	}

	return 0;
}

static int __priority_queue_cfg(struct rtw_dev *rtwdev,
				const struct rtw_page_table *pg_tbl,
				u16 pubq_num)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_fifo_conf *fifo = &rtwdev->fifo;

	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_1, pg_tbl->hq_num);
	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_2, pg_tbl->lq_num);
	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_3, pg_tbl->nq_num);
	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_4, pg_tbl->exq_num);
	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_5, pubq_num);
	rtw_write32_set(rtwdev, REG_RQPN_CTRL_2, BIT_LD_RQPN);

	rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2, fifo->rsvd_boundary);
	rtw_write8_set(rtwdev, REG_FWHW_TXQ_CTRL + 2, BIT_EN_WR_FREE_TAIL >> 16);

	rtw_write16(rtwdev, REG_BCNQ_BDNY_V1, fifo->rsvd_boundary);
	rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2 + 2, fifo->rsvd_boundary);
	rtw_write16(rtwdev, REG_BCNQ1_BDNY_V1, fifo->rsvd_boundary);
	rtw_write32(rtwdev, REG_RXFF_BNDY, chip->rxff_size - C2H_PKT_BUF - 1);
	rtw_write8_set(rtwdev, REG_AUTO_LLT_V1, BIT_AUTO_INIT_LLT_V1);

	if (!check_hw_ready(rtwdev, REG_AUTO_LLT_V1, BIT_AUTO_INIT_LLT_V1, 0))
		return -EBUSY;

	rtw_write8(rtwdev, REG_CR + 3, 0);

	return 0;
}

static int __priority_queue_cfg_legacy(struct rtw_dev *rtwdev,
				       const struct rtw_page_table *pg_tbl,
				       u16 pubq_num)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_fifo_conf *fifo = &rtwdev->fifo;
	u32 val32;

	val32 = BIT_RQPN_NE(pg_tbl->nq_num, pg_tbl->exq_num);
	rtw_write32(rtwdev, REG_RQPN_NPQ, val32);
	val32 = BIT_RQPN_HLP(pg_tbl->hq_num, pg_tbl->lq_num, pubq_num);
	rtw_write32(rtwdev, REG_RQPN, val32);

	rtw_write8(rtwdev, REG_TRXFF_BNDY, fifo->rsvd_boundary);
	rtw_write16(rtwdev, REG_TRXFF_BNDY + 2, chip->rxff_size - REPORT_BUF - 1);
	rtw_write8(rtwdev, REG_DWBCN0_CTRL + 1, fifo->rsvd_boundary);
	rtw_write8(rtwdev, REG_BCNQ_BDNY, fifo->rsvd_boundary);
	rtw_write8(rtwdev, REG_MGQ_BDNY, fifo->rsvd_boundary);
	rtw_write8(rtwdev, REG_WMAC_LBK_BF_HD, fifo->rsvd_boundary);

	rtw_write32_set(rtwdev, REG_AUTO_LLT, BIT_AUTO_INIT_LLT);

	if (!check_hw_ready(rtwdev, REG_AUTO_LLT, BIT_AUTO_INIT_LLT, 0))
		return -EBUSY;

	return 0;
}

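/* Distribute the TX FIFO pages among the priority queues: pick the page
 * table matching the host interface (and, for USB, the number of bulk-out
 * endpoints), hand the remaining AC-queue pages to the public pool and
 * program the result through the 11ac or legacy register layout.
 */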
static int priority_queue_cfg(struct rtw_dev *rtwdev)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_fifo_conf *fifo = &rtwdev->fifo;
	const struct rtw_page_table *pg_tbl = NULL;
	u16 pubq_num;
	int ret;

	ret = set_trx_fifo_info(rtwdev);
	if (ret)
		return ret;

	switch (rtw_hci_type(rtwdev)) {
	case RTW_HCI_TYPE_PCIE:
		pg_tbl = &chip->page_table[1];
		break;
	case RTW_HCI_TYPE_USB:
		if (rtwdev->hci.bulkout_num == 2)
			pg_tbl = &chip->page_table[2];
		else if (rtwdev->hci.bulkout_num == 3)
			pg_tbl = &chip->page_table[3];
		else if (rtwdev->hci.bulkout_num == 4)
			pg_tbl = &chip->page_table[4];
		else
			return -EINVAL;
		break;
	case RTW_HCI_TYPE_SDIO:
		pg_tbl = &chip->page_table[0];
		break;
	default:
		return -EINVAL;
	}

	pubq_num = fifo->acq_pg_num - pg_tbl->hq_num - pg_tbl->lq_num -
		   pg_tbl->nq_num - pg_tbl->exq_num - pg_tbl->gapq_num;
	if (rtw_chip_wcpu_11n(rtwdev))
		return __priority_queue_cfg_legacy(rtwdev, pg_tbl, pubq_num);
	else
		return __priority_queue_cfg(rtwdev, pg_tbl, pubq_num);
}

static int init_h2c(struct rtw_dev *rtwdev)
{
	struct rtw_fifo_conf *fifo = &rtwdev->fifo;
	u8 value8;
	u32 value32;
	u32 h2cq_addr;
	u32 h2cq_size;
	u32 h2cq_free;
	u32 wp, rp;

	if (rtw_chip_wcpu_11n(rtwdev))
		return 0;

	h2cq_addr = fifo->rsvd_h2cq_addr << TX_PAGE_SIZE_SHIFT;
	h2cq_size = RSVD_PG_H2CQ_NUM << TX_PAGE_SIZE_SHIFT;

	value32 = rtw_read32(rtwdev, REG_H2C_HEAD);
	value32 = (value32 & 0xFFFC0000) | h2cq_addr;
	rtw_write32(rtwdev, REG_H2C_HEAD, value32);

	value32 = rtw_read32(rtwdev, REG_H2C_READ_ADDR);
	value32 = (value32 & 0xFFFC0000) | h2cq_addr;
	rtw_write32(rtwdev, REG_H2C_READ_ADDR, value32);

	value32 = rtw_read32(rtwdev, REG_H2C_TAIL);
	value32 &= 0xFFFC0000;
	value32 |= (h2cq_addr + h2cq_size);
	rtw_write32(rtwdev, REG_H2C_TAIL, value32);

	value8 = rtw_read8(rtwdev, REG_H2C_INFO);
	value8 = (u8)((value8 & 0xFC) | 0x01);
	rtw_write8(rtwdev, REG_H2C_INFO, value8);

	value8 = rtw_read8(rtwdev, REG_H2C_INFO);
	value8 = (u8)((value8 & 0xFB) | 0x04);
	rtw_write8(rtwdev, REG_H2C_INFO, value8);

	value8 = rtw_read8(rtwdev, REG_TXDMA_OFFSET_CHK + 1);
	value8 = (u8)((value8 & 0x7f) | 0x80);
	rtw_write8(rtwdev, REG_TXDMA_OFFSET_CHK + 1, value8);

	wp = rtw_read32(rtwdev, REG_H2C_PKT_WRITEADDR) & 0x3FFFF;
	rp = rtw_read32(rtwdev, REG_H2C_PKT_READADDR) & 0x3FFFF;
	h2cq_free = wp >= rp ? h2cq_size - (wp - rp) : rp - wp;

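	/* A freshly programmed H2C queue is expected to be completely empty,
	 * i.e. its whole size reported as free; bail out if the hardware
	 * read/write pointers say otherwise.
	 */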
	if (h2cq_size != h2cq_free) {
		rtw_err(rtwdev, "H2C queue mismatch\n");
		return -EINVAL;
	}

	return 0;
}

static int rtw_init_trx_cfg(struct rtw_dev *rtwdev)
{
	int ret;

	ret = txdma_queue_mapping(rtwdev);
	if (ret)
		return ret;

	ret = priority_queue_cfg(rtwdev);
	if (ret)
		return ret;

	ret = init_h2c(rtwdev);
	if (ret)
		return ret;

	return 0;
}

static int rtw_drv_info_cfg(struct rtw_dev *rtwdev)
{
	u8 value8;

	rtw_write8(rtwdev, REG_RX_DRVINFO_SZ, PHY_STATUS_SIZE);
	if (rtw_chip_wcpu_11ac(rtwdev)) {
		value8 = rtw_read8(rtwdev, REG_TRXFF_BNDY + 1);
		value8 &= 0xF0;
		/* For rxdesc len = 0 issue */
		value8 |= 0xF;
		rtw_write8(rtwdev, REG_TRXFF_BNDY + 1, value8);
	}
	rtw_write32_set(rtwdev, REG_RCR, BIT_APP_PHYSTS);
	rtw_write32_clr(rtwdev, REG_WMAC_OPTION_FUNCTION + 4, BIT(8) | BIT(9));

	return 0;
}

int rtw_mac_init(struct rtw_dev *rtwdev)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	int ret;

	ret = rtw_init_trx_cfg(rtwdev);
	if (ret)
		return ret;

	ret = chip->ops->mac_init(rtwdev);
	if (ret)
		return ret;

	ret = rtw_drv_info_cfg(rtwdev);
	if (ret)
		return ret;

	rtw_hci_interface_cfg(rtwdev);

	return 0;
}