Lines matching refs: hba (drivers/ufs/host/ufs-mediatek.c)

31 static int ufs_mtk_config_mcq(struct ufs_hba *hba, bool irq);
94 static bool ufs_mtk_is_boost_crypt_enabled(struct ufs_hba *hba) in ufs_mtk_is_boost_crypt_enabled() argument
96 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_is_boost_crypt_enabled()
101 static bool ufs_mtk_is_va09_supported(struct ufs_hba *hba) in ufs_mtk_is_va09_supported() argument
103 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_is_va09_supported()
108 static bool ufs_mtk_is_broken_vcc(struct ufs_hba *hba) in ufs_mtk_is_broken_vcc() argument
110 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_is_broken_vcc()
115 static bool ufs_mtk_is_pmc_via_fastauto(struct ufs_hba *hba) in ufs_mtk_is_pmc_via_fastauto() argument
117 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_is_pmc_via_fastauto()
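
The four predicates above share one shape: fetch the MediaTek private data that ufs_mtk_init() stashed in hba->priv, then test a capability bit. A minimal sketch of that pattern, with struct ufs_mtk_host trimmed to the one field it needs (the real layout lives in ufs-mediatek.h):

#include <ufs/ufshcd.h>         /* struct ufs_hba, ufshcd_get_variant() */

/* cut-down stand-in for the driver's struct ufs_mtk_host */
struct ufs_mtk_host_sketch {
        u32 caps;               /* UFS_MTK_CAP_* bits */
};

static bool ufs_mtk_has_cap_sketch(struct ufs_hba *hba, u32 cap)
{
        /* hba->priv was installed by ufshcd_set_variant() at probe */
        struct ufs_mtk_host_sketch *host = ufshcd_get_variant(hba);

        return !!(host->caps & cap);
}
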
122 static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable) in ufs_mtk_cfg_unipro_cg() argument
127 ufshcd_dme_get(hba, in ufs_mtk_cfg_unipro_cg()
133 ufshcd_dme_set(hba, in ufs_mtk_cfg_unipro_cg()
136 ufshcd_dme_get(hba, in ufs_mtk_cfg_unipro_cg()
139 ufshcd_dme_set(hba, in ufs_mtk_cfg_unipro_cg()
142 ufshcd_dme_get(hba, in ufs_mtk_cfg_unipro_cg()
147 ufshcd_dme_set(hba, in ufs_mtk_cfg_unipro_cg()
150 ufshcd_dme_get(hba, in ufs_mtk_cfg_unipro_cg()
153 ufshcd_dme_set(hba, in ufs_mtk_cfg_unipro_cg()
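
ufs_mtk_cfg_unipro_cg() is a run of ufshcd_dme_get()/ufshcd_dme_set() pairs: each reads a vendor UniPro attribute, flips clock-gating bits, and writes it back. A hedged sketch of one such read-modify-write step; the attribute and bit values are stand-ins for the driver's VS_* definitions:

/* assumes <ufs/ufshcd.h> and <ufs/ufshci.h> for UIC_ARG_MIB() */
static int dme_update_bits_sketch(struct ufs_hba *hba, u32 attr,
                                  u32 bits, bool enable)
{
        u32 tmp;
        int ret;

        ret = ufshcd_dme_get(hba, UIC_ARG_MIB(attr), &tmp);
        if (ret)
                return ret;

        if (enable)
                tmp |= bits;
        else
                tmp &= ~bits;

        return ufshcd_dme_set(hba, UIC_ARG_MIB(attr), tmp);
}
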
158 static void ufs_mtk_crypto_enable(struct ufs_hba *hba) in ufs_mtk_crypto_enable() argument
164 dev_info(hba->dev, "%s: crypto enable failed, err: %lu\n", in ufs_mtk_crypto_enable()
166 hba->caps &= ~UFSHCD_CAP_CRYPTO; in ufs_mtk_crypto_enable()
170 static void ufs_mtk_host_reset(struct ufs_hba *hba) in ufs_mtk_host_reset() argument
172 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_host_reset()
185 static void ufs_mtk_init_reset_control(struct ufs_hba *hba, in ufs_mtk_init_reset_control() argument
189 *rc = devm_reset_control_get(hba->dev, str); in ufs_mtk_init_reset_control()
191 dev_info(hba->dev, "Failed to get reset control %s: %ld\n", in ufs_mtk_init_reset_control()
197 static void ufs_mtk_init_reset(struct ufs_hba *hba) in ufs_mtk_init_reset() argument
199 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_init_reset()
201 ufs_mtk_init_reset_control(hba, &host->hci_reset, in ufs_mtk_init_reset()
203 ufs_mtk_init_reset_control(hba, &host->unipro_reset, in ufs_mtk_init_reset()
205 ufs_mtk_init_reset_control(hba, &host->crypto_reset, in ufs_mtk_init_reset()
209 static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba, in ufs_mtk_hce_enable_notify() argument
212 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_hce_enable_notify()
216 hba->vps->hba_enable_delay_us = 0; in ufs_mtk_hce_enable_notify()
218 hba->vps->hba_enable_delay_us = 600; in ufs_mtk_hce_enable_notify()
219 ufs_mtk_host_reset(hba); in ufs_mtk_hce_enable_notify()
222 if (hba->caps & UFSHCD_CAP_CRYPTO) in ufs_mtk_hce_enable_notify()
223 ufs_mtk_crypto_enable(hba); in ufs_mtk_hce_enable_notify()
226 ufshcd_writel(hba, 0, in ufs_mtk_hce_enable_notify()
228 hba->capabilities &= ~MASK_AUTO_HIBERN8_SUPPORT; in ufs_mtk_hce_enable_notify()
229 hba->ahit = 0; in ufs_mtk_hce_enable_notify()
236 ufshcd_writel(hba, in ufs_mtk_hce_enable_notify()
237 ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) | 0x80, in ufs_mtk_hce_enable_notify()
244 static int ufs_mtk_bind_mphy(struct ufs_hba *hba) in ufs_mtk_bind_mphy() argument
246 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_bind_mphy()
247 struct device *dev = hba->dev; in ufs_mtk_bind_mphy()
282 static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on) in ufs_mtk_setup_ref_clk() argument
284 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_setup_ref_clk()
295 ufshcd_writel(hba, REFCLK_REQUEST, REG_UFS_REFCLK_CTRL); in ufs_mtk_setup_ref_clk()
298 ufshcd_writel(hba, REFCLK_RELEASE, REG_UFS_REFCLK_CTRL); in ufs_mtk_setup_ref_clk()
305 value = ufshcd_readl(hba, REG_UFS_REFCLK_CTRL); in ufs_mtk_setup_ref_clk()
314 dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value); in ufs_mtk_setup_ref_clk()
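
Lines 295-314 outline a request/ack handshake on REG_UFS_REFCLK_CTRL: write REFCLK_REQUEST (or REFCLK_RELEASE), then poll until the controller's ack bit follows, logging the "missing ack" error on timeout. A sketch, assuming REFCLK_ACK and the poll budget stand in for the ufs-mediatek.h definitions:

#include <linux/delay.h>        /* usleep_range() */
#include <linux/ktime.h>

static int refclk_handshake_sketch(struct ufs_hba *hba, bool on)
{
        ktime_t timeout = ktime_add_us(ktime_get(), 300); /* assumed budget */
        u32 value;

        ufshcd_writel(hba, on ? REFCLK_REQUEST : REFCLK_RELEASE,
                      REG_UFS_REFCLK_CTRL);

        do {
                value = ufshcd_readl(hba, REG_UFS_REFCLK_CTRL);
                /* done once the ack bit matches the request */
                if (!!(value & REFCLK_ACK) == on)
                        return 0;
                usleep_range(100, 200);
        } while (ktime_before(ktime_get(), timeout));

        dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value);
        return -ETIMEDOUT;
}
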
330 static void ufs_mtk_setup_ref_clk_wait_us(struct ufs_hba *hba, in ufs_mtk_setup_ref_clk_wait_us() argument
333 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_setup_ref_clk_wait_us()
335 if (hba->dev_info.clk_gating_wait_us) { in ufs_mtk_setup_ref_clk_wait_us()
337 hba->dev_info.clk_gating_wait_us; in ufs_mtk_setup_ref_clk_wait_us()
345 static void ufs_mtk_dbg_sel(struct ufs_hba *hba) in ufs_mtk_dbg_sel() argument
347 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_dbg_sel()
350 ufshcd_writel(hba, 0x820820, REG_UFS_DEBUG_SEL); in ufs_mtk_dbg_sel()
351 ufshcd_writel(hba, 0x0, REG_UFS_DEBUG_SEL_B0); in ufs_mtk_dbg_sel()
352 ufshcd_writel(hba, 0x55555555, REG_UFS_DEBUG_SEL_B1); in ufs_mtk_dbg_sel()
353 ufshcd_writel(hba, 0xaaaaaaaa, REG_UFS_DEBUG_SEL_B2); in ufs_mtk_dbg_sel()
354 ufshcd_writel(hba, 0xffffffff, REG_UFS_DEBUG_SEL_B3); in ufs_mtk_dbg_sel()
356 ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL); in ufs_mtk_dbg_sel()
360 static void ufs_mtk_wait_idle_state(struct ufs_hba *hba, in ufs_mtk_wait_idle_state() argument
376 ufs_mtk_dbg_sel(hba); in ufs_mtk_wait_idle_state()
377 val = ufshcd_readl(hba, REG_UFS_PROBE); in ufs_mtk_wait_idle_state()
397 dev_info(hba->dev, "wait idle tmo: 0x%x\n", val); in ufs_mtk_wait_idle_state()
400 static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state, in ufs_mtk_wait_link_state() argument
409 ufs_mtk_dbg_sel(hba); in ufs_mtk_wait_link_state()
410 val = ufshcd_readl(hba, REG_UFS_PROBE); in ufs_mtk_wait_link_state()
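
Both wait helpers poll the same way: reprogram the debug mux via ufs_mtk_dbg_sel(), sample REG_UFS_PROBE, and compare the link-state field until it matches or the deadline passes. A sketch; the field mask is an assumption, not the documented layout:

static int wait_probe_state_sketch(struct ufs_hba *hba, u32 state,
                                   unsigned long max_ms)
{
        ktime_t timeout = ktime_add_ms(ktime_get(), max_ms);
        u32 val;

        do {
                ufs_mtk_dbg_sel(hba);
                val = ufshcd_readl(hba, REG_UFS_PROBE);
                if ((val & 0x1f) == state)      /* assumed state field */
                        return 0;
                usleep_range(100, 200);
        } while (ktime_before(ktime_get(), timeout));

        return -ETIMEDOUT;
}
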
423 static int ufs_mtk_mphy_power_on(struct ufs_hba *hba, bool on) in ufs_mtk_mphy_power_on() argument
425 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_mphy_power_on()
434 if (ufs_mtk_is_va09_supported(hba)) { in ufs_mtk_mphy_power_on()
445 if (ufs_mtk_is_va09_supported(hba)) { in ufs_mtk_mphy_power_on()
452 dev_info(hba->dev, in ufs_mtk_mphy_power_on()
478 static void ufs_mtk_boost_crypt(struct ufs_hba *hba, bool boost) in ufs_mtk_boost_crypt() argument
480 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_boost_crypt()
485 if (!ufs_mtk_is_boost_crypt_enabled(hba)) in ufs_mtk_boost_crypt()
494 dev_info(hba->dev, "clk_prepare_enable(): %d\n", in ufs_mtk_boost_crypt()
502 dev_info(hba->dev, in ufs_mtk_boost_crypt()
510 dev_info(hba->dev, in ufs_mtk_boost_crypt()
519 dev_info(hba->dev, in ufs_mtk_boost_crypt()
526 dev_info(hba->dev, in ufs_mtk_boost_crypt()
534 static int ufs_mtk_init_host_clk(struct ufs_hba *hba, const char *name, in ufs_mtk_init_host_clk() argument
539 ret = ufs_mtk_get_host_clk(hba->dev, name, clk); in ufs_mtk_init_host_clk()
541 dev_info(hba->dev, "%s: failed to get %s: %d", __func__, in ufs_mtk_init_host_clk()
548 static void ufs_mtk_init_boost_crypt(struct ufs_hba *hba) in ufs_mtk_init_boost_crypt() argument
550 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_init_boost_crypt()
552 struct device *dev = hba->dev; in ufs_mtk_init_boost_crypt()
575 if (ufs_mtk_init_host_clk(hba, "crypt_mux", in ufs_mtk_init_boost_crypt()
579 if (ufs_mtk_init_host_clk(hba, "crypt_lp", in ufs_mtk_init_boost_crypt()
583 if (ufs_mtk_init_host_clk(hba, "crypt_perf", in ufs_mtk_init_boost_crypt()
595 static void ufs_mtk_init_va09_pwr_ctrl(struct ufs_hba *hba) in ufs_mtk_init_va09_pwr_ctrl() argument
597 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_init_va09_pwr_ctrl()
599 host->reg_va09 = regulator_get(hba->dev, "va09"); in ufs_mtk_init_va09_pwr_ctrl()
601 dev_info(hba->dev, "failed to get va09"); in ufs_mtk_init_va09_pwr_ctrl()
606 static void ufs_mtk_init_host_caps(struct ufs_hba *hba) in ufs_mtk_init_host_caps() argument
608 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_init_host_caps()
609 struct device_node *np = hba->dev->of_node; in ufs_mtk_init_host_caps()
612 ufs_mtk_init_boost_crypt(hba); in ufs_mtk_init_host_caps()
615 ufs_mtk_init_va09_pwr_ctrl(hba); in ufs_mtk_init_host_caps()
626 dev_info(hba->dev, "caps: 0x%x", host->caps); in ufs_mtk_init_host_caps()
629 static void ufs_mtk_boost_pm_qos(struct ufs_hba *hba, bool boost) in ufs_mtk_boost_pm_qos() argument
631 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_boost_pm_qos()
640 static void ufs_mtk_scale_perf(struct ufs_hba *hba, bool scale_up) in ufs_mtk_scale_perf() argument
642 ufs_mtk_boost_crypt(hba, scale_up); in ufs_mtk_scale_perf()
643 ufs_mtk_boost_pm_qos(hba, scale_up); in ufs_mtk_scale_perf()
646 static void ufs_mtk_pwr_ctrl(struct ufs_hba *hba, bool on) in ufs_mtk_pwr_ctrl() argument
648 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_pwr_ctrl()
652 ufs_mtk_setup_ref_clk(hba, on); in ufs_mtk_pwr_ctrl()
653 if (!ufshcd_is_clkscaling_supported(hba)) in ufs_mtk_pwr_ctrl()
654 ufs_mtk_scale_perf(hba, on); in ufs_mtk_pwr_ctrl()
656 if (!ufshcd_is_clkscaling_supported(hba)) in ufs_mtk_pwr_ctrl()
657 ufs_mtk_scale_perf(hba, on); in ufs_mtk_pwr_ctrl()
658 ufs_mtk_setup_ref_clk(hba, on); in ufs_mtk_pwr_ctrl()
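
Lines 652-658 show ufs_mtk_pwr_ctrl() running the same two calls in opposite orders. Made explicit (assuming the elided branch condition is simply `on`): power-up raises the reference clock before boosting performance, power-down drops the boost before releasing the clock:

static void pwr_ctrl_order_sketch(struct ufs_hba *hba, bool on)
{
        if (on) {
                ufs_mtk_setup_ref_clk(hba, true);
                if (!ufshcd_is_clkscaling_supported(hba))
                        ufs_mtk_scale_perf(hba, true);
        } else {
                if (!ufshcd_is_clkscaling_supported(hba))
                        ufs_mtk_scale_perf(hba, false);
                ufs_mtk_setup_ref_clk(hba, false);
        }
}
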
671 static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on, in ufs_mtk_setup_clocks() argument
674 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_setup_clocks()
687 if (ufshcd_is_link_off(hba)) { in ufs_mtk_setup_clocks()
689 } else if (ufshcd_is_link_hibern8(hba) || in ufs_mtk_setup_clocks()
690 (!ufshcd_can_hibern8_during_gating(hba) && in ufs_mtk_setup_clocks()
691 ufshcd_is_auto_hibern8_enabled(hba))) { in ufs_mtk_setup_clocks()
697 ret = ufs_mtk_wait_link_state(hba, in ufs_mtk_setup_clocks()
705 ufs_mtk_pwr_ctrl(hba, false); in ufs_mtk_setup_clocks()
707 ufs_mtk_pwr_ctrl(hba, true); in ufs_mtk_setup_clocks()
713 static void ufs_mtk_get_controller_version(struct ufs_hba *hba) in ufs_mtk_get_controller_version() argument
715 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_get_controller_version()
724 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_LOCALVERINFO), &ver); in ufs_mtk_get_controller_version()
732 if (hba->ufs_version < ufshci_version(3, 0)) in ufs_mtk_get_controller_version()
733 hba->ufs_version = ufshci_version(3, 0); in ufs_mtk_get_controller_version()
738 static u32 ufs_mtk_get_ufs_hci_version(struct ufs_hba *hba) in ufs_mtk_get_ufs_hci_version() argument
740 return hba->ufs_version; in ufs_mtk_get_ufs_hci_version()
748 static void ufs_mtk_init_clocks(struct ufs_hba *hba) in ufs_mtk_init_clocks() argument
750 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_init_clocks()
751 struct list_head *head = &hba->clk_list_head; in ufs_mtk_init_clocks()
776 hba->caps &= ~UFSHCD_CAP_CLK_SCALING; in ufs_mtk_init_clocks()
777 dev_info(hba->dev, in ufs_mtk_init_clocks()
784 static int ufs_mtk_vreg_fix_vcc(struct ufs_hba *hba) in ufs_mtk_vreg_fix_vcc() argument
786 struct ufs_vreg_info *info = &hba->vreg_info; in ufs_mtk_vreg_fix_vcc()
787 struct device_node *np = hba->dev->of_node; in ufs_mtk_vreg_fix_vcc()
788 struct device *dev = hba->dev; in ufs_mtk_vreg_fix_vcc()
793 if (hba->vreg_info.vcc) in ufs_mtk_vreg_fix_vcc()
803 ver = (hba->dev_info.wspecversion & 0xF00) >> 8; in ufs_mtk_vreg_fix_vcc()
826 static void ufs_mtk_vreg_fix_vccqx(struct ufs_hba *hba) in ufs_mtk_vreg_fix_vccqx() argument
828 struct ufs_vreg_info *info = &hba->vreg_info; in ufs_mtk_vreg_fix_vccqx()
831 if (hba->dev_info.wspecversion >= 0x0300) { in ufs_mtk_vreg_fix_vccqx()
844 devm_kfree(hba->dev, (*vreg_off)->name); in ufs_mtk_vreg_fix_vccqx()
845 devm_kfree(hba->dev, *vreg_off); in ufs_mtk_vreg_fix_vccqx()
850 static void ufs_mtk_init_mcq_irq(struct ufs_hba *hba) in ufs_mtk_init_mcq_irq() argument
852 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_init_mcq_irq()
858 pdev = container_of(hba->dev, struct platform_device, dev); in ufs_mtk_init_mcq_irq()
867 host->mcq_intr_info[i].hba = hba; in ufs_mtk_init_mcq_irq()
869 dev_info(hba->dev, "get platform mcq irq: %d, %d\n", i, irq); in ufs_mtk_init_mcq_irq()
891 static int ufs_mtk_init(struct ufs_hba *hba) in ufs_mtk_init() argument
894 struct device *dev = hba->dev; in ufs_mtk_init()
905 host->hba = hba; in ufs_mtk_init()
906 ufshcd_set_variant(hba, host); in ufs_mtk_init()
915 ufs_mtk_init_host_caps(hba); in ufs_mtk_init()
917 ufs_mtk_init_mcq_irq(hba); in ufs_mtk_init()
919 err = ufs_mtk_bind_mphy(hba); in ufs_mtk_init()
923 ufs_mtk_init_reset(hba); in ufs_mtk_init()
926 hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND; in ufs_mtk_init()
929 hba->caps |= UFSHCD_CAP_CLK_GATING; in ufs_mtk_init()
932 hba->caps |= UFSHCD_CAP_CRYPTO; in ufs_mtk_init()
935 hba->caps |= UFSHCD_CAP_WB_EN; in ufs_mtk_init()
938 hba->caps |= UFSHCD_CAP_CLK_SCALING; in ufs_mtk_init()
940 hba->quirks |= UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL; in ufs_mtk_init()
941 hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_INTR; in ufs_mtk_init()
942 hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_RTC; in ufs_mtk_init()
943 hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80); in ufs_mtk_init()
946 hba->caps |= UFSHCD_CAP_HIBERN8_WITH_CLK_GATING; in ufs_mtk_init()
948 ufs_mtk_init_clocks(hba); in ufs_mtk_init()
957 ufs_mtk_mphy_power_on(hba, true); in ufs_mtk_init()
958 ufs_mtk_setup_clocks(hba, true, POST_CHANGE); in ufs_mtk_init()
960 host->ip_ver = ufshcd_readl(hba, REG_UFS_MTK_IP_VER); in ufs_mtk_init()
969 ufshcd_set_variant(hba, NULL); in ufs_mtk_init()
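
The ufs_mtk_init() matches sketch the whole probe flow: allocate and install the variant data, gather DT capabilities and MCQ IRQs, bind the MPHY, then layer on UFSHCD_CAP_*/quirk bits before powering things up. A trimmed reconstruction; error unwinding and the conditional cap checks are omitted, and it assumes the driver's ufs-mediatek.h for struct ufs_mtk_host:

static int mtk_init_sketch(struct ufs_hba *hba)
{
        struct ufs_mtk_host *host;

        host = devm_kzalloc(hba->dev, sizeof(*host), GFP_KERNEL);
        if (!host)
                return -ENOMEM;
        host->hba = hba;
        ufshcd_set_variant(hba, host);

        ufs_mtk_init_host_caps(hba);
        ufs_mtk_init_mcq_irq(hba);
        ufs_mtk_bind_mphy(hba);
        ufs_mtk_init_reset(hba);

        hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND | UFSHCD_CAP_CLK_GATING;
        hba->quirks |= UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL |
                       UFSHCD_QUIRK_MCQ_BROKEN_INTR |
                       UFSHCD_QUIRK_MCQ_BROKEN_RTC;

        ufs_mtk_init_clocks(hba);
        ufs_mtk_mphy_power_on(hba, true);
        ufs_mtk_setup_clocks(hba, true, POST_CHANGE);

        host->ip_ver = ufshcd_readl(hba, REG_UFS_MTK_IP_VER);
        return 0;
}
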
974 static bool ufs_mtk_pmc_via_fastauto(struct ufs_hba *hba, in ufs_mtk_pmc_via_fastauto() argument
977 if (!ufs_mtk_is_pmc_via_fastauto(hba)) in ufs_mtk_pmc_via_fastauto()
980 if (dev_req_params->hs_rate == hba->pwr_info.hs_rate) in ufs_mtk_pmc_via_fastauto()
994 static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba, in ufs_mtk_pre_pwr_change() argument
998 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_pre_pwr_change()
1014 if (ufs_mtk_pmc_via_fastauto(hba, dev_req_params)) { in ufs_mtk_pre_pwr_change()
1015 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true); in ufs_mtk_pre_pwr_change()
1016 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), UFS_HS_G1); in ufs_mtk_pre_pwr_change()
1018 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), true); in ufs_mtk_pre_pwr_change()
1019 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), UFS_HS_G1); in ufs_mtk_pre_pwr_change()
1021 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES), in ufs_mtk_pre_pwr_change()
1023 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES), in ufs_mtk_pre_pwr_change()
1025 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES), in ufs_mtk_pre_pwr_change()
1028 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXHSADAPTTYPE), in ufs_mtk_pre_pwr_change()
1031 ret = ufshcd_uic_change_pwr_mode(hba, in ufs_mtk_pre_pwr_change()
1035 dev_err(hba->dev, "%s: HSG1B FASTAUTO failed ret=%d\n", in ufs_mtk_pre_pwr_change()
1041 ret = ufshcd_dme_configure_adapt(hba, in ufs_mtk_pre_pwr_change()
1049 static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba, in ufs_mtk_pwr_change_notify() argument
1058 ret = ufs_mtk_pre_pwr_change(hba, dev_max_params, in ufs_mtk_pwr_change_notify()
1071 static int ufs_mtk_unipro_set_lpm(struct ufs_hba *hba, bool lpm) in ufs_mtk_unipro_set_lpm() argument
1074 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_unipro_set_lpm()
1076 ret = ufshcd_dme_set(hba, in ufs_mtk_unipro_set_lpm()
1091 static int ufs_mtk_pre_link(struct ufs_hba *hba) in ufs_mtk_pre_link() argument
1096 ufs_mtk_get_controller_version(hba); in ufs_mtk_pre_link()
1098 ret = ufs_mtk_unipro_set_lpm(hba, false); in ufs_mtk_pre_link()
1107 ret = ufshcd_disable_host_tx_lcc(hba); in ufs_mtk_pre_link()
1112 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp); in ufs_mtk_pre_link()
1118 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp); in ufs_mtk_pre_link()
1123 static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba) in ufs_mtk_setup_clk_gating() argument
1127 if (ufshcd_is_clkgating_allowed(hba)) { in ufs_mtk_setup_clk_gating()
1128 if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit) in ufs_mtk_setup_clk_gating()
1130 hba->ahit); in ufs_mtk_setup_clk_gating()
1133 ufshcd_clkgate_delay_set(hba->dev, ah_ms + 5); in ufs_mtk_setup_clk_gating()
1137 static void ufs_mtk_post_link(struct ufs_hba *hba) in ufs_mtk_post_link() argument
1140 ufs_mtk_cfg_unipro_cg(hba, true); in ufs_mtk_post_link()
1143 if (ufshcd_is_auto_hibern8_supported(hba)) in ufs_mtk_post_link()
1144 hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) | in ufs_mtk_post_link()
1147 ufs_mtk_setup_clk_gating(hba); in ufs_mtk_post_link()
1150 static int ufs_mtk_link_startup_notify(struct ufs_hba *hba, in ufs_mtk_link_startup_notify() argument
1157 ret = ufs_mtk_pre_link(hba); in ufs_mtk_link_startup_notify()
1160 ufs_mtk_post_link(hba); in ufs_mtk_link_startup_notify()
1170 static int ufs_mtk_device_reset(struct ufs_hba *hba) in ufs_mtk_device_reset() argument
1175 ufshcd_hba_stop(hba); in ufs_mtk_device_reset()
1193 dev_info(hba->dev, "device reset done\n"); in ufs_mtk_device_reset()
1198 static int ufs_mtk_link_set_hpm(struct ufs_hba *hba) in ufs_mtk_link_set_hpm() argument
1202 err = ufshcd_hba_enable(hba); in ufs_mtk_link_set_hpm()
1206 err = ufs_mtk_unipro_set_lpm(hba, false); in ufs_mtk_link_set_hpm()
1210 err = ufshcd_uic_hibern8_exit(hba); in ufs_mtk_link_set_hpm()
1212 ufshcd_set_link_active(hba); in ufs_mtk_link_set_hpm()
1216 if (!hba->mcq_enabled) { in ufs_mtk_link_set_hpm()
1217 err = ufshcd_make_hba_operational(hba); in ufs_mtk_link_set_hpm()
1219 ufs_mtk_config_mcq(hba, false); in ufs_mtk_link_set_hpm()
1220 ufshcd_mcq_make_queues_operational(hba); in ufs_mtk_link_set_hpm()
1221 ufshcd_mcq_config_mac(hba, hba->nutrs); in ufs_mtk_link_set_hpm()
1223 ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_MEM_CFG) | 0x1, in ufs_mtk_link_set_hpm()
1233 static int ufs_mtk_link_set_lpm(struct ufs_hba *hba) in ufs_mtk_link_set_lpm() argument
1238 ufshcd_writel(hba, in ufs_mtk_link_set_lpm()
1239 (ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) & ~0x100), in ufs_mtk_link_set_lpm()
1242 err = ufs_mtk_unipro_set_lpm(hba, true); in ufs_mtk_link_set_lpm()
1245 ufs_mtk_unipro_set_lpm(hba, false); in ufs_mtk_link_set_lpm()
1252 static void ufs_mtk_vccqx_set_lpm(struct ufs_hba *hba, bool lpm) in ufs_mtk_vccqx_set_lpm() argument
1256 if (hba->vreg_info.vccq) in ufs_mtk_vccqx_set_lpm()
1257 vccqx = hba->vreg_info.vccq; in ufs_mtk_vccqx_set_lpm()
1259 vccqx = hba->vreg_info.vccq2; in ufs_mtk_vccqx_set_lpm()
1265 static void ufs_mtk_vsx_set_lpm(struct ufs_hba *hba, bool lpm) in ufs_mtk_vsx_set_lpm() argument
1270 (unsigned long)hba->dev_info.wspecversion, in ufs_mtk_vsx_set_lpm()
1274 static void ufs_mtk_dev_vreg_set_lpm(struct ufs_hba *hba, bool lpm) in ufs_mtk_dev_vreg_set_lpm() argument
1276 if (!hba->vreg_info.vccq && !hba->vreg_info.vccq2) in ufs_mtk_dev_vreg_set_lpm()
1280 if (!hba->vreg_info.vcc) in ufs_mtk_dev_vreg_set_lpm()
1284 if (lpm && ufshcd_is_ufs_dev_active(hba)) in ufs_mtk_dev_vreg_set_lpm()
1288 if (lpm && hba->vreg_info.vcc->enabled) in ufs_mtk_dev_vreg_set_lpm()
1292 ufs_mtk_vccqx_set_lpm(hba, lpm); in ufs_mtk_dev_vreg_set_lpm()
1293 ufs_mtk_vsx_set_lpm(hba, lpm); in ufs_mtk_dev_vreg_set_lpm()
1295 ufs_mtk_vsx_set_lpm(hba, lpm); in ufs_mtk_dev_vreg_set_lpm()
1296 ufs_mtk_vccqx_set_lpm(hba, lpm); in ufs_mtk_dev_vreg_set_lpm()
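
Lines 1292-1296 give both orderings of ufs_mtk_dev_vreg_set_lpm(): entering low-power mode drops vccqx before the vsx parent rails, and leaving reverses it, so the parent supply is always up while vccqx changes state. Assuming the elided branch condition is simply `lpm`:

static void dev_vreg_order_sketch(struct ufs_hba *hba, bool lpm)
{
        if (lpm) {
                ufs_mtk_vccqx_set_lpm(hba, true);
                ufs_mtk_vsx_set_lpm(hba, true);
        } else {
                ufs_mtk_vsx_set_lpm(hba, false);
                ufs_mtk_vccqx_set_lpm(hba, false);
        }
}
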
1300 static void ufs_mtk_auto_hibern8_disable(struct ufs_hba *hba) in ufs_mtk_auto_hibern8_disable() argument
1305 ufshcd_writel(hba, 0, REG_AUTO_HIBERNATE_IDLE_TIMER); in ufs_mtk_auto_hibern8_disable()
1308 ufs_mtk_wait_idle_state(hba, 5); in ufs_mtk_auto_hibern8_disable()
1310 ret = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100); in ufs_mtk_auto_hibern8_disable()
1312 dev_warn(hba->dev, "exit h8 state fail, ret=%d\n", ret); in ufs_mtk_auto_hibern8_disable()
1315 static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op, in ufs_mtk_suspend() argument
1322 if (ufshcd_is_auto_hibern8_supported(hba)) in ufs_mtk_suspend()
1323 ufs_mtk_auto_hibern8_disable(hba); in ufs_mtk_suspend()
1327 if (ufshcd_is_link_hibern8(hba)) { in ufs_mtk_suspend()
1328 err = ufs_mtk_link_set_lpm(hba); in ufs_mtk_suspend()
1333 if (!ufshcd_is_link_active(hba)) { in ufs_mtk_suspend()
1339 err = ufs_mtk_mphy_power_on(hba, false); in ufs_mtk_suspend()
1344 if (ufshcd_is_link_off(hba)) in ufs_mtk_suspend()
1356 ufshcd_set_link_off(hba); in ufs_mtk_suspend()
1360 static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) in ufs_mtk_resume() argument
1365 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) in ufs_mtk_resume()
1366 ufs_mtk_dev_vreg_set_lpm(hba, false); in ufs_mtk_resume()
1370 err = ufs_mtk_mphy_power_on(hba, true); in ufs_mtk_resume()
1374 if (ufshcd_is_link_hibern8(hba)) { in ufs_mtk_resume()
1375 err = ufs_mtk_link_set_hpm(hba); in ufs_mtk_resume()
1382 return ufshcd_link_recovery(hba); in ufs_mtk_resume()
1385 static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba) in ufs_mtk_dbg_register_dump() argument
1388 ufshcd_dump_regs(hba, REG_UFS_XOUFS_CTRL, 0x10, in ufs_mtk_dbg_register_dump()
1391 ufshcd_dump_regs(hba, REG_UFS_EXTREG, 0x4, "Ext Reg "); in ufs_mtk_dbg_register_dump()
1394 ufshcd_dump_regs(hba, REG_UFS_MPHYCTRL, in ufs_mtk_dbg_register_dump()
1399 ufs_mtk_dbg_sel(hba); in ufs_mtk_dbg_register_dump()
1400 ufshcd_dump_regs(hba, REG_UFS_PROBE, 0x4, "Debug Probe "); in ufs_mtk_dbg_register_dump()
1403 static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba) in ufs_mtk_apply_dev_quirks() argument
1405 struct ufs_dev_info *dev_info = &hba->dev_info; in ufs_mtk_apply_dev_quirks()
1409 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6); in ufs_mtk_apply_dev_quirks()
1410 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), 10); in ufs_mtk_apply_dev_quirks()
1419 ufs_mtk_setup_ref_clk_wait_us(hba, 1); in ufs_mtk_apply_dev_quirks()
1421 ufs_mtk_setup_ref_clk_wait_us(hba, 30); in ufs_mtk_apply_dev_quirks()
1423 ufs_mtk_setup_ref_clk_wait_us(hba, 100); in ufs_mtk_apply_dev_quirks()
1425 ufs_mtk_setup_ref_clk_wait_us(hba, in ufs_mtk_apply_dev_quirks()
1430 static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba) in ufs_mtk_fixup_dev_quirks() argument
1432 ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups); in ufs_mtk_fixup_dev_quirks()
1434 if (ufs_mtk_is_broken_vcc(hba) && hba->vreg_info.vcc && in ufs_mtk_fixup_dev_quirks()
1435 (hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)) { in ufs_mtk_fixup_dev_quirks()
1436 hba->vreg_info.vcc->always_on = true; in ufs_mtk_fixup_dev_quirks()
1441 hba->dev_quirks &= ~(UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM | in ufs_mtk_fixup_dev_quirks()
1445 ufs_mtk_vreg_fix_vcc(hba); in ufs_mtk_fixup_dev_quirks()
1446 ufs_mtk_vreg_fix_vccqx(hba); in ufs_mtk_fixup_dev_quirks()
1449 static void ufs_mtk_event_notify(struct ufs_hba *hba, in ufs_mtk_event_notify() argument
1460 dev_info(hba->dev, in ufs_mtk_event_notify()
1468 dev_info(hba->dev, "%s\n", ufs_uic_pa_err_str[bit]); in ufs_mtk_event_notify()
1473 dev_info(hba->dev, "%s\n", ufs_uic_dl_err_str[bit]); in ufs_mtk_event_notify()
1477 static void ufs_mtk_config_scaling_param(struct ufs_hba *hba, in ufs_mtk_config_scaling_param() argument
1482 hba->clk_scaling.min_gear = UFS_HS_G4; in ufs_mtk_config_scaling_param()
1484 hba->vps->devfreq_profile.polling_ms = 200; in ufs_mtk_config_scaling_param()
1485 hba->vps->ondemand_data.upthreshold = 50; in ufs_mtk_config_scaling_param()
1486 hba->vps->ondemand_data.downdifferential = 20; in ufs_mtk_config_scaling_param()
1501 static void ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up) in ufs_mtk_clk_scale() argument
1503 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_clk_scale()
1510 dev_info(hba->dev, in ufs_mtk_clk_scale()
1524 dev_info(hba->dev, in ufs_mtk_clk_scale()
1533 static int ufs_mtk_clk_scale_notify(struct ufs_hba *hba, bool scale_up, in ufs_mtk_clk_scale_notify() argument
1536 if (!ufshcd_is_clkscaling_supported(hba)) in ufs_mtk_clk_scale_notify()
1541 ufs_mtk_clk_scale(hba, scale_up); in ufs_mtk_clk_scale_notify()
1544 ufs_mtk_scale_perf(hba, scale_up); in ufs_mtk_clk_scale_notify()
1550 static int ufs_mtk_get_hba_mac(struct ufs_hba *hba) in ufs_mtk_get_hba_mac() argument
1555 static int ufs_mtk_op_runtime_config(struct ufs_hba *hba) in ufs_mtk_op_runtime_config() argument
1560 hba->mcq_opr[OPR_SQD].offset = REG_UFS_MTK_SQD; in ufs_mtk_op_runtime_config()
1561 hba->mcq_opr[OPR_SQIS].offset = REG_UFS_MTK_SQIS; in ufs_mtk_op_runtime_config()
1562 hba->mcq_opr[OPR_CQD].offset = REG_UFS_MTK_CQD; in ufs_mtk_op_runtime_config()
1563 hba->mcq_opr[OPR_CQIS].offset = REG_UFS_MTK_CQIS; in ufs_mtk_op_runtime_config()
1566 opr = &hba->mcq_opr[i]; in ufs_mtk_op_runtime_config()
1568 opr->base = hba->mmio_base + opr->offset; in ufs_mtk_op_runtime_config()
1574 static int ufs_mtk_mcq_config_resource(struct ufs_hba *hba) in ufs_mtk_mcq_config_resource() argument
1576 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_mcq_config_resource()
1580 dev_info(hba->dev, "IRQs not ready. MCQ disabled."); in ufs_mtk_mcq_config_resource()
1584 hba->mcq_base = hba->mmio_base + MCQ_QUEUE_OFFSET(hba->mcq_capabilities); in ufs_mtk_mcq_config_resource()
1591 struct ufs_hba *hba = mcq_intr_info->hba; in ufs_mtk_mcq_intr() local
1596 hwq = &hba->uhq[qid]; in ufs_mtk_mcq_intr()
1598 events = ufshcd_mcq_read_cqis(hba, qid); in ufs_mtk_mcq_intr()
1600 ufshcd_mcq_write_cqis(hba, events, qid); in ufs_mtk_mcq_intr()
1603 ufshcd_mcq_poll_cqe_lock(hba, hwq); in ufs_mtk_mcq_intr()
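
The ufs_mtk_mcq_intr() matches cover nearly the whole handler: map the per-queue interrupt back to its hardware queue, read and ack the completion-queue interrupt status, then reap completed entries. Reconstructed around those lines; the qid field and the event-bit name are assumptions, and the ufshcd_mcq_* helpers come from the core-private ufshcd-priv.h header:

#include <linux/interrupt.h>    /* irqreturn_t, IRQ_HANDLED */

static irqreturn_t mcq_intr_sketch(int irq, void *__intr_info)
{
        struct ufs_mtk_mcq_intr_info *mcq_intr_info = __intr_info;
        struct ufs_hba *hba = mcq_intr_info->hba;
        int qid = mcq_intr_info->qid;           /* assumed field */
        struct ufs_hw_queue *hwq = &hba->uhq[qid];
        u32 events;

        events = ufshcd_mcq_read_cqis(hba, qid);
        if (events)
                ufshcd_mcq_write_cqis(hba, events, qid);

        /* assumed event bit for "new tail entry pushed" */
        if (events & UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS)
                ufshcd_mcq_poll_cqe_lock(hba, hwq);

        return IRQ_HANDLED;
}
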
1608 static int ufs_mtk_config_mcq_irq(struct ufs_hba *hba) in ufs_mtk_config_mcq_irq() argument
1610 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_config_mcq_irq()
1617 dev_err(hba->dev, "invalid irq. %d\n", i); in ufs_mtk_config_mcq_irq()
1622 ret = devm_request_irq(hba->dev, irq, ufs_mtk_mcq_intr, 0, UFSHCD, in ufs_mtk_config_mcq_irq()
1625 dev_dbg(hba->dev, "request irq %d intr %s\n", irq, ret ? "failed" : ""); in ufs_mtk_config_mcq_irq()
1628 dev_err(hba->dev, "Cannot request irq %d\n", ret); in ufs_mtk_config_mcq_irq()
1636 static int ufs_mtk_config_mcq(struct ufs_hba *hba, bool irq) in ufs_mtk_config_mcq() argument
1638 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_config_mcq()
1643 ufshcd_rmwl(hba, MCQ_INTR_EN_MSK, 0, REG_UFS_MMIO_OPT_CTRL_0); in ufs_mtk_config_mcq()
1646 ret = ufs_mtk_config_mcq_irq(hba); in ufs_mtk_config_mcq()
1654 ufshcd_rmwl(hba, MCQ_AH8, MCQ_AH8, REG_UFS_MMIO_OPT_CTRL_0); in ufs_mtk_config_mcq()
1655 ufshcd_rmwl(hba, MCQ_INTR_EN_MSK, MCQ_MULTI_INTR_EN, REG_UFS_MMIO_OPT_CTRL_0); in ufs_mtk_config_mcq()
1660 static int ufs_mtk_config_esi(struct ufs_hba *hba) in ufs_mtk_config_esi() argument
1662 return ufs_mtk_config_mcq(hba, true); in ufs_mtk_config_esi()
1753 struct ufs_hba *hba = platform_get_drvdata(pdev); in ufs_mtk_remove() local
1756 ufshcd_remove(hba); in ufs_mtk_remove()
1763 struct ufs_hba *hba = dev_get_drvdata(dev); in ufs_mtk_system_suspend() local
1770 ufs_mtk_dev_vreg_set_lpm(hba, true); in ufs_mtk_system_suspend()
1777 struct ufs_hba *hba = dev_get_drvdata(dev); in ufs_mtk_system_resume() local
1779 ufs_mtk_dev_vreg_set_lpm(hba, false); in ufs_mtk_system_resume()
1788 struct ufs_hba *hba = dev_get_drvdata(dev); in ufs_mtk_runtime_suspend() local
1795 ufs_mtk_dev_vreg_set_lpm(hba, true); in ufs_mtk_runtime_suspend()
1802 struct ufs_hba *hba = dev_get_drvdata(dev); in ufs_mtk_runtime_resume() local
1804 ufs_mtk_dev_vreg_set_lpm(hba, false); in ufs_mtk_runtime_resume()