xref: /openbmc/linux/drivers/ufs/host/ufs-mediatek.c (revision 144679df)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 MediaTek Inc.
 * Authors:
 *	Stanley Chu <stanley.chu@mediatek.com>
 *	Peter Wang <peter.wang@mediatek.com>
 */

#include <linux/arm-smccc.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/soc/mediatek/mtk_sip_svc.h>

#include <ufs/ufshcd.h>
#include "ufshcd-pltfrm.h"
#include <ufs/ufs_quirks.h>
#include <ufs/unipro.h>
#include "ufs-mediatek.h"

#define CREATE_TRACE_POINTS
#include "ufs-mediatek-trace.h"

static const struct ufs_dev_quirk ufs_mtk_dev_fixups[] = {
	{ .wmanufacturerid = UFS_ANY_VENDOR,
	  .model = UFS_ANY_MODEL,
	  .quirk = UFS_DEVICE_QUIRK_DELAY_AFTER_LPM |
		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
	{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
	  .model = "H9HQ21AFAMZDAR",
	  .quirk = UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES },
	{}
};

static const struct of_device_id ufs_mtk_of_match[] = {
	{ .compatible = "mediatek,mt8183-ufshci" },
	{},
};

/*
 * Details of UIC Errors
 */
static const char *const ufs_uic_err_str[] = {
	"PHY Adapter Layer",
	"Data Link Layer",
	"Network Link Layer",
	"Transport Link Layer",
	"DME"
};

static const char *const ufs_uic_pa_err_str[] = {
	"PHY error on Lane 0",
	"PHY error on Lane 1",
	"PHY error on Lane 2",
	"PHY error on Lane 3",
	"Generic PHY Adapter Error. This should be the LINERESET indication"
};

static const char *const ufs_uic_dl_err_str[] = {
	"NAC_RECEIVED",
	"TCx_REPLAY_TIMER_EXPIRED",
	"AFCx_REQUEST_TIMER_EXPIRED",
	"FCx_PROTECTION_TIMER_EXPIRED",
	"CRC_ERROR",
	"RX_BUFFER_OVERFLOW",
	"MAX_FRAME_LENGTH_EXCEEDED",
	"WRONG_SEQUENCE_NUMBER",
	"AFC_FRAME_SYNTAX_ERROR",
	"NAC_FRAME_SYNTAX_ERROR",
	"EOF_SYNTAX_ERROR",
	"FRAME_SYNTAX_ERROR",
	"BAD_CTRL_SYMBOL_TYPE",
	"PA_INIT_ERROR",
	"PA_ERROR_IND_RECEIVED",
	"PA_INIT"
};

static bool ufs_mtk_is_boost_crypt_enabled(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	return !!(host->caps & UFS_MTK_CAP_BOOST_CRYPT_ENGINE);
}

static bool ufs_mtk_is_va09_supported(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	return !!(host->caps & UFS_MTK_CAP_VA09_PWR_CTRL);
}

static bool ufs_mtk_is_broken_vcc(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	return !!(host->caps & UFS_MTK_CAP_BROKEN_VCC);
}

static bool ufs_mtk_is_pmc_via_fastauto(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	return !!(host->caps & UFS_MTK_CAP_PMC_VIA_FASTAUTO);
}

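/*
 * Toggle MediaTek's vendor-specific UniPro clock gating. The bit
 * positions written into VS_SAVEPOWERCONTROL and VS_DEBUGCLOCKENABLE
 * follow the driver's own definitions in ufs-mediatek.h.
 */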
static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
{
	u32 tmp;

	if (enable) {
		ufshcd_dme_get(hba,
			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
		tmp = tmp |
		      (1 << RX_SYMBOL_CLK_GATE_EN) |
		      (1 << SYS_CLK_GATE_EN) |
		      (1 << TX_CLK_GATE_EN);
		ufshcd_dme_set(hba,
			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

		ufshcd_dme_get(hba,
			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
		tmp = tmp & ~(1 << TX_SYMBOL_CLK_REQ_FORCE);
		ufshcd_dme_set(hba,
			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
	} else {
		ufshcd_dme_get(hba,
			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
		tmp = tmp & ~((1 << RX_SYMBOL_CLK_GATE_EN) |
			      (1 << SYS_CLK_GATE_EN) |
			      (1 << TX_CLK_GATE_EN));
		ufshcd_dme_set(hba,
			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

		ufshcd_dme_get(hba,
			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
		tmp = tmp | (1 << TX_SYMBOL_CLK_REQ_FORCE);
		ufshcd_dme_set(hba,
			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
	}
}

static void ufs_mtk_crypto_enable(struct ufs_hba *hba)
{
	struct arm_smccc_res res;

	ufs_mtk_crypto_ctrl(res, 1);
	if (res.a0) {
		dev_info(hba->dev, "%s: crypto enable failed, err: %lu\n",
			 __func__, res.a0);
		hba->caps &= ~UFSHCD_CAP_CRYPTO;
	}
}

static void ufs_mtk_host_reset(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	reset_control_assert(host->hci_reset);
	reset_control_assert(host->crypto_reset);
	reset_control_assert(host->unipro_reset);

	usleep_range(100, 110);

	reset_control_deassert(host->unipro_reset);
	reset_control_deassert(host->crypto_reset);
	reset_control_deassert(host->hci_reset);
}

static void ufs_mtk_init_reset_control(struct ufs_hba *hba,
				       struct reset_control **rc,
				       char *str)
{
	*rc = devm_reset_control_get(hba->dev, str);
	if (IS_ERR(*rc)) {
		dev_info(hba->dev, "Failed to get reset control %s: %ld\n",
			 str, PTR_ERR(*rc));
		*rc = NULL;
	}
}

static void ufs_mtk_init_reset(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	ufs_mtk_init_reset_control(hba, &host->hci_reset,
				   "hci_rst");
	ufs_mtk_init_reset_control(hba, &host->unipro_reset,
				   "unipro_rst");
	ufs_mtk_init_reset_control(hba, &host->crypto_reset,
				   "crypto_rst");
}

static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
				     enum ufs_notify_change_status status)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	if (status == PRE_CHANGE) {
		if (host->unipro_lpm) {
			hba->vps->hba_enable_delay_us = 0;
		} else {
			hba->vps->hba_enable_delay_us = 600;
			ufs_mtk_host_reset(hba);
		}

		if (hba->caps & UFSHCD_CAP_CRYPTO)
			ufs_mtk_crypto_enable(hba);

		if (host->caps & UFS_MTK_CAP_DISABLE_AH8) {
			ufshcd_writel(hba, 0,
				      REG_AUTO_HIBERNATE_IDLE_TIMER);
			hba->capabilities &= ~MASK_AUTO_HIBERN8_SUPPORT;
			hba->ahit = 0;
		}

		/*
		 * Turn on CLK_CG early to bypass an abnormal ERR_CHK signal
		 * and prevent a host hang.
		 */
		ufshcd_writel(hba,
			      ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) | 0x80,
			      REG_UFS_XOUFS_CTRL);
	}

	return 0;
}

static int ufs_mtk_bind_mphy(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct device *dev = hba->dev;
	struct device_node *np = dev->of_node;
	int err = 0;

	host->mphy = devm_of_phy_get_by_index(dev, np, 0);

	if (host->mphy == ERR_PTR(-EPROBE_DEFER)) {
		/*
		 * The UFS driver might be probed before the PHY driver.
		 * In that case, return -EPROBE_DEFER so this probe is
		 * retried once the PHY is available.
		 */
		err = -EPROBE_DEFER;
		dev_info(dev,
			 "%s: required phy hasn't probed yet. err = %d\n",
			__func__, err);
	} else if (IS_ERR(host->mphy)) {
		err = PTR_ERR(host->mphy);
		if (err != -ENODEV) {
			dev_info(dev, "%s: PHY get failed %d\n", __func__,
				 err);
		}
	}

	if (err)
		host->mphy = NULL;
	/*
	 * Allow an unbound mphy because not every platform needs specific
	 * mphy control.
	 */
	if (err == -ENODEV)
		err = 0;

	return err;
}

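/*
 * Reference-clock handshake: the host writes a request bit to
 * REG_UFS_REFCLK_CTRL and then polls until the hardware ack bit matches
 * the requested state, or REFCLK_REQ_TIMEOUT_US elapses.
 */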
static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct arm_smccc_res res;
	ktime_t timeout, time_checked;
	u32 value;

	if (host->ref_clk_enabled == on)
		return 0;

	ufs_mtk_ref_clk_notify(on, PRE_CHANGE, res);

	if (on) {
		ufshcd_writel(hba, REFCLK_REQUEST, REG_UFS_REFCLK_CTRL);
	} else {
		ufshcd_delay_us(host->ref_clk_gating_wait_us, 10);
		ufshcd_writel(hba, REFCLK_RELEASE, REG_UFS_REFCLK_CTRL);
	}

	/* Wait for ack */
	timeout = ktime_add_us(ktime_get(), REFCLK_REQ_TIMEOUT_US);
	do {
		time_checked = ktime_get();
		value = ufshcd_readl(hba, REG_UFS_REFCLK_CTRL);

		/* Wait until the ack bit equals the req bit */
		if (((value & REFCLK_ACK) >> 1) == (value & REFCLK_REQUEST))
			goto out;

		usleep_range(100, 200);
	} while (ktime_before(time_checked, timeout));

	dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value);

	ufs_mtk_ref_clk_notify(host->ref_clk_enabled, POST_CHANGE, res);

	return -ETIMEDOUT;

out:
	host->ref_clk_enabled = on;
	if (on)
		ufshcd_delay_us(host->ref_clk_ungating_wait_us, 10);

	ufs_mtk_ref_clk_notify(on, POST_CHANGE, res);

	return 0;
}

static void ufs_mtk_setup_ref_clk_wait_us(struct ufs_hba *hba,
					  u16 gating_us)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	if (hba->dev_info.clk_gating_wait_us) {
		host->ref_clk_gating_wait_us =
			hba->dev_info.clk_gating_wait_us;
	} else {
		host->ref_clk_gating_wait_us = gating_us;
	}

	host->ref_clk_ungating_wait_us = REFCLK_DEFAULT_WAIT_US;
}

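/*
 * Route internal state onto REG_UFS_PROBE. The values written to the
 * debug-select registers are vendor-defined magic; the 0x36 check assumes
 * IP versions from that generation onward use the wider four-bank select
 * layout.
 */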
static void ufs_mtk_dbg_sel(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	if (((host->ip_ver >> 16) & 0xFF) >= 0x36) {
		ufshcd_writel(hba, 0x820820, REG_UFS_DEBUG_SEL);
		ufshcd_writel(hba, 0x0, REG_UFS_DEBUG_SEL_B0);
		ufshcd_writel(hba, 0x55555555, REG_UFS_DEBUG_SEL_B1);
		ufshcd_writel(hba, 0xaaaaaaaa, REG_UFS_DEBUG_SEL_B2);
		ufshcd_writel(hba, 0xffffffff, REG_UFS_DEBUG_SEL_B3);
	} else {
		ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
	}
}

static void ufs_mtk_wait_idle_state(struct ufs_hba *hba,
			    unsigned long retry_ms)
{
	u64 timeout, time_checked;
	u32 val, sm;
	bool wait_idle;

	/* cannot use plain ktime_get() in suspend */
	timeout = ktime_get_mono_fast_ns() + retry_ms * 1000000UL;

	/* wait a short time after selecting the debug probe */
	udelay(10);
	wait_idle = false;

	do {
		time_checked = ktime_get_mono_fast_ns();
		ufs_mtk_dbg_sel(hba);
		val = ufshcd_readl(hba, REG_UFS_PROBE);

		sm = val & 0x1f;

		/*
		 * If the state machine is entering or confirming H8 entry,
		 * wait until it returns to the idle state.
		 */
		if ((sm >= VS_HIB_ENTER) && (sm <= VS_HIB_EXIT)) {
			wait_idle = true;
			udelay(50);
			continue;
		} else if (!wait_idle)
			break;

		if (wait_idle && (sm == VS_HCE_BASE))
			break;
	} while (time_checked < timeout);

	if (wait_idle && sm != VS_HCE_BASE)
		dev_info(hba->dev, "wait idle tmo: 0x%x\n", val);
}

static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state,
				   unsigned long max_wait_ms)
{
	ktime_t timeout, time_checked;
	u32 val;

	timeout = ktime_add_ms(ktime_get(), max_wait_ms);
	do {
		time_checked = ktime_get();
		ufs_mtk_dbg_sel(hba);
		val = ufshcd_readl(hba, REG_UFS_PROBE);
		val = val >> 28;

		if (val == state)
			return 0;

		/* Sleep for at most 200us */
		usleep_range(100, 200);
	} while (ktime_before(time_checked, timeout));

	return -ETIMEDOUT;
}

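/*
 * Power the M-PHY on or off, including the optional VA09 regulator and
 * its SMC-based power control. The "on ^ mphy_powered_on" test makes the
 * function a no-op when the PHY is already in the requested state.
 */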
static int ufs_mtk_mphy_power_on(struct ufs_hba *hba, bool on)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct phy *mphy = host->mphy;
	struct arm_smccc_res res;
	int ret = 0;

	if (!mphy || !(on ^ host->mphy_powered_on))
		return 0;

	if (on) {
		if (ufs_mtk_is_va09_supported(hba)) {
			ret = regulator_enable(host->reg_va09);
			if (ret < 0)
				goto out;
			/* wait 200 us to stabilize VA09 */
			usleep_range(200, 210);
			ufs_mtk_va09_pwr_ctrl(res, 1);
		}
		phy_power_on(mphy);
	} else {
		phy_power_off(mphy);
		if (ufs_mtk_is_va09_supported(hba)) {
			ufs_mtk_va09_pwr_ctrl(res, 0);
			ret = regulator_disable(host->reg_va09);
		}
	}
out:
	if (ret) {
		dev_info(hba->dev,
			 "failed to %s va09: %d\n",
			 on ? "enable" : "disable",
			 ret);
	} else {
		host->mphy_powered_on = on;
	}

	return ret;
}

static int ufs_mtk_get_host_clk(struct device *dev, const char *name,
				struct clk **clk_out)
{
	struct clk *clk;
	int err = 0;

	clk = devm_clk_get(dev, name);
	if (IS_ERR(clk))
		err = PTR_ERR(clk);
	else
		*clk_out = clk;

	return err;
}

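/*
 * Boosting the crypt engine raises vcore to the dts-provided minimum and
 * reparents the crypt mux to its performance clock; un-boosting reparents
 * to the low-power clock and lets vcore drop back to its floor.
 */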
static void ufs_mtk_boost_crypt(struct ufs_hba *hba, bool boost)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct ufs_mtk_crypt_cfg *cfg;
	struct regulator *reg;
	int volt, ret;

	if (!ufs_mtk_is_boost_crypt_enabled(hba))
		return;

	cfg = host->crypt;
	volt = cfg->vcore_volt;
	reg = cfg->reg_vcore;

	ret = clk_prepare_enable(cfg->clk_crypt_mux);
	if (ret) {
		dev_info(hba->dev, "clk_prepare_enable(): %d\n",
			 ret);
		return;
	}

	if (boost) {
		ret = regulator_set_voltage(reg, volt, INT_MAX);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set vcore to %d\n", volt);
			goto out;
		}

		ret = clk_set_parent(cfg->clk_crypt_mux,
				     cfg->clk_crypt_perf);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set clk_crypt_perf\n");
			regulator_set_voltage(reg, 0, INT_MAX);
			goto out;
		}
	} else {
		ret = clk_set_parent(cfg->clk_crypt_mux,
				     cfg->clk_crypt_lp);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set clk_crypt_lp\n");
			goto out;
		}

		ret = regulator_set_voltage(reg, 0, INT_MAX);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set vcore to MIN\n");
		}
	}
out:
	clk_disable_unprepare(cfg->clk_crypt_mux);
}

static int ufs_mtk_init_host_clk(struct ufs_hba *hba, const char *name,
				 struct clk **clk)
{
	int ret;

	ret = ufs_mtk_get_host_clk(hba->dev, name, clk);
	if (ret) {
		dev_info(hba->dev, "%s: failed to get %s: %d", __func__,
			 name, ret);
	}

	return ret;
}

static void ufs_mtk_init_boost_crypt(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct ufs_mtk_crypt_cfg *cfg;
	struct device *dev = hba->dev;
	struct regulator *reg;
	u32 volt;

	host->crypt = devm_kzalloc(dev, sizeof(*(host->crypt)),
				   GFP_KERNEL);
	if (!host->crypt)
		goto disable_caps;

	reg = devm_regulator_get_optional(dev, "dvfsrc-vcore");
	if (IS_ERR(reg)) {
		dev_info(dev, "failed to get dvfsrc-vcore: %ld",
			 PTR_ERR(reg));
		goto disable_caps;
	}

	if (of_property_read_u32(dev->of_node, "boost-crypt-vcore-min",
				 &volt)) {
		dev_info(dev, "failed to get boost-crypt-vcore-min");
		goto disable_caps;
	}

	cfg = host->crypt;
	if (ufs_mtk_init_host_clk(hba, "crypt_mux",
				  &cfg->clk_crypt_mux))
		goto disable_caps;

	if (ufs_mtk_init_host_clk(hba, "crypt_lp",
				  &cfg->clk_crypt_lp))
		goto disable_caps;

	if (ufs_mtk_init_host_clk(hba, "crypt_perf",
				  &cfg->clk_crypt_perf))
		goto disable_caps;

	cfg->reg_vcore = reg;
	cfg->vcore_volt = volt;
	host->caps |= UFS_MTK_CAP_BOOST_CRYPT_ENGINE;

disable_caps:
	return;
}

static void ufs_mtk_init_va09_pwr_ctrl(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	host->reg_va09 = regulator_get(hba->dev, "va09");
	if (IS_ERR(host->reg_va09))
		dev_info(hba->dev, "failed to get va09");
	else
		host->caps |= UFS_MTK_CAP_VA09_PWR_CTRL;
}

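/*
 * Host capabilities are selected by boolean device-tree properties.
 * An illustrative (not authoritative) fragment enabling two of them:
 *
 *	ufshci@11270000 {
 *		compatible = "mediatek,mt8183-ufshci";
 *		mediatek,ufs-boost-crypt;
 *		boost-crypt-vcore-min = <725000>;
 *		mediatek,ufs-disable-ah8;
 *	};
 *
 * The node name, unit address and vcore value above are made up; the
 * property names match those parsed below and in
 * ufs_mtk_init_boost_crypt().
 */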
static void ufs_mtk_init_host_caps(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct device_node *np = hba->dev->of_node;

	if (of_property_read_bool(np, "mediatek,ufs-boost-crypt"))
		ufs_mtk_init_boost_crypt(hba);

	if (of_property_read_bool(np, "mediatek,ufs-support-va09"))
		ufs_mtk_init_va09_pwr_ctrl(hba);

	if (of_property_read_bool(np, "mediatek,ufs-disable-ah8"))
		host->caps |= UFS_MTK_CAP_DISABLE_AH8;

	if (of_property_read_bool(np, "mediatek,ufs-broken-vcc"))
		host->caps |= UFS_MTK_CAP_BROKEN_VCC;

	if (of_property_read_bool(np, "mediatek,ufs-pmc-via-fastauto"))
		host->caps |= UFS_MTK_CAP_PMC_VIA_FASTAUTO;

	dev_info(hba->dev, "caps: 0x%x", host->caps);
}

static void ufs_mtk_boost_pm_qos(struct ufs_hba *hba, bool boost)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	if (!host || !host->pm_qos_init)
		return;

	cpu_latency_qos_update_request(&host->pm_qos_req,
				       boost ? 0 : PM_QOS_DEFAULT_VALUE);
}

static void ufs_mtk_scale_perf(struct ufs_hba *hba, bool scale_up)
{
	ufs_mtk_boost_crypt(hba, scale_up);
	ufs_mtk_boost_pm_qos(hba, scale_up);
}

static void ufs_mtk_pwr_ctrl(struct ufs_hba *hba, bool on)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	if (on) {
		phy_power_on(host->mphy);
		ufs_mtk_setup_ref_clk(hba, on);
		if (!ufshcd_is_clkscaling_supported(hba))
			ufs_mtk_scale_perf(hba, on);
	} else {
		if (!ufshcd_is_clkscaling_supported(hba))
			ufs_mtk_scale_perf(hba, on);
		ufs_mtk_setup_ref_clk(hba, on);
		phy_power_off(host->mphy);
	}
}

/**
 * ufs_mtk_setup_clocks - enable/disable clocks
 * @hba: host controller instance
 * @on: if true, enable clocks; otherwise disable them
 * @status: PRE_CHANGE or POST_CHANGE notify
 *
 * Returns 0 on success, non-zero on failure.
 */
static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
				enum ufs_notify_change_status status)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	bool clk_pwr_off = false;
	int ret = 0;

	/*
	 * Simply ignore the call if ufs_mtk_init() is not yet done; this
	 * function is invoked again from ufs_mtk_init() once init is done.
	 */
	if (!host)
		return 0;

	if (!on && status == PRE_CHANGE) {
		if (ufshcd_is_link_off(hba)) {
			clk_pwr_off = true;
		} else if (ufshcd_is_link_hibern8(hba) ||
			 (!ufshcd_can_hibern8_during_gating(hba) &&
			 ufshcd_is_auto_hibern8_enabled(hba))) {
			/*
			 * Gate ref-clk and power off the mphy if the link
			 * is in OFF or Hibern8 state, entered by either
			 * Auto-Hibern8 or ufshcd_link_state_transition().
			 */
			ret = ufs_mtk_wait_link_state(hba,
						      VS_LINK_HIBERN8,
						      15);
			if (!ret)
				clk_pwr_off = true;
		}

		if (clk_pwr_off)
			ufs_mtk_pwr_ctrl(hba, false);
	} else if (on && status == POST_CHANGE) {
		ufs_mtk_pwr_ctrl(hba, true);
	}

	return ret;
}

static void ufs_mtk_get_controller_version(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	int ret, ver = 0;

	if (host->hw_ver.major)
		return;

	/* Set default (minimum) version anyway */
	host->hw_ver.major = 2;

	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_LOCALVERINFO), &ver);
	if (!ret) {
		if (ver >= UFS_UNIPRO_VER_1_8) {
			host->hw_ver.major = 3;
			/*
			 * Fix the HCI version for platforms that report
			 * an incorrect version.
			 */
			if (hba->ufs_version < ufshci_version(3, 0))
				hba->ufs_version = ufshci_version(3, 0);
		}
	}
}

static u32 ufs_mtk_get_ufs_hci_version(struct ufs_hba *hba)
{
	return hba->ufs_version;
}

/**
 * ufs_mtk_init_clocks - Init mtk driver private clocks
 *
 * @hba: per adapter instance
 */
static void ufs_mtk_init_clocks(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct list_head *head = &hba->clk_list_head;
	struct ufs_mtk_clk *mclk = &host->mclk;
	struct ufs_clk_info *clki, *clki_tmp;

	/*
	 * Find private clocks and store them in struct ufs_mtk_clk.
	 * Remove "ufs_sel_max_src" and "ufs_sel_min_src" from the list to
	 * avoid them being switched on/off during clock gating.
	 */
	list_for_each_entry_safe(clki, clki_tmp, head, list) {
		if (!strcmp(clki->name, "ufs_sel")) {
			host->mclk.ufs_sel_clki = clki;
		} else if (!strcmp(clki->name, "ufs_sel_max_src")) {
			host->mclk.ufs_sel_max_clki = clki;
			clk_disable_unprepare(clki->clk);
			list_del(&clki->list);
		} else if (!strcmp(clki->name, "ufs_sel_min_src")) {
			host->mclk.ufs_sel_min_clki = clki;
			clk_disable_unprepare(clki->clk);
			list_del(&clki->list);
		}
	}

	if (!mclk->ufs_sel_clki || !mclk->ufs_sel_max_clki ||
	    !mclk->ufs_sel_min_clki) {
		hba->caps &= ~UFSHCD_CAP_CLK_SCALING;
		dev_info(hba->dev,
			 "%s: Clk-scaling not ready. Feature disabled.",
			 __func__);
	}
}

#define MAX_VCC_NAME 30
static int ufs_mtk_vreg_fix_vcc(struct ufs_hba *hba)
{
	struct ufs_vreg_info *info = &hba->vreg_info;
	struct device_node *np = hba->dev->of_node;
	struct device *dev = hba->dev;
	char vcc_name[MAX_VCC_NAME];
	struct arm_smccc_res res;
	int err, ver;

	if (hba->vreg_info.vcc)
		return 0;

	if (of_property_read_bool(np, "mediatek,ufs-vcc-by-num")) {
		ufs_mtk_get_vcc_num(res);
		if (res.a1 > UFS_VCC_NONE && res.a1 < UFS_VCC_MAX)
			snprintf(vcc_name, MAX_VCC_NAME, "vcc-opt%lu", res.a1);
		else
			return -ENODEV;
	} else if (of_property_read_bool(np, "mediatek,ufs-vcc-by-ver")) {
		ver = (hba->dev_info.wspecversion & 0xF00) >> 8;
		snprintf(vcc_name, MAX_VCC_NAME, "vcc-ufs%u", ver);
	} else {
		return 0;
	}

	err = ufshcd_populate_vreg(dev, vcc_name, &info->vcc);
	if (err)
		return err;

	err = ufshcd_get_vreg(dev, info->vcc);
	if (err)
		return err;

	err = regulator_enable(info->vcc->reg);
	if (!err) {
		info->vcc->enabled = true;
		dev_info(dev, "%s: %s enabled\n", __func__, vcc_name);
	}

	return err;
}

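/*
 * UFS 3.x devices (wspecversion >= 0x0300) use VCCQ, earlier devices
 * use VCCQ2. Keep the rail that is actually in use always-on and release
 * the unused one so it is not toggled needlessly across power
 * transitions.
 */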
static void ufs_mtk_vreg_fix_vccqx(struct ufs_hba *hba)
{
	struct ufs_vreg_info *info = &hba->vreg_info;
	struct ufs_vreg **vreg_on, **vreg_off;

	if (hba->dev_info.wspecversion >= 0x0300) {
		vreg_on = &info->vccq;
		vreg_off = &info->vccq2;
	} else {
		vreg_on = &info->vccq2;
		vreg_off = &info->vccq;
	}

	if (*vreg_on)
		(*vreg_on)->always_on = true;

	if (*vreg_off) {
		regulator_disable((*vreg_off)->reg);
		devm_kfree(hba->dev, (*vreg_off)->name);
		devm_kfree(hba->dev, *vreg_off);
		*vreg_off = NULL;
	}
}

/**
 * ufs_mtk_init - find other essential mmio bases
 * @hba: host controller instance
 *
 * Bind the PHY to the controller and power it up, enabling the required
 * clocks and regulators.
 *
 * Returns -EPROBE_DEFER if binding fails, a negative error code on PHY
 * power-up failure, and zero on success.
 */
static int ufs_mtk_init(struct ufs_hba *hba)
{
	const struct of_device_id *id;
	struct device *dev = hba->dev;
	struct ufs_mtk_host *host;
	int err = 0;

	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
	if (!host) {
		err = -ENOMEM;
		dev_info(dev, "%s: no memory for mtk ufs host\n", __func__);
		goto out;
	}

	host->hba = hba;
	ufshcd_set_variant(hba, host);

	id = of_match_device(ufs_mtk_of_match, dev);
	if (!id) {
		err = -EINVAL;
		goto out;
	}

	/* Initialize host capability */
	ufs_mtk_init_host_caps(hba);

	err = ufs_mtk_bind_mphy(hba);
	if (err)
		goto out_variant_clear;

	ufs_mtk_init_reset(hba);

	/* Enable runtime autosuspend */
	hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;

	/* Enable clock-gating */
	hba->caps |= UFSHCD_CAP_CLK_GATING;

	/* Enable inline encryption */
	hba->caps |= UFSHCD_CAP_CRYPTO;

	/* Enable WriteBooster */
	hba->caps |= UFSHCD_CAP_WB_EN;

	/* Enable clk scaling */
	hba->caps |= UFSHCD_CAP_CLK_SCALING;

	hba->quirks |= UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL;
	hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80);

	if (host->caps & UFS_MTK_CAP_DISABLE_AH8)
		hba->caps |= UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;

	ufs_mtk_init_clocks(hba);

	/*
	 * ufshcd_vops_init() is invoked after
	 * ufshcd_setup_clock(true) in ufshcd_hba_init() thus
	 * phy clock setup is skipped.
	 *
	 * Enable phy clocks specifically here.
	 */
	ufs_mtk_mphy_power_on(hba, true);
	ufs_mtk_setup_clocks(hba, true, POST_CHANGE);

	host->ip_ver = ufshcd_readl(hba, REG_UFS_MTK_IP_VER);

	/* Initialize pm-qos request */
	cpu_latency_qos_add_request(&host->pm_qos_req, PM_QOS_DEFAULT_VALUE);
	host->pm_qos_init = true;

	goto out;

out_variant_clear:
	ufshcd_set_variant(hba, NULL);
out:
	return err;
}

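/*
 * Decide whether the power-mode change should go through an intermediate
 * FASTAUTO step: only when the cap is set, the HS rate actually changes,
 * and each direction either targets FAST_MODE or a gear of HS-G4 or
 * above.
 */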
static bool ufs_mtk_pmc_via_fastauto(struct ufs_hba *hba,
				     struct ufs_pa_layer_attr *dev_req_params)
{
	if (!ufs_mtk_is_pmc_via_fastauto(hba))
		return false;

	if (dev_req_params->hs_rate == hba->pwr_info.hs_rate)
		return false;

	if (dev_req_params->pwr_tx != FAST_MODE &&
	    dev_req_params->gear_tx < UFS_HS_G4)
		return false;

	if (dev_req_params->pwr_rx != FAST_MODE &&
	    dev_req_params->gear_rx < UFS_HS_G4)
		return false;

	return true;
}

static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
				  struct ufs_pa_layer_attr *dev_max_params,
				  struct ufs_pa_layer_attr *dev_req_params)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct ufs_dev_params host_cap;
	int ret;

	ufshcd_init_pwr_dev_param(&host_cap);
	host_cap.hs_rx_gear = UFS_HS_G5;
	host_cap.hs_tx_gear = UFS_HS_G5;

	ret = ufshcd_get_pwr_dev_param(&host_cap,
				       dev_max_params,
				       dev_req_params);
	if (ret) {
		pr_info("%s: failed to determine capabilities\n",
			__func__);
	}

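	/*
	 * First drop to HS-G1 in FASTAUTO mode with the negotiated lanes
	 * and HS series; the regular power-mode change performed afterwards
	 * by the UFS core then brings the link to the requested gear.
	 */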
	if (ufs_mtk_pmc_via_fastauto(hba, dev_req_params)) {
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), UFS_HS_G1);

		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), true);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), UFS_HS_G1);

		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
			       dev_req_params->lane_tx);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
			       dev_req_params->lane_rx);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
			       dev_req_params->hs_rate);

		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXHSADAPTTYPE),
			       PA_NO_ADAPT);

		ret = ufshcd_uic_change_pwr_mode(hba,
					FASTAUTO_MODE << 4 | FASTAUTO_MODE);

		if (ret) {
			dev_err(hba->dev, "%s: HSG1B FASTAUTO failed ret=%d\n",
				__func__, ret);
		}
	}

	if (host->hw_ver.major >= 3) {
		ret = ufshcd_dme_configure_adapt(hba,
					   dev_req_params->gear_tx,
					   PA_INITIAL_ADAPT);
	}

	return ret;
}

static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
				     enum ufs_notify_change_status stage,
				     struct ufs_pa_layer_attr *dev_max_params,
				     struct ufs_pa_layer_attr *dev_req_params)
{
	int ret = 0;

	switch (stage) {
	case PRE_CHANGE:
		ret = ufs_mtk_pre_pwr_change(hba, dev_max_params,
					     dev_req_params);
		break;
	case POST_CHANGE:
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int ufs_mtk_unipro_set_lpm(struct ufs_hba *hba, bool lpm)
{
	int ret;
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	ret = ufshcd_dme_set(hba,
			     UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
			     lpm ? 1 : 0);
	if (!ret || !lpm) {
		/*
		 * If the UIC command failed, forcibly record non-LPM mode
		 * so that the default hba_enable_delay_us value is used
		 * when re-enabling the host.
		 */
		host->unipro_lpm = lpm;
	}

	return ret;
}

static int ufs_mtk_pre_link(struct ufs_hba *hba)
{
	int ret;
	u32 tmp;

	ufs_mtk_get_controller_version(hba);

	ret = ufs_mtk_unipro_set_lpm(hba, false);
	if (ret)
		return ret;

	/*
	 * Setting PA_Local_TX_LCC_Enable to 0 before link startup
	 * to make sure that both host and device TX LCC are disabled
	 * once link startup is completed.
	 */
	ret = ufshcd_disable_host_tx_lcc(hba);
	if (ret)
		return ret;

	/* disable deep stall */
	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
	if (ret)
		return ret;

	tmp &= ~(1 << 6);

	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

	return ret;
}

static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
{
	u32 ah_ms;

	if (ufshcd_is_clkgating_allowed(hba)) {
		if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit)
			ah_ms = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK,
					  hba->ahit);
		else
			ah_ms = 10;
		ufshcd_clkgate_delay_set(hba->dev, ah_ms + 5);
	}
}

static void ufs_mtk_post_link(struct ufs_hba *hba)
{
	/* enable unipro clock gating feature */
	ufs_mtk_cfg_unipro_cg(hba, true);

	/* will be configured during probe hba */
	if (ufshcd_is_auto_hibern8_supported(hba))
		hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) |
			FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);

	ufs_mtk_setup_clk_gating(hba);
}

static int ufs_mtk_link_startup_notify(struct ufs_hba *hba,
				       enum ufs_notify_change_status stage)
{
	int ret = 0;

	switch (stage) {
	case PRE_CHANGE:
		ret = ufs_mtk_pre_link(hba);
		break;
	case POST_CHANGE:
		ufs_mtk_post_link(hba);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int ufs_mtk_device_reset(struct ufs_hba *hba)
{
	struct arm_smccc_res res;

	/* disable hba before device reset */
	ufshcd_hba_stop(hba);

	ufs_mtk_device_reset_ctrl(0, res);

	/*
	 * The reset signal is active low. UFS devices shall detect a
	 * positive or negative RST_n pulse width of at least 1 us.
	 *
	 * To be on the safe side, keep the reset low for at least 10 us.
	 */
	usleep_range(10, 15);

	ufs_mtk_device_reset_ctrl(1, res);

	/* Some devices may need time to respond to rst_n */
	usleep_range(10000, 15000);

	dev_info(hba->dev, "device reset done\n");

	return 0;
}

static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
{
	int err;

	err = ufshcd_hba_enable(hba);
	if (err)
		return err;

	err = ufs_mtk_unipro_set_lpm(hba, false);
	if (err)
		return err;

	err = ufshcd_uic_hibern8_exit(hba);
	if (!err)
		ufshcd_set_link_active(hba);
	else
		return err;

	err = ufshcd_make_hba_operational(hba);
	if (err)
		return err;

	return 0;
}

static int ufs_mtk_link_set_lpm(struct ufs_hba *hba)
{
	int err;

	/* Disable reset confirm feature by UniPro */
	ufshcd_writel(hba,
		      (ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) & ~0x100),
		      REG_UFS_XOUFS_CTRL);

	err = ufs_mtk_unipro_set_lpm(hba, true);
	if (err) {
		/* Resume UniPro state for following error recovery */
		ufs_mtk_unipro_set_lpm(hba, false);
		return err;
	}

	return 0;
}

static void ufs_mtk_vccqx_set_lpm(struct ufs_hba *hba, bool lpm)
{
	struct ufs_vreg *vccqx = NULL;

	if (hba->vreg_info.vccq)
		vccqx = hba->vreg_info.vccq;
	else
		vccqx = hba->vreg_info.vccq2;

	regulator_set_mode(vccqx->reg,
			   lpm ? REGULATOR_MODE_IDLE : REGULATOR_MODE_NORMAL);
}

static void ufs_mtk_vsx_set_lpm(struct ufs_hba *hba, bool lpm)
{
	struct arm_smccc_res res;

	ufs_mtk_device_pwr_ctrl(!lpm,
				(unsigned long)hba->dev_info.wspecversion,
				res);
}

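/*
 * Order matters here: on entry to LPM the device rails (vccqx) are
 * relaxed before the SoC-side supplies (vsx); on exit the sequence is
 * reversed so the device rails come back last.
 */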
static void ufs_mtk_dev_vreg_set_lpm(struct ufs_hba *hba, bool lpm)
{
	if (!hba->vreg_info.vccq && !hba->vreg_info.vccq2)
		return;

	/* Skip if VCC is assumed always-on */
	if (!hba->vreg_info.vcc)
		return;

	/* Bypass LPM when device is still active */
	if (lpm && ufshcd_is_ufs_dev_active(hba))
		return;

	/* Bypass LPM if VCC is enabled */
	if (lpm && hba->vreg_info.vcc->enabled)
		return;

	if (lpm) {
		ufs_mtk_vccqx_set_lpm(hba, lpm);
		ufs_mtk_vsx_set_lpm(hba, lpm);
	} else {
		ufs_mtk_vsx_set_lpm(hba, lpm);
		ufs_mtk_vccqx_set_lpm(hba, lpm);
	}
}

static void ufs_mtk_auto_hibern8_disable(struct ufs_hba *hba)
{
	int ret;

	/* disable auto-hibern8 */
	ufshcd_writel(hba, 0, REG_AUTO_HIBERNATE_IDLE_TIMER);

	/* wait for the host to return to idle state after auto-hibern8 off */
	ufs_mtk_wait_idle_state(hba, 5);

	ret = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100);
	if (ret)
		dev_warn(hba->dev, "exit h8 state fail, ret=%d\n", ret);
}

static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
	enum ufs_notify_change_status status)
{
	int err;
	struct arm_smccc_res res;

	if (status == PRE_CHANGE) {
		if (ufshcd_is_auto_hibern8_supported(hba))
			ufs_mtk_auto_hibern8_disable(hba);
		return 0;
	}

	if (ufshcd_is_link_hibern8(hba)) {
		err = ufs_mtk_link_set_lpm(hba);
		if (err)
			goto fail;
	}

	if (!ufshcd_is_link_active(hba)) {
		/*
		 * Make sure no error will be returned to prevent
		 * ufshcd_suspend() re-enabling regulators while vreg is
		 * still in low-power mode.
		 */
		err = ufs_mtk_mphy_power_on(hba, false);
		if (err)
			goto fail;
	}

	if (ufshcd_is_link_off(hba))
		ufs_mtk_device_reset_ctrl(0, res);

	ufs_mtk_host_pwr_ctrl(HOST_PWR_HCI, false, res);

	return 0;
fail:
	/*
	 * Forcibly set the link to the off state to trigger
	 * ufshcd_host_reset_and_restore() in ufshcd_suspend()
	 * for a complete host reset.
	 */
	ufshcd_set_link_off(hba);
	return -EAGAIN;
}

static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	int err;
	struct arm_smccc_res res;

	if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
		ufs_mtk_dev_vreg_set_lpm(hba, false);

	ufs_mtk_host_pwr_ctrl(HOST_PWR_HCI, true, res);

	err = ufs_mtk_mphy_power_on(hba, true);
	if (err)
		goto fail;

	if (ufshcd_is_link_hibern8(hba)) {
		err = ufs_mtk_link_set_hpm(hba);
		if (err)
			goto fail;
	}

	return 0;
fail:
	return ufshcd_link_recovery(hba);
}

static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba)
{
	/* Dump ufshci registers 0x140 ~ 0x14C */
	ufshcd_dump_regs(hba, REG_UFS_XOUFS_CTRL, 0x10,
			 "XOUFS Ctrl (0x140): ");

	ufshcd_dump_regs(hba, REG_UFS_EXTREG, 0x4, "Ext Reg ");

	/* Dump ufshci registers 0x2200 ~ 0x22AC */
	ufshcd_dump_regs(hba, REG_UFS_MPHYCTRL,
			 REG_UFS_REJECT_MON - REG_UFS_MPHYCTRL + 4,
			 "MPHY Ctrl (0x2200): ");

	/* Direct debugging information to REG_MTK_PROBE */
	ufs_mtk_dbg_sel(hba);
	ufshcd_dump_regs(hba, REG_UFS_PROBE, 0x4, "Debug Probe ");
}

static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
{
	struct ufs_dev_info *dev_info = &hba->dev_info;
	u16 mid = dev_info->wmanufacturerid;

	if (mid == UFS_VENDOR_SAMSUNG) {
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), 10);
	}

	/*
	 * Decide the waiting time before gating the reference clock and
	 * after ungating it according to each vendor's requirement.
	 */
	if (mid == UFS_VENDOR_SAMSUNG)
		ufs_mtk_setup_ref_clk_wait_us(hba, 1);
	else if (mid == UFS_VENDOR_SKHYNIX)
		ufs_mtk_setup_ref_clk_wait_us(hba, 30);
	else if (mid == UFS_VENDOR_TOSHIBA)
		ufs_mtk_setup_ref_clk_wait_us(hba, 100);
	else
		ufs_mtk_setup_ref_clk_wait_us(hba,
					      REFCLK_DEFAULT_WAIT_US);
	return 0;
}

static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
{
	ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups);

	if (ufs_mtk_is_broken_vcc(hba) && hba->vreg_info.vcc &&
	    (hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)) {
		hba->vreg_info.vcc->always_on = true;
		/*
		 * VCC will be kept always-on, thus no delay is needed
		 * during regulator operations.
		 */
		hba->dev_quirks &= ~(UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
			UFS_DEVICE_QUIRK_DELAY_AFTER_LPM);
	}

	ufs_mtk_vreg_fix_vcc(hba);
	ufs_mtk_vreg_fix_vccqx(hba);
}

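/*
 * Decode and log UIC error events using the string tables defined at
 * the top of this file; PA- and DL-layer errors are expanded bit by bit.
 */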
static void ufs_mtk_event_notify(struct ufs_hba *hba,
				 enum ufs_event_type evt, void *data)
{
	unsigned int val = *(u32 *)data;
	unsigned long reg;
	u8 bit;

	trace_ufs_mtk_event(evt, val);

	/* Print details of UIC Errors */
	if (evt <= UFS_EVT_DME_ERR) {
		dev_info(hba->dev,
			 "Host UIC Error Code (%s): %08x\n",
			 ufs_uic_err_str[evt], val);
		reg = val;
	}

	if (evt == UFS_EVT_PA_ERR) {
		for_each_set_bit(bit, &reg, ARRAY_SIZE(ufs_uic_pa_err_str))
			dev_info(hba->dev, "%s\n", ufs_uic_pa_err_str[bit]);
	}

	if (evt == UFS_EVT_DL_ERR) {
		for_each_set_bit(bit, &reg, ARRAY_SIZE(ufs_uic_dl_err_str))
			dev_info(hba->dev, "%s\n", ufs_uic_dl_err_str[bit]);
	}
}

static void ufs_mtk_config_scaling_param(struct ufs_hba *hba,
				struct devfreq_dev_profile *profile,
				struct devfreq_simple_ondemand_data *data)
{
	/* Customize min gear in clk scaling */
	hba->clk_scaling.min_gear = UFS_HS_G4;

	hba->vps->devfreq_profile.polling_ms = 200;
	hba->vps->ondemand_data.upthreshold = 50;
	hba->vps->ondemand_data.downdifferential = 20;
}

/**
 * ufs_mtk_clk_scale - Internal clk scaling operation
 *
 * MTK platforms support clk scaling by switching the parent of ufs_sel
 * (a mux). ufs_sel feeds ufs_ck, which clocks the UFS hardware directly.
 * The max and min clock rates of ufs_sel defined in the dts should match
 * the rates of "ufs_sel_max_src" and "ufs_sel_min_src" respectively.
 * This prevents changing the rate of a pll clock that is shared between
 * modules.
 *
 * @hba: per adapter instance
 * @scale_up: True for scaling up and false for scaling down
 */
static void ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct ufs_mtk_clk *mclk = &host->mclk;
	struct ufs_clk_info *clki = mclk->ufs_sel_clki;
	int ret = 0;

	ret = clk_prepare_enable(clki->clk);
	if (ret) {
		dev_info(hba->dev,
			 "clk_prepare_enable() fail, ret: %d\n", ret);
		return;
	}

	if (scale_up) {
		ret = clk_set_parent(clki->clk, mclk->ufs_sel_max_clki->clk);
		clki->curr_freq = clki->max_freq;
	} else {
		ret = clk_set_parent(clki->clk, mclk->ufs_sel_min_clki->clk);
		clki->curr_freq = clki->min_freq;
	}

	if (ret) {
		dev_info(hba->dev,
			 "Failed to set ufs_sel_clki, ret: %d\n", ret);
	}

	clk_disable_unprepare(clki->clk);

	trace_ufs_mtk_clk_scale(clki->name, scale_up, clk_get_rate(clki->clk));
}

static int ufs_mtk_clk_scale_notify(struct ufs_hba *hba, bool scale_up,
				    enum ufs_notify_change_status status)
{
	if (!ufshcd_is_clkscaling_supported(hba))
		return 0;

	if (status == PRE_CHANGE) {
		/* Switch parent before clk_set_rate() */
		ufs_mtk_clk_scale(hba, scale_up);
	} else {
		/* Request interrupt latency QoS accordingly */
		ufs_mtk_scale_perf(hba, scale_up);
	}

	return 0;
}

/*
 * struct ufs_hba_mtk_vops - UFS MTK specific variant operations
 *
 * The variant operations configure the necessary controller and PHY
 * handshake during initialization.
 */
static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
	.name                = "mediatek.ufshci",
	.init                = ufs_mtk_init,
	.get_ufs_hci_version = ufs_mtk_get_ufs_hci_version,
	.setup_clocks        = ufs_mtk_setup_clocks,
	.hce_enable_notify   = ufs_mtk_hce_enable_notify,
	.link_startup_notify = ufs_mtk_link_startup_notify,
	.pwr_change_notify   = ufs_mtk_pwr_change_notify,
	.apply_dev_quirks    = ufs_mtk_apply_dev_quirks,
	.fixup_dev_quirks    = ufs_mtk_fixup_dev_quirks,
	.suspend             = ufs_mtk_suspend,
	.resume              = ufs_mtk_resume,
	.dbg_register_dump   = ufs_mtk_dbg_register_dump,
	.device_reset        = ufs_mtk_device_reset,
	.event_notify        = ufs_mtk_event_notify,
	.config_scaling_param = ufs_mtk_config_scaling_param,
	.clk_scale_notify    = ufs_mtk_clk_scale_notify,
};

/**
 * ufs_mtk_probe - probe routine of the driver
 * @pdev: pointer to Platform device handle
 *
 * Return zero for success and non-zero for failure
 */
static int ufs_mtk_probe(struct platform_device *pdev)
{
	int err;
	struct device *dev = &pdev->dev;
	struct device_node *reset_node;
	struct platform_device *reset_pdev;
	struct device_link *link;

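	/*
	 * The external reset controller (ti,syscon-reset) must probe
	 * first; a device link with DL_FLAG_AUTOPROBE_CONSUMER defers this
	 * probe until the supplier is bound. Platforms without that node
	 * simply skip the dependency.
	 */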
	reset_node = of_find_compatible_node(NULL, NULL,
					     "ti,syscon-reset");
	if (!reset_node) {
		dev_notice(dev, "find ti,syscon-reset fail\n");
		goto skip_reset;
	}
	reset_pdev = of_find_device_by_node(reset_node);
	if (!reset_pdev) {
		dev_notice(dev, "find reset_pdev fail\n");
		goto skip_reset;
	}
	link = device_link_add(dev, &reset_pdev->dev,
		DL_FLAG_AUTOPROBE_CONSUMER);
	put_device(&reset_pdev->dev);
	if (!link) {
		dev_notice(dev, "add reset device_link fail\n");
		goto skip_reset;
	}
	/* supplier is not probed */
	if (link->status == DL_STATE_DORMANT) {
		err = -EPROBE_DEFER;
		goto out;
	}

skip_reset:
	/* perform generic probe */
	err = ufshcd_pltfrm_init(pdev, &ufs_hba_mtk_vops);

out:
	if (err)
		dev_info(dev, "probe failed %d\n", err);

	of_node_put(reset_node);
	return err;
}

/**
 * ufs_mtk_remove - set driver_data of the device to NULL
 * @pdev: pointer to platform device handle
 *
 * Always returns 0
 */
static int ufs_mtk_remove(struct platform_device *pdev)
{
	struct ufs_hba *hba = platform_get_drvdata(pdev);

	pm_runtime_get_sync(&(pdev)->dev);
	ufshcd_remove(hba);
	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int ufs_mtk_system_suspend(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	int ret;

	ret = ufshcd_system_suspend(dev);
	if (ret)
		return ret;

	ufs_mtk_dev_vreg_set_lpm(hba, true);

	return 0;
}

static int ufs_mtk_system_resume(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	ufs_mtk_dev_vreg_set_lpm(hba, false);

	return ufshcd_system_resume(dev);
}
#endif

#ifdef CONFIG_PM
static int ufs_mtk_runtime_suspend(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	int ret = 0;

	ret = ufshcd_runtime_suspend(dev);
	if (ret)
		return ret;

	ufs_mtk_dev_vreg_set_lpm(hba, true);

	return 0;
}

static int ufs_mtk_runtime_resume(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	ufs_mtk_dev_vreg_set_lpm(hba, false);

	return ufshcd_runtime_resume(dev);
}
#endif

static const struct dev_pm_ops ufs_mtk_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(ufs_mtk_system_suspend,
				ufs_mtk_system_resume)
	SET_RUNTIME_PM_OPS(ufs_mtk_runtime_suspend,
			   ufs_mtk_runtime_resume, NULL)
	.prepare	 = ufshcd_suspend_prepare,
	.complete	 = ufshcd_resume_complete,
};

static struct platform_driver ufs_mtk_pltform = {
	.probe      = ufs_mtk_probe,
	.remove     = ufs_mtk_remove,
	.shutdown   = ufshcd_pltfrm_shutdown,
	.driver = {
		.name   = "ufshcd-mtk",
		.pm     = &ufs_mtk_pm_ops,
		.of_match_table = ufs_mtk_of_match,
	},
};

MODULE_AUTHOR("Stanley Chu <stanley.chu@mediatek.com>");
MODULE_AUTHOR("Peter Wang <peter.wang@mediatek.com>");
MODULE_DESCRIPTION("MediaTek UFS Host Driver");
MODULE_LICENSE("GPL v2");

module_platform_driver(ufs_mtk_pltform);