xref: /openbmc/linux/drivers/ufs/host/ufs-mediatek.c (revision bc41a722)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2019 MediaTek Inc.
4  * Authors:
5  *	Stanley Chu <stanley.chu@mediatek.com>
6  *	Peter Wang <peter.wang@mediatek.com>
7  */
8 
9 #include <linux/arm-smccc.h>
10 #include <linux/bitfield.h>
11 #include <linux/clk.h>
12 #include <linux/delay.h>
13 #include <linux/module.h>
14 #include <linux/of.h>
15 #include <linux/of_address.h>
16 #include <linux/of_device.h>
17 #include <linux/phy/phy.h>
18 #include <linux/platform_device.h>
19 #include <linux/pm_qos.h>
20 #include <linux/regulator/consumer.h>
21 #include <linux/reset.h>
22 #include <linux/soc/mediatek/mtk_sip_svc.h>
23 
24 #include <ufs/ufshcd.h>
25 #include "ufshcd-pltfrm.h"
26 #include <ufs/ufs_quirks.h>
27 #include <ufs/unipro.h>
28 #include "ufs-mediatek.h"
29 
30 #define CREATE_TRACE_POINTS
31 #include "ufs-mediatek-trace.h"
32 
/*
 * Device quirk table, matched against a device's manufacturer ID and
 * model string after enumeration.
 */
static const struct ufs_dev_quirk ufs_mtk_dev_fixups[] = {
	{ .wmanufacturerid = UFS_ANY_VENDOR,
	  .model = UFS_ANY_MODEL,
	  /* All devices: insert delays around low-power-mode transitions */
	  .quirk = UFS_DEVICE_QUIRK_DELAY_AFTER_LPM |
		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
	{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
	  .model = "H9HQ21AFAMZDAR",
	  .quirk = UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES },
	{}	/* sentinel */
};
43 
/* Devicetree compatibles handled by this driver */
static const struct of_device_id ufs_mtk_of_match[] = {
	{ .compatible = "mediatek,mt8183-ufshci" },
	{},	/* sentinel */
};
48 
/*
 * Human-readable names of the UIC error sources, used when decoding
 * UIC error interrupts for logging.
 */
static const char *const ufs_uic_err_str[] = {
	"PHY Adapter Layer",
	"Data Link Layer",
	"Network Link Layer",
	"Transport Link Layer",
	"DME"
};
59 
/* PHY Adapter layer error names, one per error-reason bit */
static const char *const ufs_uic_pa_err_str[] = {
	"PHY error on Lane 0",
	"PHY error on Lane 1",
	"PHY error on Lane 2",
	"PHY error on Lane 3",
	"Generic PHY Adapter Error. This should be the LINERESET indication"
};
67 
/* Data Link layer error names, one per error-reason bit */
static const char *const ufs_uic_dl_err_str[] = {
	"NAC_RECEIVED",
	"TCx_REPLAY_TIMER_EXPIRED",
	"AFCx_REQUEST_TIMER_EXPIRED",
	"FCx_PROTECTION_TIMER_EXPIRED",
	"CRC_ERROR",
	"RX_BUFFER_OVERFLOW",
	"MAX_FRAME_LENGTH_EXCEEDED",
	"WRONG_SEQUENCE_NUMBER",
	"AFC_FRAME_SYNTAX_ERROR",
	"NAC_FRAME_SYNTAX_ERROR",
	"EOF_SYNTAX_ERROR",
	"FRAME_SYNTAX_ERROR",
	"BAD_CTRL_SYMBOL_TYPE",
	"PA_INIT_ERROR",
	"PA_ERROR_IND_RECEIVED",
	"PA_INIT"
};
86 
87 static bool ufs_mtk_is_boost_crypt_enabled(struct ufs_hba *hba)
88 {
89 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
90 
91 	return !!(host->caps & UFS_MTK_CAP_BOOST_CRYPT_ENGINE);
92 }
93 
94 static bool ufs_mtk_is_va09_supported(struct ufs_hba *hba)
95 {
96 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
97 
98 	return !!(host->caps & UFS_MTK_CAP_VA09_PWR_CTRL);
99 }
100 
101 static bool ufs_mtk_is_broken_vcc(struct ufs_hba *hba)
102 {
103 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
104 
105 	return !!(host->caps & UFS_MTK_CAP_BROKEN_VCC);
106 }
107 
108 static bool ufs_mtk_is_pmc_via_fastauto(struct ufs_hba *hba)
109 {
110 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
111 
112 	return !!(host->caps & UFS_MTK_CAP_PMC_VIA_FASTAUTO);
113 }
114 
115 static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
116 {
117 	u32 tmp;
118 
119 	if (enable) {
120 		ufshcd_dme_get(hba,
121 			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
122 		tmp = tmp |
123 		      (1 << RX_SYMBOL_CLK_GATE_EN) |
124 		      (1 << SYS_CLK_GATE_EN) |
125 		      (1 << TX_CLK_GATE_EN);
126 		ufshcd_dme_set(hba,
127 			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
128 
129 		ufshcd_dme_get(hba,
130 			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
131 		tmp = tmp & ~(1 << TX_SYMBOL_CLK_REQ_FORCE);
132 		ufshcd_dme_set(hba,
133 			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
134 	} else {
135 		ufshcd_dme_get(hba,
136 			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
137 		tmp = tmp & ~((1 << RX_SYMBOL_CLK_GATE_EN) |
138 			      (1 << SYS_CLK_GATE_EN) |
139 			      (1 << TX_CLK_GATE_EN));
140 		ufshcd_dme_set(hba,
141 			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
142 
143 		ufshcd_dme_get(hba,
144 			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
145 		tmp = tmp | (1 << TX_SYMBOL_CLK_REQ_FORCE);
146 		ufshcd_dme_set(hba,
147 			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
148 	}
149 }
150 
/*
 * Enable the inline crypto engine through the secure monitor
 * (ufs_mtk_crypto_ctrl SMC).  On failure, clear UFSHCD_CAP_CRYPTO so
 * the core stops using inline encryption.
 */
static void ufs_mtk_crypto_enable(struct ufs_hba *hba)
{
	struct arm_smccc_res res;

	ufs_mtk_crypto_ctrl(res, 1);
	if (res.a0) {
		dev_info(hba->dev, "%s: crypto enable failed, err: %lu\n",
			 __func__, res.a0);
		hba->caps &= ~UFSHCD_CAP_CRYPTO;
	}
}
162 
/*
 * Pulse all host-side resets: assert hci, crypto and unipro resets,
 * hold for at least 100 us, then de-assert in the reverse order.
 * Missing reset lines are NULL here and reset_control_*() no-ops.
 */
static void ufs_mtk_host_reset(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	reset_control_assert(host->hci_reset);
	reset_control_assert(host->crypto_reset);
	reset_control_assert(host->unipro_reset);

	usleep_range(100, 110);

	reset_control_deassert(host->unipro_reset);
	reset_control_deassert(host->crypto_reset);
	reset_control_deassert(host->hci_reset);
}
177 
178 static void ufs_mtk_init_reset_control(struct ufs_hba *hba,
179 				       struct reset_control **rc,
180 				       char *str)
181 {
182 	*rc = devm_reset_control_get(hba->dev, str);
183 	if (IS_ERR(*rc)) {
184 		dev_info(hba->dev, "Failed to get reset control %s: %ld\n",
185 			 str, PTR_ERR(*rc));
186 		*rc = NULL;
187 	}
188 }
189 
190 static void ufs_mtk_init_reset(struct ufs_hba *hba)
191 {
192 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
193 
194 	ufs_mtk_init_reset_control(hba, &host->hci_reset,
195 				   "hci_rst");
196 	ufs_mtk_init_reset_control(hba, &host->unipro_reset,
197 				   "unipro_rst");
198 	ufs_mtk_init_reset_control(hba, &host->crypto_reset,
199 				   "crypto_rst");
200 }
201 
202 static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
203 				     enum ufs_notify_change_status status)
204 {
205 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
206 
207 	if (status == PRE_CHANGE) {
208 		if (host->unipro_lpm) {
209 			hba->vps->hba_enable_delay_us = 0;
210 		} else {
211 			hba->vps->hba_enable_delay_us = 600;
212 			ufs_mtk_host_reset(hba);
213 		}
214 
215 		if (hba->caps & UFSHCD_CAP_CRYPTO)
216 			ufs_mtk_crypto_enable(hba);
217 
218 		if (host->caps & UFS_MTK_CAP_DISABLE_AH8) {
219 			ufshcd_writel(hba, 0,
220 				      REG_AUTO_HIBERNATE_IDLE_TIMER);
221 			hba->capabilities &= ~MASK_AUTO_HIBERN8_SUPPORT;
222 			hba->ahit = 0;
223 		}
224 
225 		/*
226 		 * Turn on CLK_CG early to bypass abnormal ERR_CHK signal
227 		 * to prevent host hang issue
228 		 */
229 		ufshcd_writel(hba,
230 			      ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) | 0x80,
231 			      REG_UFS_XOUFS_CTRL);
232 	}
233 
234 	return 0;
235 }
236 
/*
 * ufs_mtk_bind_mphy - bind the optional M-PHY to this host
 * @hba: host controller instance
 *
 * Returns -EPROBE_DEFER when the phy driver is not ready yet, another
 * negative error on lookup failure, or 0 when the phy was bound or is
 * simply absent (-ENODEV is tolerated; host->mphy stays NULL then).
 */
static int ufs_mtk_bind_mphy(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct device *dev = hba->dev;
	struct device_node *np = dev->of_node;
	int err = 0;

	host->mphy = devm_of_phy_get_by_index(dev, np, 0);

	if (host->mphy == ERR_PTR(-EPROBE_DEFER)) {
		/*
		 * UFS driver might be probed before the phy driver does.
		 * In that case we would like to return EPROBE_DEFER code.
		 */
		err = -EPROBE_DEFER;
		dev_info(dev,
			 "%s: required phy hasn't probed yet. err = %d\n",
			__func__, err);
	} else if (IS_ERR(host->mphy)) {
		err = PTR_ERR(host->mphy);
		if (err != -ENODEV) {
			dev_info(dev, "%s: PHY get failed %d\n", __func__,
				 err);
		}
	}

	if (err)
		host->mphy = NULL;
	/*
	 * Allow unbound mphy because not every platform needs specific
	 * mphy control.
	 */
	if (err == -ENODEV)
		err = 0;

	return err;
}
274 
/*
 * ufs_mtk_setup_ref_clk - request or release the device reference clock
 * @hba: host controller instance
 * @on:  true to request the ref-clk, false to release it
 *
 * Writes the request/release bit to REG_UFS_REFCLK_CTRL and polls until
 * the hardware ack bit mirrors the request, up to REFCLK_REQ_TIMEOUT_US.
 * Releasing waits the device-specific gating time first; a successful
 * request waits the ungating time afterwards.  PRE/POST notifications
 * are sent to the secure monitor around the change.
 *
 * Returns 0 on success, -ETIMEDOUT when the ack never arrives.
 */
static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct arm_smccc_res res;
	ktime_t timeout, time_checked;
	u32 value;

	/* Nothing to do if already in the requested state */
	if (host->ref_clk_enabled == on)
		return 0;

	ufs_mtk_ref_clk_notify(on, PRE_CHANGE, res);

	if (on) {
		ufshcd_writel(hba, REFCLK_REQUEST, REG_UFS_REFCLK_CTRL);
	} else {
		ufshcd_delay_us(host->ref_clk_gating_wait_us, 10);
		ufshcd_writel(hba, REFCLK_RELEASE, REG_UFS_REFCLK_CTRL);
	}

	/* Wait for ack */
	timeout = ktime_add_us(ktime_get(), REFCLK_REQ_TIMEOUT_US);
	do {
		time_checked = ktime_get();
		value = ufshcd_readl(hba, REG_UFS_REFCLK_CTRL);

		/* Wait until ack bit equals to req bit */
		if (((value & REFCLK_ACK) >> 1) == (value & REFCLK_REQUEST))
			goto out;

		usleep_range(100, 200);
	} while (ktime_before(time_checked, timeout));

	dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value);

	/* Notify with the unchanged state since the request failed */
	ufs_mtk_ref_clk_notify(host->ref_clk_enabled, POST_CHANGE, res);

	return -ETIMEDOUT;

out:
	host->ref_clk_enabled = on;
	if (on)
		ufshcd_delay_us(host->ref_clk_ungating_wait_us, 10);

	ufs_mtk_ref_clk_notify(on, POST_CHANGE, res);

	return 0;
}
322 
323 static void ufs_mtk_setup_ref_clk_wait_us(struct ufs_hba *hba,
324 					  u16 gating_us)
325 {
326 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
327 
328 	if (hba->dev_info.clk_gating_wait_us) {
329 		host->ref_clk_gating_wait_us =
330 			hba->dev_info.clk_gating_wait_us;
331 	} else {
332 		host->ref_clk_gating_wait_us = gating_us;
333 	}
334 
335 	host->ref_clk_ungating_wait_us = REFCLK_DEFAULT_WAIT_US;
336 }
337 
/*
 * Program the debug-signal mux so that REG_UFS_PROBE exposes the link
 * state machine.  IP versions >= 0x36 (bits 23:16 of ip_ver) have a
 * wider mux that also needs the four per-bank select registers; the
 * magic values are MediaTek-specific mux settings.
 */
static void ufs_mtk_dbg_sel(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	if (((host->ip_ver >> 16) & 0xFF) >= 0x36) {
		ufshcd_writel(hba, 0x820820, REG_UFS_DEBUG_SEL);
		ufshcd_writel(hba, 0x0, REG_UFS_DEBUG_SEL_B0);
		ufshcd_writel(hba, 0x55555555, REG_UFS_DEBUG_SEL_B1);
		ufshcd_writel(hba, 0xaaaaaaaa, REG_UFS_DEBUG_SEL_B2);
		ufshcd_writel(hba, 0xffffffff, REG_UFS_DEBUG_SEL_B3);
	} else {
		ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
	}
}
352 
/*
 * ufs_mtk_wait_idle_state - wait for the link state machine to settle
 * @hba: host controller instance
 * @retry_ms: polling budget in milliseconds
 *
 * Polls the debug probe (low 5 bits = state machine) and, if a
 * hibernate entry/exit transition is observed, keeps waiting until the
 * machine returns to VS_HCE_BASE.  Timeout is only logged, not fatal.
 */
static void ufs_mtk_wait_idle_state(struct ufs_hba *hba,
			    unsigned long retry_ms)
{
	u64 timeout, time_checked;
	u32 val, sm;
	bool wait_idle;

	/* cannot use plain ktime_get() in suspend */
	timeout = ktime_get_mono_fast_ns() + retry_ms * 1000000UL;

	/* wait a specific time after check base */
	udelay(10);
	wait_idle = false;

	do {
		time_checked = ktime_get_mono_fast_ns();
		ufs_mtk_dbg_sel(hba);
		val = ufshcd_readl(hba, REG_UFS_PROBE);

		/* low 5 bits carry the state-machine encoding */
		sm = val & 0x1f;

		/*
		 * if state is in H8 enter and H8 enter confirm
		 * wait until return to idle state.
		 */
		if ((sm >= VS_HIB_ENTER) && (sm <= VS_HIB_EXIT)) {
			wait_idle = true;
			udelay(50);
			continue;
		} else if (!wait_idle)
			break;

		if (wait_idle && (sm == VS_HCE_BASE))
			break;
	} while (time_checked < timeout);

	if (wait_idle && sm != VS_HCE_BASE)
		dev_info(hba->dev, "wait idle tmo: 0x%x\n", val);
}
392 
/*
 * ufs_mtk_wait_link_state - poll until the link reaches @state
 * @hba: host controller instance
 * @state: expected link state (REG_UFS_PROBE bits 31:28)
 * @max_wait_ms: polling budget in milliseconds
 *
 * Returns 0 when the state was observed, -ETIMEDOUT otherwise.
 */
static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state,
				   unsigned long max_wait_ms)
{
	ktime_t timeout, time_checked;
	u32 val;

	timeout = ktime_add_ms(ktime_get(), max_wait_ms);
	do {
		time_checked = ktime_get();
		ufs_mtk_dbg_sel(hba);
		val = ufshcd_readl(hba, REG_UFS_PROBE);
		/* link state lives in the top nibble of the probe value */
		val = val >> 28;

		if (val == state)
			return 0;

		/* Sleep for max. 200us */
		usleep_range(100, 200);
	} while (ktime_before(time_checked, timeout));

	return -ETIMEDOUT;
}
415 
/*
 * ufs_mtk_mphy_power_on - power the M-PHY (and VA09 rail) up or down
 * @hba: host controller instance
 * @on: target power state
 *
 * Power-up order: enable VA09 regulator, settle, SMC VA09 control,
 * then phy_power_on(); power-down is the exact reverse.  A no-op when
 * there is no mphy or the cached state already matches.  The cached
 * state (host->mphy_powered_on) is updated only on success.
 */
static int ufs_mtk_mphy_power_on(struct ufs_hba *hba, bool on)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct phy *mphy = host->mphy;
	struct arm_smccc_res res;
	int ret = 0;

	if (!mphy || !(on ^ host->mphy_powered_on))
		return 0;

	if (on) {
		if (ufs_mtk_is_va09_supported(hba)) {
			ret = regulator_enable(host->reg_va09);
			if (ret < 0)
				goto out;
			/* wait 200 us to stabilize VA09 */
			usleep_range(200, 210);
			ufs_mtk_va09_pwr_ctrl(res, 1);
		}
		phy_power_on(mphy);
	} else {
		phy_power_off(mphy);
		if (ufs_mtk_is_va09_supported(hba)) {
			ufs_mtk_va09_pwr_ctrl(res, 0);
			ret = regulator_disable(host->reg_va09);
		}
	}
out:
	if (ret) {
		dev_info(hba->dev,
			 "failed to %s va09: %d\n",
			 on ? "enable" : "disable",
			 ret);
	} else {
		host->mphy_powered_on = on;
	}

	return ret;
}
455 
/*
 * Fetch a named clock for @dev into @clk_out.  On failure @clk_out is
 * left untouched and the error from devm_clk_get() is returned.
 */
static int ufs_mtk_get_host_clk(struct device *dev, const char *name,
				struct clk **clk_out)
{
	struct clk *c = devm_clk_get(dev, name);

	if (IS_ERR(c))
		return PTR_ERR(c);

	*clk_out = c;
	return 0;
}
470 
/*
 * ufs_mtk_boost_crypt - boost or relax vcore/clock for the crypto engine
 * @hba: host controller instance
 * @boost: true to raise vcore and switch the crypt mux to the perf
 *         parent; false to switch to the low-power parent and drop vcore
 *
 * No-op unless UFS_MTK_CAP_BOOST_CRYPT_ENGINE is set.  The mux clock is
 * kept prepared/enabled only for the duration of the reparenting.
 * Errors are logged; if the perf switch fails the voltage raise is
 * rolled back.
 */
static void ufs_mtk_boost_crypt(struct ufs_hba *hba, bool boost)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct ufs_mtk_crypt_cfg *cfg;
	struct regulator *reg;
	int volt, ret;

	if (!ufs_mtk_is_boost_crypt_enabled(hba))
		return;

	cfg = host->crypt;
	volt = cfg->vcore_volt;
	reg = cfg->reg_vcore;

	ret = clk_prepare_enable(cfg->clk_crypt_mux);
	if (ret) {
		dev_info(hba->dev, "clk_prepare_enable(): %d\n",
			 ret);
		return;
	}

	if (boost) {
		/* Raise vcore before switching to the faster parent */
		ret = regulator_set_voltage(reg, volt, INT_MAX);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set vcore to %d\n", volt);
			goto out;
		}

		ret = clk_set_parent(cfg->clk_crypt_mux,
				     cfg->clk_crypt_perf);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set clk_crypt_perf\n");
			regulator_set_voltage(reg, 0, INT_MAX);
			goto out;
		}
	} else {
		/* Switch to the slower parent before dropping vcore */
		ret = clk_set_parent(cfg->clk_crypt_mux,
				     cfg->clk_crypt_lp);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set clk_crypt_lp\n");
			goto out;
		}

		ret = regulator_set_voltage(reg, 0, INT_MAX);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set vcore to MIN\n");
		}
	}
out:
	clk_disable_unprepare(cfg->clk_crypt_mux);
}
526 
527 static int ufs_mtk_init_host_clk(struct ufs_hba *hba, const char *name,
528 				 struct clk **clk)
529 {
530 	int ret;
531 
532 	ret = ufs_mtk_get_host_clk(hba->dev, name, clk);
533 	if (ret) {
534 		dev_info(hba->dev, "%s: failed to get %s: %d", __func__,
535 			 name, ret);
536 	}
537 
538 	return ret;
539 }
540 
541 static void ufs_mtk_init_boost_crypt(struct ufs_hba *hba)
542 {
543 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
544 	struct ufs_mtk_crypt_cfg *cfg;
545 	struct device *dev = hba->dev;
546 	struct regulator *reg;
547 	u32 volt;
548 
549 	host->crypt = devm_kzalloc(dev, sizeof(*(host->crypt)),
550 				   GFP_KERNEL);
551 	if (!host->crypt)
552 		goto disable_caps;
553 
554 	reg = devm_regulator_get_optional(dev, "dvfsrc-vcore");
555 	if (IS_ERR(reg)) {
556 		dev_info(dev, "failed to get dvfsrc-vcore: %ld",
557 			 PTR_ERR(reg));
558 		goto disable_caps;
559 	}
560 
561 	if (of_property_read_u32(dev->of_node, "boost-crypt-vcore-min",
562 				 &volt)) {
563 		dev_info(dev, "failed to get boost-crypt-vcore-min");
564 		goto disable_caps;
565 	}
566 
567 	cfg = host->crypt;
568 	if (ufs_mtk_init_host_clk(hba, "crypt_mux",
569 				  &cfg->clk_crypt_mux))
570 		goto disable_caps;
571 
572 	if (ufs_mtk_init_host_clk(hba, "crypt_lp",
573 				  &cfg->clk_crypt_lp))
574 		goto disable_caps;
575 
576 	if (ufs_mtk_init_host_clk(hba, "crypt_perf",
577 				  &cfg->clk_crypt_perf))
578 		goto disable_caps;
579 
580 	cfg->reg_vcore = reg;
581 	cfg->vcore_volt = volt;
582 	host->caps |= UFS_MTK_CAP_BOOST_CRYPT_ENGINE;
583 
584 disable_caps:
585 	return;
586 }
587 
/*
 * Look up the "va09" regulator and, if present, advertise VA09 power
 * control capability.
 *
 * NOTE(review): plain regulator_get() is used, not devm_regulator_get(),
 * and no matching regulator_put() is visible in this chunk — confirm
 * the reference is released elsewhere (or consider the devm variant).
 */
static void ufs_mtk_init_va09_pwr_ctrl(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	host->reg_va09 = regulator_get(hba->dev, "va09");
	if (IS_ERR(host->reg_va09))
		dev_info(hba->dev, "failed to get va09");
	else
		host->caps |= UFS_MTK_CAP_VA09_PWR_CTRL;
}
598 
599 static void ufs_mtk_init_host_caps(struct ufs_hba *hba)
600 {
601 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
602 	struct device_node *np = hba->dev->of_node;
603 
604 	if (of_property_read_bool(np, "mediatek,ufs-boost-crypt"))
605 		ufs_mtk_init_boost_crypt(hba);
606 
607 	if (of_property_read_bool(np, "mediatek,ufs-support-va09"))
608 		ufs_mtk_init_va09_pwr_ctrl(hba);
609 
610 	if (of_property_read_bool(np, "mediatek,ufs-disable-ah8"))
611 		host->caps |= UFS_MTK_CAP_DISABLE_AH8;
612 
613 	if (of_property_read_bool(np, "mediatek,ufs-broken-vcc"))
614 		host->caps |= UFS_MTK_CAP_BROKEN_VCC;
615 
616 	if (of_property_read_bool(np, "mediatek,ufs-pmc-via-fastauto"))
617 		host->caps |= UFS_MTK_CAP_PMC_VIA_FASTAUTO;
618 
619 	dev_info(hba->dev, "caps: 0x%x", host->caps);
620 }
621 
622 static void ufs_mtk_boost_pm_qos(struct ufs_hba *hba, bool boost)
623 {
624 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
625 
626 	if (!host || !host->pm_qos_init)
627 		return;
628 
629 	cpu_latency_qos_update_request(&host->pm_qos_req,
630 				       boost ? 0 : PM_QOS_DEFAULT_VALUE);
631 }
632 
/* Scale crypto-engine vcore/clock and CPU-latency QoS together */
static void ufs_mtk_scale_perf(struct ufs_hba *hba, bool scale_up)
{
	ufs_mtk_boost_crypt(hba, scale_up);
	ufs_mtk_boost_pm_qos(hba, scale_up);
}
638 
/*
 * Power-on sequence: phy, then ref-clk, then perf boost.
 * Power-off runs the exact reverse.  Perf scaling is only handled here
 * when the core's clock-scaling framework is not in charge of it.
 */
static void ufs_mtk_pwr_ctrl(struct ufs_hba *hba, bool on)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	if (on) {
		phy_power_on(host->mphy);
		ufs_mtk_setup_ref_clk(hba, on);
		if (!ufshcd_is_clkscaling_supported(hba))
			ufs_mtk_scale_perf(hba, on);
	} else {
		if (!ufshcd_is_clkscaling_supported(hba))
			ufs_mtk_scale_perf(hba, on);
		ufs_mtk_setup_ref_clk(hba, on);
		phy_power_off(host->mphy);
	}
}
655 
/**
 * ufs_mtk_setup_clocks - enables/disable clocks
 * @hba: host controller instance
 * @on: If true, enable clocks else disable them.
 * @status: PRE_CHANGE or POST_CHANGE notify
 *
 * Powers the phy/ref-clk down before the core gates clocks (PRE_CHANGE
 * of "off") and back up after the core has ungated them (POST_CHANGE of
 * "on"), but only when the link is OFF or safely in Hibern8.
 *
 * Returns 0 on success, non-zero on failure.
 */
static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
				enum ufs_notify_change_status status)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	bool clk_pwr_off = false;
	int ret = 0;

	/*
	 * In case ufs_mtk_init() is not yet done, simply ignore.
	 * This ufs_mtk_setup_clocks() shall be called from
	 * ufs_mtk_init() after init is done.
	 */
	if (!host)
		return 0;

	if (!on && status == PRE_CHANGE) {
		if (ufshcd_is_link_off(hba)) {
			clk_pwr_off = true;
		} else if (ufshcd_is_link_hibern8(hba) ||
			 (!ufshcd_can_hibern8_during_gating(hba) &&
			 ufshcd_is_auto_hibern8_enabled(hba))) {
			/*
			 * Gate ref-clk and poweroff mphy if link state is in
			 * OFF or Hibern8 by either Auto-Hibern8 or
			 * ufshcd_link_state_transition().
			 */
			ret = ufs_mtk_wait_link_state(hba,
						      VS_LINK_HIBERN8,
						      15);
			if (!ret)
				clk_pwr_off = true;
		}

		if (clk_pwr_off)
			ufs_mtk_pwr_ctrl(hba, false);
	} else if (on && status == POST_CHANGE) {
		ufs_mtk_pwr_ctrl(hba, true);
	}

	return ret;
}
705 
706 static void ufs_mtk_get_controller_version(struct ufs_hba *hba)
707 {
708 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
709 	int ret, ver = 0;
710 
711 	if (host->hw_ver.major)
712 		return;
713 
714 	/* Set default (minimum) version anyway */
715 	host->hw_ver.major = 2;
716 
717 	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_LOCALVERINFO), &ver);
718 	if (!ret) {
719 		if (ver >= UFS_UNIPRO_VER_1_8) {
720 			host->hw_ver.major = 3;
721 			/*
722 			 * Fix HCI version for some platforms with
723 			 * incorrect version
724 			 */
725 			if (hba->ufs_version < ufshci_version(3, 0))
726 				hba->ufs_version = ufshci_version(3, 0);
727 		}
728 	}
729 }
730 
/*
 * Return the cached HCI version, which may have been fixed up by
 * ufs_mtk_get_controller_version().
 */
static u32 ufs_mtk_get_ufs_hci_version(struct ufs_hba *hba)
{
	return hba->ufs_version;
}
735 
/**
 * ufs_mtk_init_clocks - Init mtk driver private clocks
 *
 * @hba: per adapter instance
 */
static void ufs_mtk_init_clocks(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct list_head *head = &hba->clk_list_head;
	struct ufs_mtk_clk *mclk = &host->mclk;
	struct ufs_clk_info *clki, *clki_tmp;

	/*
	 * Find private clocks and store them in struct ufs_mtk_clk.
	 * Remove "ufs_sel_max_src" and "ufs_sel_min_src" from list to avoid
	 * being switched on/off in clock gating.
	 */
	list_for_each_entry_safe(clki, clki_tmp, head, list) {
		if (!strcmp(clki->name, "ufs_sel")) {
			host->mclk.ufs_sel_clki = clki;
		} else if (!strcmp(clki->name, "ufs_sel_max_src")) {
			host->mclk.ufs_sel_max_clki = clki;
			clk_disable_unprepare(clki->clk);
			list_del(&clki->list);
		} else if (!strcmp(clki->name, "ufs_sel_min_src")) {
			host->mclk.ufs_sel_min_clki = clki;
			clk_disable_unprepare(clki->clk);
			list_del(&clki->list);
		}
	}

	/* All three mux/parent clocks are required for clock scaling */
	if (!mclk->ufs_sel_clki || !mclk->ufs_sel_max_clki ||
	    !mclk->ufs_sel_min_clki) {
		hba->caps &= ~UFSHCD_CAP_CLK_SCALING;
		dev_info(hba->dev,
			 "%s: Clk-scaling not ready. Feature disabled.",
			 __func__);
	}
}
775 
#define MAX_VCC_NAME 30
/*
 * ufs_mtk_vreg_fix_vcc - bind a platform-specific VCC regulator when
 * none was declared generically
 * @hba: host controller instance
 *
 * The regulator name is derived either from an SMC-provided index
 * ("vcc-optN", property mediatek,ufs-vcc-by-num) or from the device's
 * UFS spec major version ("vcc-ufsN", mediatek,ufs-vcc-by-ver).  The
 * chosen regulator is populated, fetched and enabled immediately.
 *
 * Returns 0 when VCC already exists or no scheme applies, negative
 * error code on failure.
 */
static int ufs_mtk_vreg_fix_vcc(struct ufs_hba *hba)
{
	struct ufs_vreg_info *info = &hba->vreg_info;
	struct device_node *np = hba->dev->of_node;
	struct device *dev = hba->dev;
	char vcc_name[MAX_VCC_NAME];
	struct arm_smccc_res res;
	int err, ver;

	if (hba->vreg_info.vcc)
		return 0;

	if (of_property_read_bool(np, "mediatek,ufs-vcc-by-num")) {
		ufs_mtk_get_vcc_num(res);
		if (res.a1 > UFS_VCC_NONE && res.a1 < UFS_VCC_MAX)
			snprintf(vcc_name, MAX_VCC_NAME, "vcc-opt%lu", res.a1);
		else
			return -ENODEV;
	} else if (of_property_read_bool(np, "mediatek,ufs-vcc-by-ver")) {
		/* Spec major version is bits 11:8 of wspecversion */
		ver = (hba->dev_info.wspecversion & 0xF00) >> 8;
		snprintf(vcc_name, MAX_VCC_NAME, "vcc-ufs%u", ver);
	} else {
		return 0;
	}

	err = ufshcd_populate_vreg(dev, vcc_name, &info->vcc);
	if (err)
		return err;

	err = ufshcd_get_vreg(dev, info->vcc);
	if (err)
		return err;

	err = regulator_enable(info->vcc->reg);
	if (!err) {
		info->vcc->enabled = true;
		dev_info(dev, "%s: %s enabled\n", __func__, vcc_name);
	}

	return err;
}
818 
/*
 * UFS >= 3.0 devices use VCCQ; older devices use VCCQ2.  Keep the rail
 * that is actually used always-on, and release the unused one: disable
 * it and free its bookkeeping so the core ignores it from now on.
 */
static void ufs_mtk_vreg_fix_vccqx(struct ufs_hba *hba)
{
	struct ufs_vreg_info *info = &hba->vreg_info;
	struct ufs_vreg **vreg_on, **vreg_off;

	if (hba->dev_info.wspecversion >= 0x0300) {
		vreg_on = &info->vccq;
		vreg_off = &info->vccq2;
	} else {
		vreg_on = &info->vccq2;
		vreg_off = &info->vccq;
	}

	if (*vreg_on)
		(*vreg_on)->always_on = true;

	if (*vreg_off) {
		regulator_disable((*vreg_off)->reg);
		devm_kfree(hba->dev, (*vreg_off)->name);
		devm_kfree(hba->dev, *vreg_off);
		*vreg_off = NULL;
	}
}
842 
/**
 * ufs_mtk_init - find other essential mmio bases
 * @hba: host controller instance
 *
 * Binds PHY with controller and powers up PHY enabling clocks
 * and regulators.
 *
 * Returns -EPROBE_DEFER if binding fails, returns negative error
 * on phy power up failure and returns zero on success.
 */
static int ufs_mtk_init(struct ufs_hba *hba)
{
	const struct of_device_id *id;
	struct device *dev = hba->dev;
	struct ufs_mtk_host *host;
	int err = 0;

	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
	if (!host) {
		err = -ENOMEM;
		dev_info(dev, "%s: no memory for mtk ufs host\n", __func__);
		goto out;
	}

	host->hba = hba;
	ufshcd_set_variant(hba, host);

	id = of_match_device(ufs_mtk_of_match, dev);
	if (!id) {
		err = -EINVAL;
		goto out;
	}

	/* Initialize host capability */
	ufs_mtk_init_host_caps(hba);

	err = ufs_mtk_bind_mphy(hba);
	if (err)
		goto out_variant_clear;

	ufs_mtk_init_reset(hba);

	/* Enable runtime autosuspend */
	hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;

	/* Enable clock-gating */
	hba->caps |= UFSHCD_CAP_CLK_GATING;

	/* Enable inline encryption */
	hba->caps |= UFSHCD_CAP_CRYPTO;

	/* Enable WriteBooster */
	hba->caps |= UFSHCD_CAP_WB_EN;

	/* Enable clk scaling */
	hba->caps |= UFSHCD_CAP_CLK_SCALING;

	hba->quirks |= UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL;
	hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_INTR;
	hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_RTC;
	hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80);

	if (host->caps & UFS_MTK_CAP_DISABLE_AH8)
		hba->caps |= UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;

	ufs_mtk_init_clocks(hba);

	/*
	 * ufshcd_vops_init() is invoked after
	 * ufshcd_setup_clock(true) in ufshcd_hba_init() thus
	 * phy clock setup is skipped.
	 *
	 * Enable phy clocks specifically here.
	 */
	ufs_mtk_mphy_power_on(hba, true);
	ufs_mtk_setup_clocks(hba, true, POST_CHANGE);

	host->ip_ver = ufshcd_readl(hba, REG_UFS_MTK_IP_VER);

	/* Initialize pm-qos request */
	cpu_latency_qos_add_request(&host->pm_qos_req, PM_QOS_DEFAULT_VALUE);
	host->pm_qos_init = true;

	goto out;

out_variant_clear:
	ufshcd_set_variant(hba, NULL);
out:
	return err;
}
933 
934 static bool ufs_mtk_pmc_via_fastauto(struct ufs_hba *hba,
935 				     struct ufs_pa_layer_attr *dev_req_params)
936 {
937 	if (!ufs_mtk_is_pmc_via_fastauto(hba))
938 		return false;
939 
940 	if (dev_req_params->hs_rate == hba->pwr_info.hs_rate)
941 		return false;
942 
943 	if (dev_req_params->pwr_tx != FAST_MODE &&
944 	    dev_req_params->gear_tx < UFS_HS_G4)
945 		return false;
946 
947 	if (dev_req_params->pwr_rx != FAST_MODE &&
948 	    dev_req_params->gear_rx < UFS_HS_G4)
949 		return false;
950 
951 	return true;
952 }
953 
/*
 * ufs_mtk_pre_pwr_change - negotiate parameters before a power-mode
 * change
 * @hba: host controller instance
 * @dev_max_params: device-reported capabilities
 * @dev_req_params: out; agreed parameters
 *
 * Host supports up to HS-G5 in both directions.  When the fastauto
 * path applies, first enter HS-G1 FASTAUTO with terminations enabled
 * and no HS adaptation; hosts with hw major version >= 3 additionally
 * program initial adaptation for the target TX gear.
 */
static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
				  struct ufs_pa_layer_attr *dev_max_params,
				  struct ufs_pa_layer_attr *dev_req_params)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct ufs_dev_params host_cap;
	int ret;

	ufshcd_init_pwr_dev_param(&host_cap);
	host_cap.hs_rx_gear = UFS_HS_G5;
	host_cap.hs_tx_gear = UFS_HS_G5;

	ret = ufshcd_get_pwr_dev_param(&host_cap,
				       dev_max_params,
				       dev_req_params);
	if (ret) {
		pr_info("%s: failed to determine capabilities\n",
			__func__);
	}

	if (ufs_mtk_pmc_via_fastauto(hba, dev_req_params)) {
		/* Start from HS-G1 with terminations on, both directions */
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), UFS_HS_G1);

		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), true);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), UFS_HS_G1);

		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
			       dev_req_params->lane_tx);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
			       dev_req_params->lane_rx);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
			       dev_req_params->hs_rate);

		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXHSADAPTTYPE),
			       PA_NO_ADAPT);

		ret = ufshcd_uic_change_pwr_mode(hba,
					FASTAUTO_MODE << 4 | FASTAUTO_MODE);

		if (ret) {
			dev_err(hba->dev, "%s: HSG1B FASTAUTO failed ret=%d\n",
				__func__, ret);
		}
	}

	if (host->hw_ver.major >= 3) {
		ret = ufshcd_dme_configure_adapt(hba,
					   dev_req_params->gear_tx,
					   PA_INITIAL_ADAPT);
	}

	return ret;
}
1008 
1009 static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
1010 				     enum ufs_notify_change_status stage,
1011 				     struct ufs_pa_layer_attr *dev_max_params,
1012 				     struct ufs_pa_layer_attr *dev_req_params)
1013 {
1014 	int ret = 0;
1015 
1016 	switch (stage) {
1017 	case PRE_CHANGE:
1018 		ret = ufs_mtk_pre_pwr_change(hba, dev_max_params,
1019 					     dev_req_params);
1020 		break;
1021 	case POST_CHANGE:
1022 		break;
1023 	default:
1024 		ret = -EINVAL;
1025 		break;
1026 	}
1027 
1028 	return ret;
1029 }
1030 
/*
 * Enter or leave UniPro low-power mode through the vendor
 * VS_UNIPROPOWERDOWNCONTROL attribute, tracking the result in
 * host->unipro_lpm.
 */
static int ufs_mtk_unipro_set_lpm(struct ufs_hba *hba, bool lpm)
{
	int ret;
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	ret = ufshcd_dme_set(hba,
			     UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
			     lpm ? 1 : 0);
	if (!ret || !lpm) {
		/*
		 * Forcibly mark as non-LPM if the UIC command failed, so
		 * the default hba_enable_delay_us value is used when
		 * re-enabling the host.
		 */
		host->unipro_lpm = lpm;
	}

	return ret;
}
1050 
/*
 * Pre-link-startup setup: leave UniPro LPM, disable host TX LCC and
 * clear the vendor "deep stall" control bit.
 * Returns 0 on success or the first failing DME error code.
 */
static int ufs_mtk_pre_link(struct ufs_hba *hba)
{
	int ret;
	u32 tmp;

	ufs_mtk_get_controller_version(hba);

	ret = ufs_mtk_unipro_set_lpm(hba, false);
	if (ret)
		return ret;

	/*
	 * Setting PA_Local_TX_LCC_Enable to 0 before link startup
	 * to make sure that both host and device TX LCC are disabled
	 * once link startup is completed.
	 */
	ret = ufshcd_disable_host_tx_lcc(hba);
	if (ret)
		return ret;

	/* disable deep stall */
	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
	if (ret)
		return ret;

	/* bit 6 of VS_SAVEPOWERCONTROL controls deep stall */
	tmp &= ~(1 << 6);

	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

	return ret;
}
1082 
1083 static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
1084 {
1085 	u32 ah_ms;
1086 
1087 	if (ufshcd_is_clkgating_allowed(hba)) {
1088 		if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit)
1089 			ah_ms = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK,
1090 					  hba->ahit);
1091 		else
1092 			ah_ms = 10;
1093 		ufshcd_clkgate_delay_set(hba->dev, ah_ms + 5);
1094 	}
1095 }
1096 
/*
 * Post-link-startup setup: enable UniPro clock gating, program a
 * default auto-hibern8 timer (timer=10, scale=3) and derive the
 * clock-gating delay from it.
 */
static void ufs_mtk_post_link(struct ufs_hba *hba)
{
	/* enable unipro clock gating feature */
	ufs_mtk_cfg_unipro_cg(hba, true);

	/* will be configured during probe hba */
	if (ufshcd_is_auto_hibern8_supported(hba))
		hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) |
			FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);

	ufs_mtk_setup_clk_gating(hba);
}
1109 
1110 static int ufs_mtk_link_startup_notify(struct ufs_hba *hba,
1111 				       enum ufs_notify_change_status stage)
1112 {
1113 	int ret = 0;
1114 
1115 	switch (stage) {
1116 	case PRE_CHANGE:
1117 		ret = ufs_mtk_pre_link(hba);
1118 		break;
1119 	case POST_CHANGE:
1120 		ufs_mtk_post_link(hba);
1121 		break;
1122 	default:
1123 		ret = -EINVAL;
1124 		break;
1125 	}
1126 
1127 	return ret;
1128 }
1129 
/*
 * Toggle the device RST_n line through the secure monitor: stop the
 * host, drive RST_n low for >= 10 us, release it, then give the device
 * 10-15 ms to come back.  Always returns 0.
 */
static int ufs_mtk_device_reset(struct ufs_hba *hba)
{
	struct arm_smccc_res res;

	/* disable hba before device reset */
	ufshcd_hba_stop(hba);

	ufs_mtk_device_reset_ctrl(0, res);

	/*
	 * The reset signal is active low. UFS devices shall detect
	 * more than or equal to 1us of positive or negative RST_n
	 * pulse width.
	 *
	 * To be on safe side, keep the reset low for at least 10us.
	 */
	usleep_range(10, 15);

	ufs_mtk_device_reset_ctrl(1, res);

	/* Some devices may need time to respond to rst_n */
	usleep_range(10000, 15000);

	dev_info(hba->dev, "device reset done\n");

	return 0;
}
1157 
1158 static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
1159 {
1160 	int err;
1161 
1162 	err = ufshcd_hba_enable(hba);
1163 	if (err)
1164 		return err;
1165 
1166 	err = ufs_mtk_unipro_set_lpm(hba, false);
1167 	if (err)
1168 		return err;
1169 
1170 	err = ufshcd_uic_hibern8_exit(hba);
1171 	if (!err)
1172 		ufshcd_set_link_active(hba);
1173 	else
1174 		return err;
1175 
1176 	err = ufshcd_make_hba_operational(hba);
1177 	if (err)
1178 		return err;
1179 
1180 	return 0;
1181 }
1182 
1183 static int ufs_mtk_link_set_lpm(struct ufs_hba *hba)
1184 {
1185 	int err;
1186 
1187 	/* Disable reset confirm feature by UniPro */
1188 	ufshcd_writel(hba,
1189 		      (ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) & ~0x100),
1190 		      REG_UFS_XOUFS_CTRL);
1191 
1192 	err = ufs_mtk_unipro_set_lpm(hba, true);
1193 	if (err) {
1194 		/* Resume UniPro state for following error recovery */
1195 		ufs_mtk_unipro_set_lpm(hba, false);
1196 		return err;
1197 	}
1198 
1199 	return 0;
1200 }
1201 
1202 static void ufs_mtk_vccqx_set_lpm(struct ufs_hba *hba, bool lpm)
1203 {
1204 	struct ufs_vreg *vccqx = NULL;
1205 
1206 	if (hba->vreg_info.vccq)
1207 		vccqx = hba->vreg_info.vccq;
1208 	else
1209 		vccqx = hba->vreg_info.vccq2;
1210 
1211 	regulator_set_mode(vccqx->reg,
1212 			   lpm ? REGULATOR_MODE_IDLE : REGULATOR_MODE_NORMAL);
1213 }
1214 
/*
 * ufs_mtk_vsx_set_lpm - toggle device power rail LPM through an SMC call
 * @hba: per-adapter instance
 * @lpm: true to enter low-power mode, false to leave it
 *
 * Delegates power control to secure firmware; the device's UFS spec
 * version is passed so firmware can apply version-specific handling.
 * NOTE(review): "vsx" presumably names the supply feeding VCCQx —
 * confirm against the platform power tree.
 */
static void ufs_mtk_vsx_set_lpm(struct ufs_hba *hba, bool lpm)
{
	struct arm_smccc_res res;

	ufs_mtk_device_pwr_ctrl(!lpm,
				(unsigned long)hba->dev_info.wspecversion,
				res);
}
1223 
1224 static void ufs_mtk_dev_vreg_set_lpm(struct ufs_hba *hba, bool lpm)
1225 {
1226 	if (!hba->vreg_info.vccq && !hba->vreg_info.vccq2)
1227 		return;
1228 
1229 	/* Skip if VCC is assumed always-on */
1230 	if (!hba->vreg_info.vcc)
1231 		return;
1232 
1233 	/* Bypass LPM when device is still active */
1234 	if (lpm && ufshcd_is_ufs_dev_active(hba))
1235 		return;
1236 
1237 	/* Bypass LPM if VCC is enabled */
1238 	if (lpm && hba->vreg_info.vcc->enabled)
1239 		return;
1240 
1241 	if (lpm) {
1242 		ufs_mtk_vccqx_set_lpm(hba, lpm);
1243 		ufs_mtk_vsx_set_lpm(hba, lpm);
1244 	} else {
1245 		ufs_mtk_vsx_set_lpm(hba, lpm);
1246 		ufs_mtk_vccqx_set_lpm(hba, lpm);
1247 	}
1248 }
1249 
/*
 * ufs_mtk_auto_hibern8_disable - turn off auto-hibern8 and settle the link
 * @hba: per-adapter instance
 *
 * Failure to reach VS_LINK_UP is only warned about; callers continue with
 * suspend regardless.
 */
static void ufs_mtk_auto_hibern8_disable(struct ufs_hba *hba)
{
	int ret;

	/* disable auto-hibern8 */
	ufshcd_writel(hba, 0, REG_AUTO_HIBERNATE_IDLE_TIMER);

	/* wait host return to idle state when auto-hibern8 off */
	/* NOTE(review): units of the 5 / 100 timeouts below are defined by
	 * the ufs_mtk_wait_* helpers — confirm before tuning. */
	ufs_mtk_wait_idle_state(hba, 5);

	ret = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100);
	if (ret)
		dev_warn(hba->dev, "exit h8 state fail, ret=%d\n", ret);
}
1264 
/*
 * ufs_mtk_suspend - vendor suspend hook
 * @hba: per-adapter instance
 * @pm_op: runtime/system PM operation (unused here)
 * @status: PRE_CHANGE before the generic suspend sequence, POST_CHANGE after
 *
 * PRE_CHANGE only disables auto-hibern8. POST_CHANGE enters link LPM,
 * powers down the MPHY and, if the link is off, asserts device reset,
 * then drops HCI power. On failure the link is forced to the off state
 * and -EAGAIN is returned so ufshcd performs a full host reset.
 */
static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
	enum ufs_notify_change_status status)
{
	int err;
	struct arm_smccc_res res;

	if (status == PRE_CHANGE) {
		if (ufshcd_is_auto_hibern8_supported(hba))
			ufs_mtk_auto_hibern8_disable(hba);
		return 0;
	}

	if (ufshcd_is_link_hibern8(hba)) {
		err = ufs_mtk_link_set_lpm(hba);
		if (err)
			goto fail;
	}

	if (!ufshcd_is_link_active(hba)) {
		/*
		 * Make sure no error will be returned to prevent
		 * ufshcd_suspend() re-enabling regulators while vreg is still
		 * in low-power mode.
		 */
		err = ufs_mtk_mphy_power_on(hba, false);
		if (err)
			goto fail;
	}

	/* Keep reset asserted while powered down if the link is fully off */
	if (ufshcd_is_link_off(hba))
		ufs_mtk_device_reset_ctrl(0, res);

	ufs_mtk_host_pwr_ctrl(HOST_PWR_HCI, false, res);

	return 0;
fail:
	/*
	 * Set link as off state enforcedly to trigger
	 * ufshcd_host_reset_and_restore() in ufshcd_suspend()
	 * for completed host reset.
	 */
	ufshcd_set_link_off(hba);
	return -EAGAIN;
}
1309 
/*
 * ufs_mtk_resume - vendor resume hook
 * @hba: per-adapter instance
 * @pm_op: runtime/system PM operation (unused here)
 *
 * Restores regulators from LPM (unless the host is still operational,
 * e.g. resume during error handling), re-applies HCI power, powers the
 * MPHY back on and exits link LPM. Any failure falls back to full link
 * recovery.
 */
static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	int err;
	struct arm_smccc_res res;

	if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
		ufs_mtk_dev_vreg_set_lpm(hba, false);

	ufs_mtk_host_pwr_ctrl(HOST_PWR_HCI, true, res);

	err = ufs_mtk_mphy_power_on(hba, true);
	if (err)
		goto fail;

	if (ufshcd_is_link_hibern8(hba)) {
		err = ufs_mtk_link_set_hpm(hba);
		if (err)
			goto fail;
	}

	return 0;
fail:
	return ufshcd_link_recovery(hba);
}
1334 
/*
 * ufs_mtk_dbg_register_dump - dump MTK-specific debug registers
 * @hba: per-adapter instance
 *
 * Dumps the XOUFS control window, the extended register, the MPHY
 * control window and one probe word selected by ufs_mtk_dbg_sel().
 */
static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba)
{
	/* Dump ufshci register 0x140 ~ 0x14C */
	ufshcd_dump_regs(hba, REG_UFS_XOUFS_CTRL, 0x10,
			 "XOUFS Ctrl (0x140): ");

	ufshcd_dump_regs(hba, REG_UFS_EXTREG, 0x4, "Ext Reg ");

	/* Dump ufshci register 0x2200 ~ 0x22AC */
	ufshcd_dump_regs(hba, REG_UFS_MPHYCTRL,
			 REG_UFS_REJECT_MON - REG_UFS_MPHYCTRL + 4,
			 "MPHY Ctrl (0x2200): ");

	/* Direct debugging information to REG_MTK_PROBE */
	ufs_mtk_dbg_sel(hba);
	ufshcd_dump_regs(hba, REG_UFS_PROBE, 0x4, "Debug Probe ");
}
1352 
1353 static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
1354 {
1355 	struct ufs_dev_info *dev_info = &hba->dev_info;
1356 	u16 mid = dev_info->wmanufacturerid;
1357 
1358 	if (mid == UFS_VENDOR_SAMSUNG) {
1359 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6);
1360 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), 10);
1361 	}
1362 
1363 	/*
1364 	 * Decide waiting time before gating reference clock and
1365 	 * after ungating reference clock according to vendors'
1366 	 * requirements.
1367 	 */
1368 	if (mid == UFS_VENDOR_SAMSUNG)
1369 		ufs_mtk_setup_ref_clk_wait_us(hba, 1);
1370 	else if (mid == UFS_VENDOR_SKHYNIX)
1371 		ufs_mtk_setup_ref_clk_wait_us(hba, 30);
1372 	else if (mid == UFS_VENDOR_TOSHIBA)
1373 		ufs_mtk_setup_ref_clk_wait_us(hba, 100);
1374 	else
1375 		ufs_mtk_setup_ref_clk_wait_us(hba,
1376 					      REFCLK_DEFAULT_WAIT_US);
1377 	return 0;
1378 }
1379 
1380 static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
1381 {
1382 	ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups);
1383 
1384 	if (ufs_mtk_is_broken_vcc(hba) && hba->vreg_info.vcc &&
1385 	    (hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)) {
1386 		hba->vreg_info.vcc->always_on = true;
1387 		/*
1388 		 * VCC will be kept always-on thus we don't
1389 		 * need any delay during regulator operations
1390 		 */
1391 		hba->dev_quirks &= ~(UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
1392 			UFS_DEVICE_QUIRK_DELAY_AFTER_LPM);
1393 	}
1394 
1395 	ufs_mtk_vreg_fix_vcc(hba);
1396 	ufs_mtk_vreg_fix_vccqx(hba);
1397 }
1398 
1399 static void ufs_mtk_event_notify(struct ufs_hba *hba,
1400 				 enum ufs_event_type evt, void *data)
1401 {
1402 	unsigned int val = *(u32 *)data;
1403 	unsigned long reg;
1404 	u8 bit;
1405 
1406 	trace_ufs_mtk_event(evt, val);
1407 
1408 	/* Print details of UIC Errors */
1409 	if (evt <= UFS_EVT_DME_ERR) {
1410 		dev_info(hba->dev,
1411 			 "Host UIC Error Code (%s): %08x\n",
1412 			 ufs_uic_err_str[evt], val);
1413 		reg = val;
1414 	}
1415 
1416 	if (evt == UFS_EVT_PA_ERR) {
1417 		for_each_set_bit(bit, &reg, ARRAY_SIZE(ufs_uic_pa_err_str))
1418 			dev_info(hba->dev, "%s\n", ufs_uic_pa_err_str[bit]);
1419 	}
1420 
1421 	if (evt == UFS_EVT_DL_ERR) {
1422 		for_each_set_bit(bit, &reg, ARRAY_SIZE(ufs_uic_dl_err_str))
1423 			dev_info(hba->dev, "%s\n", ufs_uic_dl_err_str[bit]);
1424 	}
1425 }
1426 
/*
 * ufs_mtk_config_scaling_param - set clk-scaling/devfreq tunables
 * @hba: per-adapter instance
 * @profile: devfreq profile (unused; fields are set via hba->vps instead)
 * @data: simple-ondemand data (unused; fields are set via hba->vps instead)
 */
static void ufs_mtk_config_scaling_param(struct ufs_hba *hba,
				struct devfreq_dev_profile *profile,
				struct devfreq_simple_ondemand_data *data)
{
	/* Customize min gear in clk scaling */
	hba->clk_scaling.min_gear = UFS_HS_G4;

	hba->vps->devfreq_profile.polling_ms = 200;
	hba->vps->ondemand_data.upthreshold = 50;
	hba->vps->ondemand_data.downdifferential = 20;
}
1438 
1439 /**
1440  * ufs_mtk_clk_scale - Internal clk scaling operation
1441  *
1442  * MTK platform supports clk scaling by switching parent of ufs_sel(mux).
1443  * The ufs_sel downstream to ufs_ck which feeds directly to UFS hardware.
1444  * Max and min clocks rate of ufs_sel defined in dts should match rate of
1445  * "ufs_sel_max_src" and "ufs_sel_min_src" respectively.
1446  * This prevent changing rate of pll clock that is shared between modules.
1447  *
1448  * @hba: per adapter instance
1449  * @scale_up: True for scaling up and false for scaling down
1450  */
1451 static void ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up)
1452 {
1453 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
1454 	struct ufs_mtk_clk *mclk = &host->mclk;
1455 	struct ufs_clk_info *clki = mclk->ufs_sel_clki;
1456 	int ret = 0;
1457 
1458 	ret = clk_prepare_enable(clki->clk);
1459 	if (ret) {
1460 		dev_info(hba->dev,
1461 			 "clk_prepare_enable() fail, ret: %d\n", ret);
1462 		return;
1463 	}
1464 
1465 	if (scale_up) {
1466 		ret = clk_set_parent(clki->clk, mclk->ufs_sel_max_clki->clk);
1467 		clki->curr_freq = clki->max_freq;
1468 	} else {
1469 		ret = clk_set_parent(clki->clk, mclk->ufs_sel_min_clki->clk);
1470 		clki->curr_freq = clki->min_freq;
1471 	}
1472 
1473 	if (ret) {
1474 		dev_info(hba->dev,
1475 			 "Failed to set ufs_sel_clki, ret: %d\n", ret);
1476 	}
1477 
1478 	clk_disable_unprepare(clki->clk);
1479 
1480 	trace_ufs_mtk_clk_scale(clki->name, scale_up, clk_get_rate(clki->clk));
1481 }
1482 
1483 static int ufs_mtk_clk_scale_notify(struct ufs_hba *hba, bool scale_up,
1484 				    enum ufs_notify_change_status status)
1485 {
1486 	if (!ufshcd_is_clkscaling_supported(hba))
1487 		return 0;
1488 
1489 	if (status == PRE_CHANGE) {
1490 		/* Switch parent before clk_set_rate() */
1491 		ufs_mtk_clk_scale(hba, scale_up);
1492 	} else {
1493 		/* Request interrupt latency QoS accordingly */
1494 		ufs_mtk_scale_perf(hba, scale_up);
1495 	}
1496 
1497 	return 0;
1498 }
1499 
1500 /*
1501  * struct ufs_hba_mtk_vops - UFS MTK specific variant operations
1502  *
1503  * The variant operations configure the necessary controller and PHY
1504  * handshake during initialization.
1505  */
static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
	.name                = "mediatek.ufshci",
	/* Initialization and link/power bring-up hooks */
	.init                = ufs_mtk_init,
	.get_ufs_hci_version = ufs_mtk_get_ufs_hci_version,
	.setup_clocks        = ufs_mtk_setup_clocks,
	.hce_enable_notify   = ufs_mtk_hce_enable_notify,
	.link_startup_notify = ufs_mtk_link_startup_notify,
	.pwr_change_notify   = ufs_mtk_pwr_change_notify,
	.apply_dev_quirks    = ufs_mtk_apply_dev_quirks,
	.fixup_dev_quirks    = ufs_mtk_fixup_dev_quirks,
	/* Power management, debugging and clk-scaling hooks */
	.suspend             = ufs_mtk_suspend,
	.resume              = ufs_mtk_resume,
	.dbg_register_dump   = ufs_mtk_dbg_register_dump,
	.device_reset        = ufs_mtk_device_reset,
	.event_notify        = ufs_mtk_event_notify,
	.config_scaling_param = ufs_mtk_config_scaling_param,
	.clk_scale_notify    = ufs_mtk_clk_scale_notify,
};
1524 
1525 /**
1526  * ufs_mtk_probe - probe routine of the driver
1527  * @pdev: pointer to Platform device handle
1528  *
1529  * Return zero for success and non-zero for failure
1530  */
1531 static int ufs_mtk_probe(struct platform_device *pdev)
1532 {
1533 	int err;
1534 	struct device *dev = &pdev->dev;
1535 	struct device_node *reset_node;
1536 	struct platform_device *reset_pdev;
1537 	struct device_link *link;
1538 
1539 	reset_node = of_find_compatible_node(NULL, NULL,
1540 					     "ti,syscon-reset");
1541 	if (!reset_node) {
1542 		dev_notice(dev, "find ti,syscon-reset fail\n");
1543 		goto skip_reset;
1544 	}
1545 	reset_pdev = of_find_device_by_node(reset_node);
1546 	if (!reset_pdev) {
1547 		dev_notice(dev, "find reset_pdev fail\n");
1548 		goto skip_reset;
1549 	}
1550 	link = device_link_add(dev, &reset_pdev->dev,
1551 		DL_FLAG_AUTOPROBE_CONSUMER);
1552 	put_device(&reset_pdev->dev);
1553 	if (!link) {
1554 		dev_notice(dev, "add reset device_link fail\n");
1555 		goto skip_reset;
1556 	}
1557 	/* supplier is not probed */
1558 	if (link->status == DL_STATE_DORMANT) {
1559 		err = -EPROBE_DEFER;
1560 		goto out;
1561 	}
1562 
1563 skip_reset:
1564 	/* perform generic probe */
1565 	err = ufshcd_pltfrm_init(pdev, &ufs_hba_mtk_vops);
1566 
1567 out:
1568 	if (err)
1569 		dev_info(dev, "probe failed %d\n", err);
1570 
1571 	of_node_put(reset_node);
1572 	return err;
1573 }
1574 
1575 /**
1576  * ufs_mtk_remove - set driver_data of the device to NULL
1577  * @pdev: pointer to platform device handle
1578  *
1579  * Always return 0
1580  */
1581 static int ufs_mtk_remove(struct platform_device *pdev)
1582 {
1583 	struct ufs_hba *hba =  platform_get_drvdata(pdev);
1584 
1585 	pm_runtime_get_sync(&(pdev)->dev);
1586 	ufshcd_remove(hba);
1587 	return 0;
1588 }
1589 
1590 #ifdef CONFIG_PM_SLEEP
1591 static int ufs_mtk_system_suspend(struct device *dev)
1592 {
1593 	struct ufs_hba *hba = dev_get_drvdata(dev);
1594 	int ret;
1595 
1596 	ret = ufshcd_system_suspend(dev);
1597 	if (ret)
1598 		return ret;
1599 
1600 	ufs_mtk_dev_vreg_set_lpm(hba, true);
1601 
1602 	return 0;
1603 }
1604 
1605 static int ufs_mtk_system_resume(struct device *dev)
1606 {
1607 	struct ufs_hba *hba = dev_get_drvdata(dev);
1608 
1609 	ufs_mtk_dev_vreg_set_lpm(hba, false);
1610 
1611 	return ufshcd_system_resume(dev);
1612 }
1613 #endif
1614 
1615 #ifdef CONFIG_PM
1616 static int ufs_mtk_runtime_suspend(struct device *dev)
1617 {
1618 	struct ufs_hba *hba = dev_get_drvdata(dev);
1619 	int ret = 0;
1620 
1621 	ret = ufshcd_runtime_suspend(dev);
1622 	if (ret)
1623 		return ret;
1624 
1625 	ufs_mtk_dev_vreg_set_lpm(hba, true);
1626 
1627 	return 0;
1628 }
1629 
1630 static int ufs_mtk_runtime_resume(struct device *dev)
1631 {
1632 	struct ufs_hba *hba = dev_get_drvdata(dev);
1633 
1634 	ufs_mtk_dev_vreg_set_lpm(hba, false);
1635 
1636 	return ufshcd_runtime_resume(dev);
1637 }
1638 #endif
1639 
/* PM callbacks: generic ufshcd ops wrapped with MTK regulator LPM handling */
static const struct dev_pm_ops ufs_mtk_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(ufs_mtk_system_suspend,
				ufs_mtk_system_resume)
	SET_RUNTIME_PM_OPS(ufs_mtk_runtime_suspend,
			   ufs_mtk_runtime_resume, NULL)
	.prepare	 = ufshcd_suspend_prepare,
	.complete	 = ufshcd_resume_complete,
};
1648 
/* Platform driver glue; matched against the of_device_id table above */
static struct platform_driver ufs_mtk_pltform = {
	.probe      = ufs_mtk_probe,
	.remove     = ufs_mtk_remove,
	.driver = {
		.name   = "ufshcd-mtk",
		.pm     = &ufs_mtk_pm_ops,
		.of_match_table = ufs_mtk_of_match,
	},
};
1658 
1659 MODULE_AUTHOR("Stanley Chu <stanley.chu@mediatek.com>");
1660 MODULE_AUTHOR("Peter Wang <peter.wang@mediatek.com>");
1661 MODULE_DESCRIPTION("MediaTek UFS Host Driver");
1662 MODULE_LICENSE("GPL v2");
1663 
1664 module_platform_driver(ufs_mtk_pltform);
1665