xref: /openbmc/linux/drivers/ufs/host/ufs-mediatek.c (revision dd5e5554)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2019 MediaTek Inc.
4  * Authors:
5  *	Stanley Chu <stanley.chu@mediatek.com>
6  *	Peter Wang <peter.wang@mediatek.com>
7  */
8 
9 #include <linux/arm-smccc.h>
10 #include <linux/bitfield.h>
11 #include <linux/clk.h>
12 #include <linux/delay.h>
13 #include <linux/module.h>
14 #include <linux/of.h>
15 #include <linux/of_address.h>
16 #include <linux/of_device.h>
17 #include <linux/phy/phy.h>
18 #include <linux/platform_device.h>
19 #include <linux/regulator/consumer.h>
20 #include <linux/reset.h>
21 #include <linux/sched/clock.h>
22 #include <linux/soc/mediatek/mtk_sip_svc.h>
23 
24 #include <ufs/ufshcd.h>
25 #include "ufshcd-pltfrm.h"
26 #include <ufs/ufs_quirks.h>
27 #include <ufs/unipro.h>
28 #include "ufs-mediatek.h"
29 
30 #define CREATE_TRACE_POINTS
31 #include "ufs-mediatek-trace.h"
32 
33 #define ufs_mtk_smc(cmd, val, res) \
34 	arm_smccc_smc(MTK_SIP_UFS_CONTROL, \
35 		      cmd, val, 0, 0, 0, 0, 0, &(res))
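
/*
 * Note: ufs_mtk_smc() issues a MediaTek SiP call (MTK_SIP_UFS_CONTROL) to
 * secure firmware: "cmd" selects the sub-operation and "val" carries its
 * argument.  The wrappers below cover VA09 power control, inline-crypto
 * control, reference-clock notification and device reset; callers such as
 * ufs_mtk_crypto_enable() treat a non-zero res.a0 as failure.
 */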
36 
37 #define ufs_mtk_va09_pwr_ctrl(res, on) \
38 	ufs_mtk_smc(UFS_MTK_SIP_VA09_PWR_CTRL, on, res)
39 
40 #define ufs_mtk_crypto_ctrl(res, enable) \
41 	ufs_mtk_smc(UFS_MTK_SIP_CRYPTO_CTRL, enable, res)
42 
43 #define ufs_mtk_ref_clk_notify(on, res) \
44 	ufs_mtk_smc(UFS_MTK_SIP_REF_CLK_NOTIFICATION, on, res)
45 
46 #define ufs_mtk_device_reset_ctrl(high, res) \
47 	ufs_mtk_smc(UFS_MTK_SIP_DEVICE_RESET, high, res)
48 
49 static const struct ufs_dev_quirk ufs_mtk_dev_fixups[] = {
50 	{ .wmanufacturerid = UFS_VENDOR_MICRON,
51 	  .model = UFS_ANY_MODEL,
52 	  .quirk = UFS_DEVICE_QUIRK_DELAY_AFTER_LPM },
53 	{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
54 	  .model = "H9HQ21AFAMZDAR",
55 	  .quirk = UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES },
56 	{}
57 };
58 
59 static const struct of_device_id ufs_mtk_of_match[] = {
60 	{ .compatible = "mediatek,mt8183-ufshci" },
61 	{},
62 };
63 
64 static bool ufs_mtk_is_boost_crypt_enabled(struct ufs_hba *hba)
65 {
66 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
67 
68 	return !!(host->caps & UFS_MTK_CAP_BOOST_CRYPT_ENGINE);
69 }
70 
71 static bool ufs_mtk_is_va09_supported(struct ufs_hba *hba)
72 {
73 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
74 
75 	return !!(host->caps & UFS_MTK_CAP_VA09_PWR_CTRL);
76 }
77 
78 static bool ufs_mtk_is_broken_vcc(struct ufs_hba *hba)
79 {
80 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
81 
82 	return !!(host->caps & UFS_MTK_CAP_BROKEN_VCC);
83 }
84 
85 static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
86 {
87 	u32 tmp;
88 
89 	if (enable) {
90 		ufshcd_dme_get(hba,
91 			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
92 		tmp = tmp |
93 		      (1 << RX_SYMBOL_CLK_GATE_EN) |
94 		      (1 << SYS_CLK_GATE_EN) |
95 		      (1 << TX_CLK_GATE_EN);
96 		ufshcd_dme_set(hba,
97 			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
98 
99 		ufshcd_dme_get(hba,
100 			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
101 		tmp = tmp & ~(1 << TX_SYMBOL_CLK_REQ_FORCE);
102 		ufshcd_dme_set(hba,
103 			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
104 	} else {
105 		ufshcd_dme_get(hba,
106 			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
107 		tmp = tmp & ~((1 << RX_SYMBOL_CLK_GATE_EN) |
108 			      (1 << SYS_CLK_GATE_EN) |
109 			      (1 << TX_CLK_GATE_EN));
110 		ufshcd_dme_set(hba,
111 			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
112 
113 		ufshcd_dme_get(hba,
114 			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
115 		tmp = tmp | (1 << TX_SYMBOL_CLK_REQ_FORCE);
116 		ufshcd_dme_set(hba,
117 			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
118 	}
119 }
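
/*
 * Summary of ufs_mtk_cfg_unipro_cg(): enabling UniPro clock gating sets the
 * RX symbol, system and TX clock-gate enable bits in VS_SAVEPOWERCONTROL and
 * clears the TX symbol clock force-request bit in VS_DEBUGCLOCKENABLE;
 * disabling reverses both settings.
 */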
120 
121 static void ufs_mtk_crypto_enable(struct ufs_hba *hba)
122 {
123 	struct arm_smccc_res res;
124 
125 	ufs_mtk_crypto_ctrl(res, 1);
126 	if (res.a0) {
127 		dev_info(hba->dev, "%s: crypto enable failed, err: %lu\n",
128 			 __func__, res.a0);
129 		hba->caps &= ~UFSHCD_CAP_CRYPTO;
130 	}
131 }
132 
133 static void ufs_mtk_host_reset(struct ufs_hba *hba)
134 {
135 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
136 
137 	reset_control_assert(host->hci_reset);
138 	reset_control_assert(host->crypto_reset);
139 	reset_control_assert(host->unipro_reset);
140 
141 	usleep_range(100, 110);
142 
143 	reset_control_deassert(host->unipro_reset);
144 	reset_control_deassert(host->crypto_reset);
145 	reset_control_deassert(host->hci_reset);
146 }
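
/*
 * ufs_mtk_host_reset() asserts the HCI, crypto and UniPro resets together,
 * holds them for roughly 100 us, then releases them in the reverse order.
 */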
147 
148 static void ufs_mtk_init_reset_control(struct ufs_hba *hba,
149 				       struct reset_control **rc,
150 				       char *str)
151 {
152 	*rc = devm_reset_control_get(hba->dev, str);
153 	if (IS_ERR(*rc)) {
154 		dev_info(hba->dev, "Failed to get reset control %s: %ld\n",
155 			 str, PTR_ERR(*rc));
156 		*rc = NULL;
157 	}
158 }
159 
160 static void ufs_mtk_init_reset(struct ufs_hba *hba)
161 {
162 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
163 
164 	ufs_mtk_init_reset_control(hba, &host->hci_reset,
165 				   "hci_rst");
166 	ufs_mtk_init_reset_control(hba, &host->unipro_reset,
167 				   "unipro_rst");
168 	ufs_mtk_init_reset_control(hba, &host->crypto_reset,
169 				   "crypto_rst");
170 }
171 
172 static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
173 				     enum ufs_notify_change_status status)
174 {
175 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
176 
177 	if (status == PRE_CHANGE) {
178 		if (host->unipro_lpm) {
179 			hba->vps->hba_enable_delay_us = 0;
180 		} else {
181 			hba->vps->hba_enable_delay_us = 600;
182 			ufs_mtk_host_reset(hba);
183 		}
184 
185 		if (hba->caps & UFSHCD_CAP_CRYPTO)
186 			ufs_mtk_crypto_enable(hba);
187 
188 		if (host->caps & UFS_MTK_CAP_DISABLE_AH8) {
189 			ufshcd_writel(hba, 0,
190 				      REG_AUTO_HIBERNATE_IDLE_TIMER);
191 			hba->capabilities &= ~MASK_AUTO_HIBERN8_SUPPORT;
192 			hba->ahit = 0;
193 		}
194 	}
195 
196 	return 0;
197 }
198 
199 static int ufs_mtk_bind_mphy(struct ufs_hba *hba)
200 {
201 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
202 	struct device *dev = hba->dev;
203 	struct device_node *np = dev->of_node;
204 	int err = 0;
205 
206 	host->mphy = devm_of_phy_get_by_index(dev, np, 0);
207 
208 	if (host->mphy == ERR_PTR(-EPROBE_DEFER)) {
209 		/*
210 		 * The UFS driver might be probed before the phy driver is.
211 		 * In that case, return -EPROBE_DEFER so the probe is retried later.
212 		 */
213 		err = -EPROBE_DEFER;
214 		dev_info(dev,
215 			 "%s: required phy hasn't probed yet. err = %d\n",
216 			__func__, err);
217 	} else if (IS_ERR(host->mphy)) {
218 		err = PTR_ERR(host->mphy);
219 		if (err != -ENODEV) {
220 			dev_info(dev, "%s: PHY get failed %d\n", __func__,
221 				 err);
222 		}
223 	}
224 
225 	if (err)
226 		host->mphy = NULL;
227 	/*
228 	 * Allow unbound mphy because not every platform needs specific
229 	 * mphy control.
230 	 */
231 	if (err == -ENODEV)
232 		err = 0;
233 
234 	return err;
235 }
236 
237 static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
238 {
239 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
240 	struct arm_smccc_res res;
241 	ktime_t timeout, time_checked;
242 	u32 value;
243 
244 	if (host->ref_clk_enabled == on)
245 		return 0;
246 
247 	if (on) {
248 		ufs_mtk_ref_clk_notify(on, res);
249 		ufshcd_writel(hba, REFCLK_REQUEST, REG_UFS_REFCLK_CTRL);
250 	} else {
251 		ufshcd_delay_us(host->ref_clk_gating_wait_us, 10);
252 		ufshcd_writel(hba, REFCLK_RELEASE, REG_UFS_REFCLK_CTRL);
253 	}
254 
255 	/* Wait for ack */
256 	timeout = ktime_add_us(ktime_get(), REFCLK_REQ_TIMEOUT_US);
257 	do {
258 		time_checked = ktime_get();
259 		value = ufshcd_readl(hba, REG_UFS_REFCLK_CTRL);
260 
261 		/* Wait until the ack bit equals the req bit */
262 		if (((value & REFCLK_ACK) >> 1) == (value & REFCLK_REQUEST))
263 			goto out;
264 
265 		usleep_range(100, 200);
266 	} while (ktime_before(time_checked, timeout));
267 
268 	dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value);
269 
270 	ufs_mtk_ref_clk_notify(host->ref_clk_enabled, res);
271 
272 	return -ETIMEDOUT;
273 
274 out:
275 	host->ref_clk_enabled = on;
276 	if (on)
277 		ufshcd_delay_us(host->ref_clk_ungating_wait_us, 10);
278 	else
279 		ufs_mtk_ref_clk_notify(on, res);
280 
281 	return 0;
282 }
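
/*
 * ufs_mtk_setup_ref_clk() implements a request/ack handshake on
 * REG_UFS_REFCLK_CTRL: after writing REFCLK_REQUEST or REFCLK_RELEASE the
 * code polls until the REFCLK_ACK bit, shifted down by one, matches the
 * REFCLK_REQUEST bit.  Secure firmware is notified before ungating and
 * after gating the clock; on a timeout the previous state is reported back
 * instead.
 */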
283 
284 static void ufs_mtk_setup_ref_clk_wait_us(struct ufs_hba *hba,
285 					  u16 gating_us)
286 {
287 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
288 
289 	if (hba->dev_info.clk_gating_wait_us) {
290 		host->ref_clk_gating_wait_us =
291 			hba->dev_info.clk_gating_wait_us;
292 	} else {
293 		host->ref_clk_gating_wait_us = gating_us;
294 	}
295 
296 	host->ref_clk_ungating_wait_us = REFCLK_DEFAULT_WAIT_US;
297 }
298 
299 static void ufs_mtk_dbg_sel(struct ufs_hba *hba)
300 {
301 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
302 
303 	if (((host->ip_ver >> 16) & 0xFF) >= 0x36) {
304 		ufshcd_writel(hba, 0x820820, REG_UFS_DEBUG_SEL);
305 		ufshcd_writel(hba, 0x0, REG_UFS_DEBUG_SEL_B0);
306 		ufshcd_writel(hba, 0x55555555, REG_UFS_DEBUG_SEL_B1);
307 		ufshcd_writel(hba, 0xaaaaaaaa, REG_UFS_DEBUG_SEL_B2);
308 		ufshcd_writel(hba, 0xffffffff, REG_UFS_DEBUG_SEL_B3);
309 	} else {
310 		ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
311 	}
312 }
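
/*
 * ufs_mtk_dbg_sel() routes internal debug signals to REG_UFS_PROBE.  IP
 * versions 0x36 and later need the additional REG_UFS_DEBUG_SEL_B0..B3
 * selector writes; older versions only need the single 0x20 selector value.
 */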
313 
314 static void ufs_mtk_wait_idle_state(struct ufs_hba *hba,
315 			    unsigned long retry_ms)
316 {
317 	u64 timeout, time_checked;
318 	u32 val, sm;
319 	bool wait_idle;
320 
321 	/* cannot use plain ktime_get() in suspend */
322 	timeout = ktime_get_mono_fast_ns() + retry_ms * 1000000UL;
323 
324 	/* wait a fixed time before the first state check */
325 	udelay(10);
326 	wait_idle = false;
327 
328 	do {
329 		time_checked = ktime_get_mono_fast_ns();
330 		ufs_mtk_dbg_sel(hba);
331 		val = ufshcd_readl(hba, REG_UFS_PROBE);
332 
333 		sm = val & 0x1f;
334 
335 		/*
336 		 * If the state machine is between H8 enter and H8 enter
337 		 * confirm, wait until it returns to the idle state.
338 		 */
339 		if ((sm >= VS_HIB_ENTER) && (sm <= VS_HIB_EXIT)) {
340 			wait_idle = true;
341 			udelay(50);
342 			continue;
343 		} else if (!wait_idle)
344 			break;
345 
346 		if (wait_idle && (sm == VS_HCE_BASE))
347 			break;
348 	} while (time_checked < timeout);
349 
350 	if (wait_idle && sm != VS_HCE_BASE)
351 		dev_info(hba->dev, "wait idle tmo: 0x%x\n", val);
352 }
353 
354 static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state,
355 				   unsigned long max_wait_ms)
356 {
357 	ktime_t timeout, time_checked;
358 	u32 val;
359 
360 	timeout = ktime_add_ms(ktime_get(), max_wait_ms);
361 	do {
362 		time_checked = ktime_get();
363 		ufs_mtk_dbg_sel(hba);
364 		val = ufshcd_readl(hba, REG_UFS_PROBE);
365 		val = val >> 28;
366 
367 		if (val == state)
368 			return 0;
369 
370 		/* Sleep for max. 200us */
371 		usleep_range(100, 200);
372 	} while (ktime_before(time_checked, timeout));
373 
374 	if (val == state)
375 		return 0;
376 
377 	return -ETIMEDOUT;
378 }
379 
380 static int ufs_mtk_mphy_power_on(struct ufs_hba *hba, bool on)
381 {
382 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
383 	struct phy *mphy = host->mphy;
384 	struct arm_smccc_res res;
385 	int ret = 0;
386 
387 	if (!mphy || !(on ^ host->mphy_powered_on))
388 		return 0;
389 
390 	if (on) {
391 		if (ufs_mtk_is_va09_supported(hba)) {
392 			ret = regulator_enable(host->reg_va09);
393 			if (ret < 0)
394 				goto out;
395 			/* wait 200 us to stabilize VA09 */
396 			usleep_range(200, 210);
397 			ufs_mtk_va09_pwr_ctrl(res, 1);
398 		}
399 		phy_power_on(mphy);
400 	} else {
401 		phy_power_off(mphy);
402 		if (ufs_mtk_is_va09_supported(hba)) {
403 			ufs_mtk_va09_pwr_ctrl(res, 0);
404 			ret = regulator_disable(host->reg_va09);
405 			if (ret < 0)
406 				goto out;
407 		}
408 	}
409 out:
410 	if (ret) {
411 		dev_info(hba->dev,
412 			 "failed to %s va09: %d\n",
413 			 on ? "enable" : "disable",
414 			 ret);
415 	} else {
416 		host->mphy_powered_on = on;
417 	}
418 
419 	return ret;
420 }
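
/*
 * M-PHY power sequencing in ufs_mtk_mphy_power_on(): on power-up, when VA09
 * control is supported, the VA09 regulator is enabled and given ~200 us to
 * settle, secure firmware is told to switch VA09 on, and only then is the
 * PHY powered; power-down runs the same steps in reverse order.
 */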
421 
422 static int ufs_mtk_get_host_clk(struct device *dev, const char *name,
423 				struct clk **clk_out)
424 {
425 	struct clk *clk;
426 	int err = 0;
427 
428 	clk = devm_clk_get(dev, name);
429 	if (IS_ERR(clk))
430 		err = PTR_ERR(clk);
431 	else
432 		*clk_out = clk;
433 
434 	return err;
435 }
436 
437 static void ufs_mtk_boost_crypt(struct ufs_hba *hba, bool boost)
438 {
439 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
440 	struct ufs_mtk_crypt_cfg *cfg;
441 	struct regulator *reg;
442 	int volt, ret;
443 
444 	if (!ufs_mtk_is_boost_crypt_enabled(hba))
445 		return;
446 
447 	cfg = host->crypt;
448 	volt = cfg->vcore_volt;
449 	reg = cfg->reg_vcore;
450 
451 	ret = clk_prepare_enable(cfg->clk_crypt_mux);
452 	if (ret) {
453 		dev_info(hba->dev, "clk_prepare_enable(): %d\n",
454 			 ret);
455 		return;
456 	}
457 
458 	if (boost) {
459 		ret = regulator_set_voltage(reg, volt, INT_MAX);
460 		if (ret) {
461 			dev_info(hba->dev,
462 				 "failed to set vcore to %d\n", volt);
463 			goto out;
464 		}
465 
466 		ret = clk_set_parent(cfg->clk_crypt_mux,
467 				     cfg->clk_crypt_perf);
468 		if (ret) {
469 			dev_info(hba->dev,
470 				 "failed to set clk_crypt_perf\n");
471 			regulator_set_voltage(reg, 0, INT_MAX);
472 			goto out;
473 		}
474 	} else {
475 		ret = clk_set_parent(cfg->clk_crypt_mux,
476 				     cfg->clk_crypt_lp);
477 		if (ret) {
478 			dev_info(hba->dev,
479 				 "failed to set clk_crypt_lp\n");
480 			goto out;
481 		}
482 
483 		ret = regulator_set_voltage(reg, 0, INT_MAX);
484 		if (ret) {
485 			dev_info(hba->dev,
486 				 "failed to set vcore to MIN\n");
487 		}
488 	}
489 out:
490 	clk_disable_unprepare(cfg->clk_crypt_mux);
491 }
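
/*
 * ufs_mtk_boost_crypt(): boosting raises vcore to at least the DT-provided
 * minimum and re-parents the crypt clock mux to the performance clock;
 * un-boosting re-parents to the low-power clock and drops the vcore floor
 * back to zero.  The mux is kept prepared/enabled only for the duration of
 * the re-parenting.
 */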
492 
493 static int ufs_mtk_init_host_clk(struct ufs_hba *hba, const char *name,
494 				 struct clk **clk)
495 {
496 	int ret;
497 
498 	ret = ufs_mtk_get_host_clk(hba->dev, name, clk);
499 	if (ret) {
500 		dev_info(hba->dev, "%s: failed to get %s: %d", __func__,
501 			 name, ret);
502 	}
503 
504 	return ret;
505 }
506 
507 static void ufs_mtk_init_boost_crypt(struct ufs_hba *hba)
508 {
509 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
510 	struct ufs_mtk_crypt_cfg *cfg;
511 	struct device *dev = hba->dev;
512 	struct regulator *reg;
513 	u32 volt;
514 
515 	host->crypt = devm_kzalloc(dev, sizeof(*(host->crypt)),
516 				   GFP_KERNEL);
517 	if (!host->crypt)
518 		goto disable_caps;
519 
520 	reg = devm_regulator_get_optional(dev, "dvfsrc-vcore");
521 	if (IS_ERR(reg)) {
522 		dev_info(dev, "failed to get dvfsrc-vcore: %ld",
523 			 PTR_ERR(reg));
524 		goto disable_caps;
525 	}
526 
527 	if (of_property_read_u32(dev->of_node, "boost-crypt-vcore-min",
528 				 &volt)) {
529 		dev_info(dev, "failed to get boost-crypt-vcore-min");
530 		goto disable_caps;
531 	}
532 
533 	cfg = host->crypt;
534 	if (ufs_mtk_init_host_clk(hba, "crypt_mux",
535 				  &cfg->clk_crypt_mux))
536 		goto disable_caps;
537 
538 	if (ufs_mtk_init_host_clk(hba, "crypt_lp",
539 				  &cfg->clk_crypt_lp))
540 		goto disable_caps;
541 
542 	if (ufs_mtk_init_host_clk(hba, "crypt_perf",
543 				  &cfg->clk_crypt_perf))
544 		goto disable_caps;
545 
546 	cfg->reg_vcore = reg;
547 	cfg->vcore_volt = volt;
548 	host->caps |= UFS_MTK_CAP_BOOST_CRYPT_ENGINE;
549 
550 disable_caps:
551 	return;
552 }
553 
554 static void ufs_mtk_init_va09_pwr_ctrl(struct ufs_hba *hba)
555 {
556 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
557 
558 	host->reg_va09 = regulator_get(hba->dev, "va09");
559 	if (IS_ERR(host->reg_va09))
560 		dev_info(hba->dev, "failed to get va09");
561 	else
562 		host->caps |= UFS_MTK_CAP_VA09_PWR_CTRL;
563 }
564 
565 static void ufs_mtk_init_host_caps(struct ufs_hba *hba)
566 {
567 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
568 	struct device_node *np = hba->dev->of_node;
569 
570 	if (of_property_read_bool(np, "mediatek,ufs-boost-crypt"))
571 		ufs_mtk_init_boost_crypt(hba);
572 
573 	if (of_property_read_bool(np, "mediatek,ufs-support-va09"))
574 		ufs_mtk_init_va09_pwr_ctrl(hba);
575 
576 	if (of_property_read_bool(np, "mediatek,ufs-disable-ah8"))
577 		host->caps |= UFS_MTK_CAP_DISABLE_AH8;
578 
579 	if (of_property_read_bool(np, "mediatek,ufs-broken-vcc"))
580 		host->caps |= UFS_MTK_CAP_BROKEN_VCC;
581 
582 	dev_info(hba->dev, "caps: 0x%x", host->caps);
583 }
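
/*
 * Illustrative device-tree fragment for the optional properties parsed
 * above and in ufs_mtk_init_boost_crypt().  The node label and the voltage
 * value are hypothetical; consult the MediaTek UFS binding documentation
 * for the authoritative format:
 *
 *	&ufshci {
 *		mediatek,ufs-boost-crypt;
 *		boost-crypt-vcore-min = <725000>;
 *		mediatek,ufs-support-va09;
 *		mediatek,ufs-disable-ah8;
 *	};
 */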
584 
585 static void ufs_mtk_scale_perf(struct ufs_hba *hba, bool up)
586 {
587 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
588 
589 	ufs_mtk_boost_crypt(hba, up);
590 	ufs_mtk_setup_ref_clk(hba, up);
591 
592 	if (up)
593 		phy_power_on(host->mphy);
594 	else
595 		phy_power_off(host->mphy);
596 }
597 
598 /**
599  * ufs_mtk_setup_clocks - enables/disables clocks
600  * @hba: host controller instance
601  * @on: If true, enable clocks; otherwise disable them.
602  * @status: PRE_CHANGE or POST_CHANGE notify
603  *
604  * Returns 0 on success, non-zero on failure.
605  */
606 static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
607 				enum ufs_notify_change_status status)
608 {
609 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
610 	bool clk_pwr_off = false;
611 	int ret = 0;
612 
613 	/*
614 	 * If ufs_mtk_init() has not completed yet, simply ignore this call.
615 	 * ufs_mtk_setup_clocks() will be called again from ufs_mtk_init()
616 	 * once initialization is done.
617 	 */
618 	if (!host)
619 		return 0;
620 
621 	if (!on && status == PRE_CHANGE) {
622 		if (ufshcd_is_link_off(hba)) {
623 			clk_pwr_off = true;
624 		} else if (ufshcd_is_link_hibern8(hba) ||
625 			 (!ufshcd_can_hibern8_during_gating(hba) &&
626 			 ufshcd_is_auto_hibern8_enabled(hba))) {
627 			/*
628 			 * Gate the ref-clk and power off the mphy if the link
629 			 * is OFF, or in Hibern8 entered by either Auto-Hibern8
630 			 * or ufshcd_link_state_transition().
631 			 */
632 			ret = ufs_mtk_wait_link_state(hba,
633 						      VS_LINK_HIBERN8,
634 						      15);
635 			if (!ret)
636 				clk_pwr_off = true;
637 		}
638 
639 		if (clk_pwr_off)
640 			ufs_mtk_scale_perf(hba, false);
641 	} else if (on && status == POST_CHANGE) {
642 		ufs_mtk_scale_perf(hba, true);
643 	}
644 
645 	return ret;
646 }
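
/*
 * Gating policy in ufs_mtk_setup_clocks(): the ref-clk and M-PHY are scaled
 * down only when the link is already off, or when it is confirmed to be in
 * Hibern8 (waiting up to 15 ms for the VS_LINK_HIBERN8 state); they are
 * scaled back up after the clocks have been re-enabled.
 */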
647 
648 static void ufs_mtk_get_controller_version(struct ufs_hba *hba)
649 {
650 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
651 	int ret, ver = 0;
652 
653 	if (host->hw_ver.major)
654 		return;
655 
656 	/* Set default (minimum) version anyway */
657 	host->hw_ver.major = 2;
658 
659 	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_LOCALVERINFO), &ver);
660 	if (!ret) {
661 		if (ver >= UFS_UNIPRO_VER_1_8) {
662 			host->hw_ver.major = 3;
663 			/*
664 			 * Fix HCI version for some platforms with
665 			 * incorrect version
666 			 */
667 			if (hba->ufs_version < ufshci_version(3, 0))
668 				hba->ufs_version = ufshci_version(3, 0);
669 		}
670 	}
671 }
672 
673 static u32 ufs_mtk_get_ufs_hci_version(struct ufs_hba *hba)
674 {
675 	return hba->ufs_version;
676 }
677 
678 /**
679  * ufs_mtk_init - find other essential mmio bases
680  * @hba: host controller instance
681  *
682  * Binds the PHY to the controller and powers up the PHY, enabling
683  * its clocks and regulators.
684  *
685  * Returns -EPROBE_DEFER if binding fails, returns negative error
686  * on phy power up failure and returns zero on success.
687  */
688 static int ufs_mtk_init(struct ufs_hba *hba)
689 {
690 	const struct of_device_id *id;
691 	struct device *dev = hba->dev;
692 	struct ufs_mtk_host *host;
693 	int err = 0;
694 
695 	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
696 	if (!host) {
697 		err = -ENOMEM;
698 		dev_info(dev, "%s: no memory for mtk ufs host\n", __func__);
699 		goto out;
700 	}
701 
702 	host->hba = hba;
703 	ufshcd_set_variant(hba, host);
704 
705 	id = of_match_device(ufs_mtk_of_match, dev);
706 	if (!id) {
707 		err = -EINVAL;
708 		goto out;
709 	}
710 
711 	/* Initialize host capability */
712 	ufs_mtk_init_host_caps(hba);
713 
714 	err = ufs_mtk_bind_mphy(hba);
715 	if (err)
716 		goto out_variant_clear;
717 
718 	ufs_mtk_init_reset(hba);
719 
720 	/* Enable runtime autosuspend */
721 	hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
722 
723 	/* Enable clock-gating */
724 	hba->caps |= UFSHCD_CAP_CLK_GATING;
725 
726 	/* Enable inline encryption */
727 	hba->caps |= UFSHCD_CAP_CRYPTO;
728 
729 	/* Enable WriteBooster */
730 	hba->caps |= UFSHCD_CAP_WB_EN;
731 	hba->quirks |= UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL;
732 	hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80);
733 
734 	if (host->caps & UFS_MTK_CAP_DISABLE_AH8)
735 		hba->caps |= UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
736 
737 	/*
738 	 * ufshcd_vops_init() is invoked after
739 	 * ufshcd_setup_clock(true) in ufshcd_hba_init(), thus
740 	 * the phy clock setup is skipped there.
741 	 *
742 	 * Enable the phy clocks explicitly here.
743 	 */
744 	ufs_mtk_mphy_power_on(hba, true);
745 	ufs_mtk_setup_clocks(hba, true, POST_CHANGE);
746 
747 	host->ip_ver = ufshcd_readl(hba, REG_UFS_MTK_IP_VER);
748 
749 	goto out;
750 
751 out_variant_clear:
752 	ufshcd_set_variant(hba, NULL);
753 out:
754 	return err;
755 }
756 
757 static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
758 				  struct ufs_pa_layer_attr *dev_max_params,
759 				  struct ufs_pa_layer_attr *dev_req_params)
760 {
761 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
762 	struct ufs_dev_params host_cap;
763 	int ret;
764 
765 	ufshcd_init_pwr_dev_param(&host_cap);
766 	host_cap.hs_rx_gear = UFS_HS_G4;
767 	host_cap.hs_tx_gear = UFS_HS_G4;
768 
769 	ret = ufshcd_get_pwr_dev_param(&host_cap,
770 				       dev_max_params,
771 				       dev_req_params);
772 	if (ret) {
773 		pr_info("%s: failed to determine capabilities\n",
774 			__func__);
775 	}
776 
777 	if (host->hw_ver.major >= 3) {
778 		ret = ufshcd_dme_configure_adapt(hba,
779 					   dev_req_params->gear_tx,
780 					   PA_INITIAL_ADAPT);
781 	}
782 
783 	return ret;
784 }
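
/*
 * ufs_mtk_pre_pwr_change() caps the host capability at HS-Gear4 before the
 * common ufshcd_get_pwr_dev_param() negotiation, and on controllers with
 * hw_ver.major >= 3 additionally programs PA_INITIAL_ADAPT for the agreed
 * TX gear.
 */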
785 
786 static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
787 				     enum ufs_notify_change_status stage,
788 				     struct ufs_pa_layer_attr *dev_max_params,
789 				     struct ufs_pa_layer_attr *dev_req_params)
790 {
791 	int ret = 0;
792 
793 	switch (stage) {
794 	case PRE_CHANGE:
795 		ret = ufs_mtk_pre_pwr_change(hba, dev_max_params,
796 					     dev_req_params);
797 		break;
798 	case POST_CHANGE:
799 		break;
800 	default:
801 		ret = -EINVAL;
802 		break;
803 	}
804 
805 	return ret;
806 }
807 
808 static int ufs_mtk_unipro_set_lpm(struct ufs_hba *hba, bool lpm)
809 {
810 	int ret;
811 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
812 
813 	ret = ufshcd_dme_set(hba,
814 			     UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
815 			     lpm ? 1 : 0);
816 	if (!ret || !lpm) {
817 		/*
818 		 * If the UIC command fails, forcibly record non-LPM mode so
819 		 * that the default hba_enable_delay_us value is used when
820 		 * re-enabling the host.
821 		 */
822 		host->unipro_lpm = lpm;
823 	}
824 
825 	return ret;
826 }
827 
828 static int ufs_mtk_pre_link(struct ufs_hba *hba)
829 {
830 	int ret;
831 	u32 tmp;
832 
833 	ufs_mtk_get_controller_version(hba);
834 
835 	ret = ufs_mtk_unipro_set_lpm(hba, false);
836 	if (ret)
837 		return ret;
838 
839 	/*
840 	 * Setting PA_Local_TX_LCC_Enable to 0 before link startup
841 	 * to make sure that both host and device TX LCC are disabled
842 	 * once link startup is completed.
843 	 */
844 	ret = ufshcd_disable_host_tx_lcc(hba);
845 	if (ret)
846 		return ret;
847 
848 	/* disable deep stall */
849 	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
850 	if (ret)
851 		return ret;
852 
853 	tmp &= ~(1 << 6);
854 
855 	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
856 
857 	return ret;
858 }
859 
860 static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
861 {
862 	u32 ah_ms;
863 
864 	if (ufshcd_is_clkgating_allowed(hba)) {
865 		if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit)
866 			ah_ms = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK,
867 					  hba->ahit);
868 		else
869 			ah_ms = 10;
870 		ufshcd_clkgate_delay_set(hba->dev, ah_ms + 5);
871 	}
872 }
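
/*
 * ufs_mtk_setup_clk_gating() sets the clock-gating delay to the
 * auto-hibern8 idle period plus 5 ms (or 10 + 5 ms when auto-hibern8 is not
 * used), presumably so that clock gating does not race with auto-hibern8
 * entry.  This relies on the AH8 timer being programmed in millisecond
 * units (scale field 3), as ufs_mtk_post_link() does.
 */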
873 
874 static int ufs_mtk_post_link(struct ufs_hba *hba)
875 {
876 	/* enable unipro clock gating feature */
877 	ufs_mtk_cfg_unipro_cg(hba, true);
878 
879 	/* will be configured during hba probe */
880 	if (ufshcd_is_auto_hibern8_supported(hba))
881 		hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) |
882 			FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
883 
884 	ufs_mtk_setup_clk_gating(hba);
885 
886 	return 0;
887 }
888 
889 static int ufs_mtk_link_startup_notify(struct ufs_hba *hba,
890 				       enum ufs_notify_change_status stage)
891 {
892 	int ret = 0;
893 
894 	switch (stage) {
895 	case PRE_CHANGE:
896 		ret = ufs_mtk_pre_link(hba);
897 		break;
898 	case POST_CHANGE:
899 		ret = ufs_mtk_post_link(hba);
900 		break;
901 	default:
902 		ret = -EINVAL;
903 		break;
904 	}
905 
906 	return ret;
907 }
908 
909 static int ufs_mtk_device_reset(struct ufs_hba *hba)
910 {
911 	struct arm_smccc_res res;
912 
913 	/* disable hba before device reset */
914 	ufshcd_hba_stop(hba);
915 
916 	ufs_mtk_device_reset_ctrl(0, res);
917 
918 	/*
919 	 * The reset signal is active low. UFS devices shall detect
920 	 * more than or equal to 1us of positive or negative RST_n
921 	 * pulse width.
922 	 *
923 	 * To be on the safe side, keep the reset low for at least 10us.
924 	 */
925 	usleep_range(10, 15);
926 
927 	ufs_mtk_device_reset_ctrl(1, res);
928 
929 	/* Some devices may need time to respond to rst_n */
930 	usleep_range(10000, 15000);
931 
932 	dev_info(hba->dev, "device reset done\n");
933 
934 	return 0;
935 }
936 
937 static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
938 {
939 	int err;
940 
941 	err = ufshcd_hba_enable(hba);
942 	if (err)
943 		return err;
944 
945 	err = ufs_mtk_unipro_set_lpm(hba, false);
946 	if (err)
947 		return err;
948 
949 	err = ufshcd_uic_hibern8_exit(hba);
950 	if (!err)
951 		ufshcd_set_link_active(hba);
952 	else
953 		return err;
954 
955 	err = ufshcd_make_hba_operational(hba);
956 	if (err)
957 		return err;
958 
959 	return 0;
960 }
961 
962 static int ufs_mtk_link_set_lpm(struct ufs_hba *hba)
963 {
964 	int err;
965 
966 	err = ufs_mtk_unipro_set_lpm(hba, true);
967 	if (err) {
968 		/* Resume UniPro state for following error recovery */
969 		ufs_mtk_unipro_set_lpm(hba, false);
970 		return err;
971 	}
972 
973 	return 0;
974 }
975 
976 static void ufs_mtk_vreg_set_lpm(struct ufs_hba *hba, bool lpm)
977 {
978 	if (!hba->vreg_info.vccq2 || !hba->vreg_info.vcc)
979 		return;
980 
981 	if (lpm && !hba->vreg_info.vcc->enabled)
982 		regulator_set_mode(hba->vreg_info.vccq2->reg,
983 				   REGULATOR_MODE_IDLE);
984 	else if (!lpm)
985 		regulator_set_mode(hba->vreg_info.vccq2->reg,
986 				   REGULATOR_MODE_NORMAL);
987 }
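
/*
 * ufs_mtk_vreg_set_lpm() only touches VCCQ2: it is switched to
 * REGULATOR_MODE_IDLE when entering low-power mode with VCC already
 * disabled, and back to REGULATOR_MODE_NORMAL when leaving low-power mode.
 */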
988 
989 static void ufs_mtk_auto_hibern8_disable(struct ufs_hba *hba)
990 {
991 	int ret;
992 
993 	/* disable auto-hibern8 */
994 	ufshcd_writel(hba, 0, REG_AUTO_HIBERNATE_IDLE_TIMER);
995 
996 	/* wait for the host to return to the idle state after auto-hibern8 is off */
997 	ufs_mtk_wait_idle_state(hba, 5);
998 
999 	ret = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100);
1000 	if (ret)
1001 		dev_warn(hba->dev, "exit h8 state fail, ret=%d\n", ret);
1002 }
1003 
1004 static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
1005 	enum ufs_notify_change_status status)
1006 {
1007 	int err;
1008 	struct arm_smccc_res res;
1009 
1010 	if (status == PRE_CHANGE) {
1011 		if (!ufshcd_is_auto_hibern8_supported(hba))
1012 			return 0;
1013 		ufs_mtk_auto_hibern8_disable(hba);
1014 		return 0;
1015 	}
1016 
1017 	if (ufshcd_is_link_hibern8(hba)) {
1018 		err = ufs_mtk_link_set_lpm(hba);
1019 		if (err)
1020 			goto fail;
1021 	}
1022 
1023 	if (!ufshcd_is_link_active(hba)) {
1024 		/*
1025 		 * Make sure no error is returned here, to prevent
1026 		 * ufshcd_suspend() from re-enabling the regulators while
1027 		 * vreg is still in low-power mode.
1028 		 */
1029 		ufs_mtk_vreg_set_lpm(hba, true);
1030 		err = ufs_mtk_mphy_power_on(hba, false);
1031 		if (err)
1032 			goto fail;
1033 	}
1034 
1035 	if (ufshcd_is_link_off(hba))
1036 		ufs_mtk_device_reset_ctrl(0, res);
1037 
1038 	return 0;
1039 fail:
1040 	/*
1041 	 * Forcibly set the link to the off state to trigger
1042 	 * ufshcd_host_reset_and_restore() in ufshcd_suspend()
1043 	 * so that a complete host reset is performed.
1044 	 */
1045 	ufshcd_set_link_off(hba);
1046 	return -EAGAIN;
1047 }
1048 
1049 static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
1050 {
1051 	int err;
1052 
1053 	err = ufs_mtk_mphy_power_on(hba, true);
1054 	if (err)
1055 		goto fail;
1056 
1057 	ufs_mtk_vreg_set_lpm(hba, false);
1058 
1059 	if (ufshcd_is_link_hibern8(hba)) {
1060 		err = ufs_mtk_link_set_hpm(hba);
1061 		if (err)
1062 			goto fail;
1063 	}
1064 
1065 	return 0;
1066 fail:
1067 	return ufshcd_link_recovery(hba);
1068 }
1069 
1070 static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba)
1071 {
1072 	ufshcd_dump_regs(hba, REG_UFS_REFCLK_CTRL, 0x4, "Ref-Clk Ctrl ");
1073 
1074 	ufshcd_dump_regs(hba, REG_UFS_EXTREG, 0x4, "Ext Reg ");
1075 
1076 	ufshcd_dump_regs(hba, REG_UFS_MPHYCTRL,
1077 			 REG_UFS_REJECT_MON - REG_UFS_MPHYCTRL + 4,
1078 			 "MPHY Ctrl ");
1079 
1080 	/* Direct debugging information to REG_UFS_PROBE */
1081 	ufs_mtk_dbg_sel(hba);
1082 	ufshcd_dump_regs(hba, REG_UFS_PROBE, 0x4, "Debug Probe ");
1083 }
1084 
1085 static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
1086 {
1087 	struct ufs_dev_info *dev_info = &hba->dev_info;
1088 	u16 mid = dev_info->wmanufacturerid;
1089 
1090 	if (mid == UFS_VENDOR_SAMSUNG)
1091 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6);
1092 
1093 	/*
1094 	 * Decide waiting time before gating reference clock and
1095 	 * after ungating reference clock according to vendors'
1096 	 * requirements.
1097 	 */
1098 	if (mid == UFS_VENDOR_SAMSUNG)
1099 		ufs_mtk_setup_ref_clk_wait_us(hba, 1);
1100 	else if (mid == UFS_VENDOR_SKHYNIX)
1101 		ufs_mtk_setup_ref_clk_wait_us(hba, 30);
1102 	else if (mid == UFS_VENDOR_TOSHIBA)
1103 		ufs_mtk_setup_ref_clk_wait_us(hba, 100);
1104 	else
1105 		ufs_mtk_setup_ref_clk_wait_us(hba,
1106 					      REFCLK_DEFAULT_WAIT_US);
1107 
1108 	return 0;
1109 }
1110 
1111 static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
1112 {
1113 	ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups);
1114 
1115 	if (ufs_mtk_is_broken_vcc(hba) && hba->vreg_info.vcc &&
1116 	    (hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)) {
1117 		hba->vreg_info.vcc->always_on = true;
1118 		/*
1119 		 * VCC will be kept always-on, thus no delay is
1120 		 * needed during regulator operations.
1121 		 */
1122 		hba->dev_quirks &= ~(UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
1123 			UFS_DEVICE_QUIRK_DELAY_AFTER_LPM);
1124 	}
1125 }
1126 
1127 static void ufs_mtk_event_notify(struct ufs_hba *hba,
1128 				 enum ufs_event_type evt, void *data)
1129 {
1130 	unsigned int val = *(u32 *)data;
1131 
1132 	trace_ufs_mtk_event(evt, val);
1133 }
1134 
1135 /*
1136  * struct ufs_hba_mtk_vops - UFS MTK specific variant operations
1137  *
1138  * The variant operations configure the necessary controller and PHY
1139  * handshake during initialization.
1140  */
1141 static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
1142 	.name                = "mediatek.ufshci",
1143 	.init                = ufs_mtk_init,
1144 	.get_ufs_hci_version = ufs_mtk_get_ufs_hci_version,
1145 	.setup_clocks        = ufs_mtk_setup_clocks,
1146 	.hce_enable_notify   = ufs_mtk_hce_enable_notify,
1147 	.link_startup_notify = ufs_mtk_link_startup_notify,
1148 	.pwr_change_notify   = ufs_mtk_pwr_change_notify,
1149 	.apply_dev_quirks    = ufs_mtk_apply_dev_quirks,
1150 	.fixup_dev_quirks    = ufs_mtk_fixup_dev_quirks,
1151 	.suspend             = ufs_mtk_suspend,
1152 	.resume              = ufs_mtk_resume,
1153 	.dbg_register_dump   = ufs_mtk_dbg_register_dump,
1154 	.device_reset        = ufs_mtk_device_reset,
1155 	.event_notify        = ufs_mtk_event_notify,
1156 };
1157 
1158 /**
1159  * ufs_mtk_probe - probe routine of the driver
1160  * @pdev: pointer to Platform device handle
1161  *
1162  * Return zero for success and non-zero for failure
1163  */
1164 static int ufs_mtk_probe(struct platform_device *pdev)
1165 {
1166 	int err;
1167 	struct device *dev = &pdev->dev;
1168 	struct device_node *reset_node;
1169 	struct platform_device *reset_pdev;
1170 	struct device_link *link;
1171 
1172 	reset_node = of_find_compatible_node(NULL, NULL,
1173 					     "ti,syscon-reset");
1174 	if (!reset_node) {
1175 		dev_notice(dev, "find ti,syscon-reset fail\n");
1176 		goto skip_reset;
1177 	}
1178 	reset_pdev = of_find_device_by_node(reset_node);
1179 	if (!reset_pdev) {
1180 		dev_notice(dev, "find reset_pdev fail\n");
1181 		goto skip_reset;
1182 	}
1183 	link = device_link_add(dev, &reset_pdev->dev,
1184 		DL_FLAG_AUTOPROBE_CONSUMER);
1185 	put_device(&reset_pdev->dev);
1186 	if (!link) {
1187 		dev_notice(dev, "add reset device_link fail\n");
1188 		goto skip_reset;
1189 	}
1190 	/* supplier is not probed */
1191 	if (link->status == DL_STATE_DORMANT) {
1192 		err = -EPROBE_DEFER;
1193 		goto out;
1194 	}
1195 
1196 skip_reset:
1197 	/* perform generic probe */
1198 	err = ufshcd_pltfrm_init(pdev, &ufs_hba_mtk_vops);
1199 
1200 out:
1201 	if (err)
1202 		dev_info(dev, "probe failed %d\n", err);
1203 
1204 	of_node_put(reset_node);
1205 	return err;
1206 }
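
/*
 * The device_link created in ufs_mtk_probe() makes the "ti,syscon-reset"
 * provider a supplier of this controller; with DL_FLAG_AUTOPROBE_CONSUMER a
 * dormant link (supplier not yet probed) causes the probe to be deferred,
 * while platforms without that node simply skip the dependency.
 */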
1207 
1208 /**
1209  * ufs_mtk_remove - set driver_data of the device to NULL
1210  * @pdev: pointer to platform device handle
1211  *
1212  * Always return 0
1213  */
1214 static int ufs_mtk_remove(struct platform_device *pdev)
1215 {
1216 	struct ufs_hba *hba =  platform_get_drvdata(pdev);
1217 
1218 	pm_runtime_get_sync(&(pdev)->dev);
1219 	ufshcd_remove(hba);
1220 	return 0;
1221 }
1222 
1223 static const struct dev_pm_ops ufs_mtk_pm_ops = {
1224 	SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
1225 	SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
1226 	.prepare	 = ufshcd_suspend_prepare,
1227 	.complete	 = ufshcd_resume_complete,
1228 };
1229 
1230 static struct platform_driver ufs_mtk_pltform = {
1231 	.probe      = ufs_mtk_probe,
1232 	.remove     = ufs_mtk_remove,
1233 	.shutdown   = ufshcd_pltfrm_shutdown,
1234 	.driver = {
1235 		.name   = "ufshcd-mtk",
1236 		.pm     = &ufs_mtk_pm_ops,
1237 		.of_match_table = ufs_mtk_of_match,
1238 	},
1239 };
1240 
1241 MODULE_AUTHOR("Stanley Chu <stanley.chu@mediatek.com>");
1242 MODULE_AUTHOR("Peter Wang <peter.wang@mediatek.com>");
1243 MODULE_DESCRIPTION("MediaTek UFS Host Driver");
1244 MODULE_LICENSE("GPL v2");
1245 
1246 module_platform_driver(ufs_mtk_pltform);
1247