xref: /openbmc/linux/drivers/ufs/host/ufs-mediatek.c (revision 3a17fefe)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2019 MediaTek Inc.
4  * Authors:
5  *	Stanley Chu <stanley.chu@mediatek.com>
6  *	Peter Wang <peter.wang@mediatek.com>
7  */
8 
9 #include <linux/arm-smccc.h>
10 #include <linux/bitfield.h>
11 #include <linux/clk.h>
12 #include <linux/delay.h>
13 #include <linux/module.h>
14 #include <linux/of.h>
15 #include <linux/of_address.h>
16 #include <linux/of_device.h>
17 #include <linux/of_platform.h>
18 #include <linux/phy/phy.h>
19 #include <linux/platform_device.h>
20 #include <linux/pm_qos.h>
21 #include <linux/regulator/consumer.h>
22 #include <linux/reset.h>
23 #include <linux/soc/mediatek/mtk_sip_svc.h>
24 
25 #include <ufs/ufshcd.h>
26 #include "ufshcd-pltfrm.h"
27 #include <ufs/ufs_quirks.h>
28 #include <ufs/unipro.h>
29 #include "ufs-mediatek.h"
30 
31 static int  ufs_mtk_config_mcq(struct ufs_hba *hba, bool irq);
32 
33 #define CREATE_TRACE_POINTS
34 #include "ufs-mediatek-trace.h"
35 #undef CREATE_TRACE_POINTS
36 
37 #define MAX_SUPP_MAC 64
38 #define MCQ_QUEUE_OFFSET(c) ((((c) >> 16) & 0xFF) * 0x200)
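/*
 * Illustrative example of the macro above: bits [23:16] of the MCQ
 * capability value appear to locate the queue configuration region in
 * units of 0x200 bytes. With hba->mcq_capabilities = 0x00AB0000, for
 * instance, MCQ_QUEUE_OFFSET() yields 0xAB * 0x200 = 0x15600, the byte
 * offset that ufs_mtk_mcq_config_resource() adds to the HCI mmio base.
 */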
39 
40 static const struct ufs_dev_quirk ufs_mtk_dev_fixups[] = {
41 	{ .wmanufacturerid = UFS_ANY_VENDOR,
42 	  .model = UFS_ANY_MODEL,
43 	  .quirk = UFS_DEVICE_QUIRK_DELAY_AFTER_LPM |
44 		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
45 	{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
46 	  .model = "H9HQ21AFAMZDAR",
47 	  .quirk = UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES },
48 	{}
49 };
50 
51 static const struct of_device_id ufs_mtk_of_match[] = {
52 	{ .compatible = "mediatek,mt8183-ufshci" },
53 	{},
54 };
55 
56 /*
57  * Details of UIC Errors
58  */
59 static const char *const ufs_uic_err_str[] = {
60 	"PHY Adapter Layer",
61 	"Data Link Layer",
62 	"Network Link Layer",
63 	"Transport Link Layer",
64 	"DME"
65 };
66 
67 static const char *const ufs_uic_pa_err_str[] = {
68 	"PHY error on Lane 0",
69 	"PHY error on Lane 1",
70 	"PHY error on Lane 2",
71 	"PHY error on Lane 3",
72 	"Generic PHY Adapter Error. This should be the LINERESET indication"
73 };
74 
75 static const char *const ufs_uic_dl_err_str[] = {
76 	"NAC_RECEIVED",
77 	"TCx_REPLAY_TIMER_EXPIRED",
78 	"AFCx_REQUEST_TIMER_EXPIRED",
79 	"FCx_PROTECTION_TIMER_EXPIRED",
80 	"CRC_ERROR",
81 	"RX_BUFFER_OVERFLOW",
82 	"MAX_FRAME_LENGTH_EXCEEDED",
83 	"WRONG_SEQUENCE_NUMBER",
84 	"AFC_FRAME_SYNTAX_ERROR",
85 	"NAC_FRAME_SYNTAX_ERROR",
86 	"EOF_SYNTAX_ERROR",
87 	"FRAME_SYNTAX_ERROR",
88 	"BAD_CTRL_SYMBOL_TYPE",
89 	"PA_INIT_ERROR",
90 	"PA_ERROR_IND_RECEIVED",
91 	"PA_INIT"
92 };
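/*
 * ufs_uic_err_str[] is indexed by UIC event type, while the PA and DL
 * tables above are indexed by bit position in the corresponding UIC
 * error register; see the for_each_set_bit() loops in
 * ufs_mtk_event_notify() below.
 */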
93 
94 static bool ufs_mtk_is_boost_crypt_enabled(struct ufs_hba *hba)
95 {
96 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
97 
98 	return !!(host->caps & UFS_MTK_CAP_BOOST_CRYPT_ENGINE);
99 }
100 
101 static bool ufs_mtk_is_va09_supported(struct ufs_hba *hba)
102 {
103 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
104 
105 	return !!(host->caps & UFS_MTK_CAP_VA09_PWR_CTRL);
106 }
107 
108 static bool ufs_mtk_is_broken_vcc(struct ufs_hba *hba)
109 {
110 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
111 
112 	return !!(host->caps & UFS_MTK_CAP_BROKEN_VCC);
113 }
114 
115 static bool ufs_mtk_is_pmc_via_fastauto(struct ufs_hba *hba)
116 {
117 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
118 
119 	return !!(host->caps & UFS_MTK_CAP_PMC_VIA_FASTAUTO);
120 }
121 
122 static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
123 {
124 	u32 tmp;
125 
126 	if (enable) {
127 		ufshcd_dme_get(hba,
128 			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
129 		tmp = tmp |
130 		      (1 << RX_SYMBOL_CLK_GATE_EN) |
131 		      (1 << SYS_CLK_GATE_EN) |
132 		      (1 << TX_CLK_GATE_EN);
133 		ufshcd_dme_set(hba,
134 			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
135 
136 		ufshcd_dme_get(hba,
137 			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
138 		tmp = tmp & ~(1 << TX_SYMBOL_CLK_REQ_FORCE);
139 		ufshcd_dme_set(hba,
140 			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
141 	} else {
142 		ufshcd_dme_get(hba,
143 			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
144 		tmp = tmp & ~((1 << RX_SYMBOL_CLK_GATE_EN) |
145 			      (1 << SYS_CLK_GATE_EN) |
146 			      (1 << TX_CLK_GATE_EN));
147 		ufshcd_dme_set(hba,
148 			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
149 
150 		ufshcd_dme_get(hba,
151 			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
152 		tmp = tmp | (1 << TX_SYMBOL_CLK_REQ_FORCE);
153 		ufshcd_dme_set(hba,
154 			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
155 	}
156 }
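/*
 * Summary of the helper above: enabling unipro clock gating sets the
 * three *_CLK_GATE_EN bits in VS_SAVEPOWERCONTROL and clears the TX
 * symbol clock force bit in VS_DEBUGCLOCKENABLE; disabling does the
 * exact inverse.
 */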
157 
158 static void ufs_mtk_crypto_enable(struct ufs_hba *hba)
159 {
160 	struct arm_smccc_res res;
161 
162 	ufs_mtk_crypto_ctrl(res, 1);
163 	if (res.a0) {
164 		dev_info(hba->dev, "%s: crypto enable failed, err: %lu\n",
165 			 __func__, res.a0);
166 		hba->caps &= ~UFSHCD_CAP_CRYPTO;
167 	}
168 }
169 
170 static void ufs_mtk_host_reset(struct ufs_hba *hba)
171 {
172 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
173 
174 	reset_control_assert(host->hci_reset);
175 	reset_control_assert(host->crypto_reset);
176 	reset_control_assert(host->unipro_reset);
177 
178 	usleep_range(100, 110);
179 
180 	reset_control_deassert(host->unipro_reset);
181 	reset_control_deassert(host->crypto_reset);
182 	reset_control_deassert(host->hci_reset);
183 }
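/*
 * Note: the three reset lines above are asserted in the order
 * hci -> crypto -> unipro, held for at least 100 us, and deasserted
 * in the reverse order.
 */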
184 
185 static void ufs_mtk_init_reset_control(struct ufs_hba *hba,
186 				       struct reset_control **rc,
187 				       char *str)
188 {
189 	*rc = devm_reset_control_get(hba->dev, str);
190 	if (IS_ERR(*rc)) {
191 		dev_info(hba->dev, "Failed to get reset control %s: %ld\n",
192 			 str, PTR_ERR(*rc));
193 		*rc = NULL;
194 	}
195 }
196 
197 static void ufs_mtk_init_reset(struct ufs_hba *hba)
198 {
199 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
200 
201 	ufs_mtk_init_reset_control(hba, &host->hci_reset,
202 				   "hci_rst");
203 	ufs_mtk_init_reset_control(hba, &host->unipro_reset,
204 				   "unipro_rst");
205 	ufs_mtk_init_reset_control(hba, &host->crypto_reset,
206 				   "crypto_rst");
207 }
208 
209 static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
210 				     enum ufs_notify_change_status status)
211 {
212 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
213 
214 	if (status == PRE_CHANGE) {
215 		if (host->unipro_lpm) {
216 			hba->vps->hba_enable_delay_us = 0;
217 		} else {
218 			hba->vps->hba_enable_delay_us = 600;
219 			ufs_mtk_host_reset(hba);
220 		}
221 
222 		if (hba->caps & UFSHCD_CAP_CRYPTO)
223 			ufs_mtk_crypto_enable(hba);
224 
225 		if (host->caps & UFS_MTK_CAP_DISABLE_AH8) {
226 			ufshcd_writel(hba, 0,
227 				      REG_AUTO_HIBERNATE_IDLE_TIMER);
228 			hba->capabilities &= ~MASK_AUTO_HIBERN8_SUPPORT;
229 			hba->ahit = 0;
230 		}
231 
232 		/*
233 		 * Turn on CLK_CG early to bypass abnormal ERR_CHK signal
234 		 * and prevent a host hang issue.
235 		 */
236 		ufshcd_writel(hba,
237 			      ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) | 0x80,
238 			      REG_UFS_XOUFS_CTRL);
239 	}
240 
241 	return 0;
242 }
243 
244 static int ufs_mtk_bind_mphy(struct ufs_hba *hba)
245 {
246 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
247 	struct device *dev = hba->dev;
248 	struct device_node *np = dev->of_node;
249 	int err = 0;
250 
251 	host->mphy = devm_of_phy_get_by_index(dev, np, 0);
252 
253 	if (host->mphy == ERR_PTR(-EPROBE_DEFER)) {
254 		/*
255 		 * The UFS driver might be probed before the phy driver is.
256 		 * In that case, return -EPROBE_DEFER so the probe is retried.
257 		 */
258 		err = -EPROBE_DEFER;
259 		dev_info(dev,
260 			 "%s: required phy hasn't probed yet. err = %d\n",
261 			__func__, err);
262 	} else if (IS_ERR(host->mphy)) {
263 		err = PTR_ERR(host->mphy);
264 		if (err != -ENODEV) {
265 			dev_info(dev, "%s: PHY get failed %d\n", __func__,
266 				 err);
267 		}
268 	}
269 
270 	if (err)
271 		host->mphy = NULL;
272 	/*
273 	 * Allow unbound mphy because not every platform needs specific
274 	 * mphy control.
275 	 */
276 	if (err == -ENODEV)
277 		err = 0;
278 
279 	return err;
280 }
281 
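/*
 * An assumption worth noting about the handshake below: in
 * REG_UFS_REFCLK_CTRL, REFCLK_REQUEST appears to live in bit 0 and
 * REFCLK_ACK in bit 1, which is why the poll loop shifts the ack bit
 * right by one before comparing it against the request bit.
 */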
282 static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
283 {
284 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
285 	struct arm_smccc_res res;
286 	ktime_t timeout, time_checked;
287 	u32 value;
288 
289 	if (host->ref_clk_enabled == on)
290 		return 0;
291 
292 	ufs_mtk_ref_clk_notify(on, PRE_CHANGE, res);
293 
294 	if (on) {
295 		ufshcd_writel(hba, REFCLK_REQUEST, REG_UFS_REFCLK_CTRL);
296 	} else {
297 		ufshcd_delay_us(host->ref_clk_gating_wait_us, 10);
298 		ufshcd_writel(hba, REFCLK_RELEASE, REG_UFS_REFCLK_CTRL);
299 	}
300 
301 	/* Wait for ack */
302 	timeout = ktime_add_us(ktime_get(), REFCLK_REQ_TIMEOUT_US);
303 	do {
304 		time_checked = ktime_get();
305 		value = ufshcd_readl(hba, REG_UFS_REFCLK_CTRL);
306 
307 		/* Wait until the ack bit equals the req bit */
308 		if (((value & REFCLK_ACK) >> 1) == (value & REFCLK_REQUEST))
309 			goto out;
310 
311 		usleep_range(100, 200);
312 	} while (ktime_before(time_checked, timeout));
313 
314 	dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value);
315 
316 	ufs_mtk_ref_clk_notify(host->ref_clk_enabled, POST_CHANGE, res);
317 
318 	return -ETIMEDOUT;
319 
320 out:
321 	host->ref_clk_enabled = on;
322 	if (on)
323 		ufshcd_delay_us(host->ref_clk_ungating_wait_us, 10);
324 
325 	ufs_mtk_ref_clk_notify(on, POST_CHANGE, res);
326 
327 	return 0;
328 }
329 
330 static void ufs_mtk_setup_ref_clk_wait_us(struct ufs_hba *hba,
331 					  u16 gating_us)
332 {
333 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
334 
335 	if (hba->dev_info.clk_gating_wait_us) {
336 		host->ref_clk_gating_wait_us =
337 			hba->dev_info.clk_gating_wait_us;
338 	} else {
339 		host->ref_clk_gating_wait_us = gating_us;
340 	}
341 
342 	host->ref_clk_ungating_wait_us = REFCLK_DEFAULT_WAIT_US;
343 }
344 
345 static void ufs_mtk_dbg_sel(struct ufs_hba *hba)
346 {
347 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
348 
349 	if (((host->ip_ver >> 16) & 0xFF) >= 0x36) {
350 		ufshcd_writel(hba, 0x820820, REG_UFS_DEBUG_SEL);
351 		ufshcd_writel(hba, 0x0, REG_UFS_DEBUG_SEL_B0);
352 		ufshcd_writel(hba, 0x55555555, REG_UFS_DEBUG_SEL_B1);
353 		ufshcd_writel(hba, 0xaaaaaaaa, REG_UFS_DEBUG_SEL_B2);
354 		ufshcd_writel(hba, 0xffffffff, REG_UFS_DEBUG_SEL_B3);
355 	} else {
356 		ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
357 	}
358 }
359 
360 static void ufs_mtk_wait_idle_state(struct ufs_hba *hba,
361 			    unsigned long retry_ms)
362 {
363 	u64 timeout, time_checked;
364 	u32 val, sm;
365 	bool wait_idle;
366 
367 	/* cannot use plain ktime_get() in suspend */
368 	timeout = ktime_get_mono_fast_ns() + retry_ms * 1000000UL;
369 
370 	/* wait a specific time after check base */
371 	udelay(10);
372 	wait_idle = false;
373 
374 	do {
375 		time_checked = ktime_get_mono_fast_ns();
376 		ufs_mtk_dbg_sel(hba);
377 		val = ufshcd_readl(hba, REG_UFS_PROBE);
378 
379 		sm = val & 0x1f;
380 
381 		/*
382 		 * if state is in H8 enter and H8 enter confirm
383 		 * wait until return to idle state.
384 		 */
385 		if ((sm >= VS_HIB_ENTER) && (sm <= VS_HIB_EXIT)) {
386 			wait_idle = true;
387 			udelay(50);
388 			continue;
389 		} else if (!wait_idle)
390 			break;
391 
392 		if (wait_idle && (sm == VS_HCE_BASE))
393 			break;
394 	} while (time_checked < timeout);
395 
396 	if (wait_idle && sm != VS_HCE_BASE)
397 		dev_info(hba->dev, "wait idle tmo: 0x%x\n", val);
398 }
399 
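/*
 * ufs_mtk_dbg_sel() routes an internal state machine onto REG_UFS_PROBE;
 * the helper below then takes the link state from the top nibble
 * (bits 31:28) of that probe register.
 */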
400 static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state,
401 				   unsigned long max_wait_ms)
402 {
403 	ktime_t timeout, time_checked;
404 	u32 val;
405 
406 	timeout = ktime_add_ms(ktime_get(), max_wait_ms);
407 	do {
408 		time_checked = ktime_get();
409 		ufs_mtk_dbg_sel(hba);
410 		val = ufshcd_readl(hba, REG_UFS_PROBE);
411 		val = val >> 28;
412 
413 		if (val == state)
414 			return 0;
415 
416 		/* Sleep for max. 200us */
417 		usleep_range(100, 200);
418 	} while (ktime_before(time_checked, timeout));
419 
420 	return -ETIMEDOUT;
421 }
422 
423 static int ufs_mtk_mphy_power_on(struct ufs_hba *hba, bool on)
424 {
425 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
426 	struct phy *mphy = host->mphy;
427 	struct arm_smccc_res res;
428 	int ret = 0;
429 
430 	if (!mphy || !(on ^ host->mphy_powered_on))
431 		return 0;
432 
433 	if (on) {
434 		if (ufs_mtk_is_va09_supported(hba)) {
435 			ret = regulator_enable(host->reg_va09);
436 			if (ret < 0)
437 				goto out;
438 			/* wait 200 us to stabilize VA09 */
439 			usleep_range(200, 210);
440 			ufs_mtk_va09_pwr_ctrl(res, 1);
441 		}
442 		phy_power_on(mphy);
443 	} else {
444 		phy_power_off(mphy);
445 		if (ufs_mtk_is_va09_supported(hba)) {
446 			ufs_mtk_va09_pwr_ctrl(res, 0);
447 			ret = regulator_disable(host->reg_va09);
448 		}
449 	}
450 out:
451 	if (ret) {
452 		dev_info(hba->dev,
453 			 "failed to %s va09: %d\n",
454 			 on ? "enable" : "disable",
455 			 ret);
456 	} else {
457 		host->mphy_powered_on = on;
458 	}
459 
460 	return ret;
461 }
462 
463 static int ufs_mtk_get_host_clk(struct device *dev, const char *name,
464 				struct clk **clk_out)
465 {
466 	struct clk *clk;
467 	int err = 0;
468 
469 	clk = devm_clk_get(dev, name);
470 	if (IS_ERR(clk))
471 		err = PTR_ERR(clk);
472 	else
473 		*clk_out = clk;
474 
475 	return err;
476 }
477 
478 static void ufs_mtk_boost_crypt(struct ufs_hba *hba, bool boost)
479 {
480 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
481 	struct ufs_mtk_crypt_cfg *cfg;
482 	struct regulator *reg;
483 	int volt, ret;
484 
485 	if (!ufs_mtk_is_boost_crypt_enabled(hba))
486 		return;
487 
488 	cfg = host->crypt;
489 	volt = cfg->vcore_volt;
490 	reg = cfg->reg_vcore;
491 
492 	ret = clk_prepare_enable(cfg->clk_crypt_mux);
493 	if (ret) {
494 		dev_info(hba->dev, "clk_prepare_enable(): %d\n",
495 			 ret);
496 		return;
497 	}
498 
499 	if (boost) {
500 		ret = regulator_set_voltage(reg, volt, INT_MAX);
501 		if (ret) {
502 			dev_info(hba->dev,
503 				 "failed to set vcore to %d\n", volt);
504 			goto out;
505 		}
506 
507 		ret = clk_set_parent(cfg->clk_crypt_mux,
508 				     cfg->clk_crypt_perf);
509 		if (ret) {
510 			dev_info(hba->dev,
511 				 "failed to set clk_crypt_perf\n");
512 			regulator_set_voltage(reg, 0, INT_MAX);
513 			goto out;
514 		}
515 	} else {
516 		ret = clk_set_parent(cfg->clk_crypt_mux,
517 				     cfg->clk_crypt_lp);
518 		if (ret) {
519 			dev_info(hba->dev,
520 				 "failed to set clk_crypt_lp\n");
521 			goto out;
522 		}
523 
524 		ret = regulator_set_voltage(reg, 0, INT_MAX);
525 		if (ret) {
526 			dev_info(hba->dev,
527 				 "failed to set vcore to MIN\n");
528 		}
529 	}
530 out:
531 	clk_disable_unprepare(cfg->clk_crypt_mux);
532 }
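/*
 * Design note on the ordering above: when boosting, vcore is raised
 * before the crypt mux is switched to the performance parent; when
 * unboosting, the mux is moved back to the low-power parent before
 * vcore is lowered. Either way the supply is presumably kept at or
 * above the level the active clock requires.
 */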
533 
534 static int ufs_mtk_init_host_clk(struct ufs_hba *hba, const char *name,
535 				 struct clk **clk)
536 {
537 	int ret;
538 
539 	ret = ufs_mtk_get_host_clk(hba->dev, name, clk);
540 	if (ret) {
541 		dev_info(hba->dev, "%s: failed to get %s: %d", __func__,
542 			 name, ret);
543 	}
544 
545 	return ret;
546 }
547 
548 static void ufs_mtk_init_boost_crypt(struct ufs_hba *hba)
549 {
550 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
551 	struct ufs_mtk_crypt_cfg *cfg;
552 	struct device *dev = hba->dev;
553 	struct regulator *reg;
554 	u32 volt;
555 
556 	host->crypt = devm_kzalloc(dev, sizeof(*(host->crypt)),
557 				   GFP_KERNEL);
558 	if (!host->crypt)
559 		goto disable_caps;
560 
561 	reg = devm_regulator_get_optional(dev, "dvfsrc-vcore");
562 	if (IS_ERR(reg)) {
563 		dev_info(dev, "failed to get dvfsrc-vcore: %ld",
564 			 PTR_ERR(reg));
565 		goto disable_caps;
566 	}
567 
568 	if (of_property_read_u32(dev->of_node, "boost-crypt-vcore-min",
569 				 &volt)) {
570 		dev_info(dev, "failed to get boost-crypt-vcore-min");
571 		goto disable_caps;
572 	}
573 
574 	cfg = host->crypt;
575 	if (ufs_mtk_init_host_clk(hba, "crypt_mux",
576 				  &cfg->clk_crypt_mux))
577 		goto disable_caps;
578 
579 	if (ufs_mtk_init_host_clk(hba, "crypt_lp",
580 				  &cfg->clk_crypt_lp))
581 		goto disable_caps;
582 
583 	if (ufs_mtk_init_host_clk(hba, "crypt_perf",
584 				  &cfg->clk_crypt_perf))
585 		goto disable_caps;
586 
587 	cfg->reg_vcore = reg;
588 	cfg->vcore_volt = volt;
589 	host->caps |= UFS_MTK_CAP_BOOST_CRYPT_ENGINE;
590 
591 disable_caps:
592 	return;
593 }
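/*
 * Illustrative devicetree fragment (hypothetical phandles and voltage)
 * matching the lookups above:
 *
 *	ufshci {
 *		mediatek,ufs-boost-crypt;
 *		boost-crypt-vcore-min = <700000>;
 *		dvfsrc-vcore-supply = <&dvfsrc_vcore>;
 *		clocks = <&clks CRYPT_MUX>, <&clks CRYPT_LP>, <&clks CRYPT_PERF>;
 *		clock-names = "crypt_mux", "crypt_lp", "crypt_perf";
 *	};
 */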
594 
595 static void ufs_mtk_init_va09_pwr_ctrl(struct ufs_hba *hba)
596 {
597 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
598 
599 	host->reg_va09 = regulator_get(hba->dev, "va09");
600 	if (IS_ERR(host->reg_va09))
601 		dev_info(hba->dev, "failed to get va09");
602 	else
603 		host->caps |= UFS_MTK_CAP_VA09_PWR_CTRL;
604 }
605 
606 static void ufs_mtk_init_host_caps(struct ufs_hba *hba)
607 {
608 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
609 	struct device_node *np = hba->dev->of_node;
610 
611 	if (of_property_read_bool(np, "mediatek,ufs-boost-crypt"))
612 		ufs_mtk_init_boost_crypt(hba);
613 
614 	if (of_property_read_bool(np, "mediatek,ufs-support-va09"))
615 		ufs_mtk_init_va09_pwr_ctrl(hba);
616 
617 	if (of_property_read_bool(np, "mediatek,ufs-disable-ah8"))
618 		host->caps |= UFS_MTK_CAP_DISABLE_AH8;
619 
620 	if (of_property_read_bool(np, "mediatek,ufs-broken-vcc"))
621 		host->caps |= UFS_MTK_CAP_BROKEN_VCC;
622 
623 	if (of_property_read_bool(np, "mediatek,ufs-pmc-via-fastauto"))
624 		host->caps |= UFS_MTK_CAP_PMC_VIA_FASTAUTO;
625 
626 	dev_info(hba->dev, "caps: 0x%x", host->caps);
627 }
628 
629 static void ufs_mtk_boost_pm_qos(struct ufs_hba *hba, bool boost)
630 {
631 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
632 
633 	if (!host || !host->pm_qos_init)
634 		return;
635 
636 	cpu_latency_qos_update_request(&host->pm_qos_req,
637 				       boost ? 0 : PM_QOS_DEFAULT_VALUE);
638 }
639 
640 static void ufs_mtk_scale_perf(struct ufs_hba *hba, bool scale_up)
641 {
642 	ufs_mtk_boost_crypt(hba, scale_up);
643 	ufs_mtk_boost_pm_qos(hba, scale_up);
644 }
645 
646 static void ufs_mtk_pwr_ctrl(struct ufs_hba *hba, bool on)
647 {
648 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
649 
650 	if (on) {
651 		phy_power_on(host->mphy);
652 		ufs_mtk_setup_ref_clk(hba, on);
653 		if (!ufshcd_is_clkscaling_supported(hba))
654 			ufs_mtk_scale_perf(hba, on);
655 	} else {
656 		if (!ufshcd_is_clkscaling_supported(hba))
657 			ufs_mtk_scale_perf(hba, on);
658 		ufs_mtk_setup_ref_clk(hba, on);
659 		phy_power_off(host->mphy);
660 	}
661 }
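/*
 * Note: power-up and power-down above mirror each other: mphy and
 * ref-clk come up before the performance boost is applied, and the
 * boost is dropped before ref-clk and mphy go down.
 */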
662 
663 /**
664  * ufs_mtk_setup_clocks - enables/disables clocks
665  * @hba: host controller instance
666  * @on: If true, enable clocks else disable them.
667  * @status: PRE_CHANGE or POST_CHANGE notify
668  *
669  * Return: 0 on success, non-zero on failure.
670  */
671 static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
672 				enum ufs_notify_change_status status)
673 {
674 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
675 	bool clk_pwr_off = false;
676 	int ret = 0;
677 
678 	/*
679 	 * If ufs_mtk_init() is not done yet, simply ignore this call;
680 	 * ufs_mtk_setup_clocks() is called again from ufs_mtk_init()
681 	 * once initialization is done.
682 	 */
683 	if (!host)
684 		return 0;
685 
686 	if (!on && status == PRE_CHANGE) {
687 		if (ufshcd_is_link_off(hba)) {
688 			clk_pwr_off = true;
689 		} else if (ufshcd_is_link_hibern8(hba) ||
690 			 (!ufshcd_can_hibern8_during_gating(hba) &&
691 			 ufshcd_is_auto_hibern8_enabled(hba))) {
692 			/*
693 			 * Gate ref-clk and poweroff mphy if link state is in
694 			 * OFF or Hibern8 by either Auto-Hibern8 or
695 			 * ufshcd_link_state_transition().
696 			 */
697 			ret = ufs_mtk_wait_link_state(hba,
698 						      VS_LINK_HIBERN8,
699 						      15);
700 			if (!ret)
701 				clk_pwr_off = true;
702 		}
703 
704 		if (clk_pwr_off)
705 			ufs_mtk_pwr_ctrl(hba, false);
706 	} else if (on && status == POST_CHANGE) {
707 		ufs_mtk_pwr_ctrl(hba, true);
708 	}
709 
710 	return ret;
711 }
712 
713 static void ufs_mtk_get_controller_version(struct ufs_hba *hba)
714 {
715 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
716 	int ret, ver = 0;
717 
718 	if (host->hw_ver.major)
719 		return;
720 
721 	/* Set default (minimum) version anyway */
722 	host->hw_ver.major = 2;
723 
724 	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_LOCALVERINFO), &ver);
725 	if (!ret) {
726 		if (ver >= UFS_UNIPRO_VER_1_8) {
727 			host->hw_ver.major = 3;
728 			/*
729 			 * Fix HCI version for some platforms with
730 			 * incorrect version
731 			 */
732 			if (hba->ufs_version < ufshci_version(3, 0))
733 				hba->ufs_version = ufshci_version(3, 0);
734 		}
735 	}
736 }
737 
738 static u32 ufs_mtk_get_ufs_hci_version(struct ufs_hba *hba)
739 {
740 	return hba->ufs_version;
741 }
742 
743 /**
744  * ufs_mtk_init_clocks - Init mtk driver private clocks
745  *
746  * @hba: per adapter instance
747  */
748 static void ufs_mtk_init_clocks(struct ufs_hba *hba)
749 {
750 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
751 	struct list_head *head = &hba->clk_list_head;
752 	struct ufs_mtk_clk *mclk = &host->mclk;
753 	struct ufs_clk_info *clki, *clki_tmp;
754 
755 	/*
756 	 * Find private clocks and store them in struct ufs_mtk_clk.
757 	 * Remove "ufs_sel_max_src" and "ufs_sel_min_src" from the list to avoid
758 	 * being switched on/off in clock gating.
759 	 */
760 	list_for_each_entry_safe(clki, clki_tmp, head, list) {
761 		if (!strcmp(clki->name, "ufs_sel")) {
762 			host->mclk.ufs_sel_clki = clki;
763 		} else if (!strcmp(clki->name, "ufs_sel_max_src")) {
764 			host->mclk.ufs_sel_max_clki = clki;
765 			clk_disable_unprepare(clki->clk);
766 			list_del(&clki->list);
767 		} else if (!strcmp(clki->name, "ufs_sel_min_src")) {
768 			host->mclk.ufs_sel_min_clki = clki;
769 			clk_disable_unprepare(clki->clk);
770 			list_del(&clki->list);
771 		}
772 	}
773 
774 	if (!mclk->ufs_sel_clki || !mclk->ufs_sel_max_clki ||
775 	    !mclk->ufs_sel_min_clki) {
776 		hba->caps &= ~UFSHCD_CAP_CLK_SCALING;
777 		dev_info(hba->dev,
778 			 "%s: Clk-scaling not ready. Feature disabled.",
779 			 __func__);
780 	}
781 }
782 
783 #define MAX_VCC_NAME 30
784 static int ufs_mtk_vreg_fix_vcc(struct ufs_hba *hba)
785 {
786 	struct ufs_vreg_info *info = &hba->vreg_info;
787 	struct device_node *np = hba->dev->of_node;
788 	struct device *dev = hba->dev;
789 	char vcc_name[MAX_VCC_NAME];
790 	struct arm_smccc_res res;
791 	int err, ver;
792 
793 	if (hba->vreg_info.vcc)
794 		return 0;
795 
796 	if (of_property_read_bool(np, "mediatek,ufs-vcc-by-num")) {
797 		ufs_mtk_get_vcc_num(res);
798 		if (res.a1 > UFS_VCC_NONE && res.a1 < UFS_VCC_MAX)
799 			snprintf(vcc_name, MAX_VCC_NAME, "vcc-opt%lu", res.a1);
800 		else
801 			return -ENODEV;
802 	} else if (of_property_read_bool(np, "mediatek,ufs-vcc-by-ver")) {
803 		ver = (hba->dev_info.wspecversion & 0xF00) >> 8;
804 		snprintf(vcc_name, MAX_VCC_NAME, "vcc-ufs%u", ver);
805 	} else {
806 		return 0;
807 	}
808 
809 	err = ufshcd_populate_vreg(dev, vcc_name, &info->vcc);
810 	if (err)
811 		return err;
812 
813 	err = ufshcd_get_vreg(dev, info->vcc);
814 	if (err)
815 		return err;
816 
817 	err = regulator_enable(info->vcc->reg);
818 	if (!err) {
819 		info->vcc->enabled = true;
820 		dev_info(dev, "%s: %s enabled\n", __func__, vcc_name);
821 	}
822 
823 	return err;
824 }
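/*
 * Worked example for the "mediatek,ufs-vcc-by-ver" path above: a UFS 3.1
 * device reports wspecversion 0x0310, so ver = (0x0310 & 0xF00) >> 8 = 3
 * and the supply looked up is "vcc-ufs3".
 */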
825 
826 static void ufs_mtk_vreg_fix_vccqx(struct ufs_hba *hba)
827 {
828 	struct ufs_vreg_info *info = &hba->vreg_info;
829 	struct ufs_vreg **vreg_on, **vreg_off;
830 
831 	if (hba->dev_info.wspecversion >= 0x0300) {
832 		vreg_on = &info->vccq;
833 		vreg_off = &info->vccq2;
834 	} else {
835 		vreg_on = &info->vccq2;
836 		vreg_off = &info->vccq;
837 	}
838 
839 	if (*vreg_on)
840 		(*vreg_on)->always_on = true;
841 
842 	if (*vreg_off) {
843 		regulator_disable((*vreg_off)->reg);
844 		devm_kfree(hba->dev, (*vreg_off)->name);
845 		devm_kfree(hba->dev, *vreg_off);
846 		*vreg_off = NULL;
847 	}
848 }
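/*
 * UFS 3.0 and later devices (wspecversion >= 0x0300) are powered via
 * VCCQ, earlier ones via VCCQ2: the helper above keeps the rail in use
 * always-on and releases the unused one.
 */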
849 
850 static void ufs_mtk_init_mcq_irq(struct ufs_hba *hba)
851 {
852 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
853 	struct platform_device *pdev;
854 	int i;
855 	int irq;
856 
857 	host->mcq_nr_intr = UFSHCD_MAX_Q_NR;
858 	pdev = container_of(hba->dev, struct platform_device, dev);
859 
860 	for (i = 0; i < host->mcq_nr_intr; i++) {
861 		/* irq index 0 is the legacy irq; sq/cq irqs start from index 1 */
862 		irq = platform_get_irq(pdev, i + 1);
863 		if (irq < 0) {
864 			host->mcq_intr_info[i].irq = MTK_MCQ_INVALID_IRQ;
865 			goto failed;
866 		}
867 		host->mcq_intr_info[i].hba = hba;
868 		host->mcq_intr_info[i].irq = irq;
869 		dev_info(hba->dev, "get platform mcq irq: %d, %d\n", i, irq);
870 	}
871 
872 	return;
873 failed:
874 	/* invalidate irq info */
875 	for (i = 0; i < host->mcq_nr_intr; i++)
876 		host->mcq_intr_info[i].irq = MTK_MCQ_INVALID_IRQ;
877 
878 	host->mcq_nr_intr = 0;
879 }
880 
881 /**
882  * ufs_mtk_init - find other essential mmio bases
883  * @hba: host controller instance
884  *
885  * Binds the PHY with the controller and powers up the PHY,
886  * enabling its clocks and regulators.
887  *
888  * Return: -EPROBE_DEFER if binding fails, a negative error code on
889  * phy power-up failure, and zero on success.
890  */
891 static int ufs_mtk_init(struct ufs_hba *hba)
892 {
893 	const struct of_device_id *id;
894 	struct device *dev = hba->dev;
895 	struct ufs_mtk_host *host;
896 	int err = 0;
897 
898 	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
899 	if (!host) {
900 		err = -ENOMEM;
901 		dev_info(dev, "%s: no memory for mtk ufs host\n", __func__);
902 		goto out;
903 	}
904 
905 	host->hba = hba;
906 	ufshcd_set_variant(hba, host);
907 
908 	id = of_match_device(ufs_mtk_of_match, dev);
909 	if (!id) {
910 		err = -EINVAL;
911 		goto out;
912 	}
913 
914 	/* Initialize host capability */
915 	ufs_mtk_init_host_caps(hba);
916 
917 	ufs_mtk_init_mcq_irq(hba);
918 
919 	err = ufs_mtk_bind_mphy(hba);
920 	if (err)
921 		goto out_variant_clear;
922 
923 	ufs_mtk_init_reset(hba);
924 
925 	/* Enable runtime autosuspend */
926 	hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
927 
928 	/* Enable clock-gating */
929 	hba->caps |= UFSHCD_CAP_CLK_GATING;
930 
931 	/* Enable inline encryption */
932 	hba->caps |= UFSHCD_CAP_CRYPTO;
933 
934 	/* Enable WriteBooster */
935 	hba->caps |= UFSHCD_CAP_WB_EN;
936 
937 	/* Enable clk scaling*/
938 	hba->caps |= UFSHCD_CAP_CLK_SCALING;
939 
940 	hba->quirks |= UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL;
941 	hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_INTR;
942 	hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_RTC;
943 	hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80);
944 
945 	if (host->caps & UFS_MTK_CAP_DISABLE_AH8)
946 		hba->caps |= UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
947 
948 	ufs_mtk_init_clocks(hba);
949 
950 	/*
951 	 * ufshcd_vops_init() is invoked after
952 	 * ufshcd_setup_clock(true) in ufshcd_hba_init() thus
953 	 * phy clock setup is skipped.
954 	 *
955 	 * Enable phy clocks specifically here.
956 	 */
957 	ufs_mtk_mphy_power_on(hba, true);
958 	ufs_mtk_setup_clocks(hba, true, POST_CHANGE);
959 
960 	host->ip_ver = ufshcd_readl(hba, REG_UFS_MTK_IP_VER);
961 
962 	/* Initialize pm-qos request */
963 	cpu_latency_qos_add_request(&host->pm_qos_req, PM_QOS_DEFAULT_VALUE);
964 	host->pm_qos_init = true;
965 
966 	goto out;
967 
968 out_variant_clear:
969 	ufshcd_set_variant(hba, NULL);
970 out:
971 	return err;
972 }
973 
974 static bool ufs_mtk_pmc_via_fastauto(struct ufs_hba *hba,
975 				     struct ufs_pa_layer_attr *dev_req_params)
976 {
977 	if (!ufs_mtk_is_pmc_via_fastauto(hba))
978 		return false;
979 
980 	if (dev_req_params->hs_rate == hba->pwr_info.hs_rate)
981 		return false;
982 
983 	if (dev_req_params->pwr_tx != FAST_MODE &&
984 	    dev_req_params->gear_tx < UFS_HS_G4)
985 		return false;
986 
987 	if (dev_req_params->pwr_rx != FAST_MODE &&
988 	    dev_req_params->gear_rx < UFS_HS_G4)
989 		return false;
990 
991 	return true;
992 }
993 
994 static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
995 				  struct ufs_pa_layer_attr *dev_max_params,
996 				  struct ufs_pa_layer_attr *dev_req_params)
997 {
998 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
999 	struct ufs_dev_params host_cap;
1000 	int ret;
1001 
1002 	ufshcd_init_pwr_dev_param(&host_cap);
1003 	host_cap.hs_rx_gear = UFS_HS_G5;
1004 	host_cap.hs_tx_gear = UFS_HS_G5;
1005 
1006 	ret = ufshcd_get_pwr_dev_param(&host_cap,
1007 				       dev_max_params,
1008 				       dev_req_params);
1009 	if (ret) {
1010 		pr_info("%s: failed to determine capabilities\n",
1011 			__func__);
1012 	}
1013 
1014 	if (ufs_mtk_pmc_via_fastauto(hba, dev_req_params)) {
1015 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true);
1016 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), UFS_HS_G1);
1017 
1018 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), true);
1019 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), UFS_HS_G1);
1020 
1021 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
1022 			       dev_req_params->lane_tx);
1023 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
1024 			       dev_req_params->lane_rx);
1025 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
1026 			       dev_req_params->hs_rate);
1027 
1028 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXHSADAPTTYPE),
1029 			       PA_NO_ADAPT);
1030 
1031 		ret = ufshcd_uic_change_pwr_mode(hba,
1032 					FASTAUTO_MODE << 4 | FASTAUTO_MODE);
1033 
1034 		if (ret) {
1035 			dev_err(hba->dev, "%s: HSG1B FASTAUTO failed ret=%d\n",
1036 				__func__, ret);
1037 		}
1038 	}
1039 
1040 	if (host->hw_ver.major >= 3) {
1041 		ret = ufshcd_dme_configure_adapt(hba,
1042 					   dev_req_params->gear_tx,
1043 					   PA_INITIAL_ADAPT);
1044 	}
1045 
1046 	return ret;
1047 }
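/*
 * Note on the FASTAUTO_MODE << 4 | FASTAUTO_MODE argument above,
 * assuming the usual UIC power-mode encoding: the upper nibble selects
 * the RX mode and the lower nibble the TX mode, so FASTAUTO is
 * requested in both directions.
 */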
1048 
1049 static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
1050 				     enum ufs_notify_change_status stage,
1051 				     struct ufs_pa_layer_attr *dev_max_params,
1052 				     struct ufs_pa_layer_attr *dev_req_params)
1053 {
1054 	int ret = 0;
1055 
1056 	switch (stage) {
1057 	case PRE_CHANGE:
1058 		ret = ufs_mtk_pre_pwr_change(hba, dev_max_params,
1059 					     dev_req_params);
1060 		break;
1061 	case POST_CHANGE:
1062 		break;
1063 	default:
1064 		ret = -EINVAL;
1065 		break;
1066 	}
1067 
1068 	return ret;
1069 }
1070 
1071 static int ufs_mtk_unipro_set_lpm(struct ufs_hba *hba, bool lpm)
1072 {
1073 	int ret;
1074 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
1075 
1076 	ret = ufshcd_dme_set(hba,
1077 			     UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
1078 			     lpm ? 1 : 0);
1079 	if (!ret || !lpm) {
1080 		/*
1081 		 * If the UIC command failed, forcibly record non-LPM mode so
1082 		 * that the default hba_enable_delay_us value is used when
1083 		 * re-enabling the host.
1084 		 */
1085 		host->unipro_lpm = lpm;
1086 	}
1087 
1088 	return ret;
1089 }
1090 
1091 static int ufs_mtk_pre_link(struct ufs_hba *hba)
1092 {
1093 	int ret;
1094 	u32 tmp;
1095 
1096 	ufs_mtk_get_controller_version(hba);
1097 
1098 	ret = ufs_mtk_unipro_set_lpm(hba, false);
1099 	if (ret)
1100 		return ret;
1101 
1102 	/*
1103 	 * Setting PA_Local_TX_LCC_Enable to 0 before link startup
1104 	 * to make sure that both host and device TX LCC are disabled
1105 	 * once link startup is completed.
1106 	 */
1107 	ret = ufshcd_disable_host_tx_lcc(hba);
1108 	if (ret)
1109 		return ret;
1110 
1111 	/* disable deep stall */
1112 	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
1113 	if (ret)
1114 		return ret;
1115 
1116 	tmp &= ~(1 << 6);
1117 
1118 	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
1119 
1120 	return ret;
1121 }
1122 
1123 static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
1124 {
1125 	u32 ah_ms;
1126 
1127 	if (ufshcd_is_clkgating_allowed(hba)) {
1128 		if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit)
1129 			ah_ms = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK,
1130 					  hba->ahit);
1131 		else
1132 			ah_ms = 10;
1133 		ufshcd_clkgate_delay_set(hba->dev, ah_ms + 5);
1134 	}
1135 }
1136 
1137 static void ufs_mtk_post_link(struct ufs_hba *hba)
1138 {
1139 	/* enable unipro clock gating feature */
1140 	ufs_mtk_cfg_unipro_cg(hba, true);
1141 
1142 	/* will be configured during probe hba */
1143 	if (ufshcd_is_auto_hibern8_supported(hba))
1144 		hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) |
1145 			FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
1146 
1147 	ufs_mtk_setup_clk_gating(hba);
1148 }
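/*
 * Assuming the standard AHIT scale encoding (scale 3 = 1 ms units), the
 * FIELD_PREP() pair above programs a 10 ms auto-hibern8 idle timer,
 * matching the ah_ms default in ufs_mtk_setup_clk_gating().
 */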
1149 
1150 static int ufs_mtk_link_startup_notify(struct ufs_hba *hba,
1151 				       enum ufs_notify_change_status stage)
1152 {
1153 	int ret = 0;
1154 
1155 	switch (stage) {
1156 	case PRE_CHANGE:
1157 		ret = ufs_mtk_pre_link(hba);
1158 		break;
1159 	case POST_CHANGE:
1160 		ufs_mtk_post_link(hba);
1161 		break;
1162 	default:
1163 		ret = -EINVAL;
1164 		break;
1165 	}
1166 
1167 	return ret;
1168 }
1169 
1170 static int ufs_mtk_device_reset(struct ufs_hba *hba)
1171 {
1172 	struct arm_smccc_res res;
1173 
1174 	/* disable hba before device reset */
1175 	ufshcd_hba_stop(hba);
1176 
1177 	ufs_mtk_device_reset_ctrl(0, res);
1178 
1179 	/*
1180 	 * The reset signal is active low. UFS devices shall detect
1181 	 * more than or equal to 1us of positive or negative RST_n
1182 	 * pulse width.
1183 	 *
1184 	 * To be on safe side, keep the reset low for at least 10us.
1185 	 */
1186 	usleep_range(10, 15);
1187 
1188 	ufs_mtk_device_reset_ctrl(1, res);
1189 
1190 	/* Some devices may need time to respond to rst_n */
1191 	usleep_range(10000, 15000);
1192 
1193 	dev_info(hba->dev, "device reset done\n");
1194 
1195 	return 0;
1196 }
1197 
1198 static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
1199 {
1200 	int err;
1201 
1202 	err = ufshcd_hba_enable(hba);
1203 	if (err)
1204 		return err;
1205 
1206 	err = ufs_mtk_unipro_set_lpm(hba, false);
1207 	if (err)
1208 		return err;
1209 
1210 	err = ufshcd_uic_hibern8_exit(hba);
1211 	if (!err)
1212 		ufshcd_set_link_active(hba);
1213 	else
1214 		return err;
1215 
1216 	if (!hba->mcq_enabled) {
1217 		err = ufshcd_make_hba_operational(hba);
1218 	} else {
1219 		ufs_mtk_config_mcq(hba, false);
1220 		ufshcd_mcq_make_queues_operational(hba);
1221 		ufshcd_mcq_config_mac(hba, hba->nutrs);
1222 		/* Enable MCQ mode */
1223 		ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_MEM_CFG) | 0x1,
1224 			      REG_UFS_MEM_CFG);
1225 	}
1226 
1227 	if (err)
1228 		return err;
1229 
1230 	return 0;
1231 }
1232 
1233 static int ufs_mtk_link_set_lpm(struct ufs_hba *hba)
1234 {
1235 	int err;
1236 
1237 	/* Disable reset confirm feature by UniPro */
1238 	ufshcd_writel(hba,
1239 		      (ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) & ~0x100),
1240 		      REG_UFS_XOUFS_CTRL);
1241 
1242 	err = ufs_mtk_unipro_set_lpm(hba, true);
1243 	if (err) {
1244 		/* Resume UniPro state for following error recovery */
1245 		ufs_mtk_unipro_set_lpm(hba, false);
1246 		return err;
1247 	}
1248 
1249 	return 0;
1250 }
1251 
1252 static void ufs_mtk_vccqx_set_lpm(struct ufs_hba *hba, bool lpm)
1253 {
1254 	struct ufs_vreg *vccqx = NULL;
1255 
1256 	if (hba->vreg_info.vccq)
1257 		vccqx = hba->vreg_info.vccq;
1258 	else
1259 		vccqx = hba->vreg_info.vccq2;
1260 
1261 	regulator_set_mode(vccqx->reg,
1262 			   lpm ? REGULATOR_MODE_IDLE : REGULATOR_MODE_NORMAL);
1263 }
1264 
1265 static void ufs_mtk_vsx_set_lpm(struct ufs_hba *hba, bool lpm)
1266 {
1267 	struct arm_smccc_res res;
1268 
1269 	ufs_mtk_device_pwr_ctrl(!lpm,
1270 				(unsigned long)hba->dev_info.wspecversion,
1271 				res);
1272 }
1273 
1274 static void ufs_mtk_dev_vreg_set_lpm(struct ufs_hba *hba, bool lpm)
1275 {
1276 	if (!hba->vreg_info.vccq && !hba->vreg_info.vccq2)
1277 		return;
1278 
1279 	/* Skip if VCC is assumed always-on */
1280 	if (!hba->vreg_info.vcc)
1281 		return;
1282 
1283 	/* Bypass LPM when device is still active */
1284 	if (lpm && ufshcd_is_ufs_dev_active(hba))
1285 		return;
1286 
1287 	/* Bypass LPM if VCC is enabled */
1288 	if (lpm && hba->vreg_info.vcc->enabled)
1289 		return;
1290 
1291 	if (lpm) {
1292 		ufs_mtk_vccqx_set_lpm(hba, lpm);
1293 		ufs_mtk_vsx_set_lpm(hba, lpm);
1294 	} else {
1295 		ufs_mtk_vsx_set_lpm(hba, lpm);
1296 		ufs_mtk_vccqx_set_lpm(hba, lpm);
1297 	}
1298 }
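/*
 * Note: LPM entry and exit above are ordered symmetrically: VCCQ/VCCQ2
 * drops to idle mode before the VSx rails when entering LPM, and the
 * VSx rails are restored first on the way out.
 */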
1299 
1300 static void ufs_mtk_auto_hibern8_disable(struct ufs_hba *hba)
1301 {
1302 	int ret;
1303 
1304 	/* disable auto-hibern8 */
1305 	ufshcd_writel(hba, 0, REG_AUTO_HIBERNATE_IDLE_TIMER);
1306 
1307 	/* wait host return to idle state when auto-hibern8 off */
1308 	ufs_mtk_wait_idle_state(hba, 5);
1309 
1310 	ret = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100);
1311 	if (ret)
1312 		dev_warn(hba->dev, "exit h8 state fail, ret=%d\n", ret);
1313 }
1314 
1315 static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
1316 	enum ufs_notify_change_status status)
1317 {
1318 	int err;
1319 	struct arm_smccc_res res;
1320 
1321 	if (status == PRE_CHANGE) {
1322 		if (ufshcd_is_auto_hibern8_supported(hba))
1323 			ufs_mtk_auto_hibern8_disable(hba);
1324 		return 0;
1325 	}
1326 
1327 	if (ufshcd_is_link_hibern8(hba)) {
1328 		err = ufs_mtk_link_set_lpm(hba);
1329 		if (err)
1330 			goto fail;
1331 	}
1332 
1333 	if (!ufshcd_is_link_active(hba)) {
1334 		/*
1335 		 * Make sure no error will be returned to prevent
1336 		 * ufshcd_suspend() re-enabling regulators while vreg is still
1337 		 * in low-power mode.
1338 		 */
1339 		err = ufs_mtk_mphy_power_on(hba, false);
1340 		if (err)
1341 			goto fail;
1342 	}
1343 
1344 	if (ufshcd_is_link_off(hba))
1345 		ufs_mtk_device_reset_ctrl(0, res);
1346 
1347 	ufs_mtk_host_pwr_ctrl(HOST_PWR_HCI, false, res);
1348 
1349 	return 0;
1350 fail:
1351 	/*
1352 	 * Forcibly set the link to the off state to trigger
1353 	 * ufshcd_host_reset_and_restore() in ufshcd_suspend(),
1354 	 * which performs a complete host reset.
1355 	 */
1356 	ufshcd_set_link_off(hba);
1357 	return -EAGAIN;
1358 }
1359 
1360 static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
1361 {
1362 	int err;
1363 	struct arm_smccc_res res;
1364 
1365 	if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
1366 		ufs_mtk_dev_vreg_set_lpm(hba, false);
1367 
1368 	ufs_mtk_host_pwr_ctrl(HOST_PWR_HCI, true, res);
1369 
1370 	err = ufs_mtk_mphy_power_on(hba, true);
1371 	if (err)
1372 		goto fail;
1373 
1374 	if (ufshcd_is_link_hibern8(hba)) {
1375 		err = ufs_mtk_link_set_hpm(hba);
1376 		if (err)
1377 			goto fail;
1378 	}
1379 
1380 	return 0;
1381 fail:
1382 	return ufshcd_link_recovery(hba);
1383 }
1384 
1385 static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba)
1386 {
1387 	/* Dump ufshci register 0x140 ~ 0x14C */
1388 	ufshcd_dump_regs(hba, REG_UFS_XOUFS_CTRL, 0x10,
1389 			 "XOUFS Ctrl (0x140): ");
1390 
1391 	ufshcd_dump_regs(hba, REG_UFS_EXTREG, 0x4, "Ext Reg ");
1392 
1393 	/* Dump ufshci register 0x2200 ~ 0x22AC */
1394 	ufshcd_dump_regs(hba, REG_UFS_MPHYCTRL,
1395 			 REG_UFS_REJECT_MON - REG_UFS_MPHYCTRL + 4,
1396 			 "MPHY Ctrl (0x2200): ");
1397 
1398 	/* Direct debugging information to REG_MTK_PROBE */
1399 	ufs_mtk_dbg_sel(hba);
1400 	ufshcd_dump_regs(hba, REG_UFS_PROBE, 0x4, "Debug Probe ");
1401 }
1402 
1403 static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
1404 {
1405 	struct ufs_dev_info *dev_info = &hba->dev_info;
1406 	u16 mid = dev_info->wmanufacturerid;
1407 
1408 	if (mid == UFS_VENDOR_SAMSUNG) {
1409 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6);
1410 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), 10);
1411 	}
1412 
1413 	/*
1414 	 * Decide waiting time before gating reference clock and
1415 	 * after ungating reference clock according to vendors'
1416 	 * requirements.
1417 	 */
1418 	if (mid == UFS_VENDOR_SAMSUNG)
1419 		ufs_mtk_setup_ref_clk_wait_us(hba, 1);
1420 	else if (mid == UFS_VENDOR_SKHYNIX)
1421 		ufs_mtk_setup_ref_clk_wait_us(hba, 30);
1422 	else if (mid == UFS_VENDOR_TOSHIBA)
1423 		ufs_mtk_setup_ref_clk_wait_us(hba, 100);
1424 	else
1425 		ufs_mtk_setup_ref_clk_wait_us(hba,
1426 					      REFCLK_DEFAULT_WAIT_US);
1427 	return 0;
1428 }
1429 
1430 static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
1431 {
1432 	ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups);
1433 
1434 	if (ufs_mtk_is_broken_vcc(hba) && hba->vreg_info.vcc &&
1435 	    (hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)) {
1436 		hba->vreg_info.vcc->always_on = true;
1437 		/*
1438 		 * VCC will be kept always-on thus we don't
1439 		 * need any delay during regulator operations
1440 		 */
1441 		hba->dev_quirks &= ~(UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
1442 			UFS_DEVICE_QUIRK_DELAY_AFTER_LPM);
1443 	}
1444 
1445 	ufs_mtk_vreg_fix_vcc(hba);
1446 	ufs_mtk_vreg_fix_vccqx(hba);
1447 }
1448 
1449 static void ufs_mtk_event_notify(struct ufs_hba *hba,
1450 				 enum ufs_event_type evt, void *data)
1451 {
1452 	unsigned int val = *(u32 *)data;
1453 	unsigned long reg;
1454 	u8 bit;
1455 
1456 	trace_ufs_mtk_event(evt, val);
1457 
1458 	/* Print details of UIC Errors */
1459 	if (evt <= UFS_EVT_DME_ERR) {
1460 		dev_info(hba->dev,
1461 			 "Host UIC Error Code (%s): %08x\n",
1462 			 ufs_uic_err_str[evt], val);
1463 		reg = val;
1464 	}
1465 
1466 	if (evt == UFS_EVT_PA_ERR) {
1467 		for_each_set_bit(bit, &reg, ARRAY_SIZE(ufs_uic_pa_err_str))
1468 			dev_info(hba->dev, "%s\n", ufs_uic_pa_err_str[bit]);
1469 	}
1470 
1471 	if (evt == UFS_EVT_DL_ERR) {
1472 		for_each_set_bit(bit, &reg, ARRAY_SIZE(ufs_uic_dl_err_str))
1473 			dev_info(hba->dev, "%s\n", ufs_uic_dl_err_str[bit]);
1474 	}
1475 }
1476 
1477 static void ufs_mtk_config_scaling_param(struct ufs_hba *hba,
1478 				struct devfreq_dev_profile *profile,
1479 				struct devfreq_simple_ondemand_data *data)
1480 {
1481 	/* Customize min gear in clk scaling */
1482 	hba->clk_scaling.min_gear = UFS_HS_G4;
1483 
1484 	hba->vps->devfreq_profile.polling_ms = 200;
1485 	hba->vps->ondemand_data.upthreshold = 50;
1486 	hba->vps->ondemand_data.downdifferential = 20;
1487 }
1488 
1489 /**
1490  * ufs_mtk_clk_scale - Internal clk scaling operation
1491  *
1492  * The MTK platform supports clk scaling by switching the parent of ufs_sel (a mux).
1493  * ufs_sel feeds ufs_ck, which clocks the UFS hardware directly.
1494  * The max and min clock rates of ufs_sel defined in the dts should match
1495  * the rates of "ufs_sel_max_src" and "ufs_sel_min_src" respectively.
1496  * This avoids changing the rate of a pll clock shared between modules.
1497  *
1498  * @hba: per adapter instance
1499  * @scale_up: True for scaling up and false for scaling down
1500  */
1501 static void ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up)
1502 {
1503 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
1504 	struct ufs_mtk_clk *mclk = &host->mclk;
1505 	struct ufs_clk_info *clki = mclk->ufs_sel_clki;
1506 	int ret = 0;
1507 
1508 	ret = clk_prepare_enable(clki->clk);
1509 	if (ret) {
1510 		dev_info(hba->dev,
1511 			 "clk_prepare_enable() fail, ret: %d\n", ret);
1512 		return;
1513 	}
1514 
1515 	if (scale_up) {
1516 		ret = clk_set_parent(clki->clk, mclk->ufs_sel_max_clki->clk);
1517 		clki->curr_freq = clki->max_freq;
1518 	} else {
1519 		ret = clk_set_parent(clki->clk, mclk->ufs_sel_min_clki->clk);
1520 		clki->curr_freq = clki->min_freq;
1521 	}
1522 
1523 	if (ret) {
1524 		dev_info(hba->dev,
1525 			 "Failed to set ufs_sel_clki, ret: %d\n", ret);
1526 	}
1527 
1528 	clk_disable_unprepare(clki->clk);
1529 
1530 	trace_ufs_mtk_clk_scale(clki->name, scale_up, clk_get_rate(clki->clk));
1531 }
1532 
1533 static int ufs_mtk_clk_scale_notify(struct ufs_hba *hba, bool scale_up,
1534 				    enum ufs_notify_change_status status)
1535 {
1536 	if (!ufshcd_is_clkscaling_supported(hba))
1537 		return 0;
1538 
1539 	if (status == PRE_CHANGE) {
1540 		/* Switch parent before clk_set_rate() */
1541 		ufs_mtk_clk_scale(hba, scale_up);
1542 	} else {
1543 		/* Request interrupt latency QoS accordingly */
1544 		ufs_mtk_scale_perf(hba, scale_up);
1545 	}
1546 
1547 	return 0;
1548 }
1549 
1550 static int ufs_mtk_get_hba_mac(struct ufs_hba *hba)
1551 {
1552 	return MAX_SUPP_MAC;
1553 }
1554 
1555 static int ufs_mtk_op_runtime_config(struct ufs_hba *hba)
1556 {
1557 	struct ufshcd_mcq_opr_info_t *opr;
1558 	int i;
1559 
1560 	hba->mcq_opr[OPR_SQD].offset = REG_UFS_MTK_SQD;
1561 	hba->mcq_opr[OPR_SQIS].offset = REG_UFS_MTK_SQIS;
1562 	hba->mcq_opr[OPR_CQD].offset = REG_UFS_MTK_CQD;
1563 	hba->mcq_opr[OPR_CQIS].offset = REG_UFS_MTK_CQIS;
1564 
1565 	for (i = 0; i < OPR_MAX; i++) {
1566 		opr = &hba->mcq_opr[i];
1567 		opr->stride = REG_UFS_MCQ_STRIDE;
1568 		opr->base = hba->mmio_base + opr->offset;
1569 	}
1570 
1571 	return 0;
1572 }
1573 
1574 static int ufs_mtk_mcq_config_resource(struct ufs_hba *hba)
1575 {
1576 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
1577 
1578 	/* fail mcq initialization if interrupt is not filled properly */
1579 	/* Fail MCQ initialization if the interrupts were not filled in properly */
1580 		dev_info(hba->dev, "IRQs not ready. MCQ disabled.");
1581 		return -EINVAL;
1582 	}
1583 
1584 	hba->mcq_base = hba->mmio_base + MCQ_QUEUE_OFFSET(hba->mcq_capabilities);
1585 	return 0;
1586 }
1587 
1588 static irqreturn_t ufs_mtk_mcq_intr(int irq, void *__intr_info)
1589 {
1590 	struct ufs_mtk_mcq_intr_info *mcq_intr_info = __intr_info;
1591 	struct ufs_hba *hba = mcq_intr_info->hba;
1592 	struct ufs_hw_queue *hwq;
1593 	u32 events;
1594 	int qid = mcq_intr_info->qid;
1595 
1596 	hwq = &hba->uhq[qid];
1597 
1598 	events = ufshcd_mcq_read_cqis(hba, qid);
1599 	if (events)
1600 		ufshcd_mcq_write_cqis(hba, events, qid);
1601 
1602 	if (events & UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS)
1603 		ufshcd_mcq_poll_cqe_lock(hba, hwq);
1604 
1605 	return IRQ_HANDLED;
1606 }
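/*
 * Each MCQ interrupt above is dedicated to a single hardware queue:
 * the handler acknowledges pending CQIS events by writing them back
 * and polls completions only on a tail-entry-push event.
 */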
1607 
1608 static int ufs_mtk_config_mcq_irq(struct ufs_hba *hba)
1609 {
1610 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
1611 	u32 irq, i;
1612 	int ret;
1613 
1614 	for (i = 0; i < host->mcq_nr_intr; i++) {
1615 		irq = host->mcq_intr_info[i].irq;
1616 		if (irq == MTK_MCQ_INVALID_IRQ) {
1617 			dev_err(hba->dev, "invalid irq. %d\n", i);
1618 			return -ENOPARAM;
1619 		}
1620 
1621 		host->mcq_intr_info[i].qid = i;
1622 		ret = devm_request_irq(hba->dev, irq, ufs_mtk_mcq_intr, 0, UFSHCD,
1623 				       &host->mcq_intr_info[i]);
1624 
1625 		dev_dbg(hba->dev, "request irq %d intr %s\n", irq, ret ? "failed" : "");
1626 
1627 		if (ret) {
1628 			dev_err(hba->dev, "Cannot request irq %d\n", ret);
1629 			return ret;
1630 		}
1631 	}
1632 
1633 	return 0;
1634 }
1635 
1636 static int ufs_mtk_config_mcq(struct ufs_hba *hba, bool irq)
1637 {
1638 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
1639 	int ret = 0;
1640 
1641 	if (!host->mcq_set_intr) {
1642 		/* Disable irq option register */
1643 		ufshcd_rmwl(hba, MCQ_INTR_EN_MSK, 0, REG_UFS_MMIO_OPT_CTRL_0);
1644 
1645 		if (irq) {
1646 			ret = ufs_mtk_config_mcq_irq(hba);
1647 			if (ret)
1648 				return ret;
1649 		}
1650 
1651 		host->mcq_set_intr = true;
1652 	}
1653 
1654 	ufshcd_rmwl(hba, MCQ_AH8, MCQ_AH8, REG_UFS_MMIO_OPT_CTRL_0);
1655 	ufshcd_rmwl(hba, MCQ_INTR_EN_MSK, MCQ_MULTI_INTR_EN, REG_UFS_MMIO_OPT_CTRL_0);
1656 
1657 	return 0;
1658 }
1659 
1660 static int ufs_mtk_config_esi(struct ufs_hba *hba)
1661 {
1662 	return ufs_mtk_config_mcq(hba, true);
1663 }
1664 
1665 /*
1666  * struct ufs_hba_mtk_vops - UFS MTK specific variant operations
1667  *
1668  * The variant operations configure the necessary controller and PHY
1669  * handshake during initialization.
1670  */
1671 static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
1672 	.name                = "mediatek.ufshci",
1673 	.init                = ufs_mtk_init,
1674 	.get_ufs_hci_version = ufs_mtk_get_ufs_hci_version,
1675 	.setup_clocks        = ufs_mtk_setup_clocks,
1676 	.hce_enable_notify   = ufs_mtk_hce_enable_notify,
1677 	.link_startup_notify = ufs_mtk_link_startup_notify,
1678 	.pwr_change_notify   = ufs_mtk_pwr_change_notify,
1679 	.apply_dev_quirks    = ufs_mtk_apply_dev_quirks,
1680 	.fixup_dev_quirks    = ufs_mtk_fixup_dev_quirks,
1681 	.suspend             = ufs_mtk_suspend,
1682 	.resume              = ufs_mtk_resume,
1683 	.dbg_register_dump   = ufs_mtk_dbg_register_dump,
1684 	.device_reset        = ufs_mtk_device_reset,
1685 	.event_notify        = ufs_mtk_event_notify,
1686 	.config_scaling_param = ufs_mtk_config_scaling_param,
1687 	.clk_scale_notify    = ufs_mtk_clk_scale_notify,
1688 	/* mcq vops */
1689 	.get_hba_mac         = ufs_mtk_get_hba_mac,
1690 	.op_runtime_config   = ufs_mtk_op_runtime_config,
1691 	.mcq_config_resource = ufs_mtk_mcq_config_resource,
1692 	.config_esi          = ufs_mtk_config_esi,
1693 };
1694 
1695 /**
1696  * ufs_mtk_probe - probe routine of the driver
1697  * @pdev: pointer to Platform device handle
1698  *
1699  * Return: zero for success and non-zero for failure.
1700  */
1701 static int ufs_mtk_probe(struct platform_device *pdev)
1702 {
1703 	int err;
1704 	struct device *dev = &pdev->dev;
1705 	struct device_node *reset_node;
1706 	struct platform_device *reset_pdev;
1707 	struct device_link *link;
1708 
1709 	reset_node = of_find_compatible_node(NULL, NULL,
1710 					     "ti,syscon-reset");
1711 	if (!reset_node) {
1712 		dev_notice(dev, "find ti,syscon-reset fail\n");
1713 		goto skip_reset;
1714 	}
1715 	reset_pdev = of_find_device_by_node(reset_node);
1716 	if (!reset_pdev) {
1717 		dev_notice(dev, "find reset_pdev fail\n");
1718 		goto skip_reset;
1719 	}
1720 	link = device_link_add(dev, &reset_pdev->dev,
1721 		DL_FLAG_AUTOPROBE_CONSUMER);
1722 	put_device(&reset_pdev->dev);
1723 	if (!link) {
1724 		dev_notice(dev, "add reset device_link fail\n");
1725 		goto skip_reset;
1726 	}
1727 	/* supplier is not probed */
1728 	if (link->status == DL_STATE_DORMANT) {
1729 		err = -EPROBE_DEFER;
1730 		goto out;
1731 	}
1732 
1733 skip_reset:
1734 	/* perform generic probe */
1735 	err = ufshcd_pltfrm_init(pdev, &ufs_hba_mtk_vops);
1736 
1737 out:
1738 	if (err)
1739 		dev_err(dev, "probe failed %d\n", err);
1740 
1741 	of_node_put(reset_node);
1742 	return err;
1743 }
1744 
1745 /**
1746  * ufs_mtk_remove - set driver_data of the device to NULL
1747  * @pdev: pointer to platform device handle
1748  *
1749  * Always returns 0.
1750  */
1751 static int ufs_mtk_remove(struct platform_device *pdev)
1752 {
1753 	struct ufs_hba *hba =  platform_get_drvdata(pdev);
1754 
1755 	pm_runtime_get_sync(&(pdev)->dev);
1756 	ufshcd_remove(hba);
1757 	return 0;
1758 }
1759 
1760 #ifdef CONFIG_PM_SLEEP
1761 static int ufs_mtk_system_suspend(struct device *dev)
1762 {
1763 	struct ufs_hba *hba = dev_get_drvdata(dev);
1764 	int ret;
1765 
1766 	ret = ufshcd_system_suspend(dev);
1767 	if (ret)
1768 		return ret;
1769 
1770 	ufs_mtk_dev_vreg_set_lpm(hba, true);
1771 
1772 	return 0;
1773 }
1774 
1775 static int ufs_mtk_system_resume(struct device *dev)
1776 {
1777 	struct ufs_hba *hba = dev_get_drvdata(dev);
1778 
1779 	ufs_mtk_dev_vreg_set_lpm(hba, false);
1780 
1781 	return ufshcd_system_resume(dev);
1782 }
1783 #endif
1784 
1785 #ifdef CONFIG_PM
1786 static int ufs_mtk_runtime_suspend(struct device *dev)
1787 {
1788 	struct ufs_hba *hba = dev_get_drvdata(dev);
1789 	int ret = 0;
1790 
1791 	ret = ufshcd_runtime_suspend(dev);
1792 	if (ret)
1793 		return ret;
1794 
1795 	ufs_mtk_dev_vreg_set_lpm(hba, true);
1796 
1797 	return 0;
1798 }
1799 
1800 static int ufs_mtk_runtime_resume(struct device *dev)
1801 {
1802 	struct ufs_hba *hba = dev_get_drvdata(dev);
1803 
1804 	ufs_mtk_dev_vreg_set_lpm(hba, false);
1805 
1806 	return ufshcd_runtime_resume(dev);
1807 }
1808 #endif
1809 
1810 static const struct dev_pm_ops ufs_mtk_pm_ops = {
1811 	SET_SYSTEM_SLEEP_PM_OPS(ufs_mtk_system_suspend,
1812 				ufs_mtk_system_resume)
1813 	SET_RUNTIME_PM_OPS(ufs_mtk_runtime_suspend,
1814 			   ufs_mtk_runtime_resume, NULL)
1815 	.prepare	 = ufshcd_suspend_prepare,
1816 	.complete	 = ufshcd_resume_complete,
1817 };
1818 
1819 static struct platform_driver ufs_mtk_pltform = {
1820 	.probe      = ufs_mtk_probe,
1821 	.remove     = ufs_mtk_remove,
1822 	.driver = {
1823 		.name   = "ufshcd-mtk",
1824 		.pm     = &ufs_mtk_pm_ops,
1825 		.of_match_table = ufs_mtk_of_match,
1826 	},
1827 };
1828 
1829 MODULE_AUTHOR("Stanley Chu <stanley.chu@mediatek.com>");
1830 MODULE_AUTHOR("Peter Wang <peter.wang@mediatek.com>");
1831 MODULE_DESCRIPTION("MediaTek UFS Host Driver");
1832 MODULE_LICENSE("GPL v2");
1833 
1834 module_platform_driver(ufs_mtk_pltform);
1835