xref: /openbmc/linux/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c (revision bc5aa3a0)
/*
 * Copyright (c) 2015, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/platform_device.h>

#include "dsi_phy.h"

#define S_DIV_ROUND_UP(n, d)	\
	(((n) >= 0) ? (((n) + (d) - 1) / (d)) : (((n) - (d) + 1) / (d)))
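
/*
 * Worked example (illustration only, not driver code): for a negative
 * numerator, S_DIV_ROUND_UP() rounds away from zero, while plain C
 * division would truncate toward zero:
 *
 *   S_DIV_ROUND_UP(7, 2)  == (7 + 2 - 1) / 2  ==  4
 *   S_DIV_ROUND_UP(-7, 2) == (-7 - 2 + 1) / 2 == -4   (plain -7 / 2 is -3)
 *
 * The signed variant presumably exists because intermediate timing terms
 * below may go negative before the clamps in linear_inter() apply.
 */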

static inline s32 linear_inter(s32 tmax, s32 tmin, s32 percent,
				s32 min_result, bool even)
{
	s32 v;

	v = (tmax - tmin) * percent;
	v = S_DIV_ROUND_UP(v, 100) + tmin;
	if (even && (v & 0x1))
		return max_t(s32, min_result, v - 1);
	else
		return max_t(s32, min_result, v);
}
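
/*
 * Worked example for linear_inter() (illustration only): with
 * tmax = 100, tmin = 60, percent = 10, min_result = 0, even = true:
 *
 *   v = (100 - 60) * 10 = 400
 *   v = S_DIV_ROUND_UP(400, 100) + 60 = 4 + 60 = 64
 *
 * 64 is even, so the result is max(0, 64) = 64.  Had tmax been 101,
 * v would come out as 65 and the "even" flag would round it down to 64.
 */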

static void dsi_dphy_timing_calc_clk_zero(struct msm_dsi_dphy_timing *timing,
					s32 ui, s32 coeff, s32 pcnt)
{
	s32 tmax, tmin, clk_z;
	s32 temp;

	/* reset */
	temp = 300 * coeff - ((timing->clk_prepare >> 1) + 1) * 2 * ui;
	tmin = S_DIV_ROUND_UP(temp, ui) - 2;
	if (tmin > 255) {
		tmax = 511;
		clk_z = linear_inter(2 * tmin, tmin, pcnt, 0, true);
	} else {
		tmax = 255;
		clk_z = linear_inter(tmax, tmin, pcnt, 0, true);
	}

	/* adjust */
	temp = (timing->hs_rqst + timing->clk_prepare + clk_z) & 0x7;
	timing->clk_zero = clk_z + 8 - temp;
}
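
/*
 * Illustration of the "adjust" step above (example numbers only): the
 * sum hs_rqst + clk_prepare + clk_zero is pushed up to the next
 * multiple of 8.  With hs_rqst = 5, clk_prepare = 3 and clk_z = 11 the
 * sum is 19, its low three bits are 3, so clk_zero becomes
 * 11 + 8 - 3 = 16 and the total 5 + 3 + 16 = 24 is 8-aligned.
 */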

int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing,
	const unsigned long bit_rate, const unsigned long esc_rate)
{
	s32 ui, lpx;
	s32 tmax, tmin;
	s32 pcnt0 = 10;
	s32 pcnt1 = (bit_rate > 1200000000) ? 15 : 10;
	s32 pcnt2 = 10;
	s32 pcnt3 = (bit_rate > 180000000) ? 10 : 40;
	s32 coeff = 1000; /* Precision, should avoid overflow */
	s32 temp;

	if (!bit_rate || !esc_rate)
		return -EINVAL;

	ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
	lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000);

	tmax = S_DIV_ROUND_UP(95 * coeff, ui) - 2;
	tmin = S_DIV_ROUND_UP(38 * coeff, ui) - 2;
	timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, true);

	temp = lpx / ui;
	if (temp & 0x1)
		timing->hs_rqst = temp;
	else
		timing->hs_rqst = max_t(s32, 0, temp - 2);

	/* Calculate clk_zero after clk_prepare and hs_rqst */
	dsi_dphy_timing_calc_clk_zero(timing, ui, coeff, pcnt2);

	temp = 105 * coeff + 12 * ui - 20 * coeff;
	tmax = S_DIV_ROUND_UP(temp, ui) - 2;
	tmin = S_DIV_ROUND_UP(60 * coeff, ui) - 2;
	timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, true);

	temp = 85 * coeff + 6 * ui;
	tmax = S_DIV_ROUND_UP(temp, ui) - 2;
	temp = 40 * coeff + 4 * ui;
	tmin = S_DIV_ROUND_UP(temp, ui) - 2;
	timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, true);

	tmax = 255;
	temp = ((timing->hs_prepare >> 1) + 1) * 2 * ui + 2 * ui;
	temp = 145 * coeff + 10 * ui - temp;
	tmin = S_DIV_ROUND_UP(temp, ui) - 2;
	timing->hs_zero = linear_inter(tmax, tmin, pcnt2, 24, true);

	temp = 105 * coeff + 12 * ui - 20 * coeff;
	tmax = S_DIV_ROUND_UP(temp, ui) - 2;
	temp = 60 * coeff + 4 * ui;
	tmin = DIV_ROUND_UP(temp, ui) - 2;
	timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, true);

	tmax = 255;
	tmin = S_DIV_ROUND_UP(100 * coeff, ui) - 2;
	timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, true);

	tmax = 63;
	temp = ((timing->hs_exit >> 1) + 1) * 2 * ui;
	temp = 60 * coeff + 52 * ui - 24 * ui - temp;
	tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1;
	timing->clk_post = linear_inter(tmax, tmin, pcnt2, 0, false);

	tmax = 63;
	temp = ((timing->clk_prepare >> 1) + 1) * 2 * ui;
	temp += ((timing->clk_zero >> 1) + 1) * 2 * ui;
	temp += 8 * ui + lpx;
	tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1;
	if (tmin > tmax) {
		temp = linear_inter(2 * tmax, tmin, pcnt2, 0, false);
		timing->clk_pre = temp >> 1;
	} else {
		timing->clk_pre = linear_inter(tmax, tmin, pcnt2, 0, false);
	}

	timing->ta_go = 3;
	timing->ta_sure = 0;
	timing->ta_get = 4;

	DBG("PHY timings: %d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
		timing->clk_pre, timing->clk_post, timing->clk_zero,
		timing->clk_trail, timing->clk_prepare, timing->hs_exit,
		timing->hs_zero, timing->hs_prepare, timing->hs_trail,
		timing->hs_rqst);

	return 0;
}
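
/*
 * Minimal usage sketch (illustration only, not part of this driver):
 * example_calc_timing() is a hypothetical helper showing how a caller
 * might fill phy->timing; the 19200000UL escape-clock rate is just an
 * example value.
 */
static int __maybe_unused example_calc_timing(struct msm_dsi_phy *phy,
					      unsigned long bit_rate)
{
	int ret;

	ret = msm_dsi_dphy_timing_calc(&phy->timing, bit_rate, 19200000UL);
	if (ret)
		pr_err("D-PHY timing calculation failed: %d\n", ret);

	return ret;
}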

void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg,
				u32 bit_mask)
{
	int phy_id = phy->id;
	u32 val;

	if ((phy_id >= DSI_MAX) || (pll_id >= DSI_MAX))
		return;

	val = dsi_phy_read(phy->base + reg);

	if (phy->cfg->src_pll_truthtable[phy_id][pll_id])
		dsi_phy_write(phy->base + reg, val | bit_mask);
	else
		dsi_phy_write(phy->base + reg, val & (~bit_mask));
}
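
/*
 * Illustration of the truth-table lookup above (values are made up,
 * not taken from any real PHY configuration): with
 *
 *	src_pll_truthtable[2][2] = { { true, true }, { false, true } },
 *
 * PHY 1 sourcing from PLL 0 gets the bit cleared, and every other
 * PHY/PLL pairing gets it set in the glue register.
 */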

static int dsi_phy_regulator_init(struct msm_dsi_phy *phy)
{
	struct regulator_bulk_data *s = phy->supplies;
	const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
	struct device *dev = &phy->pdev->dev;
	int num = phy->cfg->reg_cfg.num;
	int i, ret;

	for (i = 0; i < num; i++)
		s[i].supply = regs[i].name;

	ret = devm_regulator_bulk_get(dev, num, s);
	if (ret < 0) {
		dev_err(dev, "%s: failed to init regulator, ret=%d\n",
						__func__, ret);
		return ret;
	}

	return 0;
}

static void dsi_phy_regulator_disable(struct msm_dsi_phy *phy)
{
	struct regulator_bulk_data *s = phy->supplies;
	const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
	int num = phy->cfg->reg_cfg.num;
	int i;

	DBG("");
	for (i = num - 1; i >= 0; i--)
		if (regs[i].disable_load >= 0)
			regulator_set_load(s[i].consumer, regs[i].disable_load);

	regulator_bulk_disable(num, s);
}

static int dsi_phy_regulator_enable(struct msm_dsi_phy *phy)
{
	struct regulator_bulk_data *s = phy->supplies;
	const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
	struct device *dev = &phy->pdev->dev;
	int num = phy->cfg->reg_cfg.num;
	int ret, i;

	DBG("");
	for (i = 0; i < num; i++) {
		if (regs[i].enable_load >= 0) {
			ret = regulator_set_load(s[i].consumer,
							regs[i].enable_load);
			if (ret < 0) {
				dev_err(dev,
					"regulator %d set op mode failed, %d\n",
					i, ret);
				goto fail;
			}
		}
	}

	ret = regulator_bulk_enable(num, s);
	if (ret < 0) {
		dev_err(dev, "regulator enable failed, %d\n", ret);
		goto fail;
	}

	return 0;

fail:
	for (i--; i >= 0; i--)
		regulator_set_load(s[i].consumer, regs[i].disable_load);
	return ret;
}
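
/*
 * Note on the load bookkeeping above (numbers are hypothetical): a
 * supply listed with enable_load = 100000 and disable_load = 100 (uA)
 * is bumped to the higher load just before regulator_bulk_enable() and
 * dropped back in dsi_phy_regulator_disable(); a negative load means
 * the entry is skipped and regulator_set_load() is never called for it.
 */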

static int dsi_phy_enable_resource(struct msm_dsi_phy *phy)
{
	struct device *dev = &phy->pdev->dev;
	int ret;

	pm_runtime_get_sync(dev);

	ret = clk_prepare_enable(phy->ahb_clk);
	if (ret) {
		dev_err(dev, "%s: can't enable ahb clk, %d\n", __func__, ret);
		pm_runtime_put_sync(dev);
	}

	return ret;
}

static void dsi_phy_disable_resource(struct msm_dsi_phy *phy)
{
	clk_disable_unprepare(phy->ahb_clk);
	pm_runtime_put_sync(&phy->pdev->dev);
}

static const struct of_device_id dsi_phy_dt_match[] = {
#ifdef CONFIG_DRM_MSM_DSI_28NM_PHY
	{ .compatible = "qcom,dsi-phy-28nm-hpm",
	  .data = &dsi_phy_28nm_hpm_cfgs },
	{ .compatible = "qcom,dsi-phy-28nm-lp",
	  .data = &dsi_phy_28nm_lp_cfgs },
#endif
#ifdef CONFIG_DRM_MSM_DSI_20NM_PHY
	{ .compatible = "qcom,dsi-phy-20nm",
	  .data = &dsi_phy_20nm_cfgs },
#endif
#ifdef CONFIG_DRM_MSM_DSI_28NM_8960_PHY
	{ .compatible = "qcom,dsi-phy-28nm-8960",
	  .data = &dsi_phy_28nm_8960_cfgs },
#endif
	{}
};

/*
 * Currently, we only support one SoC for each PHY type. When we have multiple
 * SoCs for the same PHY, we can try to make the index searching a bit more
 * clever.
 */
static int dsi_phy_get_id(struct msm_dsi_phy *phy)
{
	struct platform_device *pdev = phy->pdev;
	const struct msm_dsi_phy_cfg *cfg = phy->cfg;
	struct resource *res;
	int i;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dsi_phy");
	if (!res)
		return -EINVAL;

	for (i = 0; i < cfg->num_dsi_phy; i++) {
		if (cfg->io_start[i] == res->start)
			return i;
	}

	return -EINVAL;
}
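
/*
 * Illustration of the index lookup above (addresses are made up): a
 * configuration with io_start[] = { 0xfd922800, 0xfd923000 } and a
 * "dsi_phy" register resource starting at 0xfd923000 yields phy->id == 1.
 */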

static int dsi_phy_driver_probe(struct platform_device *pdev)
{
	struct msm_dsi_phy *phy;
	struct device *dev = &pdev->dev;
	const struct of_device_id *match;
	int ret;

	phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
	if (!phy)
		return -ENOMEM;

	match = of_match_node(dsi_phy_dt_match, dev->of_node);
	if (!match)
		return -ENODEV;

	phy->cfg = match->data;
	phy->pdev = pdev;

	phy->id = dsi_phy_get_id(phy);
	if (phy->id < 0) {
		ret = phy->id;
		dev_err(dev, "%s: couldn't identify PHY index, %d\n",
			__func__, ret);
		goto fail;
	}

	phy->regulator_ldo_mode = of_property_read_bool(dev->of_node,
				"qcom,dsi-phy-regulator-ldo-mode");

	phy->base = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
	if (IS_ERR(phy->base)) {
		dev_err(dev, "%s: failed to map phy base\n", __func__);
		ret = -ENOMEM;
		goto fail;
	}

	phy->reg_base = msm_ioremap(pdev, "dsi_phy_regulator",
				"DSI_PHY_REG");
	if (IS_ERR(phy->reg_base)) {
		dev_err(dev, "%s: failed to map phy regulator base\n",
			__func__);
		ret = -ENOMEM;
		goto fail;
	}

	ret = dsi_phy_regulator_init(phy);
	if (ret) {
		dev_err(dev, "%s: failed to init regulator\n", __func__);
		goto fail;
	}

	phy->ahb_clk = devm_clk_get(dev, "iface_clk");
	if (IS_ERR(phy->ahb_clk)) {
		dev_err(dev, "%s: Unable to get ahb clk\n", __func__);
		ret = PTR_ERR(phy->ahb_clk);
		goto fail;
	}

	/* PLL init will call into clk_register which requires
	 * register access, so we need to enable power and ahb clock.
	 */
	ret = dsi_phy_enable_resource(phy);
	if (ret)
		goto fail;

	phy->pll = msm_dsi_pll_init(pdev, phy->cfg->type, phy->id);
	if (!phy->pll)
		dev_info(dev,
			"%s: pll init failed, need separate pll clk driver\n",
			__func__);

	dsi_phy_disable_resource(phy);

	platform_set_drvdata(pdev, phy);

	return 0;

fail:
	return ret;
}

static int dsi_phy_driver_remove(struct platform_device *pdev)
{
	struct msm_dsi_phy *phy = platform_get_drvdata(pdev);

	if (phy && phy->pll) {
		msm_dsi_pll_destroy(phy->pll);
		phy->pll = NULL;
	}

	platform_set_drvdata(pdev, NULL);

	return 0;
}

static struct platform_driver dsi_phy_platform_driver = {
	.probe      = dsi_phy_driver_probe,
	.remove     = dsi_phy_driver_remove,
	.driver     = {
		.name   = "msm_dsi_phy",
		.of_match_table = dsi_phy_dt_match,
	},
};

void __init msm_dsi_phy_driver_register(void)
{
	platform_driver_register(&dsi_phy_platform_driver);
}

void __exit msm_dsi_phy_driver_unregister(void)
{
	platform_driver_unregister(&dsi_phy_platform_driver);
}

int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
	const unsigned long bit_rate, const unsigned long esc_rate)
{
	struct device *dev;
	int ret;

	if (!phy || !phy->cfg->ops.enable)
		return -EINVAL;

	dev = &phy->pdev->dev;

	ret = dsi_phy_regulator_enable(phy);
	if (ret) {
		dev_err(dev, "%s: regulator enable failed, %d\n",
			__func__, ret);
		return ret;
	}

	ret = phy->cfg->ops.enable(phy, src_pll_id, bit_rate, esc_rate);
	if (ret) {
		dev_err(dev, "%s: phy enable failed, %d\n", __func__, ret);
		dsi_phy_regulator_disable(phy);
		return ret;
	}

	return 0;
}

void msm_dsi_phy_disable(struct msm_dsi_phy *phy)
{
	if (!phy || !phy->cfg->ops.disable)
		return;

	phy->cfg->ops.disable(phy);

	dsi_phy_regulator_disable(phy);
}
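
/*
 * Usage sketch (illustration only; the sequence below belongs to the
 * DSI host driver, not this file): the PHY is enabled before the link
 * starts and disabled symmetrically on teardown.  example_phy_cycle()
 * is hypothetical and the clock rates are example numbers.
 */
static void __maybe_unused example_phy_cycle(struct msm_dsi_phy *phy)
{
	if (msm_dsi_phy_enable(phy, 0, 500000000UL, 19200000UL))
		return;

	/* ... drive the DSI link ... */

	msm_dsi_phy_disable(phy);
}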

void msm_dsi_phy_get_clk_pre_post(struct msm_dsi_phy *phy,
					u32 *clk_pre, u32 *clk_post)
{
	if (!phy)
		return;

	if (clk_pre)
		*clk_pre = phy->timing.clk_pre;
	if (clk_post)
		*clk_post = phy->timing.clk_post;
}

struct msm_dsi_pll *msm_dsi_phy_get_pll(struct msm_dsi_phy *phy)
{
	if (!phy)
		return NULL;

	return phy->pll;
}