/*
 * SPDX-License-Identifier: GPL-2.0
 * Copyright (c) 2018, The Linux Foundation
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interconnect.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdesc.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>

#include "msm_drv.h"
#include "msm_kms.h"

#define HW_REV				0x0
#define HW_INTR_STATUS			0x0010

#define UBWC_DEC_HW_VERSION		0x58
#define UBWC_STATIC			0x144
#define UBWC_CTRL_2			0x150
#define UBWC_PREDICTION_MODE		0x154

#define MIN_IB_BW	400000000UL /* Minimum ib (instantaneous/peak bandwidth) vote: 400 MB/s */

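/*
 * Per-SoC UBWC (Universal Bandwidth Compression) configuration. These
 * values are programmed into the UBWC_* registers of the MDSS region
 * by the msm_mdss_setup_ubwc_dec_*() helpers below; which field layout
 * they map onto depends on ubwc_dec_version.
 */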
struct msm_mdss_data {
	u32 ubwc_version;
	/* can be read from register 0x58 */
	u32 ubwc_dec_version;
	u32 ubwc_swizzle;
	u32 ubwc_static;
	u32 highest_bank_bit;
	u32 macrotile_mode;
};

struct msm_mdss {
	struct device *dev;

	void __iomem *mmio;
	struct clk_bulk_data *clocks;
	size_t num_clocks;
	bool is_mdp5;
	struct {
		unsigned long enabled_mask;
		struct irq_domain *domain;
	} irq_controller;
	const struct msm_mdss_data *mdss_data;
	struct icc_path *path[2];
	u32 num_paths;
};

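/*
 * The "mdp0-mem" interconnect path is mandatory: failure to get it is
 * propagated to the caller. "mdp1-mem" exists only on SoCs with a
 * second data bus port, so it is treated as optional.
 */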
static int msm_mdss_parse_data_bus_icc_path(struct device *dev,
					    struct msm_mdss *msm_mdss)
{
	struct icc_path *path0;
	struct icc_path *path1;

	path0 = of_icc_get(dev, "mdp0-mem");
	if (IS_ERR_OR_NULL(path0))
		return PTR_ERR_OR_ZERO(path0);

	msm_mdss->path[0] = path0;
	msm_mdss->num_paths = 1;

	path1 = of_icc_get(dev, "mdp1-mem");
	if (!IS_ERR_OR_NULL(path1)) {
		msm_mdss->path[1] = path1;
		msm_mdss->num_paths++;
	}

	return 0;
}

static void msm_mdss_put_icc_path(void *data)
{
	struct msm_mdss *msm_mdss = data;
	int i;

	for (i = 0; i < msm_mdss->num_paths; i++)
		icc_put(msm_mdss->path[i]);
}

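/*
 * Bandwidth is requested as a peak (ib) vote only; the average (ab)
 * argument of icc_set_bw() is left at zero on every path.
 */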
static void msm_mdss_icc_request_bw(struct msm_mdss *msm_mdss, unsigned long bw)
{
	int i;

	for (i = 0; i < msm_mdss->num_paths; i++)
		icc_set_bw(msm_mdss->path[i], 0, Bps_to_icc(bw));
}

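/*
 * Chained interrupt handler: read the summary HW_INTR_STATUS register
 * and dispatch each pending bit, highest first, into the MDSS irq
 * domain so the owning child block (DPU, DSI, ...) can service it.
 */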
static void msm_mdss_irq(struct irq_desc *desc)
{
	struct msm_mdss *msm_mdss = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	u32 interrupts;

	chained_irq_enter(chip, desc);

	interrupts = readl_relaxed(msm_mdss->mmio + HW_INTR_STATUS);

	while (interrupts) {
		irq_hw_number_t hwirq = fls(interrupts) - 1;
		int rc;

		rc = generic_handle_domain_irq(msm_mdss->irq_controller.domain,
					       hwirq);
		if (rc < 0) {
			dev_err(msm_mdss->dev, "handle irq fail: irq=%lu rc=%d\n",
				hwirq, rc);
			break;
		}

		interrupts &= ~(1 << hwirq);
	}

	chained_irq_exit(chip, desc);
}

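/*
 * The mask/unmask callbacks only track state in enabled_mask; the MDSS
 * top level has no per-interrupt enable register, so actual masking
 * appears to be left to the child blocks' own interrupt registers.
 */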
static void msm_mdss_irq_mask(struct irq_data *irqd)
{
	struct msm_mdss *msm_mdss = irq_data_get_irq_chip_data(irqd);

	/* memory barrier */
	smp_mb__before_atomic();
	clear_bit(irqd->hwirq, &msm_mdss->irq_controller.enabled_mask);
	/* memory barrier */
	smp_mb__after_atomic();
}

static void msm_mdss_irq_unmask(struct irq_data *irqd)
{
	struct msm_mdss *msm_mdss = irq_data_get_irq_chip_data(irqd);

	/* memory barrier */
	smp_mb__before_atomic();
	set_bit(irqd->hwirq, &msm_mdss->irq_controller.enabled_mask);
	/* memory barrier */
	smp_mb__after_atomic();
}

static struct irq_chip msm_mdss_irq_chip = {
	.name = "msm_mdss",
	.irq_mask = msm_mdss_irq_mask,
	.irq_unmask = msm_mdss_irq_unmask,
};

static struct lock_class_key msm_mdss_lock_key, msm_mdss_request_key;

static int msm_mdss_irqdomain_map(struct irq_domain *domain,
		unsigned int irq, irq_hw_number_t hwirq)
{
	struct msm_mdss *msm_mdss = domain->host_data;

	irq_set_lockdep_class(irq, &msm_mdss_lock_key, &msm_mdss_request_key);
	irq_set_chip_and_handler(irq, &msm_mdss_irq_chip, handle_level_irq);

	return irq_set_chip_data(irq, msm_mdss);
}

static const struct irq_domain_ops msm_mdss_irqdomain_ops = {
	.map = msm_mdss_irqdomain_map,
	.xlate = irq_domain_xlate_onecell,
};

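/*
 * A linear domain of 32 interrupts, one per bit of the 32-bit
 * HW_INTR_STATUS register that msm_mdss_irq() demultiplexes.
 */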
static int _msm_mdss_irq_domain_add(struct msm_mdss *msm_mdss)
{
	struct device *dev;
	struct irq_domain *domain;

	dev = msm_mdss->dev;

	domain = irq_domain_add_linear(dev->of_node, 32,
			&msm_mdss_irqdomain_ops, msm_mdss);
	if (!domain) {
		dev_err(dev, "failed to add irq_domain\n");
		return -EINVAL;
	}

	msm_mdss->irq_controller.enabled_mask = 0;
	msm_mdss->irq_controller.domain = domain;

	return 0;
}

#define UBWC_1_0 0x10000000
#define UBWC_2_0 0x20000000
#define UBWC_3_0 0x30000000
#define UBWC_4_0 0x40000000

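/*
 * The UBWC_n constants above appear to follow the encoding of the
 * UBWC_DEC_HW_VERSION register, with the major version in the top
 * nibble (0x40000000 == 4.0), which is why the same values serve for
 * both ubwc_version and ubwc_dec_version comparisons.
 */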
static void msm_mdss_setup_ubwc_dec_20(struct msm_mdss *msm_mdss)
{
	const struct msm_mdss_data *data = msm_mdss->mdss_data;

	writel_relaxed(data->ubwc_static, msm_mdss->mmio + UBWC_STATIC);
}

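/*
 * Decoder 3.x UBWC_STATIC layout, as programmed below: bit 0 carries
 * the swizzle setting, bits 5:4 the highest bank bit and bit 12 the
 * macrotile mode. Bits 8 and 10 are version quirks for UBWC 1.0 and
 * 3.0 payloads respectively.
 */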
static void msm_mdss_setup_ubwc_dec_30(struct msm_mdss *msm_mdss)
{
	const struct msm_mdss_data *data = msm_mdss->mdss_data;
	u32 value = (data->ubwc_swizzle & 0x1) |
		    (data->highest_bank_bit & 0x3) << 4 |
		    (data->macrotile_mode & 0x1) << 12;

	if (data->ubwc_version == UBWC_3_0)
		value |= BIT(10);

	if (data->ubwc_version == UBWC_1_0)
		value |= BIT(8);

	writel_relaxed(value, msm_mdss->mmio + UBWC_STATIC);
}

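/*
 * Decoder 4.x widens the swizzle and highest-bank-bit fields (bits 2:0
 * and 6:4) and adds the UBWC_CTRL_2/UBWC_PREDICTION_MODE registers.
 * The UBWC_3_0 branch below covers SoCs such as sc7280 that pair a 4.x
 * decoder with a 3.0 UBWC payload format.
 */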
static void msm_mdss_setup_ubwc_dec_40(struct msm_mdss *msm_mdss)
{
	const struct msm_mdss_data *data = msm_mdss->mdss_data;
	u32 value = (data->ubwc_swizzle & 0x7) |
		    (data->ubwc_static & 0x1) << 3 |
		    (data->highest_bank_bit & 0x7) << 4 |
		    (data->macrotile_mode & 0x1) << 12;

	writel_relaxed(value, msm_mdss->mmio + UBWC_STATIC);

	if (data->ubwc_version == UBWC_3_0) {
		writel_relaxed(1, msm_mdss->mmio + UBWC_CTRL_2);
		writel_relaxed(0, msm_mdss->mmio + UBWC_PREDICTION_MODE);
	} else {
		writel_relaxed(2, msm_mdss->mmio + UBWC_CTRL_2);
		writel_relaxed(1, msm_mdss->mmio + UBWC_PREDICTION_MODE);
	}
}

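/*
 * Runtime-resume path: vote a minimum interconnect bandwidth, enable
 * the bulk clocks, then program the UBWC decoder for the detected
 * version. MDP5 devices (and DPU devices without match data) skip the
 * UBWC step entirely.
 */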
static int msm_mdss_enable(struct msm_mdss *msm_mdss)
{
	int ret;

	/*
	 * Several components have AXI clocks that can only be turned on if
	 * the interconnect is enabled (non-zero bandwidth). Let's make sure
	 * that the interconnects are at least at a minimum bandwidth.
	 */
	msm_mdss_icc_request_bw(msm_mdss, MIN_IB_BW);

	ret = clk_bulk_prepare_enable(msm_mdss->num_clocks, msm_mdss->clocks);
	if (ret) {
		dev_err(msm_mdss->dev, "clock enable failed, ret:%d\n", ret);
		return ret;
	}

	/*
	 * Register access requires MDSS_MDP_CLK, which is not enabled by the
	 * mdss on mdp5 hardware. Skip it for now.
	 */
	if (msm_mdss->is_mdp5 || !msm_mdss->mdss_data)
		return 0;

	/*
	 * The UBWC config is part of the "mdss" region, which is not
	 * accessible from the rest of the driver, so hardcode the known
	 * configurations here.
	 *
	 * The decoder version can be read from the UBWC_DEC_HW_VERSION
	 * register; UBWC_n and the rest of the parameters come from the
	 * per-SoC match data.
	 */
	switch (msm_mdss->mdss_data->ubwc_dec_version) {
	case UBWC_2_0:
		msm_mdss_setup_ubwc_dec_20(msm_mdss);
		break;
	case UBWC_3_0:
		msm_mdss_setup_ubwc_dec_30(msm_mdss);
		break;
	case UBWC_4_0:
		msm_mdss_setup_ubwc_dec_40(msm_mdss);
		break;
	default:
		dev_err(msm_mdss->dev, "Unsupported UBWC decoder version %x\n",
			msm_mdss->mdss_data->ubwc_dec_version);
		dev_err(msm_mdss->dev, "HW_REV: 0x%x\n",
			readl_relaxed(msm_mdss->mmio + HW_REV));
		dev_err(msm_mdss->dev, "UBWC_DEC_HW_VERSION: 0x%x\n",
			readl_relaxed(msm_mdss->mmio + UBWC_DEC_HW_VERSION));
		break;
	}

	return ret;
}

static int msm_mdss_disable(struct msm_mdss *msm_mdss)
{
	clk_bulk_disable_unprepare(msm_mdss->num_clocks, msm_mdss->clocks);
	msm_mdss_icc_request_bw(msm_mdss, 0);

	return 0;
}

static void msm_mdss_destroy(struct msm_mdss *msm_mdss)
{
	struct platform_device *pdev = to_platform_device(msm_mdss->dev);
	int irq;

	pm_runtime_suspend(msm_mdss->dev);
	pm_runtime_disable(msm_mdss->dev);
	irq_domain_remove(msm_mdss->irq_controller.domain);
	msm_mdss->irq_controller.domain = NULL;
	irq = platform_get_irq(pdev, 0);
	irq_set_chained_handler_and_data(irq, NULL, NULL);
}

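/*
 * The MDSS reset line is optional in the device tree; when present it
 * is pulsed once at init time, before any register access.
 */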
static int msm_mdss_reset(struct device *dev)
{
	struct reset_control *reset;

	reset = reset_control_get_optional_exclusive(dev, NULL);
	if (!reset) {
		/* Optional reset not specified */
		return 0;
	} else if (IS_ERR(reset)) {
		return dev_err_probe(dev, PTR_ERR(reset),
				     "failed to acquire mdss reset\n");
	}

	reset_control_assert(reset);
	/*
	 * Tests indicate that the reset has to be held for some period of
	 * time; make it one frame in a typical system (20 ms).
	 */
	msleep(20);
	reset_control_deassert(reset);

	reset_control_put(reset);

	return 0;
}

/*
 * MDP5 MDSS uses at most three specified clocks.
 */
#define MDP5_MDSS_NUM_CLOCKS 3
static int mdp5_mdss_parse_clock(struct platform_device *pdev, struct clk_bulk_data **clocks)
{
	struct clk_bulk_data *bulk;
	int num_clocks = 0;
	int ret;

	if (!pdev)
		return -EINVAL;

	bulk = devm_kcalloc(&pdev->dev, MDP5_MDSS_NUM_CLOCKS, sizeof(struct clk_bulk_data), GFP_KERNEL);
	if (!bulk)
		return -ENOMEM;

	bulk[num_clocks++].id = "iface";
	bulk[num_clocks++].id = "bus";
	bulk[num_clocks++].id = "vsync";

	ret = devm_clk_bulk_get_optional(&pdev->dev, num_clocks, bulk);
	if (ret)
		return ret;

	*clocks = bulk;

	return num_clocks;
}

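/*
 * A minimal sketch of the device-tree clock wiring expected by
 * mdp5_mdss_parse_clock() above. This is illustrative only: the clock
 * controller phandle and clock constants are assumptions for a generic
 * MDP5 platform, while the clock-names match the ids requested above.
 * All three clocks are requested as optional, so a platform may omit
 * any of them.
 *
 *	mdss {
 *		compatible = "qcom,mdss";
 *		clocks = <&gcc GCC_MDSS_AHB_CLK>,
 *			 <&gcc GCC_MDSS_AXI_CLK>,
 *			 <&gcc GCC_MDSS_VSYNC_CLK>;
 *		clock-names = "iface", "bus", "vsync";
 *	};
 */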
static struct msm_mdss *msm_mdss_init(struct platform_device *pdev, bool is_mdp5)
{
	struct msm_mdss *msm_mdss;
	int ret;
	int irq;

	ret = msm_mdss_reset(&pdev->dev);
	if (ret)
		return ERR_PTR(ret);

	msm_mdss = devm_kzalloc(&pdev->dev, sizeof(*msm_mdss), GFP_KERNEL);
	if (!msm_mdss)
		return ERR_PTR(-ENOMEM);

	msm_mdss->mmio = devm_platform_ioremap_resource_byname(pdev, is_mdp5 ? "mdss_phys" : "mdss");
	if (IS_ERR(msm_mdss->mmio))
		return ERR_CAST(msm_mdss->mmio);

	dev_dbg(&pdev->dev, "mapped mdss address space @%pK\n", msm_mdss->mmio);

	ret = msm_mdss_parse_data_bus_icc_path(&pdev->dev, msm_mdss);
	if (ret)
		return ERR_PTR(ret);
	ret = devm_add_action_or_reset(&pdev->dev, msm_mdss_put_icc_path, msm_mdss);
	if (ret)
		return ERR_PTR(ret);

	if (is_mdp5)
		ret = mdp5_mdss_parse_clock(pdev, &msm_mdss->clocks);
	else
		ret = devm_clk_bulk_get_all(&pdev->dev, &msm_mdss->clocks);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to parse clocks, ret=%d\n", ret);
		return ERR_PTR(ret);
	}
	msm_mdss->num_clocks = ret;
	msm_mdss->is_mdp5 = is_mdp5;

	msm_mdss->dev = &pdev->dev;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return ERR_PTR(irq);

	ret = _msm_mdss_irq_domain_add(msm_mdss);
	if (ret)
		return ERR_PTR(ret);

	irq_set_chained_handler_and_data(irq, msm_mdss_irq,
					 msm_mdss);

	pm_runtime_enable(&pdev->dev);

	return msm_mdss;
}

static int __maybe_unused mdss_runtime_suspend(struct device *dev)
{
	struct msm_mdss *mdss = dev_get_drvdata(dev);

	DBG("");

	return msm_mdss_disable(mdss);
}

static int __maybe_unused mdss_runtime_resume(struct device *dev)
{
	struct msm_mdss *mdss = dev_get_drvdata(dev);

	DBG("");

	return msm_mdss_enable(mdss);
}

static int __maybe_unused mdss_pm_suspend(struct device *dev)
{
	if (pm_runtime_suspended(dev))
		return 0;

	return mdss_runtime_suspend(dev);
}

static int __maybe_unused mdss_pm_resume(struct device *dev)
{
	if (pm_runtime_suspended(dev))
		return 0;

	return mdss_runtime_resume(dev);
}

static const struct dev_pm_ops mdss_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mdss_pm_suspend, mdss_pm_resume)
	SET_RUNTIME_PM_OPS(mdss_runtime_suspend, mdss_runtime_resume, NULL)
};

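/*
 * Probe: legacy MDP5 devices are identified by the bare "qcom,mdss"
 * compatible; everything else is treated as DPU and picks up its UBWC
 * parameters from the match data tables below.
 */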
static int mdss_probe(struct platform_device *pdev)
{
	struct msm_mdss *mdss;
	bool is_mdp5 = of_device_is_compatible(pdev->dev.of_node, "qcom,mdss");
	struct device *dev = &pdev->dev;
	int ret;

	mdss = msm_mdss_init(pdev, is_mdp5);
	if (IS_ERR(mdss))
		return PTR_ERR(mdss);

	mdss->mdss_data = of_device_get_match_data(&pdev->dev);

	platform_set_drvdata(pdev, mdss);

	/*
	 * MDP5/DPU based devices don't have a flat hierarchy. There is a top
	 * level parent, MDSS, with children such as MDP5/DPU, DSI, HDMI and
	 * eDP. Populate the children devices, find the MDP5/DPU node, and
	 * then add the interfaces to our components list.
	 */
	ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
	if (ret) {
		DRM_DEV_ERROR(dev, "failed to populate children devices\n");
		msm_mdss_destroy(mdss);
		return ret;
	}

	return 0;
}

static int mdss_remove(struct platform_device *pdev)
{
	struct msm_mdss *mdss = platform_get_drvdata(pdev);

	of_platform_depopulate(&pdev->dev);

	msm_mdss_destroy(mdss);

	return 0;
}

static const struct msm_mdss_data sc7180_data = {
	.ubwc_version = UBWC_2_0,
	.ubwc_dec_version = UBWC_2_0,
	.ubwc_static = 0x1e,
};

static const struct msm_mdss_data sc7280_data = {
	.ubwc_version = UBWC_3_0,
	.ubwc_dec_version = UBWC_4_0,
	.ubwc_swizzle = 6,
	.ubwc_static = 1,
	.highest_bank_bit = 1,
	.macrotile_mode = 1,
};

static const struct msm_mdss_data sc8180x_data = {
	.ubwc_version = UBWC_3_0,
	.ubwc_dec_version = UBWC_3_0,
	.highest_bank_bit = 3,
	.macrotile_mode = 1,
};

static const struct msm_mdss_data sc8280xp_data = {
	.ubwc_version = UBWC_4_0,
	.ubwc_dec_version = UBWC_4_0,
	.ubwc_swizzle = 6,
	.ubwc_static = 1,
	.highest_bank_bit = 2,
	.macrotile_mode = 1,
};

static const struct msm_mdss_data sdm845_data = {
	.ubwc_version = UBWC_2_0,
	.ubwc_dec_version = UBWC_2_0,
	.highest_bank_bit = 2,
};

static const struct msm_mdss_data sm6350_data = {
	.ubwc_version = UBWC_2_0,
	.ubwc_dec_version = UBWC_2_0,
	.ubwc_swizzle = 6,
	.ubwc_static = 0x1e,
	.highest_bank_bit = 1,
};

static const struct msm_mdss_data sm8150_data = {
	.ubwc_version = UBWC_3_0,
	.ubwc_dec_version = UBWC_3_0,
	.highest_bank_bit = 2,
};

static const struct msm_mdss_data sm6115_data = {
	.ubwc_version = UBWC_1_0,
	.ubwc_dec_version = UBWC_2_0,
	.ubwc_swizzle = 7,
	.ubwc_static = 0x11f,
};

static const struct msm_mdss_data sm8250_data = {
	.ubwc_version = UBWC_4_0,
	.ubwc_dec_version = UBWC_4_0,
	.ubwc_swizzle = 6,
	.ubwc_static = 1,
	/* TODO: highest_bank_bit = 2 for LP_DDR4 */
	.highest_bank_bit = 3,
	.macrotile_mode = 1,
};

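/*
 * Entries without .data ("qcom,mdss", msm8998, qcm2290) leave
 * mdss_data NULL, so msm_mdss_enable() skips the UBWC decoder
 * programming for them. Several later SoCs share sm8250_data.
 */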
static const struct of_device_id mdss_dt_match[] = {
	{ .compatible = "qcom,mdss" },
	{ .compatible = "qcom,msm8998-mdss" },
	{ .compatible = "qcom,qcm2290-mdss" },
	{ .compatible = "qcom,sdm845-mdss", .data = &sdm845_data },
	{ .compatible = "qcom,sc7180-mdss", .data = &sc7180_data },
	{ .compatible = "qcom,sc7280-mdss", .data = &sc7280_data },
	{ .compatible = "qcom,sc8180x-mdss", .data = &sc8180x_data },
	{ .compatible = "qcom,sc8280xp-mdss", .data = &sc8280xp_data },
	{ .compatible = "qcom,sm6115-mdss", .data = &sm6115_data },
	{ .compatible = "qcom,sm6350-mdss", .data = &sm6350_data },
	{ .compatible = "qcom,sm6375-mdss", .data = &sm6350_data },
	{ .compatible = "qcom,sm8150-mdss", .data = &sm8150_data },
	{ .compatible = "qcom,sm8250-mdss", .data = &sm8250_data },
	{ .compatible = "qcom,sm8350-mdss", .data = &sm8250_data },
	{ .compatible = "qcom,sm8450-mdss", .data = &sm8250_data },
	{ .compatible = "qcom,sm8550-mdss", .data = &sm8250_data },
	{}
};
MODULE_DEVICE_TABLE(of, mdss_dt_match);

static struct platform_driver mdss_platform_driver = {
	.probe      = mdss_probe,
	.remove     = mdss_remove,
	.driver     = {
		.name   = "msm-mdss",
		.of_match_table = mdss_dt_match,
		.pm     = &mdss_pm_ops,
	},
};

void __init msm_mdss_register(void)
{
	platform_driver_register(&mdss_platform_driver);
}

void __exit msm_mdss_unregister(void)
{
	platform_driver_unregister(&mdss_platform_driver);
}