/*
 * SPDX-License-Identifier: GPL-2.0
 * Copyright (c) 2018, The Linux Foundation
 */

#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdesc.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/pm_runtime.h>

/* for DPU_HW_* defines */
#include "disp/dpu1/dpu_hw_catalog.h"

#define HW_REV				0x0
#define HW_INTR_STATUS			0x0010

#define UBWC_STATIC			0x144
#define UBWC_CTRL_2			0x150
#define UBWC_PREDICTION_MODE		0x154

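/*
 * struct msm_mdss - per-device MDSS state
 * @dev:            the MDSS platform device
 * @mmio:           mapped MDSS register region ("mdss_phys" on mdp5,
 *                  "mdss" on DPU hardware)
 * @clocks:         bulk clock handles enabled by msm_mdss_enable()
 * @num_clocks:     number of entries in @clocks
 * @is_mdp5:        true when running on MDP5-style hardware
 * @irq_controller: irq domain and bookkeeping used to demux the top-level
 *                  MDSS interrupt
 */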
struct msm_mdss {
	struct device *dev;

	void __iomem *mmio;
	struct clk_bulk_data *clocks;
	size_t num_clocks;
	bool is_mdp5;
	struct {
		unsigned long enabled_mask;
		struct irq_domain *domain;
	} irq_controller;
};

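/*
 * Chained handler for the top-level MDSS interrupt: read the pending bits
 * from HW_INTR_STATUS and dispatch each one to the corresponding interrupt
 * in the local irq domain, highest bit first.
 */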
static void msm_mdss_irq(struct irq_desc *desc)
{
	struct msm_mdss *msm_mdss = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	u32 interrupts;

	chained_irq_enter(chip, desc);

	interrupts = readl_relaxed(msm_mdss->mmio + HW_INTR_STATUS);

	while (interrupts) {
		irq_hw_number_t hwirq = fls(interrupts) - 1;
		int rc;

		rc = generic_handle_domain_irq(msm_mdss->irq_controller.domain,
					       hwirq);
		if (rc < 0) {
			dev_err(msm_mdss->dev, "handle irq fail: irq=%lu rc=%d\n",
				  hwirq, rc);
			break;
		}

		interrupts &= ~(1 << hwirq);
	}

	chained_irq_exit(chip, desc);
}

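/*
 * The mask/unmask callbacks are pure bookkeeping: they only record the
 * enabled interrupts in irq_controller.enabled_mask and do not program any
 * hardware register.
 */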
static void msm_mdss_irq_mask(struct irq_data *irqd)
{
	struct msm_mdss *msm_mdss = irq_data_get_irq_chip_data(irqd);

	/* memory barrier */
	smp_mb__before_atomic();
	clear_bit(irqd->hwirq, &msm_mdss->irq_controller.enabled_mask);
	/* memory barrier */
	smp_mb__after_atomic();
}

static void msm_mdss_irq_unmask(struct irq_data *irqd)
{
	struct msm_mdss *msm_mdss = irq_data_get_irq_chip_data(irqd);

	/* memory barrier */
	smp_mb__before_atomic();
	set_bit(irqd->hwirq, &msm_mdss->irq_controller.enabled_mask);
	/* memory barrier */
	smp_mb__after_atomic();
}

static struct irq_chip msm_mdss_irq_chip = {
	.name = "msm_mdss",
	.irq_mask = msm_mdss_irq_mask,
	.irq_unmask = msm_mdss_irq_unmask,
};

static struct lock_class_key msm_mdss_lock_key, msm_mdss_request_key;

static int msm_mdss_irqdomain_map(struct irq_domain *domain,
		unsigned int irq, irq_hw_number_t hwirq)
{
	struct msm_mdss *msm_mdss = domain->host_data;

	irq_set_lockdep_class(irq, &msm_mdss_lock_key, &msm_mdss_request_key);
	irq_set_chip_and_handler(irq, &msm_mdss_irq_chip, handle_level_irq);

	return irq_set_chip_data(irq, msm_mdss);
}

static const struct irq_domain_ops msm_mdss_irqdomain_ops = {
	.map = msm_mdss_irqdomain_map,
	.xlate = irq_domain_xlate_onecell,
};

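/*
 * Register a linear irq domain covering the 32 bits of HW_INTR_STATUS, so
 * that individual MDSS interrupt lines can be mapped by bit position.
 */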
static int _msm_mdss_irq_domain_add(struct msm_mdss *msm_mdss)
{
	struct device *dev;
	struct irq_domain *domain;

	dev = msm_mdss->dev;

	domain = irq_domain_add_linear(dev->of_node, 32,
			&msm_mdss_irqdomain_ops, msm_mdss);
	if (!domain) {
		dev_err(dev, "failed to add irq_domain\n");
		return -EINVAL;
	}

	msm_mdss->irq_controller.enabled_mask = 0;
	msm_mdss->irq_controller.domain = domain;

	return 0;
}

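/*
 * Enable the MDSS clocks and, on DPU hardware, program the UBWC decoder
 * configuration matching the revision reported by HW_REV.
 */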
int msm_mdss_enable(struct msm_mdss *msm_mdss)
{
	int ret;

	ret = clk_bulk_prepare_enable(msm_mdss->num_clocks, msm_mdss->clocks);
	if (ret) {
		dev_err(msm_mdss->dev, "clock enable failed, ret:%d\n", ret);
		return ret;
	}

	/*
	 * HW_REV requires MDSS_MDP_CLK, which is not enabled by the mdss on
	 * mdp5 hardware. Skip reading it for now.
	 */
	if (msm_mdss->is_mdp5)
		return 0;

	/*
	 * ubwc config is part of the "mdss" region which is not accessible
	 * from the rest of the driver. hardcode known configurations here
	 */
	switch (readl_relaxed(msm_mdss->mmio + HW_REV)) {
	case DPU_HW_VER_500:
	case DPU_HW_VER_501:
		writel_relaxed(0x420, msm_mdss->mmio + UBWC_STATIC);
		break;
	case DPU_HW_VER_600:
		/* TODO: 0x102e for LP_DDR4 */
		writel_relaxed(0x103e, msm_mdss->mmio + UBWC_STATIC);
		writel_relaxed(2, msm_mdss->mmio + UBWC_CTRL_2);
		writel_relaxed(1, msm_mdss->mmio + UBWC_PREDICTION_MODE);
		break;
	case DPU_HW_VER_620:
		writel_relaxed(0x1e, msm_mdss->mmio + UBWC_STATIC);
		break;
	case DPU_HW_VER_720:
		writel_relaxed(0x101e, msm_mdss->mmio + UBWC_STATIC);
		break;
	}

	return ret;
}

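/* Counterpart of msm_mdss_enable(): release the MDSS clocks. */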
int msm_mdss_disable(struct msm_mdss *msm_mdss)
{
	clk_bulk_disable_unprepare(msm_mdss->num_clocks, msm_mdss->clocks);

	return 0;
}

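/*
 * Tear down in roughly the reverse order of msm_mdss_init(): disable runtime
 * PM, remove the irq domain and detach the chained handler from the
 * top-level interrupt.
 */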
void msm_mdss_destroy(struct msm_mdss *msm_mdss)
{
	struct platform_device *pdev = to_platform_device(msm_mdss->dev);
	int irq;

	pm_runtime_suspend(msm_mdss->dev);
	pm_runtime_disable(msm_mdss->dev);
	irq_domain_remove(msm_mdss->irq_controller.domain);
	msm_mdss->irq_controller.domain = NULL;
	irq = platform_get_irq(pdev, 0);
	irq_set_chained_handler_and_data(irq, NULL, NULL);
}

/*
 * MDP5 MDSS uses at most three specified clocks.
 */
#define MDP5_MDSS_NUM_CLOCKS 3
static int mdp5_mdss_parse_clock(struct platform_device *pdev, struct clk_bulk_data **clocks)
{
	struct clk_bulk_data *bulk;
	int num_clocks = 0;
	int ret;

	if (!pdev)
		return -EINVAL;

	bulk = devm_kcalloc(&pdev->dev, MDP5_MDSS_NUM_CLOCKS, sizeof(struct clk_bulk_data), GFP_KERNEL);
	if (!bulk)
		return -ENOMEM;

	bulk[num_clocks++].id = "iface";
	bulk[num_clocks++].id = "bus";
	bulk[num_clocks++].id = "vsync";

	ret = devm_clk_bulk_get_optional(&pdev->dev, num_clocks, bulk);
	if (ret)
		return ret;

	*clocks = bulk;

	return num_clocks;
}

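/*
 * Map the MDSS register space, look up the clocks (a fixed set on mdp5, all
 * of the device's clocks otherwise), register the irq domain and chained
 * handler, and enable runtime PM. Returns the new msm_mdss or an ERR_PTR()
 * on failure.
 */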
struct msm_mdss *msm_mdss_init(struct platform_device *pdev, bool is_mdp5)
{
	struct msm_mdss *msm_mdss;
	int ret;
	int irq;

	msm_mdss = devm_kzalloc(&pdev->dev, sizeof(*msm_mdss), GFP_KERNEL);
	if (!msm_mdss)
		return ERR_PTR(-ENOMEM);

	msm_mdss->mmio = devm_platform_ioremap_resource_byname(pdev, is_mdp5 ? "mdss_phys" : "mdss");
	if (IS_ERR(msm_mdss->mmio))
		return ERR_CAST(msm_mdss->mmio);

	dev_dbg(&pdev->dev, "mapped mdss address space @%pK\n", msm_mdss->mmio);

	if (is_mdp5)
		ret = mdp5_mdss_parse_clock(pdev, &msm_mdss->clocks);
	else
		ret = devm_clk_bulk_get_all(&pdev->dev, &msm_mdss->clocks);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to parse clocks, ret=%d\n", ret);
		return ERR_PTR(ret);
	}
	msm_mdss->num_clocks = ret;
	msm_mdss->is_mdp5 = is_mdp5;

	msm_mdss->dev = &pdev->dev;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return ERR_PTR(irq);

	ret = _msm_mdss_irq_domain_add(msm_mdss);
	if (ret)
		return ERR_PTR(ret);

	irq_set_chained_handler_and_data(irq, msm_mdss_irq,
					 msm_mdss);

	pm_runtime_enable(&pdev->dev);

	return msm_mdss;
}