1 /* 2 * SPDX-License-Identifier: GPL-2.0 3 * Copyright (c) 2018, The Linux Foundation 4 */ 5 6 #include <linux/clk.h> 7 #include <linux/delay.h> 8 #include <linux/irq.h> 9 #include <linux/irqchip.h> 10 #include <linux/irqdesc.h> 11 #include <linux/irqchip/chained_irq.h> 12 #include <linux/pm_runtime.h> 13 #include <linux/reset.h> 14 15 #include "msm_drv.h" 16 #include "msm_kms.h" 17 18 /* for DPU_HW_* defines */ 19 #include "disp/dpu1/dpu_hw_catalog.h" 20 21 #define HW_REV 0x0 22 #define HW_INTR_STATUS 0x0010 23 24 #define UBWC_STATIC 0x144 25 #define UBWC_CTRL_2 0x150 26 #define UBWC_PREDICTION_MODE 0x154 27 28 struct msm_mdss { 29 struct device *dev; 30 31 void __iomem *mmio; 32 struct clk_bulk_data *clocks; 33 size_t num_clocks; 34 bool is_mdp5; 35 struct { 36 unsigned long enabled_mask; 37 struct irq_domain *domain; 38 } irq_controller; 39 }; 40 41 static void msm_mdss_irq(struct irq_desc *desc) 42 { 43 struct msm_mdss *msm_mdss = irq_desc_get_handler_data(desc); 44 struct irq_chip *chip = irq_desc_get_chip(desc); 45 u32 interrupts; 46 47 chained_irq_enter(chip, desc); 48 49 interrupts = readl_relaxed(msm_mdss->mmio + HW_INTR_STATUS); 50 51 while (interrupts) { 52 irq_hw_number_t hwirq = fls(interrupts) - 1; 53 int rc; 54 55 rc = generic_handle_domain_irq(msm_mdss->irq_controller.domain, 56 hwirq); 57 if (rc < 0) { 58 dev_err(msm_mdss->dev, "handle irq fail: irq=%lu rc=%d\n", 59 hwirq, rc); 60 break; 61 } 62 63 interrupts &= ~(1 << hwirq); 64 } 65 66 chained_irq_exit(chip, desc); 67 } 68 69 static void msm_mdss_irq_mask(struct irq_data *irqd) 70 { 71 struct msm_mdss *msm_mdss = irq_data_get_irq_chip_data(irqd); 72 73 /* memory barrier */ 74 smp_mb__before_atomic(); 75 clear_bit(irqd->hwirq, &msm_mdss->irq_controller.enabled_mask); 76 /* memory barrier */ 77 smp_mb__after_atomic(); 78 } 79 80 static void msm_mdss_irq_unmask(struct irq_data *irqd) 81 { 82 struct msm_mdss *msm_mdss = irq_data_get_irq_chip_data(irqd); 83 84 /* memory barrier */ 85 smp_mb__before_atomic(); 86 set_bit(irqd->hwirq, &msm_mdss->irq_controller.enabled_mask); 87 /* memory barrier */ 88 smp_mb__after_atomic(); 89 } 90 91 static struct irq_chip msm_mdss_irq_chip = { 92 .name = "msm_mdss", 93 .irq_mask = msm_mdss_irq_mask, 94 .irq_unmask = msm_mdss_irq_unmask, 95 }; 96 97 static struct lock_class_key msm_mdss_lock_key, msm_mdss_request_key; 98 99 static int msm_mdss_irqdomain_map(struct irq_domain *domain, 100 unsigned int irq, irq_hw_number_t hwirq) 101 { 102 struct msm_mdss *msm_mdss = domain->host_data; 103 104 irq_set_lockdep_class(irq, &msm_mdss_lock_key, &msm_mdss_request_key); 105 irq_set_chip_and_handler(irq, &msm_mdss_irq_chip, handle_level_irq); 106 107 return irq_set_chip_data(irq, msm_mdss); 108 } 109 110 static const struct irq_domain_ops msm_mdss_irqdomain_ops = { 111 .map = msm_mdss_irqdomain_map, 112 .xlate = irq_domain_xlate_onecell, 113 }; 114 115 static int _msm_mdss_irq_domain_add(struct msm_mdss *msm_mdss) 116 { 117 struct device *dev; 118 struct irq_domain *domain; 119 120 dev = msm_mdss->dev; 121 122 domain = irq_domain_add_linear(dev->of_node, 32, 123 &msm_mdss_irqdomain_ops, msm_mdss); 124 if (!domain) { 125 dev_err(dev, "failed to add irq_domain\n"); 126 return -EINVAL; 127 } 128 129 msm_mdss->irq_controller.enabled_mask = 0; 130 msm_mdss->irq_controller.domain = domain; 131 132 return 0; 133 } 134 135 static int msm_mdss_enable(struct msm_mdss *msm_mdss) 136 { 137 int ret; 138 139 ret = clk_bulk_prepare_enable(msm_mdss->num_clocks, msm_mdss->clocks); 140 if (ret) { 141 dev_err(msm_mdss->dev, "clock enable failed, ret:%d\n", ret); 142 return ret; 143 } 144 145 /* 146 * HW_REV requires MDSS_MDP_CLK, which is not enabled by the mdss on 147 * mdp5 hardware. Skip reading it for now. 148 */ 149 if (msm_mdss->is_mdp5) 150 return 0; 151 152 /* 153 * ubwc config is part of the "mdss" region which is not accessible 154 * from the rest of the driver. hardcode known configurations here 155 */ 156 switch (readl_relaxed(msm_mdss->mmio + HW_REV)) { 157 case DPU_HW_VER_500: 158 case DPU_HW_VER_501: 159 writel_relaxed(0x420, msm_mdss->mmio + UBWC_STATIC); 160 break; 161 case DPU_HW_VER_600: 162 /* TODO: 0x102e for LP_DDR4 */ 163 writel_relaxed(0x103e, msm_mdss->mmio + UBWC_STATIC); 164 writel_relaxed(2, msm_mdss->mmio + UBWC_CTRL_2); 165 writel_relaxed(1, msm_mdss->mmio + UBWC_PREDICTION_MODE); 166 break; 167 case DPU_HW_VER_620: 168 writel_relaxed(0x1e, msm_mdss->mmio + UBWC_STATIC); 169 break; 170 case DPU_HW_VER_720: 171 writel_relaxed(0x101e, msm_mdss->mmio + UBWC_STATIC); 172 break; 173 } 174 175 return ret; 176 } 177 178 static int msm_mdss_disable(struct msm_mdss *msm_mdss) 179 { 180 clk_bulk_disable_unprepare(msm_mdss->num_clocks, msm_mdss->clocks); 181 182 return 0; 183 } 184 185 static void msm_mdss_destroy(struct msm_mdss *msm_mdss) 186 { 187 struct platform_device *pdev = to_platform_device(msm_mdss->dev); 188 int irq; 189 190 pm_runtime_suspend(msm_mdss->dev); 191 pm_runtime_disable(msm_mdss->dev); 192 irq_domain_remove(msm_mdss->irq_controller.domain); 193 msm_mdss->irq_controller.domain = NULL; 194 irq = platform_get_irq(pdev, 0); 195 irq_set_chained_handler_and_data(irq, NULL, NULL); 196 } 197 198 static int msm_mdss_reset(struct device *dev) 199 { 200 struct reset_control *reset; 201 202 reset = reset_control_get_optional_exclusive(dev, NULL); 203 if (!reset) { 204 /* Optional reset not specified */ 205 return 0; 206 } else if (IS_ERR(reset)) { 207 return dev_err_probe(dev, PTR_ERR(reset), 208 "failed to acquire mdss reset\n"); 209 } 210 211 reset_control_assert(reset); 212 /* 213 * Tests indicate that reset has to be held for some period of time, 214 * make it one frame in a typical system 215 */ 216 msleep(20); 217 reset_control_deassert(reset); 218 219 reset_control_put(reset); 220 221 return 0; 222 } 223 224 /* 225 * MDP5 MDSS uses at most three specified clocks. 226 */ 227 #define MDP5_MDSS_NUM_CLOCKS 3 228 static int mdp5_mdss_parse_clock(struct platform_device *pdev, struct clk_bulk_data **clocks) 229 { 230 struct clk_bulk_data *bulk; 231 int num_clocks = 0; 232 int ret; 233 234 if (!pdev) 235 return -EINVAL; 236 237 bulk = devm_kcalloc(&pdev->dev, MDP5_MDSS_NUM_CLOCKS, sizeof(struct clk_bulk_data), GFP_KERNEL); 238 if (!bulk) 239 return -ENOMEM; 240 241 bulk[num_clocks++].id = "iface"; 242 bulk[num_clocks++].id = "bus"; 243 bulk[num_clocks++].id = "vsync"; 244 245 ret = devm_clk_bulk_get_optional(&pdev->dev, num_clocks, bulk); 246 if (ret) 247 return ret; 248 249 *clocks = bulk; 250 251 return num_clocks; 252 } 253 254 static struct msm_mdss *msm_mdss_init(struct platform_device *pdev, bool is_mdp5) 255 { 256 struct msm_mdss *msm_mdss; 257 int ret; 258 int irq; 259 260 ret = msm_mdss_reset(&pdev->dev); 261 if (ret) 262 return ERR_PTR(ret); 263 264 msm_mdss = devm_kzalloc(&pdev->dev, sizeof(*msm_mdss), GFP_KERNEL); 265 if (!msm_mdss) 266 return ERR_PTR(-ENOMEM); 267 268 msm_mdss->mmio = devm_platform_ioremap_resource_byname(pdev, is_mdp5 ? "mdss_phys" : "mdss"); 269 if (IS_ERR(msm_mdss->mmio)) 270 return ERR_CAST(msm_mdss->mmio); 271 272 dev_dbg(&pdev->dev, "mapped mdss address space @%pK\n", msm_mdss->mmio); 273 274 if (is_mdp5) 275 ret = mdp5_mdss_parse_clock(pdev, &msm_mdss->clocks); 276 else 277 ret = devm_clk_bulk_get_all(&pdev->dev, &msm_mdss->clocks); 278 if (ret < 0) { 279 dev_err(&pdev->dev, "failed to parse clocks, ret=%d\n", ret); 280 return ERR_PTR(ret); 281 } 282 msm_mdss->num_clocks = ret; 283 msm_mdss->is_mdp5 = is_mdp5; 284 285 msm_mdss->dev = &pdev->dev; 286 287 irq = platform_get_irq(pdev, 0); 288 if (irq < 0) 289 return ERR_PTR(irq); 290 291 ret = _msm_mdss_irq_domain_add(msm_mdss); 292 if (ret) 293 return ERR_PTR(ret); 294 295 irq_set_chained_handler_and_data(irq, msm_mdss_irq, 296 msm_mdss); 297 298 pm_runtime_enable(&pdev->dev); 299 300 return msm_mdss; 301 } 302 303 static int __maybe_unused mdss_runtime_suspend(struct device *dev) 304 { 305 struct msm_mdss *mdss = dev_get_drvdata(dev); 306 307 DBG(""); 308 309 return msm_mdss_disable(mdss); 310 } 311 312 static int __maybe_unused mdss_runtime_resume(struct device *dev) 313 { 314 struct msm_mdss *mdss = dev_get_drvdata(dev); 315 316 DBG(""); 317 318 return msm_mdss_enable(mdss); 319 } 320 321 static int __maybe_unused mdss_pm_suspend(struct device *dev) 322 { 323 324 if (pm_runtime_suspended(dev)) 325 return 0; 326 327 return mdss_runtime_suspend(dev); 328 } 329 330 static int __maybe_unused mdss_pm_resume(struct device *dev) 331 { 332 if (pm_runtime_suspended(dev)) 333 return 0; 334 335 return mdss_runtime_resume(dev); 336 } 337 338 static const struct dev_pm_ops mdss_pm_ops = { 339 SET_SYSTEM_SLEEP_PM_OPS(mdss_pm_suspend, mdss_pm_resume) 340 SET_RUNTIME_PM_OPS(mdss_runtime_suspend, mdss_runtime_resume, NULL) 341 }; 342 343 static int mdss_probe(struct platform_device *pdev) 344 { 345 struct msm_mdss *mdss; 346 bool is_mdp5 = of_device_is_compatible(pdev->dev.of_node, "qcom,mdss"); 347 struct device *dev = &pdev->dev; 348 int ret; 349 350 mdss = msm_mdss_init(pdev, is_mdp5); 351 if (IS_ERR(mdss)) 352 return PTR_ERR(mdss); 353 354 platform_set_drvdata(pdev, mdss); 355 356 /* 357 * MDP5/DPU based devices don't have a flat hierarchy. There is a top 358 * level parent: MDSS, and children: MDP5/DPU, DSI, HDMI, eDP etc. 359 * Populate the children devices, find the MDP5/DPU node, and then add 360 * the interfaces to our components list. 361 */ 362 ret = of_platform_populate(dev->of_node, NULL, NULL, dev); 363 if (ret) { 364 DRM_DEV_ERROR(dev, "failed to populate children devices\n"); 365 msm_mdss_destroy(mdss); 366 return ret; 367 } 368 369 return 0; 370 } 371 372 static int mdss_remove(struct platform_device *pdev) 373 { 374 struct msm_mdss *mdss = platform_get_drvdata(pdev); 375 376 of_platform_depopulate(&pdev->dev); 377 378 msm_mdss_destroy(mdss); 379 380 return 0; 381 } 382 383 static const struct of_device_id mdss_dt_match[] = { 384 { .compatible = "qcom,mdss" }, 385 { .compatible = "qcom,msm8998-mdss" }, 386 { .compatible = "qcom,qcm2290-mdss" }, 387 { .compatible = "qcom,sdm845-mdss" }, 388 { .compatible = "qcom,sc7180-mdss" }, 389 { .compatible = "qcom,sc7280-mdss" }, 390 { .compatible = "qcom,sc8180x-mdss" }, 391 { .compatible = "qcom,sm8150-mdss" }, 392 { .compatible = "qcom,sm8250-mdss" }, 393 {} 394 }; 395 MODULE_DEVICE_TABLE(of, mdss_dt_match); 396 397 static struct platform_driver mdss_platform_driver = { 398 .probe = mdss_probe, 399 .remove = mdss_remove, 400 .driver = { 401 .name = "msm-mdss", 402 .of_match_table = mdss_dt_match, 403 .pm = &mdss_pm_ops, 404 }, 405 }; 406 407 void __init msm_mdss_register(void) 408 { 409 platform_driver_register(&mdss_platform_driver); 410 } 411 412 void __exit msm_mdss_unregister(void) 413 { 414 platform_driver_unregister(&mdss_platform_driver); 415 } 416