/*
 * (C) COPYRIGHT 2012-2013 ARM Limited. All rights reserved.
 *
 * Parts of this file were based on sources as follows:
 *
 * Copyright (c) 2006-2008 Intel Corporation
 * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
 * Copyright (C) 2011 Texas Instruments
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms of
 * such GNU licence.
 *
 */

/**
 * DOC: ARM PrimeCell PL111 CLCD Driver
 *
 * The PL111 is a simple LCD controller that can support TFT and STN
 * displays. This driver exposes a standard KMS interface for them.
 *
 * This driver uses the same Device Tree binding as the fbdev CLCD
 * driver. While the fbdev driver handles any panel connected to the
 * CLCD internally, within the CLCD driver itself, in DRM the panels
 * get split out to drivers/gpu/drm/panel/. This means that, in
 * converting from using fbdev to using DRM, you also need to write
 * a panel driver (which may be as simple as an entry in
 * panel-simple.c; see the sketch at the end of this comment).
 *
 * The driver currently doesn't expose the cursor. The DRM API for
 * cursors requires support for 64x64 ARGB8888 cursor images, while
 * the hardware can only support 64x64 monochrome cursors with
 * masking. While one could imagine trying to hack something together
 * to look at the ARGB8888 image and program something reasonable in
 * monochrome, we just don't expose the cursor at all, and instead
 * leave cursor support to the X11 software cursor layer.
 *
 * TODO:
 *
 * - Fix race between setting plane base address and getting IRQ for
 *   vsync firing the pageflip completion.
 *
 * - Use the "max-memory-bandwidth" DT property to filter the
 *   supported formats.
 *
 * - Read back hardware state at boot to skip reprogramming the
 *   hardware when doing a no-op modeset.
 *
 * - Use the CLKSEL bit to support switching between the two external
 *   clock parents.
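 *
 * As a rough sketch (the compatible string, timings and physical size
 * below are made up for illustration, not taken from any real panel),
 * such a panel-simple.c entry looks something like:
 *
 *   static const struct drm_display_mode foo_vga_mode = {
 *           .clock = 25175,
 *           .hdisplay = 640,
 *           .hsync_start = 656,
 *           .hsync_end = 752,
 *           .htotal = 800,
 *           .vdisplay = 480,
 *           .vsync_start = 490,
 *           .vsync_end = 492,
 *           .vtotal = 525,
 *           .vrefresh = 60,
 *   };
 *
 *   static const struct panel_desc foo_vga = {
 *           .modes = &foo_vga_mode,
 *           .num_modes = 1,
 *           .bpc = 6,
 *           .size = {
 *                   .width = 115,
 *                   .height = 86,
 *           },
 *   };
 *
 * plus a { .compatible = "foo,vga-panel", .data = &foo_vga } entry in
 * panel-simple's platform_of_match[] table. The board DT then wires
 * the CLCD port to the panel through an of-graph endpoint, which is
 * what pl111_modeset_init() walks below.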
 */

#include <linux/amba/bus.h>
#include <linux/amba/clcd-regs.h>
#include <linux/version.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/of_reserved_mem.h>

#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_bridge.h>
#include <drm/drm_panel.h>

#include "pl111_drm.h"
#include "pl111_versatile.h"

#define DRIVER_DESC      "DRM module for PL111"

static const struct drm_mode_config_funcs mode_config_funcs = {
	.fb_create = drm_gem_fb_create,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};

static int pl111_modeset_init(struct drm_device *dev)
{
	struct drm_mode_config *mode_config;
	struct pl111_drm_dev_private *priv = dev->dev_private;
	struct device_node *np = dev->dev->of_node;
	struct device_node *remote;
	struct drm_panel *panel = NULL;
	struct drm_bridge *bridge = NULL;
	bool defer = false;
	int ret = 0;
	int i;

	drm_mode_config_init(dev);
	mode_config = &dev->mode_config;
	mode_config->funcs = &mode_config_funcs;
	mode_config->min_width = 1;
	mode_config->max_width = 1024;
	mode_config->min_height = 1;
	mode_config->max_height = 768;

	i = 0;
	for_each_endpoint_of_node(np, remote) {
		struct drm_panel *tmp_panel;
		struct drm_bridge *tmp_bridge;

		dev_dbg(dev->dev, "checking endpoint %d\n", i);

		ret = drm_of_find_panel_or_bridge(dev->dev->of_node,
						  0, i,
						  &tmp_panel,
						  &tmp_bridge);
		if (ret) {
			if (ret == -EPROBE_DEFER) {
				/*
				 * Something deferred, but that is often just
				 * another way of saying -ENODEV, so just
				 * cast a vote for later deferral.
				 */
				defer = true;
			} else if (ret != -ENODEV) {
				/* Continue, maybe something else is working */
				dev_err(dev->dev,
					"endpoint %d returns %d\n", i, ret);
			}
		}

		if (tmp_panel) {
			dev_info(dev->dev,
				 "found panel on endpoint %d\n", i);
			panel = tmp_panel;
		}
		if (tmp_bridge) {
			dev_info(dev->dev,
				 "found bridge on endpoint %d\n", i);
			bridge = tmp_bridge;
		}

		i++;
	}

	/*
	 * If we found neither a panel nor a bridge on any of the
	 * endpoints, and any of them returned -EPROBE_DEFER, then
	 * let's defer this driver too.
	 */
	if ((!panel && !bridge) && defer)
		return -EPROBE_DEFER;

	if (panel) {
		bridge = drm_panel_bridge_add(panel,
					      DRM_MODE_CONNECTOR_Unknown);
		if (IS_ERR(bridge)) {
			ret = PTR_ERR(bridge);
			goto out_config;
		}
	} else if (bridge) {
		dev_info(dev->dev, "Using non-panel bridge\n");
	} else {
		dev_err(dev->dev, "No bridge, exiting\n");
		return -ENODEV;
	}

	priv->bridge = bridge;
	if (panel) {
		priv->panel = panel;
		priv->connector = panel->connector;
	}

	ret = pl111_display_init(dev);
	if (ret != 0) {
		dev_err(dev->dev, "Failed to init display\n");
		goto out_bridge;
	}

	ret = drm_simple_display_pipe_attach_bridge(&priv->pipe,
						    bridge);
	if (ret)
		return ret;

	if (!priv->variant->broken_vblank) {
		ret = drm_vblank_init(dev, 1);
		if (ret != 0) {
			dev_err(dev->dev, "Failed to init vblank\n");
			goto out_bridge;
		}
	}

	drm_mode_config_reset(dev);

	drm_fb_cma_fbdev_init(dev, priv->variant->fb_bpp, 0);

	drm_kms_helper_poll_init(dev);

	goto finish;

out_bridge:
	if (panel)
		drm_panel_bridge_remove(bridge);
out_config:
	drm_mode_config_cleanup(dev);
finish:
	return ret;
}

static struct drm_gem_object *
pl111_gem_import_sg_table(struct drm_device *dev,
			  struct dma_buf_attachment *attach,
			  struct sg_table *sgt)
{
	struct pl111_drm_dev_private *priv = dev->dev_private;

	/*
	 * When using device-specific reserved memory we can't import
	 * DMA buffers: those can be backed by any global memory, while
	 * we can only handle a specific range of memory.
	 */
	if (priv->use_device_memory)
		return ERR_PTR(-EINVAL);

	return drm_gem_cma_prime_import_sg_table(dev, attach, sgt);
}

DEFINE_DRM_GEM_CMA_FOPS(drm_fops);

static struct drm_driver pl111_drm_driver = {
	.driver_features =
		DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_ATOMIC,
	.lastclose = drm_fb_helper_lastclose,
	.ioctls = NULL,
	.fops = &drm_fops,
	.name = "pl111",
	.desc = DRIVER_DESC,
	.date = "20170317",
	.major = 1,
	.minor = 0,
	.patchlevel = 0,
	.dumb_create = drm_gem_cma_dumb_create,
	.gem_free_object_unlocked = drm_gem_cma_free_object,
	.gem_vm_ops = &drm_gem_cma_vm_ops,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_import = drm_gem_prime_import,
	.gem_prime_import_sg_table = pl111_gem_import_sg_table,
	.gem_prime_export = drm_gem_prime_export,
	.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,

#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = pl111_debugfs_init,
#endif
};

static int pl111_amba_probe(struct amba_device *amba_dev,
			    const struct amba_id *id)
{
	struct device *dev = &amba_dev->dev;
	struct pl111_drm_dev_private *priv;
	const struct pl111_variant_data *variant = id->data;
	struct drm_device *drm;
	int ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	drm = drm_dev_alloc(&pl111_drm_driver, dev);
	if (IS_ERR(drm))
		return PTR_ERR(drm);
	amba_set_drvdata(amba_dev, drm);
	priv->drm = drm;
	drm->dev_private = priv;
	priv->variant = variant;

	ret = of_reserved_mem_device_init(dev);
	if (!ret) {
		dev_info(dev, "using device-specific reserved memory\n");
		priv->use_device_memory = true;
	}

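	/*
	 * "max-memory-bandwidth" is an optional property in the PL11x DT
	 * binding, giving the bandwidth of the cell's memory interface in
	 * bytes per second; when it is absent we store 0 and treat the
	 * bandwidth as unlimited.
	 */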
	if (of_property_read_u32(dev->of_node, "max-memory-bandwidth",
				 &priv->memory_bw)) {
		dev_info(dev, "no max memory bandwidth specified, assume unlimited\n");
		priv->memory_bw = 0;
	}

	/* The two variants swap this register */
	if (variant->is_pl110) {
		priv->ienb = CLCD_PL110_IENB;
		priv->ctrl = CLCD_PL110_CNTL;
	} else {
		priv->ienb = CLCD_PL111_IENB;
		priv->ctrl = CLCD_PL111_CNTL;
	}

	priv->regs = devm_ioremap_resource(dev, &amba_dev->res);
	if (IS_ERR(priv->regs)) {
		dev_err(dev, "%s failed mmio\n", __func__);
		ret = PTR_ERR(priv->regs);
		goto dev_unref;
	}

	/* This may override some variant settings */
	ret = pl111_versatile_init(dev, priv);
	if (ret)
		goto dev_unref;

	/* turn off interrupts before requesting the irq */
	writel(0, priv->regs + priv->ienb);

	ret = devm_request_irq(dev, amba_dev->irq[0], pl111_irq, 0,
			       variant->name, priv);
	if (ret != 0) {
		dev_err(dev, "%s failed irq %d\n", __func__, ret);
		goto dev_unref;
	}

	ret = pl111_modeset_init(drm);
	if (ret != 0)
		goto dev_unref;

	ret = drm_dev_register(drm, 0);
	if (ret < 0)
		goto dev_unref;

	return 0;

dev_unref:
	drm_dev_unref(drm);
	of_reserved_mem_device_release(dev);

	return ret;
}

static int pl111_amba_remove(struct amba_device *amba_dev)
{
	struct device *dev = &amba_dev->dev;
	struct drm_device *drm = amba_get_drvdata(amba_dev);
	struct pl111_drm_dev_private *priv = drm->dev_private;

	drm_dev_unregister(drm);
	drm_fb_cma_fbdev_fini(drm);
	if (priv->panel)
		drm_panel_bridge_remove(priv->bridge);
	drm_mode_config_cleanup(drm);
	drm_dev_unref(drm);
	of_reserved_mem_device_release(dev);

	return 0;
}

/*
 * This early variant lacks the 565 and 444 pixel formats.
 */
static const u32 pl110_pixel_formats[] = {
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ABGR1555,
	DRM_FORMAT_XBGR1555,
	DRM_FORMAT_ARGB1555,
	DRM_FORMAT_XRGB1555,
};

static const struct pl111_variant_data pl110_variant = {
	.name = "PL110",
	.is_pl110 = true,
	.formats = pl110_pixel_formats,
	.nformats = ARRAY_SIZE(pl110_pixel_formats),
	.fb_bpp = 16,
};

/* RealView, Versatile Express etc use this modern variant */
static const u32 pl111_pixel_formats[] = {
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_BGR565,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_ABGR1555,
	DRM_FORMAT_XBGR1555,
	DRM_FORMAT_ARGB1555,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_ABGR4444,
	DRM_FORMAT_XBGR4444,
	DRM_FORMAT_ARGB4444,
	DRM_FORMAT_XRGB4444,
};

static const struct pl111_variant_data pl111_variant = {
	.name = "PL111",
	.formats = pl111_pixel_formats,
	.nformats = ARRAY_SIZE(pl111_pixel_formats),
	.fb_bpp = 32,
};

static const struct amba_id pl111_id_table[] = {
	{
		.id = 0x00041110,
		.mask = 0x000fffff,
		.data = (void *)&pl110_variant,
	},
	{
		.id = 0x00041111,
		.mask = 0x000fffff,
		.data = (void *)&pl111_variant,
	},
	{0, 0},
};

static struct amba_driver pl111_amba_driver __maybe_unused = {
	.drv = {
		.name = "drm-clcd-pl111",
	},
	.probe = pl111_amba_probe,
	.remove = pl111_amba_remove,
	.id_table = pl111_id_table,
};

#ifdef CONFIG_ARM_AMBA
module_amba_driver(pl111_amba_driver);
#endif

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("ARM Ltd.");
MODULE_LICENSE("GPL");