// SPDX-License-Identifier: GPL-2.0
/*
 * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
 * Author: James.Qian.Wang <james.qian.wang@arm.com>
 *
 */
#include <linux/component.h>
#include <linux/interrupt.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_irq.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "komeda_dev.h"
#include "komeda_framebuffer.h"
#include "komeda_kms.h"

DEFINE_DRM_GEM_CMA_FOPS(komeda_cma_fops);

static int komeda_gem_cma_dumb_create(struct drm_file *file,
				      struct drm_device *dev,
				      struct drm_mode_create_dumb *args)
{
	struct komeda_dev *mdev = dev->dev_private;
	u32 pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	/* Align the byte pitch to the bus width required by the chip */
	args->pitch = ALIGN(pitch, mdev->chip.bus_width);

	return drm_gem_cma_dumb_create_internal(file, dev, args);
}

static irqreturn_t komeda_kms_irq_handler(int irq, void *data)
{
	struct drm_device *drm = data;
	struct komeda_dev *mdev = drm->dev_private;
	struct komeda_kms_dev *kms = to_kdev(drm);
	struct komeda_events evts;
	irqreturn_t status;
	u32 i;

	/* Call into the CHIP to recognize events */
	memset(&evts, 0, sizeof(evts));
	status = mdev->funcs->irq_handler(mdev, &evts);

	komeda_print_events(&evts, drm);

	/* Notify the crtcs to handle the events */
	for (i = 0; i < kms->n_crtcs; i++)
		komeda_crtc_handle_event(&kms->crtcs[i], &evts);

	return status;
}

static struct drm_driver komeda_kms_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	.lastclose = drm_fb_helper_lastclose,
	.gem_free_object_unlocked = drm_gem_cma_free_object,
	.gem_vm_ops = &drm_gem_cma_vm_ops,
	.dumb_create = komeda_gem_cma_dumb_create,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
	.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
	.gem_prime_vmap = drm_gem_cma_prime_vmap,
	.gem_prime_vunmap = drm_gem_cma_prime_vunmap,
	.gem_prime_mmap = drm_gem_cma_prime_mmap,
	.fops = &komeda_cma_fops,
	.name = "komeda",
	.desc = "Arm Komeda Display Processor driver",
	.date = "20181101",
	.major = 0,
	.minor = 1,
};

static void komeda_kms_commit_tail(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;

	drm_atomic_helper_commit_modeset_disables(dev, old_state);

	drm_atomic_helper_commit_planes(dev, old_state,
					DRM_PLANE_COMMIT_ACTIVE_ONLY);

	drm_atomic_helper_commit_modeset_enables(dev, old_state);

	drm_atomic_helper_wait_for_flip_done(dev, old_state);

	drm_atomic_helper_commit_hw_done(old_state);

	drm_atomic_helper_cleanup_planes(dev, old_state);
}

static const struct drm_mode_config_helper_funcs komeda_mode_config_helpers = {
	.atomic_commit_tail = komeda_kms_commit_tail,
};

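/*
 * Illustrative example (not driver code): komeda_plane_state_list_add()
 * below keeps zorder_list sorted by ascending zpos. Assuming three planes
 * with zpos 2, 0 and 1 are added in that order, the list evolves as:
 *
 *	add zpos=2: [2]		(list empty, append at the tail)
 *	add zpos=0: [0, 2]	(0 < 2, insert before the zpos=2 node)
 *	add zpos=1: [0, 1, 2]	(1 < 2, insert before the zpos=2 node)
 *
 * Adding a second plane with zpos=1 would be rejected with -EINVAL, since
 * komeda does not allow two planes of one CRTC to share a zpos.
 */
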
static int komeda_plane_state_list_add(struct drm_plane_state *plane_st,
				       struct list_head *zorder_list)
{
	struct komeda_plane_state *new = to_kplane_st(plane_st);
	struct komeda_plane_state *node, *last;

	last = list_empty(zorder_list) ?
	       NULL : list_last_entry(zorder_list, typeof(*last), zlist_node);

	/* The list is sorted by ascending zpos, so if the list is empty, or
	 * the zpos of the new node is bigger than that of the last node in
	 * the list, there is no need to walk the list: just insert the new
	 * node at the tail.
	 */
	if (!last || (new->base.zpos > last->base.zpos)) {
		list_add_tail(&new->zlist_node, zorder_list);
		return 0;
	}

	/* Build the list in order of increasing zpos */
	list_for_each_entry(node, zorder_list, zlist_node) {
		if (new->base.zpos < node->base.zpos) {
			list_add_tail(&new->zlist_node, &node->zlist_node);
			break;
		} else if (node->base.zpos == new->base.zpos) {
			struct drm_plane *a = node->base.plane;
			struct drm_plane *b = new->base.plane;

			/* Komeda doesn't support assigning the same zpos to
			 * different planes.
			 */
			DRM_DEBUG_ATOMIC("PLANE: %s and PLANE: %s are configured with the same zpos: %d.\n",
					 a->name, b->name, node->base.zpos);
			return -EINVAL;
		}
	}

	return 0;
}

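/*
 * Illustrative example (not driver code): komeda_crtc_normalize_zpos()
 * below hands out normalized_zpos in zorder_list order, and a layer_split
 * plane consumes two slots. Assuming planes A (zpos 0, layer_split),
 * B (zpos 1) and C (zpos 4) on one CRTC:
 *
 *	A: normalized_zpos = 0	(1 is reserved for A's right layer)
 *	B: normalized_zpos = 2
 *	C: normalized_zpos = 3
 */
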
static int komeda_crtc_normalize_zpos(struct drm_crtc *crtc,
				      struct drm_crtc_state *crtc_st)
{
	struct drm_atomic_state *state = crtc_st->state;
	struct komeda_crtc *kcrtc = to_kcrtc(crtc);
	struct komeda_crtc_state *kcrtc_st = to_kcrtc_st(crtc_st);
	struct komeda_plane_state *kplane_st;
	struct drm_plane_state *plane_st;
	struct drm_plane *plane;
	struct list_head zorder_list;
	int order = 0, err;

	DRM_DEBUG_ATOMIC("[CRTC:%d:%s] calculating normalized zpos values\n",
			 crtc->base.id, crtc->name);

	INIT_LIST_HEAD(&zorder_list);

	/* This loop also adds all affected planes to the new state */
	drm_for_each_plane_mask(plane, crtc->dev, crtc_st->plane_mask) {
		plane_st = drm_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_st))
			return PTR_ERR(plane_st);

		/* Build a list sorted by ascending zpos */
		err = komeda_plane_state_list_add(plane_st, &zorder_list);
		if (err)
			return err;
	}

	kcrtc_st->max_slave_zorder = 0;

	list_for_each_entry(kplane_st, &zorder_list, zlist_node) {
		plane_st = &kplane_st->base;
		plane = plane_st->plane;

		plane_st->normalized_zpos = order++;
		/* When layer_split is enabled, one plane will be handled by
		 * two separate komeda layers (left/right), which may need
		 * two zorders:
		 * - zorder: for the left layer (left part of the display).
		 * - zorder + 1: reserved for the right layer.
		 */
		if (to_kplane_st(plane_st)->layer_split)
			order++;

		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] zpos:%d, normalized zpos: %d\n",
				 plane->base.id, plane->name,
				 plane_st->zpos, plane_st->normalized_zpos);

		/* Calculate the max slave zorder */
		if (has_bit(drm_plane_index(plane), kcrtc->slave_planes))
			kcrtc_st->max_slave_zorder =
				max(plane_st->normalized_zpos,
				    kcrtc_st->max_slave_zorder);
	}

	crtc_st->zpos_changed = true;

	return 0;
}

static int komeda_kms_check(struct drm_device *dev,
			    struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_st;
	int i, err;

	err = drm_atomic_helper_check_modeset(dev, state);
	if (err)
		return err;

	/* Komeda needs to re-calculate the resource assignment on every
	 * commit, so all affected planes (even unchanged ones) need to be
	 * added to the drm_atomic_state.
	 */
	for_each_new_crtc_in_state(state, crtc, new_crtc_st, i) {
		err = drm_atomic_add_affected_planes(state, crtc);
		if (err)
			return err;

		err = komeda_crtc_normalize_zpos(crtc, new_crtc_st);
		if (err)
			return err;
	}

	err = drm_atomic_helper_check_planes(dev, state);
	if (err)
		return err;

	return 0;
}

static const struct drm_mode_config_funcs komeda_mode_config_funcs = {
	.fb_create = komeda_fb_create,
	.atomic_check = komeda_kms_check,
	.atomic_commit = drm_atomic_helper_commit,
};

static void komeda_kms_mode_config_init(struct komeda_kms_dev *kms,
					struct komeda_dev *mdev)
{
	struct drm_mode_config *config = &kms->base.mode_config;

	drm_mode_config_init(&kms->base);

	komeda_kms_setup_crtcs(kms, mdev);

	/* Get value from dev */
	config->min_width = 0;
	config->min_height = 0;
	config->max_width = 4096;
	config->max_height = 4096;
	config->allow_fb_modifiers = true;

	config->funcs = &komeda_mode_config_funcs;
	config->helper_private = &komeda_mode_config_helpers;
}

struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev)
{
	struct komeda_kms_dev *kms = kzalloc(sizeof(*kms), GFP_KERNEL);
	struct drm_device *drm;
	int err;

	if (!kms)
		return ERR_PTR(-ENOMEM);

	drm = &kms->base;
	err = drm_dev_init(drm, &komeda_kms_driver, mdev->dev);
	if (err)
		goto free_kms;

	drm->dev_private = mdev;

	komeda_kms_mode_config_init(kms, mdev);

	err = komeda_kms_add_private_objs(kms, mdev);
	if (err)
		goto cleanup_mode_config;

	err = komeda_kms_add_planes(kms, mdev);
	if (err)
		goto cleanup_mode_config;

	err = drm_vblank_init(drm, kms->n_crtcs);
	if (err)
		goto cleanup_mode_config;

	err = komeda_kms_add_crtcs(kms, mdev);
	if (err)
		goto cleanup_mode_config;

	err = komeda_kms_add_wb_connectors(kms, mdev);
	if (err)
		goto cleanup_mode_config;

	err = component_bind_all(mdev->dev, kms);
	if (err)
		goto cleanup_mode_config;

	drm_mode_config_reset(drm);

	err = devm_request_irq(drm->dev, mdev->irq,
			       komeda_kms_irq_handler, IRQF_SHARED,
			       drm->driver->name, drm);
	if (err)
		goto free_component_binding;

	drm->irq_enabled = true;

	drm_kms_helper_poll_init(drm);

	err = drm_dev_register(drm, 0);
	if (err)
		goto free_interrupts;

	return kms;

free_interrupts:
	drm_kms_helper_poll_fini(drm);
	drm->irq_enabled = false;
free_component_binding:
	component_unbind_all(mdev->dev, drm);
cleanup_mode_config:
	drm_mode_config_cleanup(drm);
	komeda_kms_cleanup_private_objs(kms);
	drm->dev_private = NULL;
	drm_dev_put(drm);
free_kms:
	kfree(kms);
	return ERR_PTR(err);
}

void komeda_kms_detach(struct komeda_kms_dev *kms)
{
	struct drm_device *drm = &kms->base;
	struct komeda_dev *mdev = drm->dev_private;

	drm_dev_unregister(drm);
	drm_kms_helper_poll_fini(drm);
	drm_atomic_helper_shutdown(drm);
	drm->irq_enabled = false;
	component_unbind_all(mdev->dev, drm);
	drm_mode_config_cleanup(drm);
	komeda_kms_cleanup_private_objs(kms);
	drm->dev_private = NULL;
	drm_dev_put(drm);
}

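/*
 * Usage sketch (illustrative only, not part of this file): the component
 * master bind/unbind path is expected to pair komeda_kms_attach() and
 * komeda_kms_detach() roughly as below; the "mdrv" structure and its
 * fields are assumptions for illustration.
 *
 *	mdrv->kms = komeda_kms_attach(mdrv->mdev);
 *	if (IS_ERR(mdrv->kms))
 *		return PTR_ERR(mdrv->kms);
 *	...
 *	komeda_kms_detach(mdrv->kms);
 */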