// SPDX-License-Identifier: GPL-2.0
/*
 * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
 * Author: James.Qian.Wang <james.qian.wang@arm.com>
 *
 */
#include <linux/component.h>
#include <linux/interrupt.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_irq.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "komeda_dev.h"
#include "komeda_framebuffer.h"
#include "komeda_kms.h"

DEFINE_DRM_GEM_CMA_FOPS(komeda_cma_fops);

static int komeda_gem_cma_dumb_create(struct drm_file *file,
				      struct drm_device *dev,
				      struct drm_mode_create_dumb *args)
{
	struct komeda_dev *mdev = dev->dev_private;
	u32 pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	args->pitch = ALIGN(pitch, mdev->chip.bus_width);

	return drm_gem_cma_dumb_create_internal(file, dev, args);
}

static irqreturn_t komeda_kms_irq_handler(int irq, void *data)
{
	struct drm_device *drm = data;
	struct komeda_dev *mdev = drm->dev_private;
	struct komeda_kms_dev *kms = to_kdev(drm);
	struct komeda_events evts;
	irqreturn_t status;
	u32 i;

	/* Call into the CHIP to recognize events */
	memset(&evts, 0, sizeof(evts));
	status = mdev->funcs->irq_handler(mdev, &evts);

	/* Notify each crtc to handle the events */
	for (i = 0; i < kms->n_crtcs; i++)
		komeda_crtc_handle_event(&kms->crtcs[i], &evts);

	return status;
}

static struct drm_driver komeda_kms_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	.lastclose = drm_fb_helper_lastclose,
	.gem_free_object_unlocked = drm_gem_cma_free_object,
	.gem_vm_ops = &drm_gem_cma_vm_ops,
	.dumb_create = komeda_gem_cma_dumb_create,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
	.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
	.gem_prime_vmap = drm_gem_cma_prime_vmap,
	.gem_prime_vunmap = drm_gem_cma_prime_vunmap,
	.gem_prime_mmap = drm_gem_cma_prime_mmap,
	.fops = &komeda_cma_fops,
	.name = "komeda",
	.desc = "Arm Komeda Display Processor driver",
	.date = "20181101",
	.major = 0,
	.minor = 1,
};

static void komeda_kms_commit_tail(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;

	drm_atomic_helper_commit_modeset_disables(dev, old_state);

	drm_atomic_helper_commit_planes(dev, old_state, 0);

	drm_atomic_helper_commit_modeset_enables(dev, old_state);

	drm_atomic_helper_wait_for_flip_done(dev, old_state);

	drm_atomic_helper_commit_hw_done(old_state);

	drm_atomic_helper_cleanup_planes(dev, old_state);
}

static const struct drm_mode_config_helper_funcs komeda_mode_config_helpers = {
	.atomic_commit_tail = komeda_kms_commit_tail,
};

static int komeda_plane_state_list_add(struct drm_plane_state *plane_st,
				       struct list_head *zorder_list)
{
	struct komeda_plane_state *new = to_kplane_st(plane_st);
	struct komeda_plane_state *node, *last;

	last = list_empty(zorder_list) ?
	       NULL : list_last_entry(zorder_list, typeof(*last), zlist_node);

	/* The list is sorted in ascending zpos order, so if the list is empty
	 * or the zpos of the new node is bigger than the last node's, there is
	 * no need to loop: just append the new node to the tail of the list.
	 */
	if (!last || (new->base.zpos > last->base.zpos)) {
		list_add_tail(&new->zlist_node, zorder_list);
		return 0;
	}

	/* Insert the new node keeping the list sorted by ascending zpos */
	list_for_each_entry(node, zorder_list, zlist_node) {
		if (new->base.zpos < node->base.zpos) {
			list_add_tail(&new->zlist_node, &node->zlist_node);
			break;
		} else if (node->base.zpos == new->base.zpos) {
			struct drm_plane *a = node->base.plane;
			struct drm_plane *b = new->base.plane;

			/* Komeda doesn't support the same zpos for different
			 * planes.
			 */
			DRM_DEBUG_ATOMIC("PLANE: %s and PLANE: %s are configured same zpos: %d.\n",
					 a->name, b->name, node->base.zpos);
			return -EINVAL;
		}
	}

	return 0;
}
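
/*
 * Assign normalized_zpos in ascending zpos order. A plane with layer_split
 * enabled consumes two slots, since it is handled by two komeda layers
 * (left/right).
 *
 * Worked example (illustrative values, not taken from real hardware):
 * planes with zpos {0, 2, 5}, where the zpos-2 plane has layer_split
 * enabled, get normalized zpos {0, 1, 3}; slot 2 is reserved for the right
 * half of the split plane. Duplicate zpos values are rejected with -EINVAL
 * by komeda_plane_state_list_add().
 */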
static int komeda_crtc_normalize_zpos(struct drm_crtc *crtc,
				      struct drm_crtc_state *crtc_st)
{
	struct drm_atomic_state *state = crtc_st->state;
	struct komeda_crtc *kcrtc = to_kcrtc(crtc);
	struct komeda_crtc_state *kcrtc_st = to_kcrtc_st(crtc_st);
	struct komeda_plane_state *kplane_st;
	struct drm_plane_state *plane_st;
	struct drm_plane *plane;
	struct list_head zorder_list;
	int order = 0, err;

	DRM_DEBUG_ATOMIC("[CRTC:%d:%s] calculating normalized zpos values\n",
			 crtc->base.id, crtc->name);

	INIT_LIST_HEAD(&zorder_list);

	/* This loop also adds all affected planes to the new state */
	drm_for_each_plane_mask(plane, crtc->dev, crtc_st->plane_mask) {
		plane_st = drm_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_st))
			return PTR_ERR(plane_st);

		/* Build a list sorted by ascending zpos */
		err = komeda_plane_state_list_add(plane_st, &zorder_list);
		if (err)
			return err;
	}

	kcrtc_st->max_slave_zorder = 0;

	list_for_each_entry(kplane_st, &zorder_list, zlist_node) {
		plane_st = &kplane_st->base;
		plane = plane_st->plane;

		plane_st->normalized_zpos = order++;
		/* When layer_split is enabled, one plane will be handled by
		 * two separate komeda layers (left/right), which needs two
		 * zorders:
		 * - zorder: for the left layer (left display part).
		 * - zorder + 1: reserved for the right layer.
		 */
		if (to_kplane_st(plane_st)->layer_split)
			order++;

		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] zpos:%d, normalized zpos: %d\n",
				 plane->base.id, plane->name,
				 plane_st->zpos, plane_st->normalized_zpos);

		/* Track the maximum slave zorder */
		if (has_bit(drm_plane_index(plane), kcrtc->slave_planes))
			kcrtc_st->max_slave_zorder =
				max(plane_st->normalized_zpos,
				    kcrtc_st->max_slave_zorder);
	}

	crtc_st->zpos_changed = true;

	return 0;
}

static int komeda_kms_check(struct drm_device *dev,
			    struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_st;
	int i, err;

	err = drm_atomic_helper_check_modeset(dev, state);
	if (err)
		return err;

	/* Komeda needs to re-calculate the resource assignment in every
	 * commit, so all affected planes (even unchanged ones) must be added
	 * to the drm_atomic_state.
	 */
	for_each_new_crtc_in_state(state, crtc, new_crtc_st, i) {
		err = drm_atomic_add_affected_planes(state, crtc);
		if (err)
			return err;

		err = komeda_crtc_normalize_zpos(crtc, new_crtc_st);
		if (err)
			return err;
	}

	err = drm_atomic_helper_check_planes(dev, state);
	if (err)
		return err;

	return 0;
}

static const struct drm_mode_config_funcs komeda_mode_config_funcs = {
	.fb_create = komeda_fb_create,
	.atomic_check = komeda_kms_check,
	.atomic_commit = drm_atomic_helper_commit,
};

static void komeda_kms_mode_config_init(struct komeda_kms_dev *kms,
					struct komeda_dev *mdev)
{
	struct drm_mode_config *config = &kms->base.mode_config;

	drm_mode_config_init(&kms->base);

	komeda_kms_setup_crtcs(kms, mdev);

	/* Get value from dev */
	config->min_width = 0;
	config->min_height = 0;
	config->max_width = 4096;
	config->max_height = 4096;
	config->allow_fb_modifiers = true;

	config->funcs = &komeda_mode_config_funcs;
	config->helper_private = &komeda_mode_config_helpers;
}
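
/*
 * Create and register the DRM device for @mdev: initialize mode config,
 * private objects, planes, vblank support, CRTCs and writeback connectors,
 * bind the component sub-devices, then install and enable the IRQ handler
 * before registering the device. On failure, the steps completed so far are
 * unwound via the error labels below.
 */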
struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev)
{
	struct komeda_kms_dev *kms = kzalloc(sizeof(*kms), GFP_KERNEL);
	struct drm_device *drm;
	int err;

	if (!kms)
		return ERR_PTR(-ENOMEM);

	drm = &kms->base;
	err = drm_dev_init(drm, &komeda_kms_driver, mdev->dev);
	if (err)
		goto free_kms;

	drm->dev_private = mdev;

	komeda_kms_mode_config_init(kms, mdev);

	err = komeda_kms_add_private_objs(kms, mdev);
	if (err)
		goto cleanup_mode_config;

	err = komeda_kms_add_planes(kms, mdev);
	if (err)
		goto cleanup_mode_config;

	err = drm_vblank_init(drm, kms->n_crtcs);
	if (err)
		goto cleanup_mode_config;

	err = komeda_kms_add_crtcs(kms, mdev);
	if (err)
		goto cleanup_mode_config;

	err = komeda_kms_add_wb_connectors(kms, mdev);
	if (err)
		goto cleanup_mode_config;

	err = component_bind_all(mdev->dev, kms);
	if (err)
		goto cleanup_mode_config;

	drm_mode_config_reset(drm);

	err = devm_request_irq(drm->dev, mdev->irq,
			       komeda_kms_irq_handler, IRQF_SHARED,
			       drm->driver->name, drm);
	if (err)
		goto free_component_binding;

	err = mdev->funcs->enable_irq(mdev);
	if (err)
		goto free_component_binding;

	drm->irq_enabled = true;

	drm_kms_helper_poll_init(drm);

	err = drm_dev_register(drm, 0);
	if (err)
		goto free_interrupts;

	return kms;

free_interrupts:
	drm_kms_helper_poll_fini(drm);
	drm->irq_enabled = false;
	mdev->funcs->disable_irq(mdev);
free_component_binding:
	component_unbind_all(mdev->dev, drm);
cleanup_mode_config:
	drm_mode_config_cleanup(drm);
	komeda_kms_cleanup_private_objs(kms);
	drm->dev_private = NULL;
	drm_dev_put(drm);
free_kms:
	kfree(kms);
	return ERR_PTR(err);
}

void komeda_kms_detach(struct komeda_kms_dev *kms)
{
	struct drm_device *drm = &kms->base;
	struct komeda_dev *mdev = drm->dev_private;

	drm_dev_unregister(drm);
	drm_kms_helper_poll_fini(drm);
	drm_atomic_helper_shutdown(drm);
	drm->irq_enabled = false;
	mdev->funcs->disable_irq(mdev);
	component_unbind_all(mdev->dev, drm);
	drm_mode_config_cleanup(drm);
	komeda_kms_cleanup_private_objs(kms);
	drm->dev_private = NULL;
	drm_dev_put(drm);
}
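
/*
 * Usage sketch (illustrative, not part of this file): the device-level
 * driver is expected to call komeda_kms_attach() after the komeda_dev has
 * been created, and komeda_kms_detach() when tearing it down, e.g.:
 *
 *	kms = komeda_kms_attach(mdev);
 *	if (IS_ERR(kms))
 *		return PTR_ERR(kms);
 *	...
 *	komeda_kms_detach(kms);
 */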