/*
 * Copyright (C) 2015 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/**
 * DOC: VC4 KMS
 *
 * This is the general code for implementing KMS mode setting that
 * doesn't clearly associate with any of the other objects (plane,
 * crtc, HDMI encoder).
 */

#include "drm_crtc.h"
#include "drm_atomic.h"
#include "drm_atomic_helper.h"
#include "drm_crtc_helper.h"
#include "drm_plane_helper.h"
#include "drm_fb_cma_helper.h"
#include "vc4_drv.h"

static void vc4_output_poll_changed(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	drm_fbdev_cma_hotplug_event(vc4->fbdev);
}

struct vc4_commit {
	struct drm_device *dev;
	struct drm_atomic_state *state;
	struct vc4_seqno_cb cb;
};

static void
vc4_atomic_complete_commit(struct vc4_commit *c)
{
	struct drm_atomic_state *state = c->state;
	struct drm_device *dev = state->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	drm_atomic_helper_commit_modeset_disables(dev, state);

	drm_atomic_helper_commit_planes(dev, state, 0);

	drm_atomic_helper_commit_modeset_enables(dev, state);

	/* Make sure that drm_atomic_helper_wait_for_vblanks()
	 * actually waits for vblank. If we're doing a full atomic
	 * modeset (as opposed to a vc4_update_plane() short circuit),
	 * then we need to wait for scanout to be done with our
	 * display lists before we free it and potentially reallocate
	 * and overwrite the dlist memory with a new modeset.
	 */
	state->legacy_cursor_update = false;

	drm_atomic_helper_wait_for_vblanks(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	drm_atomic_state_put(state);

	up(&vc4->async_modeset);

	kfree(c);
}
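
/*
 * The disable/plane/enable/wait/cleanup sequence above is essentially the
 * same ordering the generic atomic helpers perform for a commit; vc4
 * open-codes it so that, for nonblocking commits, completion can be driven
 * from the V3D seqno callback below rather than from a helper worker.
 */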

static void
vc4_atomic_complete_commit_seqno_cb(struct vc4_seqno_cb *cb)
{
	struct vc4_commit *c = container_of(cb, struct vc4_commit, cb);

	vc4_atomic_complete_commit(c);
}

static struct vc4_commit *commit_init(struct drm_atomic_state *state)
{
	struct vc4_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);

	if (!c)
		return NULL;
	c->dev = state->dev;
	c->state = state;

	return c;
}
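
/*
 * For context, a minimal sketch of the userspace side that ends up in the
 * nonblocking path of vc4_atomic_commit() below, using the libdrm atomic
 * API.  "fd", "plane_id", "fb_prop_id" and "new_fb_id" are placeholders a
 * real client discovers at runtime, and error handling is omitted:
 *
 *	int ret;
 *	drmModeAtomicReqPtr req = drmModeAtomicAlloc();
 *
 *	drmModeAtomicAddProperty(req, plane_id, fb_prop_id, new_fb_id);
 *	ret = drmModeAtomicCommit(fd, req,
 *				  DRM_MODE_ATOMIC_NONBLOCK |
 *				  DRM_MODE_PAGE_FLIP_EVENT, NULL);
 *	drmModeAtomicFree(req);
 *
 * An -EBUSY return from such a commit corresponds to the vc4_event_pending()
 * check in vc4_atomic_commit(): the completion event for the previous flip
 * has not been delivered to userspace yet.
 */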

/**
 * vc4_atomic_commit - commit validated state object
 * @dev: DRM device
 * @state: the driver state object
 * @nonblock: nonblocking commit
 *
 * This function commits a state object that has been pre-validated with
 * drm_atomic_helper_check(). This can still fail when e.g. the framebuffer
 * reservation fails. Nonblocking commits are completed asynchronously from
 * the V3D seqno callback once rendering to the new framebuffers has
 * finished.
 *
 * RETURNS
 * Zero for success or -errno.
 */
static int vc4_atomic_commit(struct drm_device *dev,
			     struct drm_atomic_state *state,
			     bool nonblock)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret;
	int i;
	uint64_t wait_seqno = 0;
	struct vc4_commit *c;
	struct drm_plane *plane;
	struct drm_plane_state *new_state;

	c = commit_init(state);
	if (!c)
		return -ENOMEM;

	/* Make sure that any outstanding modesets have finished. */
	if (nonblock) {
		struct drm_crtc *crtc;
		struct drm_crtc_state *crtc_state;
		unsigned long flags;
		bool busy = false;

		/*
		 * If there's an undispatched event to send then we're
		 * obviously still busy. If there isn't, then we can
		 * unconditionally wait for the semaphore because it
		 * shouldn't be contended (for long).
		 *
		 * This is to prevent a race where queuing a new flip
		 * from userspace immediately on receipt of an event
		 * beats our clean-up and returns EBUSY.
		 */
		spin_lock_irqsave(&dev->event_lock, flags);
		for_each_crtc_in_state(state, crtc, crtc_state, i)
			busy |= vc4_event_pending(crtc);
		spin_unlock_irqrestore(&dev->event_lock, flags);
		if (busy) {
			kfree(c);
			return -EBUSY;
		}
	}
	ret = down_interruptible(&vc4->async_modeset);
	if (ret) {
		kfree(c);
		return ret;
	}

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret) {
		kfree(c);
		up(&vc4->async_modeset);
		return ret;
	}

	for_each_plane_in_state(state, plane, new_state, i) {
		if ((plane->state->fb != new_state->fb) && new_state->fb) {
			struct drm_gem_cma_object *cma_bo =
				drm_fb_cma_get_gem_obj(new_state->fb, 0);
			struct vc4_bo *bo = to_vc4_bo(&cma_bo->base);

			wait_seqno = max(bo->seqno, wait_seqno);
		}
	}

	/*
	 * This is the point of no return - everything below never fails except
	 * when the hw goes bonghits. Which means we can commit the new state on
	 * the software side now.
	 */

	drm_atomic_helper_swap_state(state, true);

	/*
	 * Everything below can be run asynchronously without the need to grab
	 * any modeset locks at all under one condition: It must be guaranteed
	 * that the asynchronous work has either been cancelled (if the driver
	 * supports it, which at least requires that the framebuffers get
	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
	 * before the new state gets committed on the software side with
	 * drm_atomic_helper_swap_state().
	 *
	 * This scheme allows new atomic state updates to be prepared and
	 * checked in parallel to the asynchronous completion of the previous
	 * update. Which is important since compositors need to figure out the
	 * composition of the next frame right after having submitted the
	 * current layout.
	 */

	drm_atomic_state_get(state);
	if (nonblock) {
		vc4_queue_seqno_cb(dev, &c->cb, wait_seqno,
				   vc4_atomic_complete_commit_seqno_cb);
	} else {
		vc4_wait_for_seqno(dev, wait_seqno, ~0ull, false);
		vc4_atomic_complete_commit(c);
	}

	return 0;
}
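
/*
 * A note on the busy check in vc4_atomic_commit(): vc4_event_pending() is
 * implemented with the CRTC code and only has to report whether the CRTC
 * still owns an undelivered page-flip event.  A minimal sketch (the field
 * name is an assumption about the vc4_crtc layout, not a copy of it):
 *
 *	bool vc4_event_pending(struct drm_crtc *crtc)
 *	{
 *		struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
 *
 *		return !!vc4_crtc->event;
 *	}
 *
 * The pending event is handed back to userspace from the vblank interrupt
 * path, which is why the check is done under dev->event_lock.
 */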

static const struct drm_mode_config_funcs vc4_mode_funcs = {
	.output_poll_changed = vc4_output_poll_changed,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = vc4_atomic_commit,
	.fb_create = drm_fb_cma_create,
};

int vc4_kms_load(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret;

	sema_init(&vc4->async_modeset, 1);

	ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
	if (ret < 0) {
		dev_err(dev->dev, "failed to initialize vblank\n");
		return ret;
	}

	dev->mode_config.max_width = 2048;
	dev->mode_config.max_height = 2048;
	dev->mode_config.funcs = &vc4_mode_funcs;
	dev->mode_config.preferred_depth = 24;
	dev->mode_config.async_page_flip = true;

	drm_mode_config_reset(dev);

	if (dev->mode_config.num_connector) {
		vc4->fbdev = drm_fbdev_cma_init(dev, 32,
						dev->mode_config.num_connector);
		if (IS_ERR(vc4->fbdev))
			vc4->fbdev = NULL;
	}

	drm_kms_helper_poll_init(dev);

	return 0;
}