/*
 * Copyright (C) 2015 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/**
 * DOC: VC4 KMS
 *
 * This is the general code for implementing KMS mode setting that
 * doesn't clearly associate with any of the other objects (plane,
 * crtc, HDMI encoder).
 */

#include <drm/drm_crtc.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include "vc4_drv.h"

static void vc4_output_poll_changed(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	drm_fbdev_cma_hotplug_event(vc4->fbdev);
}

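/*
 * Finish an atomic commit after the new state has been swapped in: wait
 * for fences and for the commits this one depends on, program the modeset
 * disables, planes and enables, then wait for vblank so scanout is done
 * with the old display lists before their memory can be reused, clean up
 * the planes and commit state, and finally release the async_modeset
 * semaphore taken in vc4_atomic_commit().
 */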
static void
vc4_atomic_complete_commit(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	drm_atomic_helper_wait_for_fences(dev, state, false);

	drm_atomic_helper_wait_for_dependencies(state);

	drm_atomic_helper_commit_modeset_disables(dev, state);

	drm_atomic_helper_commit_planes(dev, state, 0);

	drm_atomic_helper_commit_modeset_enables(dev, state);

	/* Make sure that drm_atomic_helper_wait_for_vblanks()
	 * actually waits for vblank. If we're doing a full atomic
	 * modeset (as opposed to a vc4_update_plane() short circuit),
	 * then we need to wait for scanout to be done with our
	 * display lists before we free them and potentially reallocate
	 * and overwrite the dlist memory with a new modeset.
	 */
	state->legacy_cursor_update = false;

	drm_atomic_helper_commit_hw_done(state);

	drm_atomic_helper_wait_for_vblanks(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	drm_atomic_helper_commit_cleanup_done(state);

	drm_atomic_state_put(state);

	up(&vc4->async_modeset);
}

/* Worker used to complete nonblocking commits after the state swap. */
static void commit_work(struct work_struct *work)
{
	struct drm_atomic_state *state = container_of(work,
						      struct drm_atomic_state,
						      commit_work);
	vc4_atomic_complete_commit(state);
}

/**
 * vc4_atomic_commit - commit validated state object
 * @dev: DRM device
 * @state: the driver state object
 * @nonblock: nonblocking commit
 *
 * This function commits a state object that has been pre-validated with
 * drm_atomic_helper_check(). This can still fail when e.g. the framebuffer
 * reservation fails. Nonblocking commits hand the remaining work off to a
 * worker once the state has been swapped in; blocking commits complete it
 * inline.
 *
 * RETURNS
 * Zero for success or -errno.
 */
static int vc4_atomic_commit(struct drm_device *dev,
			     struct drm_atomic_state *state,
			     bool nonblock)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret;

	ret = drm_atomic_helper_setup_commit(state, nonblock);
	if (ret)
		return ret;

	INIT_WORK(&state->commit_work, commit_work);

	ret = down_interruptible(&vc4->async_modeset);
	if (ret)
		return ret;

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret) {
		up(&vc4->async_modeset);
		return ret;
	}

	if (!nonblock) {
		ret = drm_atomic_helper_wait_for_fences(dev, state, true);
		if (ret) {
			drm_atomic_helper_cleanup_planes(dev, state);
			up(&vc4->async_modeset);
			return ret;
		}
	}

	/*
	 * This is the point of no return - everything below never fails except
	 * when the hw goes bonghits. Which means we can commit the new state on
	 * the software side now.
	 */

	BUG_ON(drm_atomic_helper_swap_state(state, false) < 0);

	/*
	 * Everything below can be run asynchronously without the need to grab
	 * any modeset locks at all under one condition: It must be guaranteed
	 * that the asynchronous work has either been cancelled (if the driver
	 * supports it, which at least requires that the framebuffers get
	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
	 * before the new state gets committed on the software side with
	 * drm_atomic_helper_swap_state().
	 *
	 * This scheme allows new atomic state updates to be prepared and
	 * checked in parallel to the asynchronous completion of the previous
	 * update. Which is important since compositors need to figure out the
	 * composition of the next frame right after having submitted the
	 * current layout.
	 */

	drm_atomic_state_get(state);
	if (nonblock)
		queue_work(system_unbound_wq, &state->commit_work);
	else
		vc4_atomic_complete_commit(state);

	return 0;
}

static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev,
					     struct drm_file *file_priv,
					     const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_mode_fb_cmd2 mode_cmd_local;

	/* If the user didn't specify a modifier, use the
	 * vc4_set_tiling_ioctl() state for the BO.
	 */
	if (!(mode_cmd->flags & DRM_MODE_FB_MODIFIERS)) {
		struct drm_gem_object *gem_obj;
		struct vc4_bo *bo;

		gem_obj = drm_gem_object_lookup(file_priv,
						mode_cmd->handles[0]);
		if (!gem_obj) {
			DRM_DEBUG("Failed to look up GEM BO %d\n",
				  mode_cmd->handles[0]);
			return ERR_PTR(-ENOENT);
		}
		bo = to_vc4_bo(gem_obj);

		mode_cmd_local = *mode_cmd;

		if (bo->t_format) {
			mode_cmd_local.modifier[0] =
				DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
		} else {
			mode_cmd_local.modifier[0] = DRM_FORMAT_MOD_NONE;
		}

		drm_gem_object_put_unlocked(gem_obj);

		mode_cmd = &mode_cmd_local;
	}

	return drm_gem_fb_create(dev, file_priv, mode_cmd);
}

static const struct drm_mode_config_funcs vc4_mode_funcs = {
	.output_poll_changed = vc4_output_poll_changed,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = vc4_atomic_commit,
	.fb_create = vc4_fb_create,
};

int vc4_kms_load(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret;

	sema_init(&vc4->async_modeset, 1);

	/* Set support for vblank irq fast disable, before drm_vblank_init() */
	dev->vblank_disable_immediate = true;

	ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
	if (ret < 0) {
		dev_err(dev->dev, "failed to initialize vblank\n");
		return ret;
	}

	dev->mode_config.max_width = 2048;
	dev->mode_config.max_height = 2048;
	dev->mode_config.funcs = &vc4_mode_funcs;
	dev->mode_config.preferred_depth = 24;
	dev->mode_config.async_page_flip = true;

	drm_mode_config_reset(dev);

	if (dev->mode_config.num_connector) {
		vc4->fbdev = drm_fbdev_cma_init(dev, 32,
						dev->mode_config.num_connector);
		if (IS_ERR(vc4->fbdev))
			vc4->fbdev = NULL;
	}

	drm_kms_helper_poll_init(dev);

	return 0;
}