// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 * Xen para-virtual DRM device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#include "xen_drm_front_kms.h"

#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>

#include "xen_drm_front.h"
#include "xen_drm_front_conn.h"

/*
 * Timeout in ms to wait for the frame done event from the backend:
 * must be a bit longer than the IO timeout
 */
#define FRAME_DONE_TO_MS	(XEN_DRM_FRONT_WAIT_BACK_MS + 100)

static struct xen_drm_front_drm_pipeline *
to_xen_drm_pipeline(struct drm_simple_display_pipe *pipe)
{
	return container_of(pipe, struct xen_drm_front_drm_pipeline, pipe);
}

static void fb_destroy(struct drm_framebuffer *fb)
{
	struct xen_drm_front_drm_info *drm_info = fb->dev->dev_private;
	int idx;

	if (drm_dev_enter(fb->dev, &idx)) {
		xen_drm_front_fb_detach(drm_info->front_info,
					xen_drm_front_fb_to_cookie(fb));
		drm_dev_exit(idx);
	}
	drm_gem_fb_destroy(fb);
}

static const struct drm_framebuffer_funcs fb_funcs = {
	.destroy = fb_destroy,
};

static struct drm_framebuffer *
fb_create(struct drm_device *dev, struct drm_file *filp,
	  const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct drm_framebuffer *fb;
	struct drm_gem_object *gem_obj;
	int ret;

	fb = drm_gem_fb_create_with_funcs(dev, filp, mode_cmd, &fb_funcs);
	if (IS_ERR_OR_NULL(fb))
		return fb;

	gem_obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
	if (!gem_obj) {
		DRM_ERROR("Failed to lookup GEM object\n");
		ret = -ENOENT;
		goto fail;
	}

	/* The framebuffer holds its own reference, so drop ours now */
	drm_gem_object_put_unlocked(gem_obj);

	ret = xen_drm_front_fb_attach(drm_info->front_info,
				      xen_drm_front_dbuf_to_cookie(gem_obj),
				      xen_drm_front_fb_to_cookie(fb),
				      fb->width, fb->height,
				      fb->format->format);
	if (ret < 0) {
		DRM_ERROR("Backend failed to attach FB %p: %d\n", fb, ret);
		goto fail;
	}

	return fb;

fail:
	drm_gem_fb_destroy(fb);
	return ERR_PTR(ret);
}

static const struct drm_mode_config_funcs mode_config_funcs = {
	.fb_create = fb_create,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};

static void send_pending_event(struct xen_drm_front_drm_pipeline *pipeline)
{
	struct drm_crtc *crtc = &pipeline->pipe.crtc;
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	if (pipeline->pending_event)
		drm_crtc_send_vblank_event(crtc, pipeline->pending_event);
	pipeline->pending_event = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);
}
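/*
 * Note on pipeline->conn_connected: it is cleared on a failed mode set or
 * page flip below, so the connector no longer reports itself as connected,
 * and is set again in display_disable() so the pipe can be restarted with
 * an enabled connector later on.
 */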
static void display_enable(struct drm_simple_display_pipe *pipe,
			   struct drm_crtc_state *crtc_state,
			   struct drm_plane_state *plane_state)
{
	struct xen_drm_front_drm_pipeline *pipeline =
			to_xen_drm_pipeline(pipe);
	struct drm_crtc *crtc = &pipe->crtc;
	struct drm_framebuffer *fb = plane_state->fb;
	int ret, idx;

	if (!drm_dev_enter(pipe->crtc.dev, &idx))
		return;

	ret = xen_drm_front_mode_set(pipeline, crtc->x, crtc->y,
				     fb->width, fb->height,
				     fb->format->cpp[0] * 8,
				     xen_drm_front_fb_to_cookie(fb));

	if (ret) {
		DRM_ERROR("Failed to enable display: %d\n", ret);
		pipeline->conn_connected = false;
	}

	drm_dev_exit(idx);
}

static void display_disable(struct drm_simple_display_pipe *pipe)
{
	struct xen_drm_front_drm_pipeline *pipeline =
			to_xen_drm_pipeline(pipe);
	int ret = 0, idx;

	if (drm_dev_enter(pipe->crtc.dev, &idx)) {
		ret = xen_drm_front_mode_set(pipeline, 0, 0, 0, 0, 0,
					     xen_drm_front_fb_to_cookie(NULL));
		drm_dev_exit(idx);
	}
	if (ret)
		DRM_ERROR("Failed to disable display: %d\n", ret);

	/* Make sure we can restart with an enabled connector next time */
	pipeline->conn_connected = true;

	/* Release a stalled event, if any */
	send_pending_event(pipeline);
}

void xen_drm_front_kms_on_frame_done(struct xen_drm_front_drm_pipeline *pipeline,
				     u64 fb_cookie)
{
	/*
	 * This runs in interrupt context, i.e. under
	 * drm_info->front_info->io_lock, so we cannot call the _sync version
	 * to cancel the work
	 */
	cancel_delayed_work(&pipeline->pflip_to_worker);

	send_pending_event(pipeline);
}

static void pflip_to_worker(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct xen_drm_front_drm_pipeline *pipeline =
			container_of(delayed_work,
				     struct xen_drm_front_drm_pipeline,
				     pflip_to_worker);

	DRM_ERROR("Frame done timed out, releasing\n");
	send_pending_event(pipeline);
}

static bool display_send_page_flip(struct drm_simple_display_pipe *pipe,
				   struct drm_plane_state *old_plane_state)
{
	struct drm_plane_state *plane_state =
			drm_atomic_get_new_plane_state(old_plane_state->state,
						       &pipe->plane);

	/*
	 * If old_plane_state->fb is NULL and plane_state->fb is not,
	 * then this is an atomic commit which will enable the display.
	 * If old_plane_state->fb is not NULL and plane_state->fb is NULL,
	 * then this is an atomic commit which will disable the display.
	 * Ignore these and do not send a page flip, as this framebuffer
	 * will be sent to the backend as a part of the display_set_config
	 * call.
	 */
	if (old_plane_state->fb && plane_state->fb) {
		struct xen_drm_front_drm_pipeline *pipeline =
				to_xen_drm_pipeline(pipe);
		struct xen_drm_front_drm_info *drm_info = pipeline->drm_info;
		int ret;

		schedule_delayed_work(&pipeline->pflip_to_worker,
				      msecs_to_jiffies(FRAME_DONE_TO_MS));

		ret = xen_drm_front_page_flip(drm_info->front_info,
					      pipeline->index,
					      xen_drm_front_fb_to_cookie(plane_state->fb));
		if (ret) {
			DRM_ERROR("Failed to send page flip request to backend: %d\n", ret);

			pipeline->conn_connected = false;
			/*
			 * Report the flip as not handled, so the pending
			 * event is sent, unblocking user-space.
			 */
			return false;
		}
		/*
		 * Signal that the page flip was handled; the pending event
		 * will be sent on the frame done event from the backend.
		 */
		return true;
	}

	return false;
}
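/*
 * Page flip completion protocol: display_update() below caches the vblank
 * event in pipeline->pending_event under event_lock before talking to the
 * backend. The event is then delivered either from the frame done
 * notification (xen_drm_front_kms_on_frame_done()), from the timeout
 * worker (pflip_to_worker()), or directly when no flip was sent.
 */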
static void display_update(struct drm_simple_display_pipe *pipe,
			   struct drm_plane_state *old_plane_state)
{
	struct xen_drm_front_drm_pipeline *pipeline =
			to_xen_drm_pipeline(pipe);
	struct drm_crtc *crtc = &pipe->crtc;
	struct drm_pending_vblank_event *event;
	int idx;

	event = crtc->state->event;
	if (event) {
		struct drm_device *dev = crtc->dev;
		unsigned long flags;

		WARN_ON(pipeline->pending_event);

		spin_lock_irqsave(&dev->event_lock, flags);
		crtc->state->event = NULL;

		pipeline->pending_event = event;
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	if (!drm_dev_enter(pipe->crtc.dev, &idx)) {
		send_pending_event(pipeline);
		return;
	}

	/*
	 * Send the page flip request to the backend *after* we have cached
	 * the event above, so that when the page flip done event arrives
	 * from the backend we can deliver it without racing against this
	 * code. If this is not a page flip, i.e. no flip done event is
	 * expected from the backend, then send the event now.
	 */
	if (!display_send_page_flip(pipe, old_plane_state))
		send_pending_event(pipeline);

	drm_dev_exit(idx);
}

static enum drm_mode_status
display_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode)
{
	struct xen_drm_front_drm_pipeline *pipeline =
			container_of(crtc, struct xen_drm_front_drm_pipeline,
				     pipe.crtc);

	/* Only the fixed mode configured for this connector is supported */
	if (mode->hdisplay != pipeline->width)
		return MODE_ERROR;

	if (mode->vdisplay != pipeline->height)
		return MODE_ERROR;

	return MODE_OK;
}

static const struct drm_simple_display_pipe_funcs display_funcs = {
	.mode_valid = display_mode_valid,
	.enable = display_enable,
	.disable = display_disable,
	.prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
	.update = display_update,
};

static int display_pipe_init(struct xen_drm_front_drm_info *drm_info,
			     int index, struct xen_drm_front_cfg_connector *cfg,
			     struct xen_drm_front_drm_pipeline *pipeline)
{
	struct drm_device *dev = drm_info->drm_dev;
	const u32 *formats;
	int format_count;
	int ret;

	pipeline->drm_info = drm_info;
	pipeline->index = index;
	pipeline->height = cfg->height;
	pipeline->width = cfg->width;

	INIT_DELAYED_WORK(&pipeline->pflip_to_worker, pflip_to_worker);

	ret = xen_drm_front_conn_init(drm_info, &pipeline->conn);
	if (ret)
		return ret;

	formats = xen_drm_front_conn_get_formats(&format_count);

	return drm_simple_display_pipe_init(dev, &pipeline->pipe,
					    &display_funcs, formats,
					    format_count, NULL,
					    &pipeline->conn);
}
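/*
 * Create the mode configuration and one simple display pipe
 * (plane + CRTC + encoder + connector) per connector described in the
 * frontend configuration.
 */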
int xen_drm_front_kms_init(struct xen_drm_front_drm_info *drm_info)
{
	struct drm_device *dev = drm_info->drm_dev;
	int i, ret;

	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;
	dev->mode_config.max_width = 4095;
	dev->mode_config.max_height = 2047;
	dev->mode_config.funcs = &mode_config_funcs;

	for (i = 0; i < drm_info->front_info->cfg.num_connectors; i++) {
		struct xen_drm_front_cfg_connector *cfg =
				&drm_info->front_info->cfg.connectors[i];
		struct xen_drm_front_drm_pipeline *pipeline =
				&drm_info->pipeline[i];

		ret = display_pipe_init(drm_info, i, cfg, pipeline);
		if (ret) {
			drm_mode_config_cleanup(dev);
			return ret;
		}
	}

	drm_mode_config_reset(dev);
	drm_kms_helper_poll_init(dev);
	return 0;
}

void xen_drm_front_kms_fini(struct xen_drm_front_drm_info *drm_info)
{
	int i;

	for (i = 0; i < drm_info->front_info->cfg.num_connectors; i++) {
		struct xen_drm_front_drm_pipeline *pipeline =
				&drm_info->pipeline[i];

		cancel_delayed_work_sync(&pipeline->pflip_to_worker);

		send_pending_event(pipeline);
	}
}