1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 
3 /*
4  *  Xen para-virtual DRM device
5  *
6  * Copyright (C) 2016-2018 EPAM Systems Inc.
7  *
8  * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
9  */
10 
11 #include <drm/drm_atomic.h>
12 #include <drm/drm_atomic_helper.h>
13 #include <drm/drm_drv.h>
14 #include <drm/drm_fourcc.h>
15 #include <drm/drm_gem.h>
16 #include <drm/drm_gem_framebuffer_helper.h>
17 #include <drm/drm_probe_helper.h>
18 #include <drm/drm_vblank.h>
19 
20 #include "xen_drm_front.h"
21 #include "xen_drm_front_conn.h"
22 #include "xen_drm_front_kms.h"
23 
24 /*
25  * Timeout in ms to wait for frame done event from the backend:
26  * must be a bit more than IO time-out
27  */
28 #define FRAME_DONE_TO_MS	(XEN_DRM_FRONT_WAIT_BACK_MS + 100)
29 
/* Map the embedded simple display pipe back to its owning pipeline. */
static struct xen_drm_front_drm_pipeline *
to_xen_drm_pipeline(struct drm_simple_display_pipe *pipe)
{
	return container_of(pipe, struct xen_drm_front_drm_pipeline, pipe);
}
35 
/*
 * Framebuffer destructor: ask the backend to detach the framebuffer
 * (when the device is still alive) and then release the GEM-backed
 * framebuffer itself.
 */
static void fb_destroy(struct drm_framebuffer *fb)
{
	struct xen_drm_front_drm_info *drm_info = fb->dev->dev_private;
	int idx;

	/*
	 * Only talk to the backend if the device has not been unplugged:
	 * drm_dev_enter() fails after drm_dev_unplug().
	 */
	if (drm_dev_enter(fb->dev, &idx)) {
		xen_drm_front_fb_detach(drm_info->front_info,
					xen_drm_front_fb_to_cookie(fb));
		drm_dev_exit(idx);
	}
	/* The DRM/GEM side is torn down unconditionally. */
	drm_gem_fb_destroy(fb);
}
48 
/* Framebuffer ops: only .destroy is overridden to notify the backend. */
static const struct drm_framebuffer_funcs fb_funcs = {
	.destroy = fb_destroy,
};
52 
53 static struct drm_framebuffer *
54 fb_create(struct drm_device *dev, struct drm_file *filp,
55 	  const struct drm_mode_fb_cmd2 *mode_cmd)
56 {
57 	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
58 	struct drm_framebuffer *fb;
59 	struct drm_gem_object *gem_obj;
60 	int ret;
61 
62 	fb = drm_gem_fb_create_with_funcs(dev, filp, mode_cmd, &fb_funcs);
63 	if (IS_ERR_OR_NULL(fb))
64 		return fb;
65 
66 	gem_obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
67 	if (!gem_obj) {
68 		DRM_ERROR("Failed to lookup GEM object\n");
69 		ret = -ENOENT;
70 		goto fail;
71 	}
72 
73 	drm_gem_object_put_unlocked(gem_obj);
74 
75 	ret = xen_drm_front_fb_attach(drm_info->front_info,
76 				      xen_drm_front_dbuf_to_cookie(gem_obj),
77 				      xen_drm_front_fb_to_cookie(fb),
78 				      fb->width, fb->height,
79 				      fb->format->format);
80 	if (ret < 0) {
81 		DRM_ERROR("Back failed to attach FB %p: %d\n", fb, ret);
82 		goto fail;
83 	}
84 
85 	return fb;
86 
87 fail:
88 	drm_gem_fb_destroy(fb);
89 	return ERR_PTR(ret);
90 }
91 
/* Mode config ops: atomic helpers plus the backend-aware fb_create. */
static const struct drm_mode_config_funcs mode_config_funcs = {
	.fb_create = fb_create,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};
97 
98 static void send_pending_event(struct xen_drm_front_drm_pipeline *pipeline)
99 {
100 	struct drm_crtc *crtc = &pipeline->pipe.crtc;
101 	struct drm_device *dev = crtc->dev;
102 	unsigned long flags;
103 
104 	spin_lock_irqsave(&dev->event_lock, flags);
105 	if (pipeline->pending_event)
106 		drm_crtc_send_vblank_event(crtc, pipeline->pending_event);
107 	pipeline->pending_event = NULL;
108 	spin_unlock_irqrestore(&dev->event_lock, flags);
109 }
110 
/*
 * Simple display pipe .enable hook: program the backend with the CRTC
 * position, framebuffer geometry and pixel depth.
 */
static void display_enable(struct drm_simple_display_pipe *pipe,
			   struct drm_crtc_state *crtc_state,
			   struct drm_plane_state *plane_state)
{
	struct xen_drm_front_drm_pipeline *pipeline =
			to_xen_drm_pipeline(pipe);
	struct drm_crtc *crtc = &pipe->crtc;
	struct drm_framebuffer *fb = plane_state->fb;
	int ret, idx;

	/* Nothing to do if the device has been unplugged. */
	if (!drm_dev_enter(pipe->crtc.dev, &idx))
		return;

	/* bpp is derived from plane 0 bytes-per-pixel of the fb format. */
	ret = xen_drm_front_mode_set(pipeline, crtc->x, crtc->y,
				     fb->width, fb->height,
				     fb->format->cpp[0] * 8,
				     xen_drm_front_fb_to_cookie(fb));

	if (ret) {
		DRM_ERROR("Failed to enable display: %d\n", ret);
		/* Mark the connector disconnected on backend failure. */
		pipeline->conn_connected = false;
	}

	drm_dev_exit(idx);
}
136 
/*
 * Simple display pipe .disable hook: tell the backend to drop the mode
 * (all-zero mode set with a NULL fb cookie) and release any event user
 * space may still be waiting on.
 */
static void display_disable(struct drm_simple_display_pipe *pipe)
{
	struct xen_drm_front_drm_pipeline *pipeline =
			to_xen_drm_pipeline(pipe);
	int ret = 0, idx;

	if (drm_dev_enter(pipe->crtc.dev, &idx)) {
		ret = xen_drm_front_mode_set(pipeline, 0, 0, 0, 0, 0,
					     xen_drm_front_fb_to_cookie(NULL));
		drm_dev_exit(idx);
	}
	if (ret)
		DRM_ERROR("Failed to disable display: %d\n", ret);

	/* Make sure we can restart with enabled connector next time */
	pipeline->conn_connected = true;

	/* release stalled event if any */
	send_pending_event(pipeline);
}
157 
/*
 * Frame-done notification from the backend: cancel the page-flip
 * time-out worker and deliver the pending vblank event to user space.
 * NOTE(review): fb_cookie is currently unused by this handler.
 */
void xen_drm_front_kms_on_frame_done(struct xen_drm_front_drm_pipeline *pipeline,
				     u64 fb_cookie)
{
	/*
	 * This runs in interrupt context, e.g. under
	 * drm_info->front_info->io_lock, so we cannot call _sync version
	 * to cancel the work
	 */
	cancel_delayed_work(&pipeline->pflip_to_worker);

	send_pending_event(pipeline);
}
170 
171 static void pflip_to_worker(struct work_struct *work)
172 {
173 	struct delayed_work *delayed_work = to_delayed_work(work);
174 	struct xen_drm_front_drm_pipeline *pipeline =
175 			container_of(delayed_work,
176 				     struct xen_drm_front_drm_pipeline,
177 				     pflip_to_worker);
178 
179 	DRM_ERROR("Frame done timed-out, releasing");
180 	send_pending_event(pipeline);
181 }
182 
/*
 * Send a page flip request to the backend for a true flip (both old and
 * new plane states carry a framebuffer).
 *
 * Returns true when the flip was sent and the pending event will be
 * delivered later on the backend's frame-done notification; false when
 * no flip was sent (enable/disable transition or backend failure), in
 * which case the caller must deliver the pending event itself.
 */
static bool display_send_page_flip(struct drm_simple_display_pipe *pipe,
				   struct drm_plane_state *old_plane_state)
{
	struct drm_plane_state *plane_state =
			drm_atomic_get_new_plane_state(old_plane_state->state,
						       &pipe->plane);

	/*
	 * If old_plane_state->fb is NULL and plane_state->fb is not,
	 * then this is an atomic commit which will enable display.
	 * If old_plane_state->fb is not NULL and plane_state->fb is,
	 * then this is an atomic commit which will disable display.
	 * Ignore these and do not send page flip as this framebuffer will be
	 * sent to the backend as a part of display_set_config call.
	 */
	if (old_plane_state->fb && plane_state->fb) {
		struct xen_drm_front_drm_pipeline *pipeline =
				to_xen_drm_pipeline(pipe);
		struct xen_drm_front_drm_info *drm_info = pipeline->drm_info;
		int ret;

		/* Arm the time-out before the request so it cannot be missed. */
		schedule_delayed_work(&pipeline->pflip_to_worker,
				      msecs_to_jiffies(FRAME_DONE_TO_MS));

		ret = xen_drm_front_page_flip(drm_info->front_info,
					      pipeline->index,
					      xen_drm_front_fb_to_cookie(plane_state->fb));
		if (ret) {
			DRM_ERROR("Failed to send page flip request to backend: %d\n", ret);

			pipeline->conn_connected = false;
			/*
			 * Report the flip not handled, so pending event is
			 * sent, unblocking user-space.
			 */
			return false;
		}
		/*
		 * Signal that page flip was handled, pending event will be sent
		 * on frame done event from the backend.
		 */
		return true;
	}

	return false;
}
229 
/*
 * Simple display pipe .update hook: stash the CRTC's vblank event, then
 * either hand it off to the frame-done path via a page flip request or
 * deliver it immediately.
 */
static void display_update(struct drm_simple_display_pipe *pipe,
			   struct drm_plane_state *old_plane_state)
{
	struct xen_drm_front_drm_pipeline *pipeline =
			to_xen_drm_pipeline(pipe);
	struct drm_crtc *crtc = &pipe->crtc;
	struct drm_pending_vblank_event *event;
	int idx;

	event = crtc->state->event;
	if (event) {
		struct drm_device *dev = crtc->dev;
		unsigned long flags;

		/* A previous event must have been delivered by now. */
		WARN_ON(pipeline->pending_event);

		/* Take ownership of the event under the event lock. */
		spin_lock_irqsave(&dev->event_lock, flags);
		crtc->state->event = NULL;

		pipeline->pending_event = event;
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	/* Device unplugged: just release the event to unblock user space. */
	if (!drm_dev_enter(pipe->crtc.dev, &idx)) {
		send_pending_event(pipeline);
		return;
	}

	/*
	 * Send page flip request to the backend *after* we have event cached
	 * above, so on page flip done event from the backend we can
	 * deliver it and there is no race condition between this code and
	 * event from the backend.
	 * If this is not a page flip, e.g. no flip done event from the backend
	 * is expected, then send now.
	 */
	if (!display_send_page_flip(pipe, old_plane_state))
		send_pending_event(pipeline);

	drm_dev_exit(idx);
}
271 
272 static enum drm_mode_status
273 display_mode_valid(struct drm_simple_display_pipe *pipe,
274 		   const struct drm_display_mode *mode)
275 {
276 	struct xen_drm_front_drm_pipeline *pipeline =
277 			container_of(pipe, struct xen_drm_front_drm_pipeline,
278 				     pipe);
279 
280 	if (mode->hdisplay != pipeline->width)
281 		return MODE_ERROR;
282 
283 	if (mode->vdisplay != pipeline->height)
284 		return MODE_ERROR;
285 
286 	return MODE_OK;
287 }
288 
/* Simple display pipe ops wired to the para-virtual backend. */
static const struct drm_simple_display_pipe_funcs display_funcs = {
	.mode_valid = display_mode_valid,
	.enable = display_enable,
	.disable = display_disable,
	.prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
	.update = display_update,
};
296 
297 static int display_pipe_init(struct xen_drm_front_drm_info *drm_info,
298 			     int index, struct xen_drm_front_cfg_connector *cfg,
299 			     struct xen_drm_front_drm_pipeline *pipeline)
300 {
301 	struct drm_device *dev = drm_info->drm_dev;
302 	const u32 *formats;
303 	int format_count;
304 	int ret;
305 
306 	pipeline->drm_info = drm_info;
307 	pipeline->index = index;
308 	pipeline->height = cfg->height;
309 	pipeline->width = cfg->width;
310 
311 	INIT_DELAYED_WORK(&pipeline->pflip_to_worker, pflip_to_worker);
312 
313 	ret = xen_drm_front_conn_init(drm_info, &pipeline->conn);
314 	if (ret)
315 		return ret;
316 
317 	formats = xen_drm_front_conn_get_formats(&format_count);
318 
319 	return drm_simple_display_pipe_init(dev, &pipeline->pipe,
320 					    &display_funcs, formats,
321 					    format_count, NULL,
322 					    &pipeline->conn);
323 }
324 
/*
 * Initialize KMS: set up the mode configuration and one display
 * pipeline per configured connector.
 *
 * Returns 0 on success or a negative error code; on failure the mode
 * configuration is cleaned up before returning.
 */
int xen_drm_front_kms_init(struct xen_drm_front_drm_info *drm_info)
{
	struct drm_device *dev = drm_info->drm_dev;
	int i, ret;

	drm_mode_config_init(dev);

	/* Limits accepted for user-provided modes/framebuffers. */
	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;
	dev->mode_config.max_width = 4095;
	dev->mode_config.max_height = 2047;
	dev->mode_config.funcs = &mode_config_funcs;

	for (i = 0; i < drm_info->front_info->cfg.num_connectors; i++) {
		struct xen_drm_front_cfg_connector *cfg =
				&drm_info->front_info->cfg.connectors[i];
		struct xen_drm_front_drm_pipeline *pipeline =
				&drm_info->pipeline[i];

		ret = display_pipe_init(drm_info, i, cfg, pipeline);
		if (ret) {
			drm_mode_config_cleanup(dev);
			return ret;
		}
	}

	drm_mode_config_reset(dev);
	drm_kms_helper_poll_init(dev);
	return 0;
}
355 
/*
 * Tear down KMS state: stop every pipeline's page-flip time-out worker
 * (sync cancel is safe here, not in interrupt context) and release any
 * vblank events still pending, so user space is not left blocked.
 */
void xen_drm_front_kms_fini(struct xen_drm_front_drm_info *drm_info)
{
	int i;

	for (i = 0; i < drm_info->front_info->cfg.num_connectors; i++) {
		struct xen_drm_front_drm_pipeline *pipeline =
				&drm_info->pipeline[i];

		cancel_delayed_work_sync(&pipeline->pflip_to_worker);

		send_pending_event(pipeline);
	}
}
369