// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 * Xen para-virtual DRM device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_gem.h>

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/of_device.h>

#include <xen/platform_pci.h>
#include <xen/xen.h>
#include <xen/xenbus.h>

#include <xen/xen-front-pgdir-shbuf.h>
#include <xen/interface/io/displif.h>

#include "xen_drm_front.h"
#include "xen_drm_front_cfg.h"
#include "xen_drm_front_evtchnl.h"
#include "xen_drm_front_gem.h"
#include "xen_drm_front_kms.h"

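/*
 * A display buffer shared with the backend: ties the DRM GEM object
 * (dbuf_cookie) and the framebuffer attached to it (fb_cookie) to the
 * page directory based shared buffer used to grant the buffer pages to
 * the other end.
 */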
struct xen_drm_front_dbuf {
	struct list_head list;
	u64 dbuf_cookie;
	u64 fb_cookie;

	struct xen_front_pgdir_shbuf shbuf;
};

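/*
 * Display buffer bookkeeping helpers: buffers are kept in
 * front_info->dbuf_list and looked up by their dbuf_cookie.
 */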
static void dbuf_add_to_list(struct xen_drm_front_info *front_info,
			     struct xen_drm_front_dbuf *dbuf, u64 dbuf_cookie)
{
	dbuf->dbuf_cookie = dbuf_cookie;
	list_add(&dbuf->list, &front_info->dbuf_list);
}

static struct xen_drm_front_dbuf *dbuf_get(struct list_head *dbuf_list,
					   u64 dbuf_cookie)
{
	struct xen_drm_front_dbuf *buf;

	list_for_each_entry(buf, dbuf_list, list)
		if (buf->dbuf_cookie == dbuf_cookie)
			return buf;

	return NULL;
}

static void dbuf_free(struct list_head *dbuf_list, u64 dbuf_cookie)
{
	struct xen_drm_front_dbuf *buf, *q;

	list_for_each_entry_safe(buf, q, dbuf_list, list)
		if (buf->dbuf_cookie == dbuf_cookie) {
			list_del(&buf->list);
			xen_front_pgdir_shbuf_unmap(&buf->shbuf);
			xen_front_pgdir_shbuf_free(&buf->shbuf);
			kfree(buf);
			break;
		}
}

static void dbuf_free_all(struct list_head *dbuf_list)
{
	struct xen_drm_front_dbuf *buf, *q;

	list_for_each_entry_safe(buf, q, dbuf_list, list) {
		list_del(&buf->list);
		xen_front_pgdir_shbuf_unmap(&buf->shbuf);
		xen_front_pgdir_shbuf_free(&buf->shbuf);
		kfree(buf);
	}
}

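/*
 * Get the next free request slot from the shared ring and assign it a
 * sequence id, so the response can later be matched against this request.
 * Called with front_info->io_lock held.
 */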
static struct xendispl_req *
be_prepare_req(struct xen_drm_front_evtchnl *evtchnl, u8 operation)
{
	struct xendispl_req *req;

	req = RING_GET_REQUEST(&evtchnl->u.req.ring,
			       evtchnl->u.req.ring.req_prod_pvt);
	req->operation = operation;
	req->id = evtchnl->evt_next_id++;
	evtchnl->evt_id = req->id;
	return req;
}

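/*
 * Re-arm the response completion and flush the prepared request to the
 * backend. Called with front_info->io_lock held; the caller waits for the
 * response via be_stream_wait_io() after dropping the lock.
 */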
static int be_stream_do_io(struct xen_drm_front_evtchnl *evtchnl,
			   struct xendispl_req *req)
{
	reinit_completion(&evtchnl->u.req.completion);
	if (unlikely(evtchnl->state != EVTCHNL_STATE_CONNECTED))
		return -EIO;

	xen_drm_front_evtchnl_flush(evtchnl);
	return 0;
}

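/*
 * Wait for the backend to respond to the last request sent on this
 * channel and return the status reported in the response.
 */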
static int be_stream_wait_io(struct xen_drm_front_evtchnl *evtchnl)
{
	if (wait_for_completion_timeout(&evtchnl->u.req.completion,
			msecs_to_jiffies(XEN_DRM_FRONT_WAIT_BACK_MS)) <= 0)
		return -ETIMEDOUT;

	return evtchnl->u.req.resp_status;
}

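/*
 * Send a XENDISPL_OP_SET_CONFIG request on the pipeline's request channel
 * to apply the given display configuration.
 */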
int xen_drm_front_mode_set(struct xen_drm_front_drm_pipeline *pipeline,
			   u32 x, u32 y, u32 width, u32 height,
			   u32 bpp, u64 fb_cookie)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xen_drm_front_info *front_info;
	struct xendispl_req *req;
	unsigned long flags;
	int ret;

	front_info = pipeline->drm_info->front_info;
	evtchnl = &front_info->evt_pairs[pipeline->index].req;
	if (unlikely(!evtchnl))
		return -EIO;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_SET_CONFIG);
	req->op.set_config.x = x;
	req->op.set_config.y = y;
	req->op.set_config.width = width;
	req->op.set_config.height = height;
	req->op.set_config.bpp = bpp;
	req->op.set_config.fb_cookie = fb_cookie;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}

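/*
 * Create a display buffer at the backend: share the buffer pages via a
 * page directory based shared buffer and send a XENDISPL_OP_DBUF_CREATE
 * request, asking the backend to allocate the buffer if be_alloc is set.
 */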
int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
			      u64 dbuf_cookie, u32 width, u32 height,
			      u32 bpp, u64 size, struct page **pages)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xen_drm_front_dbuf *dbuf;
	struct xendispl_req *req;
	struct xen_front_pgdir_shbuf_cfg buf_cfg;
	unsigned long flags;
	int ret;

	evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
	if (unlikely(!evtchnl))
		return -EIO;

	dbuf = kzalloc(sizeof(*dbuf), GFP_KERNEL);
	if (!dbuf)
		return -ENOMEM;

	dbuf_add_to_list(front_info, dbuf, dbuf_cookie);

	memset(&buf_cfg, 0, sizeof(buf_cfg));
	buf_cfg.xb_dev = front_info->xb_dev;
	buf_cfg.num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
	buf_cfg.pages = pages;
	buf_cfg.pgdir = &dbuf->shbuf;
	buf_cfg.be_alloc = front_info->cfg.be_alloc;

	ret = xen_front_pgdir_shbuf_alloc(&buf_cfg);
	if (ret < 0)
		goto fail_shbuf_alloc;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_DBUF_CREATE);
	req->op.dbuf_create.gref_directory =
			xen_front_pgdir_shbuf_get_dir_start(&dbuf->shbuf);
	req->op.dbuf_create.buffer_sz = size;
	req->op.dbuf_create.dbuf_cookie = dbuf_cookie;
	req->op.dbuf_create.width = width;
	req->op.dbuf_create.height = height;
	req->op.dbuf_create.bpp = bpp;
	if (buf_cfg.be_alloc)
		req->op.dbuf_create.flags |= XENDISPL_DBUF_FLG_REQ_ALLOC;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret < 0)
		goto fail;

	ret = be_stream_wait_io(evtchnl);
	if (ret < 0)
		goto fail;

	ret = xen_front_pgdir_shbuf_map(&dbuf->shbuf);
	if (ret < 0)
		goto fail;

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return 0;

fail:
	mutex_unlock(&evtchnl->u.req.req_io_lock);
fail_shbuf_alloc:
	dbuf_free(&front_info->dbuf_list, dbuf_cookie);
	return ret;
}

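/*
 * Destroy a display buffer: send a XENDISPL_OP_DBUF_DESTROY request and
 * drop the local bookkeeping for the buffer.
 */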
static int xen_drm_front_dbuf_destroy(struct xen_drm_front_info *front_info,
				      u64 dbuf_cookie)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xendispl_req *req;
	unsigned long flags;
	bool be_alloc;
	int ret;

	evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
	if (unlikely(!evtchnl))
		return -EIO;

	be_alloc = front_info->cfg.be_alloc;

	/*
	 * For a backend allocated buffer release references now, so the
	 * backend can free the buffer.
	 */
	if (be_alloc)
		dbuf_free(&front_info->dbuf_list, dbuf_cookie);

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_DBUF_DESTROY);
	req->op.dbuf_destroy.dbuf_cookie = dbuf_cookie;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	/*
	 * Do this regardless of the communication status with the backend:
	 * if we cannot remove the remote resources, remove what we can
	 * locally.
	 */
	if (!be_alloc)
		dbuf_free(&front_info->dbuf_list, dbuf_cookie);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}

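/*
 * Attach a DRM framebuffer (fb_cookie) to a previously created display
 * buffer (dbuf_cookie) with a XENDISPL_OP_FB_ATTACH request.
 */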
int xen_drm_front_fb_attach(struct xen_drm_front_info *front_info,
			    u64 dbuf_cookie, u64 fb_cookie, u32 width,
			    u32 height, u32 pixel_format)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xen_drm_front_dbuf *buf;
	struct xendispl_req *req;
	unsigned long flags;
	int ret;

	evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
	if (unlikely(!evtchnl))
		return -EIO;

	buf = dbuf_get(&front_info->dbuf_list, dbuf_cookie);
	if (!buf)
		return -EINVAL;

	buf->fb_cookie = fb_cookie;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_FB_ATTACH);
	req->op.fb_attach.dbuf_cookie = dbuf_cookie;
	req->op.fb_attach.fb_cookie = fb_cookie;
	req->op.fb_attach.width = width;
	req->op.fb_attach.height = height;
	req->op.fb_attach.pixel_format = pixel_format;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}

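/* Detach a framebuffer at the backend with a XENDISPL_OP_FB_DETACH request. */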
int xen_drm_front_fb_detach(struct xen_drm_front_info *front_info,
			    u64 fb_cookie)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xendispl_req *req;
	unsigned long flags;
	int ret;

	evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
	if (unlikely(!evtchnl))
		return -EIO;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_FB_DETACH);
	req->op.fb_detach.fb_cookie = fb_cookie;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}

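/*
 * Request a page flip to the framebuffer identified by fb_cookie on the
 * given connector's request channel.
 */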
int xen_drm_front_page_flip(struct xen_drm_front_info *front_info,
			    int conn_idx, u64 fb_cookie)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xendispl_req *req;
	unsigned long flags;
	int ret;

	if (unlikely(conn_idx >= front_info->num_evt_pairs))
		return -EINVAL;

	evtchnl = &front_info->evt_pairs[conn_idx].req;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_PG_FLIP);
	req->op.pg_flip.fb_cookie = fb_cookie;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}

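/*
 * Notification from the backend that a frame for the given connector has
 * been displayed; forward it to the KMS handling code.
 */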
void xen_drm_front_on_frame_done(struct xen_drm_front_info *front_info,
				 int conn_idx, u64 fb_cookie)
{
	struct xen_drm_front_drm_info *drm_info = front_info->drm_info;

	if (unlikely(conn_idx >= front_info->cfg.num_connectors))
		return;

	xen_drm_front_kms_on_frame_done(&drm_info->pipeline[conn_idx],
					fb_cookie);
}

static int xen_drm_drv_dumb_create(struct drm_file *filp,
				   struct drm_device *dev,
				   struct drm_mode_create_dumb *args)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct drm_gem_object *obj;
	int ret;

	/*
	 * Dumb creation is a two-stage process: first we create a fully
	 * constructed GEM object which is communicated to the backend, and
	 * only after that we can create GEM's handle. This is done this way
	 * because of possible races: once you create a handle it becomes
	 * immediately visible to user-space, so the latter can try accessing
	 * an object which does not have its pages yet.
	 * For details also see drm_gem_handle_create().
	 */
	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	args->size = args->pitch * args->height;

	obj = xen_drm_front_gem_create(dev, args->size);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail;
	}

	ret = xen_drm_front_dbuf_create(drm_info->front_info,
					xen_drm_front_dbuf_to_cookie(obj),
					args->width, args->height, args->bpp,
					args->size,
					xen_drm_front_gem_get_pages(obj));
	if (ret)
		goto fail_backend;

	/* This is the tail of GEM object creation */
	ret = drm_gem_handle_create(filp, obj, &args->handle);
	if (ret)
		goto fail_handle;

	/* Drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(obj);
	return 0;

fail_handle:
	xen_drm_front_dbuf_destroy(drm_info->front_info,
				   xen_drm_front_dbuf_to_cookie(obj));
fail_backend:
	/* drop reference from allocate */
	drm_gem_object_put_unlocked(obj);
fail:
	DRM_ERROR("Failed to create dumb buffer: %d\n", ret);
	return ret;
}

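/*
 * Free a GEM object: tell the backend to destroy the corresponding display
 * buffer if the DRM device is still alive, otherwise only drop the local
 * bookkeeping, then release the GEM object itself.
 */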
static void xen_drm_drv_free_object_unlocked(struct drm_gem_object *obj)
{
	struct xen_drm_front_drm_info *drm_info = obj->dev->dev_private;
	int idx;

	if (drm_dev_enter(obj->dev, &idx)) {
		xen_drm_front_dbuf_destroy(drm_info->front_info,
					   xen_drm_front_dbuf_to_cookie(obj));
		drm_dev_exit(idx);
	} else {
		dbuf_free(&drm_info->front_info->dbuf_list,
			  xen_drm_front_dbuf_to_cookie(obj));
	}

	xen_drm_front_gem_free_object_unlocked(obj);
}

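/*
 * Final release of the DRM device, called when the last reference is
 * dropped: tear down KMS, free the device and, for the backend allocated
 * buffer case, tell the backend we are ready to (re)initialize.
 */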
static void xen_drm_drv_release(struct drm_device *dev)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct xen_drm_front_info *front_info = drm_info->front_info;

	xen_drm_front_kms_fini(drm_info);

	drm_atomic_helper_shutdown(dev);
	drm_mode_config_cleanup(dev);

	drm_dev_fini(dev);
	kfree(dev);

	if (front_info->cfg.be_alloc)
		xenbus_switch_state(front_info->xb_dev,
				    XenbusStateInitialising);

	kfree(drm_info);
}

static const struct file_operations xen_drm_dev_fops = {
	.owner          = THIS_MODULE,
	.open           = drm_open,
	.release        = drm_release,
	.unlocked_ioctl = drm_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = drm_compat_ioctl,
#endif
	.poll           = drm_poll,
	.read           = drm_read,
	.llseek         = no_llseek,
	.mmap           = xen_drm_front_gem_mmap,
};

static const struct vm_operations_struct xen_drm_drv_vm_ops = {
	.open           = drm_gem_vm_open,
	.close          = drm_gem_vm_close,
};

static struct drm_driver xen_drm_driver = {
	.driver_features           = DRIVER_GEM | DRIVER_MODESET |
				     DRIVER_PRIME | DRIVER_ATOMIC,
	.release                   = xen_drm_drv_release,
	.gem_vm_ops                = &xen_drm_drv_vm_ops,
	.gem_free_object_unlocked  = xen_drm_drv_free_object_unlocked,
	.prime_handle_to_fd        = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle        = drm_gem_prime_fd_to_handle,
	.gem_prime_import          = drm_gem_prime_import,
	.gem_prime_export          = drm_gem_prime_export,
	.gem_prime_import_sg_table = xen_drm_front_gem_import_sg_table,
	.gem_prime_get_sg_table    = xen_drm_front_gem_get_sg_table,
	.gem_prime_vmap            = xen_drm_front_gem_prime_vmap,
	.gem_prime_vunmap          = xen_drm_front_gem_prime_vunmap,
	.gem_prime_mmap            = xen_drm_front_gem_prime_mmap,
	.dumb_create               = xen_drm_drv_dumb_create,
	.fops                      = &xen_drm_dev_fops,
	.name                      = "xendrm-du",
	.desc                      = "Xen PV DRM Display Unit",
	.date                      = "20180221",
	.major                     = 1,
	.minor                     = 0,
};

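/*
 * Create and register the DRM device once the backend is connected:
 * allocate the driver private data, initialize KMS and register the
 * device with the DRM core.
 */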
static int xen_drm_drv_init(struct xen_drm_front_info *front_info)
{
	struct device *dev = &front_info->xb_dev->dev;
	struct xen_drm_front_drm_info *drm_info;
	struct drm_device *drm_dev;
	int ret;

	DRM_INFO("Creating %s\n", xen_drm_driver.desc);

	drm_info = kzalloc(sizeof(*drm_info), GFP_KERNEL);
	if (!drm_info) {
		ret = -ENOMEM;
		goto fail;
	}

	drm_info->front_info = front_info;
	front_info->drm_info = drm_info;

	drm_dev = drm_dev_alloc(&xen_drm_driver, dev);
	if (IS_ERR(drm_dev)) {
		ret = PTR_ERR(drm_dev);
		goto fail;
	}

	drm_info->drm_dev = drm_dev;

	drm_dev->dev_private = drm_info;

	ret = xen_drm_front_kms_init(drm_info);
	if (ret) {
		DRM_ERROR("Failed to initialize DRM/KMS, ret %d\n", ret);
		goto fail_modeset;
	}

	ret = drm_dev_register(drm_dev, 0);
	if (ret)
		goto fail_register;

	DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
		 xen_drm_driver.name, xen_drm_driver.major,
		 xen_drm_driver.minor, xen_drm_driver.patchlevel,
		 xen_drm_driver.date, drm_dev->primary->index);

	return 0;

fail_register:
	drm_dev_unregister(drm_dev);
fail_modeset:
	drm_kms_helper_poll_fini(drm_dev);
	drm_mode_config_cleanup(drm_dev);
fail:
	kfree(drm_info);
	return ret;
}

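/*
 * Undo xen_drm_drv_init(): unplug the DRM device, drop our reference to
 * it, free all event channels and display buffers and, if the buffers are
 * not backend allocated, tell the backend we are ready to (re)initialize.
 */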
static void xen_drm_drv_fini(struct xen_drm_front_info *front_info)
{
	struct xen_drm_front_drm_info *drm_info = front_info->drm_info;
	struct drm_device *dev;

	if (!drm_info)
		return;

	dev = drm_info->drm_dev;
	if (!dev)
		return;

	/* Nothing to do if device is already unplugged */
	if (drm_dev_is_unplugged(dev))
		return;

	drm_kms_helper_poll_fini(dev);
	drm_dev_unplug(dev);
	drm_dev_put(dev);

	front_info->drm_info = NULL;

	xen_drm_front_evtchnl_free_all(front_info);
	dbuf_free_all(&front_info->dbuf_list);

	/*
	 * If we are not using backend allocated buffers, then tell the
	 * backend we are ready to (re)initialize. Otherwise, wait for
	 * drm_driver.release.
	 */
	if (!front_info->cfg.be_alloc)
		xenbus_switch_state(front_info->xb_dev,
				    XenbusStateInitialising);
}

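/*
 * Backend is in XenbusStateInitWait: read the display configuration,
 * then create and publish the event channels for all connectors.
 */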
static int displback_initwait(struct xen_drm_front_info *front_info)
{
	struct xen_drm_front_cfg *cfg = &front_info->cfg;
	int ret;

	cfg->front_info = front_info;
	ret = xen_drm_front_cfg_card(front_info, cfg);
	if (ret < 0)
		return ret;

	DRM_INFO("Have %d connector(s)\n", cfg->num_connectors);
	/* Create event channels for all connectors and publish */
	ret = xen_drm_front_evtchnl_create_all(front_info);
	if (ret < 0)
		return ret;

	return xen_drm_front_evtchnl_publish_all(front_info);
}

static int displback_connect(struct xen_drm_front_info *front_info)
{
	xen_drm_front_evtchnl_set_state(front_info, EVTCHNL_STATE_CONNECTED);
	return xen_drm_drv_init(front_info);
}

static void displback_disconnect(struct xen_drm_front_info *front_info)
{
	if (!front_info->drm_info)
		return;

	/* Tell the backend to wait until we release the DRM driver. */
	xenbus_switch_state(front_info->xb_dev, XenbusStateReconfiguring);

	xen_drm_drv_fini(front_info);
}

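/*
 * XenBus "otherend changed" handler: drive the frontend state machine in
 * response to backend state changes.
 */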
static void displback_changed(struct xenbus_device *xb_dev,
			      enum xenbus_state backend_state)
{
	struct xen_drm_front_info *front_info = dev_get_drvdata(&xb_dev->dev);
	int ret;

	DRM_DEBUG("Backend state is %s, front is %s\n",
		  xenbus_strstate(backend_state),
		  xenbus_strstate(xb_dev->state));

	switch (backend_state) {
	case XenbusStateReconfiguring:
		/* fall through */
	case XenbusStateReconfigured:
		/* fall through */
	case XenbusStateInitialised:
		break;

	case XenbusStateInitialising:
		if (xb_dev->state == XenbusStateReconfiguring)
			break;

		/* recovering after an unexpected backend closure */
		displback_disconnect(front_info);
		break;

	case XenbusStateInitWait:
		if (xb_dev->state == XenbusStateReconfiguring)
			break;

		/* recovering after an unexpected backend closure */
		displback_disconnect(front_info);
		if (xb_dev->state != XenbusStateInitialising)
			break;

		ret = displback_initwait(front_info);
		if (ret < 0)
			xenbus_dev_fatal(xb_dev, ret, "initializing frontend");
		else
			xenbus_switch_state(xb_dev, XenbusStateInitialised);
		break;

	case XenbusStateConnected:
		if (xb_dev->state != XenbusStateInitialised)
			break;

		ret = displback_connect(front_info);
		if (ret < 0) {
			displback_disconnect(front_info);
			xenbus_dev_fatal(xb_dev, ret, "connecting backend");
		} else {
			xenbus_switch_state(xb_dev, XenbusStateConnected);
		}
		break;

	case XenbusStateClosing:
		/*
		 * In this state the backend starts freeing resources, so let
		 * it go into the closed state, so that we can also remove
		 * ours.
		 */
		break;

	case XenbusStateUnknown:
		/* fall through */
	case XenbusStateClosed:
		if (xb_dev->state == XenbusStateClosed)
			break;

		displback_disconnect(front_info);
		break;
	}
}

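/*
 * Driver probe: set up the DMA mask, allocate the per-device state and
 * start the handshake with the backend by switching to
 * XenbusStateInitialising.
 */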
static int xen_drv_probe(struct xenbus_device *xb_dev,
			 const struct xenbus_device_id *id)
{
	struct xen_drm_front_info *front_info;
	struct device *dev = &xb_dev->dev;
	int ret;

	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret < 0) {
		DRM_ERROR("Cannot setup DMA mask, ret %d", ret);
		return ret;
	}

	front_info = devm_kzalloc(&xb_dev->dev,
				  sizeof(*front_info), GFP_KERNEL);
	if (!front_info)
		return -ENOMEM;

	front_info->xb_dev = xb_dev;
	spin_lock_init(&front_info->io_lock);
	INIT_LIST_HEAD(&front_info->dbuf_list);
	dev_set_drvdata(&xb_dev->dev, front_info);

	return xenbus_switch_state(xb_dev, XenbusStateInitialising);
}

static int xen_drv_remove(struct xenbus_device *dev)
{
	struct xen_drm_front_info *front_info = dev_get_drvdata(&dev->dev);
	int to = 100;

	xenbus_switch_state(dev, XenbusStateClosing);

	/*
	 * On driver removal it is disconnected from XenBus,
	 * so no backend state change events come in via the
	 * .otherend_changed callback. This prevents us from exiting
	 * gracefully, e.g. signaling the backend to free event channels,
	 * waiting for its state to change to XenbusStateClosed and cleaning
	 * up at our end. Normally, when the front driver is removed, the
	 * backend will finally go into the XenbusStateInitWait state.
	 *
	 * Workaround: read the backend's state manually and wait with a
	 * time-out.
	 */
	while ((xenbus_read_unsigned(front_info->xb_dev->otherend, "state",
				     XenbusStateUnknown) != XenbusStateInitWait) &&
				     --to)
		msleep(10);

	if (!to) {
		unsigned int state;

		state = xenbus_read_unsigned(front_info->xb_dev->otherend,
					     "state", XenbusStateUnknown);
		DRM_ERROR("Backend state is %s while removing driver\n",
			  xenbus_strstate(state));
	}

	xen_drm_drv_fini(front_info);
	xenbus_frontend_closed(dev);
	return 0;
}

static const struct xenbus_device_id xen_driver_ids[] = {
	{ XENDISPL_DRIVER_NAME },
	{ "" }
};

static struct xenbus_driver xen_driver = {
	.ids = xen_driver_ids,
	.probe = xen_drv_probe,
	.remove = xen_drv_remove,
	.otherend_changed = displback_changed,
};

static int __init xen_drv_init(void)
{
	/*
	 * At the moment we only support the case where
	 * XEN_PAGE_SIZE == PAGE_SIZE.
	 */
	if (XEN_PAGE_SIZE != PAGE_SIZE) {
		DRM_ERROR(XENDISPL_DRIVER_NAME ": different kernel and Xen page sizes are not supported: XEN_PAGE_SIZE (%lu) != PAGE_SIZE (%lu)\n",
			  XEN_PAGE_SIZE, PAGE_SIZE);
		return -ENODEV;
	}

	if (!xen_domain())
		return -ENODEV;

	if (!xen_has_pv_devices())
		return -ENODEV;

	DRM_INFO("Registering XEN PV " XENDISPL_DRIVER_NAME "\n");
	return xenbus_register_frontend(&xen_driver);
}

static void __exit xen_drv_fini(void)
{
	DRM_INFO("Unregistering XEN PV " XENDISPL_DRIVER_NAME "\n");
	xenbus_unregister_driver(&xen_driver);
}

module_init(xen_drv_init);
module_exit(xen_drv_fini);

MODULE_DESCRIPTION("Xen para-virtualized display device frontend");
MODULE_LICENSE("GPL");
MODULE_ALIAS("xen:" XENDISPL_DRIVER_NAME);