// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 *  Xen para-virtual DRM device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/of_device.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>

#include <xen/platform_pci.h>
#include <xen/xen.h>
#include <xen/xenbus.h>

#include <xen/xen-front-pgdir-shbuf.h>
#include <xen/interface/io/displif.h>

#include "xen_drm_front.h"
#include "xen_drm_front_cfg.h"
#include "xen_drm_front_evtchnl.h"
#include "xen_drm_front_gem.h"
#include "xen_drm_front_kms.h"

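/*
 * Bookkeeping for a display buffer: binds the DRM buffer cookie and its
 * framebuffer cookie to the shared buffer which carries the buffer pages
 * to/from the backend.
 */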
struct xen_drm_front_dbuf {
	struct list_head list;
	u64 dbuf_cookie;
	u64 fb_cookie;

	struct xen_front_pgdir_shbuf shbuf;
};

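/*
 * Helpers for the list of display buffers: entries are looked up by the
 * buffer cookie; freeing an entry unmaps and releases its shared pages.
 */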
static void dbuf_add_to_list(struct xen_drm_front_info *front_info,
			     struct xen_drm_front_dbuf *dbuf, u64 dbuf_cookie)
{
	dbuf->dbuf_cookie = dbuf_cookie;
	list_add(&dbuf->list, &front_info->dbuf_list);
}

static struct xen_drm_front_dbuf *dbuf_get(struct list_head *dbuf_list,
					   u64 dbuf_cookie)
{
	struct xen_drm_front_dbuf *buf, *q;

	list_for_each_entry_safe(buf, q, dbuf_list, list)
		if (buf->dbuf_cookie == dbuf_cookie)
			return buf;

	return NULL;
}

static void dbuf_free(struct list_head *dbuf_list, u64 dbuf_cookie)
{
	struct xen_drm_front_dbuf *buf, *q;

	list_for_each_entry_safe(buf, q, dbuf_list, list)
		if (buf->dbuf_cookie == dbuf_cookie) {
			list_del(&buf->list);
			xen_front_pgdir_shbuf_unmap(&buf->shbuf);
			xen_front_pgdir_shbuf_free(&buf->shbuf);
			kfree(buf);
			break;
		}
}

static void dbuf_free_all(struct list_head *dbuf_list)
{
	struct xen_drm_front_dbuf *buf, *q;

	list_for_each_entry_safe(buf, q, dbuf_list, list) {
		list_del(&buf->list);
		xen_front_pgdir_shbuf_unmap(&buf->shbuf);
		xen_front_pgdir_shbuf_free(&buf->shbuf);
		kfree(buf);
	}
}

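/*
 * Reserve the next request slot on the shared ring and assign it a unique
 * id, so the backend's response can be matched to this request later.
 */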
static struct xendispl_req *
be_prepare_req(struct xen_drm_front_evtchnl *evtchnl, u8 operation)
{
	struct xendispl_req *req;

	req = RING_GET_REQUEST(&evtchnl->u.req.ring,
			       evtchnl->u.req.ring.req_prod_pvt);
	req->operation = operation;
	req->id = evtchnl->evt_next_id++;
	evtchnl->evt_id = req->id;
	return req;
}

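/* Submit a prepared request to the backend. Called with io_lock held. */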
static int be_stream_do_io(struct xen_drm_front_evtchnl *evtchnl,
			   struct xendispl_req *req)
{
	reinit_completion(&evtchnl->u.req.completion);
	if (unlikely(evtchnl->state != EVTCHNL_STATE_CONNECTED))
		return -EIO;

	xen_drm_front_evtchnl_flush(evtchnl);
	return 0;
}

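/* Wait for the backend to respond; returns the response status or -ETIMEDOUT. */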
static int be_stream_wait_io(struct xen_drm_front_evtchnl *evtchnl)
{
	if (wait_for_completion_timeout(&evtchnl->u.req.completion,
			msecs_to_jiffies(XEN_DRM_FRONT_WAIT_BACK_MS)) <= 0)
		return -ETIMEDOUT;

	return evtchnl->u.req.resp_status;
}

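/* Ask the backend to set the display configuration of a pipeline (connector). */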
int xen_drm_front_mode_set(struct xen_drm_front_drm_pipeline *pipeline,
			   u32 x, u32 y, u32 width, u32 height,
			   u32 bpp, u64 fb_cookie)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xen_drm_front_info *front_info;
	struct xendispl_req *req;
	unsigned long flags;
	int ret;

	front_info = pipeline->drm_info->front_info;
	evtchnl = &front_info->evt_pairs[pipeline->index].req;
	if (unlikely(!evtchnl))
		return -EIO;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_SET_CONFIG);
	req->op.set_config.x = x;
	req->op.set_config.y = y;
	req->op.set_config.width = width;
	req->op.set_config.height = height;
	req->op.set_config.bpp = bpp;
	req->op.set_config.fb_cookie = fb_cookie;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}

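/*
 * Create a display buffer at the backend: the buffer pages are shared via
 * a page-directory based shared buffer and XENDISPL_OP_DBUF_CREATE is sent
 * over the generic request ring.
 */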
int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
			      u64 dbuf_cookie, u32 width, u32 height,
			      u32 bpp, u64 size, struct page **pages)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xen_drm_front_dbuf *dbuf;
	struct xendispl_req *req;
	struct xen_front_pgdir_shbuf_cfg buf_cfg;
	unsigned long flags;
	int ret;

	evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
	if (unlikely(!evtchnl))
		return -EIO;

	dbuf = kzalloc(sizeof(*dbuf), GFP_KERNEL);
	if (!dbuf)
		return -ENOMEM;

	dbuf_add_to_list(front_info, dbuf, dbuf_cookie);

	memset(&buf_cfg, 0, sizeof(buf_cfg));
	buf_cfg.xb_dev = front_info->xb_dev;
	buf_cfg.num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
	buf_cfg.pages = pages;
	buf_cfg.pgdir = &dbuf->shbuf;
	buf_cfg.be_alloc = front_info->cfg.be_alloc;

	ret = xen_front_pgdir_shbuf_alloc(&buf_cfg);
	if (ret < 0)
		goto fail_shbuf_alloc;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_DBUF_CREATE);
	req->op.dbuf_create.gref_directory =
			xen_front_pgdir_shbuf_get_dir_start(&dbuf->shbuf);
	req->op.dbuf_create.buffer_sz = size;
	req->op.dbuf_create.dbuf_cookie = dbuf_cookie;
	req->op.dbuf_create.width = width;
	req->op.dbuf_create.height = height;
	req->op.dbuf_create.bpp = bpp;
	if (buf_cfg.be_alloc)
		req->op.dbuf_create.flags |= XENDISPL_DBUF_FLG_REQ_ALLOC;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret < 0)
		goto fail;

	ret = be_stream_wait_io(evtchnl);
	if (ret < 0)
		goto fail;

	ret = xen_front_pgdir_shbuf_map(&dbuf->shbuf);
	if (ret < 0)
		goto fail;

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return 0;

fail:
	mutex_unlock(&evtchnl->u.req.req_io_lock);
fail_shbuf_alloc:
	dbuf_free(&front_info->dbuf_list, dbuf_cookie);
	return ret;
}

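/* Ask the backend to destroy a display buffer and release local bookkeeping. */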
static int xen_drm_front_dbuf_destroy(struct xen_drm_front_info *front_info,
				      u64 dbuf_cookie)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xendispl_req *req;
	unsigned long flags;
	bool be_alloc;
	int ret;

	evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
	if (unlikely(!evtchnl))
		return -EIO;

	be_alloc = front_info->cfg.be_alloc;

	/*
	 * For a backend-allocated buffer release the references now, so the
	 * backend can free the buffer.
	 */
	if (be_alloc)
		dbuf_free(&front_info->dbuf_list, dbuf_cookie);

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_DBUF_DESTROY);
	req->op.dbuf_destroy.dbuf_cookie = dbuf_cookie;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	/*
	 * Do this regardless of the communication status with the backend:
	 * if we cannot remove the remote resources, remove what we can
	 * locally.
	 */
	if (!be_alloc)
		dbuf_free(&front_info->dbuf_list, dbuf_cookie);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}

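/* Attach a framebuffer (fb_cookie) to an already created display buffer. */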
int xen_drm_front_fb_attach(struct xen_drm_front_info *front_info,
			    u64 dbuf_cookie, u64 fb_cookie, u32 width,
			    u32 height, u32 pixel_format)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xen_drm_front_dbuf *buf;
	struct xendispl_req *req;
	unsigned long flags;
	int ret;

	evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
	if (unlikely(!evtchnl))
		return -EIO;

	buf = dbuf_get(&front_info->dbuf_list, dbuf_cookie);
	if (!buf)
		return -EINVAL;

	buf->fb_cookie = fb_cookie;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_FB_ATTACH);
	req->op.fb_attach.dbuf_cookie = dbuf_cookie;
	req->op.fb_attach.fb_cookie = fb_cookie;
	req->op.fb_attach.width = width;
	req->op.fb_attach.height = height;
	req->op.fb_attach.pixel_format = pixel_format;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}

int xen_drm_front_fb_detach(struct xen_drm_front_info *front_info,
			    u64 fb_cookie)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xendispl_req *req;
	unsigned long flags;
	int ret;

	evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
	if (unlikely(!evtchnl))
		return -EIO;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_FB_DETACH);
	req->op.fb_detach.fb_cookie = fb_cookie;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}

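/* Request a page flip to the given framebuffer on connector conn_idx. */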
int xen_drm_front_page_flip(struct xen_drm_front_info *front_info,
			    int conn_idx, u64 fb_cookie)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xendispl_req *req;
	unsigned long flags;
	int ret;

	if (unlikely(conn_idx >= front_info->num_evt_pairs))
		return -EINVAL;

	evtchnl = &front_info->evt_pairs[conn_idx].req;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_PG_FLIP);
	req->op.pg_flip.fb_cookie = fb_cookie;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}

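/*
 * Frame done notification from the backend: forward it to the KMS code of
 * the corresponding pipeline.
 */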
void xen_drm_front_on_frame_done(struct xen_drm_front_info *front_info,
				 int conn_idx, u64 fb_cookie)
{
	struct xen_drm_front_drm_info *drm_info = front_info->drm_info;

	if (unlikely(conn_idx >= front_info->cfg.num_connectors))
		return;

	xen_drm_front_kms_on_frame_done(&drm_info->pipeline[conn_idx],
					fb_cookie);
}

static int xen_drm_drv_dumb_create(struct drm_file *filp,
				   struct drm_device *dev,
				   struct drm_mode_create_dumb *args)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct drm_gem_object *obj;
	int ret;

	/*
	 * Dumb creation is a two stage process: first we create a fully
	 * constructed GEM object which is communicated to the backend, and
	 * only after that we can create the GEM's handle. This is done
	 * because of possible races: once a handle is created it becomes
	 * immediately visible to user-space, which could then try to access
	 * the object before its pages are set up etc.
	 * For details also see drm_gem_handle_create().
	 */
	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	args->size = args->pitch * args->height;

	obj = xen_drm_front_gem_create(dev, args->size);
	if (IS_ERR_OR_NULL(obj)) {
		ret = PTR_ERR(obj);
		goto fail;
	}

	ret = xen_drm_front_dbuf_create(drm_info->front_info,
					xen_drm_front_dbuf_to_cookie(obj),
					args->width, args->height, args->bpp,
					args->size,
					xen_drm_front_gem_get_pages(obj));
	if (ret)
		goto fail_backend;

	/* This is the tail of the GEM object creation */
	ret = drm_gem_handle_create(filp, obj, &args->handle);
	if (ret)
		goto fail_handle;

	/* Drop the reference from allocation - the handle holds it now */
	drm_gem_object_put_unlocked(obj);
	return 0;

fail_handle:
	xen_drm_front_dbuf_destroy(drm_info->front_info,
				   xen_drm_front_dbuf_to_cookie(obj));
fail_backend:
	/* drop the reference from allocation */
	drm_gem_object_put_unlocked(obj);
fail:
	DRM_ERROR("Failed to create dumb buffer: %d\n", ret);
	return ret;
}

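/*
 * Free a GEM object: if the DRM device is still alive, ask the backend to
 * destroy the display buffer first; otherwise only drop the local
 * bookkeeping.
 */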
static void xen_drm_drv_free_object_unlocked(struct drm_gem_object *obj)
{
	struct xen_drm_front_drm_info *drm_info = obj->dev->dev_private;
	int idx;

	if (drm_dev_enter(obj->dev, &idx)) {
		xen_drm_front_dbuf_destroy(drm_info->front_info,
					   xen_drm_front_dbuf_to_cookie(obj));
		drm_dev_exit(idx);
	} else {
		dbuf_free(&drm_info->front_info->dbuf_list,
			  xen_drm_front_dbuf_to_cookie(obj));
	}

	xen_drm_front_gem_free_object_unlocked(obj);
}

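/*
 * Final release of the DRM device, invoked when its last reference is
 * dropped: tear down KMS and, for backend allocated buffers, tell the
 * backend we are ready to (re)initialize.
 */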
static void xen_drm_drv_release(struct drm_device *dev)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct xen_drm_front_info *front_info = drm_info->front_info;

	xen_drm_front_kms_fini(drm_info);

	drm_atomic_helper_shutdown(dev);
	drm_mode_config_cleanup(dev);

	drm_dev_fini(dev);
	kfree(dev);

	if (front_info->cfg.be_alloc)
		xenbus_switch_state(front_info->xb_dev,
				    XenbusStateInitialising);

	kfree(drm_info);
}

static const struct file_operations xen_drm_dev_fops = {
	.owner          = THIS_MODULE,
	.open           = drm_open,
	.release        = drm_release,
	.unlocked_ioctl = drm_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = drm_compat_ioctl,
#endif
	.poll           = drm_poll,
	.read           = drm_read,
	.llseek         = no_llseek,
	.mmap           = xen_drm_front_gem_mmap,
};

static const struct vm_operations_struct xen_drm_drv_vm_ops = {
	.open           = drm_gem_vm_open,
	.close          = drm_gem_vm_close,
};

static struct drm_driver xen_drm_driver = {
	.driver_features           = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	.release                   = xen_drm_drv_release,
	.gem_vm_ops                = &xen_drm_drv_vm_ops,
	.gem_free_object_unlocked  = xen_drm_drv_free_object_unlocked,
	.prime_handle_to_fd        = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle        = drm_gem_prime_fd_to_handle,
	.gem_prime_import_sg_table = xen_drm_front_gem_import_sg_table,
	.gem_prime_get_sg_table    = xen_drm_front_gem_get_sg_table,
	.gem_prime_vmap            = xen_drm_front_gem_prime_vmap,
	.gem_prime_vunmap          = xen_drm_front_gem_prime_vunmap,
	.gem_prime_mmap            = xen_drm_front_gem_prime_mmap,
	.dumb_create               = xen_drm_drv_dumb_create,
	.fops                      = &xen_drm_dev_fops,
	.name                      = "xendrm-du",
	.desc                      = "Xen PV DRM Display Unit",
	.date                      = "20180221",
	.major                     = 1,
	.minor                     = 0,
};

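/*
 * Create and register the DRM device: run when the backend has switched to
 * the connected state.
 */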
static int xen_drm_drv_init(struct xen_drm_front_info *front_info)
{
	struct device *dev = &front_info->xb_dev->dev;
	struct xen_drm_front_drm_info *drm_info;
	struct drm_device *drm_dev;
	int ret;

	DRM_INFO("Creating %s\n", xen_drm_driver.desc);

	drm_info = kzalloc(sizeof(*drm_info), GFP_KERNEL);
	if (!drm_info) {
		ret = -ENOMEM;
		goto fail;
	}

	drm_info->front_info = front_info;
	front_info->drm_info = drm_info;

	drm_dev = drm_dev_alloc(&xen_drm_driver, dev);
	if (IS_ERR(drm_dev)) {
		ret = PTR_ERR(drm_dev);
		goto fail;
	}

	drm_info->drm_dev = drm_dev;

	drm_dev->dev_private = drm_info;

	ret = xen_drm_front_kms_init(drm_info);
	if (ret) {
		DRM_ERROR("Failed to initialize DRM/KMS, ret %d\n", ret);
		goto fail_modeset;
	}

	ret = drm_dev_register(drm_dev, 0);
	if (ret)
		goto fail_register;

	DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
		 xen_drm_driver.name, xen_drm_driver.major,
		 xen_drm_driver.minor, xen_drm_driver.patchlevel,
		 xen_drm_driver.date, drm_dev->primary->index);

	return 0;

fail_register:
	drm_dev_unregister(drm_dev);
fail_modeset:
	drm_kms_helper_poll_fini(drm_dev);
	drm_mode_config_cleanup(drm_dev);
fail:
	kfree(drm_info);
	return ret;
}

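/*
 * Tear down the DRM device on backend disconnect: unplug it and free the
 * event channels and display buffers. The rest of the cleanup happens in
 * xen_drm_drv_release() once the last reference to the device is dropped.
 */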
static void xen_drm_drv_fini(struct xen_drm_front_info *front_info)
{
	struct xen_drm_front_drm_info *drm_info = front_info->drm_info;
	struct drm_device *dev;

	if (!drm_info)
		return;

	dev = drm_info->drm_dev;
	if (!dev)
		return;

	/* Nothing to do if device is already unplugged */
	if (drm_dev_is_unplugged(dev))
		return;

	drm_kms_helper_poll_fini(dev);
	drm_dev_unplug(dev);
	drm_dev_put(dev);

	front_info->drm_info = NULL;

	xen_drm_front_evtchnl_free_all(front_info);
	dbuf_free_all(&front_info->dbuf_list);

	/*
	 * If we are not using backend allocated buffers, then tell the
	 * backend we are ready to (re)initialize. Otherwise, wait for
	 * drm_driver.release.
	 */
	if (!front_info->cfg.be_alloc)
		xenbus_switch_state(front_info->xb_dev,
				    XenbusStateInitialising);
}

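/*
 * Backend is in InitWait: read the display configuration from XenStore,
 * then create the event channels and publish them for the backend.
 */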
static int displback_initwait(struct xen_drm_front_info *front_info)
{
	struct xen_drm_front_cfg *cfg = &front_info->cfg;
	int ret;

	cfg->front_info = front_info;
	ret = xen_drm_front_cfg_card(front_info, cfg);
	if (ret < 0)
		return ret;

	DRM_INFO("Have %d connector(s)\n", cfg->num_connectors);
	/* Create event channels for all connectors and publish */
	ret = xen_drm_front_evtchnl_create_all(front_info);
	if (ret < 0)
		return ret;

	return xen_drm_front_evtchnl_publish_all(front_info);
}

static int displback_connect(struct xen_drm_front_info *front_info)
{
	xen_drm_front_evtchnl_set_state(front_info, EVTCHNL_STATE_CONNECTED);
	return xen_drm_drv_init(front_info);
}

static void displback_disconnect(struct xen_drm_front_info *front_info)
{
	if (!front_info->drm_info)
		return;

	/* Tell the backend to wait until we release the DRM driver. */
	xenbus_switch_state(front_info->xb_dev, XenbusStateReconfiguring);

	xen_drm_drv_fini(front_info);
}

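/*
 * XenBus state machine: react to backend state changes and drive the
 * frontend through (re)initialization, connection and disconnect.
 */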
static void displback_changed(struct xenbus_device *xb_dev,
			      enum xenbus_state backend_state)
{
	struct xen_drm_front_info *front_info = dev_get_drvdata(&xb_dev->dev);
	int ret;

	DRM_DEBUG("Backend state is %s, front is %s\n",
		  xenbus_strstate(backend_state),
		  xenbus_strstate(xb_dev->state));

	switch (backend_state) {
	case XenbusStateReconfiguring:
		/* fall through */
	case XenbusStateReconfigured:
		/* fall through */
	case XenbusStateInitialised:
		break;

	case XenbusStateInitialising:
		if (xb_dev->state == XenbusStateReconfiguring)
			break;

		/* recovering after an unexpected backend closure */
		displback_disconnect(front_info);
		break;

	case XenbusStateInitWait:
		if (xb_dev->state == XenbusStateReconfiguring)
			break;

		/* recovering after an unexpected backend closure */
		displback_disconnect(front_info);
		if (xb_dev->state != XenbusStateInitialising)
			break;

		ret = displback_initwait(front_info);
		if (ret < 0)
			xenbus_dev_fatal(xb_dev, ret, "initializing frontend");
		else
			xenbus_switch_state(xb_dev, XenbusStateInitialised);
		break;

	case XenbusStateConnected:
		if (xb_dev->state != XenbusStateInitialised)
			break;

		ret = displback_connect(front_info);
		if (ret < 0) {
			displback_disconnect(front_info);
			xenbus_dev_fatal(xb_dev, ret, "connecting backend");
		} else {
			xenbus_switch_state(xb_dev, XenbusStateConnected);
		}
		break;

	case XenbusStateClosing:
		/*
		 * In this state the backend starts freeing resources, so let
		 * it go into the closed state, so that we can also remove
		 * ours.
		 */
		break;

	case XenbusStateUnknown:
		/* fall through */
	case XenbusStateClosed:
		if (xb_dev->state == XenbusStateClosed)
			break;

		displback_disconnect(front_info);
		break;
	}
}

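/*
 * Driver probe: set up the DMA mask for buffer allocations, allocate the
 * per-device state and switch the device to the Initialising state.
 */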
static int xen_drv_probe(struct xenbus_device *xb_dev,
			 const struct xenbus_device_id *id)
{
	struct xen_drm_front_info *front_info;
	struct device *dev = &xb_dev->dev;
	int ret;

	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret < 0) {
		DRM_ERROR("Cannot setup DMA mask, ret %d", ret);
		return ret;
	}

	front_info = devm_kzalloc(&xb_dev->dev,
				  sizeof(*front_info), GFP_KERNEL);
	if (!front_info)
		return -ENOMEM;

	front_info->xb_dev = xb_dev;
	spin_lock_init(&front_info->io_lock);
	INIT_LIST_HEAD(&front_info->dbuf_list);
	dev_set_drvdata(&xb_dev->dev, front_info);

	return xenbus_switch_state(xb_dev, XenbusStateInitialising);
}

static int xen_drv_remove(struct xenbus_device *dev)
{
	struct xen_drm_front_info *front_info = dev_get_drvdata(&dev->dev);
	int to = 100;

	xenbus_switch_state(dev, XenbusStateClosing);

	/*
	 * On driver removal the device is disconnected from XenBus, so no
	 * backend state change events come via the .otherend_changed
	 * callback. This prevents us from exiting gracefully, e.g.
	 * signaling the backend to free event channels, waiting for its
	 * state to change to XenbusStateClosed and cleaning up at our end.
	 * Normally, when the front driver is removed, the backend will
	 * finally go into the XenbusStateInitWait state.
	 *
	 * Workaround: read the backend's state manually and wait with a
	 * time-out.
	 */
	while ((xenbus_read_unsigned(front_info->xb_dev->otherend, "state",
				     XenbusStateUnknown) != XenbusStateInitWait) &&
				     --to)
		msleep(10);

	if (!to) {
		unsigned int state;

		state = xenbus_read_unsigned(front_info->xb_dev->otherend,
					     "state", XenbusStateUnknown);
		DRM_ERROR("Backend state is %s while removing driver\n",
			  xenbus_strstate(state));
	}

	xen_drm_drv_fini(front_info);
	xenbus_frontend_closed(dev);
	return 0;
}

static const struct xenbus_device_id xen_driver_ids[] = {
	{ XENDISPL_DRIVER_NAME },
	{ "" }
};

static struct xenbus_driver xen_driver = {
	.ids = xen_driver_ids,
	.probe = xen_drv_probe,
	.remove = xen_drv_remove,
	.otherend_changed = displback_changed,
};

static int __init xen_drv_init(void)
{
	/* At the moment we only support the case when XEN_PAGE_SIZE == PAGE_SIZE */
	if (XEN_PAGE_SIZE != PAGE_SIZE) {
		DRM_ERROR(XENDISPL_DRIVER_NAME ": different kernel and Xen page sizes are not supported: XEN_PAGE_SIZE (%lu) != PAGE_SIZE (%lu)\n",
			  XEN_PAGE_SIZE, PAGE_SIZE);
		return -ENODEV;
	}

	if (!xen_domain())
		return -ENODEV;

	if (!xen_has_pv_devices())
		return -ENODEV;

	DRM_INFO("Registering XEN PV " XENDISPL_DRIVER_NAME "\n");
	return xenbus_register_frontend(&xen_driver);
}

static void __exit xen_drv_fini(void)
{
	DRM_INFO("Unregistering XEN PV " XENDISPL_DRIVER_NAME "\n");
	xenbus_unregister_driver(&xen_driver);
}

module_init(xen_drv_init);
module_exit(xen_drv_fini);

MODULE_DESCRIPTION("Xen para-virtualized display device frontend");
MODULE_LICENSE("GPL");
MODULE_ALIAS("xen:" XENDISPL_DRIVER_NAME);