1c575b7eeSOleksandr Andrushchenko // SPDX-License-Identifier: GPL-2.0 OR MIT
2c575b7eeSOleksandr Andrushchenko
3c575b7eeSOleksandr Andrushchenko /*
4c575b7eeSOleksandr Andrushchenko * Xen para-virtual DRM device
5c575b7eeSOleksandr Andrushchenko *
6c575b7eeSOleksandr Andrushchenko * Copyright (C) 2016-2018 EPAM Systems Inc.
7c575b7eeSOleksandr Andrushchenko *
8c575b7eeSOleksandr Andrushchenko * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
9c575b7eeSOleksandr Andrushchenko */
10c575b7eeSOleksandr Andrushchenko
112ea2269eSSam Ravnborg #include <linux/delay.h>
122ea2269eSSam Ravnborg #include <linux/dma-mapping.h>
132ea2269eSSam Ravnborg #include <linux/module.h>
14c575b7eeSOleksandr Andrushchenko
152ea2269eSSam Ravnborg #include <drm/drm_atomic_helper.h>
162ea2269eSSam Ravnborg #include <drm/drm_drv.h>
172ea2269eSSam Ravnborg #include <drm/drm_ioctl.h>
182ea2269eSSam Ravnborg #include <drm/drm_probe_helper.h>
192ea2269eSSam Ravnborg #include <drm/drm_file.h>
202ea2269eSSam Ravnborg #include <drm/drm_gem.h>
212ea2269eSSam Ravnborg
22c575b7eeSOleksandr Andrushchenko #include <xen/platform_pci.h>
23c575b7eeSOleksandr Andrushchenko #include <xen/xen.h>
24c575b7eeSOleksandr Andrushchenko #include <xen/xenbus.h>
25c575b7eeSOleksandr Andrushchenko
265641f19bSOleksandr Andrushchenko #include <xen/xen-front-pgdir-shbuf.h>
27c575b7eeSOleksandr Andrushchenko #include <xen/interface/io/displif.h>
28c575b7eeSOleksandr Andrushchenko
29c575b7eeSOleksandr Andrushchenko #include "xen_drm_front.h"
30c575b7eeSOleksandr Andrushchenko #include "xen_drm_front_cfg.h"
31c575b7eeSOleksandr Andrushchenko #include "xen_drm_front_evtchnl.h"
32c575b7eeSOleksandr Andrushchenko #include "xen_drm_front_gem.h"
33c575b7eeSOleksandr Andrushchenko #include "xen_drm_front_kms.h"
34c575b7eeSOleksandr Andrushchenko
/*
 * Descriptor of a single display buffer shared with the backend.
 * Buffers are tracked on xen_drm_front_info.dbuf_list and looked up
 * by their cookie (see dbuf_get()/dbuf_free()).
 */
struct xen_drm_front_dbuf {
	/* Entry in xen_drm_front_info.dbuf_list. */
	struct list_head list;
	/* Cookie identifying this display buffer in front/backend requests. */
	u64 dbuf_cookie;
	/* Cookie of the framebuffer attached to this buffer (fb_attach). */
	u64 fb_cookie;

	/* Page-directory based shared buffer backing this dbuf. */
	struct xen_front_pgdir_shbuf shbuf;
};
42c575b7eeSOleksandr Andrushchenko
/*
 * Assign @dbuf_cookie to @dbuf and link it into the front's buffer list
 * so it can later be found by dbuf_get()/dbuf_free().
 */
static void dbuf_add_to_list(struct xen_drm_front_info *front_info,
			     struct xen_drm_front_dbuf *dbuf, u64 dbuf_cookie)
{
	dbuf->dbuf_cookie = dbuf_cookie;
	list_add(&dbuf->list, &front_info->dbuf_list);
}
49c575b7eeSOleksandr Andrushchenko
/*
 * Look up a display buffer by its cookie.
 *
 * Returns the matching buffer or NULL if no buffer with @dbuf_cookie is
 * on the list. This is a pure lookup with no list modification, so the
 * plain iterator is used (the original _safe variant and its extra
 * cursor were unnecessary and implied a deletion that never happens).
 */
static struct xen_drm_front_dbuf *dbuf_get(struct list_head *dbuf_list,
					   u64 dbuf_cookie)
{
	struct xen_drm_front_dbuf *buf;

	list_for_each_entry(buf, dbuf_list, list)
		if (buf->dbuf_cookie == dbuf_cookie)
			return buf;

	return NULL;
}
61c575b7eeSOleksandr Andrushchenko
/*
 * Unlink, unmap and free the display buffer identified by @dbuf_cookie.
 * Silently does nothing if no such buffer is on the list.
 */
static void dbuf_free(struct list_head *dbuf_list, u64 dbuf_cookie)
{
	struct xen_drm_front_dbuf *dbuf, *tmp;

	list_for_each_entry_safe(dbuf, tmp, dbuf_list, list) {
		if (dbuf->dbuf_cookie != dbuf_cookie)
			continue;

		list_del(&dbuf->list);
		xen_front_pgdir_shbuf_unmap(&dbuf->shbuf);
		xen_front_pgdir_shbuf_free(&dbuf->shbuf);
		kfree(dbuf);
		return;
	}
}
75c575b7eeSOleksandr Andrushchenko
dbuf_free_all(struct list_head * dbuf_list)76c575b7eeSOleksandr Andrushchenko static void dbuf_free_all(struct list_head *dbuf_list)
77c575b7eeSOleksandr Andrushchenko {
78c575b7eeSOleksandr Andrushchenko struct xen_drm_front_dbuf *buf, *q;
79c575b7eeSOleksandr Andrushchenko
80c575b7eeSOleksandr Andrushchenko list_for_each_entry_safe(buf, q, dbuf_list, list) {
81c575b7eeSOleksandr Andrushchenko list_del(&buf->list);
825641f19bSOleksandr Andrushchenko xen_front_pgdir_shbuf_unmap(&buf->shbuf);
835641f19bSOleksandr Andrushchenko xen_front_pgdir_shbuf_free(&buf->shbuf);
84c575b7eeSOleksandr Andrushchenko kfree(buf);
85c575b7eeSOleksandr Andrushchenko }
86c575b7eeSOleksandr Andrushchenko }
87c575b7eeSOleksandr Andrushchenko
88c575b7eeSOleksandr Andrushchenko static struct xendispl_req *
be_prepare_req(struct xen_drm_front_evtchnl * evtchnl,u8 operation)89c575b7eeSOleksandr Andrushchenko be_prepare_req(struct xen_drm_front_evtchnl *evtchnl, u8 operation)
90c575b7eeSOleksandr Andrushchenko {
91c575b7eeSOleksandr Andrushchenko struct xendispl_req *req;
92c575b7eeSOleksandr Andrushchenko
93c575b7eeSOleksandr Andrushchenko req = RING_GET_REQUEST(&evtchnl->u.req.ring,
94c575b7eeSOleksandr Andrushchenko evtchnl->u.req.ring.req_prod_pvt);
95c575b7eeSOleksandr Andrushchenko req->operation = operation;
96c575b7eeSOleksandr Andrushchenko req->id = evtchnl->evt_next_id++;
97c575b7eeSOleksandr Andrushchenko evtchnl->evt_id = req->id;
98c575b7eeSOleksandr Andrushchenko return req;
99c575b7eeSOleksandr Andrushchenko }
100c575b7eeSOleksandr Andrushchenko
be_stream_do_io(struct xen_drm_front_evtchnl * evtchnl,struct xendispl_req * req)101c575b7eeSOleksandr Andrushchenko static int be_stream_do_io(struct xen_drm_front_evtchnl *evtchnl,
102c575b7eeSOleksandr Andrushchenko struct xendispl_req *req)
103c575b7eeSOleksandr Andrushchenko {
104c575b7eeSOleksandr Andrushchenko reinit_completion(&evtchnl->u.req.completion);
105c575b7eeSOleksandr Andrushchenko if (unlikely(evtchnl->state != EVTCHNL_STATE_CONNECTED))
106c575b7eeSOleksandr Andrushchenko return -EIO;
107c575b7eeSOleksandr Andrushchenko
108c575b7eeSOleksandr Andrushchenko xen_drm_front_evtchnl_flush(evtchnl);
109c575b7eeSOleksandr Andrushchenko return 0;
110c575b7eeSOleksandr Andrushchenko }
111c575b7eeSOleksandr Andrushchenko
be_stream_wait_io(struct xen_drm_front_evtchnl * evtchnl)112c575b7eeSOleksandr Andrushchenko static int be_stream_wait_io(struct xen_drm_front_evtchnl *evtchnl)
113c575b7eeSOleksandr Andrushchenko {
114c575b7eeSOleksandr Andrushchenko if (wait_for_completion_timeout(&evtchnl->u.req.completion,
115c575b7eeSOleksandr Andrushchenko msecs_to_jiffies(XEN_DRM_FRONT_WAIT_BACK_MS)) <= 0)
116c575b7eeSOleksandr Andrushchenko return -ETIMEDOUT;
117c575b7eeSOleksandr Andrushchenko
118c575b7eeSOleksandr Andrushchenko return evtchnl->u.req.resp_status;
119c575b7eeSOleksandr Andrushchenko }
120c575b7eeSOleksandr Andrushchenko
/*
 * Ask the backend to apply a new display configuration (position, size,
 * bpp and framebuffer) on the pipeline's connector via
 * XENDISPL_OP_SET_CONFIG.
 *
 * Returns 0 on success, negative errno on channel/I-O failure or timeout.
 */
int xen_drm_front_mode_set(struct xen_drm_front_drm_pipeline *pipeline,
			   u32 x, u32 y, u32 width, u32 height,
			   u32 bpp, u64 fb_cookie)
{
	struct xen_drm_front_info *front_info = pipeline->drm_info->front_info;
	struct xen_drm_front_evtchnl *evtchnl;
	struct xendispl_req *req;
	unsigned long flags;
	int ret;

	evtchnl = &front_info->evt_pairs[pipeline->index].req;
	if (unlikely(!evtchnl))
		return -EIO;

	/* Serialize request/response cycles on this channel. */
	mutex_lock(&evtchnl->u.req.req_io_lock);

	/* io_lock protects the shared ring while the request is built. */
	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_SET_CONFIG);
	req->op.set_config.x = x;
	req->op.set_config.y = y;
	req->op.set_config.width = width;
	req->op.set_config.height = height;
	req->op.set_config.bpp = bpp;
	req->op.set_config.fb_cookie = fb_cookie;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (!ret)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}
156c575b7eeSOleksandr Andrushchenko
/*
 * Create a display buffer: allocate a local descriptor, share the given
 * pages with the backend through a page-directory based shared buffer
 * and issue an XENDISPL_OP_DBUF_CREATE request.
 *
 * @dbuf_cookie uniquely identifies the buffer in later requests;
 * @size is the buffer size in bytes, @offset the data offset inside it.
 *
 * Returns 0 on success, negative errno otherwise. On any failure the
 * buffer is removed from the list and its resources are released.
 */
int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
			      u64 dbuf_cookie, u32 width, u32 height,
			      u32 bpp, u64 size, u32 offset,
			      struct page **pages)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xen_drm_front_dbuf *dbuf;
	struct xendispl_req *req;
	struct xen_front_pgdir_shbuf_cfg buf_cfg;
	unsigned long flags;
	int ret;

	evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
	if (unlikely(!evtchnl))
		return -EIO;

	dbuf = kzalloc(sizeof(*dbuf), GFP_KERNEL);
	if (!dbuf)
		return -ENOMEM;

	/* List the buffer first so error paths can release it by cookie. */
	dbuf_add_to_list(front_info, dbuf, dbuf_cookie);

	memset(&buf_cfg, 0, sizeof(buf_cfg));
	buf_cfg.xb_dev = front_info->xb_dev;
	buf_cfg.num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
	buf_cfg.pages = pages;
	buf_cfg.pgdir = &dbuf->shbuf;
	buf_cfg.be_alloc = front_info->cfg.be_alloc;

	ret = xen_front_pgdir_shbuf_alloc(&buf_cfg);
	if (ret < 0)
		goto fail_shbuf_alloc;

	/* Serialize request/response cycles on this channel. */
	mutex_lock(&evtchnl->u.req.req_io_lock);

	/* io_lock protects the shared ring while the request is built. */
	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_DBUF_CREATE);
	req->op.dbuf_create.gref_directory =
			xen_front_pgdir_shbuf_get_dir_start(&dbuf->shbuf);
	req->op.dbuf_create.buffer_sz = size;
	req->op.dbuf_create.data_ofs = offset;
	req->op.dbuf_create.dbuf_cookie = dbuf_cookie;
	req->op.dbuf_create.width = width;
	req->op.dbuf_create.height = height;
	req->op.dbuf_create.bpp = bpp;
	if (buf_cfg.be_alloc)
		req->op.dbuf_create.flags |= XENDISPL_DBUF_FLG_REQ_ALLOC;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret < 0)
		goto fail;

	ret = be_stream_wait_io(evtchnl);
	if (ret < 0)
		goto fail;

	/*
	 * Map the shared buffer after the backend acknowledged creation;
	 * presumably only needed for backend-allocated buffers — see
	 * xen_front_pgdir_shbuf_map() for the exact semantics.
	 */
	ret = xen_front_pgdir_shbuf_map(&dbuf->shbuf);
	if (ret < 0)
		goto fail;

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return 0;

fail:
	mutex_unlock(&evtchnl->u.req.req_io_lock);
fail_shbuf_alloc:
	/* Unlinks the buffer, unmaps/frees the shbuf and frees @dbuf. */
	dbuf_free(&front_info->dbuf_list, dbuf_cookie);
	return ret;
}
228c575b7eeSOleksandr Andrushchenko
/*
 * Destroy a display buffer: release local resources and send an
 * XENDISPL_OP_DBUF_DESTROY request to the backend.
 *
 * The order of the local dbuf_free() relative to the backend request
 * depends on who allocated the buffer (see comments below).
 */
static int xen_drm_front_dbuf_destroy(struct xen_drm_front_info *front_info,
				      u64 dbuf_cookie)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xendispl_req *req;
	unsigned long flags;
	bool be_alloc;
	int ret;

	evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
	if (unlikely(!evtchnl))
		return -EIO;

	be_alloc = front_info->cfg.be_alloc;

	/*
	 * For the backend allocated buffer release references now, so backend
	 * can free the buffer.
	 */
	if (be_alloc)
		dbuf_free(&front_info->dbuf_list, dbuf_cookie);

	mutex_lock(&evtchnl->u.req.req_io_lock);

	/* io_lock protects the shared ring while the request is built. */
	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_DBUF_DESTROY);
	req->op.dbuf_destroy.dbuf_cookie = dbuf_cookie;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	/*
	 * Do this regardless of communication status with the backend:
	 * if we cannot remove remote resources remove what we can locally.
	 */
	if (!be_alloc)
		dbuf_free(&front_info->dbuf_list, dbuf_cookie);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}
273c575b7eeSOleksandr Andrushchenko
/*
 * Attach a framebuffer to an existing display buffer and announce it to
 * the backend via XENDISPL_OP_FB_ATTACH.
 *
 * Returns 0 on success, -EINVAL if @dbuf_cookie does not name a known
 * buffer, or a negative errno on channel/I-O failure or timeout.
 */
int xen_drm_front_fb_attach(struct xen_drm_front_info *front_info,
			    u64 dbuf_cookie, u64 fb_cookie, u32 width,
			    u32 height, u32 pixel_format)
{
	struct xen_drm_front_evtchnl *evtchnl =
			&front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
	struct xen_drm_front_dbuf *buf;
	struct xendispl_req *req;
	unsigned long flags;
	int ret;

	if (unlikely(!evtchnl))
		return -EIO;

	buf = dbuf_get(&front_info->dbuf_list, dbuf_cookie);
	if (!buf)
		return -EINVAL;

	/* Remember which framebuffer uses this display buffer. */
	buf->fb_cookie = fb_cookie;

	/* Serialize request/response cycles on this channel. */
	mutex_lock(&evtchnl->u.req.req_io_lock);

	/* io_lock protects the shared ring while the request is built. */
	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_FB_ATTACH);
	req->op.fb_attach.dbuf_cookie = dbuf_cookie;
	req->op.fb_attach.fb_cookie = fb_cookie;
	req->op.fb_attach.width = width;
	req->op.fb_attach.height = height;
	req->op.fb_attach.pixel_format = pixel_format;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (!ret)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}
313c575b7eeSOleksandr Andrushchenko
/*
 * Tell the backend to detach the framebuffer identified by @fb_cookie
 * via XENDISPL_OP_FB_DETACH.
 *
 * Returns 0 on success, negative errno on channel/I-O failure or timeout.
 */
int xen_drm_front_fb_detach(struct xen_drm_front_info *front_info,
			    u64 fb_cookie)
{
	struct xen_drm_front_evtchnl *evtchnl =
			&front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
	struct xendispl_req *req;
	unsigned long flags;
	int ret;

	if (unlikely(!evtchnl))
		return -EIO;

	/* Serialize request/response cycles on this channel. */
	mutex_lock(&evtchnl->u.req.req_io_lock);

	/* io_lock protects the shared ring while the request is built. */
	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_FB_DETACH);
	req->op.fb_detach.fb_cookie = fb_cookie;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (!ret)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}
341c575b7eeSOleksandr Andrushchenko
/*
 * Request a page flip to the framebuffer @fb_cookie on connector
 * @conn_idx via XENDISPL_OP_PG_FLIP.
 *
 * Returns 0 on success, -EINVAL for an out-of-range connector index, or
 * a negative errno on channel/I-O failure or timeout.
 */
int xen_drm_front_page_flip(struct xen_drm_front_info *front_info,
			    int conn_idx, u64 fb_cookie)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xendispl_req *req;
	unsigned long flags;
	int ret;

	if (unlikely(conn_idx >= front_info->num_evt_pairs))
		return -EINVAL;

	evtchnl = &front_info->evt_pairs[conn_idx].req;

	/* Serialize request/response cycles on this channel. */
	mutex_lock(&evtchnl->u.req.req_io_lock);

	/* io_lock protects the shared ring while the request is built. */
	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_PG_FLIP);
	req->op.pg_flip.fb_cookie = fb_cookie;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (!ret)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}
370c575b7eeSOleksandr Andrushchenko
/*
 * Backend notification handler: a frame for connector @conn_idx is done.
 * Forwards the event to the KMS layer; silently ignores out-of-range
 * connector indices.
 */
void xen_drm_front_on_frame_done(struct xen_drm_front_info *front_info,
				 int conn_idx, u64 fb_cookie)
{
	if (unlikely(conn_idx >= front_info->cfg.num_connectors))
		return;

	xen_drm_front_kms_on_frame_done(&front_info->drm_info->pipeline[conn_idx],
					fb_cookie);
}
382c575b7eeSOleksandr Andrushchenko
/*
 * GEM object free callback.
 *
 * If the DRM device is still alive (drm_dev_enter() succeeds), do a
 * full teardown including the backend DBUF_DESTROY request; if the
 * device is already unplugged, only local bookkeeping can be cleaned
 * up. Finally release the GEM object itself.
 */
void xen_drm_front_gem_object_free(struct drm_gem_object *obj)
{
	struct xen_drm_front_drm_info *drm_info = obj->dev->dev_private;
	int idx;

	if (drm_dev_enter(obj->dev, &idx)) {
		/* Device alive: destroy the backend buffer as well. */
		xen_drm_front_dbuf_destroy(drm_info->front_info,
					   xen_drm_front_dbuf_to_cookie(obj));
		drm_dev_exit(idx);
	} else {
		/* Device gone: free only the local descriptor. */
		dbuf_free(&drm_info->front_info->dbuf_list,
			  xen_drm_front_dbuf_to_cookie(obj));
	}

	xen_drm_front_gem_free_object_unlocked(obj);
}
3991adda8b8SThomas Zimmermann
/*
 * DRM dumb-buffer creation callback.
 *
 * Creates the GEM object, registers the buffer with the backend and
 * only then exposes a handle to user-space (see comment below for why
 * the handle must come last). On success the caller owns @args->handle;
 * the allocation reference is dropped here in all cases.
 */
static int xen_drm_drv_dumb_create(struct drm_file *filp,
				   struct drm_device *dev,
				   struct drm_mode_create_dumb *args)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct drm_gem_object *obj;
	int ret;

	/*
	 * Dumb creation is a two stage process: first we create a fully
	 * constructed GEM object which is communicated to the backend, and
	 * only after that we can create GEM's handle. This is done so,
	 * because of the possible races: once you create a handle it becomes
	 * immediately visible to user-space, so the latter can try accessing
	 * object without pages etc.
	 * For details also see drm_gem_handle_create
	 */
	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	args->size = args->pitch * args->height;

	obj = xen_drm_front_gem_create(dev, args->size);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail;
	}

	/* Share the object's pages with the backend (offset 0). */
	ret = xen_drm_front_dbuf_create(drm_info->front_info,
					xen_drm_front_dbuf_to_cookie(obj),
					args->width, args->height, args->bpp,
					args->size, 0,
					xen_drm_front_gem_get_pages(obj));
	if (ret)
		goto fail_backend;

	/* This is the tail of GEM object creation */
	ret = drm_gem_handle_create(filp, obj, &args->handle);
	if (ret)
		goto fail_handle;

	/* Drop reference from allocate - handle holds it now */
	drm_gem_object_put(obj);
	return 0;

fail_handle:
	/* Undo the backend registration before dropping our reference. */
	xen_drm_front_dbuf_destroy(drm_info->front_info,
				   xen_drm_front_dbuf_to_cookie(obj));
fail_backend:
	/* drop reference from allocate */
	drm_gem_object_put(obj);
fail:
	DRM_ERROR("Failed to create dumb buffer: %d\n", ret);
	return ret;
}
453c575b7eeSOleksandr Andrushchenko
/*
 * DRM device release callback: final teardown once the last reference
 * to the drm_device is dropped. Shuts down KMS, and for backend
 * allocated buffers re-arms the xenbus state machine so the backend can
 * restart the initialization cycle.
 */
static void xen_drm_drv_release(struct drm_device *dev)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct xen_drm_front_info *front_info = drm_info->front_info;

	xen_drm_front_kms_fini(drm_info);

	drm_atomic_helper_shutdown(dev);
	drm_mode_config_cleanup(dev);

	if (front_info->cfg.be_alloc)
		xenbus_switch_state(front_info->xb_dev,
				    XenbusStateInitialising);

	kfree(drm_info);
}
470c575b7eeSOleksandr Andrushchenko
/* File operations for the DRM device node (default GEM fops). */
DEFINE_DRM_GEM_FOPS(xen_drm_dev_fops);
472c575b7eeSOleksandr Andrushchenko
/* DRM driver description for the Xen para-virtual display frontend. */
static const struct drm_driver xen_drm_driver = {
	.driver_features           = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	.release                   = xen_drm_drv_release,
	/* Import of PRIME buffers backed by an sg table. */
	.gem_prime_import_sg_table = xen_drm_front_gem_import_sg_table,
	.dumb_create               = xen_drm_drv_dumb_create,
	.fops                      = &xen_drm_dev_fops,
	.name                      = "xendrm-du",
	.desc                      = "Xen PV DRM Display Unit",
	.date                      = "20180221",
	.major                     = 1,
	.minor                     = 0,

};
486c575b7eeSOleksandr Andrushchenko
/*
 * Allocate, initialize and register the para-virtual DRM device.
 *
 * Called from displback_connect() once the backend has signalled it is
 * ready.  On success front_info->drm_info points at the live per-device
 * state; on any failure all partially-created state is unwound and
 * front_info->drm_info is reset to NULL so a later backend reconnect
 * can retry from scratch.
 *
 * Returns 0 on success or a negative errno.
 */
static int xen_drm_drv_init(struct xen_drm_front_info *front_info)
{
	struct device *dev = &front_info->xb_dev->dev;
	struct xen_drm_front_drm_info *drm_info;
	struct drm_device *drm_dev;
	int ret;

	/* Honor the "nomodeset" / firmware-drivers-only kernel parameter. */
	if (drm_firmware_drivers_only())
		return -ENODEV;

	DRM_INFO("Creating %s\n", xen_drm_driver.desc);

	drm_info = kzalloc(sizeof(*drm_info), GFP_KERNEL);
	if (!drm_info) {
		ret = -ENOMEM;
		goto fail;
	}

	/* Link front and DRM state both ways before any fallible step. */
	drm_info->front_info = front_info;
	front_info->drm_info = drm_info;

	drm_dev = drm_dev_alloc(&xen_drm_driver, dev);
	if (IS_ERR(drm_dev)) {
		ret = PTR_ERR(drm_dev);
		/* Only drm_info exists at this point; free it and clear the link. */
		goto fail_dev;
	}

	drm_info->drm_dev = drm_dev;

	drm_dev->dev_private = drm_info;

	ret = xen_drm_front_kms_init(drm_info);
	if (ret) {
		DRM_ERROR("Failed to initialize DRM/KMS, ret %d\n", ret);
		goto fail_modeset;
	}

	ret = drm_dev_register(drm_dev, 0);
	if (ret)
		goto fail_register;

	DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
		 xen_drm_driver.name, xen_drm_driver.major,
		 xen_drm_driver.minor, xen_drm_driver.patchlevel,
		 xen_drm_driver.date, drm_dev->primary->index);

	return 0;

fail_register:
	drm_dev_unregister(drm_dev);
fail_modeset:
	/* Undo KMS/mode-config setup and drop the drm_dev_alloc() reference. */
	drm_kms_helper_poll_fini(drm_dev);
	drm_mode_config_cleanup(drm_dev);
	drm_dev_put(drm_dev);
fail_dev:
	kfree(drm_info);
	/* Clear the dangling pointer so teardown paths see "not initialized". */
	front_info->drm_info = NULL;
fail:
	return ret;
}
547c575b7eeSOleksandr Andrushchenko
/*
 * Tear down the DRM device on backend disconnect or driver removal.
 *
 * Safe to call when nothing was ever initialized: bails out early if
 * there is no drm_info, no DRM device, or the device has already been
 * unplugged by a previous call.
 */
static void xen_drm_drv_fini(struct xen_drm_front_info *front_info)
{
	struct xen_drm_front_drm_info *drm_info = front_info->drm_info;
	struct drm_device *dev;

	if (!drm_info)
		return;

	dev = drm_info->drm_dev;
	if (!dev)
		return;

	/* Nothing to do if device is already unplugged */
	if (drm_dev_is_unplugged(dev))
		return;

	/* Stop connector polling before unplugging userspace access. */
	drm_kms_helper_poll_fini(dev);
	drm_dev_unplug(dev);
	/* Drop the reference taken by drm_dev_alloc(). */
	drm_dev_put(dev);

	front_info->drm_info = NULL;

	/* Release shared rings/event channels and any display buffers. */
	xen_drm_front_evtchnl_free_all(front_info);
	dbuf_free_all(&front_info->dbuf_list);

	/*
	 * If we are not using backend allocated buffers, then tell the
	 * backend we are ready to (re)initialize. Otherwise, wait for
	 * drm_driver.release.
	 */
	if (!front_info->cfg.be_alloc)
		xenbus_switch_state(front_info->xb_dev,
				    XenbusStateInitialising);
}
582c575b7eeSOleksandr Andrushchenko
displback_initwait(struct xen_drm_front_info * front_info)583c575b7eeSOleksandr Andrushchenko static int displback_initwait(struct xen_drm_front_info *front_info)
584c575b7eeSOleksandr Andrushchenko {
585c575b7eeSOleksandr Andrushchenko struct xen_drm_front_cfg *cfg = &front_info->cfg;
586c575b7eeSOleksandr Andrushchenko int ret;
587c575b7eeSOleksandr Andrushchenko
588c575b7eeSOleksandr Andrushchenko cfg->front_info = front_info;
589c575b7eeSOleksandr Andrushchenko ret = xen_drm_front_cfg_card(front_info, cfg);
590c575b7eeSOleksandr Andrushchenko if (ret < 0)
591c575b7eeSOleksandr Andrushchenko return ret;
592c575b7eeSOleksandr Andrushchenko
593aefff491SColin Ian King DRM_INFO("Have %d connector(s)\n", cfg->num_connectors);
594c575b7eeSOleksandr Andrushchenko /* Create event channels for all connectors and publish */
595c575b7eeSOleksandr Andrushchenko ret = xen_drm_front_evtchnl_create_all(front_info);
596c575b7eeSOleksandr Andrushchenko if (ret < 0)
597c575b7eeSOleksandr Andrushchenko return ret;
598c575b7eeSOleksandr Andrushchenko
599c575b7eeSOleksandr Andrushchenko return xen_drm_front_evtchnl_publish_all(front_info);
600c575b7eeSOleksandr Andrushchenko }
601c575b7eeSOleksandr Andrushchenko
/*
 * Backend entered Connected: enable I/O on all event channels and
 * bring up the DRM device.  Returns 0 or a negative errno.
 */
static int displback_connect(struct xen_drm_front_info *front_info)
{
	xen_drm_front_evtchnl_set_state(front_info, EVTCHNL_STATE_CONNECTED);
	return xen_drm_drv_init(front_info);
}
607c575b7eeSOleksandr Andrushchenko
/*
 * Backend went away (or connect failed): signal Reconfiguring so the
 * backend waits for us, then tear the DRM device down.  No-op if the
 * DRM device was never created.
 */
static void displback_disconnect(struct xen_drm_front_info *front_info)
{
	if (!front_info->drm_info)
		return;

	/* Tell the backend to wait until we release the DRM driver. */
	xenbus_switch_state(front_info->xb_dev, XenbusStateReconfiguring);

	xen_drm_drv_fini(front_info);
}
618c575b7eeSOleksandr Andrushchenko
/*
 * XenBus .otherend_changed callback: drive the frontend state machine
 * in response to backend state transitions.
 */
static void displback_changed(struct xenbus_device *xb_dev,
			      enum xenbus_state backend_state)
{
	struct xen_drm_front_info *front_info = dev_get_drvdata(&xb_dev->dev);
	int ret;

	DRM_DEBUG("Backend state is %s, front is %s\n",
		  xenbus_strstate(backend_state),
		  xenbus_strstate(xb_dev->state));

	switch (backend_state) {
	case XenbusStateReconfiguring:
	case XenbusStateReconfigured:
	case XenbusStateInitialised:
		/* Transient backend states: nothing for us to do. */
		break;

	case XenbusStateInitialising:
		/* Our own Reconfiguring round-trip; ignore. */
		if (xb_dev->state == XenbusStateReconfiguring)
			break;

		/* recovering after backend unexpected closure */
		displback_disconnect(front_info);
		break;

	case XenbusStateInitWait:
		if (xb_dev->state == XenbusStateReconfiguring)
			break;

		/* recovering after backend unexpected closure */
		displback_disconnect(front_info);
		/*
		 * Only proceed to read the configuration when we are in
		 * Initialising; displback_disconnect() may have moved us.
		 */
		if (xb_dev->state != XenbusStateInitialising)
			break;

		ret = displback_initwait(front_info);
		if (ret < 0)
			xenbus_dev_fatal(xb_dev, ret, "initializing frontend");
		else
			xenbus_switch_state(xb_dev, XenbusStateInitialised);
		break;

	case XenbusStateConnected:
		if (xb_dev->state != XenbusStateInitialised)
			break;

		ret = displback_connect(front_info);
		if (ret < 0) {
			displback_disconnect(front_info);
			xenbus_dev_fatal(xb_dev, ret, "connecting backend");
		} else {
			xenbus_switch_state(xb_dev, XenbusStateConnected);
		}
		break;

	case XenbusStateClosing:
		/*
		 * in this state backend starts freeing resources,
		 * so let it go into closed state, so we can also
		 * remove ours
		 */
		break;

	case XenbusStateUnknown:
	case XenbusStateClosed:
		/* Avoid tearing down twice if we are already Closed. */
		if (xb_dev->state == XenbusStateClosed)
			break;

		displback_disconnect(front_info);
		break;
	}
}
689c575b7eeSOleksandr Andrushchenko
xen_drv_probe(struct xenbus_device * xb_dev,const struct xenbus_device_id * id)690c575b7eeSOleksandr Andrushchenko static int xen_drv_probe(struct xenbus_device *xb_dev,
691c575b7eeSOleksandr Andrushchenko const struct xenbus_device_id *id)
692c575b7eeSOleksandr Andrushchenko {
693c575b7eeSOleksandr Andrushchenko struct xen_drm_front_info *front_info;
694c575b7eeSOleksandr Andrushchenko struct device *dev = &xb_dev->dev;
695c575b7eeSOleksandr Andrushchenko int ret;
696c575b7eeSOleksandr Andrushchenko
697ee7f5225SRob Herring ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
698c575b7eeSOleksandr Andrushchenko if (ret < 0) {
699ee7f5225SRob Herring DRM_ERROR("Cannot setup DMA mask, ret %d", ret);
700c575b7eeSOleksandr Andrushchenko return ret;
701c575b7eeSOleksandr Andrushchenko }
702c575b7eeSOleksandr Andrushchenko
703c575b7eeSOleksandr Andrushchenko front_info = devm_kzalloc(&xb_dev->dev,
704c575b7eeSOleksandr Andrushchenko sizeof(*front_info), GFP_KERNEL);
705c575b7eeSOleksandr Andrushchenko if (!front_info)
706c575b7eeSOleksandr Andrushchenko return -ENOMEM;
707c575b7eeSOleksandr Andrushchenko
708c575b7eeSOleksandr Andrushchenko front_info->xb_dev = xb_dev;
709c575b7eeSOleksandr Andrushchenko spin_lock_init(&front_info->io_lock);
710c575b7eeSOleksandr Andrushchenko INIT_LIST_HEAD(&front_info->dbuf_list);
711c575b7eeSOleksandr Andrushchenko dev_set_drvdata(&xb_dev->dev, front_info);
712c575b7eeSOleksandr Andrushchenko
713c575b7eeSOleksandr Andrushchenko return xenbus_switch_state(xb_dev, XenbusStateInitialising);
714c575b7eeSOleksandr Andrushchenko }
715c575b7eeSOleksandr Andrushchenko
/*
 * XenBus remove: ask the backend to close and wait (bounded) for it to
 * reach InitWait before freeing our resources.
 */
static void xen_drv_remove(struct xenbus_device *dev)
{
	struct xen_drm_front_info *front_info = dev_get_drvdata(&dev->dev);
	int to = 100;	/* poll iterations of 10 ms each, ~1 s total */

	xenbus_switch_state(dev, XenbusStateClosing);

	/*
	 * On driver removal it is disconnected from XenBus,
	 * so no backend state change events come via .otherend_changed
	 * callback. This prevents us from exiting gracefully, e.g.
	 * signaling the backend to free event channels, waiting for its
	 * state to change to XenbusStateClosed and cleaning at our end.
	 * Normally when front driver removed backend will finally go into
	 * XenbusStateInitWait state.
	 *
	 * Workaround: read backend's state manually and wait with time-out.
	 */
	while ((xenbus_read_unsigned(front_info->xb_dev->otherend, "state",
				     XenbusStateUnknown) != XenbusStateInitWait) &&
				     --to)
		msleep(10);

	if (!to) {
		/* Timed out: log the state the backend got stuck in. */
		unsigned int state;

		state = xenbus_read_unsigned(front_info->xb_dev->otherend,
					     "state", XenbusStateUnknown);
		DRM_ERROR("Backend state is %s while removing driver\n",
			  xenbus_strstate(state));
	}

	/* Proceed with teardown regardless of what the backend did. */
	xen_drm_drv_fini(front_info);
	xenbus_frontend_closed(dev);
}
751c575b7eeSOleksandr Andrushchenko
752c575b7eeSOleksandr Andrushchenko static const struct xenbus_device_id xen_driver_ids[] = {
753c575b7eeSOleksandr Andrushchenko { XENDISPL_DRIVER_NAME },
754c575b7eeSOleksandr Andrushchenko { "" }
755c575b7eeSOleksandr Andrushchenko };
756c575b7eeSOleksandr Andrushchenko
757c575b7eeSOleksandr Andrushchenko static struct xenbus_driver xen_driver = {
758c575b7eeSOleksandr Andrushchenko .ids = xen_driver_ids,
759c575b7eeSOleksandr Andrushchenko .probe = xen_drv_probe,
760c575b7eeSOleksandr Andrushchenko .remove = xen_drv_remove,
761c575b7eeSOleksandr Andrushchenko .otherend_changed = displback_changed,
7621c669938SJuergen Gross .not_essential = true,
763c575b7eeSOleksandr Andrushchenko };
764c575b7eeSOleksandr Andrushchenko
xen_drv_init(void)765c575b7eeSOleksandr Andrushchenko static int __init xen_drv_init(void)
766c575b7eeSOleksandr Andrushchenko {
767c575b7eeSOleksandr Andrushchenko /* At the moment we only support case with XEN_PAGE_SIZE == PAGE_SIZE */
768c575b7eeSOleksandr Andrushchenko if (XEN_PAGE_SIZE != PAGE_SIZE) {
769c575b7eeSOleksandr Andrushchenko DRM_ERROR(XENDISPL_DRIVER_NAME ": different kernel and Xen page sizes are not supported: XEN_PAGE_SIZE (%lu) != PAGE_SIZE (%lu)\n",
770c575b7eeSOleksandr Andrushchenko XEN_PAGE_SIZE, PAGE_SIZE);
771c575b7eeSOleksandr Andrushchenko return -ENODEV;
772c575b7eeSOleksandr Andrushchenko }
773c575b7eeSOleksandr Andrushchenko
774c575b7eeSOleksandr Andrushchenko if (!xen_domain())
775c575b7eeSOleksandr Andrushchenko return -ENODEV;
776c575b7eeSOleksandr Andrushchenko
777c575b7eeSOleksandr Andrushchenko if (!xen_has_pv_devices())
778c575b7eeSOleksandr Andrushchenko return -ENODEV;
779c575b7eeSOleksandr Andrushchenko
780c575b7eeSOleksandr Andrushchenko DRM_INFO("Registering XEN PV " XENDISPL_DRIVER_NAME "\n");
781c575b7eeSOleksandr Andrushchenko return xenbus_register_frontend(&xen_driver);
782c575b7eeSOleksandr Andrushchenko }
783c575b7eeSOleksandr Andrushchenko
/* Module exit point: unregister the frontend from XenBus. */
static void __exit xen_drv_fini(void)
{
	DRM_INFO("Unregistering XEN PV " XENDISPL_DRIVER_NAME "\n");
	xenbus_unregister_driver(&xen_driver);
}
789c575b7eeSOleksandr Andrushchenko
790c575b7eeSOleksandr Andrushchenko module_init(xen_drv_init);
791c575b7eeSOleksandr Andrushchenko module_exit(xen_drv_fini);
792c575b7eeSOleksandr Andrushchenko
793c575b7eeSOleksandr Andrushchenko MODULE_DESCRIPTION("Xen para-virtualized display device frontend");
794c575b7eeSOleksandr Andrushchenko MODULE_LICENSE("GPL");
795c575b7eeSOleksandr Andrushchenko MODULE_ALIAS("xen:" XENDISPL_DRIVER_NAME);
796