1c575b7eeSOleksandr Andrushchenko // SPDX-License-Identifier: GPL-2.0 OR MIT 2c575b7eeSOleksandr Andrushchenko 3c575b7eeSOleksandr Andrushchenko /* 4c575b7eeSOleksandr Andrushchenko * Xen para-virtual DRM device 5c575b7eeSOleksandr Andrushchenko * 6c575b7eeSOleksandr Andrushchenko * Copyright (C) 2016-2018 EPAM Systems Inc. 7c575b7eeSOleksandr Andrushchenko * 8c575b7eeSOleksandr Andrushchenko * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com> 9c575b7eeSOleksandr Andrushchenko */ 10c575b7eeSOleksandr Andrushchenko 112ea2269eSSam Ravnborg #include <linux/delay.h> 122ea2269eSSam Ravnborg #include <linux/dma-mapping.h> 132ea2269eSSam Ravnborg #include <linux/module.h> 14c575b7eeSOleksandr Andrushchenko #include <linux/of_device.h> 15c575b7eeSOleksandr Andrushchenko 162ea2269eSSam Ravnborg #include <drm/drm_atomic_helper.h> 172ea2269eSSam Ravnborg #include <drm/drm_drv.h> 182ea2269eSSam Ravnborg #include <drm/drm_ioctl.h> 192ea2269eSSam Ravnborg #include <drm/drm_probe_helper.h> 202ea2269eSSam Ravnborg #include <drm/drm_file.h> 212ea2269eSSam Ravnborg #include <drm/drm_gem.h> 222ea2269eSSam Ravnborg 23c575b7eeSOleksandr Andrushchenko #include <xen/platform_pci.h> 24c575b7eeSOleksandr Andrushchenko #include <xen/xen.h> 25c575b7eeSOleksandr Andrushchenko #include <xen/xenbus.h> 26c575b7eeSOleksandr Andrushchenko 275641f19bSOleksandr Andrushchenko #include <xen/xen-front-pgdir-shbuf.h> 28c575b7eeSOleksandr Andrushchenko #include <xen/interface/io/displif.h> 29c575b7eeSOleksandr Andrushchenko 30c575b7eeSOleksandr Andrushchenko #include "xen_drm_front.h" 31c575b7eeSOleksandr Andrushchenko #include "xen_drm_front_cfg.h" 32c575b7eeSOleksandr Andrushchenko #include "xen_drm_front_evtchnl.h" 33c575b7eeSOleksandr Andrushchenko #include "xen_drm_front_gem.h" 34c575b7eeSOleksandr Andrushchenko #include "xen_drm_front_kms.h" 35c575b7eeSOleksandr Andrushchenko 36c575b7eeSOleksandr Andrushchenko struct xen_drm_front_dbuf { 37c575b7eeSOleksandr Andrushchenko struct 
list_head list; 38c575b7eeSOleksandr Andrushchenko u64 dbuf_cookie; 39c575b7eeSOleksandr Andrushchenko u64 fb_cookie; 405641f19bSOleksandr Andrushchenko 415641f19bSOleksandr Andrushchenko struct xen_front_pgdir_shbuf shbuf; 42c575b7eeSOleksandr Andrushchenko }; 43c575b7eeSOleksandr Andrushchenko 445641f19bSOleksandr Andrushchenko static void dbuf_add_to_list(struct xen_drm_front_info *front_info, 455641f19bSOleksandr Andrushchenko struct xen_drm_front_dbuf *dbuf, u64 dbuf_cookie) 46c575b7eeSOleksandr Andrushchenko { 47c575b7eeSOleksandr Andrushchenko dbuf->dbuf_cookie = dbuf_cookie; 48c575b7eeSOleksandr Andrushchenko list_add(&dbuf->list, &front_info->dbuf_list); 49c575b7eeSOleksandr Andrushchenko } 50c575b7eeSOleksandr Andrushchenko 51c575b7eeSOleksandr Andrushchenko static struct xen_drm_front_dbuf *dbuf_get(struct list_head *dbuf_list, 52c575b7eeSOleksandr Andrushchenko u64 dbuf_cookie) 53c575b7eeSOleksandr Andrushchenko { 54c575b7eeSOleksandr Andrushchenko struct xen_drm_front_dbuf *buf, *q; 55c575b7eeSOleksandr Andrushchenko 56c575b7eeSOleksandr Andrushchenko list_for_each_entry_safe(buf, q, dbuf_list, list) 57c575b7eeSOleksandr Andrushchenko if (buf->dbuf_cookie == dbuf_cookie) 58c575b7eeSOleksandr Andrushchenko return buf; 59c575b7eeSOleksandr Andrushchenko 60c575b7eeSOleksandr Andrushchenko return NULL; 61c575b7eeSOleksandr Andrushchenko } 62c575b7eeSOleksandr Andrushchenko 63c575b7eeSOleksandr Andrushchenko static void dbuf_free(struct list_head *dbuf_list, u64 dbuf_cookie) 64c575b7eeSOleksandr Andrushchenko { 65c575b7eeSOleksandr Andrushchenko struct xen_drm_front_dbuf *buf, *q; 66c575b7eeSOleksandr Andrushchenko 67c575b7eeSOleksandr Andrushchenko list_for_each_entry_safe(buf, q, dbuf_list, list) 68c575b7eeSOleksandr Andrushchenko if (buf->dbuf_cookie == dbuf_cookie) { 69c575b7eeSOleksandr Andrushchenko list_del(&buf->list); 705641f19bSOleksandr Andrushchenko xen_front_pgdir_shbuf_unmap(&buf->shbuf); 715641f19bSOleksandr Andrushchenko 
xen_front_pgdir_shbuf_free(&buf->shbuf); 72c575b7eeSOleksandr Andrushchenko kfree(buf); 73c575b7eeSOleksandr Andrushchenko break; 74c575b7eeSOleksandr Andrushchenko } 75c575b7eeSOleksandr Andrushchenko } 76c575b7eeSOleksandr Andrushchenko 77c575b7eeSOleksandr Andrushchenko static void dbuf_free_all(struct list_head *dbuf_list) 78c575b7eeSOleksandr Andrushchenko { 79c575b7eeSOleksandr Andrushchenko struct xen_drm_front_dbuf *buf, *q; 80c575b7eeSOleksandr Andrushchenko 81c575b7eeSOleksandr Andrushchenko list_for_each_entry_safe(buf, q, dbuf_list, list) { 82c575b7eeSOleksandr Andrushchenko list_del(&buf->list); 835641f19bSOleksandr Andrushchenko xen_front_pgdir_shbuf_unmap(&buf->shbuf); 845641f19bSOleksandr Andrushchenko xen_front_pgdir_shbuf_free(&buf->shbuf); 85c575b7eeSOleksandr Andrushchenko kfree(buf); 86c575b7eeSOleksandr Andrushchenko } 87c575b7eeSOleksandr Andrushchenko } 88c575b7eeSOleksandr Andrushchenko 89c575b7eeSOleksandr Andrushchenko static struct xendispl_req * 90c575b7eeSOleksandr Andrushchenko be_prepare_req(struct xen_drm_front_evtchnl *evtchnl, u8 operation) 91c575b7eeSOleksandr Andrushchenko { 92c575b7eeSOleksandr Andrushchenko struct xendispl_req *req; 93c575b7eeSOleksandr Andrushchenko 94c575b7eeSOleksandr Andrushchenko req = RING_GET_REQUEST(&evtchnl->u.req.ring, 95c575b7eeSOleksandr Andrushchenko evtchnl->u.req.ring.req_prod_pvt); 96c575b7eeSOleksandr Andrushchenko req->operation = operation; 97c575b7eeSOleksandr Andrushchenko req->id = evtchnl->evt_next_id++; 98c575b7eeSOleksandr Andrushchenko evtchnl->evt_id = req->id; 99c575b7eeSOleksandr Andrushchenko return req; 100c575b7eeSOleksandr Andrushchenko } 101c575b7eeSOleksandr Andrushchenko 102c575b7eeSOleksandr Andrushchenko static int be_stream_do_io(struct xen_drm_front_evtchnl *evtchnl, 103c575b7eeSOleksandr Andrushchenko struct xendispl_req *req) 104c575b7eeSOleksandr Andrushchenko { 105c575b7eeSOleksandr Andrushchenko reinit_completion(&evtchnl->u.req.completion); 106c575b7eeSOleksandr 
	/* Bail out if the channel is not fully connected to the backend. */
	if (unlikely(evtchnl->state != EVTCHNL_STATE_CONNECTED))
		return -EIO;

	xen_drm_front_evtchnl_flush(evtchnl);
	return 0;
}

/*
 * Wait for the backend to answer the last request pushed with
 * be_stream_do_io().  Returns -ETIMEDOUT if no response arrived within
 * XEN_DRM_FRONT_WAIT_BACK_MS, otherwise the status from the response.
 */
static int be_stream_wait_io(struct xen_drm_front_evtchnl *evtchnl)
{
	if (wait_for_completion_timeout(&evtchnl->u.req.completion,
			msecs_to_jiffies(XEN_DRM_FRONT_WAIT_BACK_MS)) <= 0)
		return -ETIMEDOUT;

	return evtchnl->u.req.resp_status;
}

/*
 * Send XENDISPL_OP_SET_CONFIG to the backend to configure the display
 * pipeline (position, size, bpp and the framebuffer cookie to show) and
 * wait for the response.
 *
 * Locking: req_io_lock serializes requesters on this channel; io_lock
 * protects the shared ring while the request is prepared and pushed.
 */
int xen_drm_front_mode_set(struct xen_drm_front_drm_pipeline *pipeline,
			   u32 x, u32 y, u32 width, u32 height,
			   u32 bpp, u64 fb_cookie)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xen_drm_front_info *front_info;
	struct xendispl_req *req;
	unsigned long flags;
	int ret;

	front_info = pipeline->drm_info->front_info;
	evtchnl = &front_info->evt_pairs[pipeline->index].req;
	if (unlikely(!evtchnl))
		return -EIO;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_SET_CONFIG);
	req->op.set_config.x = x;
	req->op.set_config.y = y;
	req->op.set_config.width = width;
	req->op.set_config.height = height;
	req->op.set_config.bpp = bpp;
	req->op.set_config.fb_cookie = fb_cookie;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}

/*
 * Create a display buffer in the backend for the given pages: grant the
 * pages via a page directory shared buffer and send
 * XENDISPL_OP_DBUF_CREATE, then map the buffer if needed.
 */
int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
			      u64 dbuf_cookie, u32 width, u32 height,
			      u32 bpp, u64 size, struct page **pages)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xen_drm_front_dbuf *dbuf;
	struct xendispl_req *req;
	struct xen_front_pgdir_shbuf_cfg buf_cfg;
	unsigned long flags;
	int ret;

	evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
Andrushchenko if (unlikely(!evtchnl)) 171c575b7eeSOleksandr Andrushchenko return -EIO; 172c575b7eeSOleksandr Andrushchenko 1735641f19bSOleksandr Andrushchenko dbuf = kzalloc(sizeof(*dbuf), GFP_KERNEL); 1745641f19bSOleksandr Andrushchenko if (!dbuf) 1755641f19bSOleksandr Andrushchenko return -ENOMEM; 1765641f19bSOleksandr Andrushchenko 1775641f19bSOleksandr Andrushchenko dbuf_add_to_list(front_info, dbuf, dbuf_cookie); 1785641f19bSOleksandr Andrushchenko 179c575b7eeSOleksandr Andrushchenko memset(&buf_cfg, 0, sizeof(buf_cfg)); 180c575b7eeSOleksandr Andrushchenko buf_cfg.xb_dev = front_info->xb_dev; 1815641f19bSOleksandr Andrushchenko buf_cfg.num_pages = DIV_ROUND_UP(size, PAGE_SIZE); 182c575b7eeSOleksandr Andrushchenko buf_cfg.pages = pages; 1835641f19bSOleksandr Andrushchenko buf_cfg.pgdir = &dbuf->shbuf; 184c575b7eeSOleksandr Andrushchenko buf_cfg.be_alloc = front_info->cfg.be_alloc; 185c575b7eeSOleksandr Andrushchenko 1865641f19bSOleksandr Andrushchenko ret = xen_front_pgdir_shbuf_alloc(&buf_cfg); 1875641f19bSOleksandr Andrushchenko if (ret < 0) 1885641f19bSOleksandr Andrushchenko goto fail_shbuf_alloc; 189c575b7eeSOleksandr Andrushchenko 190c575b7eeSOleksandr Andrushchenko mutex_lock(&evtchnl->u.req.req_io_lock); 191c575b7eeSOleksandr Andrushchenko 192c575b7eeSOleksandr Andrushchenko spin_lock_irqsave(&front_info->io_lock, flags); 193c575b7eeSOleksandr Andrushchenko req = be_prepare_req(evtchnl, XENDISPL_OP_DBUF_CREATE); 194c575b7eeSOleksandr Andrushchenko req->op.dbuf_create.gref_directory = 1955641f19bSOleksandr Andrushchenko xen_front_pgdir_shbuf_get_dir_start(&dbuf->shbuf); 196c575b7eeSOleksandr Andrushchenko req->op.dbuf_create.buffer_sz = size; 197c575b7eeSOleksandr Andrushchenko req->op.dbuf_create.dbuf_cookie = dbuf_cookie; 198c575b7eeSOleksandr Andrushchenko req->op.dbuf_create.width = width; 199c575b7eeSOleksandr Andrushchenko req->op.dbuf_create.height = height; 200c575b7eeSOleksandr Andrushchenko req->op.dbuf_create.bpp = bpp; 201c575b7eeSOleksandr 
Andrushchenko if (buf_cfg.be_alloc) 202c575b7eeSOleksandr Andrushchenko req->op.dbuf_create.flags |= XENDISPL_DBUF_FLG_REQ_ALLOC; 203c575b7eeSOleksandr Andrushchenko 204c575b7eeSOleksandr Andrushchenko ret = be_stream_do_io(evtchnl, req); 205c575b7eeSOleksandr Andrushchenko spin_unlock_irqrestore(&front_info->io_lock, flags); 206c575b7eeSOleksandr Andrushchenko 207c575b7eeSOleksandr Andrushchenko if (ret < 0) 208c575b7eeSOleksandr Andrushchenko goto fail; 209c575b7eeSOleksandr Andrushchenko 210c575b7eeSOleksandr Andrushchenko ret = be_stream_wait_io(evtchnl); 211c575b7eeSOleksandr Andrushchenko if (ret < 0) 212c575b7eeSOleksandr Andrushchenko goto fail; 213c575b7eeSOleksandr Andrushchenko 2145641f19bSOleksandr Andrushchenko ret = xen_front_pgdir_shbuf_map(&dbuf->shbuf); 215c575b7eeSOleksandr Andrushchenko if (ret < 0) 216c575b7eeSOleksandr Andrushchenko goto fail; 217c575b7eeSOleksandr Andrushchenko 218c575b7eeSOleksandr Andrushchenko mutex_unlock(&evtchnl->u.req.req_io_lock); 219c575b7eeSOleksandr Andrushchenko return 0; 220c575b7eeSOleksandr Andrushchenko 221c575b7eeSOleksandr Andrushchenko fail: 222c575b7eeSOleksandr Andrushchenko mutex_unlock(&evtchnl->u.req.req_io_lock); 2235641f19bSOleksandr Andrushchenko fail_shbuf_alloc: 224c575b7eeSOleksandr Andrushchenko dbuf_free(&front_info->dbuf_list, dbuf_cookie); 225c575b7eeSOleksandr Andrushchenko return ret; 226c575b7eeSOleksandr Andrushchenko } 227c575b7eeSOleksandr Andrushchenko 228c575b7eeSOleksandr Andrushchenko static int xen_drm_front_dbuf_destroy(struct xen_drm_front_info *front_info, 229c575b7eeSOleksandr Andrushchenko u64 dbuf_cookie) 230c575b7eeSOleksandr Andrushchenko { 231c575b7eeSOleksandr Andrushchenko struct xen_drm_front_evtchnl *evtchnl; 232c575b7eeSOleksandr Andrushchenko struct xendispl_req *req; 233c575b7eeSOleksandr Andrushchenko unsigned long flags; 234c575b7eeSOleksandr Andrushchenko bool be_alloc; 235c575b7eeSOleksandr Andrushchenko int ret; 236c575b7eeSOleksandr Andrushchenko 
	evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
	if (unlikely(!evtchnl))
		return -EIO;

	be_alloc = front_info->cfg.be_alloc;

	/*
	 * For the backend allocated buffer release references now, so backend
	 * can free the buffer.
	 */
	if (be_alloc)
		dbuf_free(&front_info->dbuf_list, dbuf_cookie);

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_DBUF_DESTROY);
	req->op.dbuf_destroy.dbuf_cookie = dbuf_cookie;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	/*
	 * Do this regardless of communication status with the backend:
	 * if we cannot remove remote resources remove what we can locally.
	 */
	if (!be_alloc)
		dbuf_free(&front_info->dbuf_list, dbuf_cookie);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}

/*
 * Attach a framebuffer to an existing display buffer: send
 * XENDISPL_OP_FB_ATTACH with the framebuffer geometry and pixel format
 * and wait for the backend response.  Returns -EINVAL if @dbuf_cookie
 * does not name a known display buffer.
 */
int xen_drm_front_fb_attach(struct xen_drm_front_info *front_info,
			    u64 dbuf_cookie, u64 fb_cookie, u32 width,
			    u32 height, u32 pixel_format)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xen_drm_front_dbuf *buf;
	struct xendispl_req *req;
	unsigned long flags;
	int ret;

	evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
	if (unlikely(!evtchnl))
		return -EIO;

	buf = dbuf_get(&front_info->dbuf_list, dbuf_cookie);
	if (!buf)
		return -EINVAL;

	/* Remember which framebuffer is attached to this display buffer. */
	buf->fb_cookie = fb_cookie;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_FB_ATTACH);
	req->op.fb_attach.dbuf_cookie = dbuf_cookie;
	req->op.fb_attach.fb_cookie = fb_cookie;
	req->op.fb_attach.width = width;
	req->op.fb_attach.height = height;
	req->op.fb_attach.pixel_format = pixel_format;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}

/*
 * Detach the framebuffer with @fb_cookie from its display buffer in the
 * backend (XENDISPL_OP_FB_DETACH) and wait for the response.
 */
int xen_drm_front_fb_detach(struct xen_drm_front_info *front_info,
			    u64 fb_cookie)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xendispl_req *req;
	unsigned long flags;
	int ret;

	evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
	if (unlikely(!evtchnl))
		return -EIO;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_FB_DETACH);
	req->op.fb_detach.fb_cookie = fb_cookie;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}

/*
 * Request a page flip on connector @conn_idx to the framebuffer with
 * @fb_cookie (XENDISPL_OP_PG_FLIP) and wait for the backend response.
 */
int xen_drm_front_page_flip(struct xen_drm_front_info *front_info,
			    int conn_idx, u64 fb_cookie)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xendispl_req *req;
	unsigned long flags;
	int ret;

	if (unlikely(conn_idx >= front_info->num_evt_pairs))
		return -EINVAL;

	evtchnl = &front_info->evt_pairs[conn_idx].req;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_PG_FLIP);
	req->op.pg_flip.fb_cookie = fb_cookie;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}

/*
 * Backend notification that the frame with @fb_cookie was displayed on
 * connector @conn_idx: forward the event to the KMS layer.
 */
void xen_drm_front_on_frame_done(struct xen_drm_front_info *front_info,
				 int conn_idx, u64 fb_cookie)
{
	struct xen_drm_front_drm_info *drm_info = front_info->drm_info;

	if (unlikely(conn_idx >= front_info->cfg.num_connectors))
		return;

	xen_drm_front_kms_on_frame_done(&drm_info->pipeline[conn_idx],
					fb_cookie);
}

/* DRM dumb buffer creation callback, see xen_drm_driver.dumb_create. */
static int xen_drm_drv_dumb_create(struct drm_file *filp,
				   struct drm_device *dev,
				   struct drm_mode_create_dumb *args)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct drm_gem_object *obj;
	int ret;

	/*
	 * Dumb creation is a two stage process: first we create a fully
	 * constructed GEM object which is communicated to the backend, and
	 * only after that we can create GEM's handle. This is done so,
	 * because of the possible races: once you create a handle it becomes
	 * immediately visible to user-space, so the latter can try accessing
	 * object without pages etc.
	 * For details also see drm_gem_handle_create
	 */
	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	args->size = args->pitch * args->height;

	obj = xen_drm_front_gem_create(dev, args->size);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail;
	}

	ret = xen_drm_front_dbuf_create(drm_info->front_info,
					xen_drm_front_dbuf_to_cookie(obj),
					args->width, args->height, args->bpp,
					args->size,
					xen_drm_front_gem_get_pages(obj));
	if (ret)
		goto fail_backend;

	/* This is the tail of GEM object creation */
	ret = drm_gem_handle_create(filp, obj, &args->handle);
	if (ret)
		goto fail_handle;

	/* Drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(obj);
	return 0;

fail_handle:
	xen_drm_front_dbuf_destroy(drm_info->front_info,
				   xen_drm_front_dbuf_to_cookie(obj));
fail_backend:
	/* drop reference from allocate */
	drm_gem_object_put_unlocked(obj);
fail:
	DRM_ERROR("Failed to create dumb buffer: %d\n", ret);
	return ret;
}

/*
 * GEM free callback: ask the backend to destroy the display buffer while
 * the device is still plugged; otherwise only free the local bookkeeping.
 * Then release the GEM object itself.
 */
static void xen_drm_drv_free_object_unlocked(struct drm_gem_object *obj)
{
	struct xen_drm_front_drm_info *drm_info = obj->dev->dev_private;
	int idx;

	if (drm_dev_enter(obj->dev, &idx)) {
		xen_drm_front_dbuf_destroy(drm_info->front_info,
					   xen_drm_front_dbuf_to_cookie(obj));
		drm_dev_exit(idx);
	} else {
		/* Device unplugged: the backend is gone, clean up locally. */
		dbuf_free(&drm_info->front_info->dbuf_list,
			  xen_drm_front_dbuf_to_cookie(obj));
	}

	xen_drm_front_gem_free_object_unlocked(obj);
}

/*
 * Final DRM device release, invoked when the last reference to the
 * device is dropped.  Frees drm_info; for backend allocated buffers
 * also tells the backend we are ready to (re)initialize.
 */
static void xen_drm_drv_release(struct drm_device *dev)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct xen_drm_front_info *front_info = drm_info->front_info;

	xen_drm_front_kms_fini(drm_info);

	drm_atomic_helper_shutdown(dev);
	drm_mode_config_cleanup(dev);

	if (front_info->cfg.be_alloc)
		xenbus_switch_state(front_info->xb_dev,
				    XenbusStateInitialising);

	kfree(drm_info);
}

static const struct file_operations xen_drm_dev_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = drm_compat_ioctl,
#endif
	.poll = drm_poll,
	.read = drm_read,
	.llseek = no_llseek,
	.mmap = xen_drm_front_gem_mmap,
};

static const struct vm_operations_struct xen_drm_drv_vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static struct drm_driver xen_drm_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.release = xen_drm_drv_release, 492c575b7eeSOleksandr Andrushchenko .gem_vm_ops = &xen_drm_drv_vm_ops, 493c575b7eeSOleksandr Andrushchenko .gem_free_object_unlocked = xen_drm_drv_free_object_unlocked, 494c575b7eeSOleksandr Andrushchenko .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 495c575b7eeSOleksandr Andrushchenko .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 496c575b7eeSOleksandr Andrushchenko .gem_prime_import_sg_table = xen_drm_front_gem_import_sg_table, 497c575b7eeSOleksandr Andrushchenko .gem_prime_get_sg_table = xen_drm_front_gem_get_sg_table, 4984394e964SOleksandr Andrushchenko .gem_prime_vmap = xen_drm_front_gem_prime_vmap, 4994394e964SOleksandr Andrushchenko .gem_prime_vunmap = xen_drm_front_gem_prime_vunmap, 5004394e964SOleksandr Andrushchenko .gem_prime_mmap = xen_drm_front_gem_prime_mmap, 501c575b7eeSOleksandr Andrushchenko .dumb_create = xen_drm_drv_dumb_create, 502c575b7eeSOleksandr Andrushchenko .fops = &xen_drm_dev_fops, 503c575b7eeSOleksandr Andrushchenko .name = "xendrm-du", 504c575b7eeSOleksandr Andrushchenko .desc = "Xen PV DRM Display Unit", 505c575b7eeSOleksandr Andrushchenko .date = "20180221", 506c575b7eeSOleksandr Andrushchenko .major = 1, 507c575b7eeSOleksandr Andrushchenko .minor = 0, 508c575b7eeSOleksandr Andrushchenko 509c575b7eeSOleksandr Andrushchenko }; 510c575b7eeSOleksandr Andrushchenko 511c575b7eeSOleksandr Andrushchenko static int xen_drm_drv_init(struct xen_drm_front_info *front_info) 512c575b7eeSOleksandr Andrushchenko { 513c575b7eeSOleksandr Andrushchenko struct device *dev = &front_info->xb_dev->dev; 514c575b7eeSOleksandr Andrushchenko struct xen_drm_front_drm_info *drm_info; 515c575b7eeSOleksandr Andrushchenko struct drm_device *drm_dev; 516c575b7eeSOleksandr Andrushchenko int ret; 517c575b7eeSOleksandr Andrushchenko 518c575b7eeSOleksandr Andrushchenko DRM_INFO("Creating %s\n", xen_drm_driver.desc); 519c575b7eeSOleksandr Andrushchenko 520c575b7eeSOleksandr Andrushchenko drm_info = kzalloc(sizeof(*drm_info), 
GFP_KERNEL); 521c575b7eeSOleksandr Andrushchenko if (!drm_info) { 522c575b7eeSOleksandr Andrushchenko ret = -ENOMEM; 523c575b7eeSOleksandr Andrushchenko goto fail; 524c575b7eeSOleksandr Andrushchenko } 525c575b7eeSOleksandr Andrushchenko 526c575b7eeSOleksandr Andrushchenko drm_info->front_info = front_info; 527c575b7eeSOleksandr Andrushchenko front_info->drm_info = drm_info; 528c575b7eeSOleksandr Andrushchenko 529c575b7eeSOleksandr Andrushchenko drm_dev = drm_dev_alloc(&xen_drm_driver, dev); 530e30ca4bcSDan Carpenter if (IS_ERR(drm_dev)) { 531e30ca4bcSDan Carpenter ret = PTR_ERR(drm_dev); 532c575b7eeSOleksandr Andrushchenko goto fail; 533c575b7eeSOleksandr Andrushchenko } 534c575b7eeSOleksandr Andrushchenko 535c575b7eeSOleksandr Andrushchenko drm_info->drm_dev = drm_dev; 536c575b7eeSOleksandr Andrushchenko 537c575b7eeSOleksandr Andrushchenko drm_dev->dev_private = drm_info; 538c575b7eeSOleksandr Andrushchenko 539c575b7eeSOleksandr Andrushchenko ret = xen_drm_front_kms_init(drm_info); 540c575b7eeSOleksandr Andrushchenko if (ret) { 541c575b7eeSOleksandr Andrushchenko DRM_ERROR("Failed to initialize DRM/KMS, ret %d\n", ret); 542c575b7eeSOleksandr Andrushchenko goto fail_modeset; 543c575b7eeSOleksandr Andrushchenko } 544c575b7eeSOleksandr Andrushchenko 545c575b7eeSOleksandr Andrushchenko ret = drm_dev_register(drm_dev, 0); 546c575b7eeSOleksandr Andrushchenko if (ret) 547c575b7eeSOleksandr Andrushchenko goto fail_register; 548c575b7eeSOleksandr Andrushchenko 549c575b7eeSOleksandr Andrushchenko DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", 550c575b7eeSOleksandr Andrushchenko xen_drm_driver.name, xen_drm_driver.major, 551c575b7eeSOleksandr Andrushchenko xen_drm_driver.minor, xen_drm_driver.patchlevel, 552c575b7eeSOleksandr Andrushchenko xen_drm_driver.date, drm_dev->primary->index); 553c575b7eeSOleksandr Andrushchenko 554c575b7eeSOleksandr Andrushchenko return 0; 555c575b7eeSOleksandr Andrushchenko 556c575b7eeSOleksandr Andrushchenko fail_register: 
	drm_dev_unregister(drm_dev);
fail_modeset:
	/* Unwind in reverse order of xen_drm_drv_init() setup. */
	drm_kms_helper_poll_fini(drm_dev);
	drm_mode_config_cleanup(drm_dev);
	drm_dev_put(drm_dev);
fail:
	kfree(drm_info);
	return ret;
}

/*
 * Tear down the DRM device created by xen_drm_drv_init().
 *
 * Safe to call when initialization never completed (NULL drm_info/drm_dev)
 * or when the device was already unplugged. Ordering matters: stop the
 * connector poll helper first, then unplug so new userspace access fails,
 * then drop our reference; only after that free the protocol resources.
 */
static void xen_drm_drv_fini(struct xen_drm_front_info *front_info)
{
	struct xen_drm_front_drm_info *drm_info = front_info->drm_info;
	struct drm_device *dev;

	if (!drm_info)
		return;

	dev = drm_info->drm_dev;
	if (!dev)
		return;

	/* Nothing to do if device is already unplugged */
	if (drm_dev_is_unplugged(dev))
		return;

	drm_kms_helper_poll_fini(dev);
	drm_dev_unplug(dev);
	drm_dev_put(dev);

	front_info->drm_info = NULL;

	xen_drm_front_evtchnl_free_all(front_info);
	dbuf_free_all(&front_info->dbuf_list);

	/*
	 * If we are not using backend allocated buffers, then tell the
	 * backend we are ready to (re)initialize. Otherwise, wait for
	 * drm_driver.release.
	 */
	if (!front_info->cfg.be_alloc)
		xenbus_switch_state(front_info->xb_dev,
				    XenbusStateInitialising);
}

/*
 * Backend entered XenbusStateInitWait: read the display configuration
 * from XenStore and set up the para-virtual protocol channels.
 *
 * Returns 0 on success or a negative errno from configuration parsing,
 * event channel creation or publishing.
 */
static int displback_initwait(struct xen_drm_front_info *front_info)
{
	struct xen_drm_front_cfg *cfg = &front_info->cfg;
	int ret;

	cfg->front_info = front_info;
	ret = xen_drm_front_cfg_card(front_info, cfg);
	if (ret < 0)
		return ret;

	DRM_INFO("Have %d connector(s)\n", cfg->num_connectors);
	/* Create event channels for all connectors and publish */
	ret = xen_drm_front_evtchnl_create_all(front_info);
	if (ret < 0)
		return ret;

	return xen_drm_front_evtchnl_publish_all(front_info);
}

/*
 * Backend entered XenbusStateConnected: mark the event channels as live
 * and bring up the DRM device. Returns a negative errno on failure.
 */
static int displback_connect(struct xen_drm_front_info *front_info)
{
	xen_drm_front_evtchnl_set_state(front_info, EVTCHNL_STATE_CONNECTED);
	return xen_drm_drv_init(front_info);
}

/*
 * Backend went away (or is re-initializing): tear down the frontend side.
 * No-op if the DRM device was never created.
 */
static void displback_disconnect(struct xen_drm_front_info *front_info)
{
	if (!front_info->drm_info)
		return;

	/* Tell the backend to wait until we release the DRM driver. */
	xenbus_switch_state(front_info->xb_dev, XenbusStateReconfiguring);

	xen_drm_drv_fini(front_info);
}

/*
 * XenBus .otherend_changed handler: drive the frontend state machine in
 * response to backend state transitions. The current frontend state
 * (xb_dev->state) gates each transition so unexpected backend restarts
 * are handled as a disconnect/reconnect cycle.
 */
static void displback_changed(struct xenbus_device *xb_dev,
			      enum xenbus_state backend_state)
{
	struct xen_drm_front_info *front_info = dev_get_drvdata(&xb_dev->dev);
	int ret;

	DRM_DEBUG("Backend state is %s, front is %s\n",
		  xenbus_strstate(backend_state),
		  xenbus_strstate(xb_dev->state));

	switch (backend_state) {
	case XenbusStateReconfiguring:
		/* fall through */
	case XenbusStateReconfigured:
		/* fall through */
	case XenbusStateInitialised:
		break;

	case XenbusStateInitialising:
		/* We asked the backend to reconfigure - not a failure. */
		if (xb_dev->state == XenbusStateReconfiguring)
			break;

		/* recovering after backend unexpected closure */
		displback_disconnect(front_info);
		break;

	case XenbusStateInitWait:
		if (xb_dev->state == XenbusStateReconfiguring)
			break;

		/* recovering after backend unexpected closure */
		displback_disconnect(front_info);
		if (xb_dev->state != XenbusStateInitialising)
			break;

		ret = displback_initwait(front_info);
		if (ret < 0)
			xenbus_dev_fatal(xb_dev, ret, "initializing frontend");
		else
			xenbus_switch_state(xb_dev, XenbusStateInitialised);
		break;

	case XenbusStateConnected:
		if (xb_dev->state != XenbusStateInitialised)
			break;

		ret = displback_connect(front_info);
		if (ret < 0) {
			displback_disconnect(front_info);
			xenbus_dev_fatal(xb_dev, ret, "connecting backend");
		} else {
			xenbus_switch_state(xb_dev, XenbusStateConnected);
		}
		break;

	case XenbusStateClosing:
		/*
		 * in this state backend starts freeing resources,
		 * so let it go into closed state, so we can also
		 * remove ours
		 */
		break;

	case XenbusStateUnknown:
		/* fall through */
	case XenbusStateClosed:
		/* Already closed on our side - nothing more to tear down. */
		if (xb_dev->state == XenbusStateClosed)
			break;

		displback_disconnect(front_info);
		break;
	}
}

/*
 * XenBus probe: allocate per-device state and announce ourselves to the
 * backend by switching to XenbusStateInitialising. DRM device creation is
 * deferred until the backend connects (see displback_changed()).
 */
static int xen_drv_probe(struct xenbus_device *xb_dev,
			 const struct xenbus_device_id *id)
{
	struct xen_drm_front_info *front_info;
	struct device *dev = &xb_dev->dev;
	int ret;

	/* Displays and backing storage can live anywhere in guest memory. */
	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret < 0) {
		DRM_ERROR("Cannot setup DMA mask, ret %d", ret);
		return ret;
	}

	/* devm allocation - freed automatically on device removal. */
	front_info = devm_kzalloc(&xb_dev->dev,
				  sizeof(*front_info), GFP_KERNEL);
	if (!front_info)
		return -ENOMEM;

	front_info->xb_dev = xb_dev;
	spin_lock_init(&front_info->io_lock);
	INIT_LIST_HEAD(&front_info->dbuf_list);
	dev_set_drvdata(&xb_dev->dev, front_info);

	return xenbus_switch_state(xb_dev, XenbusStateInitialising);
}

/*
 * XenBus remove: signal the backend we are closing, wait (bounded) for it
 * to return to XenbusStateInitWait, then tear down our side.
 */
static int xen_drv_remove(struct xenbus_device *dev)
{
	struct xen_drm_front_info *front_info = dev_get_drvdata(&dev->dev);
	int to = 100;	/* 100 polls x 10 ms: roughly a one second time-out */

	xenbus_switch_state(dev, XenbusStateClosing);

	/*
	 * On driver removal it is disconnected from XenBus,
	 * so no backend state change events come via .otherend_changed
	 * callback. This prevents us from exiting gracefully, e.g.
	 * signaling the backend to free event channels, waiting for its
	 * state to change to XenbusStateClosed and cleaning at our end.
	 * Normally when front driver removed backend will finally go into
	 * XenbusStateInitWait state.
	 *
	 * Workaround: read backend's state manually and wait with time-out.
	 */
	while ((xenbus_read_unsigned(front_info->xb_dev->otherend, "state",
				     XenbusStateUnknown) != XenbusStateInitWait) &&
				     --to)
		msleep(10);

	if (!to) {
		/* Time-out expired: log the state the backend is stuck in. */
		unsigned int state;

		state = xenbus_read_unsigned(front_info->xb_dev->otherend,
					     "state", XenbusStateUnknown);
		DRM_ERROR("Backend state is %s while removing driver\n",
			  xenbus_strstate(state));
	}

	xen_drm_drv_fini(front_info);
	xenbus_frontend_closed(dev);
	return 0;
}

/* XenStore device names this frontend binds to. */
static const struct xenbus_device_id xen_driver_ids[] = {
	{ XENDISPL_DRIVER_NAME },
	{ "" }
};

static struct xenbus_driver xen_driver = {
	.ids = xen_driver_ids,
	.probe = xen_drv_probe,
	.remove = xen_drv_remove,
	.otherend_changed = displback_changed,
};

/*
 * Module init: register the XenBus frontend, but only when running in a
 * Xen domain with para-virtual devices and matching page sizes.
 */
static int __init xen_drv_init(void)
{
	/* At the moment we only support case with XEN_PAGE_SIZE == PAGE_SIZE */
	if (XEN_PAGE_SIZE != PAGE_SIZE) {
		DRM_ERROR(XENDISPL_DRIVER_NAME ": different kernel and Xen page sizes are not supported: XEN_PAGE_SIZE (%lu) != PAGE_SIZE (%lu)\n",
			  XEN_PAGE_SIZE, PAGE_SIZE);
		return -ENODEV;
	}

	if (!xen_domain())
		return -ENODEV;

	if (!xen_has_pv_devices())
		return -ENODEV;

	DRM_INFO("Registering XEN PV " XENDISPL_DRIVER_NAME "\n");
	return xenbus_register_frontend(&xen_driver);
}

/* Module exit: unregister the XenBus frontend driver. */
static void __exit xen_drv_fini(void)
{
	DRM_INFO("Unregistering XEN PV " XENDISPL_DRIVER_NAME "\n");
	xenbus_unregister_driver(&xen_driver);
}

module_init(xen_drv_init);
module_exit(xen_drv_fini);

MODULE_DESCRIPTION("Xen para-virtualized display device frontend");
MODULE_LICENSE("GPL");
MODULE_ALIAS("xen:" XENDISPL_DRIVER_NAME);