1c575b7eeSOleksandr Andrushchenko // SPDX-License-Identifier: GPL-2.0 OR MIT 2c575b7eeSOleksandr Andrushchenko 3c575b7eeSOleksandr Andrushchenko /* 4c575b7eeSOleksandr Andrushchenko * Xen para-virtual DRM device 5c575b7eeSOleksandr Andrushchenko * 6c575b7eeSOleksandr Andrushchenko * Copyright (C) 2016-2018 EPAM Systems Inc. 7c575b7eeSOleksandr Andrushchenko * 8c575b7eeSOleksandr Andrushchenko * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com> 9c575b7eeSOleksandr Andrushchenko */ 10c575b7eeSOleksandr Andrushchenko 112ea2269eSSam Ravnborg #include <linux/delay.h> 122ea2269eSSam Ravnborg #include <linux/dma-mapping.h> 132ea2269eSSam Ravnborg #include <linux/module.h> 14c575b7eeSOleksandr Andrushchenko #include <linux/of_device.h> 15c575b7eeSOleksandr Andrushchenko 162ea2269eSSam Ravnborg #include <drm/drm_atomic_helper.h> 172ea2269eSSam Ravnborg #include <drm/drm_drv.h> 182ea2269eSSam Ravnborg #include <drm/drm_ioctl.h> 192ea2269eSSam Ravnborg #include <drm/drm_probe_helper.h> 202ea2269eSSam Ravnborg #include <drm/drm_file.h> 212ea2269eSSam Ravnborg #include <drm/drm_gem.h> 222ea2269eSSam Ravnborg 23c575b7eeSOleksandr Andrushchenko #include <xen/platform_pci.h> 24c575b7eeSOleksandr Andrushchenko #include <xen/xen.h> 25c575b7eeSOleksandr Andrushchenko #include <xen/xenbus.h> 26c575b7eeSOleksandr Andrushchenko 275641f19bSOleksandr Andrushchenko #include <xen/xen-front-pgdir-shbuf.h> 28c575b7eeSOleksandr Andrushchenko #include <xen/interface/io/displif.h> 29c575b7eeSOleksandr Andrushchenko 30c575b7eeSOleksandr Andrushchenko #include "xen_drm_front.h" 31c575b7eeSOleksandr Andrushchenko #include "xen_drm_front_cfg.h" 32c575b7eeSOleksandr Andrushchenko #include "xen_drm_front_evtchnl.h" 33c575b7eeSOleksandr Andrushchenko #include "xen_drm_front_gem.h" 34c575b7eeSOleksandr Andrushchenko #include "xen_drm_front_kms.h" 35c575b7eeSOleksandr Andrushchenko 36c575b7eeSOleksandr Andrushchenko struct xen_drm_front_dbuf { 37c575b7eeSOleksandr Andrushchenko struct list_head list; 38c575b7eeSOleksandr Andrushchenko u64 dbuf_cookie; 39c575b7eeSOleksandr Andrushchenko u64 fb_cookie; 405641f19bSOleksandr Andrushchenko 415641f19bSOleksandr Andrushchenko struct xen_front_pgdir_shbuf shbuf; 42c575b7eeSOleksandr Andrushchenko }; 43c575b7eeSOleksandr Andrushchenko 445641f19bSOleksandr Andrushchenko static void dbuf_add_to_list(struct xen_drm_front_info *front_info, 455641f19bSOleksandr Andrushchenko struct xen_drm_front_dbuf *dbuf, u64 dbuf_cookie) 46c575b7eeSOleksandr Andrushchenko { 47c575b7eeSOleksandr Andrushchenko dbuf->dbuf_cookie = dbuf_cookie; 48c575b7eeSOleksandr Andrushchenko list_add(&dbuf->list, &front_info->dbuf_list); 49c575b7eeSOleksandr Andrushchenko } 50c575b7eeSOleksandr Andrushchenko 51c575b7eeSOleksandr Andrushchenko static struct xen_drm_front_dbuf *dbuf_get(struct list_head *dbuf_list, 52c575b7eeSOleksandr Andrushchenko u64 dbuf_cookie) 53c575b7eeSOleksandr Andrushchenko { 54c575b7eeSOleksandr Andrushchenko struct xen_drm_front_dbuf *buf, *q; 55c575b7eeSOleksandr Andrushchenko 56c575b7eeSOleksandr Andrushchenko list_for_each_entry_safe(buf, q, dbuf_list, list) 57c575b7eeSOleksandr Andrushchenko if (buf->dbuf_cookie == dbuf_cookie) 58c575b7eeSOleksandr Andrushchenko return buf; 59c575b7eeSOleksandr Andrushchenko 60c575b7eeSOleksandr Andrushchenko return NULL; 61c575b7eeSOleksandr Andrushchenko } 62c575b7eeSOleksandr Andrushchenko 63c575b7eeSOleksandr Andrushchenko static void dbuf_free(struct list_head *dbuf_list, u64 dbuf_cookie) 64c575b7eeSOleksandr Andrushchenko { 65c575b7eeSOleksandr Andrushchenko struct xen_drm_front_dbuf *buf, *q; 66c575b7eeSOleksandr Andrushchenko 67c575b7eeSOleksandr Andrushchenko list_for_each_entry_safe(buf, q, dbuf_list, list) 68c575b7eeSOleksandr Andrushchenko if (buf->dbuf_cookie == dbuf_cookie) { 69c575b7eeSOleksandr Andrushchenko list_del(&buf->list); 705641f19bSOleksandr Andrushchenko xen_front_pgdir_shbuf_unmap(&buf->shbuf); 715641f19bSOleksandr Andrushchenko xen_front_pgdir_shbuf_free(&buf->shbuf); 72c575b7eeSOleksandr Andrushchenko kfree(buf); 73c575b7eeSOleksandr Andrushchenko break; 74c575b7eeSOleksandr Andrushchenko } 75c575b7eeSOleksandr Andrushchenko } 76c575b7eeSOleksandr Andrushchenko 77c575b7eeSOleksandr Andrushchenko static void dbuf_free_all(struct list_head *dbuf_list) 78c575b7eeSOleksandr Andrushchenko { 79c575b7eeSOleksandr Andrushchenko struct xen_drm_front_dbuf *buf, *q; 80c575b7eeSOleksandr Andrushchenko 81c575b7eeSOleksandr Andrushchenko list_for_each_entry_safe(buf, q, dbuf_list, list) { 82c575b7eeSOleksandr Andrushchenko list_del(&buf->list); 835641f19bSOleksandr Andrushchenko xen_front_pgdir_shbuf_unmap(&buf->shbuf); 845641f19bSOleksandr Andrushchenko xen_front_pgdir_shbuf_free(&buf->shbuf); 85c575b7eeSOleksandr Andrushchenko kfree(buf); 86c575b7eeSOleksandr Andrushchenko } 87c575b7eeSOleksandr Andrushchenko } 88c575b7eeSOleksandr Andrushchenko 89c575b7eeSOleksandr Andrushchenko static struct xendispl_req * 90c575b7eeSOleksandr Andrushchenko be_prepare_req(struct xen_drm_front_evtchnl *evtchnl, u8 operation) 91c575b7eeSOleksandr Andrushchenko { 92c575b7eeSOleksandr Andrushchenko struct xendispl_req *req; 93c575b7eeSOleksandr Andrushchenko 94c575b7eeSOleksandr Andrushchenko req = RING_GET_REQUEST(&evtchnl->u.req.ring, 95c575b7eeSOleksandr Andrushchenko evtchnl->u.req.ring.req_prod_pvt); 96c575b7eeSOleksandr Andrushchenko req->operation = operation; 97c575b7eeSOleksandr Andrushchenko req->id = evtchnl->evt_next_id++; 98c575b7eeSOleksandr Andrushchenko evtchnl->evt_id = req->id; 99c575b7eeSOleksandr Andrushchenko return req; 100c575b7eeSOleksandr Andrushchenko } 101c575b7eeSOleksandr Andrushchenko 102c575b7eeSOleksandr Andrushchenko static int be_stream_do_io(struct xen_drm_front_evtchnl *evtchnl, 103c575b7eeSOleksandr Andrushchenko struct xendispl_req *req) 104c575b7eeSOleksandr Andrushchenko { 105c575b7eeSOleksandr Andrushchenko reinit_completion(&evtchnl->u.req.completion); 106c575b7eeSOleksandr Andrushchenko if (unlikely(evtchnl->state != EVTCHNL_STATE_CONNECTED)) 107c575b7eeSOleksandr Andrushchenko return -EIO; 108c575b7eeSOleksandr Andrushchenko 109c575b7eeSOleksandr Andrushchenko xen_drm_front_evtchnl_flush(evtchnl); 110c575b7eeSOleksandr Andrushchenko return 0; 111c575b7eeSOleksandr Andrushchenko } 112c575b7eeSOleksandr Andrushchenko 113c575b7eeSOleksandr Andrushchenko static int be_stream_wait_io(struct xen_drm_front_evtchnl *evtchnl) 114c575b7eeSOleksandr Andrushchenko { 115c575b7eeSOleksandr Andrushchenko if (wait_for_completion_timeout(&evtchnl->u.req.completion, 116c575b7eeSOleksandr Andrushchenko msecs_to_jiffies(XEN_DRM_FRONT_WAIT_BACK_MS)) <= 0) 117c575b7eeSOleksandr Andrushchenko return -ETIMEDOUT; 118c575b7eeSOleksandr Andrushchenko 119c575b7eeSOleksandr Andrushchenko return evtchnl->u.req.resp_status; 120c575b7eeSOleksandr Andrushchenko } 121c575b7eeSOleksandr Andrushchenko 122c575b7eeSOleksandr Andrushchenko int xen_drm_front_mode_set(struct xen_drm_front_drm_pipeline *pipeline, 123c575b7eeSOleksandr Andrushchenko u32 x, u32 y, u32 width, u32 height, 124c575b7eeSOleksandr Andrushchenko u32 bpp, u64 fb_cookie) 125c575b7eeSOleksandr Andrushchenko { 126c575b7eeSOleksandr Andrushchenko struct xen_drm_front_evtchnl *evtchnl; 127c575b7eeSOleksandr Andrushchenko struct xen_drm_front_info *front_info; 128c575b7eeSOleksandr Andrushchenko struct xendispl_req *req; 129c575b7eeSOleksandr Andrushchenko unsigned long flags; 130c575b7eeSOleksandr Andrushchenko int ret; 131c575b7eeSOleksandr Andrushchenko 132c575b7eeSOleksandr Andrushchenko front_info = pipeline->drm_info->front_info; 133c575b7eeSOleksandr Andrushchenko evtchnl = &front_info->evt_pairs[pipeline->index].req; 134c575b7eeSOleksandr Andrushchenko if (unlikely(!evtchnl)) 135c575b7eeSOleksandr Andrushchenko return -EIO; 136c575b7eeSOleksandr Andrushchenko 137c575b7eeSOleksandr Andrushchenko mutex_lock(&evtchnl->u.req.req_io_lock); 138c575b7eeSOleksandr Andrushchenko 139c575b7eeSOleksandr Andrushchenko spin_lock_irqsave(&front_info->io_lock, flags); 140c575b7eeSOleksandr Andrushchenko req = be_prepare_req(evtchnl, XENDISPL_OP_SET_CONFIG); 141c575b7eeSOleksandr Andrushchenko req->op.set_config.x = x; 142c575b7eeSOleksandr Andrushchenko req->op.set_config.y = y; 143c575b7eeSOleksandr Andrushchenko req->op.set_config.width = width; 144c575b7eeSOleksandr Andrushchenko req->op.set_config.height = height; 145c575b7eeSOleksandr Andrushchenko req->op.set_config.bpp = bpp; 146c575b7eeSOleksandr Andrushchenko req->op.set_config.fb_cookie = fb_cookie; 147c575b7eeSOleksandr Andrushchenko 148c575b7eeSOleksandr Andrushchenko ret = be_stream_do_io(evtchnl, req); 149c575b7eeSOleksandr Andrushchenko spin_unlock_irqrestore(&front_info->io_lock, flags); 150c575b7eeSOleksandr Andrushchenko 151c575b7eeSOleksandr Andrushchenko if (ret == 0) 152c575b7eeSOleksandr Andrushchenko ret = be_stream_wait_io(evtchnl); 153c575b7eeSOleksandr Andrushchenko 154c575b7eeSOleksandr Andrushchenko mutex_unlock(&evtchnl->u.req.req_io_lock); 155c575b7eeSOleksandr Andrushchenko return ret; 156c575b7eeSOleksandr Andrushchenko } 157c575b7eeSOleksandr Andrushchenko 1584394e964SOleksandr Andrushchenko int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info, 159c575b7eeSOleksandr Andrushchenko u64 dbuf_cookie, u32 width, u32 height, 160585c6ed7SOleksandr Andrushchenko u32 bpp, u64 size, u32 offset, 161585c6ed7SOleksandr Andrushchenko struct page **pages) 162c575b7eeSOleksandr Andrushchenko { 163c575b7eeSOleksandr Andrushchenko struct xen_drm_front_evtchnl *evtchnl; 1645641f19bSOleksandr Andrushchenko struct xen_drm_front_dbuf *dbuf; 165c575b7eeSOleksandr Andrushchenko struct xendispl_req *req; 1665641f19bSOleksandr Andrushchenko struct xen_front_pgdir_shbuf_cfg buf_cfg; 167c575b7eeSOleksandr Andrushchenko unsigned long flags; 168c575b7eeSOleksandr Andrushchenko int ret; 169c575b7eeSOleksandr Andrushchenko 170c575b7eeSOleksandr Andrushchenko evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req; 171c575b7eeSOleksandr Andrushchenko if (unlikely(!evtchnl)) 172c575b7eeSOleksandr Andrushchenko return -EIO; 173c575b7eeSOleksandr Andrushchenko 1745641f19bSOleksandr Andrushchenko dbuf = kzalloc(sizeof(*dbuf), GFP_KERNEL); 1755641f19bSOleksandr Andrushchenko if (!dbuf) 1765641f19bSOleksandr Andrushchenko return -ENOMEM; 1775641f19bSOleksandr Andrushchenko 1785641f19bSOleksandr Andrushchenko dbuf_add_to_list(front_info, dbuf, dbuf_cookie); 1795641f19bSOleksandr Andrushchenko 180c575b7eeSOleksandr Andrushchenko memset(&buf_cfg, 0, sizeof(buf_cfg)); 181c575b7eeSOleksandr Andrushchenko buf_cfg.xb_dev = front_info->xb_dev; 1825641f19bSOleksandr Andrushchenko buf_cfg.num_pages = DIV_ROUND_UP(size, PAGE_SIZE); 183c575b7eeSOleksandr Andrushchenko buf_cfg.pages = pages; 1845641f19bSOleksandr Andrushchenko buf_cfg.pgdir = &dbuf->shbuf; 185c575b7eeSOleksandr Andrushchenko buf_cfg.be_alloc = front_info->cfg.be_alloc; 186c575b7eeSOleksandr Andrushchenko 1875641f19bSOleksandr Andrushchenko ret = xen_front_pgdir_shbuf_alloc(&buf_cfg); 1885641f19bSOleksandr Andrushchenko if (ret < 0) 1895641f19bSOleksandr Andrushchenko goto fail_shbuf_alloc; 190c575b7eeSOleksandr Andrushchenko 191c575b7eeSOleksandr Andrushchenko mutex_lock(&evtchnl->u.req.req_io_lock); 192c575b7eeSOleksandr Andrushchenko 193c575b7eeSOleksandr Andrushchenko spin_lock_irqsave(&front_info->io_lock, flags); 194c575b7eeSOleksandr Andrushchenko req = be_prepare_req(evtchnl, XENDISPL_OP_DBUF_CREATE); 195c575b7eeSOleksandr Andrushchenko req->op.dbuf_create.gref_directory = 1965641f19bSOleksandr Andrushchenko xen_front_pgdir_shbuf_get_dir_start(&dbuf->shbuf); 197c575b7eeSOleksandr Andrushchenko req->op.dbuf_create.buffer_sz = size; 198585c6ed7SOleksandr Andrushchenko req->op.dbuf_create.data_ofs = offset; 199c575b7eeSOleksandr Andrushchenko req->op.dbuf_create.dbuf_cookie = dbuf_cookie; 200c575b7eeSOleksandr Andrushchenko req->op.dbuf_create.width = width; 201c575b7eeSOleksandr Andrushchenko req->op.dbuf_create.height = height; 202c575b7eeSOleksandr Andrushchenko req->op.dbuf_create.bpp = bpp; 203c575b7eeSOleksandr Andrushchenko if (buf_cfg.be_alloc) 204c575b7eeSOleksandr Andrushchenko req->op.dbuf_create.flags |= XENDISPL_DBUF_FLG_REQ_ALLOC; 205c575b7eeSOleksandr Andrushchenko 206c575b7eeSOleksandr Andrushchenko ret = be_stream_do_io(evtchnl, req); 207c575b7eeSOleksandr Andrushchenko spin_unlock_irqrestore(&front_info->io_lock, flags); 208c575b7eeSOleksandr Andrushchenko 209c575b7eeSOleksandr Andrushchenko if (ret < 0) 210c575b7eeSOleksandr Andrushchenko goto fail; 211c575b7eeSOleksandr Andrushchenko 212c575b7eeSOleksandr Andrushchenko ret = be_stream_wait_io(evtchnl); 213c575b7eeSOleksandr Andrushchenko if (ret < 0) 214c575b7eeSOleksandr Andrushchenko goto fail; 215c575b7eeSOleksandr Andrushchenko 2165641f19bSOleksandr Andrushchenko ret = xen_front_pgdir_shbuf_map(&dbuf->shbuf); 217c575b7eeSOleksandr Andrushchenko if (ret < 0) 218c575b7eeSOleksandr Andrushchenko goto fail; 219c575b7eeSOleksandr Andrushchenko 220c575b7eeSOleksandr Andrushchenko mutex_unlock(&evtchnl->u.req.req_io_lock); 221c575b7eeSOleksandr Andrushchenko return 0; 222c575b7eeSOleksandr Andrushchenko 223c575b7eeSOleksandr Andrushchenko fail: 224c575b7eeSOleksandr Andrushchenko mutex_unlock(&evtchnl->u.req.req_io_lock); 2255641f19bSOleksandr Andrushchenko fail_shbuf_alloc: 226c575b7eeSOleksandr Andrushchenko dbuf_free(&front_info->dbuf_list, dbuf_cookie); 227c575b7eeSOleksandr Andrushchenko return ret; 228c575b7eeSOleksandr Andrushchenko } 229c575b7eeSOleksandr Andrushchenko 230c575b7eeSOleksandr Andrushchenko static int xen_drm_front_dbuf_destroy(struct xen_drm_front_info *front_info, 231c575b7eeSOleksandr Andrushchenko u64 dbuf_cookie) 232c575b7eeSOleksandr Andrushchenko { 233c575b7eeSOleksandr Andrushchenko struct xen_drm_front_evtchnl *evtchnl; 234c575b7eeSOleksandr Andrushchenko struct xendispl_req *req; 235c575b7eeSOleksandr Andrushchenko unsigned long flags; 236c575b7eeSOleksandr Andrushchenko bool be_alloc; 237c575b7eeSOleksandr Andrushchenko int ret; 238c575b7eeSOleksandr Andrushchenko 239c575b7eeSOleksandr Andrushchenko evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req; 240c575b7eeSOleksandr Andrushchenko if (unlikely(!evtchnl)) 241c575b7eeSOleksandr Andrushchenko return -EIO; 242c575b7eeSOleksandr Andrushchenko 243c575b7eeSOleksandr Andrushchenko be_alloc = front_info->cfg.be_alloc; 244c575b7eeSOleksandr Andrushchenko 245c575b7eeSOleksandr Andrushchenko /* 246c575b7eeSOleksandr Andrushchenko * For the backend allocated buffer release references now, so backend 247c575b7eeSOleksandr Andrushchenko * can free the buffer. 248c575b7eeSOleksandr Andrushchenko */ 249c575b7eeSOleksandr Andrushchenko if (be_alloc) 250c575b7eeSOleksandr Andrushchenko dbuf_free(&front_info->dbuf_list, dbuf_cookie); 251c575b7eeSOleksandr Andrushchenko 252c575b7eeSOleksandr Andrushchenko mutex_lock(&evtchnl->u.req.req_io_lock); 253c575b7eeSOleksandr Andrushchenko 254c575b7eeSOleksandr Andrushchenko spin_lock_irqsave(&front_info->io_lock, flags); 255c575b7eeSOleksandr Andrushchenko req = be_prepare_req(evtchnl, XENDISPL_OP_DBUF_DESTROY); 256c575b7eeSOleksandr Andrushchenko req->op.dbuf_destroy.dbuf_cookie = dbuf_cookie; 257c575b7eeSOleksandr Andrushchenko 258c575b7eeSOleksandr Andrushchenko ret = be_stream_do_io(evtchnl, req); 259c575b7eeSOleksandr Andrushchenko spin_unlock_irqrestore(&front_info->io_lock, flags); 260c575b7eeSOleksandr Andrushchenko 261c575b7eeSOleksandr Andrushchenko if (ret == 0) 262c575b7eeSOleksandr Andrushchenko ret = be_stream_wait_io(evtchnl); 263c575b7eeSOleksandr Andrushchenko 264c575b7eeSOleksandr Andrushchenko /* 265c575b7eeSOleksandr Andrushchenko * Do this regardless of communication status with the backend: 266c575b7eeSOleksandr Andrushchenko * if we cannot remove remote resources remove what we can locally. 267c575b7eeSOleksandr Andrushchenko */ 268c575b7eeSOleksandr Andrushchenko if (!be_alloc) 269c575b7eeSOleksandr Andrushchenko dbuf_free(&front_info->dbuf_list, dbuf_cookie); 270c575b7eeSOleksandr Andrushchenko 271c575b7eeSOleksandr Andrushchenko mutex_unlock(&evtchnl->u.req.req_io_lock); 272c575b7eeSOleksandr Andrushchenko return ret; 273c575b7eeSOleksandr Andrushchenko } 274c575b7eeSOleksandr Andrushchenko 275c575b7eeSOleksandr Andrushchenko int xen_drm_front_fb_attach(struct xen_drm_front_info *front_info, 276c575b7eeSOleksandr Andrushchenko u64 dbuf_cookie, u64 fb_cookie, u32 width, 277c575b7eeSOleksandr Andrushchenko u32 height, u32 pixel_format) 278c575b7eeSOleksandr Andrushchenko { 279c575b7eeSOleksandr Andrushchenko struct xen_drm_front_evtchnl *evtchnl; 280c575b7eeSOleksandr Andrushchenko struct xen_drm_front_dbuf *buf; 281c575b7eeSOleksandr Andrushchenko struct xendispl_req *req; 282c575b7eeSOleksandr Andrushchenko unsigned long flags; 283c575b7eeSOleksandr Andrushchenko int ret; 284c575b7eeSOleksandr Andrushchenko 285c575b7eeSOleksandr Andrushchenko evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req; 286c575b7eeSOleksandr Andrushchenko if (unlikely(!evtchnl)) 287c575b7eeSOleksandr Andrushchenko return -EIO; 288c575b7eeSOleksandr Andrushchenko 289c575b7eeSOleksandr Andrushchenko buf = dbuf_get(&front_info->dbuf_list, dbuf_cookie); 290c575b7eeSOleksandr Andrushchenko if (!buf) 291c575b7eeSOleksandr Andrushchenko return -EINVAL; 292c575b7eeSOleksandr Andrushchenko 293c575b7eeSOleksandr Andrushchenko buf->fb_cookie = fb_cookie; 294c575b7eeSOleksandr Andrushchenko 295c575b7eeSOleksandr Andrushchenko mutex_lock(&evtchnl->u.req.req_io_lock); 296c575b7eeSOleksandr Andrushchenko 297c575b7eeSOleksandr Andrushchenko spin_lock_irqsave(&front_info->io_lock, flags); 298c575b7eeSOleksandr Andrushchenko req = be_prepare_req(evtchnl, XENDISPL_OP_FB_ATTACH); 299c575b7eeSOleksandr Andrushchenko req->op.fb_attach.dbuf_cookie = dbuf_cookie; 300c575b7eeSOleksandr Andrushchenko req->op.fb_attach.fb_cookie = fb_cookie; 301c575b7eeSOleksandr Andrushchenko req->op.fb_attach.width = width; 302c575b7eeSOleksandr Andrushchenko req->op.fb_attach.height = height; 303c575b7eeSOleksandr Andrushchenko req->op.fb_attach.pixel_format = pixel_format; 304c575b7eeSOleksandr Andrushchenko 305c575b7eeSOleksandr Andrushchenko ret = be_stream_do_io(evtchnl, req); 306c575b7eeSOleksandr Andrushchenko spin_unlock_irqrestore(&front_info->io_lock, flags); 307c575b7eeSOleksandr Andrushchenko 308c575b7eeSOleksandr Andrushchenko if (ret == 0) 309c575b7eeSOleksandr Andrushchenko ret = be_stream_wait_io(evtchnl); 310c575b7eeSOleksandr Andrushchenko 311c575b7eeSOleksandr Andrushchenko mutex_unlock(&evtchnl->u.req.req_io_lock); 312c575b7eeSOleksandr Andrushchenko return ret; 313c575b7eeSOleksandr Andrushchenko } 314c575b7eeSOleksandr Andrushchenko 315c575b7eeSOleksandr Andrushchenko int xen_drm_front_fb_detach(struct xen_drm_front_info *front_info, 316c575b7eeSOleksandr Andrushchenko u64 fb_cookie) 317c575b7eeSOleksandr Andrushchenko { 318c575b7eeSOleksandr Andrushchenko struct xen_drm_front_evtchnl *evtchnl; 319c575b7eeSOleksandr Andrushchenko struct xendispl_req *req; 320c575b7eeSOleksandr Andrushchenko unsigned long flags; 321c575b7eeSOleksandr Andrushchenko int ret; 322c575b7eeSOleksandr Andrushchenko 323c575b7eeSOleksandr Andrushchenko evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req; 324c575b7eeSOleksandr Andrushchenko if (unlikely(!evtchnl)) 325c575b7eeSOleksandr Andrushchenko return -EIO; 326c575b7eeSOleksandr Andrushchenko 327c575b7eeSOleksandr Andrushchenko mutex_lock(&evtchnl->u.req.req_io_lock); 328c575b7eeSOleksandr Andrushchenko 329c575b7eeSOleksandr Andrushchenko spin_lock_irqsave(&front_info->io_lock, flags); 330c575b7eeSOleksandr Andrushchenko req = be_prepare_req(evtchnl, XENDISPL_OP_FB_DETACH); 331c575b7eeSOleksandr Andrushchenko req->op.fb_detach.fb_cookie = fb_cookie; 332c575b7eeSOleksandr Andrushchenko 333c575b7eeSOleksandr Andrushchenko ret = be_stream_do_io(evtchnl, req); 334c575b7eeSOleksandr Andrushchenko spin_unlock_irqrestore(&front_info->io_lock, flags); 335c575b7eeSOleksandr Andrushchenko 336c575b7eeSOleksandr Andrushchenko if (ret == 0) 337c575b7eeSOleksandr Andrushchenko ret = be_stream_wait_io(evtchnl); 338c575b7eeSOleksandr Andrushchenko 339c575b7eeSOleksandr Andrushchenko mutex_unlock(&evtchnl->u.req.req_io_lock); 340c575b7eeSOleksandr Andrushchenko return ret; 341c575b7eeSOleksandr Andrushchenko } 342c575b7eeSOleksandr Andrushchenko 343c575b7eeSOleksandr Andrushchenko int xen_drm_front_page_flip(struct xen_drm_front_info *front_info, 344c575b7eeSOleksandr Andrushchenko int conn_idx, u64 fb_cookie) 345c575b7eeSOleksandr Andrushchenko { 346c575b7eeSOleksandr Andrushchenko struct xen_drm_front_evtchnl *evtchnl; 347c575b7eeSOleksandr Andrushchenko struct xendispl_req *req; 348c575b7eeSOleksandr Andrushchenko unsigned long flags; 349c575b7eeSOleksandr Andrushchenko int ret; 350c575b7eeSOleksandr Andrushchenko 351c575b7eeSOleksandr Andrushchenko if (unlikely(conn_idx >= front_info->num_evt_pairs)) 352c575b7eeSOleksandr Andrushchenko return -EINVAL; 353c575b7eeSOleksandr Andrushchenko 354c575b7eeSOleksandr Andrushchenko evtchnl = &front_info->evt_pairs[conn_idx].req; 355c575b7eeSOleksandr Andrushchenko 356c575b7eeSOleksandr Andrushchenko mutex_lock(&evtchnl->u.req.req_io_lock); 357c575b7eeSOleksandr Andrushchenko 358c575b7eeSOleksandr Andrushchenko spin_lock_irqsave(&front_info->io_lock, flags); 359c575b7eeSOleksandr Andrushchenko req = be_prepare_req(evtchnl, XENDISPL_OP_PG_FLIP); 360c575b7eeSOleksandr Andrushchenko req->op.pg_flip.fb_cookie = fb_cookie; 361c575b7eeSOleksandr Andrushchenko 362c575b7eeSOleksandr Andrushchenko ret = be_stream_do_io(evtchnl, req); 363c575b7eeSOleksandr Andrushchenko spin_unlock_irqrestore(&front_info->io_lock, flags); 364c575b7eeSOleksandr Andrushchenko 365c575b7eeSOleksandr Andrushchenko if (ret == 0) 366c575b7eeSOleksandr Andrushchenko ret = be_stream_wait_io(evtchnl); 367c575b7eeSOleksandr Andrushchenko 368c575b7eeSOleksandr Andrushchenko mutex_unlock(&evtchnl->u.req.req_io_lock); 369c575b7eeSOleksandr Andrushchenko return ret; 370c575b7eeSOleksandr Andrushchenko } 371c575b7eeSOleksandr Andrushchenko 372c575b7eeSOleksandr Andrushchenko void xen_drm_front_on_frame_done(struct xen_drm_front_info *front_info, 373c575b7eeSOleksandr Andrushchenko int conn_idx, u64 fb_cookie) 374c575b7eeSOleksandr Andrushchenko { 375c575b7eeSOleksandr Andrushchenko struct xen_drm_front_drm_info *drm_info = front_info->drm_info; 376c575b7eeSOleksandr Andrushchenko 377c575b7eeSOleksandr Andrushchenko if (unlikely(conn_idx >= front_info->cfg.num_connectors)) 378c575b7eeSOleksandr Andrushchenko return; 379c575b7eeSOleksandr Andrushchenko 380c575b7eeSOleksandr Andrushchenko xen_drm_front_kms_on_frame_done(&drm_info->pipeline[conn_idx], 381c575b7eeSOleksandr Andrushchenko fb_cookie); 382c575b7eeSOleksandr Andrushchenko } 383c575b7eeSOleksandr Andrushchenko 3841adda8b8SThomas Zimmermann void xen_drm_front_gem_object_free(struct drm_gem_object *obj) 3851adda8b8SThomas Zimmermann { 3861adda8b8SThomas Zimmermann struct xen_drm_front_drm_info *drm_info = obj->dev->dev_private; 3871adda8b8SThomas Zimmermann int idx; 3881adda8b8SThomas Zimmermann 3891adda8b8SThomas Zimmermann if (drm_dev_enter(obj->dev, &idx)) { 3901adda8b8SThomas Zimmermann xen_drm_front_dbuf_destroy(drm_info->front_info, 3911adda8b8SThomas Zimmermann xen_drm_front_dbuf_to_cookie(obj)); 3921adda8b8SThomas Zimmermann drm_dev_exit(idx); 3931adda8b8SThomas Zimmermann } else { 3941adda8b8SThomas Zimmermann dbuf_free(&drm_info->front_info->dbuf_list, 3951adda8b8SThomas Zimmermann xen_drm_front_dbuf_to_cookie(obj)); 3961adda8b8SThomas Zimmermann } 3971adda8b8SThomas Zimmermann 3981adda8b8SThomas Zimmermann xen_drm_front_gem_free_object_unlocked(obj); 3991adda8b8SThomas Zimmermann } 4001adda8b8SThomas Zimmermann 401c575b7eeSOleksandr Andrushchenko static int xen_drm_drv_dumb_create(struct drm_file *filp, 402c575b7eeSOleksandr Andrushchenko struct drm_device *dev, 403c575b7eeSOleksandr Andrushchenko struct drm_mode_create_dumb *args) 404c575b7eeSOleksandr Andrushchenko { 405c575b7eeSOleksandr Andrushchenko struct xen_drm_front_drm_info *drm_info = dev->dev_private; 406c575b7eeSOleksandr Andrushchenko struct drm_gem_object *obj; 407c575b7eeSOleksandr Andrushchenko int ret; 408c575b7eeSOleksandr Andrushchenko 409c575b7eeSOleksandr Andrushchenko /* 410c575b7eeSOleksandr Andrushchenko * Dumb creation is a two stage process: first we create a fully 411c575b7eeSOleksandr Andrushchenko * constructed GEM object which is communicated to the backend, and 412c575b7eeSOleksandr Andrushchenko * only after that we can create GEM's handle. This is done so, 413c575b7eeSOleksandr Andrushchenko * because of the possible races: once you create a handle it becomes 414c575b7eeSOleksandr Andrushchenko * immediately visible to user-space, so the latter can try accessing 415c575b7eeSOleksandr Andrushchenko * object without pages etc. 416c575b7eeSOleksandr Andrushchenko * For details also see drm_gem_handle_create 417c575b7eeSOleksandr Andrushchenko */ 418c575b7eeSOleksandr Andrushchenko args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8); 419c575b7eeSOleksandr Andrushchenko args->size = args->pitch * args->height; 420c575b7eeSOleksandr Andrushchenko 421c575b7eeSOleksandr Andrushchenko obj = xen_drm_front_gem_create(dev, args->size); 42214dee058SOleksandr Andrushchenko if (IS_ERR(obj)) { 42314dee058SOleksandr Andrushchenko ret = PTR_ERR(obj); 424c575b7eeSOleksandr Andrushchenko goto fail; 425c575b7eeSOleksandr Andrushchenko } 426c575b7eeSOleksandr Andrushchenko 4274394e964SOleksandr Andrushchenko ret = xen_drm_front_dbuf_create(drm_info->front_info, 428c575b7eeSOleksandr Andrushchenko xen_drm_front_dbuf_to_cookie(obj), 429c575b7eeSOleksandr Andrushchenko args->width, args->height, args->bpp, 430585c6ed7SOleksandr Andrushchenko args->size, 0, 431c575b7eeSOleksandr Andrushchenko xen_drm_front_gem_get_pages(obj)); 432c575b7eeSOleksandr Andrushchenko if (ret) 433c575b7eeSOleksandr Andrushchenko goto fail_backend; 434c575b7eeSOleksandr Andrushchenko 435c575b7eeSOleksandr Andrushchenko /* This is the tail of GEM object creation */ 436c575b7eeSOleksandr Andrushchenko ret = drm_gem_handle_create(filp, obj, &args->handle); 437c575b7eeSOleksandr Andrushchenko if (ret) 438c575b7eeSOleksandr Andrushchenko goto fail_handle; 439c575b7eeSOleksandr Andrushchenko 440c575b7eeSOleksandr Andrushchenko /* Drop reference from allocate - handle holds it now */ 44195865e3aSEmil Velikov drm_gem_object_put(obj); 442c575b7eeSOleksandr Andrushchenko return 0; 443c575b7eeSOleksandr Andrushchenko 444c575b7eeSOleksandr Andrushchenko fail_handle: 445c575b7eeSOleksandr Andrushchenko xen_drm_front_dbuf_destroy(drm_info->front_info, 446c575b7eeSOleksandr Andrushchenko xen_drm_front_dbuf_to_cookie(obj)); 447c575b7eeSOleksandr Andrushchenko fail_backend: 448c575b7eeSOleksandr Andrushchenko /* drop reference from allocate */ 44995865e3aSEmil Velikov drm_gem_object_put(obj); 450c575b7eeSOleksandr Andrushchenko fail: 451c575b7eeSOleksandr Andrushchenko DRM_ERROR("Failed to create dumb buffer: %d\n", ret); 452c575b7eeSOleksandr Andrushchenko return ret; 453c575b7eeSOleksandr Andrushchenko } 454c575b7eeSOleksandr Andrushchenko 455c575b7eeSOleksandr Andrushchenko static void xen_drm_drv_release(struct drm_device *dev) 456c575b7eeSOleksandr Andrushchenko { 457c575b7eeSOleksandr Andrushchenko struct xen_drm_front_drm_info *drm_info = dev->dev_private; 458c575b7eeSOleksandr Andrushchenko struct xen_drm_front_info *front_info = drm_info->front_info; 459c575b7eeSOleksandr Andrushchenko 460c575b7eeSOleksandr Andrushchenko xen_drm_front_kms_fini(drm_info); 461c575b7eeSOleksandr Andrushchenko 462c575b7eeSOleksandr Andrushchenko drm_atomic_helper_shutdown(dev); 463c575b7eeSOleksandr Andrushchenko drm_mode_config_cleanup(dev); 464c575b7eeSOleksandr Andrushchenko 465c575b7eeSOleksandr Andrushchenko if (front_info->cfg.be_alloc) 466c575b7eeSOleksandr Andrushchenko xenbus_switch_state(front_info->xb_dev, 467c575b7eeSOleksandr Andrushchenko XenbusStateInitialising); 468c575b7eeSOleksandr Andrushchenko 469c575b7eeSOleksandr Andrushchenko kfree(drm_info); 470c575b7eeSOleksandr Andrushchenko } 471c575b7eeSOleksandr Andrushchenko 472*3153c648SThomas Zimmermann DEFINE_DRM_GEM_FOPS(xen_drm_dev_fops); 473c575b7eeSOleksandr Andrushchenko 47470a59dd8SDaniel Vetter static const struct drm_driver xen_drm_driver = { 4750424fdafSDaniel Vetter .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, 476c575b7eeSOleksandr Andrushchenko .release = xen_drm_drv_release, 477c575b7eeSOleksandr Andrushchenko .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 478c575b7eeSOleksandr Andrushchenko .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 479c575b7eeSOleksandr Andrushchenko .gem_prime_import_sg_table = xen_drm_front_gem_import_sg_table, 480*3153c648SThomas Zimmermann .gem_prime_mmap = drm_gem_prime_mmap, 481c575b7eeSOleksandr Andrushchenko .dumb_create = xen_drm_drv_dumb_create, 482c575b7eeSOleksandr Andrushchenko .fops = &xen_drm_dev_fops, 483c575b7eeSOleksandr Andrushchenko .name = "xendrm-du", 484c575b7eeSOleksandr Andrushchenko .desc = "Xen PV DRM Display Unit", 485c575b7eeSOleksandr Andrushchenko .date = "20180221", 486c575b7eeSOleksandr Andrushchenko .major = 1, 487c575b7eeSOleksandr Andrushchenko .minor = 0, 488c575b7eeSOleksandr Andrushchenko 489c575b7eeSOleksandr Andrushchenko }; 490c575b7eeSOleksandr Andrushchenko 491c575b7eeSOleksandr Andrushchenko static int xen_drm_drv_init(struct xen_drm_front_info *front_info) 492c575b7eeSOleksandr Andrushchenko { 493c575b7eeSOleksandr Andrushchenko struct device *dev = &front_info->xb_dev->dev; 494c575b7eeSOleksandr Andrushchenko struct xen_drm_front_drm_info *drm_info; 495c575b7eeSOleksandr Andrushchenko struct drm_device *drm_dev; 496c575b7eeSOleksandr Andrushchenko int ret; 497c575b7eeSOleksandr Andrushchenko 498c575b7eeSOleksandr Andrushchenko DRM_INFO("Creating %s\n", xen_drm_driver.desc); 499c575b7eeSOleksandr Andrushchenko 500c575b7eeSOleksandr Andrushchenko drm_info = kzalloc(sizeof(*drm_info), GFP_KERNEL); 501c575b7eeSOleksandr Andrushchenko if (!drm_info) { 502c575b7eeSOleksandr Andrushchenko ret = -ENOMEM; 503c575b7eeSOleksandr Andrushchenko goto fail; 504c575b7eeSOleksandr Andrushchenko } 505c575b7eeSOleksandr Andrushchenko 506c575b7eeSOleksandr Andrushchenko drm_info->front_info = front_info; 507c575b7eeSOleksandr Andrushchenko front_info->drm_info = drm_info; 508c575b7eeSOleksandr Andrushchenko 509c575b7eeSOleksandr Andrushchenko drm_dev = drm_dev_alloc(&xen_drm_driver, dev); 510e30ca4bcSDan Carpenter if (IS_ERR(drm_dev)) { 511e30ca4bcSDan Carpenter ret = PTR_ERR(drm_dev); 51252762efaSLv Yunlong goto fail_dev; 513c575b7eeSOleksandr Andrushchenko } 514c575b7eeSOleksandr Andrushchenko 515c575b7eeSOleksandr Andrushchenko drm_info->drm_dev = drm_dev; 516c575b7eeSOleksandr Andrushchenko 517c575b7eeSOleksandr Andrushchenko drm_dev->dev_private = drm_info; 518c575b7eeSOleksandr Andrushchenko 519c575b7eeSOleksandr Andrushchenko ret = xen_drm_front_kms_init(drm_info); 520c575b7eeSOleksandr Andrushchenko if (ret) { 521c575b7eeSOleksandr Andrushchenko DRM_ERROR("Failed to initialize DRM/KMS, ret %d\n", ret); 522c575b7eeSOleksandr Andrushchenko goto fail_modeset; 523c575b7eeSOleksandr Andrushchenko } 524c575b7eeSOleksandr Andrushchenko 525c575b7eeSOleksandr Andrushchenko ret = drm_dev_register(drm_dev, 0); 526c575b7eeSOleksandr Andrushchenko if (ret) 527c575b7eeSOleksandr Andrushchenko goto fail_register; 528c575b7eeSOleksandr Andrushchenko 529c575b7eeSOleksandr Andrushchenko DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", 530c575b7eeSOleksandr Andrushchenko xen_drm_driver.name, xen_drm_driver.major, 531c575b7eeSOleksandr Andrushchenko xen_drm_driver.minor, xen_drm_driver.patchlevel, 532c575b7eeSOleksandr Andrushchenko xen_drm_driver.date, drm_dev->primary->index); 533c575b7eeSOleksandr Andrushchenko 534c575b7eeSOleksandr Andrushchenko return 0; 535c575b7eeSOleksandr Andrushchenko 536c575b7eeSOleksandr Andrushchenko fail_register: 537c575b7eeSOleksandr Andrushchenko drm_dev_unregister(drm_dev); 538c575b7eeSOleksandr Andrushchenko fail_modeset: 539c575b7eeSOleksandr Andrushchenko drm_kms_helper_poll_fini(drm_dev); 540c575b7eeSOleksandr Andrushchenko drm_mode_config_cleanup(drm_dev); 5416f365e56SDaniel Vetter drm_dev_put(drm_dev); 54252762efaSLv Yunlong fail_dev: 543c575b7eeSOleksandr Andrushchenko kfree(drm_info); 54452762efaSLv Yunlong front_info->drm_info = NULL; 54552762efaSLv Yunlong fail: 546c575b7eeSOleksandr Andrushchenko return ret; 547c575b7eeSOleksandr Andrushchenko } 548c575b7eeSOleksandr Andrushchenko 549c575b7eeSOleksandr Andrushchenko static void xen_drm_drv_fini(struct xen_drm_front_info *front_info) 550c575b7eeSOleksandr Andrushchenko { 551c575b7eeSOleksandr Andrushchenko struct xen_drm_front_drm_info *drm_info = front_info->drm_info; 552c575b7eeSOleksandr Andrushchenko struct drm_device *dev; 553c575b7eeSOleksandr Andrushchenko 554c575b7eeSOleksandr Andrushchenko if (!drm_info) 555c575b7eeSOleksandr Andrushchenko return; 556c575b7eeSOleksandr Andrushchenko 557c575b7eeSOleksandr Andrushchenko dev = drm_info->drm_dev; 558c575b7eeSOleksandr Andrushchenko if (!dev) 559c575b7eeSOleksandr Andrushchenko return; 560c575b7eeSOleksandr Andrushchenko 561c575b7eeSOleksandr Andrushchenko /* Nothing to do if device is already unplugged */ 562c575b7eeSOleksandr Andrushchenko if (drm_dev_is_unplugged(dev)) 563c575b7eeSOleksandr Andrushchenko return; 564c575b7eeSOleksandr Andrushchenko 565c575b7eeSOleksandr Andrushchenko drm_kms_helper_poll_fini(dev); 566c575b7eeSOleksandr Andrushchenko drm_dev_unplug(dev); 567ba3bf37eSNoralf Trønnes drm_dev_put(dev); 568c575b7eeSOleksandr Andrushchenko 569c575b7eeSOleksandr Andrushchenko front_info->drm_info = NULL; 570c575b7eeSOleksandr Andrushchenko 571c575b7eeSOleksandr Andrushchenko xen_drm_front_evtchnl_free_all(front_info); 572c575b7eeSOleksandr Andrushchenko dbuf_free_all(&front_info->dbuf_list); 573c575b7eeSOleksandr Andrushchenko 574c575b7eeSOleksandr Andrushchenko /* 575c575b7eeSOleksandr Andrushchenko * If we are not using backend allocated buffers, then tell the 576c575b7eeSOleksandr Andrushchenko * backend we are ready to (re)initialize. Otherwise, wait for 577c575b7eeSOleksandr Andrushchenko * drm_driver.release. 578c575b7eeSOleksandr Andrushchenko */ 579c575b7eeSOleksandr Andrushchenko if (!front_info->cfg.be_alloc) 580c575b7eeSOleksandr Andrushchenko xenbus_switch_state(front_info->xb_dev, 581c575b7eeSOleksandr Andrushchenko XenbusStateInitialising); 582c575b7eeSOleksandr Andrushchenko } 583c575b7eeSOleksandr Andrushchenko 584c575b7eeSOleksandr Andrushchenko static int displback_initwait(struct xen_drm_front_info *front_info) 585c575b7eeSOleksandr Andrushchenko { 586c575b7eeSOleksandr Andrushchenko struct xen_drm_front_cfg *cfg = &front_info->cfg; 587c575b7eeSOleksandr Andrushchenko int ret; 588c575b7eeSOleksandr Andrushchenko 589c575b7eeSOleksandr Andrushchenko cfg->front_info = front_info; 590c575b7eeSOleksandr Andrushchenko ret = xen_drm_front_cfg_card(front_info, cfg); 591c575b7eeSOleksandr Andrushchenko if (ret < 0) 592c575b7eeSOleksandr Andrushchenko return ret; 593c575b7eeSOleksandr Andrushchenko 594aefff491SColin Ian King DRM_INFO("Have %d connector(s)\n", cfg->num_connectors); 595c575b7eeSOleksandr Andrushchenko /* Create event channels for all connectors and publish */ 596c575b7eeSOleksandr Andrushchenko ret = xen_drm_front_evtchnl_create_all(front_info); 597c575b7eeSOleksandr Andrushchenko if (ret < 0) 598c575b7eeSOleksandr Andrushchenko return ret; 599c575b7eeSOleksandr Andrushchenko 600c575b7eeSOleksandr Andrushchenko return xen_drm_front_evtchnl_publish_all(front_info); 601c575b7eeSOleksandr Andrushchenko } 602c575b7eeSOleksandr Andrushchenko 603c575b7eeSOleksandr Andrushchenko static int displback_connect(struct xen_drm_front_info *front_info) 604c575b7eeSOleksandr Andrushchenko { 605c575b7eeSOleksandr Andrushchenko xen_drm_front_evtchnl_set_state(front_info, EVTCHNL_STATE_CONNECTED); 606c575b7eeSOleksandr Andrushchenko return xen_drm_drv_init(front_info); 607c575b7eeSOleksandr Andrushchenko } 608c575b7eeSOleksandr Andrushchenko 609c575b7eeSOleksandr Andrushchenko static void displback_disconnect(struct xen_drm_front_info *front_info) 610c575b7eeSOleksandr Andrushchenko { 611c575b7eeSOleksandr Andrushchenko if (!front_info->drm_info) 612c575b7eeSOleksandr Andrushchenko return; 613c575b7eeSOleksandr Andrushchenko 614c575b7eeSOleksandr Andrushchenko /* Tell the backend to wait until we release the DRM driver. */ 615c575b7eeSOleksandr Andrushchenko xenbus_switch_state(front_info->xb_dev, XenbusStateReconfiguring); 616c575b7eeSOleksandr Andrushchenko 617c575b7eeSOleksandr Andrushchenko xen_drm_drv_fini(front_info); 618c575b7eeSOleksandr Andrushchenko } 619c575b7eeSOleksandr Andrushchenko 620c575b7eeSOleksandr Andrushchenko static void displback_changed(struct xenbus_device *xb_dev, 621c575b7eeSOleksandr Andrushchenko enum xenbus_state backend_state) 622c575b7eeSOleksandr Andrushchenko { 623c575b7eeSOleksandr Andrushchenko struct xen_drm_front_info *front_info = dev_get_drvdata(&xb_dev->dev); 624c575b7eeSOleksandr Andrushchenko int ret; 625c575b7eeSOleksandr Andrushchenko 626c575b7eeSOleksandr Andrushchenko DRM_DEBUG("Backend state is %s, front is %s\n", 627c575b7eeSOleksandr Andrushchenko xenbus_strstate(backend_state), 628c575b7eeSOleksandr Andrushchenko xenbus_strstate(xb_dev->state)); 629c575b7eeSOleksandr Andrushchenko 630c575b7eeSOleksandr Andrushchenko switch (backend_state) { 631c575b7eeSOleksandr Andrushchenko case XenbusStateReconfiguring: 632c575b7eeSOleksandr Andrushchenko case XenbusStateReconfigured: 633c575b7eeSOleksandr Andrushchenko case XenbusStateInitialised: 634c575b7eeSOleksandr Andrushchenko break; 635c575b7eeSOleksandr Andrushchenko 636c575b7eeSOleksandr Andrushchenko case XenbusStateInitialising: 637c575b7eeSOleksandr Andrushchenko if (xb_dev->state == XenbusStateReconfiguring) 638c575b7eeSOleksandr Andrushchenko break; 639c575b7eeSOleksandr Andrushchenko 640c575b7eeSOleksandr Andrushchenko /* recovering after backend unexpected closure */ 641c575b7eeSOleksandr Andrushchenko displback_disconnect(front_info); 642c575b7eeSOleksandr Andrushchenko break; 643c575b7eeSOleksandr Andrushchenko 644c575b7eeSOleksandr Andrushchenko case XenbusStateInitWait: 645c575b7eeSOleksandr Andrushchenko if (xb_dev->state == XenbusStateReconfiguring) 646c575b7eeSOleksandr Andrushchenko break; 647c575b7eeSOleksandr Andrushchenko 648c575b7eeSOleksandr Andrushchenko /* recovering after backend unexpected closure */ 649c575b7eeSOleksandr Andrushchenko displback_disconnect(front_info); 650c575b7eeSOleksandr Andrushchenko if (xb_dev->state != XenbusStateInitialising) 651c575b7eeSOleksandr Andrushchenko break; 652c575b7eeSOleksandr Andrushchenko 653c575b7eeSOleksandr Andrushchenko ret = displback_initwait(front_info); 654c575b7eeSOleksandr Andrushchenko if (ret < 0) 655c575b7eeSOleksandr Andrushchenko xenbus_dev_fatal(xb_dev, ret, "initializing frontend"); 656c575b7eeSOleksandr Andrushchenko else 657c575b7eeSOleksandr Andrushchenko xenbus_switch_state(xb_dev, XenbusStateInitialised); 658c575b7eeSOleksandr Andrushchenko break; 659c575b7eeSOleksandr Andrushchenko 660c575b7eeSOleksandr Andrushchenko case XenbusStateConnected: 661c575b7eeSOleksandr Andrushchenko if (xb_dev->state != XenbusStateInitialised) 662c575b7eeSOleksandr Andrushchenko break; 663c575b7eeSOleksandr Andrushchenko 664c575b7eeSOleksandr Andrushchenko ret = displback_connect(front_info); 665c575b7eeSOleksandr Andrushchenko if (ret < 0) { 666c575b7eeSOleksandr Andrushchenko displback_disconnect(front_info); 667c575b7eeSOleksandr Andrushchenko xenbus_dev_fatal(xb_dev, ret, "connecting backend"); 668c575b7eeSOleksandr Andrushchenko } else { 669c575b7eeSOleksandr Andrushchenko xenbus_switch_state(xb_dev, XenbusStateConnected); 670c575b7eeSOleksandr Andrushchenko } 671c575b7eeSOleksandr Andrushchenko break; 672c575b7eeSOleksandr Andrushchenko 673c575b7eeSOleksandr Andrushchenko case XenbusStateClosing: 674c575b7eeSOleksandr Andrushchenko /* 675c575b7eeSOleksandr Andrushchenko * in this state backend starts freeing resources, 676c575b7eeSOleksandr Andrushchenko * so let it go into closed state, so we can also 677c575b7eeSOleksandr Andrushchenko * remove ours 678c575b7eeSOleksandr Andrushchenko */ 679c575b7eeSOleksandr Andrushchenko break; 680c575b7eeSOleksandr Andrushchenko 681c575b7eeSOleksandr Andrushchenko case XenbusStateUnknown: 682c575b7eeSOleksandr Andrushchenko case XenbusStateClosed: 683c575b7eeSOleksandr Andrushchenko if (xb_dev->state == XenbusStateClosed) 684c575b7eeSOleksandr Andrushchenko break; 685c575b7eeSOleksandr Andrushchenko 686c575b7eeSOleksandr Andrushchenko displback_disconnect(front_info); 687c575b7eeSOleksandr Andrushchenko break; 688c575b7eeSOleksandr Andrushchenko } 689c575b7eeSOleksandr Andrushchenko } 690c575b7eeSOleksandr Andrushchenko 691c575b7eeSOleksandr Andrushchenko static int xen_drv_probe(struct xenbus_device *xb_dev, 692c575b7eeSOleksandr Andrushchenko const struct xenbus_device_id *id) 693c575b7eeSOleksandr Andrushchenko { 694c575b7eeSOleksandr Andrushchenko struct xen_drm_front_info *front_info; 695c575b7eeSOleksandr Andrushchenko struct device *dev = &xb_dev->dev; 696c575b7eeSOleksandr Andrushchenko int ret; 697c575b7eeSOleksandr Andrushchenko 698ee7f5225SRob Herring ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64)); 699c575b7eeSOleksandr Andrushchenko if (ret < 0) { 700ee7f5225SRob Herring DRM_ERROR("Cannot setup DMA mask, ret %d", ret); 701c575b7eeSOleksandr Andrushchenko return ret; 702c575b7eeSOleksandr Andrushchenko } 703c575b7eeSOleksandr Andrushchenko 704c575b7eeSOleksandr Andrushchenko front_info = devm_kzalloc(&xb_dev->dev, 705c575b7eeSOleksandr Andrushchenko sizeof(*front_info), GFP_KERNEL); 706c575b7eeSOleksandr Andrushchenko if (!front_info) 707c575b7eeSOleksandr Andrushchenko return -ENOMEM; 708c575b7eeSOleksandr Andrushchenko 709c575b7eeSOleksandr Andrushchenko front_info->xb_dev = xb_dev; 710c575b7eeSOleksandr Andrushchenko spin_lock_init(&front_info->io_lock); 711c575b7eeSOleksandr Andrushchenko INIT_LIST_HEAD(&front_info->dbuf_list); 712c575b7eeSOleksandr Andrushchenko dev_set_drvdata(&xb_dev->dev, front_info); 713c575b7eeSOleksandr Andrushchenko 714c575b7eeSOleksandr Andrushchenko return xenbus_switch_state(xb_dev, XenbusStateInitialising); 715c575b7eeSOleksandr Andrushchenko } 716c575b7eeSOleksandr Andrushchenko 717c575b7eeSOleksandr Andrushchenko static int xen_drv_remove(struct xenbus_device *dev) 718c575b7eeSOleksandr Andrushchenko { 719c575b7eeSOleksandr Andrushchenko struct xen_drm_front_info *front_info = dev_get_drvdata(&dev->dev); 720c575b7eeSOleksandr Andrushchenko int to = 100; 721c575b7eeSOleksandr Andrushchenko 722c575b7eeSOleksandr Andrushchenko xenbus_switch_state(dev, XenbusStateClosing); 723c575b7eeSOleksandr Andrushchenko 724c575b7eeSOleksandr Andrushchenko /* 725c575b7eeSOleksandr Andrushchenko * On driver removal it is disconnected from XenBus, 726c575b7eeSOleksandr Andrushchenko * so no backend state change events come via .otherend_changed 727c575b7eeSOleksandr Andrushchenko * callback. This prevents us from exiting gracefully, e.g. 728c575b7eeSOleksandr Andrushchenko * signaling the backend to free event channels, waiting for its 729c575b7eeSOleksandr Andrushchenko * state to change to XenbusStateClosed and cleaning at our end. 730c575b7eeSOleksandr Andrushchenko * Normally when front driver removed backend will finally go into 731c575b7eeSOleksandr Andrushchenko * XenbusStateInitWait state. 732c575b7eeSOleksandr Andrushchenko * 733c575b7eeSOleksandr Andrushchenko * Workaround: read backend's state manually and wait with time-out. 734c575b7eeSOleksandr Andrushchenko */ 735c575b7eeSOleksandr Andrushchenko while ((xenbus_read_unsigned(front_info->xb_dev->otherend, "state", 736c575b7eeSOleksandr Andrushchenko XenbusStateUnknown) != XenbusStateInitWait) && 737f45140dfSDan Carpenter --to) 738c575b7eeSOleksandr Andrushchenko msleep(10); 739c575b7eeSOleksandr Andrushchenko 740c575b7eeSOleksandr Andrushchenko if (!to) { 741c575b7eeSOleksandr Andrushchenko unsigned int state; 742c575b7eeSOleksandr Andrushchenko 743c575b7eeSOleksandr Andrushchenko state = xenbus_read_unsigned(front_info->xb_dev->otherend, 744c575b7eeSOleksandr Andrushchenko "state", XenbusStateUnknown); 745c575b7eeSOleksandr Andrushchenko DRM_ERROR("Backend state is %s while removing driver\n", 746c575b7eeSOleksandr Andrushchenko xenbus_strstate(state)); 747c575b7eeSOleksandr Andrushchenko } 748c575b7eeSOleksandr Andrushchenko 749c575b7eeSOleksandr Andrushchenko xen_drm_drv_fini(front_info); 750c575b7eeSOleksandr Andrushchenko xenbus_frontend_closed(dev); 751c575b7eeSOleksandr Andrushchenko return 0; 752c575b7eeSOleksandr Andrushchenko } 753c575b7eeSOleksandr Andrushchenko 754c575b7eeSOleksandr Andrushchenko static const struct xenbus_device_id xen_driver_ids[] = { 755c575b7eeSOleksandr Andrushchenko { XENDISPL_DRIVER_NAME }, 756c575b7eeSOleksandr Andrushchenko { "" } 757c575b7eeSOleksandr Andrushchenko }; 758c575b7eeSOleksandr Andrushchenko 759c575b7eeSOleksandr Andrushchenko static struct xenbus_driver xen_driver = { 760c575b7eeSOleksandr Andrushchenko .ids = xen_driver_ids, 761c575b7eeSOleksandr Andrushchenko .probe = xen_drv_probe, 762c575b7eeSOleksandr Andrushchenko .remove = xen_drv_remove, 763c575b7eeSOleksandr Andrushchenko .otherend_changed = displback_changed, 764c575b7eeSOleksandr Andrushchenko }; 765c575b7eeSOleksandr Andrushchenko 766c575b7eeSOleksandr Andrushchenko static int __init xen_drv_init(void) 767c575b7eeSOleksandr Andrushchenko { 768c575b7eeSOleksandr Andrushchenko /* At the moment we only support case with XEN_PAGE_SIZE == PAGE_SIZE */ 769c575b7eeSOleksandr Andrushchenko if (XEN_PAGE_SIZE != PAGE_SIZE) { 770c575b7eeSOleksandr Andrushchenko DRM_ERROR(XENDISPL_DRIVER_NAME ": different kernel and Xen page sizes are not supported: XEN_PAGE_SIZE (%lu) != PAGE_SIZE (%lu)\n", 771c575b7eeSOleksandr Andrushchenko XEN_PAGE_SIZE, PAGE_SIZE); 772c575b7eeSOleksandr Andrushchenko return -ENODEV; 773c575b7eeSOleksandr Andrushchenko } 774c575b7eeSOleksandr Andrushchenko 775c575b7eeSOleksandr Andrushchenko if (!xen_domain()) 776c575b7eeSOleksandr Andrushchenko return -ENODEV; 777c575b7eeSOleksandr Andrushchenko 778c575b7eeSOleksandr Andrushchenko if (!xen_has_pv_devices()) 779c575b7eeSOleksandr Andrushchenko return -ENODEV; 780c575b7eeSOleksandr Andrushchenko 781c575b7eeSOleksandr Andrushchenko DRM_INFO("Registering XEN PV " XENDISPL_DRIVER_NAME "\n"); 782c575b7eeSOleksandr Andrushchenko return xenbus_register_frontend(&xen_driver); 783c575b7eeSOleksandr Andrushchenko } 784c575b7eeSOleksandr Andrushchenko 785c575b7eeSOleksandr Andrushchenko static void __exit xen_drv_fini(void) 786c575b7eeSOleksandr Andrushchenko { 787c575b7eeSOleksandr Andrushchenko DRM_INFO("Unregistering XEN PV " XENDISPL_DRIVER_NAME "\n"); 788c575b7eeSOleksandr Andrushchenko xenbus_unregister_driver(&xen_driver); 789c575b7eeSOleksandr Andrushchenko } 790c575b7eeSOleksandr Andrushchenko 791c575b7eeSOleksandr Andrushchenko module_init(xen_drv_init); 792c575b7eeSOleksandr Andrushchenko module_exit(xen_drv_fini); 793c575b7eeSOleksandr Andrushchenko 794c575b7eeSOleksandr Andrushchenko MODULE_DESCRIPTION("Xen para-virtualized display device frontend"); 795c575b7eeSOleksandr Andrushchenko MODULE_LICENSE("GPL"); 796c575b7eeSOleksandr Andrushchenko MODULE_ALIAS("xen:" XENDISPL_DRIVER_NAME); 797