/* SPDX-License-Identifier: GPL-2.0 OR MIT */

/*
 * Xen para-virtual DRM device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#ifndef __XEN_DRM_FRONT_H_
#define __XEN_DRM_FRONT_H_

#include <linux/scatterlist.h>

#include <drm/drm_connector.h>
#include <drm/drm_simple_kms_helper.h>

#include "xen_drm_front_cfg.h"

struct drm_device;
struct drm_framebuffer;
struct drm_gem_object;
struct drm_pending_vblank_event;

/**
 * DOC: Driver modes of operation in terms of display buffers used
 *
 * Depending on the requirements for the para-virtualized environment, namely
 * requirements dictated by the accompanying DRM/(v)GPU drivers running in both
 * host and guest environments, display buffers can be allocated by either
 * the frontend driver or the backend.
 */

/**
 * DOC: Buffers allocated by the frontend driver
 *
 * In this mode of operation the driver allocates buffers from system memory.
 *
 * Note! If used with accompanying DRM/(v)GPU drivers this mode of operation
 * may require IOMMU support on the platform, so the accompanying DRM/vGPU
 * hardware can still reach the display buffer memory while importing PRIME
 * buffers from the frontend driver.
 */

/**
 * DOC: Buffers allocated by the backend
 *
 * This mode of operation is configured at run-time via the guest domain
 * configuration through XenStore entries (see the illustrative configuration
 * sketch below).
 *
 * For systems which do not provide IOMMU support, but have specific
 * requirements for display buffers, it is possible to allocate such buffers
 * on the backend side and share those with the frontend.
 * For example, if the host domain is 1:1 mapped and has DRM/GPU hardware
 * expecting physically contiguous memory, this allows implementing zero-copy
 * use-cases.
 *
 * Note, while using this scenario the following should be considered:
 *
 * #. If the guest domain dies, then pages/grants received from the backend
 *    cannot be claimed back.
 *
 * #. A misbehaving guest may send too many requests to the
 *    backend, exhausting its grant references and memory
 *    (consider this from a security point of view).
 */
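
/*
 * Illustrative configuration sketch only (assumed xl domain config syntax,
 * which may differ between Xen/toolstack versions): backend allocated
 * buffers are requested per virtual display device, e.g.
 *
 *	vdispl = [ 'backend=0, be-alloc=1, connectors=id0:1920x1080' ]
 *
 * The resulting XenStore entries are read by the configuration code behind
 * xen_drm_front_cfg.h.
 */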

/**
 * DOC: Driver limitations
 *
 * #. Only a primary plane without additional properties is supported.
 *
 * #. Only one video mode per connector is supported, which is configured
 *    via XenStore (see the connector mode sketch below).
 *
 * #. All CRTCs operate at a fixed frequency of 60 Hz.
 */
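
/*
 * A minimal sketch (not necessarily how this driver implements it) of how a
 * connector could expose its single fixed 60 Hz mode, taken from the XenStore
 * configuration, through a &drm_connector_helper_funcs.get_modes hook:
 *
 *	static int example_get_modes(struct drm_connector *connector)
 *	{
 *		struct xen_drm_front_drm_pipeline *pipeline =
 *			container_of(connector,
 *				     struct xen_drm_front_drm_pipeline, conn);
 *		struct drm_display_mode *mode;
 *
 *		mode = drm_cvt_mode(connector->dev, pipeline->width,
 *				    pipeline->height, 60, false, false, false);
 *		if (!mode)
 *			return 0;
 *
 *		mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
 *		drm_mode_probed_add(connector, mode);
 *		return 1;
 *	}
 */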

/* timeout in ms to wait for backend to respond */
#define XEN_DRM_FRONT_WAIT_BACK_MS	3000

struct xen_drm_front_info {
	struct xenbus_device *xb_dev;
	struct xen_drm_front_drm_info *drm_info;

	/* to protect data between backend IO code and interrupt handler */
	spinlock_t io_lock;

	int num_evt_pairs;
	struct xen_drm_front_evtchnl_pair *evt_pairs;
	struct xen_drm_front_cfg cfg;

	/* display buffers */
	struct list_head dbuf_list;
};

struct xen_drm_front_drm_pipeline {
	struct xen_drm_front_drm_info *drm_info;

	int index;

	struct drm_simple_display_pipe pipe;

	struct drm_connector conn;
	/* These are only for connector mode checking */
	int width, height;

	struct drm_pending_vblank_event *pending_event;

	struct delayed_work pflip_to_worker;

	bool conn_connected;
};

struct xen_drm_front_drm_info {
	struct xen_drm_front_info *front_info;
	struct drm_device *drm_dev;

	struct xen_drm_front_drm_pipeline pipeline[XEN_DRM_FRONT_MAX_CRTCS];
};

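/*
 * Framebuffers and display buffers are identified in requests to the backend
 * by opaque 64-bit cookies. The helpers below derive those cookies from the
 * kernel pointers of the corresponding DRM objects, which are unique for the
 * lifetime of the object.
 */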
static inline u64 xen_drm_front_fb_to_cookie(struct drm_framebuffer *fb)
{
	return (uintptr_t)fb;
}

static inline u64 xen_drm_front_dbuf_to_cookie(struct drm_gem_object *gem_obj)
{
	return (uintptr_t)gem_obj;
}

int xen_drm_front_mode_set(struct xen_drm_front_drm_pipeline *pipeline,
			   u32 x, u32 y, u32 width, u32 height,
			   u32 bpp, u64 fb_cookie);

int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
			      u64 dbuf_cookie, u32 width, u32 height,
			      u32 bpp, u64 size, u32 offset, struct page **pages);

int xen_drm_front_fb_attach(struct xen_drm_front_info *front_info,
			    u64 dbuf_cookie, u64 fb_cookie, u32 width,
			    u32 height, u32 pixel_format);

int xen_drm_front_fb_detach(struct xen_drm_front_info *front_info,
			    u64 fb_cookie);

int xen_drm_front_page_flip(struct xen_drm_front_info *front_info,
			    int conn_idx, u64 fb_cookie);

void xen_drm_front_on_frame_done(struct xen_drm_front_info *front_info,
				 int conn_idx, u64 fb_cookie);
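
/*
 * A rough sketch of the request flow towards the backend (simplified,
 * variable names are placeholders; see the corresponding .c files for the
 * authoritative call sites):
 *
 *	dbuf_cookie = xen_drm_front_dbuf_to_cookie(gem_obj);
 *	fb_cookie = xen_drm_front_fb_to_cookie(fb);
 *
 *	xen_drm_front_dbuf_create(front_info, dbuf_cookie,
 *				  width, height, bpp, size, 0, pages);
 *	xen_drm_front_fb_attach(front_info, dbuf_cookie, fb_cookie,
 *				width, height, pixel_format);
 *	xen_drm_front_mode_set(pipeline, 0, 0, width, height, bpp, fb_cookie);
 *	xen_drm_front_page_flip(front_info, conn_idx, fb_cookie);
 *
 * xen_drm_front_on_frame_done() is expected to be called once the backend
 * reports that a frame has been presented (e.g. from the event channel
 * handling code).
 */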

void xen_drm_front_gem_object_free(struct drm_gem_object *obj);

#endif /* __XEN_DRM_FRONT_H_ */