xref: /openbmc/qemu/include/hw/xen/xen_backend_ops.h (revision e8d1e0cd)
1 /*
2  * QEMU Xen backend support
3  *
4  * Copyright © 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
5  *
6  * Authors: David Woodhouse <dwmw2@infradead.org>
7  *
8  * This work is licensed under the terms of the GNU GPL, version 2 or later.
9  * See the COPYING file in the top-level directory.
10  */
11 
12 #ifndef QEMU_XEN_BACKEND_OPS_H
13 #define QEMU_XEN_BACKEND_OPS_H
14 
15 #include "hw/xen/xen.h"
16 #include "hw/xen/interface/xen.h"
17 #include "hw/xen/interface/io/xenbus.h"
18 
19 /*
20  * For the time being, these operations map fairly closely to the API of
21  * the actual Xen libraries, e.g. libxenevtchn. As we complete the migration
22  * from XenLegacyDevice back ends to the new XenDevice model, they may
23  * evolve to slightly higher-level APIs.
24  *
25  * The internal emulations do not emulate the Xen APIs entirely faithfully;
26  * only enough to be used by the Xen backend devices. For example, only one
27  * event channel can be bound to each handle, since that's sufficient for
28  * the device support (only the true Xen HVM backend uses more). And the
29  * behaviour of unmask() and pending() is different too because the device
30  * backends don't care.
31  */
32 
/*
 * Minimal local definitions of the basic Xen types used below, so that
 * this header can be used without the Xen toolstack library headers.
 */
typedef struct xenevtchn_handle xenevtchn_handle;
typedef int xenevtchn_port_or_error_t;
typedef uint32_t evtchn_port_t;   /* event channel port number */
typedef uint16_t domid_t;         /* Xen domain ID */
typedef uint32_t grant_ref_t;     /* grant table reference */

/* The Xen ABI page granularity: fixed at 4 KiB (1 << 12). */
#define XEN_PAGE_SHIFT       12
#define XEN_PAGE_SIZE        (1UL << XEN_PAGE_SHIFT)
#define XEN_PAGE_MASK        (~(XEN_PAGE_SIZE - 1))
42 
/*
 * Memory barriers for ring/shared-page accesses. Fall back to QEMU's SMP
 * barriers unless the Xen headers already provided their own definitions.
 */
#ifndef xen_rmb
#define xen_rmb() smp_rmb()
#endif
#ifndef xen_wmb
#define xen_wmb() smp_wmb()
#endif
#ifndef xen_mb
#define xen_mb() smp_mb()
#endif
52 
53 struct evtchn_backend_ops {
54     xenevtchn_handle *(*open)(void);
55     int (*bind_interdomain)(xenevtchn_handle *xc, uint32_t domid,
56                             evtchn_port_t guest_port);
57     int (*unbind)(xenevtchn_handle *xc, evtchn_port_t port);
58     int (*close)(struct xenevtchn_handle *xc);
59     int (*get_fd)(struct xenevtchn_handle *xc);
60     int (*notify)(struct xenevtchn_handle *xc, evtchn_port_t port);
61     int (*unmask)(struct xenevtchn_handle *xc, evtchn_port_t port);
62     int (*pending)(struct xenevtchn_handle *xc);
63 };
64 
65 extern struct evtchn_backend_ops *xen_evtchn_ops;
66 
67 static inline xenevtchn_handle *qemu_xen_evtchn_open(void)
68 {
69     if (!xen_evtchn_ops) {
70         return NULL;
71     }
72     return xen_evtchn_ops->open();
73 }
74 
75 static inline int qemu_xen_evtchn_bind_interdomain(xenevtchn_handle *xc,
76                                                    uint32_t domid,
77                                                    evtchn_port_t guest_port)
78 {
79     if (!xen_evtchn_ops) {
80         return -ENOSYS;
81     }
82     return xen_evtchn_ops->bind_interdomain(xc, domid, guest_port);
83 }
84 
85 static inline int qemu_xen_evtchn_unbind(xenevtchn_handle *xc,
86                                          evtchn_port_t port)
87 {
88     if (!xen_evtchn_ops) {
89         return -ENOSYS;
90     }
91     return xen_evtchn_ops->unbind(xc, port);
92 }
93 
94 static inline int qemu_xen_evtchn_close(xenevtchn_handle *xc)
95 {
96     if (!xen_evtchn_ops) {
97         return -ENOSYS;
98     }
99     return xen_evtchn_ops->close(xc);
100 }
101 
102 static inline int qemu_xen_evtchn_fd(xenevtchn_handle *xc)
103 {
104     if (!xen_evtchn_ops) {
105         return -ENOSYS;
106     }
107     return xen_evtchn_ops->get_fd(xc);
108 }
109 
110 static inline int qemu_xen_evtchn_notify(xenevtchn_handle *xc,
111                                          evtchn_port_t port)
112 {
113     if (!xen_evtchn_ops) {
114         return -ENOSYS;
115     }
116     return xen_evtchn_ops->notify(xc, port);
117 }
118 
119 static inline int qemu_xen_evtchn_unmask(xenevtchn_handle *xc,
120                                          evtchn_port_t port)
121 {
122     if (!xen_evtchn_ops) {
123         return -ENOSYS;
124     }
125     return xen_evtchn_ops->unmask(xc, port);
126 }
127 
128 static inline int qemu_xen_evtchn_pending(xenevtchn_handle *xc)
129 {
130     if (!xen_evtchn_ops) {
131         return -ENOSYS;
132     }
133     return xen_evtchn_ops->pending(xc);
134 }
135 
/* Opaque handle for a grant table backend instance. */
typedef struct xengntdev_handle xengnttab_handle;

/*
 * One segment of a grant copy operation. Each endpoint (source, dest) is
 * either a local virtual address ('virt') or a grant reference plus byte
 * offset ('foreign'); 'len' is the number of bytes to copy.
 */
typedef struct XenGrantCopySegment {
    union {
        void *virt;
        struct {
            uint32_t ref;
            off_t offset;
        } foreign;
    } source, dest;
    size_t len;
} XenGrantCopySegment;
148 
/* Feature bit: backend can map multiple grant refs in one map_refs() call. */
#define XEN_GNTTAB_OP_FEATURE_MAP_MULTIPLE  (1U << 0)

/*
 * Operation table for grant table access, filled in either by the real
 * libxengnttab or by QEMU's internal emulation.
 */
struct gnttab_backend_ops {
    /* Bitmask of XEN_GNTTAB_OP_FEATURE_* flags supported by this backend. */
    uint32_t features;
    xengnttab_handle *(*open)(void);
    int (*close)(xengnttab_handle *xgt);
    int (*grant_copy)(xengnttab_handle *xgt, bool to_domain, uint32_t domid,
                      XenGrantCopySegment *segs, uint32_t nr_segs,
                      Error **errp);
    int (*set_max_grants)(xengnttab_handle *xgt, uint32_t nr_grants);
    void *(*map_refs)(xengnttab_handle *xgt, uint32_t count, uint32_t domid,
                      uint32_t *refs, int prot);
    int (*unmap)(xengnttab_handle *xgt, void *start_address, uint32_t *refs,
                 uint32_t count);
};
164 
165 extern struct gnttab_backend_ops *xen_gnttab_ops;
166 
167 static inline bool qemu_xen_gnttab_can_map_multi(void)
168 {
169     return xen_gnttab_ops &&
170         !!(xen_gnttab_ops->features & XEN_GNTTAB_OP_FEATURE_MAP_MULTIPLE);
171 }
172 
173 static inline xengnttab_handle *qemu_xen_gnttab_open(void)
174 {
175     if (!xen_gnttab_ops) {
176         return NULL;
177     }
178     return xen_gnttab_ops->open();
179 }
180 
181 static inline int qemu_xen_gnttab_close(xengnttab_handle *xgt)
182 {
183     if (!xen_gnttab_ops) {
184         return -ENOSYS;
185     }
186     return xen_gnttab_ops->close(xgt);
187 }
188 
189 static inline int qemu_xen_gnttab_grant_copy(xengnttab_handle *xgt,
190                                              bool to_domain, uint32_t domid,
191                                              XenGrantCopySegment *segs,
192                                              uint32_t nr_segs, Error **errp)
193 {
194     if (!xen_gnttab_ops) {
195         return -ENOSYS;
196     }
197 
198     return xen_gnttab_ops->grant_copy(xgt, to_domain, domid, segs, nr_segs,
199                                       errp);
200 }
201 
202 static inline int qemu_xen_gnttab_set_max_grants(xengnttab_handle *xgt,
203                                                  uint32_t nr_grants)
204 {
205     if (!xen_gnttab_ops) {
206         return -ENOSYS;
207     }
208     return xen_gnttab_ops->set_max_grants(xgt, nr_grants);
209 }
210 
211 static inline void *qemu_xen_gnttab_map_refs(xengnttab_handle *xgt,
212                                              uint32_t count, uint32_t domid,
213                                              uint32_t *refs, int prot)
214 {
215     if (!xen_gnttab_ops) {
216         return NULL;
217     }
218     return xen_gnttab_ops->map_refs(xgt, count, domid, refs, prot);
219 }
220 
221 static inline int qemu_xen_gnttab_unmap(xengnttab_handle *xgt,
222                                         void *start_address, uint32_t *refs,
223                                         uint32_t count)
224 {
225     if (!xen_gnttab_ops) {
226         return -ENOSYS;
227     }
228     return xen_gnttab_ops->unmap(xgt, start_address, refs, count);
229 }
230 
/*
 * Operation table for mapping foreign (guest) memory pages, filled in
 * either by the real libxenforeignmemory or by QEMU's internal emulation.
 */
struct foreignmem_backend_ops {
    void *(*map)(uint32_t dom, void *addr, int prot, size_t pages,
                 xen_pfn_t *pfns, int *errs);
    int (*unmap)(void *addr, size_t pages);
};
236 
237 extern struct foreignmem_backend_ops *xen_foreignmem_ops;
238 
239 static inline void *qemu_xen_foreignmem_map(uint32_t dom, void *addr, int prot,
240                                             size_t pages, xen_pfn_t *pfns,
241                                             int *errs)
242 {
243     if (!xen_foreignmem_ops) {
244         return NULL;
245     }
246     return xen_foreignmem_ops->map(dom, addr, prot, pages, pfns, errs);
247 }
248 
249 static inline int qemu_xen_foreignmem_unmap(void *addr, size_t pages)
250 {
251     if (!xen_foreignmem_ops) {
252         return -ENOSYS;
253     }
254     return xen_foreignmem_ops->unmap(addr, pages);
255 }
256 
/* Callback invoked for xenstore watch events; 'path' is the path that fired. */
typedef void (*xs_watch_fn)(void *opaque, const char *path);

struct qemu_xs_handle;
struct qemu_xs_watch;
typedef uint32_t xs_transaction_t;

/* Transaction ID meaning "no transaction". */
#define XBT_NULL 0

/* Xenstore node permission bits. */
#define XS_PERM_NONE  0x00
#define XS_PERM_READ  0x01
#define XS_PERM_WRITE 0x02
268 
269 struct xenstore_backend_ops {
270     struct qemu_xs_handle *(*open)(void);
271     void (*close)(struct qemu_xs_handle *h);
272     char *(*get_domain_path)(struct qemu_xs_handle *h, unsigned int domid);
273     char **(*directory)(struct qemu_xs_handle *h, xs_transaction_t t,
274                         const char *path, unsigned int *num);
275     void *(*read)(struct qemu_xs_handle *h, xs_transaction_t t,
276                   const char *path, unsigned int *len);
277     bool (*write)(struct qemu_xs_handle *h, xs_transaction_t t,
278                   const char *path, const void *data, unsigned int len);
279     bool (*create)(struct qemu_xs_handle *h, xs_transaction_t t,
280                    unsigned int owner, unsigned int domid,
281                    unsigned int perms, const char *path);
282     bool (*destroy)(struct qemu_xs_handle *h, xs_transaction_t t,
283                const char *path);
284     struct qemu_xs_watch *(*watch)(struct qemu_xs_handle *h, const char *path,
285                                    xs_watch_fn fn, void *opaque);
286     void (*unwatch)(struct qemu_xs_handle *h, struct qemu_xs_watch *w);
287     xs_transaction_t (*transaction_start)(struct qemu_xs_handle *h);
288     bool (*transaction_end)(struct qemu_xs_handle *h, xs_transaction_t t,
289                             bool abort);
290 };
291 
292 extern struct xenstore_backend_ops *xen_xenstore_ops;
293 
294 static inline struct qemu_xs_handle *qemu_xen_xs_open(void)
295 {
296     if (!xen_xenstore_ops) {
297         return NULL;
298     }
299     return xen_xenstore_ops->open();
300 }
301 
302 static inline void qemu_xen_xs_close(struct qemu_xs_handle *h)
303 {
304     if (!xen_xenstore_ops) {
305         return;
306     }
307     xen_xenstore_ops->close(h);
308 }
309 
310 static inline char *qemu_xen_xs_get_domain_path(struct qemu_xs_handle *h,
311                                                 unsigned int domid)
312 {
313     if (!xen_xenstore_ops) {
314         return NULL;
315     }
316     return xen_xenstore_ops->get_domain_path(h, domid);
317 }
318 
319 static inline char **qemu_xen_xs_directory(struct qemu_xs_handle *h,
320                                            xs_transaction_t t, const char *path,
321                                            unsigned int *num)
322 {
323     if (!xen_xenstore_ops) {
324         return NULL;
325     }
326     return xen_xenstore_ops->directory(h, t, path, num);
327 }
328 
329 static inline void *qemu_xen_xs_read(struct qemu_xs_handle *h,
330                                      xs_transaction_t t, const char *path,
331                                      unsigned int *len)
332 {
333     if (!xen_xenstore_ops) {
334         return NULL;
335     }
336     return xen_xenstore_ops->read(h, t, path, len);
337 }
338 
339 static inline bool qemu_xen_xs_write(struct qemu_xs_handle *h,
340                                      xs_transaction_t t, const char *path,
341                                      const void *data, unsigned int len)
342 {
343     if (!xen_xenstore_ops) {
344         return false;
345     }
346     return xen_xenstore_ops->write(h, t, path, data, len);
347 }
348 
349 static inline bool qemu_xen_xs_create(struct qemu_xs_handle *h,
350                                       xs_transaction_t t, unsigned int owner,
351                                       unsigned int domid, unsigned int perms,
352                                       const char *path)
353 {
354     if (!xen_xenstore_ops) {
355         return false;
356     }
357     return xen_xenstore_ops->create(h, t, owner, domid, perms, path);
358 }
359 
360 static inline bool qemu_xen_xs_destroy(struct qemu_xs_handle *h,
361                                        xs_transaction_t t, const char *path)
362 {
363     if (!xen_xenstore_ops) {
364         return false;
365     }
366     return xen_xenstore_ops->destroy(h, t, path);
367 }
368 
369 static inline struct qemu_xs_watch *qemu_xen_xs_watch(struct qemu_xs_handle *h,
370                                                       const char *path,
371                                                       xs_watch_fn fn,
372                                                       void *opaque)
373 {
374     if (!xen_xenstore_ops) {
375         return NULL;
376     }
377     return xen_xenstore_ops->watch(h, path, fn, opaque);
378 }
379 
380 static inline void qemu_xen_xs_unwatch(struct qemu_xs_handle *h,
381                                        struct qemu_xs_watch *w)
382 {
383     if (!xen_xenstore_ops) {
384         return;
385     }
386     xen_xenstore_ops->unwatch(h, w);
387 }
388 
389 static inline xs_transaction_t qemu_xen_xs_transaction_start(struct qemu_xs_handle *h)
390 {
391     if (!xen_xenstore_ops) {
392         return XBT_NULL;
393     }
394     return xen_xenstore_ops->transaction_start(h);
395 }
396 
397 static inline bool qemu_xen_xs_transaction_end(struct qemu_xs_handle *h,
398                                                xs_transaction_t t, bool abort)
399 {
400     if (!xen_xenstore_ops) {
401         return false;
402     }
403     return xen_xenstore_ops->transaction_end(h, t, abort);
404 }
405 
406 void setup_xen_backend_ops(void);
407 
408 #endif /* QEMU_XEN_BACKEND_OPS_H */
409