/* hw/hyperv/hyperv.c (QEMU, revision ed75658a) */
/*
 * Hyper-V guest/hypervisor interaction
 *
 * Copyright (c) 2015-2018 Virtuozzo International GmbH.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "exec/address-spaces.h"
#include "exec/memory.h"
#include "sysemu/kvm.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qemu/lockable.h"
#include "qemu/queue.h"
#include "qemu/rcu.h"
#include "qemu/rcu_queue.h"
#include "hw/hyperv/hyperv.h"
#include "qom/object.h"
#include "target/i386/kvm/hyperv-proto.h"
#include "target/i386/cpu.h"
#include "exec/cpu-all.h"

struct SynICState {
    DeviceState parent_obj;

    CPUState *cs;

    bool sctl_enabled;
    hwaddr msg_page_addr;
    hwaddr event_page_addr;
    MemoryRegion msg_page_mr;
    MemoryRegion event_page_mr;
    struct hyperv_message_page *msg_page;
    struct hyperv_event_flags_page *event_page;

    QemuMutex sint_routes_mutex;
    QLIST_HEAD(, HvSintRoute) sint_routes;
};

#define TYPE_SYNIC "hyperv-synic"
OBJECT_DECLARE_SIMPLE_TYPE(SynICState, SYNIC)

static bool synic_enabled;

bool hyperv_is_synic_enabled(void)
{
    return synic_enabled;
}

static SynICState *get_synic(CPUState *cs)
{
    return SYNIC(object_resolve_path_component(OBJECT(cs), "synic"));
}

static void synic_update(SynICState *synic, bool sctl_enable,
                         hwaddr msg_page_addr, hwaddr event_page_addr)
{
    synic->sctl_enabled = sctl_enable;
    if (synic->msg_page_addr != msg_page_addr) {
        if (synic->msg_page_addr) {
            memory_region_del_subregion(get_system_memory(),
                                        &synic->msg_page_mr);
        }
        if (msg_page_addr) {
            memory_region_add_subregion(get_system_memory(), msg_page_addr,
                                        &synic->msg_page_mr);
        }
        synic->msg_page_addr = msg_page_addr;
    }
    if (synic->event_page_addr != event_page_addr) {
        if (synic->event_page_addr) {
            memory_region_del_subregion(get_system_memory(),
                                        &synic->event_page_mr);
        }
        if (event_page_addr) {
            memory_region_add_subregion(get_system_memory(), event_page_addr,
                                        &synic->event_page_mr);
        }
        synic->event_page_addr = event_page_addr;
    }
}

void hyperv_synic_update(CPUState *cs, bool sctl_enable,
                         hwaddr msg_page_addr, hwaddr event_page_addr)
{
    SynICState *synic = get_synic(cs);

    if (!synic) {
        return;
    }

    synic_update(synic, sctl_enable, msg_page_addr, event_page_addr);
}

static void synic_realize(DeviceState *dev, Error **errp)
{
    Object *obj = OBJECT(dev);
    SynICState *synic = SYNIC(dev);
    char *msgp_name, *eventp_name;
    uint32_t vp_index;

    /* memory region names have to be globally unique */
    vp_index = hyperv_vp_index(synic->cs);
    msgp_name = g_strdup_printf("synic-%u-msg-page", vp_index);
    eventp_name = g_strdup_printf("synic-%u-event-page", vp_index);

    memory_region_init_ram(&synic->msg_page_mr, obj, msgp_name,
                           sizeof(*synic->msg_page), &error_abort);
    memory_region_init_ram(&synic->event_page_mr, obj, eventp_name,
                           sizeof(*synic->event_page), &error_abort);
    synic->msg_page = memory_region_get_ram_ptr(&synic->msg_page_mr);
    synic->event_page = memory_region_get_ram_ptr(&synic->event_page_mr);
    qemu_mutex_init(&synic->sint_routes_mutex);
    QLIST_INIT(&synic->sint_routes);

    g_free(msgp_name);
    g_free(eventp_name);
}

static void synic_reset(DeviceState *dev)
{
    SynICState *synic = SYNIC(dev);
    memset(synic->msg_page, 0, sizeof(*synic->msg_page));
    memset(synic->event_page, 0, sizeof(*synic->event_page));
    synic_update(synic, false, 0, 0);
    assert(QLIST_EMPTY(&synic->sint_routes));
}

static void synic_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = synic_realize;
    dc->reset = synic_reset;
    dc->user_creatable = false;
}

void hyperv_synic_add(CPUState *cs)
{
    Object *obj;
    SynICState *synic;

    obj = object_new(TYPE_SYNIC);
    synic = SYNIC(obj);
    synic->cs = cs;
    object_property_add_child(OBJECT(cs), "synic", obj);
    object_unref(obj);
    qdev_realize(DEVICE(obj), NULL, &error_abort);
    synic_enabled = true;
}

void hyperv_synic_reset(CPUState *cs)
{
    SynICState *synic = get_synic(cs);

    if (synic) {
        device_cold_reset(DEVICE(synic));
    }
}

static const TypeInfo synic_type_info = {
    .name = TYPE_SYNIC,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(SynICState),
    .class_init = synic_class_init,
};

static void synic_register_types(void)
{
    type_register_static(&synic_type_info);
}

type_init(synic_register_types)

/*
 * KVM has its own message producers (SynIC timers).  To guarantee
 * serialization with both KVM vcpu and the guest cpu, the messages are first
 * staged in an intermediate area and then posted to the SynIC message page in
 * the vcpu thread.
 */
typedef struct HvSintStagedMessage {
    /* message content staged by hyperv_post_msg */
    struct hyperv_message msg;
    /* callback + data (r/o) to complete the processing in a BH */
    HvSintMsgCb cb;
    void *cb_data;
    /* message posting status filled by cpu_post_msg */
    int status;
    /* passing the buck: */
    enum {
        /* initial state */
        HV_STAGED_MSG_FREE,
        /*
         * hyperv_post_msg (e.g. in main loop) grabs the staged area (FREE ->
         * BUSY), copies msg, and schedules cpu_post_msg on the assigned cpu
         */
        HV_STAGED_MSG_BUSY,
        /*
         * cpu_post_msg (vcpu thread) tries to copy the staged msg to the msg
         * slot and notify the guest, records the status, marks the posting
         * done (BUSY -> POSTED), and schedules the sint_msg_bh BH
         */
        HV_STAGED_MSG_POSTED,
        /*
         * sint_msg_bh (BH) verifies that the posting is done, runs the
         * callback, and starts over (POSTED -> FREE)
         */
    } state;
} HvSintStagedMessage;
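
/*
 * Editor's illustrative sketch, not part of the original file: one way a
 * message producer might drive the FREE -> BUSY -> POSTED -> FREE cycle
 * above.  The callback is the one passed to hyperv_sint_route_new(); the
 * names example_msg and example_msg_done are hypothetical.
 */
static struct hyperv_message example_msg; /* hypothetical pending payload */

static void G_GNUC_UNUSED example_msg_done(void *data, int status)
{
    HvSintRoute *sint_route = data; /* assumes cb_data was the route itself */

    if (status == -EAGAIN) {
        /*
         * The destination slot was busy and HV_MESSAGE_FLAG_PENDING was set;
         * the guest's EOM fired sint_ack_notifier, so retry the post now.
         */
        hyperv_post_msg(sint_route, &example_msg);
    }
}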

struct HvSintRoute {
    uint32_t sint;
    SynICState *synic;
    int gsi;
    EventNotifier sint_set_notifier;
    EventNotifier sint_ack_notifier;

    HvSintStagedMessage *staged_msg;

    unsigned refcount;
    QLIST_ENTRY(HvSintRoute) link;
};

static CPUState *hyperv_find_vcpu(uint32_t vp_index)
{
    CPUState *cs = qemu_get_cpu(vp_index);
    assert(hyperv_vp_index(cs) == vp_index);
    return cs;
}

/*
 * BH to complete the processing of a staged message.
 */
static void sint_msg_bh(void *opaque)
{
    HvSintRoute *sint_route = opaque;
    HvSintStagedMessage *staged_msg = sint_route->staged_msg;

    if (qatomic_read(&staged_msg->state) != HV_STAGED_MSG_POSTED) {
        /* status not ready yet (spurious ack from guest?), ignore */
        return;
    }

    staged_msg->cb(staged_msg->cb_data, staged_msg->status);
    staged_msg->status = 0;

    /* staged message processing finished, ready to start over */
    qatomic_set(&staged_msg->state, HV_STAGED_MSG_FREE);
    /* drop the reference taken in hyperv_post_msg */
    hyperv_sint_route_unref(sint_route);
}

/*
 * Worker to transfer the message from the staging area into the SynIC message
 * page in vcpu context.
 */
static void cpu_post_msg(CPUState *cs, run_on_cpu_data data)
{
    HvSintRoute *sint_route = data.host_ptr;
    HvSintStagedMessage *staged_msg = sint_route->staged_msg;
    SynICState *synic = sint_route->synic;
    struct hyperv_message *dst_msg;
    bool wait_for_sint_ack = false;

    assert(staged_msg->state == HV_STAGED_MSG_BUSY);

    if (!synic->msg_page_addr) {
        staged_msg->status = -ENXIO;
        goto posted;
    }

    dst_msg = &synic->msg_page->slot[sint_route->sint];

    if (dst_msg->header.message_type != HV_MESSAGE_NONE) {
        dst_msg->header.message_flags |= HV_MESSAGE_FLAG_PENDING;
        staged_msg->status = -EAGAIN;
        wait_for_sint_ack = true;
    } else {
        memcpy(dst_msg, &staged_msg->msg, sizeof(*dst_msg));
        staged_msg->status = hyperv_sint_route_set_sint(sint_route);
    }

    memory_region_set_dirty(&synic->msg_page_mr, 0, sizeof(*synic->msg_page));

posted:
    qatomic_set(&staged_msg->state, HV_STAGED_MSG_POSTED);
    /*
     * Notify the msg originator of the progress made; if the slot was busy,
     * we set the msg_pending flag in it, so it will be the guest that does
     * EOM and triggers the notification from KVM via sint_ack_notifier.
     */
    if (!wait_for_sint_ack) {
        aio_bh_schedule_oneshot(qemu_get_aio_context(), sint_msg_bh,
                                sint_route);
    }
}

/*
 * Post a Hyper-V message to the staging area, for delivery to the guest in
 * the vcpu thread.
 */
int hyperv_post_msg(HvSintRoute *sint_route, struct hyperv_message *src_msg)
{
    HvSintStagedMessage *staged_msg = sint_route->staged_msg;

    assert(staged_msg);

    /* grab the staging area */
    if (qatomic_cmpxchg(&staged_msg->state, HV_STAGED_MSG_FREE,
                        HV_STAGED_MSG_BUSY) != HV_STAGED_MSG_FREE) {
        return -EAGAIN;
    }

    memcpy(&staged_msg->msg, src_msg, sizeof(*src_msg));

    /* hold a reference on sint_route until the callback is finished */
    hyperv_sint_route_ref(sint_route);

    /* schedule message posting attempt in vcpu thread */
    async_run_on_cpu(sint_route->synic->cs, cpu_post_msg,
                     RUN_ON_CPU_HOST_PTR(sint_route));
    return 0;
}

static void sint_ack_handler(EventNotifier *notifier)
{
    HvSintRoute *sint_route = container_of(notifier, HvSintRoute,
                                           sint_ack_notifier);
    event_notifier_test_and_clear(notifier);

    /*
     * The guest consumed the previous message, so complete the current one
     * with -EAGAIN and let the msg originator retry.
     */
    aio_bh_schedule_oneshot(qemu_get_aio_context(), sint_msg_bh, sint_route);
}

/*
 * Set given event flag for a given sint on a given vcpu, and signal the sint.
 */
int hyperv_set_event_flag(HvSintRoute *sint_route, unsigned eventno)
{
    int ret;
    SynICState *synic = sint_route->synic;
    unsigned long *flags, set_mask;
    unsigned set_idx;

    if (eventno > HV_EVENT_FLAGS_COUNT) {
        return -EINVAL;
    }
    if (!synic->sctl_enabled || !synic->event_page_addr) {
        return -ENXIO;
    }

    set_idx = BIT_WORD(eventno);
    set_mask = BIT_MASK(eventno);
    flags = synic->event_page->slot[sint_route->sint].flags;

    if ((qatomic_fetch_or(&flags[set_idx], set_mask) & set_mask) != set_mask) {
        memory_region_set_dirty(&synic->event_page_mr, 0,
                                sizeof(*synic->event_page));
        ret = hyperv_sint_route_set_sint(sint_route);
    } else {
        ret = 0;
    }
    return ret;
}
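
/*
 * Editor's note, a worked example of the bit arithmetic above: on a host
 * with 64-bit longs, eventno 70 gives BIT_WORD(70) == 70 / 64 == 1 and
 * BIT_MASK(70) == 1UL << (70 % 64) == 1UL << 6, i.e. flags[1] bit 6.  The
 * qatomic_fetch_or() both sets the bit and reports its previous value, so
 * the SINT is signaled only on a 0 -> 1 transition; posting an already-set
 * flag again stays silent until the guest clears it.
 */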

HvSintRoute *hyperv_sint_route_new(uint32_t vp_index, uint32_t sint,
                                   HvSintMsgCb cb, void *cb_data)
{
    HvSintRoute *sint_route = NULL;
    EventNotifier *ack_notifier = NULL;
    int r, gsi;
    CPUState *cs;
    SynICState *synic;
    bool ack_event_initialized = false;

    cs = hyperv_find_vcpu(vp_index);
    if (!cs) {
        return NULL;
    }

    synic = get_synic(cs);
    if (!synic) {
        return NULL;
    }

    sint_route = g_new0(HvSintRoute, 1);
    if (!sint_route) {
        return NULL;
    }

    sint_route->synic = synic;
    sint_route->sint = sint;
    sint_route->refcount = 1;

    ack_notifier = cb ? &sint_route->sint_ack_notifier : NULL;
    if (ack_notifier) {
        sint_route->staged_msg = g_new0(HvSintStagedMessage, 1);
        if (!sint_route->staged_msg) {
            goto cleanup_err_sint;
        }
        sint_route->staged_msg->cb = cb;
        sint_route->staged_msg->cb_data = cb_data;

        r = event_notifier_init(ack_notifier, false);
        if (r) {
            goto cleanup_err_sint;
        }
        event_notifier_set_handler(ack_notifier, sint_ack_handler);
        ack_event_initialized = true;
    }

    /* See if we are done or we need to set up a GSI for this SintRoute */
    if (!synic->sctl_enabled) {
        goto cleanup;
    }

    /* Set up a GSI for this SintRoute */
    r = event_notifier_init(&sint_route->sint_set_notifier, false);
    if (r) {
        goto cleanup_err_sint;
    }

    gsi = kvm_irqchip_add_hv_sint_route(kvm_state, vp_index, sint);
    if (gsi < 0) {
        goto cleanup_err_sint_notifier;
    }

    r = kvm_irqchip_add_irqfd_notifier_gsi(kvm_state,
                                           &sint_route->sint_set_notifier,
                                           ack_notifier, gsi);
    if (r) {
        goto cleanup_err_irqfd;
    }
    sint_route->gsi = gsi;
cleanup:
    qemu_mutex_lock(&synic->sint_routes_mutex);
    QLIST_INSERT_HEAD(&synic->sint_routes, sint_route, link);
    qemu_mutex_unlock(&synic->sint_routes_mutex);
    return sint_route;

cleanup_err_irqfd:
    kvm_irqchip_release_virq(kvm_state, gsi);

cleanup_err_sint_notifier:
    event_notifier_cleanup(&sint_route->sint_set_notifier);

cleanup_err_sint:
    if (ack_notifier) {
        if (ack_event_initialized) {
            event_notifier_set_handler(ack_notifier, NULL);
            event_notifier_cleanup(ack_notifier);
        }

        g_free(sint_route->staged_msg);
    }

    g_free(sint_route);
    return NULL;
}
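
/*
 * Editor's illustrative sketch, not part of the original file: the typical
 * lifecycle of a SINT route for plain event signaling (no callback, hence
 * no staged-message machinery).  example_vp_index and example_sint are
 * hypothetical values.
 */
static void G_GNUC_UNUSED example_sint_route_lifecycle(void)
{
    uint32_t example_vp_index = 0, example_sint = 2;
    HvSintRoute *route;

    route = hyperv_sint_route_new(example_vp_index, example_sint, NULL, NULL);
    if (!route) {
        return;
    }

    hyperv_sint_route_set_sint(route); /* kick the SINT in the guest */
    hyperv_sint_route_unref(route);    /* drop the initial reference */
}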

void hyperv_sint_route_ref(HvSintRoute *sint_route)
{
    sint_route->refcount++;
}

void hyperv_sint_route_unref(HvSintRoute *sint_route)
{
    SynICState *synic;

    if (!sint_route) {
        return;
    }

    assert(sint_route->refcount > 0);

    if (--sint_route->refcount) {
        return;
    }

    synic = sint_route->synic;
    qemu_mutex_lock(&synic->sint_routes_mutex);
    QLIST_REMOVE(sint_route, link);
    qemu_mutex_unlock(&synic->sint_routes_mutex);

    if (sint_route->gsi) {
        kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state,
                                              &sint_route->sint_set_notifier,
                                              sint_route->gsi);
        kvm_irqchip_release_virq(kvm_state, sint_route->gsi);
        event_notifier_cleanup(&sint_route->sint_set_notifier);
    }

    if (sint_route->staged_msg) {
        event_notifier_set_handler(&sint_route->sint_ack_notifier, NULL);
        event_notifier_cleanup(&sint_route->sint_ack_notifier);
        g_free(sint_route->staged_msg);
    }
    g_free(sint_route);
}

int hyperv_sint_route_set_sint(HvSintRoute *sint_route)
{
    if (!sint_route->gsi) {
        return 0;
    }

    return event_notifier_set(&sint_route->sint_set_notifier);
}

typedef struct MsgHandler {
    struct rcu_head rcu;
    QLIST_ENTRY(MsgHandler) link;
    uint32_t conn_id;
    HvMsgHandler handler;
    void *data;
} MsgHandler;

typedef struct EventFlagHandler {
    struct rcu_head rcu;
    QLIST_ENTRY(EventFlagHandler) link;
    uint32_t conn_id;
    EventNotifier *notifier;
} EventFlagHandler;

static QLIST_HEAD(, MsgHandler) msg_handlers;
static QLIST_HEAD(, EventFlagHandler) event_flag_handlers;
static QemuMutex handlers_mutex;

static void __attribute__((constructor)) hv_init(void)
{
    QLIST_INIT(&msg_handlers);
    QLIST_INIT(&event_flag_handlers);
    qemu_mutex_init(&handlers_mutex);
}

int hyperv_set_msg_handler(uint32_t conn_id, HvMsgHandler handler, void *data)
{
    int ret;
    MsgHandler *mh;

    QEMU_LOCK_GUARD(&handlers_mutex);
    QLIST_FOREACH(mh, &msg_handlers, link) {
        if (mh->conn_id == conn_id) {
            if (handler) {
                ret = -EEXIST;
            } else {
                QLIST_REMOVE_RCU(mh, link);
                g_free_rcu(mh, rcu);
                ret = 0;
            }
            return ret;
        }
    }

    if (handler) {
        mh = g_new(MsgHandler, 1);
        mh->conn_id = conn_id;
        mh->handler = handler;
        mh->data = data;
        QLIST_INSERT_HEAD_RCU(&msg_handlers, mh, link);
        ret = 0;
    } else {
        ret = -ENOENT;
    }

    return ret;
}
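
/*
 * Editor's illustrative sketch, not part of the original file: registering
 * a handler for HvPostMessage hypercalls on a connection ID, assuming the
 * HvMsgHandler signature from hw/hyperv/hyperv.h.  example_conn_id and the
 * example_* functions are hypothetical.
 */
static uint16_t example_msg_handler(const struct hyperv_post_message_input *msg,
                                    void *data)
{
    /* consume msg->payload (msg->payload_size bytes) here */
    return HV_STATUS_SUCCESS;
}

static void G_GNUC_UNUSED example_register_msg_handler(uint32_t example_conn_id)
{
    if (hyperv_set_msg_handler(example_conn_id, example_msg_handler, NULL)) {
        return; /* -EEXIST: the connection ID is already taken */
    }
    /* ... and a NULL handler unregisters it again */
    hyperv_set_msg_handler(example_conn_id, NULL, NULL);
}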

uint16_t hyperv_hcall_post_message(uint64_t param, bool fast)
{
    uint16_t ret;
    hwaddr len;
    struct hyperv_post_message_input *msg;
    MsgHandler *mh;

    if (fast) {
        return HV_STATUS_INVALID_HYPERCALL_CODE;
    }
    if (param & (__alignof__(*msg) - 1)) {
        return HV_STATUS_INVALID_ALIGNMENT;
    }

    len = sizeof(*msg);
    msg = cpu_physical_memory_map(param, &len, 0);
    if (len < sizeof(*msg)) {
        ret = HV_STATUS_INSUFFICIENT_MEMORY;
        goto unmap;
    }
    if (msg->payload_size > sizeof(msg->payload)) {
        ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
        goto unmap;
    }

    ret = HV_STATUS_INVALID_CONNECTION_ID;
    WITH_RCU_READ_LOCK_GUARD() {
        QLIST_FOREACH_RCU(mh, &msg_handlers, link) {
            if (mh->conn_id == (msg->connection_id & HV_CONNECTION_ID_MASK)) {
                ret = mh->handler(msg, mh->data);
                break;
            }
        }
    }

unmap:
    cpu_physical_memory_unmap(msg, len, 0, 0);
    return ret;
}

static int set_event_flag_handler(uint32_t conn_id, EventNotifier *notifier)
{
    int ret;
    EventFlagHandler *handler;

    QEMU_LOCK_GUARD(&handlers_mutex);
    QLIST_FOREACH(handler, &event_flag_handlers, link) {
        if (handler->conn_id == conn_id) {
            if (notifier) {
                ret = -EEXIST;
            } else {
                QLIST_REMOVE_RCU(handler, link);
                g_free_rcu(handler, rcu);
                ret = 0;
            }
            return ret;
        }
    }

    if (notifier) {
        handler = g_new(EventFlagHandler, 1);
        handler->conn_id = conn_id;
        handler->notifier = notifier;
        QLIST_INSERT_HEAD_RCU(&event_flag_handlers, handler, link);
        ret = 0;
    } else {
        ret = -ENOENT;
    }

    return ret;
}

static bool process_event_flags_userspace;

int hyperv_set_event_flag_handler(uint32_t conn_id, EventNotifier *notifier)
{
    if (!process_event_flags_userspace &&
        !kvm_check_extension(kvm_state, KVM_CAP_HYPERV_EVENTFD)) {
        process_event_flags_userspace = true;

        warn_report("Hyper-V event signaling is not supported by this kernel; "
                    "using slower userspace hypercall processing");
    }

    if (!process_event_flags_userspace) {
        struct kvm_hyperv_eventfd hvevfd = {
            .conn_id = conn_id,
            .fd = notifier ? event_notifier_get_fd(notifier) : -1,
            .flags = notifier ? 0 : KVM_HYPERV_EVENTFD_DEASSIGN,
        };

        return kvm_vm_ioctl(kvm_state, KVM_HYPERV_EVENTFD, &hvevfd);
    }
    return set_event_flag_handler(conn_id, notifier);
}
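
/*
 * Editor's illustrative sketch, not part of the original file: wiring an
 * EventNotifier to a connection ID for HvSignalEvent.  Whether the kick
 * comes from KVM (KVM_HYPERV_EVENTFD) or from the userspace fallback above,
 * the consumer only waits on the notifier.  example_flag_notifier is
 * hypothetical.
 */
static EventNotifier example_flag_notifier;

static void G_GNUC_UNUSED example_register_event_flag(uint32_t conn_id)
{
    if (event_notifier_init(&example_flag_notifier, false) < 0) {
        return;
    }
    if (hyperv_set_event_flag_handler(conn_id, &example_flag_notifier)) {
        event_notifier_cleanup(&example_flag_notifier);
    }
}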

uint16_t hyperv_hcall_signal_event(uint64_t param, bool fast)
{
    EventFlagHandler *handler;

    if (unlikely(!fast)) {
        hwaddr addr = param;

        if (addr & (__alignof__(addr) - 1)) {
            return HV_STATUS_INVALID_ALIGNMENT;
        }

        param = ldq_phys(&address_space_memory, addr);
    }

    /*
     * Per spec, bits 32-47 contain the extra "flag number".  However, we
     * have no use for it, and in all known use cases it is zero, so just
     * report lookup failure if it isn't.
     */
    if (param & 0xffff00000000ULL) {
        return HV_STATUS_INVALID_PORT_ID;
    }
    /* remaining bits are reserved-zero */
    if (param & ~HV_CONNECTION_ID_MASK) {
        return HV_STATUS_INVALID_HYPERCALL_INPUT;
    }

    RCU_READ_LOCK_GUARD();
    QLIST_FOREACH_RCU(handler, &event_flag_handlers, link) {
        if (handler->conn_id == param) {
            event_notifier_set(handler->notifier);
            return 0;
        }
    }
    return HV_STATUS_INVALID_CONNECTION_ID;
}

static HvSynDbgHandler hv_syndbg_handler;
static void *hv_syndbg_context;

void hyperv_set_syndbg_handler(HvSynDbgHandler handler, void *context)
{
    assert(!hv_syndbg_handler);
    hv_syndbg_handler = handler;
    hv_syndbg_context = context;
}
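
/*
 * Editor's illustrative sketch, not part of the original file: the general
 * shape of a SynDbg handler (the real one lives in hw/hyperv/syndbg.c and
 * moves debugger traffic over a socket).  example_syndbg_handler is
 * hypothetical and only shows the dispatch on msg->type.
 */
static uint16_t G_GNUC_UNUSED example_syndbg_handler(void *context,
                                                     HvSynDbgMsg *msg)
{
    switch (msg->type) {
    case HV_SYNDBG_MSG_CONNECTION_INFO:
        msg->u.connection_info.host_ip = 0;   /* placeholder values */
        msg->u.connection_info.host_port = 0;
        return HV_STATUS_SUCCESS;
    case HV_SYNDBG_MSG_RECV:
        msg->u.recv.retrieved_count = 0;
        return HV_STATUS_NO_DATA;             /* nothing from the debugger */
    default:
        return HV_STATUS_SUCCESS;
    }
}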

uint16_t hyperv_hcall_reset_dbg_session(uint64_t outgpa)
{
    uint16_t ret;
    HvSynDbgMsg msg;
    struct hyperv_reset_debug_session_output *reset_dbg_session = NULL;
    hwaddr len;

    if (!hv_syndbg_handler) {
        ret = HV_STATUS_INVALID_HYPERCALL_CODE;
        goto cleanup;
    }

    len = sizeof(*reset_dbg_session);
    reset_dbg_session = cpu_physical_memory_map(outgpa, &len, 1);
    if (!reset_dbg_session || len < sizeof(*reset_dbg_session)) {
        ret = HV_STATUS_INSUFFICIENT_MEMORY;
        goto cleanup;
    }

    msg.type = HV_SYNDBG_MSG_CONNECTION_INFO;
    ret = hv_syndbg_handler(hv_syndbg_context, &msg);
    if (ret) {
        goto cleanup;
    }

    reset_dbg_session->host_ip = msg.u.connection_info.host_ip;
    reset_dbg_session->host_port = msg.u.connection_info.host_port;
    /* The following fields are only used as validation for KDVM */
    memset(&reset_dbg_session->host_mac, 0,
           sizeof(reset_dbg_session->host_mac));
    reset_dbg_session->target_ip = msg.u.connection_info.host_ip;
    reset_dbg_session->target_port = msg.u.connection_info.host_port;
    memset(&reset_dbg_session->target_mac, 0,
           sizeof(reset_dbg_session->target_mac));
cleanup:
    if (reset_dbg_session) {
        cpu_physical_memory_unmap(reset_dbg_session,
                                  sizeof(*reset_dbg_session), 1, len);
    }

    return ret;
}

uint16_t hyperv_hcall_retreive_dbg_data(uint64_t ingpa, uint64_t outgpa,
                                        bool fast)
{
    uint16_t ret;
    struct hyperv_retrieve_debug_data_input *debug_data_in = NULL;
    struct hyperv_retrieve_debug_data_output *debug_data_out = NULL;
    hwaddr in_len, out_len;
    HvSynDbgMsg msg;

    if (fast || !hv_syndbg_handler) {
        ret = HV_STATUS_INVALID_HYPERCALL_CODE;
        goto cleanup;
    }

    in_len = sizeof(*debug_data_in);
    debug_data_in = cpu_physical_memory_map(ingpa, &in_len, 0);
    if (!debug_data_in || in_len < sizeof(*debug_data_in)) {
        ret = HV_STATUS_INSUFFICIENT_MEMORY;
        goto cleanup;
    }

    out_len = sizeof(*debug_data_out);
    debug_data_out = cpu_physical_memory_map(outgpa, &out_len, 1);
    if (!debug_data_out || out_len < sizeof(*debug_data_out)) {
        ret = HV_STATUS_INSUFFICIENT_MEMORY;
        goto cleanup;
    }

    msg.type = HV_SYNDBG_MSG_RECV;
    msg.u.recv.buf_gpa = outgpa + sizeof(*debug_data_out);
    msg.u.recv.count = TARGET_PAGE_SIZE - sizeof(*debug_data_out);
    msg.u.recv.options = debug_data_in->options;
    msg.u.recv.timeout = debug_data_in->timeout;
    msg.u.recv.is_raw = true;
    ret = hv_syndbg_handler(hv_syndbg_context, &msg);
    if (ret == HV_STATUS_NO_DATA) {
        debug_data_out->retrieved_count = 0;
        debug_data_out->remaining_count = debug_data_in->count;
        goto cleanup;
    } else if (ret != HV_STATUS_SUCCESS) {
        goto cleanup;
    }

    debug_data_out->retrieved_count = msg.u.recv.retrieved_count;
    debug_data_out->remaining_count =
        debug_data_in->count - msg.u.recv.retrieved_count;
cleanup:
    if (debug_data_out) {
        cpu_physical_memory_unmap(debug_data_out, sizeof(*debug_data_out), 1,
                                  out_len);
    }

    if (debug_data_in) {
        cpu_physical_memory_unmap(debug_data_in, sizeof(*debug_data_in), 0,
                                  in_len);
    }

    return ret;
}

uint16_t hyperv_hcall_post_dbg_data(uint64_t ingpa, uint64_t outgpa, bool fast)
{
    uint16_t ret;
    struct hyperv_post_debug_data_input *post_data_in = NULL;
    struct hyperv_post_debug_data_output *post_data_out = NULL;
    hwaddr in_len, out_len;
    HvSynDbgMsg msg;

    if (fast || !hv_syndbg_handler) {
        ret = HV_STATUS_INVALID_HYPERCALL_CODE;
        goto cleanup;
    }

    in_len = sizeof(*post_data_in);
    post_data_in = cpu_physical_memory_map(ingpa, &in_len, 0);
    if (!post_data_in || in_len < sizeof(*post_data_in)) {
        ret = HV_STATUS_INSUFFICIENT_MEMORY;
        goto cleanup;
    }

    if (post_data_in->count > TARGET_PAGE_SIZE - sizeof(*post_data_in)) {
        ret = HV_STATUS_INVALID_PARAMETER;
        goto cleanup;
    }

    out_len = sizeof(*post_data_out);
    post_data_out = cpu_physical_memory_map(outgpa, &out_len, 1);
    if (!post_data_out || out_len < sizeof(*post_data_out)) {
        ret = HV_STATUS_INSUFFICIENT_MEMORY;
        goto cleanup;
    }

    msg.type = HV_SYNDBG_MSG_SEND;
    msg.u.send.buf_gpa = ingpa + sizeof(*post_data_in);
    msg.u.send.count = post_data_in->count;
    msg.u.send.is_raw = true;
    ret = hv_syndbg_handler(hv_syndbg_context, &msg);
    if (ret != HV_STATUS_SUCCESS) {
        goto cleanup;
    }

    post_data_out->pending_count = msg.u.send.pending_count;
    ret = post_data_out->pending_count ? HV_STATUS_INSUFFICIENT_BUFFERS :
                                         HV_STATUS_SUCCESS;
cleanup:
    if (post_data_out) {
        cpu_physical_memory_unmap(post_data_out,
                                  sizeof(*post_data_out), 1, out_len);
    }

    if (post_data_in) {
        cpu_physical_memory_unmap(post_data_in,
                                  sizeof(*post_data_in), 0, in_len);
    }

    return ret;
}

uint32_t hyperv_syndbg_send(uint64_t ingpa, uint32_t count)
{
    HvSynDbgMsg msg;

    if (!hv_syndbg_handler) {
        return HV_SYNDBG_STATUS_INVALID;
    }

    msg.type = HV_SYNDBG_MSG_SEND;
    msg.u.send.buf_gpa = ingpa;
    msg.u.send.count = count;
    msg.u.send.is_raw = false;
    if (hv_syndbg_handler(hv_syndbg_context, &msg)) {
        return HV_SYNDBG_STATUS_INVALID;
    }

    return HV_SYNDBG_STATUS_SEND_SUCCESS;
}

uint32_t hyperv_syndbg_recv(uint64_t ingpa, uint32_t count)
{
    uint16_t ret;
    HvSynDbgMsg msg;

    if (!hv_syndbg_handler) {
        return HV_SYNDBG_STATUS_INVALID;
    }

    msg.type = HV_SYNDBG_MSG_RECV;
    msg.u.recv.buf_gpa = ingpa;
    msg.u.recv.count = count;
    msg.u.recv.options = 0;
    msg.u.recv.timeout = 0;
    msg.u.recv.is_raw = false;
    ret = hv_syndbg_handler(hv_syndbg_context, &msg);
    if (ret != HV_STATUS_SUCCESS) {
        return 0;
    }

    return HV_SYNDBG_STATUS_SET_SIZE(HV_SYNDBG_STATUS_RECV_SUCCESS,
                                     msg.u.recv.retrieved_count);
}

void hyperv_syndbg_set_pending_page(uint64_t ingpa)
{
    HvSynDbgMsg msg;

    if (!hv_syndbg_handler) {
        return;
    }

    msg.type = HV_SYNDBG_MSG_SET_PENDING_PAGE;
    msg.u.pending_page.buf_gpa = ingpa;
    hv_syndbg_handler(hv_syndbg_context, &msg);
}

uint64_t hyperv_syndbg_query_options(void)
{
    HvSynDbgMsg msg;

    if (!hv_syndbg_handler) {
        return 0;
    }

    msg.type = HV_SYNDBG_MSG_QUERY_OPTIONS;
    if (hv_syndbg_handler(hv_syndbg_context, &msg) != HV_STATUS_SUCCESS) {
        return 0;
    }

    return msg.u.query_options.options;
}

static bool vmbus_recommended_features_enabled;

bool hyperv_are_vmbus_recommended_features_enabled(void)
{
    return vmbus_recommended_features_enabled;
}

void hyperv_set_vmbus_recommended_features_enabled(void)
{
    vmbus_recommended_features_enabled = true;
}
966