/*
 * Hyper-V guest/hypervisor interaction
 *
 * Copyright (c) 2015-2018 Virtuozzo International GmbH.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "exec/address-spaces.h"
#include "exec/memory.h"
#include "sysemu/kvm.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qemu/lockable.h"
#include "qemu/queue.h"
#include "qemu/rcu.h"
#include "qemu/rcu_queue.h"
#include "hw/hyperv/hyperv.h"
#include "qom/object.h"
#include "target/i386/kvm/hyperv-proto.h"
#include "target/i386/cpu.h"
#include "exec/cpu-all.h"

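/*
 * Per-vCPU state of the Hyper-V synthetic interrupt controller (SynIC):
 * the overlay MemoryRegions backing the message and event flag pages, the
 * guest physical addresses they are currently mapped at (0 if unmapped),
 * and the list of SINT routes wired up for this vCPU.
 */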
struct SynICState {
    DeviceState parent_obj;

    CPUState *cs;

    bool sctl_enabled;
    hwaddr msg_page_addr;
    hwaddr event_page_addr;
    MemoryRegion msg_page_mr;
    MemoryRegion event_page_mr;
    struct hyperv_message_page *msg_page;
    struct hyperv_event_flags_page *event_page;

    QemuMutex sint_routes_mutex;
    QLIST_HEAD(, HvSintRoute) sint_routes;
};

#define TYPE_SYNIC "hyperv-synic"
OBJECT_DECLARE_SIMPLE_TYPE(SynICState, SYNIC)

static bool synic_enabled;

bool hyperv_is_synic_enabled(void)
{
    return synic_enabled;
}

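/* Fetch the SynIC object attached to a vCPU, or NULL if it has none. */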
static SynICState *get_synic(CPUState *cs)
{
    return SYNIC(object_resolve_path_component(OBJECT(cs), "synic"));
}

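/*
 * Apply new SynIC settings: remap the message and event flag page overlays
 * in system memory whenever their guest physical addresses change,
 * typically in response to the guest programming the SynIC MSRs.
 */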
static void synic_update(SynICState *synic, bool sctl_enable,
                         hwaddr msg_page_addr, hwaddr event_page_addr)
{
    synic->sctl_enabled = sctl_enable;
    if (synic->msg_page_addr != msg_page_addr) {
        if (synic->msg_page_addr) {
            memory_region_del_subregion(get_system_memory(),
                                        &synic->msg_page_mr);
        }
        if (msg_page_addr) {
            memory_region_add_subregion(get_system_memory(), msg_page_addr,
                                        &synic->msg_page_mr);
        }
        synic->msg_page_addr = msg_page_addr;
    }
    if (synic->event_page_addr != event_page_addr) {
        if (synic->event_page_addr) {
            memory_region_del_subregion(get_system_memory(),
                                        &synic->event_page_mr);
        }
        if (event_page_addr) {
            memory_region_add_subregion(get_system_memory(), event_page_addr,
                                        &synic->event_page_mr);
        }
        synic->event_page_addr = event_page_addr;
    }
}

void hyperv_synic_update(CPUState *cs, bool sctl_enable,
                         hwaddr msg_page_addr, hwaddr event_page_addr)
{
    SynICState *synic = get_synic(cs);

    if (!synic) {
        return;
    }

    synic_update(synic, sctl_enable, msg_page_addr, event_page_addr);
}

static void synic_realize(DeviceState *dev, Error **errp)
{
    Object *obj = OBJECT(dev);
    SynICState *synic = SYNIC(dev);
    char *msgp_name, *eventp_name;
    uint32_t vp_index;

    /* memory region names have to be globally unique */
    vp_index = hyperv_vp_index(synic->cs);
    msgp_name = g_strdup_printf("synic-%u-msg-page", vp_index);
    eventp_name = g_strdup_printf("synic-%u-event-page", vp_index);

    memory_region_init_ram(&synic->msg_page_mr, obj, msgp_name,
                           sizeof(*synic->msg_page), &error_abort);
    memory_region_init_ram(&synic->event_page_mr, obj, eventp_name,
                           sizeof(*synic->event_page), &error_abort);
    synic->msg_page = memory_region_get_ram_ptr(&synic->msg_page_mr);
    synic->event_page = memory_region_get_ram_ptr(&synic->event_page_mr);
    qemu_mutex_init(&synic->sint_routes_mutex);
    QLIST_INIT(&synic->sint_routes);

    g_free(msgp_name);
    g_free(eventp_name);
}

static void synic_reset(DeviceState *dev)
{
    SynICState *synic = SYNIC(dev);
    memset(synic->msg_page, 0, sizeof(*synic->msg_page));
    memset(synic->event_page, 0, sizeof(*synic->event_page));
    synic_update(synic, false, 0, 0);
    assert(QLIST_EMPTY(&synic->sint_routes));
}

static void synic_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = synic_realize;
    device_class_set_legacy_reset(dc, synic_reset);
    dc->user_creatable = false;
}

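/*
 * Create and realize a SynIC for the given vCPU, attaching it as a child
 * object so that get_synic() can find it later.
 */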
void hyperv_synic_add(CPUState *cs)
{
    Object *obj;
    SynICState *synic;

    obj = object_new(TYPE_SYNIC);
    synic = SYNIC(obj);
    synic->cs = cs;
    object_property_add_child(OBJECT(cs), "synic", obj);
    object_unref(obj);
    qdev_realize(DEVICE(obj), NULL, &error_abort);
    synic_enabled = true;
}

void hyperv_synic_reset(CPUState *cs)
{
    SynICState *synic = get_synic(cs);

    if (synic) {
        device_cold_reset(DEVICE(synic));
    }
}

static const TypeInfo synic_type_info = {
    .name = TYPE_SYNIC,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(SynICState),
    .class_init = synic_class_init,
};

static void synic_register_types(void)
{
    type_register_static(&synic_type_info);
}

type_init(synic_register_types)

/*
 * KVM has its own message producers (SynIC timers).  To guarantee
 * serialization with both the KVM vcpu and the guest cpu, the messages are
 * first staged in an intermediate area and then posted to the SynIC message
 * page in the vcpu thread.
 */
typedef struct HvSintStagedMessage {
    /* message content staged by hyperv_post_msg */
    struct hyperv_message msg;
    /* callback + data (r/o) to complete the processing in a BH */
    HvSintMsgCb cb;
    void *cb_data;
    /* message posting status filled by cpu_post_msg */
    int status;
    /* passing the buck: */
    enum {
        /* initial state */
        HV_STAGED_MSG_FREE,
        /*
         * hyperv_post_msg (e.g. in main loop) grabs the staged area (FREE ->
         * BUSY), copies msg, and schedules cpu_post_msg on the assigned cpu
         */
        HV_STAGED_MSG_BUSY,
        /*
         * cpu_post_msg (vcpu thread) tries to copy the staged msg into the
         * msg slot and notify the guest, records the status, marks the
         * posting done (BUSY -> POSTED), and schedules the sint_msg_bh BH
         */
        HV_STAGED_MSG_POSTED,
        /*
         * sint_msg_bh (BH) verifies that the posting is done, runs the
         * callback, and starts over (POSTED -> FREE)
         */
    } state;
} HvSintStagedMessage;

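/*
 * A route from a SINT on a particular vCPU to its consumers: the
 * sint_set_notifier is the irqfd used to trigger the SINT, and, when a
 * message callback is registered, sint_ack_notifier is the resampler
 * notifier signaled when the guest acknowledges (EOMs) the message.
 */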
struct HvSintRoute {
    uint32_t sint;
    SynICState *synic;
    int gsi;
    EventNotifier sint_set_notifier;
    EventNotifier sint_ack_notifier;

    HvSintStagedMessage *staged_msg;

    unsigned refcount;
    QLIST_ENTRY(HvSintRoute) link;
};

static CPUState *hyperv_find_vcpu(uint32_t vp_index)
{
    CPUState *cs = qemu_get_cpu(vp_index);
    assert(!cs || hyperv_vp_index(cs) == vp_index);
    return cs;
}

/*
 * BH to complete the processing of a staged message.
 */
static void sint_msg_bh(void *opaque)
{
    HvSintRoute *sint_route = opaque;
    HvSintStagedMessage *staged_msg = sint_route->staged_msg;

    if (qatomic_read(&staged_msg->state) != HV_STAGED_MSG_POSTED) {
        /* status not ready yet (spurious ack from guest?), ignore */
        return;
    }

    staged_msg->cb(staged_msg->cb_data, staged_msg->status);
    staged_msg->status = 0;

    /* staged message processing finished, ready to start over */
    qatomic_set(&staged_msg->state, HV_STAGED_MSG_FREE);
    /* drop the reference taken in hyperv_post_msg */
    hyperv_sint_route_unref(sint_route);
}

/*
 * Worker to transfer the message from the staging area into the SynIC message
 * page in vcpu context.
 */
static void cpu_post_msg(CPUState *cs, run_on_cpu_data data)
{
    HvSintRoute *sint_route = data.host_ptr;
    HvSintStagedMessage *staged_msg = sint_route->staged_msg;
    SynICState *synic = sint_route->synic;
    struct hyperv_message *dst_msg;
    bool wait_for_sint_ack = false;

    assert(staged_msg->state == HV_STAGED_MSG_BUSY);

    if (!synic->msg_page_addr) {
        staged_msg->status = -ENXIO;
        goto posted;
    }

    dst_msg = &synic->msg_page->slot[sint_route->sint];

    if (dst_msg->header.message_type != HV_MESSAGE_NONE) {
        dst_msg->header.message_flags |= HV_MESSAGE_FLAG_PENDING;
        staged_msg->status = -EAGAIN;
        wait_for_sint_ack = true;
    } else {
        memcpy(dst_msg, &staged_msg->msg, sizeof(*dst_msg));
        staged_msg->status = hyperv_sint_route_set_sint(sint_route);
    }

    memory_region_set_dirty(&synic->msg_page_mr, 0, sizeof(*synic->msg_page));

posted:
    qatomic_set(&staged_msg->state, HV_STAGED_MSG_POSTED);
    /*
     * Notify the msg originator of the progress made; if the slot was busy
     * we set the msg_pending flag in it, so the notification will come from
     * KVM via sint_ack_notifier once the guest does EOM
     */
    if (!wait_for_sint_ack) {
        aio_bh_schedule_oneshot(qemu_get_aio_context(), sint_msg_bh,
                                sint_route);
    }
}

/*
 * Post a Hyper-V message to the staging area, for delivery to the guest in
 * the vcpu thread.
 */
int hyperv_post_msg(HvSintRoute *sint_route, struct hyperv_message *src_msg)
{
    HvSintStagedMessage *staged_msg = sint_route->staged_msg;

    assert(staged_msg);

    /* grab the staging area */
    if (qatomic_cmpxchg(&staged_msg->state, HV_STAGED_MSG_FREE,
                        HV_STAGED_MSG_BUSY) != HV_STAGED_MSG_FREE) {
        return -EAGAIN;
    }

    memcpy(&staged_msg->msg, src_msg, sizeof(*src_msg));

    /* hold a reference on sint_route until the callback is finished */
    hyperv_sint_route_ref(sint_route);

    /* schedule message posting attempt in vcpu thread */
    async_run_on_cpu(sint_route->synic->cs, cpu_post_msg,
                     RUN_ON_CPU_HOST_PTR(sint_route));
    return 0;
}

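/*
 * Illustrative only, not an API from this file: a message producer keeps
 * its own retry loop around hyperv_post_msg().  -EAGAIN is reported either
 * synchronously (staging area busy) or through the completion callback once
 * the guest EOMs the pending slot, e.g. (names hypothetical):
 *
 *     static void my_msg_cb(void *data, int status)
 *     {
 *         if (status == -EAGAIN) {
 *             hyperv_post_msg(my_route, &my_msg);    // retry the post
 *         }
 *     }
 */
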
static void sint_ack_handler(EventNotifier *notifier)
{
    HvSintRoute *sint_route = container_of(notifier, HvSintRoute,
                                           sint_ack_notifier);
    event_notifier_test_and_clear(notifier);

    /*
     * the guest consumed the previous message so complete the current one
     * with -EAGAIN and let the msg originator retry
     */
    aio_bh_schedule_oneshot(qemu_get_aio_context(), sint_msg_bh, sint_route);
}

/*
 * Set given event flag for a given sint on a given vcpu, and signal the sint.
 */
int hyperv_set_event_flag(HvSintRoute *sint_route, unsigned eventno)
{
    int ret;
    SynICState *synic = sint_route->synic;
    unsigned long *flags, set_mask;
    unsigned set_idx;

    if (eventno >= HV_EVENT_FLAGS_COUNT) {
        return -EINVAL;
    }
    if (!synic->sctl_enabled || !synic->event_page_addr) {
        return -ENXIO;
    }

    set_idx = BIT_WORD(eventno);
    set_mask = BIT_MASK(eventno);
    flags = synic->event_page->slot[sint_route->sint].flags;

    if ((qatomic_fetch_or(&flags[set_idx], set_mask) & set_mask) != set_mask) {
        memory_region_set_dirty(&synic->event_page_mr, 0,
                                sizeof(*synic->event_page));
        ret = hyperv_sint_route_set_sint(sint_route);
    } else {
        ret = 0;
    }
    return ret;
}

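/*
 * Allocate a KVM GSI routed to the given (vcpu, sint) pair, so that setting
 * the attached irqfd injects the corresponding SINT into the guest.
 */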
static int kvm_irqchip_add_hv_sint_route(KVMState *s, uint32_t vcpu, uint32_t sint)
{
    struct kvm_irq_routing_entry kroute = {};
    int virq;

    if (!kvm_gsi_routing_enabled()) {
        return -ENOSYS;
    }
    virq = kvm_irqchip_get_virq(s);
    if (virq < 0) {
        return virq;
    }

    kroute.gsi = virq;
    kroute.type = KVM_IRQ_ROUTING_HV_SINT;
    kroute.flags = 0;
    kroute.u.hv_sint.vcpu = vcpu;
    kroute.u.hv_sint.sint = sint;

    kvm_add_routing_entry(s, &kroute);
    kvm_irqchip_commit_routes(s);

    return virq;
}

HvSintRoute *hyperv_sint_route_new(uint32_t vp_index, uint32_t sint,
                                   HvSintMsgCb cb, void *cb_data)
{
    HvSintRoute *sint_route = NULL;
    EventNotifier *ack_notifier = NULL;
    int r, gsi;
    CPUState *cs;
    SynICState *synic;
    bool ack_event_initialized = false;

    cs = hyperv_find_vcpu(vp_index);
    if (!cs) {
        return NULL;
    }

    synic = get_synic(cs);
    if (!synic) {
        return NULL;
    }

    sint_route = g_new0(HvSintRoute, 1);
    if (!sint_route) {
        return NULL;
    }

    sint_route->synic = synic;
    sint_route->sint = sint;
    sint_route->refcount = 1;

    ack_notifier = cb ? &sint_route->sint_ack_notifier : NULL;
    if (ack_notifier) {
        sint_route->staged_msg = g_new0(HvSintStagedMessage, 1);
        if (!sint_route->staged_msg) {
            goto cleanup_err_sint;
        }
        sint_route->staged_msg->cb = cb;
        sint_route->staged_msg->cb_data = cb_data;

        r = event_notifier_init(ack_notifier, false);
        if (r) {
            goto cleanup_err_sint;
        }
        event_notifier_set_handler(ack_notifier, sint_ack_handler);
        ack_event_initialized = true;
    }

    /* See if we are done or we need to set up a GSI for this SintRoute */
    if (!synic->sctl_enabled) {
        goto cleanup;
    }

    /* We need to set up a GSI for this SintRoute */
    r = event_notifier_init(&sint_route->sint_set_notifier, false);
    if (r) {
        goto cleanup_err_sint;
    }

    gsi = kvm_irqchip_add_hv_sint_route(kvm_state, vp_index, sint);
    if (gsi < 0) {
        goto cleanup_err_sint_notifier;
    }

    r = kvm_irqchip_add_irqfd_notifier_gsi(kvm_state,
                                           &sint_route->sint_set_notifier,
                                           ack_notifier, gsi);
    if (r) {
        goto cleanup_err_irqfd;
    }
    sint_route->gsi = gsi;
cleanup:
    qemu_mutex_lock(&synic->sint_routes_mutex);
    QLIST_INSERT_HEAD(&synic->sint_routes, sint_route, link);
    qemu_mutex_unlock(&synic->sint_routes_mutex);
    return sint_route;

cleanup_err_irqfd:
    kvm_irqchip_release_virq(kvm_state, gsi);

cleanup_err_sint_notifier:
    event_notifier_cleanup(&sint_route->sint_set_notifier);

cleanup_err_sint:
    if (ack_notifier) {
        if (ack_event_initialized) {
            event_notifier_set_handler(ack_notifier, NULL);
            event_notifier_cleanup(ack_notifier);
        }

        g_free(sint_route->staged_msg);
    }

    g_free(sint_route);
    return NULL;
}

void hyperv_sint_route_ref(HvSintRoute *sint_route)
{
    sint_route->refcount++;
}

void hyperv_sint_route_unref(HvSintRoute *sint_route)
{
    SynICState *synic;

    if (!sint_route) {
        return;
    }

    assert(sint_route->refcount > 0);

    if (--sint_route->refcount) {
        return;
    }

    synic = sint_route->synic;
    qemu_mutex_lock(&synic->sint_routes_mutex);
    QLIST_REMOVE(sint_route, link);
    qemu_mutex_unlock(&synic->sint_routes_mutex);

    if (sint_route->gsi) {
        kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state,
                                              &sint_route->sint_set_notifier,
                                              sint_route->gsi);
        kvm_irqchip_release_virq(kvm_state, sint_route->gsi);
        event_notifier_cleanup(&sint_route->sint_set_notifier);
    }

    if (sint_route->staged_msg) {
        event_notifier_set_handler(&sint_route->sint_ack_notifier, NULL);
        event_notifier_cleanup(&sint_route->sint_ack_notifier);
        g_free(sint_route->staged_msg);
    }
    g_free(sint_route);
}

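/*
 * Trigger the SINT via the irqfd.  A route with no GSI (set up while the
 * SynIC was disabled) is treated as a no-op success.
 */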
int hyperv_sint_route_set_sint(HvSintRoute *sint_route)
{
    if (!sint_route->gsi) {
        return 0;
    }

    return event_notifier_set(&sint_route->sint_set_notifier);
}

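/*
 * Handlers for guest-to-host messages and event flags, keyed by connection
 * ID: readers walk the lists under RCU, writers serialize on handlers_mutex.
 */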
typedef struct MsgHandler {
    struct rcu_head rcu;
    QLIST_ENTRY(MsgHandler) link;
    uint32_t conn_id;
    HvMsgHandler handler;
    void *data;
} MsgHandler;

typedef struct EventFlagHandler {
    struct rcu_head rcu;
    QLIST_ENTRY(EventFlagHandler) link;
    uint32_t conn_id;
    EventNotifier *notifier;
} EventFlagHandler;

static QLIST_HEAD(, MsgHandler) msg_handlers;
static QLIST_HEAD(, EventFlagHandler) event_flag_handlers;
static QemuMutex handlers_mutex;

static void __attribute__((constructor)) hv_init(void)
{
    QLIST_INIT(&msg_handlers);
    QLIST_INIT(&event_flag_handlers);
    qemu_mutex_init(&handlers_mutex);
}

int hyperv_set_msg_handler(uint32_t conn_id, HvMsgHandler handler, void *data)
{
    int ret;
    MsgHandler *mh;

    QEMU_LOCK_GUARD(&handlers_mutex);
    QLIST_FOREACH(mh, &msg_handlers, link) {
        if (mh->conn_id == conn_id) {
            if (handler) {
                ret = -EEXIST;
            } else {
                QLIST_REMOVE_RCU(mh, link);
                g_free_rcu(mh, rcu);
                ret = 0;
            }
            return ret;
        }
    }

    if (handler) {
        mh = g_new(MsgHandler, 1);
        mh->conn_id = conn_id;
        mh->handler = handler;
        mh->data = data;
        QLIST_INSERT_HEAD_RCU(&msg_handlers, mh, link);
        ret = 0;
    } else {
        ret = -ENOENT;
    }

    return ret;
}

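/*
 * Illustrative only, not code from this file: a consumer of a connection ID
 * would typically register both callbacks and later pass NULL to
 * unregister, e.g. (names hypothetical):
 *
 *     hyperv_set_msg_handler(conn_id, my_msg_handler, my_dev);
 *     hyperv_set_event_flag_handler(conn_id, &my_dev->notifier);
 *     ...
 *     hyperv_set_msg_handler(conn_id, NULL, NULL);
 *     hyperv_set_event_flag_handler(conn_id, NULL);
 */

/*
 * Handle the HvPostMessage hypercall: map the input page, validate it, and
 * dispatch the message to the handler registered for its connection ID.
 */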
uint16_t hyperv_hcall_post_message(uint64_t param, bool fast)
{
    uint16_t ret;
    hwaddr len;
    struct hyperv_post_message_input *msg;
    MsgHandler *mh;

    if (fast) {
        return HV_STATUS_INVALID_HYPERCALL_CODE;
    }
    if (param & (__alignof__(*msg) - 1)) {
        return HV_STATUS_INVALID_ALIGNMENT;
    }

    len = sizeof(*msg);
    msg = cpu_physical_memory_map(param, &len, 0);
    if (len < sizeof(*msg)) {
        ret = HV_STATUS_INSUFFICIENT_MEMORY;
        goto unmap;
    }
    if (msg->payload_size > sizeof(msg->payload)) {
        ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
        goto unmap;
    }

    ret = HV_STATUS_INVALID_CONNECTION_ID;
    WITH_RCU_READ_LOCK_GUARD() {
        QLIST_FOREACH_RCU(mh, &msg_handlers, link) {
            if (mh->conn_id == (msg->connection_id & HV_CONNECTION_ID_MASK)) {
                ret = mh->handler(msg, mh->data);
                break;
            }
        }
    }

unmap:
    cpu_physical_memory_unmap(msg, len, 0, 0);
    return ret;
}

static int set_event_flag_handler(uint32_t conn_id, EventNotifier *notifier)
{
    int ret;
    EventFlagHandler *handler;

    QEMU_LOCK_GUARD(&handlers_mutex);
    QLIST_FOREACH(handler, &event_flag_handlers, link) {
        if (handler->conn_id == conn_id) {
            if (notifier) {
                ret = -EEXIST;
            } else {
                QLIST_REMOVE_RCU(handler, link);
                g_free_rcu(handler, rcu);
                ret = 0;
            }
            return ret;
        }
    }

    if (notifier) {
        handler = g_new(EventFlagHandler, 1);
        handler->conn_id = conn_id;
        handler->notifier = notifier;
        QLIST_INSERT_HEAD_RCU(&event_flag_handlers, handler, link);
        ret = 0;
    } else {
        ret = -ENOENT;
    }

    return ret;
}

static bool process_event_flags_userspace;

int hyperv_set_event_flag_handler(uint32_t conn_id, EventNotifier *notifier)
{
    if (!process_event_flags_userspace &&
        !kvm_check_extension(kvm_state, KVM_CAP_HYPERV_EVENTFD)) {
        process_event_flags_userspace = true;

        warn_report("Hyper-V event signaling is not supported by this kernel; "
                    "using slower userspace hypercall processing");
    }

    if (!process_event_flags_userspace) {
        struct kvm_hyperv_eventfd hvevfd = {
            .conn_id = conn_id,
            .fd = notifier ? event_notifier_get_fd(notifier) : -1,
            .flags = notifier ? 0 : KVM_HYPERV_EVENTFD_DEASSIGN,
        };

        return kvm_vm_ioctl(kvm_state, KVM_HYPERV_EVENTFD, &hvevfd);
    }
    return set_event_flag_handler(conn_id, notifier);
}

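/*
 * Handle the HvSignalEvent hypercall: for the slow variant the connection
 * ID is read from guest memory, for the fast one it arrives in the
 * parameter itself; then the matching registered notifier is signaled.
 */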
uint16_t hyperv_hcall_signal_event(uint64_t param, bool fast)
{
    EventFlagHandler *handler;

    if (unlikely(!fast)) {
        hwaddr addr = param;

        if (addr & (__alignof__(addr) - 1)) {
            return HV_STATUS_INVALID_ALIGNMENT;
        }

        param = ldq_phys(&address_space_memory, addr);
    }

    /*
     * Per spec, bits 32-47 contain the extra "flag number".  However, we
     * have no use for it, and in all known use cases it is zero, so just
     * report lookup failure if it isn't.
     */
    if (param & 0xffff00000000ULL) {
        return HV_STATUS_INVALID_PORT_ID;
    }
    /* remaining bits are reserved-zero */
    if (param & ~HV_CONNECTION_ID_MASK) {
        return HV_STATUS_INVALID_HYPERCALL_INPUT;
    }

    RCU_READ_LOCK_GUARD();
    QLIST_FOREACH_RCU(handler, &event_flag_handlers, link) {
        if (handler->conn_id == param) {
            event_notifier_set(handler->notifier);
            return 0;
        }
    }
    return HV_STATUS_INVALID_CONNECTION_ID;
}

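/*
 * Synthetic debugger (SynDbg) support: the hypercalls and helpers below
 * forward everything to a single handler registered via
 * hyperv_set_syndbg_handler().
 */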
static HvSynDbgHandler hv_syndbg_handler;
static void *hv_syndbg_context;

void hyperv_set_syndbg_handler(HvSynDbgHandler handler, void *context)
{
    assert(!hv_syndbg_handler);
    hv_syndbg_handler = handler;
    hv_syndbg_context = context;
}

uint16_t hyperv_hcall_reset_dbg_session(uint64_t outgpa)
{
    uint16_t ret;
    HvSynDbgMsg msg;
    struct hyperv_reset_debug_session_output *reset_dbg_session = NULL;
    hwaddr len;

    if (!hv_syndbg_handler) {
        ret = HV_STATUS_INVALID_HYPERCALL_CODE;
        goto cleanup;
    }

    len = sizeof(*reset_dbg_session);
    reset_dbg_session = cpu_physical_memory_map(outgpa, &len, 1);
    if (!reset_dbg_session || len < sizeof(*reset_dbg_session)) {
        ret = HV_STATUS_INSUFFICIENT_MEMORY;
        goto cleanup;
    }

    msg.type = HV_SYNDBG_MSG_CONNECTION_INFO;
    ret = hv_syndbg_handler(hv_syndbg_context, &msg);
    if (ret) {
        goto cleanup;
    }

    reset_dbg_session->host_ip = msg.u.connection_info.host_ip;
    reset_dbg_session->host_port = msg.u.connection_info.host_port;
    /* The following fields are only used as validation for KDVM */
    memset(&reset_dbg_session->host_mac, 0,
           sizeof(reset_dbg_session->host_mac));
    reset_dbg_session->target_ip = msg.u.connection_info.host_ip;
    reset_dbg_session->target_port = msg.u.connection_info.host_port;
    memset(&reset_dbg_session->target_mac, 0,
           sizeof(reset_dbg_session->target_mac));
cleanup:
    if (reset_dbg_session) {
        cpu_physical_memory_unmap(reset_dbg_session,
                                  sizeof(*reset_dbg_session), 1, len);
    }

    return ret;
}

uint16_t hyperv_hcall_retreive_dbg_data(uint64_t ingpa, uint64_t outgpa,
                                        bool fast)
{
    uint16_t ret;
    struct hyperv_retrieve_debug_data_input *debug_data_in = NULL;
    struct hyperv_retrieve_debug_data_output *debug_data_out = NULL;
    hwaddr in_len, out_len;
    HvSynDbgMsg msg;

    if (fast || !hv_syndbg_handler) {
        ret = HV_STATUS_INVALID_HYPERCALL_CODE;
        goto cleanup;
    }

    in_len = sizeof(*debug_data_in);
    debug_data_in = cpu_physical_memory_map(ingpa, &in_len, 0);
    if (!debug_data_in || in_len < sizeof(*debug_data_in)) {
        ret = HV_STATUS_INSUFFICIENT_MEMORY;
        goto cleanup;
    }

    out_len = sizeof(*debug_data_out);
    debug_data_out = cpu_physical_memory_map(outgpa, &out_len, 1);
    if (!debug_data_out || out_len < sizeof(*debug_data_out)) {
        ret = HV_STATUS_INSUFFICIENT_MEMORY;
        goto cleanup;
    }

    msg.type = HV_SYNDBG_MSG_RECV;
    msg.u.recv.buf_gpa = outgpa + sizeof(*debug_data_out);
    msg.u.recv.count = TARGET_PAGE_SIZE - sizeof(*debug_data_out);
    msg.u.recv.options = debug_data_in->options;
    msg.u.recv.timeout = debug_data_in->timeout;
    msg.u.recv.is_raw = true;
    ret = hv_syndbg_handler(hv_syndbg_context, &msg);
    if (ret == HV_STATUS_NO_DATA) {
        debug_data_out->retrieved_count = 0;
        debug_data_out->remaining_count = debug_data_in->count;
        goto cleanup;
    } else if (ret != HV_STATUS_SUCCESS) {
        goto cleanup;
    }

    debug_data_out->retrieved_count = msg.u.recv.retrieved_count;
    debug_data_out->remaining_count =
        debug_data_in->count - msg.u.recv.retrieved_count;
cleanup:
    if (debug_data_out) {
        cpu_physical_memory_unmap(debug_data_out, sizeof(*debug_data_out), 1,
                                  out_len);
    }

    if (debug_data_in) {
        cpu_physical_memory_unmap(debug_data_in, sizeof(*debug_data_in), 0,
                                  in_len);
    }

    return ret;
}

uint16_t hyperv_hcall_post_dbg_data(uint64_t ingpa, uint64_t outgpa, bool fast)
{
    uint16_t ret;
    struct hyperv_post_debug_data_input *post_data_in = NULL;
    struct hyperv_post_debug_data_output *post_data_out = NULL;
    hwaddr in_len, out_len;
    HvSynDbgMsg msg;

    if (fast || !hv_syndbg_handler) {
        ret = HV_STATUS_INVALID_HYPERCALL_CODE;
        goto cleanup;
    }

    in_len = sizeof(*post_data_in);
    post_data_in = cpu_physical_memory_map(ingpa, &in_len, 0);
    if (!post_data_in || in_len < sizeof(*post_data_in)) {
        ret = HV_STATUS_INSUFFICIENT_MEMORY;
        goto cleanup;
    }

    if (post_data_in->count > TARGET_PAGE_SIZE - sizeof(*post_data_in)) {
        ret = HV_STATUS_INVALID_PARAMETER;
        goto cleanup;
    }

    out_len = sizeof(*post_data_out);
    post_data_out = cpu_physical_memory_map(outgpa, &out_len, 1);
    if (!post_data_out || out_len < sizeof(*post_data_out)) {
        ret = HV_STATUS_INSUFFICIENT_MEMORY;
        goto cleanup;
    }

    msg.type = HV_SYNDBG_MSG_SEND;
    msg.u.send.buf_gpa = ingpa + sizeof(*post_data_in);
    msg.u.send.count = post_data_in->count;
    msg.u.send.is_raw = true;
    ret = hv_syndbg_handler(hv_syndbg_context, &msg);
    if (ret != HV_STATUS_SUCCESS) {
        goto cleanup;
    }

    post_data_out->pending_count = msg.u.send.pending_count;
    ret = post_data_out->pending_count ? HV_STATUS_INSUFFICIENT_BUFFERS :
                                         HV_STATUS_SUCCESS;
cleanup:
    if (post_data_out) {
        cpu_physical_memory_unmap(post_data_out,
                                  sizeof(*post_data_out), 1, out_len);
    }

    if (post_data_in) {
        cpu_physical_memory_unmap(post_data_in,
                                  sizeof(*post_data_in), 0, in_len);
    }

    return ret;
}

uint32_t hyperv_syndbg_send(uint64_t ingpa, uint32_t count)
{
    HvSynDbgMsg msg;

    if (!hv_syndbg_handler) {
        return HV_SYNDBG_STATUS_INVALID;
    }

    msg.type = HV_SYNDBG_MSG_SEND;
    msg.u.send.buf_gpa = ingpa;
    msg.u.send.count = count;
    msg.u.send.is_raw = false;
    if (hv_syndbg_handler(hv_syndbg_context, &msg)) {
        return HV_SYNDBG_STATUS_INVALID;
    }

    return HV_SYNDBG_STATUS_SEND_SUCCESS;
}

uint32_t hyperv_syndbg_recv(uint64_t ingpa, uint32_t count)
{
    uint16_t ret;
    HvSynDbgMsg msg;

    if (!hv_syndbg_handler) {
        return HV_SYNDBG_STATUS_INVALID;
    }

    msg.type = HV_SYNDBG_MSG_RECV;
    msg.u.recv.buf_gpa = ingpa;
    msg.u.recv.count = count;
    msg.u.recv.options = 0;
    msg.u.recv.timeout = 0;
    msg.u.recv.is_raw = false;
    ret = hv_syndbg_handler(hv_syndbg_context, &msg);
    if (ret != HV_STATUS_SUCCESS) {
        return 0;
    }

    return HV_SYNDBG_STATUS_SET_SIZE(HV_SYNDBG_STATUS_RECV_SUCCESS,
                                     msg.u.recv.retrieved_count);
}

void hyperv_syndbg_set_pending_page(uint64_t ingpa)
{
    HvSynDbgMsg msg;

    if (!hv_syndbg_handler) {
        return;
    }

    msg.type = HV_SYNDBG_MSG_SET_PENDING_PAGE;
    msg.u.pending_page.buf_gpa = ingpa;
    hv_syndbg_handler(hv_syndbg_context, &msg);
}

uint64_t hyperv_syndbg_query_options(void)
{
    HvSynDbgMsg msg;

    if (!hv_syndbg_handler) {
        return 0;
    }

    msg.type = HV_SYNDBG_MSG_QUERY_OPTIONS;
    if (hv_syndbg_handler(hv_syndbg_context, &msg) != HV_STATUS_SUCCESS) {
        return 0;
    }

    return msg.u.query_options.options;
}

static bool vmbus_recommended_features_enabled;

bool hyperv_are_vmbus_recommended_features_enabled(void)
{
    return vmbus_recommended_features_enabled;
}

void hyperv_set_vmbus_recommended_features_enabled(void)
{
    vmbus_recommended_features_enabled = true;
}