Lines Matching full:cs
47 static int vcpuop_stop_singleshot_timer(CPUState *cs);
55 static bool kvm_gva_to_gpa(CPUState *cs, uint64_t gva, uint64_t *gpa, in kvm_gva_to_gpa() argument
66 if (kvm_vcpu_ioctl(cs, KVM_TRANSLATE, &tr) || !tr.valid || in kvm_gva_to_gpa()
74 static int kvm_gva_rw(CPUState *cs, uint64_t gva, void *_buf, size_t sz, in kvm_gva_rw() argument
82 if (!kvm_gva_to_gpa(cs, gva, &gpa, &len, is_write)) { in kvm_gva_rw()
99 static inline int kvm_copy_from_gva(CPUState *cs, uint64_t gva, void *buf, in kvm_copy_from_gva() argument
102 return kvm_gva_rw(cs, gva, buf, sz, false); in kvm_copy_from_gva()
105 static inline int kvm_copy_to_gva(CPUState *cs, uint64_t gva, void *buf, in kvm_copy_to_gva() argument
108 return kvm_gva_rw(cs, gva, buf, sz, true); in kvm_copy_to_gva()
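The matches above cover the guest-virtual-address helpers: kvm_gva_to_gpa() asks KVM to walk the guest page tables with the KVM_TRANSLATE vCPU ioctl, kvm_gva_rw() copies through guest-physical memory one page at a time, and kvm_copy_from_gva()/kvm_copy_to_gva() are thin read/write wrappers around it. A minimal sketch of that pattern follows; it assumes QEMU-internal headers (qemu/osdep.h, sysemu/kvm.h, exec/cpu-common.h), simplifies the bounds handling, and is illustrative rather than the file's verbatim code.

static bool gva_to_gpa_sketch(CPUState *cs, uint64_t gva, uint64_t *gpa,
                              size_t *len, bool is_write)
{
    /* struct kvm_translation is the real KVM_TRANSLATE argument. */
    struct kvm_translation tr = {
        .linear_address = gva,
    };

    /* Report how many bytes remain in this guest page so the caller
     * can chunk its copy at page boundaries. */
    *len = TARGET_PAGE_SIZE - (gva & ~TARGET_PAGE_MASK);

    if (kvm_vcpu_ioctl(cs, KVM_TRANSLATE, &tr) || !tr.valid ||
        (is_write && !tr.writeable)) {
        return false;
    }
    *gpa = tr.physical_address;
    return true;
}

static int gva_rw_sketch(CPUState *cs, uint64_t gva, void *_buf, size_t sz,
                         bool is_write)
{
    uint8_t *buf = _buf;
    uint64_t gpa;
    size_t len;

    while (sz) {
        if (!gva_to_gpa_sketch(cs, gva, &gpa, &len, is_write)) {
            return -EFAULT;
        }
        len = MIN(len, sz);               /* never cross the page */

        cpu_physical_memory_rw(gpa, buf, len, is_write);

        buf += len;
        gva += len;
        sz -= len;
    }
    return 0;
}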
188 int kvm_xen_init_vcpu(CPUState *cs) in kvm_xen_init_vcpu() argument
190 X86CPU *cpu = X86_CPU(cs); in kvm_xen_init_vcpu()
205 .u.vcpu_id = cs->cpu_index, in kvm_xen_init_vcpu()
207 err = kvm_vcpu_ioctl(cs, KVM_XEN_VCPU_SET_ATTR, &va); in kvm_xen_init_vcpu()
227 env->xen_singleshot_timer->opaque = cs; in kvm_xen_init_vcpu()
235 env->xen_periodic_timer->opaque = cs; in kvm_xen_init_vcpu()
284 static int kvm_xen_set_vcpu_attr(CPUState *cs, uint16_t type, uint64_t gpa) in kvm_xen_set_vcpu_attr() argument
291 trace_kvm_xen_set_vcpu_attr(cs->cpu_index, type, gpa); in kvm_xen_set_vcpu_attr()
293 return kvm_vcpu_ioctl(cs, KVM_XEN_VCPU_SET_ATTR, &xhsi); in kvm_xen_set_vcpu_attr()
296 static int kvm_xen_set_vcpu_callback_vector(CPUState *cs) in kvm_xen_set_vcpu_callback_vector() argument
298 uint8_t vector = X86_CPU(cs)->env.xen_vcpu_callback_vector; in kvm_xen_set_vcpu_callback_vector()
304 trace_kvm_xen_set_vcpu_callback(cs->cpu_index, vector); in kvm_xen_set_vcpu_callback_vector()
306 return kvm_vcpu_ioctl(cs, KVM_XEN_VCPU_SET_ATTR, &xva); in kvm_xen_set_vcpu_callback_vector()
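kvm_xen_set_vcpu_attr() and kvm_xen_set_vcpu_callback_vector() both reduce to one KVM_XEN_VCPU_SET_ATTR ioctl on the target vCPU. A hedged sketch of the vcpu_info case is below; the attribute type and struct kvm_xen_vcpu_attr come from linux/kvm.h, while the wrapper name is illustrative.

static int set_vcpu_info_attr_sketch(CPUState *cs, uint64_t gpa)
{
    struct kvm_xen_vcpu_attr xhsi = {
        .type = KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO,
        .u.gpa = gpa,     /* pass INVALID_GPA to tear the mapping down */
    };

    return kvm_vcpu_ioctl(cs, KVM_XEN_VCPU_SET_ATTR, &xhsi);
}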
309 static void do_set_vcpu_callback_vector(CPUState *cs, run_on_cpu_data data) in do_set_vcpu_callback_vector() argument
311 X86CPU *cpu = X86_CPU(cs); in do_set_vcpu_callback_vector()
317 kvm_xen_set_vcpu_callback_vector(cs); in do_set_vcpu_callback_vector()
321 static int set_vcpu_info(CPUState *cs, uint64_t gpa) in set_vcpu_info() argument
323 X86CPU *cpu = X86_CPU(cs); in set_vcpu_info()
329 ret = kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO, gpa); in set_vcpu_info()
358 static void do_set_vcpu_info_default_gpa(CPUState *cs, run_on_cpu_data data) in do_set_vcpu_info_default_gpa() argument
360 X86CPU *cpu = X86_CPU(cs); in do_set_vcpu_info_default_gpa()
367 set_vcpu_info(cs, env->xen_vcpu_info_default_gpa); in do_set_vcpu_info_default_gpa()
371 static void do_set_vcpu_info_gpa(CPUState *cs, run_on_cpu_data data) in do_set_vcpu_info_gpa() argument
373 X86CPU *cpu = X86_CPU(cs); in do_set_vcpu_info_gpa()
378 set_vcpu_info(cs, env->xen_vcpu_info_gpa); in do_set_vcpu_info_gpa()
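do_set_vcpu_info_default_gpa() and do_set_vcpu_info_gpa() are run_on_cpu callbacks, so the attribute is always programmed from the target vCPU's own thread. A sketch of how such a callback is scheduled from another thread; the caller name here is hypothetical, the scheduling API is QEMU's real async_run_on_cpu().

/* Hypothetical caller: hop onto the target vCPU's thread before touching
 * its per-vCPU Xen state; the callback then runs in that vCPU's context,
 * which is what the SET_ATTR ioctls expect. */
static void request_vcpu_info_update_sketch(int vcpu_id, uint64_t gpa)
{
    CPUState *cs = qemu_get_cpu(vcpu_id);

    if (!cs) {
        return;
    }
    async_run_on_cpu(cs, do_set_vcpu_info_gpa, RUN_ON_CPU_HOST_ULONG(gpa));
}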
383 CPUState *cs = qemu_get_cpu(vcpu_id); in kvm_xen_get_vcpu_info_hva() local
384 if (!cs) { in kvm_xen_get_vcpu_info_hva()
388 return X86_CPU(cs)->env.xen_vcpu_info_hva; in kvm_xen_get_vcpu_info_hva()
391 void kvm_xen_maybe_deassert_callback(CPUState *cs) in kvm_xen_maybe_deassert_callback() argument
393 CPUX86State *env = &X86_CPU(cs)->env; in kvm_xen_maybe_deassert_callback()
408 X86_CPU(cs)->env.xen_callback_asserted = false; in kvm_xen_maybe_deassert_callback()
417 CPUState *cs = qemu_get_cpu(0); in kvm_xen_set_callback_asserted() local
419 if (cs) { in kvm_xen_set_callback_asserted()
420 X86_CPU(cs)->env.xen_callback_asserted = true; in kvm_xen_set_callback_asserted()
426 CPUState *cs = qemu_get_cpu(0); in kvm_xen_has_vcpu_callback_vector() local
428 return cs && !!X86_CPU(cs)->env.xen_vcpu_callback_vector; in kvm_xen_has_vcpu_callback_vector()
433 CPUState *cs = qemu_get_cpu(vcpu_id); in kvm_xen_inject_vcpu_callback_vector() local
436 if (!cs) { in kvm_xen_inject_vcpu_callback_vector()
440 vector = X86_CPU(cs)->env.xen_vcpu_callback_vector; in kvm_xen_inject_vcpu_callback_vector()
448 (X86_CPU(cs)->apic_id << MSI_ADDR_DEST_ID_SHIFT), in kvm_xen_inject_vcpu_callback_vector()
462 qemu_cpu_kick(cs); in kvm_xen_inject_vcpu_callback_vector()
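kvm_xen_inject_vcpu_callback_vector() delivers the guest's registered per-vCPU upcall vector by synthesizing an MSI addressed at that vCPU's APIC ID, as the MSI_ADDR_DEST_ID_SHIFT match above suggests. A simplified sketch, with the delivery-mode bits omitted (an assumption; only the address/data layout is shown):

static void inject_callback_msi_sketch(CPUState *cs, uint8_t vector)
{
    MSIMessage msg = {
        /* Route to this vCPU's local APIC by destination ID. */
        .address = APIC_DEFAULT_ADDRESS |
                   (X86_CPU(cs)->apic_id << MSI_ADDR_DEST_ID_SHIFT),
        /* Fixed-delivery mode bits omitted for brevity (assumption). */
        .data = vector,
    };

    kvm_irqchip_send_msi(kvm_state, msg);
}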
475 static int kvm_xen_set_vcpu_timer(CPUState *cs) in kvm_xen_set_vcpu_timer() argument
477 X86CPU *cpu = X86_CPU(cs); in kvm_xen_set_vcpu_timer()
487 return kvm_vcpu_ioctl(cs, KVM_XEN_VCPU_SET_ATTR, &va); in kvm_xen_set_vcpu_timer()
490 static void do_set_vcpu_timer_virq(CPUState *cs, run_on_cpu_data data) in do_set_vcpu_timer_virq() argument
492 QEMU_LOCK_GUARD(&X86_CPU(cs)->env.xen_timers_lock); in do_set_vcpu_timer_virq()
493 kvm_xen_set_vcpu_timer(cs); in do_set_vcpu_timer_virq()
498 CPUState *cs = qemu_get_cpu(vcpu_id); in kvm_xen_set_vcpu_virq() local
500 if (!cs) { in kvm_xen_set_vcpu_virq()
511 if (port && X86_CPU(cs)->env.xen_virq[virq]) { in kvm_xen_set_vcpu_virq()
515 X86_CPU(cs)->env.xen_virq[virq] = port; in kvm_xen_set_vcpu_virq()
517 async_run_on_cpu(cs, do_set_vcpu_timer_virq, in kvm_xen_set_vcpu_virq()
523 static void do_set_vcpu_time_info_gpa(CPUState *cs, run_on_cpu_data data) in do_set_vcpu_time_info_gpa() argument
525 X86CPU *cpu = X86_CPU(cs); in do_set_vcpu_time_info_gpa()
530 kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO, in do_set_vcpu_time_info_gpa()
534 static void do_set_vcpu_runstate_gpa(CPUState *cs, run_on_cpu_data data) in do_set_vcpu_runstate_gpa() argument
536 X86CPU *cpu = X86_CPU(cs); in do_set_vcpu_runstate_gpa()
541 kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR, in do_set_vcpu_runstate_gpa()
545 static void do_vcpu_soft_reset(CPUState *cs, run_on_cpu_data data) in do_vcpu_soft_reset() argument
547 X86CPU *cpu = X86_CPU(cs); in do_vcpu_soft_reset()
557 set_vcpu_info(cs, INVALID_GPA); in do_vcpu_soft_reset()
558 kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO, in do_vcpu_soft_reset()
560 kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR, in do_vcpu_soft_reset()
563 kvm_xen_set_vcpu_callback_vector(cs); in do_vcpu_soft_reset()
565 QEMU_LOCK_GUARD(&X86_CPU(cs)->env.xen_timers_lock); in do_vcpu_soft_reset()
567 kvm_xen_set_vcpu_timer(cs); in do_vcpu_soft_reset()
569 vcpuop_stop_singleshot_timer(cs); in do_vcpu_soft_reset()
635 CPUState *cs = CPU(cpu); in do_add_to_physmap() local
641 if (kvm_copy_from_gva(cs, arg, &xatp32, sizeof(xatp32))) { in do_add_to_physmap()
650 if (kvm_copy_from_gva(cs, arg, &xatp, sizeof(xatp))) { in do_add_to_physmap()
667 CPUState *cs = CPU(cpu); in do_add_to_physmap_batch() local
674 if (kvm_copy_from_gva(cs, arg, &xatpb32, sizeof(xatpb32))) { in do_add_to_physmap_batch()
686 if (kvm_copy_from_gva(cs, arg, &xatpb, sizeof(xatpb))) { in do_add_to_physmap_batch()
710 if (kvm_copy_from_gva(cs, idxs_gva, &idx, op_sz) || in do_add_to_physmap_batch()
711 kvm_copy_from_gva(cs, gpfns_gva, &gpfn, op_sz)) { in do_add_to_physmap_batch()
719 if (kvm_copy_to_gva(cs, errs_gva, &err, sizeof(err))) { in do_add_to_physmap_batch()
752 CPUState *cs = CPU(cpu); in handle_set_param() local
759 if (kvm_copy_from_gva(cs, arg, &hp, sizeof(hp))) { in handle_set_param()
788 CPUState *cs = CPU(cpu); in handle_get_param() local
795 if (kvm_copy_from_gva(cs, arg, &hp, sizeof(hp))) { in handle_get_param()
828 if (!err && kvm_copy_to_gva(cs, arg, &hp, sizeof(hp))) { in handle_get_param()
890 static int vcpuop_register_vcpu_info(CPUState *cs, CPUState *target, in vcpuop_register_vcpu_info() argument
904 if (kvm_copy_from_gva(cs, arg, &rvi, sizeof(rvi))) { in vcpuop_register_vcpu_info()
917 static int vcpuop_register_vcpu_time_info(CPUState *cs, CPUState *target, in vcpuop_register_vcpu_time_info() argument
932 if (kvm_copy_from_gva(cs, arg, &tma, sizeof(tma))) { in vcpuop_register_vcpu_time_info()
943 if (!kvm_gva_to_gpa(cs, tma.addr.p, &gpa, &len, false) || in vcpuop_register_vcpu_time_info()
953 static int vcpuop_register_runstate_info(CPUState *cs, CPUState *target, in vcpuop_register_runstate_info() argument
968 if (kvm_copy_from_gva(cs, arg, &rma, sizeof(rma))) { in vcpuop_register_runstate_info()
973 if (!kvm_gva_to_gpa(cs, rma.addr.p, &gpa, &len, false)) { in vcpuop_register_runstate_info()
1054 static int vcpuop_set_periodic_timer(CPUState *cs, CPUState *target, in vcpuop_set_periodic_timer() argument
1060 if (kvm_copy_from_gva(cs, arg, &spt, sizeof(spt))) { in vcpuop_set_periodic_timer()
1088 static int do_set_singleshot_timer(CPUState *cs, uint64_t timeout_abs, in do_set_singleshot_timer() argument
1091 CPUX86State *env = &X86_CPU(cs)->env; in do_set_singleshot_timer()
1116 static int vcpuop_set_singleshot_timer(CPUState *cs, uint64_t arg) in vcpuop_set_singleshot_timer() argument
1132 if (kvm_copy_from_gva(cs, arg, &sst, 12)) { in vcpuop_set_singleshot_timer()
1136 QEMU_LOCK_GUARD(&X86_CPU(cs)->env.xen_timers_lock); in vcpuop_set_singleshot_timer()
1143 return do_set_singleshot_timer(cs, sst.timeout_abs_ns, false); in vcpuop_set_singleshot_timer()
1146 static int vcpuop_stop_singleshot_timer(CPUState *cs) in vcpuop_stop_singleshot_timer() argument
1148 CPUX86State *env = &X86_CPU(cs)->env; in vcpuop_stop_singleshot_timer()
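vcpuop_set_singleshot_timer() copies the guest's vcpu_set_singleshot_timer argument in (a fixed 12 bytes rather than sizeof, presumably the ABI size of the struct without tail padding) and arms env->xen_singleshot_timer under xen_timers_lock; vcpuop_stop_singleshot_timer() is the matching disarm. A hedged sketch of that arm/stop pair; xen_now_ns() is a hypothetical stand-in for the current Xen system time, which the real helper derives from the KVM clock.

static int set_singleshot_sketch(CPUState *cs, uint64_t timeout_abs_ns)
{
    CPUX86State *env = &X86_CPU(cs)->env;
    /* xen_now_ns() is hypothetical: "current Xen system time in ns". */
    int64_t delta = timeout_abs_ns - xen_now_ns();

    if (delta < 0) {
        delta = 0;                 /* already in the past: fire at once */
    }

    env->xen_singleshot_timer_ns = timeout_abs_ns;
    timer_mod_ns(env->xen_singleshot_timer,
                 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + delta);
    return 0;
}

static int stop_singleshot_sketch(CPUState *cs)
{
    CPUX86State *env = &X86_CPU(cs)->env;

    timer_del(env->xen_singleshot_timer);     /* harmless if not armed */
    env->xen_singleshot_timer_ns = 0;
    return 0;
}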
1177 CPUState *cs = CPU(cpu); in kvm_xen_hcall_vcpu_op() local
1178 CPUState *dest = cs->cpu_index == vcpu_id ? cs : qemu_get_cpu(vcpu_id); in kvm_xen_hcall_vcpu_op()
1188 err = vcpuop_register_runstate_info(cs, dest, arg); in kvm_xen_hcall_vcpu_op()
1191 err = vcpuop_register_vcpu_time_info(cs, dest, arg); in kvm_xen_hcall_vcpu_op()
1194 err = vcpuop_register_vcpu_info(cs, dest, arg); in kvm_xen_hcall_vcpu_op()
1197 if (cs->cpu_index == vcpu_id) { in kvm_xen_hcall_vcpu_op()
1205 if (cs->cpu_index == vcpu_id) { in kvm_xen_hcall_vcpu_op()
1212 err = vcpuop_set_periodic_timer(cs, dest, arg); in kvm_xen_hcall_vcpu_op()
1231 CPUState *cs = CPU(cpu); in kvm_xen_hcall_evtchn_op() local
1246 if (kvm_copy_from_gva(cs, arg, &status, sizeof(status))) { in kvm_xen_hcall_evtchn_op()
1252 if (!err && kvm_copy_to_gva(cs, arg, &status, sizeof(status))) { in kvm_xen_hcall_evtchn_op()
1261 if (kvm_copy_from_gva(cs, arg, &close, sizeof(close))) { in kvm_xen_hcall_evtchn_op()
1273 if (kvm_copy_from_gva(cs, arg, &unmask, sizeof(unmask))) { in kvm_xen_hcall_evtchn_op()
1285 if (kvm_copy_from_gva(cs, arg, &virq, sizeof(virq))) { in kvm_xen_hcall_evtchn_op()
1291 if (!err && kvm_copy_to_gva(cs, arg, &virq, sizeof(virq))) { in kvm_xen_hcall_evtchn_op()
1300 if (kvm_copy_from_gva(cs, arg, &pirq, sizeof(pirq))) { in kvm_xen_hcall_evtchn_op()
1306 if (!err && kvm_copy_to_gva(cs, arg, &pirq, sizeof(pirq))) { in kvm_xen_hcall_evtchn_op()
1315 if (kvm_copy_from_gva(cs, arg, &ipi, sizeof(ipi))) { in kvm_xen_hcall_evtchn_op()
1321 if (!err && kvm_copy_to_gva(cs, arg, &ipi, sizeof(ipi))) { in kvm_xen_hcall_evtchn_op()
1330 if (kvm_copy_from_gva(cs, arg, &send, sizeof(send))) { in kvm_xen_hcall_evtchn_op()
1342 if (kvm_copy_from_gva(cs, arg, &alloc, sizeof(alloc))) { in kvm_xen_hcall_evtchn_op()
1348 if (!err && kvm_copy_to_gva(cs, arg, &alloc, sizeof(alloc))) { in kvm_xen_hcall_evtchn_op()
1357 if (kvm_copy_from_gva(cs, arg, &interdomain, sizeof(interdomain))) { in kvm_xen_hcall_evtchn_op()
1364 kvm_copy_to_gva(cs, arg, &interdomain, sizeof(interdomain))) { in kvm_xen_hcall_evtchn_op()
1373 if (kvm_copy_from_gva(cs, arg, &vcpu, sizeof(vcpu))) { in kvm_xen_hcall_evtchn_op()
1385 if (kvm_copy_from_gva(cs, arg, &reset, sizeof(reset))) { in kvm_xen_hcall_evtchn_op()
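Most EVTCHNOP_* cases above share one shape: copy the guest's argument struct in from its GVA, call the event-channel emulation backend, and copy the possibly-updated struct back out, returning -EFAULT if either copy fails. A self-contained sketch of that round trip for the status case; handle_evtchn_status() is a hypothetical stand-in for the backend call, and struct evtchn_status is assumed to come from the Xen public headers bundled with QEMU.

static int evtchn_status_round_trip_sketch(CPUState *cs, uint64_t arg)
{
    struct evtchn_status status;
    int err;

    if (kvm_copy_from_gva(cs, arg, &status, sizeof(status))) {
        return -EFAULT;
    }

    /* handle_evtchn_status() is hypothetical; it represents the
     * event-channel backend call made by the real case. */
    err = handle_evtchn_status(&status);

    if (!err && kvm_copy_to_gva(cs, arg, &status, sizeof(status))) {
        err = -EFAULT;
    }
    return err;
}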
1452 static int schedop_shutdown(CPUState *cs, uint64_t arg) in schedop_shutdown() argument
1460 if (kvm_copy_from_gva(cs, arg, &shutdown, sizeof(shutdown))) { in schedop_shutdown()
1466 cpu_dump_state(cs, stderr, CPU_DUMP_CODE); in schedop_shutdown()
1495 CPUState *cs = CPU(cpu); in kvm_xen_hcall_sched_op() local
1500 err = schedop_shutdown(cs, arg); in kvm_xen_hcall_sched_op()
1527 CPUState *cs = CPU(cpu); in kvm_xen_hcall_gnttab_op() local
1535 if (kvm_copy_from_gva(cs, arg, &set, sizeof(set))) { in kvm_xen_hcall_gnttab_op()
1541 if (!err && kvm_copy_to_gva(cs, arg, &set, sizeof(set))) { in kvm_xen_hcall_gnttab_op()
1550 if (kvm_copy_from_gva(cs, arg, &get, sizeof(get))) { in kvm_xen_hcall_gnttab_op()
1556 if (!err && kvm_copy_to_gva(cs, arg, &get, sizeof(get))) { in kvm_xen_hcall_gnttab_op()
1565 if (kvm_copy_from_gva(cs, arg, &size, sizeof(size))) { in kvm_xen_hcall_gnttab_op()
1571 if (!err && kvm_copy_to_gva(cs, arg, &size, sizeof(size))) { in kvm_xen_hcall_gnttab_op()
1596 CPUState *cs = CPU(cpu); in kvm_xen_hcall_physdev_op() local
1606 if (kvm_copy_from_gva(cs, arg, map32, sizeof(*map32))) { in kvm_xen_hcall_physdev_op()
1620 if (kvm_copy_from_gva(cs, arg, &map, sizeof(map))) { in kvm_xen_hcall_physdev_op()
1630 if (!err && kvm_copy_to_gva(cs, arg, &map, in kvm_xen_hcall_physdev_op()
1640 if (kvm_copy_from_gva(cs, arg, &unmap, sizeof(unmap))) { in kvm_xen_hcall_physdev_op()
1646 if (!err && kvm_copy_to_gva(cs, arg, &unmap, sizeof(unmap))) { in kvm_xen_hcall_physdev_op()
1655 if (kvm_copy_from_gva(cs, arg, &eoi, sizeof(eoi))) { in kvm_xen_hcall_physdev_op()
1661 if (!err && kvm_copy_to_gva(cs, arg, &eoi, sizeof(eoi))) { in kvm_xen_hcall_physdev_op()
1670 if (kvm_copy_from_gva(cs, arg, &query, sizeof(query))) { in kvm_xen_hcall_physdev_op()
1676 if (!err && kvm_copy_to_gva(cs, arg, &query, sizeof(query))) { in kvm_xen_hcall_physdev_op()
1685 if (kvm_copy_from_gva(cs, arg, &get, sizeof(get))) { in kvm_xen_hcall_physdev_op()
1691 if (!err && kvm_copy_to_gva(cs, arg, &get, sizeof(get))) { in kvm_xen_hcall_physdev_op()
1809 int kvm_put_xen_state(CPUState *cs) in kvm_put_xen_state() argument
1811 X86CPU *cpu = X86_CPU(cs); in kvm_put_xen_state()
1822 ret = set_vcpu_info(cs, gpa); in kvm_put_xen_state()
1830 ret = kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO, in kvm_put_xen_state()
1839 ret = kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR, in kvm_put_xen_state()
1847 ret = do_set_periodic_timer(cs, env->xen_periodic_timer_period); in kvm_put_xen_state()
1860 ret = do_set_singleshot_timer(cs, env->xen_singleshot_timer_ns, in kvm_put_xen_state()
1870 ret = kvm_xen_set_vcpu_callback_vector(cs); in kvm_put_xen_state()
1877 do_set_vcpu_timer_virq(cs, in kvm_put_xen_state()
1883 int kvm_get_xen_state(CPUState *cs) in kvm_get_xen_state() argument
1885 X86CPU *cpu = X86_CPU(cs); in kvm_get_xen_state()
1923 ret = kvm_vcpu_ioctl(cs, KVM_XEN_VCPU_GET_ATTR, &va); in kvm_get_xen_state()
1935 QEMU_LOCK_GUARD(&X86_CPU(cs)->env.xen_timers_lock); in kvm_get_xen_state()
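kvm_put_xen_state() and kvm_get_xen_state() replay or capture the per-vCPU attributes around migration; the final matches show the timer being read back under xen_timers_lock so a concurrent hypercall cannot re-arm it mid-save. A simplified sketch of that fetch, using the real KVM_XEN_VCPU_GET_ATTR attribute but collapsing the surrounding function:

static int fetch_xen_timer_sketch(CPUState *cs)
{
    CPUX86State *env = &X86_CPU(cs)->env;
    struct kvm_xen_vcpu_attr va = {
        .type = KVM_XEN_VCPU_ATTR_TYPE_TIMER,
    };
    int ret;

    /* Hold xen_timers_lock so a hypercall on another thread cannot
     * re-arm the timer while its state is being captured. */
    QEMU_LOCK_GUARD(&env->xen_timers_lock);

    ret = kvm_vcpu_ioctl(cs, KVM_XEN_VCPU_GET_ATTR, &va);
    if (ret < 0) {
        return ret;
    }

    /* Remember what KVM still has armed so the "put" side can restore
     * it on the destination after migration. */
    env->xen_singleshot_timer_ns = va.u.timer.expires_ns;
    return 0;
}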