/*
 * Matching-line excerpts from Linux KVM's x86 IRQ routing code
 * (arch/x86/kvm/irq_comm.c). The listing is non-contiguous;
 * elided code is marked with "...".
 */
// SPDX-License-Identifier: GPL-2.0-only

/* ... */

#include <trace/events/kvm.h>
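
/* ... */

/* ->set callback: drive a routed GSI into a pin of the emulated i8259 PIC. */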
static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e,
			   struct kvm *kvm, int irq_source_id, int level,
			   bool line_status)
{
	struct kvm_pic *pic = kvm->arch.vpic;

	return kvm_pic_set_irq(pic, e->irqchip.pin, irq_source_id, level);
}
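
/* ->set callback: drive a routed GSI into a pin of the emulated I/O APIC. */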
static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e,
			      struct kvm *kvm, int irq_source_id, int level,
			      bool line_status)
{
	struct kvm_ioapic *ioapic = kvm->arch.vioapic;

	return kvm_ioapic_set_irq(ioapic, e->irqchip.pin, irq_source_id, level,
				line_status);
}
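
/*
 * Deliver an interrupt to the guest's local APIC(s). The fast path handles
 * the common cases; otherwise every vCPU is scanned for a destination match,
 * with lowest-priority delivery arbitrated across the matches (by APIC
 * priority, or by vector hashing when that is enabled).
 */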
int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
		struct kvm_lapic_irq *irq, struct dest_map *dest_map)
{
	int r = -1;
	struct kvm_vcpu *vcpu, *lowest = NULL;
	/* ... */

	if (kvm_irq_delivery_to_apic_fast(kvm, src, irq, &r, dest_map))
		return r;

	if (irq->dest_mode == APIC_DEST_PHYSICAL &&
	    irq->dest_id == 0xff && kvm_lowest_prio_delivery(irq)) {
		/* ... */
		irq->delivery_mode = APIC_DM_FIXED;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* ... */
		if (!kvm_apic_match_dest(vcpu, src, irq->shorthand,
					irq->dest_id, irq->dest_mode))
			continue;

		if (!kvm_lowest_prio_delivery(irq)) {
			/* ... */
		} else if (kvm_apic_sw_enabled(vcpu->arch.apic)) {
			/* ... */
		}
	}

	if (dest_vcpus != 0) {
		int idx = kvm_vector_to_index(irq->vector, dest_vcpus,
					dest_vcpu_bitmap, KVM_MAX_VCPUS);

		lowest = kvm_get_vcpu(kvm, idx);
	}
	/* ... */
	return r;
}
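
/* Decode an MSI routing entry's address/data payload into a kvm_lapic_irq. */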
void kvm_set_msi_irq(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
		     struct kvm_lapic_irq *irq)
{
	struct msi_msg msg = { .address_lo = e->msi.address_lo,
			       .address_hi = e->msi.address_hi,
			       .data = e->msi.data };

	trace_kvm_msi_set_irq(msg.address_lo | (kvm->arch.x2apic_format ?
			      (u64)msg.address_hi << 32 : 0), msg.data);

	irq->dest_id = x86_msi_msg_get_destid(&msg, kvm->arch.x2apic_format);
	irq->vector = msg.arch_data.vector;
	irq->dest_mode = kvm_lapic_irq_dest_mode(msg.arch_addr_lo.dest_mode_logical);
	irq->trig_mode = msg.arch_data.is_level;
	irq->delivery_mode = msg.arch_data.delivery_mode << 8;
	irq->msi_redir_hint = msg.arch_addr_lo.redirect_hint;
	irq->level = 1;
	irq->shorthand = APIC_DEST_NOSHORT;
}
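
/*
 * With the x2APIC MSI format, address_hi bits 31:8 extend the destination
 * ID, and its low byte must be zero.
 */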
static inline bool kvm_msi_route_invalid(struct kvm *kvm,
		struct kvm_kernel_irq_routing_entry *e)
{
	return kvm->arch.x2apic_format && (e->msi.address_hi & 0xff);
}
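
/* ->set callback for MSI routes: validate and decode, then deliver. */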
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
		struct kvm *kvm, int irq_source_id, int level, bool line_status)
{
	struct kvm_lapic_irq irq;

	if (kvm_msi_route_invalid(kvm, e))
		return -EINVAL;

	if (!level)
		return -1;

	kvm_set_msi_irq(kvm, e, &irq);

	return kvm_irq_delivery_to_apic(kvm, NULL, &irq, NULL);
}
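
/* ->set callback for Hyper-V synthetic interrupts (SynIC SINTs). */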
static int kvm_hv_set_sint(struct kvm_kernel_irq_routing_entry *e,
		    struct kvm *kvm, int irq_source_id, int level,
		    bool line_status)
{
	if (!level)
		return -1;

	return kvm_hv_synic_set_irq(kvm, e->hv_sint.vcpu, e->hv_sint.sint);
}
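
/*
 * Inject from atomic context (e.g. an irqfd wakeup). Only route types that
 * can complete without sleeping are handled; everything else returns
 * -EWOULDBLOCK so the caller retries from process context.
 */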
int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
			      struct kvm *kvm, int irq_source_id, int level,
			      bool line_status)
{
	struct kvm_lapic_irq irq;
	int r;

	switch (e->type) {
	case KVM_IRQ_ROUTING_HV_SINT:
		return kvm_hv_set_sint(e, kvm, irq_source_id, level,
				       line_status);
	case KVM_IRQ_ROUTING_MSI:
		if (kvm_msi_route_invalid(kvm, e))
			return -EINVAL;

		kvm_set_msi_irq(kvm, e, &irq);

		if (kvm_irq_delivery_to_apic_fast(kvm, NULL, &irq, &r, NULL))
			return r;
		break;
#ifdef CONFIG_KVM_XEN
	case KVM_IRQ_ROUTING_XEN_EVTCHN:
		if (!level)
			return -1;
		return kvm_xen_set_evtchn_fast(&e->xen_evtchn, kvm);
#endif
	default:
		break;
	}

	return -EWOULDBLOCK;
}
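
/* Allocate an IRQ source ID from the per-VM bitmap. */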
int kvm_request_irq_source_id(struct kvm *kvm)
{
	unsigned long *bitmap = &kvm->arch.irq_sources_bitmap;
	int irq_source_id;

	mutex_lock(&kvm->irq_lock);
	irq_source_id = find_first_zero_bit(bitmap, BITS_PER_LONG);
	if (irq_source_id >= BITS_PER_LONG) {
		irq_source_id = -EFAULT;
		goto unlock;
	}
	/* ... */
	set_bit(irq_source_id, bitmap);
unlock:
	mutex_unlock(&kvm->irq_lock);
	return irq_source_id;
}
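
/* Release an IRQ source ID and clear any state it left in the in-kernel irqchips. */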
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id)
{
	mutex_lock(&kvm->irq_lock);
	/* ... validate irq_source_id ... */
	clear_bit(irq_source_id, &kvm->arch.irq_sources_bitmap);
	if (!irqchip_kernel(kvm))
		goto unlock;

	kvm_ioapic_clear_all(kvm->arch.vioapic, irq_source_id);
	kvm_pic_clear_all(kvm->arch.vpic, irq_source_id);
unlock:
	mutex_unlock(&kvm->irq_lock);
}
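
/* Mask notifiers: writers hold irq_lock, readers walk the list under SRCU. */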
void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
				    struct kvm_irq_mask_notifier *kimn)
{
	mutex_lock(&kvm->irq_lock);
	kimn->irq = irq;
	hlist_add_head_rcu(&kimn->link, &kvm->arch.mask_notifier_list);
	mutex_unlock(&kvm->irq_lock);
}
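
/* Unregistration synchronizes SRCU so the notifier can be freed on return. */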
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
				      struct kvm_irq_mask_notifier *kimn)
{
	mutex_lock(&kvm->irq_lock);
	hlist_del_rcu(&kimn->link);
	mutex_unlock(&kvm->irq_lock);
	synchronize_srcu(&kvm->irq_srcu);
}
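
/* Notify listeners that the mask state of an irqchip pin's GSI changed. */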
void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
			     bool mask)
{
	struct kvm_irq_mask_notifier *kimn;
	int idx, gsi;

	idx = srcu_read_lock(&kvm->irq_srcu);
	gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
	if (gsi != -1)
		hlist_for_each_entry_rcu(kimn, &kvm->arch.mask_notifier_list, link)
			if (kimn->irq == gsi)
				kimn->func(kimn, mask);
	srcu_read_unlock(&kvm->irq_srcu, idx);
}
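
/* Routing can only be set once an in-kernel irqchip has been created. */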
bool kvm_arch_can_set_irq_routing(struct kvm *kvm)
{
	return irqchip_in_kernel(kvm);
}
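
/*
 * Translate a userspace routing entry into the kernel-side form, selecting
 * the ->set callback by route type.
 */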
int kvm_set_routing_entry(struct kvm *kvm,
			  struct kvm_kernel_irq_routing_entry *e,
			  const struct kvm_irq_routing_entry *ue)
{
	/* ... callers must check kvm_arch_can_set_irq_routing() first ... */
	switch (ue->type) {
	case KVM_IRQ_ROUTING_IRQCHIP:
		if (irqchip_split(kvm))
			return -EINVAL;
		e->irqchip.pin = ue->u.irqchip.pin;
		switch (ue->u.irqchip.irqchip) {
		case KVM_IRQCHIP_PIC_SLAVE:
			e->irqchip.pin += PIC_NUM_PINS / 2;
			fallthrough;
		case KVM_IRQCHIP_PIC_MASTER:
			if (ue->u.irqchip.pin >= PIC_NUM_PINS / 2)
				return -EINVAL;
			e->set = kvm_set_pic_irq;
			break;
		case KVM_IRQCHIP_IOAPIC:
			if (ue->u.irqchip.pin >= KVM_IOAPIC_NUM_PINS)
				return -EINVAL;
			e->set = kvm_set_ioapic_irq;
			break;
		default:
			return -EINVAL;
		}
		e->irqchip.irqchip = ue->u.irqchip.irqchip;
		break;
	case KVM_IRQ_ROUTING_MSI:
		e->set = kvm_set_msi;
		e->msi.address_lo = ue->u.msi.address_lo;
		e->msi.address_hi = ue->u.msi.address_hi;
		e->msi.data = ue->u.msi.data;

		if (kvm_msi_route_invalid(kvm, e))
			return -EINVAL;
		break;
	case KVM_IRQ_ROUTING_HV_SINT:
		e->set = kvm_hv_set_sint;
		e->hv_sint.vcpu = ue->u.hv_sint.vcpu;
		e->hv_sint.sint = ue->u.hv_sint.sint;
		break;
#ifdef CONFIG_KVM_XEN
	case KVM_IRQ_ROUTING_XEN_EVTCHN:
		return kvm_xen_setup_evtchn(kvm, e, ue);
#endif
	default:
		return -EINVAL;
	}

	return 0;
}
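
/* Does this interrupt target exactly one vCPU? If so, return it in *dest_vcpu. */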
bool kvm_intr_is_single_vcpu(struct kvm *kvm, struct kvm_lapic_irq *irq,
			     struct kvm_vcpu **dest_vcpu)
{
	if (kvm_intr_is_single_vcpu_fast(kvm, irq, dest_vcpu))
		return true;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* ... */
		if (!kvm_apic_match_dest(vcpu, NULL, irq->shorthand,
					irq->dest_id, irq->dest_mode))
			continue;
		/* ... count matches; a second one means not single ... */
	}
	/* ... */
}
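
/* ... ROUTING_ENTRY macros and the default_routing[] table, which fans the
 * legacy GSIs out to the PIC and I/O APIC pins, are elided ... */

/* Install the default GSI-to-irqchip routing table. */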
int kvm_setup_default_irq_routing(struct kvm *kvm)
{
	return kvm_set_irq_routing(kvm, default_routing,
				   ARRAY_SIZE(default_routing), 0);
}
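
/* Zero-length table: with a split irqchip there are no kernel-side routes. */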
static const struct kvm_irq_routing_entry empty_routing[] = {};

int kvm_setup_empty_irq_routing(struct kvm *kvm)
{
	return kvm_set_irq_routing(kvm, empty_routing, 0, 0);
}
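
/* A routing change can alter which EOIs a split-irqchip vCPU must intercept. */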
void kvm_arch_post_irq_routing_update(struct kvm *kvm)
{
	if (!irqchip_split(kvm))
		return;
	kvm_make_scan_ioapic_request(kvm);
}
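
/*
 * Recompute the vectors for which this vCPU must intercept EOIs: level-triggered
 * MSI-format routes on the reserved "IOAPIC" pins that target this vCPU (or
 * still have an EOI pending).
 */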
void kvm_scan_ioapic_routes(struct kvm_vcpu *vcpu,
			    ulong *ioapic_handled_vectors)
{
	struct kvm *kvm = vcpu->kvm;
	/* ... */

	idx = srcu_read_lock(&kvm->irq_srcu);
	table = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
	nr_ioapic_pins = min_t(u32, table->nr_rt_entries,
			       kvm->arch.nr_reserved_ioapic_pins);
	for (i = 0; i < nr_ioapic_pins; i++) {
		hlist_for_each_entry(entry, &table->map[i], link) {
			struct kvm_lapic_irq irq;

			if (entry->type != KVM_IRQ_ROUTING_MSI)
				continue;

			kvm_set_msi_irq(vcpu->kvm, entry, &irq);
			/* ... __set_bit() level-triggered vectors aimed at
			 * this vCPU in ioapic_handled_vectors ... */
		}
	}
	srcu_read_unlock(&kvm->irq_srcu, idx);
}
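
/* Give Hyper-V emulation a chance to react to routing changes. */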
void kvm_arch_irq_routing_update(struct kvm *kvm)
{
	kvm_hv_irq_routing_update(kvm);
}