// SPDX-License-Identifier: GPL-2.0-only
/*
 * irq_comm.c: Common API for in kernel interrupt controller
 * Copyright (c) 2007, Intel Corporation.
 *
 * Authors:
 *   Yaozu (Eddie) Dong <Eddie.dong@intel.com>
 *
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 */

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/rculist.h>

#include <trace/events/kvm.h>

#include "irq.h"

#include "ioapic.h"

#include "lapic.h"

#include "hyperv.h"
#include "x86.h"

static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e,
			   struct kvm *kvm, int irq_source_id, int level,
			   bool line_status)
{
	struct kvm_pic *pic = kvm->arch.vpic;
	return kvm_pic_set_irq(pic, e->irqchip.pin, irq_source_id, level);
}

static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e,
			      struct kvm *kvm, int irq_source_id, int level,
			      bool line_status)
{
	struct kvm_ioapic *ioapic = kvm->arch.vioapic;
	return kvm_ioapic_set_irq(ioapic, e->irqchip.pin, irq_source_id, level,
				  line_status);
}

int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
			     struct kvm_lapic_irq *irq, struct dest_map *dest_map)
{
	int i, r = -1;
	struct kvm_vcpu *vcpu, *lowest = NULL;
	unsigned long dest_vcpu_bitmap[BITS_TO_LONGS(KVM_MAX_VCPUS)];
	unsigned int dest_vcpus = 0;

	if (kvm_irq_delivery_to_apic_fast(kvm, src, irq, &r, dest_map))
		return r;

	if (irq->dest_mode == APIC_DEST_PHYSICAL &&
	    irq->dest_id == 0xff && kvm_lowest_prio_delivery(irq)) {
		printk(KERN_INFO "kvm: apic: phys broadcast and lowest prio\n");
		irq->delivery_mode = APIC_DM_FIXED;
	}

	memset(dest_vcpu_bitmap, 0, sizeof(dest_vcpu_bitmap));

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!kvm_apic_present(vcpu))
			continue;

		if (!kvm_apic_match_dest(vcpu, src, irq->shorthand,
					 irq->dest_id, irq->dest_mode))
			continue;

		if (!kvm_lowest_prio_delivery(irq)) {
			if (r < 0)
				r = 0;
			r += kvm_apic_set_irq(vcpu, irq, dest_map);
		} else if (kvm_apic_sw_enabled(vcpu->arch.apic)) {
			if (!kvm_vector_hashing_enabled()) {
				if (!lowest)
					lowest = vcpu;
				else if (kvm_apic_compare_prio(vcpu, lowest) < 0)
					lowest = vcpu;
			} else {
				__set_bit(i, dest_vcpu_bitmap);
				dest_vcpus++;
			}
		}
	}

	if (dest_vcpus != 0) {
		int idx = kvm_vector_to_index(irq->vector, dest_vcpus,
					      dest_vcpu_bitmap, KVM_MAX_VCPUS);

		lowest = kvm_get_vcpu(kvm, idx);
	}

	if (lowest)
		r = kvm_apic_set_irq(lowest, irq, dest_map);

	return r;
}

void kvm_set_msi_irq(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
		     struct kvm_lapic_irq *irq)
{
	struct msi_msg msg = { .address_lo = e->msi.address_lo,
			       .address_hi = e->msi.address_hi,
			       .data = e->msi.data };

	trace_kvm_msi_set_irq(msg.address_lo | (kvm->arch.x2apic_format ?
			      (u64)msg.address_hi << 32 : 0), msg.data);

	irq->dest_id = x86_msi_msg_get_destid(&msg, kvm->arch.x2apic_format);
	irq->vector = msg.arch_data.vector;
	irq->dest_mode = kvm_lapic_irq_dest_mode(msg.arch_addr_lo.dest_mode_logical);
	irq->trig_mode = msg.arch_data.is_level;
	irq->delivery_mode = msg.arch_data.delivery_mode << 8;
	irq->msi_redir_hint = msg.arch_addr_lo.redirect_hint;
	irq->level = 1;
	irq->shorthand = APIC_DEST_NOSHORT;
}
EXPORT_SYMBOL_GPL(kvm_set_msi_irq);
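/*
 * For reference, the standard x86 MSI message layout that kvm_set_msi_irq()
 * decodes above (see the Intel SDM for the authoritative definition; with
 * kvm->arch.x2apic_format, address_hi additionally carries high destination
 * ID bits, which x86_msi_msg_get_destid() folds in):
 *
 *	address_lo:	bits 31:20	0xfee (fixed base)
 *			bits 19:12	destination ID
 *			bit 3		redirection hint
 *			bit 2		destination mode (0=physical, 1=logical)
 *	data:		bit 15		trigger mode (0=edge, 1=level)
 *			bits 10:8	delivery mode
 *			bits 7:0	vector
 */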
static inline bool kvm_msi_route_invalid(struct kvm *kvm,
					 struct kvm_kernel_irq_routing_entry *e)
{
	return kvm->arch.x2apic_format && (e->msi.address_hi & 0xff);
}

int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
		struct kvm *kvm, int irq_source_id, int level, bool line_status)
{
	struct kvm_lapic_irq irq;

	if (kvm_msi_route_invalid(kvm, e))
		return -EINVAL;

	if (!level)
		return -1;

	kvm_set_msi_irq(kvm, e, &irq);

	return kvm_irq_delivery_to_apic(kvm, NULL, &irq, NULL);
}

static int kvm_hv_set_sint(struct kvm_kernel_irq_routing_entry *e,
			   struct kvm *kvm, int irq_source_id, int level,
			   bool line_status)
{
	if (!level)
		return -1;

	return kvm_hv_synic_set_irq(kvm, e->hv_sint.vcpu, e->hv_sint.sint);
}

int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
			      struct kvm *kvm, int irq_source_id, int level,
			      bool line_status)
{
	struct kvm_lapic_irq irq;
	int r;

	switch (e->type) {
	case KVM_IRQ_ROUTING_HV_SINT:
		return kvm_hv_set_sint(e, kvm, irq_source_id, level,
				       line_status);

	case KVM_IRQ_ROUTING_MSI:
		if (kvm_msi_route_invalid(kvm, e))
			return -EINVAL;

		kvm_set_msi_irq(kvm, e, &irq);

		if (kvm_irq_delivery_to_apic_fast(kvm, NULL, &irq, &r, NULL))
			return r;
		break;

	default:
		break;
	}

	return -EWOULDBLOCK;
}

int kvm_request_irq_source_id(struct kvm *kvm)
{
	unsigned long *bitmap = &kvm->arch.irq_sources_bitmap;
	int irq_source_id;

	mutex_lock(&kvm->irq_lock);
	irq_source_id = find_first_zero_bit(bitmap, BITS_PER_LONG);

	if (irq_source_id >= BITS_PER_LONG) {
		printk(KERN_WARNING "kvm: exhaust allocatable IRQ sources!\n");
		irq_source_id = -EFAULT;
		goto unlock;
	}

	ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);
	ASSERT(irq_source_id != KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID);
	set_bit(irq_source_id, bitmap);
unlock:
	mutex_unlock(&kvm->irq_lock);

	return irq_source_id;
}

void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id)
{
	ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);
	ASSERT(irq_source_id != KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID);

	mutex_lock(&kvm->irq_lock);
	if (irq_source_id < 0 ||
	    irq_source_id >= BITS_PER_LONG) {
		printk(KERN_ERR "kvm: IRQ source ID out of range!\n");
		goto unlock;
	}
	clear_bit(irq_source_id, &kvm->arch.irq_sources_bitmap);
	if (!irqchip_kernel(kvm))
		goto unlock;

	kvm_ioapic_clear_all(kvm->arch.vioapic, irq_source_id);
	kvm_pic_clear_all(kvm->arch.vpic, irq_source_id);
unlock:
	mutex_unlock(&kvm->irq_lock);
}
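/*
 * Illustrative lifecycle of an IRQ source ID (a hypothetical caller, not
 * taken from this file; kvm_set_irq() is the generic GSI injection entry
 * point declared in <linux/kvm_host.h>):
 *
 *	int id = kvm_request_irq_source_id(kvm);
 *
 *	if (id < 0)
 *		return id;
 *	kvm_set_irq(kvm, id, gsi, 1, false);	- assert the line
 *	kvm_set_irq(kvm, id, gsi, 0, false);	- deassert the line
 *	kvm_free_irq_source_id(kvm, id);	- also clears any asserted state
 */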
void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
				    struct kvm_irq_mask_notifier *kimn)
{
	mutex_lock(&kvm->irq_lock);
	kimn->irq = irq;
	hlist_add_head_rcu(&kimn->link, &kvm->arch.mask_notifier_list);
	mutex_unlock(&kvm->irq_lock);
}

void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
				      struct kvm_irq_mask_notifier *kimn)
{
	mutex_lock(&kvm->irq_lock);
	hlist_del_rcu(&kimn->link);
	mutex_unlock(&kvm->irq_lock);
	synchronize_srcu(&kvm->irq_srcu);
}

void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
			     bool mask)
{
	struct kvm_irq_mask_notifier *kimn;
	int idx, gsi;

	idx = srcu_read_lock(&kvm->irq_srcu);
	gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
	if (gsi != -1)
		hlist_for_each_entry_rcu(kimn, &kvm->arch.mask_notifier_list, link)
			if (kimn->irq == gsi)
				kimn->func(kimn, mask);
	srcu_read_unlock(&kvm->irq_srcu, idx);
}

bool kvm_arch_can_set_irq_routing(struct kvm *kvm)
{
	return irqchip_in_kernel(kvm);
}

int kvm_set_routing_entry(struct kvm *kvm,
			  struct kvm_kernel_irq_routing_entry *e,
			  const struct kvm_irq_routing_entry *ue)
{
	/* We can't check irqchip_in_kernel() here as some callers are
	 * currently initializing the irqchip. Other callers should therefore
	 * check kvm_arch_can_set_irq_routing() before calling this function.
	 */
	switch (ue->type) {
	case KVM_IRQ_ROUTING_IRQCHIP:
		if (irqchip_split(kvm))
			return -EINVAL;
		e->irqchip.pin = ue->u.irqchip.pin;
		switch (ue->u.irqchip.irqchip) {
		case KVM_IRQCHIP_PIC_SLAVE:
			e->irqchip.pin += PIC_NUM_PINS / 2;
			fallthrough;
		case KVM_IRQCHIP_PIC_MASTER:
			if (ue->u.irqchip.pin >= PIC_NUM_PINS / 2)
				return -EINVAL;
			e->set = kvm_set_pic_irq;
			break;
		case KVM_IRQCHIP_IOAPIC:
			if (ue->u.irqchip.pin >= KVM_IOAPIC_NUM_PINS)
				return -EINVAL;
			e->set = kvm_set_ioapic_irq;
			break;
		default:
			return -EINVAL;
		}
		e->irqchip.irqchip = ue->u.irqchip.irqchip;
		break;
	case KVM_IRQ_ROUTING_MSI:
		e->set = kvm_set_msi;
		e->msi.address_lo = ue->u.msi.address_lo;
		e->msi.address_hi = ue->u.msi.address_hi;
		e->msi.data = ue->u.msi.data;

		if (kvm_msi_route_invalid(kvm, e))
			return -EINVAL;
		break;
	case KVM_IRQ_ROUTING_HV_SINT:
		e->set = kvm_hv_set_sint;
		e->hv_sint.vcpu = ue->u.hv_sint.vcpu;
		e->hv_sint.sint = ue->u.hv_sint.sint;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

bool kvm_intr_is_single_vcpu(struct kvm *kvm, struct kvm_lapic_irq *irq,
			     struct kvm_vcpu **dest_vcpu)
{
	int i, r = 0;
	struct kvm_vcpu *vcpu;

	if (kvm_intr_is_single_vcpu_fast(kvm, irq, dest_vcpu))
		return true;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!kvm_apic_present(vcpu))
			continue;

		if (!kvm_apic_match_dest(vcpu, NULL, irq->shorthand,
					 irq->dest_id, irq->dest_mode))
			continue;

		if (++r == 2)
			return false;

		*dest_vcpu = vcpu;
	}

	return r == 1;
}
EXPORT_SYMBOL_GPL(kvm_intr_is_single_vcpu);
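/*
 * Default routing for a newly created in-kernel irqchip: GSIs 0-15 fan out
 * to both the PIC (SELECT_PIC() picks master or slave, pin = GSI % 8) and
 * the IOAPIC, while GSIs 16-23 exist only on the IOAPIC.
 */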
#define IOAPIC_ROUTING_ENTRY(irq) \
	{ .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP,	\
	  .u.irqchip = { .irqchip = KVM_IRQCHIP_IOAPIC, .pin = (irq) } }
#define ROUTING_ENTRY1(irq) IOAPIC_ROUTING_ENTRY(irq)

#define PIC_ROUTING_ENTRY(irq) \
	{ .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP,	\
	  .u.irqchip = { .irqchip = SELECT_PIC(irq), .pin = (irq) % 8 } }
#define ROUTING_ENTRY2(irq) \
	IOAPIC_ROUTING_ENTRY(irq), PIC_ROUTING_ENTRY(irq)

static const struct kvm_irq_routing_entry default_routing[] = {
	ROUTING_ENTRY2(0), ROUTING_ENTRY2(1),
	ROUTING_ENTRY2(2), ROUTING_ENTRY2(3),
	ROUTING_ENTRY2(4), ROUTING_ENTRY2(5),
	ROUTING_ENTRY2(6), ROUTING_ENTRY2(7),
	ROUTING_ENTRY2(8), ROUTING_ENTRY2(9),
	ROUTING_ENTRY2(10), ROUTING_ENTRY2(11),
	ROUTING_ENTRY2(12), ROUTING_ENTRY2(13),
	ROUTING_ENTRY2(14), ROUTING_ENTRY2(15),
	ROUTING_ENTRY1(16), ROUTING_ENTRY1(17),
	ROUTING_ENTRY1(18), ROUTING_ENTRY1(19),
	ROUTING_ENTRY1(20), ROUTING_ENTRY1(21),
	ROUTING_ENTRY1(22), ROUTING_ENTRY1(23),
};

int kvm_setup_default_irq_routing(struct kvm *kvm)
{
	return kvm_set_irq_routing(kvm, default_routing,
				   ARRAY_SIZE(default_routing), 0);
}

static const struct kvm_irq_routing_entry empty_routing[] = {};

int kvm_setup_empty_irq_routing(struct kvm *kvm)
{
	return kvm_set_irq_routing(kvm, empty_routing, 0, 0);
}

void kvm_arch_post_irq_routing_update(struct kvm *kvm)
{
	if (!irqchip_split(kvm))
		return;
	kvm_make_scan_ioapic_request(kvm);
}

void kvm_scan_ioapic_routes(struct kvm_vcpu *vcpu,
			    ulong *ioapic_handled_vectors)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_kernel_irq_routing_entry *entry;
	struct kvm_irq_routing_table *table;
	u32 i, nr_ioapic_pins;
	int idx;

	idx = srcu_read_lock(&kvm->irq_srcu);
	table = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
	nr_ioapic_pins = min_t(u32, table->nr_rt_entries,
			       kvm->arch.nr_reserved_ioapic_pins);
	for (i = 0; i < nr_ioapic_pins; ++i) {
		hlist_for_each_entry(entry, &table->map[i], link) {
			struct kvm_lapic_irq irq;

			if (entry->type != KVM_IRQ_ROUTING_MSI)
				continue;

			kvm_set_msi_irq(vcpu->kvm, entry, &irq);

			if (irq.trig_mode &&
			    kvm_apic_match_dest(vcpu, NULL, APIC_DEST_NOSHORT,
						irq.dest_id, irq.dest_mode))
				__set_bit(irq.vector, ioapic_handled_vectors);
		}
	}
	srcu_read_unlock(&kvm->irq_srcu, idx);
}

void kvm_arch_irq_routing_update(struct kvm *kvm)
{
	kvm_hv_irq_routing_update(kvm);
}