// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kvm_host.h>

#include <asm/irq_remapping.h>
#include <asm/cpu.h>

#include "lapic.h"
#include "irq.h"
#include "posted_intr.h"
#include "trace.h"
#include "vmx.h"

/*
 * We maintain a per-CPU linked-list of blocked vCPUs, so that
 * pi_wakeup_handler() can find which vCPU should be woken up.
 */
static DEFINE_PER_CPU(struct list_head, blocked_vcpu_on_cpu);
static DEFINE_PER_CPU(spinlock_t, blocked_vcpu_on_cpu_lock);

static inline struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu)
{
	return &(to_vmx(vcpu)->pi_desc);
}

void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
	struct pi_desc old, new;
	unsigned int dest;

	/*
	 * In case of hot-plug or hot-unplug, we may have to undo
	 * vmx_vcpu_pi_put even if there is no assigned device.  And we
	 * always keep PI.NDST up to date for simplicity: it makes the
	 * code easier, and CPU migration is not a fast path.
	 */
	if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu)
		return;

	/*
	 * If the 'nv' field is POSTED_INTR_WAKEUP_VECTOR, do not change
	 * PI.NDST: pi_post_block() is the one expected to change PI.NDST,
	 * and the wakeup handler expects the vCPU to be on the per-CPU
	 * blocked list that matches PI.NDST.  Otherwise, the vCPU may not
	 * be woken up correctly.
	 */
	if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR || vcpu->cpu == cpu) {
		pi_clear_sn(pi_desc);
		goto after_clear_sn;
	}

	/* The full case: update NDST and clear SN with a single cmpxchg. */
	do {
		old.control = new.control = pi_desc->control;

		dest = cpu_physical_id(cpu);

		if (x2apic_mode)
			new.ndst = dest;
		else
			new.ndst = (dest << 8) & 0xFF00;

		new.sn = 0;
	} while (cmpxchg64(&pi_desc->control, old.control,
			   new.control) != old.control);

after_clear_sn:

	/*
	 * Clear SN before reading the bitmap.  The VT-d firmware
	 * writes the bitmap and reads SN atomically (5.2.3 in the
	 * spec), so it doesn't really have a memory barrier that
	 * pairs with this, but we cannot do that and we need one.
	 */
	smp_mb__after_atomic();

	if (!pi_is_pir_empty(pi_desc))
		pi_set_on(pi_desc);
}
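/*
 * For reference, the 64-bit 'control' word that the cmpxchg64() loops in
 * this file operate on overlays the non-PIR fields of the posted-interrupt
 * descriptor (see struct pi_desc in posted_intr.h; sketched here for
 * illustration only, per the Intel SDM):
 *
 *	bit  0       ON   - Outstanding Notification, some PIR bit is set
 *	bit  1       SN   - Suppress Notification, hardware must not send
 *	                    the notification vector
 *	bits 16-23   NV   - Notification Vector to deliver
 *	bits 32-63   NDST - Notification Destination (target APIC ID)
 *
 * Packing these fields into a single u64 is what allows NDST/NV/SN to be
 * updated atomically with respect to the IOMMU, which may concurrently
 * set PIR bits and ON.
 */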
static bool vmx_can_use_vtd_pi(struct kvm *kvm)
{
	return irqchip_in_kernel(kvm) && enable_apicv &&
		kvm_arch_has_assigned_device(kvm) &&
		irq_remapping_cap(IRQ_POSTING_CAP);
}

void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu)
{
	struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);

	if (!vmx_can_use_vtd_pi(vcpu->kvm))
		return;

	/* Set SN when the vCPU is preempted */
	if (vcpu->preempted)
		pi_set_sn(pi_desc);
}

static void __pi_post_block(struct kvm_vcpu *vcpu)
{
	struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
	struct pi_desc old, new;
	unsigned int dest;

	do {
		old.control = new.control = pi_desc->control;
		WARN(old.nv != POSTED_INTR_WAKEUP_VECTOR,
		     "Wakeup handler not enabled while the vCPU is blocked\n");

		dest = cpu_physical_id(vcpu->cpu);

		if (x2apic_mode)
			new.ndst = dest;
		else
			new.ndst = (dest << 8) & 0xFF00;

		/* set 'NV' to 'notification vector' */
		new.nv = POSTED_INTR_VECTOR;
	} while (cmpxchg64(&pi_desc->control, old.control,
			   new.control) != old.control);

	if (!WARN_ON_ONCE(vcpu->pre_pcpu == -1)) {
		spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
		list_del(&vcpu->blocked_vcpu_list);
		spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
		vcpu->pre_pcpu = -1;
	}
}

/*
 * This routine does the following things for a vCPU that is about to
 * block, if VT-d PI is enabled:
 * - Add the vCPU to the wakeup list, so that when an interrupt is
 *   posted we can find the right vCPU to wake up.
 * - Change the posted-interrupt descriptor as follows:
 *	'NDST' <-- vcpu->pre_pcpu
 *	'NV'   <-- POSTED_INTR_WAKEUP_VECTOR
 * - If 'ON' is set during this process, i.e. at least one interrupt
 *   has already been posted for this vCPU, we cannot block it; in that
 *   case return 1, otherwise return 0.
 */
int pi_pre_block(struct kvm_vcpu *vcpu)
{
	unsigned int dest;
	struct pi_desc old, new;
	struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);

	if (!vmx_can_use_vtd_pi(vcpu->kvm))
		return 0;

	WARN_ON(irqs_disabled());
	local_irq_disable();
	if (!WARN_ON_ONCE(vcpu->pre_pcpu != -1)) {
		vcpu->pre_pcpu = vcpu->cpu;
		spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
		list_add_tail(&vcpu->blocked_vcpu_list,
			      &per_cpu(blocked_vcpu_on_cpu,
				       vcpu->pre_pcpu));
		spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
	}

	do {
		old.control = new.control = pi_desc->control;

		WARN((pi_desc->sn == 1),
		     "Warning: SN field of posted-interrupts is set before blocking\n");

		/*
		 * Since the vCPU can be preempted during this process,
		 * vcpu->cpu could differ from pre_pcpu, so we must set
		 * pre_pcpu as the destination of the wakeup notification
		 * event; that way the wakeup handler can find the right
		 * vCPU to wake up if an interrupt is posted while the
		 * vCPU is blocked.
		 */
		dest = cpu_physical_id(vcpu->pre_pcpu);

		if (x2apic_mode)
			new.ndst = dest;
		else
			new.ndst = (dest << 8) & 0xFF00;

		/* set 'NV' to 'wakeup vector' */
		new.nv = POSTED_INTR_WAKEUP_VECTOR;
	} while (cmpxchg64(&pi_desc->control, old.control,
			   new.control) != old.control);

	/* We should not block the vCPU if an interrupt is posted for it. */
	if (pi_test_on(pi_desc) == 1)
		__pi_post_block(vcpu);

	local_irq_enable();
	return (vcpu->pre_pcpu == -1);
}
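/*
 * To illustrate how the pieces above fit together, the block/wakeup
 * handshake runs roughly as follows (a sketch, not normative):
 *
 *	vCPU (about to halt)              IOMMU / wakeup CPU
 *	--------------------              ------------------
 *	pi_pre_block():
 *	  add to blocked list
 *	  NDST = pre_pcpu
 *	  NV = wakeup vector
 *	vCPU sleeps                       device interrupt fires:
 *	                                    set PIR bit, set ON,
 *	                                    send NV to NDST
 *	                                  pi_wakeup_handler():
 *	                                    kvm_vcpu_kick(vcpu)
 *	pi_post_block():
 *	  NV = POSTED_INTR_VECTOR
 *	  remove from blocked list
 *
 * If ON is already set by the time pi_pre_block() finishes, blocking is
 * aborted immediately via __pi_post_block().
 */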
void pi_post_block(struct kvm_vcpu *vcpu)
{
	if (vcpu->pre_pcpu == -1)
		return;

	WARN_ON(irqs_disabled());
	local_irq_disable();
	__pi_post_block(vcpu);
	local_irq_enable();
}

/*
 * Handler for POSTED_INTR_WAKEUP_VECTOR.
 */
void pi_wakeup_handler(void)
{
	struct kvm_vcpu *vcpu;
	int cpu = smp_processor_id();

	spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
	list_for_each_entry(vcpu, &per_cpu(blocked_vcpu_on_cpu, cpu),
			    blocked_vcpu_list) {
		struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);

		if (pi_test_on(pi_desc) == 1)
			kvm_vcpu_kick(vcpu);
	}
	spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
}

void __init pi_init_cpu(int cpu)
{
	INIT_LIST_HEAD(&per_cpu(blocked_vcpu_on_cpu, cpu));
	spin_lock_init(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
}

bool pi_has_pending_interrupt(struct kvm_vcpu *vcpu)
{
	struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);

	return pi_test_on(pi_desc) ||
		(pi_test_sn(pi_desc) && !pi_is_pir_empty(pi_desc));
}

/*
 * Bail out of the block loop if the VM has an assigned
 * device, but the blocking vCPU didn't reconfigure the
 * PI.NV to the wakeup vector, i.e. the assigned device
 * came along after the initial check in pi_pre_block().
 */
void vmx_pi_start_assignment(struct kvm *kvm)
{
	if (!irq_remapping_cap(IRQ_POSTING_CAP))
		return;

	kvm_make_all_cpus_request(kvm, KVM_REQ_UNBLOCK);
}
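/*
 * Note on the NDST encoding repeated in the cmpxchg64() loops above: in
 * xAPIC mode the hardware expects the 8-bit APIC ID in bits 15:8 of NDST,
 * hence the '(dest << 8) & 0xFF00', while x2APIC mode uses the 32-bit ID
 * directly.  A hypothetical helper (a sketch, not part of this file)
 * could factor out the three copies:
 *
 *	static u32 pi_dest_to_ndst(unsigned int dest)
 *	{
 *		return x2apic_mode ? dest : (dest << 8) & 0xFF00;
 *	}
 */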
/*
 * pi_update_irte - set IRTE for Posted-Interrupts
 *
 * @kvm: the VM to update
 * @host_irq: host IRQ of the interrupt
 * @guest_irq: GSI of the interrupt
 * @set: %true to configure posting, %false to revert to remapped mode
 *
 * Returns 0 on success, < 0 on failure.
 */
int pi_update_irte(struct kvm *kvm, unsigned int host_irq, uint32_t guest_irq,
		   bool set)
{
	struct kvm_kernel_irq_routing_entry *e;
	struct kvm_irq_routing_table *irq_rt;
	struct kvm_lapic_irq irq;
	struct kvm_vcpu *vcpu;
	struct vcpu_data vcpu_info;
	int idx, ret = 0;

	if (!vmx_can_use_vtd_pi(kvm))
		return 0;

	idx = srcu_read_lock(&kvm->irq_srcu);
	irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
	if (guest_irq >= irq_rt->nr_rt_entries ||
	    hlist_empty(&irq_rt->map[guest_irq])) {
		pr_warn_once("no route for guest_irq %u/%u (broken user space?)\n",
			     guest_irq, irq_rt->nr_rt_entries);
		goto out;
	}

	hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) {
		if (e->type != KVM_IRQ_ROUTING_MSI)
			continue;
		/*
		 * VT-d PI cannot post multicast/broadcast interrupts to a
		 * vCPU, so we keep using interrupt remapping for those
		 * kinds of interrupts.
		 *
		 * For lowest-priority interrupts, we only support those
		 * with a single CPU as the destination, e.g. the user
		 * configures the interrupt via /proc/irq or uses
		 * irqbalance to make the interrupt single-CPU.
		 *
		 * Full lowest-priority support may be added later.
		 *
		 * In addition, we can only inject generic interrupts
		 * using the PI mechanism, and refuse to route others
		 * through it.
		 */
		kvm_set_msi_irq(kvm, e, &irq);
		if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu) ||
		    !kvm_irq_is_postable(&irq)) {
			/*
			 * Make sure the IRTE is in remapped mode if
			 * we don't handle it in posted mode.
			 */
			ret = irq_set_vcpu_affinity(host_irq, NULL);
			if (ret < 0) {
				printk(KERN_INFO
				       "failed to fall back to remapped mode, irq: %u\n",
				       host_irq);
				goto out;
			}

			continue;
		}

		vcpu_info.pi_desc_addr = __pa(&to_vmx(vcpu)->pi_desc);
		vcpu_info.vector = irq.vector;

		trace_kvm_pi_irte_update(host_irq, vcpu->vcpu_id, e->gsi,
					 vcpu_info.vector, vcpu_info.pi_desc_addr, set);

		if (set)
			ret = irq_set_vcpu_affinity(host_irq, &vcpu_info);
		else
			ret = irq_set_vcpu_affinity(host_irq, NULL);

		if (ret < 0) {
			printk(KERN_INFO "%s: failed to update PI IRTE\n",
			       __func__);
			goto out;
		}
	}

	ret = 0;
out:
	srcu_read_unlock(&kvm->irq_srcu, idx);
	return ret;
}
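/*
 * A brief note on the plumbing (for orientation only; the authoritative
 * code lives in the IOMMU driver): irq_set_vcpu_affinity() forwards the
 * struct vcpu_data to the irq_chip backing host_irq.  For VT-d that is
 * intel_ir_set_vcpu_affinity(), which rewrites the IRTE into posted
 * format, pointing it at vcpu_info.pi_desc_addr with vcpu_info.vector as
 * the virtual vector.  From then on the device interrupt is delivered by
 * setting the PIR bit and ON in the descriptor and sending NV to NDST,
 * with no VM exit while the vCPU runs with posted interrupts enabled.
 * Passing a NULL argument reverts the IRTE to remapped mode.
 */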