Lines matching refs: irq

84 static irqreturn_t vgic_v4_doorbell_handler(int irq, void *info)  in vgic_v4_doorbell_handler()  argument
90 !irqd_irq_disabled(&irq_to_desc(irq)->irq_data)) in vgic_v4_doorbell_handler()
91 disable_irq_nosync(irq); in vgic_v4_doorbell_handler()
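
The doorbell handler fragments above (lines 84-91) show the handler masking its own interrupt with disable_irq_nosync() when it is not already disabled, since a GICv4.0 doorbell would otherwise keep firing. A minimal sketch of how such a handler hangs together, assuming the info cookie is the kvm_vcpu passed at request_irq() time; the pending_last bookkeeping and the kvm_vcpu_kick() call are assumptions about the surrounding code, not part of the listing:

static irqreturn_t doorbell_handler_sketch(int irq, void *info)
{
        /* dev_id cookie passed at request_irq() time (see line 228 below) */
        struct kvm_vcpu *vcpu = info;

        /*
         * Mask the doorbell from hard-irq context; the _nosync variant is
         * used because we are the handler that would otherwise be waited on.
         */
        if (!irqd_irq_disabled(&irq_to_desc(irq)->irq_data))
                disable_irq_nosync(irq);

        /* Assumed: record pending work on the vPE and wake the vCPU */
        vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last = true;
        kvm_vcpu_kick(vcpu);

        return IRQ_HANDLED;
}
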
108 static void vgic_v4_sync_sgi_config(struct its_vpe *vpe, struct vgic_irq *irq) in vgic_v4_sync_sgi_config() argument
110 vpe->sgi_config[irq->intid].enabled = irq->enabled; in vgic_v4_sync_sgi_config()
111 vpe->sgi_config[irq->intid].group = irq->group; in vgic_v4_sync_sgi_config()
112 vpe->sgi_config[irq->intid].priority = irq->priority; in vgic_v4_sync_sgi_config()
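
Lines 108-112 give vgic_v4_sync_sgi_config() essentially in full: the software state kept in the vgic_irq for one SGI is mirrored into the per-vPE sgi_config[] slot that the GIC driver later programs into hardware. Stitched together from the fragments, with only braces and alignment added:

static void vgic_v4_sync_sgi_config(struct its_vpe *vpe, struct vgic_irq *irq)
{
        /* Copy the virtual SGI's enable/group/priority state to the vPE */
        vpe->sgi_config[irq->intid].enabled  = irq->enabled;
        vpe->sgi_config[irq->intid].group    = irq->group;
        vpe->sgi_config[irq->intid].priority = irq->priority;
}
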
126 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, i); in vgic_v4_enable_vsgis() local
131 raw_spin_lock_irqsave(&irq->irq_lock, flags); in vgic_v4_enable_vsgis()
133 if (irq->hw) in vgic_v4_enable_vsgis()
136 irq->hw = true; in vgic_v4_enable_vsgis()
137 irq->host_irq = irq_find_mapping(vpe->sgi_domain, i); in vgic_v4_enable_vsgis()
140 vgic_v4_sync_sgi_config(vpe, irq); in vgic_v4_enable_vsgis()
141 desc = irq_to_desc(irq->host_irq); in vgic_v4_enable_vsgis()
146 ret = irq_set_irqchip_state(irq->host_irq, in vgic_v4_enable_vsgis()
148 irq->pending_latch); in vgic_v4_enable_vsgis()
150 irq->pending_latch = false; in vgic_v4_enable_vsgis()
153 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in vgic_v4_enable_vsgis()
154 vgic_put_irq(vcpu->kvm, irq); in vgic_v4_enable_vsgis()
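
The vgic_v4_enable_vsgis() fragments (lines 126-154) outline a per-SGI loop: take the vgic_irq lock, skip interrupts already marked as hardware-backed, record the host interrupt found via irq_find_mapping() on the vPE's sgi_domain, push configuration and pending state down to the irqchip, then drop the lock and the reference. A hedged reconstruction of that loop; the VGIC_NR_SGIS bound, the local declarations, the irq_domain_activate_irq() step between lines 141 and 146, and the WARN_ON()/goto handling are assumptions:

static void enable_vsgis_sketch(struct kvm_vcpu *vcpu)
{
        struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
        int i;

        for (i = 0; i < VGIC_NR_SGIS; i++) {            /* SGIs 0..15, bound assumed */
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, i);
                struct irq_desc *desc;
                unsigned long flags;
                int ret;

                raw_spin_lock_irqsave(&irq->irq_lock, flags);

                if (irq->hw)            /* already switched to HW delivery */
                        goto unlock;

                irq->hw = true;
                irq->host_irq = irq_find_mapping(vpe->sgi_domain, i);

                /* Push enable/group/priority, then the pending latch, to HW */
                vgic_v4_sync_sgi_config(vpe, irq);
                desc = irq_to_desc(irq->host_irq);
                ret = irq_domain_activate_irq(irq_desc_get_irq_data(desc),
                                              false);   /* assumed step */
                if (!WARN_ON(ret)) {
                        ret = irq_set_irqchip_state(irq->host_irq,
                                                    IRQCHIP_STATE_PENDING,
                                                    irq->pending_latch);
                        WARN_ON(ret);
                        irq->pending_latch = false;
                }
unlock:
                raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
}
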
163 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, i); in vgic_v4_disable_vsgis() local
168 raw_spin_lock_irqsave(&irq->irq_lock, flags); in vgic_v4_disable_vsgis()
170 if (!irq->hw) in vgic_v4_disable_vsgis()
173 irq->hw = false; in vgic_v4_disable_vsgis()
174 ret = irq_get_irqchip_state(irq->host_irq, in vgic_v4_disable_vsgis()
176 &irq->pending_latch); in vgic_v4_disable_vsgis()
179 desc = irq_to_desc(irq->host_irq); in vgic_v4_disable_vsgis()
182 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in vgic_v4_disable_vsgis()
183 vgic_put_irq(vcpu->kvm, irq); in vgic_v4_disable_vsgis()
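
vgic_v4_disable_vsgis() (lines 163-183) is the inverse walk: under the same lock it clears irq->hw, reads the hardware pending bit back into the emulated pending_latch with irq_get_irqchip_state(), and looks up the descriptor of the host interrupt so it can be deactivated. A sketch along the same lines; the loop, the irq_domain_deactivate_irq() call and the WARN_ON() are assumptions:

static void disable_vsgis_sketch(struct kvm_vcpu *vcpu)
{
        int i;

        for (i = 0; i < VGIC_NR_SGIS; i++) {            /* bound assumed */
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, i);
                struct irq_desc *desc;
                unsigned long flags;
                int ret;

                raw_spin_lock_irqsave(&irq->irq_lock, flags);

                if (!irq->hw)           /* never switched to HW delivery */
                        goto unlock;

                irq->hw = false;
                /* Pull the pending bit back from HW into the software latch */
                ret = irq_get_irqchip_state(irq->host_irq,
                                            IRQCHIP_STATE_PENDING,
                                            &irq->pending_latch);
                WARN_ON(ret);

                desc = irq_to_desc(irq->host_irq);
                irq_domain_deactivate_irq(irq_desc_get_irq_data(desc)); /* assumed */
unlock:
                raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
}
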
213 void vgic_v4_get_vlpi_state(struct vgic_irq *irq, bool *val) in vgic_v4_get_vlpi_state() argument
215 struct its_vpe *vpe = &irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe; in vgic_v4_get_vlpi_state()
216 int mask = BIT(irq->intid % BITS_PER_BYTE); in vgic_v4_get_vlpi_state()
221 ptr = va + irq->intid / BITS_PER_BYTE; in vgic_v4_get_vlpi_state()
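
vgic_v4_get_vlpi_state() (lines 213-221) computes a byte offset and a bit mask from the interrupt's intid and reads the corresponding bit out of the vPE's virtual pending table. A short reconstruction; that va comes from page_address() of the vPE's vpt_page and that the function ends with the *val read are assumptions based on the offset arithmetic shown:

void get_vlpi_state_sketch(struct vgic_irq *irq, bool *val)
{
        struct its_vpe *vpe = &irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
        int mask = BIT(irq->intid % BITS_PER_BYTE);     /* bit within the byte */
        void *va;
        u8 *ptr;

        va = page_address(vpe->vpt_page);               /* assumed source of va */
        ptr = va + irq->intid / BITS_PER_BYTE;          /* byte holding this intid */

        *val = !!(*ptr & mask);                         /* assumed final read */
}
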
226 int vgic_v4_request_vpe_irq(struct kvm_vcpu *vcpu, int irq) in vgic_v4_request_vpe_irq() argument
228 return request_irq(irq, vgic_v4_doorbell_handler, 0, "vcpu", vcpu); in vgic_v4_request_vpe_irq()
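
vgic_v4_request_vpe_irq() (lines 226-228) is visible in its entirety: it wires the per-vPE doorbell interrupt to vgic_v4_doorbell_handler(), with no extra flags, "vcpu" as the /proc/interrupts name, and the vcpu pointer as the dev_id cookie that the handler and free_irq() later see:

int vgic_v4_request_vpe_irq(struct kvm_vcpu *vcpu, int irq)
{
        return request_irq(irq, vgic_v4_doorbell_handler, 0, "vcpu", vcpu);
}
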
276 int irq = dist->its_vm.vpes[i]->irq; in vgic_v4_init() local
291 irq_set_status_flags(irq, irq_flags); in vgic_v4_init()
293 ret = vgic_v4_request_vpe_irq(vcpu, irq); in vgic_v4_init()
295 kvm_err("failed to allocate vcpu IRQ%d\n", irq); in vgic_v4_init()
327 int irq = its_vm->vpes[i]->irq; in vgic_v4_teardown() local
329 irq_clear_status_flags(irq, DB_IRQ_FLAGS); in vgic_v4_teardown()
330 free_irq(irq, vcpu); in vgic_v4_teardown()
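
The vgic_v4_init() fragments (lines 276-295) and the vgic_v4_teardown() fragments (lines 327-330) bracket the lifetime of each vPE's doorbell: at init the interrupt's status flags are set and the doorbell is requested, and at teardown the same flags are cleared and the interrupt is freed with the same vcpu dev_id. A hedged sketch of the two per-vCPU loops; the enclosing context (kvm, dist, its_vm, nr_vcpus, ret), the kvm_get_vcpu() lookups, the use of DB_IRQ_FLAGS as the value passed to irq_set_status_flags(), and the error path are assumptions (DB_IRQ_FLAGS is referenced but not defined in the listing):

/* init: one doorbell per vPE/vCPU */
for (i = 0; i < nr_vcpus; i++) {
        int irq = dist->its_vm.vpes[i]->irq;
        struct kvm_vcpu *vcpu = kvm_get_vcpu(kvm, i);

        /* Keep the doorbell masked/unbalanced until explicitly enabled */
        irq_set_status_flags(irq, DB_IRQ_FLAGS);

        ret = vgic_v4_request_vpe_irq(vcpu, irq);
        if (ret) {
                kvm_err("failed to allocate vcpu IRQ%d\n", irq);
                goto err;                       /* assumed unwind path */
        }
}

/* teardown: undo the flags, free with the same dev_id cookie */
for (i = 0; i < its_vm->nr_vpes; i++) {
        struct kvm_vcpu *vcpu = kvm_get_vcpu(kvm, i);
        int irq = its_vm->vpes[i]->irq;

        irq_clear_status_flags(irq, DB_IRQ_FLAGS);
        free_irq(irq, vcpu);
}
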
366 err = irq_set_affinity(vpe->irq, cpumask_of(smp_processor_id())); in vgic_v4_load()
380 err = irq_set_irqchip_state(vpe->irq, IRQCHIP_STATE_PENDING, false); in vgic_v4_load()
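
The two vgic_v4_load() fragments (lines 366 and 380) show the residency dance: the doorbell is first pinned to the CPU the vCPU is about to run on with irq_set_affinity(), and once the vPE is resident any stale pending doorbell is discarded by clearing IRQCHIP_STATE_PENDING. A sketch of that ordering; the its_make_vpe_resident() call in between, its arguments, and the early returns are assumptions:

static int vgic_v4_load_sketch(struct kvm_vcpu *vcpu)
{
        struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
        int err;

        /* Make this CPU's redistributor the home of the doorbell */
        err = irq_set_affinity(vpe->irq, cpumask_of(smp_processor_id()));
        if (err)
                return err;

        /* Assumed: actually schedule the vPE on this redistributor */
        err = its_make_vpe_resident(vpe, false, true);
        if (err)
                return err;

        /* Drop a doorbell that may have fired while non-resident */
        return irq_set_irqchip_state(vpe->irq, IRQCHIP_STATE_PENDING, false);
}
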
415 struct vgic_irq *irq; in kvm_vgic_v4_set_forwarding() local
435 irq_entry->msi.data, &irq); in kvm_vgic_v4_set_forwarding()
447 .vpe = &irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe, in kvm_vgic_v4_set_forwarding()
448 .vintid = irq->intid, in kvm_vgic_v4_set_forwarding()
449 .properties = ((irq->priority & 0xfc) | in kvm_vgic_v4_set_forwarding()
450 (irq->enabled ? LPI_PROP_ENABLED : 0) | in kvm_vgic_v4_set_forwarding()
459 irq->hw = true; in kvm_vgic_v4_set_forwarding()
460 irq->host_irq = virq; in kvm_vgic_v4_set_forwarding()
464 raw_spin_lock_irqsave(&irq->irq_lock, flags); in kvm_vgic_v4_set_forwarding()
465 if (irq->pending_latch) { in kvm_vgic_v4_set_forwarding()
466 ret = irq_set_irqchip_state(irq->host_irq, in kvm_vgic_v4_set_forwarding()
468 irq->pending_latch); in kvm_vgic_v4_set_forwarding()
469 WARN_RATELIMIT(ret, "IRQ %d", irq->host_irq); in kvm_vgic_v4_set_forwarding()
475 irq->pending_latch = false; in kvm_vgic_v4_set_forwarding()
476 vgic_queue_irq_unlock(kvm, irq, flags); in kvm_vgic_v4_set_forwarding()
478 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in kvm_vgic_v4_set_forwarding()
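
kvm_vgic_v4_set_forwarding() (lines 415-478) translates a routed MSI into its vgic_irq, builds an its_vlpi_map targeting that interrupt's vPE, marks the interrupt as hardware-backed, and finally transfers any software pending_latch into the irqchip before queueing the interrupt. A hedged reconstruction; the vgic_get_its()/vgic_its_resolve_lpi() names (only trailing arguments appear at line 435), the ITS locking, the its_map_vlpi() call, the .vm and .db_enabled fields, and the LPI_PROP_GROUP1 term are assumptions:

static int set_forwarding_sketch(struct kvm *kvm, int virq,
                                 struct kvm_kernel_irq_routing_entry *irq_entry)
{
        struct vgic_its *its;
        struct vgic_irq *irq;
        struct its_vlpi_map map;
        unsigned long flags;
        int ret;

        /* Assumed: locate the vITS owning this MSI doorbell and lock it */
        its = vgic_get_its(kvm, irq_entry);
        if (IS_ERR(its))
                return 0;
        mutex_lock(&its->its_lock);

        /* Assumed resolver name: DevID/EventID -> vgic_irq (the target LPI) */
        ret = vgic_its_resolve_lpi(kvm, its, irq_entry->msi.devid,
                                   irq_entry->msi.data, &irq);
        if (ret)
                goto out;

        /* Describe the VLPI mapping for the host ITS (fields per lines 447-450) */
        map = (struct its_vlpi_map) {
                .vm             = &kvm->arch.vgic.its_vm,       /* assumed */
                .vpe            = &irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe,
                .vintid         = irq->intid,
                .properties     = ((irq->priority & 0xfc) |
                                   (irq->enabled ? LPI_PROP_ENABLED : 0) |
                                   LPI_PROP_GROUP1),            /* final term assumed */
                .db_enabled     = true,                         /* assumed */
        };

        ret = its_map_vlpi(virq, &map);                         /* assumed mapping call */
        if (ret)
                goto out;

        irq->hw = true;                                         /* line 459 */
        irq->host_irq = virq;                                   /* line 460 */

        /* Transfer a pending latch that predates the HW mapping */
        raw_spin_lock_irqsave(&irq->irq_lock, flags);
        if (irq->pending_latch) {
                ret = irq_set_irqchip_state(irq->host_irq,
                                            IRQCHIP_STATE_PENDING,
                                            irq->pending_latch);
                WARN_RATELIMIT(ret, "IRQ %d", irq->host_irq);
                irq->pending_latch = false;
                vgic_queue_irq_unlock(kvm, irq, flags);         /* drops irq_lock */
        } else {
                raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
        }
out:
        mutex_unlock(&its->its_lock);
        return ret;
}
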
490 struct vgic_irq *irq; in kvm_vgic_v4_unset_forwarding() local
507 irq_entry->msi.data, &irq); in kvm_vgic_v4_unset_forwarding()
511 WARN_ON(!(irq->hw && irq->host_irq == virq)); in kvm_vgic_v4_unset_forwarding()
512 if (irq->hw) { in kvm_vgic_v4_unset_forwarding()
513 atomic_dec(&irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count); in kvm_vgic_v4_unset_forwarding()
514 irq->hw = false; in kvm_vgic_v4_unset_forwarding()
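
kvm_vgic_v4_unset_forwarding() (lines 490-514) resolves the same MSI back to its vgic_irq, sanity-checks that it is still the hardware-backed interrupt bound to virq, then drops the vPE's vlpi_count and the hw marking. A short sketch; the vgic_get_its()/vgic_its_resolve_lpi() calls, the ITS locking, and the its_unmap_vlpi() teardown of the host mapping are assumptions:

static int unset_forwarding_sketch(struct kvm *kvm, int virq,
                                   struct kvm_kernel_irq_routing_entry *irq_entry)
{
        struct vgic_its *its;
        struct vgic_irq *irq;
        int ret;

        its = vgic_get_its(kvm, irq_entry);             /* assumed lookup */
        if (IS_ERR(its))
                return 0;
        mutex_lock(&its->its_lock);                     /* assumed locking */

        ret = vgic_its_resolve_lpi(kvm, its, irq_entry->msi.devid,
                                   irq_entry->msi.data, &irq);
        if (ret)
                goto out;

        /* The interrupt should still be the HW-backed one we forwarded */
        WARN_ON(!(irq->hw && irq->host_irq == virq));
        if (irq->hw) {
                atomic_dec(&irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count);
                irq->hw = false;
                ret = its_unmap_vlpi(virq);             /* assumed unmap call */
        }
out:
        mutex_unlock(&its->its_lock);
        return ret;
}
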