Lines matching full:vpe (GICv3 ITS driver, drivers/irqchip/irq-gic-v3-its.c)
266 static int vpe_to_cpuid_lock(struct its_vpe *vpe, unsigned long *flags) in vpe_to_cpuid_lock() argument
268 raw_spin_lock_irqsave(&vpe->vpe_lock, *flags); in vpe_to_cpuid_lock()
269 return vpe->col_idx; in vpe_to_cpuid_lock()
272 static void vpe_to_cpuid_unlock(struct its_vpe *vpe, unsigned long flags) in vpe_to_cpuid_unlock() argument
274 raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags); in vpe_to_cpuid_unlock()
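The two locking helpers are short enough to reconstruct almost entirely from the matched lines; a sketch with the function bodies restored (only the braces and blank lines are missing from the match). Callers use the returned col_idx as the CPU to target while holding vpe_lock, which is what keeps a concurrent affinity change from moving the vPE underneath them:

static int vpe_to_cpuid_lock(struct its_vpe *vpe, unsigned long *flags)
{
        /* Pin the vPE to its current CPU for as long as the lock is held */
        raw_spin_lock_irqsave(&vpe->vpe_lock, *flags);
        return vpe->col_idx;
}

static void vpe_to_cpuid_unlock(struct its_vpe *vpe, unsigned long flags)
{
        raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
}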
281 struct its_vpe *vpe = NULL; in irq_to_cpuid_lock() local
285 vpe = irq_data_get_irq_chip_data(d); in irq_to_cpuid_lock()
289 vpe = map->vpe; in irq_to_cpuid_lock()
292 if (vpe) { in irq_to_cpuid_lock()
293 cpu = vpe_to_cpuid_lock(vpe, flags); in irq_to_cpuid_lock()
307 struct its_vpe *vpe = NULL; in irq_to_cpuid_unlock() local
310 vpe = irq_data_get_irq_chip_data(d); in irq_to_cpuid_unlock()
314 vpe = map->vpe; in irq_to_cpuid_unlock()
317 if (vpe) in irq_to_cpuid_unlock()
318 vpe_to_cpuid_unlock(vpe, flags); in irq_to_cpuid_unlock()
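A minimal sketch of how these wrappers appear to resolve an irq to a vPE before locking. is_vpe_irq() is a hypothetical predicate standing in for however the driver tells a vPE doorbell irq apart from a forwarded vLPI (the matched lines do not show that test), and the plain-LPI fallthrough is elided; irq_to_cpuid_unlock() mirrors the same structure, unlocking only when a vPE was found:

static int irq_to_cpuid_lock(struct irq_data *d, unsigned long *flags)
{
        struct its_vpe *vpe = NULL;
        int cpu = -1;

        if (is_vpe_irq(d)) {                      /* hypothetical predicate */
                vpe = irq_data_get_irq_chip_data(d);
        } else {
                struct its_vlpi_map *map = get_vlpi_map(d);

                if (map)
                        vpe = map->vpe;
        }

        if (vpe) {
                cpu = vpe_to_cpuid_lock(vpe, flags);
        } else {
                /* Physical LPIs are already bound to a single CPU */
                *flags = 0;
                /* cpu = <the LPI's mapped collection>;  (elided) */
        }

        return cpu;
}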
329 static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe) in valid_vpe() argument
331 if (valid_col(its->collections + vpe->col_idx)) in valid_vpe()
332 return vpe; in valid_vpe()
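valid_vpe() decides whether the command queue has a usable collection to synchronise against for this vPE; the builders below return its result so the queue code knows which vPE, if any, to wait on. A sketch with the elided fallthrough restored (assumed to return NULL):

static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe)
{
        if (valid_col(its->collections + vpe->col_idx))
                return vpe;

        return NULL;
}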
390 struct its_vpe *vpe; member
394 struct its_vpe *vpe; member
400 struct its_vpe *vpe; member
408 struct its_vpe *vpe; member
415 struct its_vpe *vpe; member
422 struct its_vpe *vpe; member
426 struct its_vpe *vpe; member
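The member hits above all come from the per-command descriptor that the send helpers fill in before queueing an ITS command. A simplified sketch of that layout; apart from vpe (and the col/valid fields used by VMAPP later in the listing), the field names and the set of variants shown are illustrative, not the driver's exact definition:

struct its_cmd_desc {
        union {
                struct {
                        struct its_vpe *vpe;
                } its_vinvall_cmd;

                struct {
                        struct its_vpe *vpe;
                        struct its_collection *col;
                        bool valid;
                } its_vmapp_cmd;

                struct {
                        struct its_vpe *vpe;
                        /* device, event and doorbell details elided */
                } its_vmapti_cmd;

                /* ... its_vmovi_cmd, its_vmovp_cmd, its_invdb_cmd,
                 *     its_vsgi_cmd and the non-virtual commands ... */
        };
};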
778 its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id); in its_build_vinvall_cmd()
782 return valid_vpe(its, desc->its_vinvall_cmd.vpe); in its_build_vinvall_cmd()
789 struct its_vpe *vpe = valid_vpe(its, desc->its_vmapp_cmd.vpe); in its_build_vmapp_cmd() local
795 its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id); in its_build_vmapp_cmd()
799 alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count); in its_build_vmapp_cmd()
803 * Unmapping a VPE is self-synchronizing on GICv4.1, in its_build_vmapp_cmd()
806 vpe = NULL; in its_build_vmapp_cmd()
812 vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page)); in its_build_vmapp_cmd()
819 alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count); in its_build_vmapp_cmd()
824 vconf_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->its_vm->vprop_page)); in its_build_vmapp_cmd()
829 * GICv4.1 provides a way to get the VLPI state, which needs the vPE in its_build_vmapp_cmd()
830 * to be unmapped first, and in this case, we may remap the vPE in its_build_vmapp_cmd()
836 its_encode_vmapp_default_db(cmd, desc->its_vmapp_cmd.vpe->vpe_db_lpi); in its_build_vmapp_cmd()
841 return vpe; in its_build_vmapp_cmd()
851 db = desc->its_vmapti_cmd.vpe->vpe_db_lpi; in its_build_vmapti_cmd()
857 its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id); in its_build_vmapti_cmd()
864 return valid_vpe(its, desc->its_vmapti_cmd.vpe); in its_build_vmapti_cmd()
874 db = desc->its_vmovi_cmd.vpe->vpe_db_lpi; in its_build_vmovi_cmd()
880 its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id); in its_build_vmovi_cmd()
887 return valid_vpe(its, desc->its_vmovi_cmd.vpe); in its_build_vmovi_cmd()
900 its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id); in its_build_vmovp_cmd()
905 its_encode_vmovp_default_db(cmd, desc->its_vmovp_cmd.vpe->vpe_db_lpi); in its_build_vmovp_cmd()
910 return valid_vpe(its, desc->its_vmovp_cmd.vpe); in its_build_vmovp_cmd()
928 return valid_vpe(its, map->vpe); in its_build_vinv_cmd()
946 return valid_vpe(its, map->vpe); in its_build_vint_cmd()
964 return valid_vpe(its, map->vpe); in its_build_vclear_cmd()
975 its_encode_vpeid(cmd, desc->its_invdb_cmd.vpe->vpe_id); in its_build_invdb_cmd()
979 return valid_vpe(its, desc->its_invdb_cmd.vpe); in its_build_invdb_cmd()
990 its_encode_vpeid(cmd, desc->its_vsgi_cmd.vpe->vpe_id); in its_build_vsgi_cmd()
999 return valid_vpe(its, desc->its_vsgi_cmd.vpe); in its_build_vsgi_cmd()
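All of the builders matched above share the same shape: encode the opcode and the target vpe_id into the command block, fix up endianness, and hand back the vPE so the queue code can synchronise on it. A sketch using VINVALL, the simplest of them; its_encode_cmd() and its_fixup_cmd() follow the driver's naming for the helpers that are not part of the match:

static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,
                                             struct its_cmd_block *cmd,
                                             struct its_cmd_desc *desc)
{
        its_encode_cmd(cmd, GITS_CMD_VINVALL);
        its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id);

        its_fixup_cmd(cmd);

        /* Tell the caller which vPE (and hence collection) to sync against */
        return valid_vpe(its, desc->its_vinvall_cmd.vpe);
}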
1282 desc.its_vmapti_cmd.vpe = map->vpe; in its_send_vmapti()
1296 desc.its_vmovi_cmd.vpe = map->vpe; in its_send_vmovi()
1305 struct its_vpe *vpe, bool valid) in its_send_vmapp() argument
1309 desc.its_vmapp_cmd.vpe = vpe; in its_send_vmapp()
1311 desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx]; in its_send_vmapp()
1316 static void its_send_vmovp(struct its_vpe *vpe) in its_send_vmovp() argument
1321 int col_id = vpe->col_idx; in its_send_vmovp()
1323 desc.its_vmovp_cmd.vpe = vpe; in its_send_vmovp()
1343 desc.its_vmovp_cmd.its_list = get_its_list(vpe->its_vm); in its_send_vmovp()
1350 if (!require_its_list_vmovp(vpe->its_vm, its)) in its_send_vmovp()
1360 static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe) in its_send_vinvall() argument
1364 desc.its_vinvall_cmd.vpe = vpe; in its_send_vinvall()
1410 static void its_send_invdb(struct its_node *its, struct its_vpe *vpe) in its_send_invdb() argument
1414 desc.its_invdb_cmd.vpe = vpe; in its_send_invdb()
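Each its_send_*() wrapper matched here pairs a descriptor fill with one of the builders above and queues the result; VMOVP is the odd one out because it may have to be replayed on every ITS in the list map, as the its_list handling in its_send_vmovp() shows. A sketch of the simplest wrapper, assuming the driver's its_send_single_vcommand() queueing primitive:

static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe)
{
        struct its_cmd_desc desc;

        desc.its_vinvall_cmd.vpe = vpe;

        /* Queue the command on this ITS and wait for it to take effect */
        its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
}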
1490 val |= FIELD_PREP(GICR_INVLPIR_VPEID, map->vpe->vpe_id); in direct_lpi_inv()
1540 * to the /same/ vPE, using this opportunity to adjust the in its_vlpi_set_doorbell()
1811 struct its_vpe *vpe = vm->vpes[i]; in its_map_vm() local
1812 struct irq_data *d = irq_get_irq_data(vpe->irq); in its_map_vm()
1814 /* Map the VPE to the first possible CPU */ in its_map_vm()
1815 vpe->col_idx = cpumask_first(cpu_online_mask); in its_map_vm()
1816 its_send_vmapp(its, vpe, true); in its_map_vm()
1817 its_send_vinvall(its, vpe); in its_map_vm()
1818 irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx)); in its_map_vm()
2520 /* GICv4.1 guarantees that the vPE table is GITS_BASER2 */ in find_sibling_its()
2682 /* GICv4.1 guarantees that the vPE table is GITS_BASER2 */ in inherit_vpe_l1_table_from_its()
2838 * VPE by setting PendingLast while clearing Valid. This has the in allocate_vpe_l1_table()
3176 * scheduled as a vPE, especially for the first CPU, and the in its_cpu_init_lpis()
3360 * that have interrupts targeted at this VPE, but the in its_alloc_vpe_table()
3517 WARN_ONCE(1, "DevId %x clashes with GICv4 VPE proxy device\n", in its_msi_prepare()
3716 static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe) in its_vpe_db_proxy_unmap_locked() argument
3723 if (vpe->vpe_proxy_event == -1) in its_vpe_db_proxy_unmap_locked()
3726 its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event); in its_vpe_db_proxy_unmap_locked()
3727 vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL; in its_vpe_db_proxy_unmap_locked()
3737 vpe_proxy.next_victim = vpe->vpe_proxy_event; in its_vpe_db_proxy_unmap_locked()
3739 vpe->vpe_proxy_event = -1; in its_vpe_db_proxy_unmap_locked()
3742 static void its_vpe_db_proxy_unmap(struct its_vpe *vpe) in its_vpe_db_proxy_unmap() argument
3752 its_vpe_db_proxy_unmap_locked(vpe); in its_vpe_db_proxy_unmap()
3757 static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe) in its_vpe_db_proxy_map_locked() argument
3764 if (vpe->vpe_proxy_event != -1) in its_vpe_db_proxy_map_locked()
3767 /* This slot was already allocated. Kick the other VPE out. */ in its_vpe_db_proxy_map_locked()
3771 /* Map the new VPE instead */ in its_vpe_db_proxy_map_locked()
3772 vpe_proxy.vpes[vpe_proxy.next_victim] = vpe; in its_vpe_db_proxy_map_locked()
3773 vpe->vpe_proxy_event = vpe_proxy.next_victim; in its_vpe_db_proxy_map_locked()
3776 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx; in its_vpe_db_proxy_map_locked()
3777 its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event); in its_vpe_db_proxy_map_locked()
3780 static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to) in its_vpe_db_proxy_move() argument
3793 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR); in its_vpe_db_proxy_move()
3801 its_vpe_db_proxy_map_locked(vpe); in its_vpe_db_proxy_move()
3804 its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event); in its_vpe_db_proxy_move()
3805 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to; in its_vpe_db_proxy_move()
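Without DirectLPI, vPE doorbells are driven through a small proxy ITS device with a limited number of events, so mapping one vPE's doorbell may mean evicting another vPE from its slot. A sketch of the locked map path, reconstructed from the matched lines; the early-return guards and the next_victim advance are assumptions and are marked as such:

static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe)
{
        /* Assumed guard: nothing to do when DirectLPI is available */
        if (gic_rdists->has_direct_lpi)
                return;

        /* Already mapped? */
        if (vpe->vpe_proxy_event != -1)
                return;

        /* This slot was already allocated. Kick the other VPE out. */
        if (vpe_proxy.vpes[vpe_proxy.next_victim])
                its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]);

        /* Map the new VPE instead */
        vpe_proxy.vpes[vpe_proxy.next_victim] = vpe;
        vpe->vpe_proxy_event = vpe_proxy.next_victim;
        /* advance next_victim to the following slot (wrap-around elided) */

        /* Route the doorbell LPI at the vPE's current CPU */
        vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx;
        its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event);
}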
3814 struct its_vpe *vpe = irq_data_get_irq_chip_data(d); in its_vpe_set_affinity() local
3820 * Check if we're racing against a VPE being destroyed, for in its_vpe_set_affinity()
3823 if (!atomic_read(&vpe->vmapp_count)) in its_vpe_set_affinity()
3832 * Another thing is that changing the affinity of a vPE affects in its_vpe_set_affinity()
3834 * this vPE. This means that the irq_desc lock is not enough to in its_vpe_set_affinity()
3835 * protect us, and that we must ensure nobody samples vpe->col_idx in its_vpe_set_affinity()
3837 * taken on any vLPI handling path that evaluates vpe->col_idx. in its_vpe_set_affinity()
3839 from = vpe_to_cpuid_lock(vpe, &flags); in its_vpe_set_affinity()
3854 vpe->col_idx = cpu; in its_vpe_set_affinity()
3856 its_send_vmovp(vpe); in its_vpe_set_affinity()
3857 its_vpe_db_proxy_move(vpe, from, cpu); in its_vpe_set_affinity()
3861 vpe_to_cpuid_unlock(vpe, flags); in its_vpe_set_affinity()
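its_vpe_set_affinity() is the writer side of the vpe_lock protocol above: it is the place vpe->col_idx changes, and it does so with the lock held so no vLPI path can sample a stale value. A simplified sketch; target-CPU selection is reduced to cpumask_first(), the ITS-list bookkeeping is dropped, and the behaviour when racing a vPE teardown is simplified to a bail-out:

static int its_vpe_set_affinity(struct irq_data *d,
                                const struct cpumask *mask_val,
                                bool force)
{
        struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
        unsigned long flags;
        int from, cpu;

        /* Racing against the vPE being torn down: simplified to a bail-out */
        if (!atomic_read(&vpe->vmapp_count))
                return -EINVAL;                 /* assumed error value */

        from = vpe_to_cpuid_lock(vpe, &flags);
        cpu = cpumask_first(mask_val);          /* simplified target selection */

        if (from != cpu) {
                vpe->col_idx = cpu;
                its_send_vmovp(vpe);
                its_vpe_db_proxy_move(vpe, from, cpu);
                irq_data_update_effective_affinity(d, cpumask_of(cpu));
        }

        vpe_to_cpuid_unlock(vpe, flags);

        return IRQ_SET_MASK_OK_DONE;
}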
3880 static void its_vpe_schedule(struct its_vpe *vpe) in its_vpe_schedule() argument
3885 /* Schedule the VPE */ in its_vpe_schedule()
3886 val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) & in its_vpe_schedule()
3895 val = virt_to_phys(page_address(vpe->vpt_page)) & in its_vpe_schedule()
3904 * easily. So in the end, vpe->pending_last is only an in its_vpe_schedule()
3911 val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0; in its_vpe_schedule()
3916 static void its_vpe_deschedule(struct its_vpe *vpe) in its_vpe_deschedule() argument
3923 vpe->idai = !!(val & GICR_VPENDBASER_IDAI); in its_vpe_deschedule()
3924 vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast); in its_vpe_deschedule()
3927 static void its_vpe_invall(struct its_vpe *vpe) in its_vpe_invall() argument
3935 if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr]) in its_vpe_invall()
3942 its_send_vinvall(its, vpe); in its_vpe_invall()
3949 struct its_vpe *vpe = irq_data_get_irq_chip_data(d); in its_vpe_set_vcpu_affinity() local
3954 its_vpe_schedule(vpe); in its_vpe_set_vcpu_affinity()
3958 its_vpe_deschedule(vpe); in its_vpe_set_vcpu_affinity()
3966 its_vpe_invall(vpe); in its_vpe_set_vcpu_affinity()
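The set_vcpu_affinity hits above come from the dispatcher that KVM reaches through irq_set_vcpu_affinity(): it decodes the requested operation and calls the schedule/deschedule/invall helpers. A sketch, assuming the its_cmd_info/cmd_type encoding from the GICv4 layer and omitting operations added later:

static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
{
        struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
        struct its_cmd_info *info = vcpu_info;

        switch (info->cmd_type) {
        case SCHEDULE_VPE:
                its_vpe_schedule(vpe);
                return 0;

        case DESCHEDULE_VPE:
                its_vpe_deschedule(vpe);
                return 0;

        case INVALL_VPE:
                its_vpe_invall(vpe);
                return 0;

        default:
                return -EINVAL;
        }
}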
3974 static void its_vpe_send_cmd(struct its_vpe *vpe, in its_vpe_send_cmd() argument
3981 its_vpe_db_proxy_map_locked(vpe); in its_vpe_send_cmd()
3982 cmd(vpe_proxy.dev, vpe->vpe_proxy_event); in its_vpe_send_cmd()
3989 struct its_vpe *vpe = irq_data_get_irq_chip_data(d); in its_vpe_send_inv() local
3994 its_vpe_send_cmd(vpe, its_send_inv); in its_vpe_send_inv()
4020 struct its_vpe *vpe = irq_data_get_irq_chip_data(d); in its_vpe_set_irqchip_state() local
4028 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base; in its_vpe_set_irqchip_state()
4030 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR); in its_vpe_set_irqchip_state()
4032 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR); in its_vpe_set_irqchip_state()
4037 its_vpe_send_cmd(vpe, its_send_int); in its_vpe_set_irqchip_state()
4039 its_vpe_send_cmd(vpe, its_send_clear); in its_vpe_set_irqchip_state()
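Forcing the doorbell pending state shows the same DirectLPI-versus-proxy split as the proxy code above: poke the redistributor's SETLPIR/CLRLPIR registers directly when the hardware allows it, otherwise fall back to INT/CLEAR commands through the proxy device. A sketch, with the post-write synchronisation on the clear path omitted:

static int its_vpe_set_irqchip_state(struct irq_data *d,
                                     enum irqchip_irq_state which,
                                     bool state)
{
        struct its_vpe *vpe = irq_data_get_irq_chip_data(d);

        /* Only the pending state of the doorbell can be forced */
        if (which != IRQCHIP_STATE_PENDING)
                return -EINVAL;

        if (gic_rdists->has_direct_lpi) {
                void __iomem *rdbase;

                rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
                if (state)
                        gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR);
                else
                        gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
        } else {
                if (state)
                        its_vpe_send_cmd(vpe, its_send_int);
                else
                        its_vpe_send_cmd(vpe, its_send_clear);
        }

        return 0;
}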
4051 .name = "GICv4-vpe",
4080 struct its_vpe *vpe = irq_data_get_irq_chip_data(d); in its_vpe_4_1_send_inv() local
4090 its_send_invdb(its, vpe); in its_vpe_4_1_send_inv()
4105 static void its_vpe_4_1_schedule(struct its_vpe *vpe, in its_vpe_4_1_schedule() argument
4111 /* Schedule the VPE */ in its_vpe_4_1_schedule()
4115 val |= FIELD_PREP(GICR_VPENDBASER_4_1_VPEID, vpe->vpe_id); in its_vpe_4_1_schedule()
4120 static void its_vpe_4_1_deschedule(struct its_vpe *vpe, in its_vpe_4_1_deschedule() argument
4130 * vPE is going to block: make the vPE non-resident with in its_vpe_4_1_deschedule()
4139 raw_spin_lock_irqsave(&vpe->vpe_lock, flags); in its_vpe_4_1_deschedule()
4143 vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast); in its_vpe_4_1_deschedule()
4144 raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags); in its_vpe_4_1_deschedule()
4147 * We're not blocking, so just make the vPE non-resident in its_vpe_4_1_deschedule()
4153 vpe->pending_last = true; in its_vpe_4_1_deschedule()
4157 static void its_vpe_4_1_invall(struct its_vpe *vpe) in its_vpe_4_1_invall() argument
4165 val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id); in its_vpe_4_1_invall()
4167 /* Target the redistributor this vPE is currently known on */ in its_vpe_4_1_invall()
4168 cpu = vpe_to_cpuid_lock(vpe, &flags); in its_vpe_4_1_invall()
4175 vpe_to_cpuid_unlock(vpe, flags); in its_vpe_4_1_invall()
4180 struct its_vpe *vpe = irq_data_get_irq_chip_data(d); in its_vpe_4_1_set_vcpu_affinity() local
4185 its_vpe_4_1_schedule(vpe, info); in its_vpe_4_1_set_vcpu_affinity()
4189 its_vpe_4_1_deschedule(vpe, info); in its_vpe_4_1_set_vcpu_affinity()
4197 its_vpe_4_1_invall(vpe); in its_vpe_4_1_set_vcpu_affinity()
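On GICv4.1 the per-vPE invalidation no longer needs an ITS command at all: writing GICR_INVALLR on the redistributor that currently owns the vPE does the job, which is why the function takes vpe_lock via vpe_to_cpuid_lock(). A sketch; the wait_for_syncr() helper and the GICR_INVALLR_V bit are assumptions based on the driver's usual redistributor access pattern, and the per-redistributor locking is elided:

static void its_vpe_4_1_invall(struct its_vpe *vpe)
{
        void __iomem *rdbase;
        unsigned long flags;
        u64 val;
        int cpu;

        val  = GICR_INVALLR_V;                            /* assumed valid bit */
        val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id);

        /* Target the redistributor this vPE is currently known on */
        cpu = vpe_to_cpuid_lock(vpe, &flags);
        rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
        gic_write_lpir(val, rdbase + GICR_INVALLR);
        wait_for_syncr(rdbase);                           /* assumed sync helper */
        vpe_to_cpuid_unlock(vpe, flags);
}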
4206 .name = "GICv4.1-vpe",
4216 struct its_vpe *vpe = irq_data_get_irq_chip_data(d); in its_configure_sgi() local
4219 desc.its_vsgi_cmd.vpe = vpe; in its_configure_sgi()
4221 desc.its_vsgi_cmd.priority = vpe->sgi_config[d->hwirq].priority; in its_configure_sgi()
4222 desc.its_vsgi_cmd.enable = vpe->sgi_config[d->hwirq].enabled; in its_configure_sgi()
4223 desc.its_vsgi_cmd.group = vpe->sgi_config[d->hwirq].group; in its_configure_sgi()
4228 * destination VPE is mapped there. Since we map them eagerly at in its_configure_sgi()
4236 struct its_vpe *vpe = irq_data_get_irq_chip_data(d); in its_sgi_mask_irq() local
4238 vpe->sgi_config[d->hwirq].enabled = false; in its_sgi_mask_irq()
4244 struct its_vpe *vpe = irq_data_get_irq_chip_data(d); in its_sgi_unmask_irq() local
4246 vpe->sgi_config[d->hwirq].enabled = true; in its_sgi_unmask_irq()
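Masking, unmasking and reconfiguring a vSGI all funnel into its_configure_sgi(), which packages the per-SGI state kept in vpe->sgi_config[] into a VSGI command; mask and unmask differ only in the enabled flag they set first. A sketch of that funnel, in which the sgi and clear descriptor fields are assumptions:

static void its_configure_sgi(struct irq_data *d, bool clear)
{
        struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
        struct its_cmd_desc desc;

        desc.its_vsgi_cmd.vpe = vpe;
        desc.its_vsgi_cmd.sgi = d->hwirq;                 /* assumed field */
        desc.its_vsgi_cmd.priority = vpe->sgi_config[d->hwirq].priority;
        desc.its_vsgi_cmd.enable = vpe->sgi_config[d->hwirq].enabled;
        desc.its_vsgi_cmd.group = vpe->sgi_config[d->hwirq].group;
        desc.its_vsgi_cmd.clear = clear;                  /* assumed field */

        /*
         * Any GICv4.1 ITS will do, since the destination vPE is mapped
         * everywhere eagerly at activation time.
         */
        its_send_single_vcommand(find_4_1_its(), its_build_vsgi_cmd, &desc);
}

static void its_sgi_unmask_irq(struct irq_data *d)
{
        struct its_vpe *vpe = irq_data_get_irq_chip_data(d);

        vpe->sgi_config[d->hwirq].enabled = true;
        its_configure_sgi(d, false);
}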
4256 * not on the host (since they can only be targeting a vPE). in its_sgi_set_affinity()
4271 struct its_vpe *vpe = irq_data_get_irq_chip_data(d); in its_sgi_set_irqchip_state() local
4275 val = FIELD_PREP(GITS_SGIR_VPEID, vpe->vpe_id); in its_sgi_set_irqchip_state()
4288 struct its_vpe *vpe = irq_data_get_irq_chip_data(d); in its_sgi_get_irqchip_state() local
4301 * - Concurrent vPE affinity change: we must make sure it cannot in its_sgi_get_irqchip_state()
4308 cpu = vpe_to_cpuid_lock(vpe, &flags); in its_sgi_get_irqchip_state()
4311 writel_relaxed(vpe->vpe_id, base + GICR_VSGIR); in its_sgi_get_irqchip_state()
4328 vpe_to_cpuid_unlock(vpe, flags); in its_sgi_get_irqchip_state()
4340 struct its_vpe *vpe = irq_data_get_irq_chip_data(d); in its_sgi_set_vcpu_affinity() local
4345 vpe->sgi_config[d->hwirq].priority = info->priority; in its_sgi_set_vcpu_affinity()
4346 vpe->sgi_config[d->hwirq].group = info->group; in its_sgi_set_vcpu_affinity()
4369 struct its_vpe *vpe = args; in its_sgi_irq_domain_alloc() local
4376 vpe->sgi_config[i].priority = 0; in its_sgi_irq_domain_alloc()
4377 vpe->sgi_config[i].enabled = false; in its_sgi_irq_domain_alloc()
4378 vpe->sgi_config[i].group = false; in its_sgi_irq_domain_alloc()
4381 &its_sgi_irq_chip, vpe); in its_sgi_irq_domain_alloc()
4406 struct its_vpe *vpe = irq_data_get_irq_chip_data(d); in its_sgi_irq_domain_deactivate() local
4418 vpe->sgi_config[d->hwirq].enabled = false; in its_sgi_irq_domain_deactivate()
4440 static int its_vpe_init(struct its_vpe *vpe) in its_vpe_init() argument
4463 raw_spin_lock_init(&vpe->vpe_lock); in its_vpe_init()
4464 vpe->vpe_id = vpe_id; in its_vpe_init()
4465 vpe->vpt_page = vpt_page; in its_vpe_init()
4466 atomic_set(&vpe->vmapp_count, 0); in its_vpe_init()
4468 vpe->vpe_proxy_event = -1; in its_vpe_init()
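its_vpe_init() brings the per-vPE state above together: a vpe_id, a virtual pending table page, room in the vPE table, the lock, the VMAPP refcount and the proxy event slot. A sketch with helper names as they appear elsewhere in this listing and in the driver; the exact allocation ordering and error handling are assumptions:

static int its_vpe_init(struct its_vpe *vpe)
{
        struct page *vpt_page;
        int vpe_id;

        /* Allocate a vpe_id and the virtual pending table */
        vpe_id = its_vpe_id_alloc();
        if (vpe_id < 0)
                return vpe_id;

        vpt_page = its_allocate_pending_table(GFP_KERNEL);
        if (!vpt_page) {
                its_vpe_id_free(vpe_id);
                return -ENOMEM;
        }

        /* Make sure the vPE table can hold this vpe_id */
        if (!its_alloc_vpe_table(vpe_id)) {
                its_vpe_id_free(vpe_id);
                its_free_pending_table(vpt_page);
                return -ENOMEM;
        }

        raw_spin_lock_init(&vpe->vpe_lock);
        vpe->vpe_id = vpe_id;
        vpe->vpt_page = vpt_page;
        atomic_set(&vpe->vmapp_count, 0);
        vpe->vpe_proxy_event = -1;      /* only used on the !DirectLPI path */

        return 0;
}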
4473 static void its_vpe_teardown(struct its_vpe *vpe) in its_vpe_teardown() argument
4475 its_vpe_db_proxy_unmap(vpe); in its_vpe_teardown()
4476 its_vpe_id_free(vpe->vpe_id); in its_vpe_teardown()
4477 its_free_pending_table(vpe->vpt_page); in its_vpe_teardown()
4492 struct its_vpe *vpe = irq_data_get_irq_chip_data(data); in its_vpe_irq_domain_free() local
4494 BUG_ON(vm != vpe->its_vm); in its_vpe_irq_domain_free()
4497 its_vpe_teardown(vpe); in its_vpe_irq_domain_free()
4563 struct its_vpe *vpe = irq_data_get_irq_chip_data(d); in its_vpe_irq_domain_activate() local
4568 * we're on a GICv4.1 and we eagerly map the VPE on all ITSs in its_vpe_irq_domain_activate()
4574 /* Map the VPE to the first possible CPU */ in its_vpe_irq_domain_activate()
4575 vpe->col_idx = cpumask_first(cpu_online_mask); in its_vpe_irq_domain_activate()
4581 its_send_vmapp(its, vpe, true); in its_vpe_irq_domain_activate()
4582 its_send_vinvall(its, vpe); in its_vpe_irq_domain_activate()
4585 irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx)); in its_vpe_irq_domain_activate()
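Activation is where a vPE finally gets VMAPPed. On GICv4.1 the mapping was already done eagerly at allocation time, and on a GICv4.0 list map it is deferred to its_map_vm() above; otherwise every v4 ITS gets a VMAPP followed by a VINVALL, exactly as in the its_map_vm() loop. A sketch of that common path, with the two early-outs reduced to comments:

static int its_vpe_irq_domain_activate(struct irq_domain *domain,
                                       struct irq_data *d, bool reserve)
{
        struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
        struct its_node *its;

        /* GICv4.1: already mapped eagerly at allocation time, nothing to do */
        /* GICv4.0 + its_list: mapping is deferred until the first vLPI      */

        /* Map the VPE to the first possible CPU */
        vpe->col_idx = cpumask_first(cpu_online_mask);

        list_for_each_entry(its, &its_nodes, entry) {
                if (!is_v4(its))
                        continue;

                its_send_vmapp(its, vpe, true);
                its_send_vinvall(its, vpe);
        }

        irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));

        return 0;
}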
4593 struct its_vpe *vpe = irq_data_get_irq_chip_data(d); in its_vpe_irq_domain_deactivate() local
4597 * If we use the list map on GICv4.0, we unmap the VPE once no in its_vpe_irq_domain_deactivate()
4607 its_send_vmapp(its, vpe, false); in its_vpe_irq_domain_deactivate()
4612 * vPE, to guarantee the validity of this, we make the VPT in its_vpe_irq_domain_deactivate()
4615 if (find_4_1_its() && !atomic_read(&vpe->vmapp_count)) in its_vpe_irq_domain_deactivate()
4616 gic_flush_dcache_to_poc(page_address(vpe->vpt_page), in its_vpe_irq_domain_deactivate()
5007 pr_info("ITS: Using DirectLPI for VPE invalidation\n"); in its_init_vpe_domain()