19ed24f4bSMarc Zyngier // SPDX-License-Identifier: GPL-2.0-only 29ed24f4bSMarc Zyngier /* 39ed24f4bSMarc Zyngier * GICv3 ITS emulation 49ed24f4bSMarc Zyngier * 59ed24f4bSMarc Zyngier * Copyright (C) 2015,2016 ARM Ltd. 69ed24f4bSMarc Zyngier * Author: Andre Przywara <andre.przywara@arm.com> 79ed24f4bSMarc Zyngier */ 89ed24f4bSMarc Zyngier 99ed24f4bSMarc Zyngier #include <linux/cpu.h> 109ed24f4bSMarc Zyngier #include <linux/kvm.h> 119ed24f4bSMarc Zyngier #include <linux/kvm_host.h> 129ed24f4bSMarc Zyngier #include <linux/interrupt.h> 139ed24f4bSMarc Zyngier #include <linux/list.h> 149ed24f4bSMarc Zyngier #include <linux/uaccess.h> 159ed24f4bSMarc Zyngier #include <linux/list_sort.h> 169ed24f4bSMarc Zyngier 179ed24f4bSMarc Zyngier #include <linux/irqchip/arm-gic-v3.h> 189ed24f4bSMarc Zyngier 199ed24f4bSMarc Zyngier #include <asm/kvm_emulate.h> 209ed24f4bSMarc Zyngier #include <asm/kvm_arm.h> 219ed24f4bSMarc Zyngier #include <asm/kvm_mmu.h> 229ed24f4bSMarc Zyngier 239ed24f4bSMarc Zyngier #include "vgic.h" 249ed24f4bSMarc Zyngier #include "vgic-mmio.h" 259ed24f4bSMarc Zyngier 269ed24f4bSMarc Zyngier static int vgic_its_save_tables_v0(struct vgic_its *its); 279ed24f4bSMarc Zyngier static int vgic_its_restore_tables_v0(struct vgic_its *its); 289ed24f4bSMarc Zyngier static int vgic_its_commit_v0(struct vgic_its *its); 299ed24f4bSMarc Zyngier static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq, 309ed24f4bSMarc Zyngier struct kvm_vcpu *filter_vcpu, bool needs_inv); 319ed24f4bSMarc Zyngier 329ed24f4bSMarc Zyngier /* 339ed24f4bSMarc Zyngier * Creates a new (reference to a) struct vgic_irq for a given LPI. 349ed24f4bSMarc Zyngier * If this LPI is already mapped on another ITS, we increase its refcount 359ed24f4bSMarc Zyngier * and return a pointer to the existing structure. 369ed24f4bSMarc Zyngier * If this is a "new" LPI, we allocate and initialize a new struct vgic_irq. 379ed24f4bSMarc Zyngier * This function returns a pointer to the _unlocked_ structure. 
 */
static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
				     struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq = vgic_get_irq(kvm, NULL, intid), *oldirq;
	unsigned long flags;
	int ret;

	/* In this case there is no put, since we keep the reference. */
	if (irq)
		return irq;

	irq = kzalloc(sizeof(struct vgic_irq), GFP_KERNEL_ACCOUNT);
	if (!irq)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&irq->lpi_list);
	INIT_LIST_HEAD(&irq->ap_list);
	raw_spin_lock_init(&irq->irq_lock);

	/* LPIs are edge-triggered; kref_init() accounts for our reference. */
	irq->config = VGIC_CONFIG_EDGE;
	kref_init(&irq->refcount);
	irq->intid = intid;
	irq->target_vcpu = vcpu;
	irq->group = 1;

	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);

	/*
	 * There could be a race with another vgic_add_lpi(), so we need to
	 * check that we don't add a second list entry with the same LPI.
	 */
	list_for_each_entry(oldirq, &dist->lpi_list_head, lpi_list) {
		if (oldirq->intid != intid)
			continue;

		/* Someone was faster with adding this LPI, let's use that. */
		kfree(irq);
		irq = oldirq;

		/*
		 * This increases the refcount, the caller is expected to
		 * call vgic_put_irq() on the returned pointer once it's
		 * finished with the IRQ.
		 */
		vgic_get_irq_kref(irq);

		goto out_unlock;
	}

	list_add_tail(&irq->lpi_list, &dist->lpi_list_head);
	dist->lpi_list_count++;

out_unlock:
	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

	/*
	 * We "cache" the configuration table entries in our struct vgic_irq's.
	 * However we only have those structs for mapped IRQs, so we read in
	 * the respective config data from memory here upon mapping the LPI.
	 *
	 * Should any of these fail, behave as if we couldn't create the LPI
	 * by dropping the refcount and returning the error.
	 */
	ret = update_lpi_config(kvm, irq, NULL, false);
	if (ret) {
		vgic_put_irq(kvm, irq);
		return ERR_PTR(ret);
	}

	ret = vgic_v3_lpi_sync_pending_status(kvm, irq);
	if (ret) {
		vgic_put_irq(kvm, irq);
		return ERR_PTR(ret);
	}

	return irq;
}

/* An emulated ITS device, identified by its device ID. */
struct its_device {
	struct list_head dev_list;

	/* the head for the list of ITTEs */
	struct list_head itt_head;
	u32 num_eventid_bits;	/* width of the event ID space of this device */
	gpa_t itt_addr;		/* guest PA of the interrupt translation table */
	u32 device_id;
};

/* Marker for a collection that has no target redistributor yet. */
#define COLLECTION_NOT_MAPPED ((u32)~0)

/* An emulated ITS collection, routing its LPIs to one redistributor. */
struct its_collection {
	struct list_head coll_list;

	u32 collection_id;
	/* linear vcpu number (see kvm_get_vcpu() users), or NOT_MAPPED */
	u32 target_addr;
};

#define its_is_collection_mapped(coll) ((coll) && \
	((coll)->target_addr != COLLECTION_NOT_MAPPED))

/* An interrupt translation table entry: one event ID -> LPI mapping. */
struct its_ite {
	struct list_head ite_list;

	struct vgic_irq *irq;
	struct its_collection *collection;
	u32 event_id;
};

/*
 * One (doorbell, devid, eventid) -> irq entry of the LPI translation
 * cache. A non-NULL irq holds a reference for as long as it is cached.
 */
struct vgic_translation_cache_entry {
	struct list_head entry;
	phys_addr_t db;		/* doorbell (GITS_TRANSLATER) address */
	u32 devid;
	u32 eventid;
	struct vgic_irq *irq;
};

/**
 * struct vgic_its_abi - ITS abi ops and settings
 * @cte_esz: collection table entry size
 * @dte_esz: device table entry size
 * @ite_esz: interrupt translation table entry size
 * @save_tables: save the ITS tables into guest RAM
 * @restore_tables: restore the ITS internal structs from tables
 *  stored in guest RAM
 * @commit: initialize the registers which expose the ABI settings,
 *  especially the entry sizes
 */
struct vgic_its_abi {
	int cte_esz;
	int dte_esz;
	int ite_esz;
	int (*save_tables)(struct vgic_its *its);
	int (*restore_tables)(struct vgic_its *its);
	int (*commit)(struct vgic_its *its);
};

#define ABI_0_ESZ	8
#define ESZ_MAX	ABI_0_ESZ

/* Supported ABI revisions, indexed by its->abi_rev. */
static const struct vgic_its_abi its_table_abi_versions[] = {
	[0] = {
	 .cte_esz = ABI_0_ESZ,
	 .dte_esz = ABI_0_ESZ,
	 .ite_esz = ABI_0_ESZ,
	 .save_tables = vgic_its_save_tables_v0,
	 .restore_tables = vgic_its_restore_tables_v0,
	 .commit = vgic_its_commit_v0,
	},
};
#define NR_ITS_ABIS	ARRAY_SIZE(its_table_abi_versions)

/* Returns the ops/entry sizes for the ABI revision currently selected. */
inline const struct vgic_its_abi *vgic_its_get_abi(struct vgic_its *its)
{
	return &its_table_abi_versions[its->abi_rev];
}

/* Selects an ABI revision and (re-)commits the ABI-exposing registers. */
static int vgic_its_set_abi(struct vgic_its *its, u32 rev)
{
	const struct vgic_its_abi *abi;

	its->abi_rev = rev;
	abi = vgic_its_get_abi(its);
	return abi->commit(its);
}

/*
 * Finds and returns a device in the device table for an ITS.
 * Must be called with the its_lock mutex held.
 */
static struct its_device *find_its_device(struct vgic_its *its, u32 device_id)
{
	struct its_device *device;

	list_for_each_entry(device, &its->device_list, dev_list)
		if (device_id == device->device_id)
			return device;

	return NULL;
}

/*
 * Finds and returns an interrupt translation table entry (ITTE) for a given
 * Device ID/Event ID pair on an ITS.
 * Must be called with the its_lock mutex held.
 */
static struct its_ite *find_ite(struct vgic_its *its, u32 device_id,
				u32 event_id)
{
	struct its_device *device;
	struct its_ite *ite;

	device = find_its_device(its, device_id);
	if (device == NULL)
		return NULL;

	list_for_each_entry(ite, &device->itt_head, ite_list)
		if (ite->event_id == event_id)
			return ite;

	return NULL;
}

/* To be used as an iterator this macro misses the enclosing parentheses */
#define for_each_lpi_its(dev, ite, its) \
	list_for_each_entry(dev, &(its)->device_list, dev_list) \
		list_for_each_entry(ite, &(dev)->itt_head, ite_list)

/* LPI INTIDs start at 8192 per the GICv3 architecture. */
#define GIC_LPI_OFFSET 8192

#define VITS_TYPER_IDBITS		16
#define VITS_TYPER_DEVBITS		16
#define VITS_DTE_MAX_DEVID_OFFSET	(BIT(14) - 1)
#define VITS_ITE_MAX_EVENTID_OFFSET	(BIT(16) - 1)

/*
 * Finds and returns a collection in the ITS collection table.
 * Must be called with the its_lock mutex held.
 */
static struct its_collection *find_collection(struct vgic_its *its, int coll_id)
{
	struct its_collection *collection;

	list_for_each_entry(collection, &its->collection_list, coll_list) {
		if (coll_id == collection->collection_id)
			return collection;
	}

	return NULL;
}

/* Extract the enable bit / priority field from an LPI property byte. */
#define LPI_PROP_ENABLE_BIT(p)	((p) & LPI_PROP_ENABLED)
#define LPI_PROP_PRIORITY(p)	((p) & 0xfc)

/*
 * Reads the configuration data for a given LPI from guest memory and
 * updates the fields in struct vgic_irq.
 * If filter_vcpu is not NULL, applies only if the IRQ is targeting this
 * VCPU. Unconditionally applies if filter_vcpu is NULL.
 */
static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
			     struct kvm_vcpu *filter_vcpu, bool needs_inv)
{
	u64 propbase = GICR_PROPBASER_ADDRESS(kvm->arch.vgic.propbaser);
	u8 prop;
	int ret;
	unsigned long flags;

	/* One property byte per LPI, indexed by INTID - 8192. */
	ret = kvm_read_guest_lock(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
				  &prop, 1);

	if (ret)
		return ret;

	raw_spin_lock_irqsave(&irq->irq_lock, flags);

	if (!filter_vcpu || filter_vcpu == irq->target_vcpu) {
		irq->priority = LPI_PROP_PRIORITY(prop);
		irq->enabled = LPI_PROP_ENABLE_BIT(prop);

		if (!irq->hw) {
			/* Note: this drops irq_lock for us. */
			vgic_queue_irq_unlock(kvm, irq, flags);
			return 0;
		}
	}

	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

	/* Forward the new property to a directly injected (hw) VLPI. */
	if (irq->hw)
		return its_prop_update_vlpi(irq->host_irq, prop, needs_inv);

	return 0;
}

/*
 * Create a snapshot of the current LPIs targeting @vcpu, so that we can
 * enumerate those LPIs without holding any lock.
 * Returns their number and puts the kmalloc'ed array into intid_ptr.
 */
int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq;
	unsigned long flags;
	u32 *intids;
	int irq_count, i = 0;

	/*
	 * There is an obvious race between allocating the array and LPIs
	 * being mapped/unmapped. If we ended up here as a result of a
	 * command, we're safe (locks are held, preventing another
	 * command). If coming from another path (such as enabling LPIs),
	 * we must be careful not to overrun the array.
	 */
	irq_count = READ_ONCE(dist->lpi_list_count);
	intids = kmalloc_array(irq_count, sizeof(intids[0]), GFP_KERNEL_ACCOUNT);
	if (!intids)
		return -ENOMEM;

	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
		/* The snapshot is bounded by the count read above. */
		if (i == irq_count)
			break;
		/* We don't need to "get" the IRQ, as we hold the list lock. */
		if (vcpu && irq->target_vcpu != vcpu)
			continue;
		intids[i++] = irq->intid;
	}
	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

	*intid_ptr = intids;
	return i;
}

/*
 * Retarget an LPI to @vcpu and, for a directly injected (hw) LPI,
 * move the backing VLPI mapping to the new vcpu's vPE as well.
 */
static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
{
	int ret = 0;
	unsigned long flags;

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	irq->target_vcpu = vcpu;
	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

	if (irq->hw) {
		struct its_vlpi_map map;

		ret = its_get_vlpi(irq->host_irq, &map);
		if (ret)
			return ret;

		/* Transfer the VLPI count from the old vPE to the new one. */
		if (map.vpe)
			atomic_dec(&map.vpe->vlpi_count);
		map.vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
		atomic_inc(&map.vpe->vlpi_count);

		ret = its_map_vlpi(irq->host_irq, &map);
	}

	return ret;
}

/*
 * Promotes the ITS view of affinity of an ITTE (which redistributor this LPI
 * is targeting) to the VGIC's view, which deals with target VCPUs.
3849ed24f4bSMarc Zyngier * Needs to be called whenever either the collection for a LPIs has 3859ed24f4bSMarc Zyngier * changed or the collection itself got retargeted. 3869ed24f4bSMarc Zyngier */ 3879ed24f4bSMarc Zyngier static void update_affinity_ite(struct kvm *kvm, struct its_ite *ite) 3889ed24f4bSMarc Zyngier { 3899ed24f4bSMarc Zyngier struct kvm_vcpu *vcpu; 3909ed24f4bSMarc Zyngier 3919ed24f4bSMarc Zyngier if (!its_is_collection_mapped(ite->collection)) 3929ed24f4bSMarc Zyngier return; 3939ed24f4bSMarc Zyngier 3949ed24f4bSMarc Zyngier vcpu = kvm_get_vcpu(kvm, ite->collection->target_addr); 3959ed24f4bSMarc Zyngier update_affinity(ite->irq, vcpu); 3969ed24f4bSMarc Zyngier } 3979ed24f4bSMarc Zyngier 3989ed24f4bSMarc Zyngier /* 3999ed24f4bSMarc Zyngier * Updates the target VCPU for every LPI targeting this collection. 4009ed24f4bSMarc Zyngier * Must be called with the its_lock mutex held. 4019ed24f4bSMarc Zyngier */ 4029ed24f4bSMarc Zyngier static void update_affinity_collection(struct kvm *kvm, struct vgic_its *its, 4039ed24f4bSMarc Zyngier struct its_collection *coll) 4049ed24f4bSMarc Zyngier { 4059ed24f4bSMarc Zyngier struct its_device *device; 4069ed24f4bSMarc Zyngier struct its_ite *ite; 4079ed24f4bSMarc Zyngier 4089ed24f4bSMarc Zyngier for_each_lpi_its(device, ite, its) { 409096560ddSGavin Shan if (ite->collection != coll) 4109ed24f4bSMarc Zyngier continue; 4119ed24f4bSMarc Zyngier 4129ed24f4bSMarc Zyngier update_affinity_ite(kvm, ite); 4139ed24f4bSMarc Zyngier } 4149ed24f4bSMarc Zyngier } 4159ed24f4bSMarc Zyngier 4169ed24f4bSMarc Zyngier static u32 max_lpis_propbaser(u64 propbaser) 4179ed24f4bSMarc Zyngier { 4189ed24f4bSMarc Zyngier int nr_idbits = (propbaser & 0x1f) + 1; 4199ed24f4bSMarc Zyngier 4209ed24f4bSMarc Zyngier return 1U << min(nr_idbits, INTERRUPT_ID_BITS_ITS); 4219ed24f4bSMarc Zyngier } 4229ed24f4bSMarc Zyngier 4239ed24f4bSMarc Zyngier /* 4249ed24f4bSMarc Zyngier * Sync the pending table pending bit of LPIs targeting @vcpu 4259ed24f4bSMarc 
 * with our own data structures. This relies on the LPI being
 * mapped before.
 */
static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
{
	gpa_t pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);
	struct vgic_irq *irq;
	int last_byte_offset = -1;
	int ret = 0;
	u32 *intids;
	int nr_irqs, i;
	unsigned long flags;
	u8 pendmask;

	nr_irqs = vgic_copy_lpi_list(vcpu->kvm, vcpu, &intids);
	if (nr_irqs < 0)
		return nr_irqs;

	for (i = 0; i < nr_irqs; i++) {
		int byte_offset, bit_nr;

		/* One bit per LPI in the guest's pending table. */
		byte_offset = intids[i] / BITS_PER_BYTE;
		bit_nr = intids[i] % BITS_PER_BYTE;

		/*
		 * For contiguously allocated LPIs chances are we just read
		 * this very same byte in the last iteration. Reuse that.
		 */
		if (byte_offset != last_byte_offset) {
			ret = kvm_read_guest_lock(vcpu->kvm,
						  pendbase + byte_offset,
						  &pendmask, 1);
			if (ret) {
				kfree(intids);
				return ret;
			}
			last_byte_offset = byte_offset;
		}

		irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]);
		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->pending_latch = pendmask & (1U << bit_nr);
		/* Note: this drops irq_lock for us. */
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}

	kfree(intids);

	return ret;
}

/* Emulates a (byte-granular) read of the GITS_TYPER register. */
static unsigned long vgic_mmio_read_its_typer(struct kvm *kvm,
					      struct vgic_its *its,
					      gpa_t addr, unsigned int len)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	u64 reg = GITS_TYPER_PLPIS;

	/*
	 * We use linear CPU numbers for redistributor addressing,
	 * so GITS_TYPER.PTA is 0.
	 * Also we force all PROPBASER registers to be the same, so
	 * CommonLPIAff is 0 as well.
	 * To avoid memory waste in the guest, we keep the number of IDBits and
	 * DevBits low - at least for the time being.
	 */
	reg |= GIC_ENCODE_SZ(VITS_TYPER_DEVBITS, 5) << GITS_TYPER_DEVBITS_SHIFT;
	reg |= GIC_ENCODE_SZ(VITS_TYPER_IDBITS, 5) << GITS_TYPER_IDBITS_SHIFT;
	reg |= GIC_ENCODE_SZ(abi->ite_esz, 4) << GITS_TYPER_ITT_ENTRY_SIZE_SHIFT;

	return extract_bytes(reg, addr & 7, len);
}

/* GITS_IIDR: implementer/product ID plus the currently selected ABI rev. */
static unsigned long vgic_mmio_read_its_iidr(struct kvm *kvm,
					     struct vgic_its *its,
					     gpa_t addr, unsigned int len)
{
	u32 val;

	val = (its->abi_rev << GITS_IIDR_REV_SHIFT) & GITS_IIDR_REV_MASK;
	val |= (PRODUCT_ID_KVM << GITS_IIDR_PRODUCTID_SHIFT) | IMPLEMENTER_ARM;
	return val;
}

/*
 * Write to GITS_IIDR (uaccess path): only the revision field is honoured,
 * and it selects one of the supported table ABIs.
 */
static int vgic_mmio_uaccess_write_its_iidr(struct kvm *kvm,
					    struct vgic_its *its,
					    gpa_t addr, unsigned int len,
					    unsigned long val)
{
	u32 rev = GITS_IIDR_REV(val);

	if (rev >= NR_ITS_ABIS)
		return -EINVAL;
	return vgic_its_set_abi(its, rev);
}

/* The read-only ID registers identifying this device as a GICv3 ITS. */
static unsigned long vgic_mmio_read_its_idregs(struct kvm *kvm,
					       struct vgic_its *its,
					       gpa_t addr, unsigned int len)
{
	switch (addr & 0xffff) {
	case GITS_PIDR0:
		return 0x92;	/* part number, bits[7:0] */
	case GITS_PIDR1:
		return 0xb4;	/* part number, bits[11:8] */
	case GITS_PIDR2:
		return GIC_PIDR2_ARCH_GICv3 | 0x0b;
	case GITS_PIDR4:
		return 0x40;	/* This is a 64K software visible page */
	/* The following are the ID registers for (any) GIC. */
	case GITS_CIDR0:
		return 0x0d;
	case GITS_CIDR1:
		return 0xf0;
	case GITS_CIDR2:
		return 0x05;
	case GITS_CIDR3:
		return 0xb1;
	}

	return 0;
}

/*
 * Look up (db, devid, eventid) in the LPI translation cache.
 * Both callers hold dist->lpi_list_lock; no extra reference is taken here.
 */
static struct vgic_irq *__vgic_its_check_cache(struct vgic_dist *dist,
					       phys_addr_t db,
					       u32 devid, u32 eventid)
{
	struct vgic_translation_cache_entry *cte;

	list_for_each_entry(cte, &dist->lpi_translation_cache, entry) {
		/*
		 * If we hit a NULL entry, there is nothing after this
		 * point.
		 */
		if (!cte->irq)
			break;

		if (cte->db != db || cte->devid != devid ||
		    cte->eventid != eventid)
			continue;

		/*
		 * Move this entry to the head, as it is the most
		 * recently used.
		 */
		if (!list_is_first(&cte->entry, &dist->lpi_translation_cache))
			list_move(&cte->entry, &dist->lpi_translation_cache);

		return cte->irq;
	}

	return NULL;
}

/*
 * Locked cache lookup; on a hit an additional reference is taken that the
 * caller must drop with vgic_put_irq().
 */
static struct vgic_irq *vgic_its_check_cache(struct kvm *kvm, phys_addr_t db,
					     u32 devid, u32 eventid)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq;
	unsigned long flags;

	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);

	irq = __vgic_its_check_cache(dist, db, devid, eventid);
	if (irq)
		vgic_get_irq_kref(irq);

	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

	return irq;
}

/* Insert a translation into the cache, evicting the LRU entry if needed. */
static void vgic_its_cache_translation(struct kvm *kvm, struct vgic_its *its,
				       u32 devid, u32 eventid,
				       struct vgic_irq *irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_translation_cache_entry *cte;
	unsigned long flags;
	phys_addr_t db;

	/* Do not cache a directly injected interrupt */
	if (irq->hw)
		return;

	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);

	if (unlikely(list_empty(&dist->lpi_translation_cache)))
		goto out;

	/*
	 * We could have raced with another CPU caching the same
	 * translation behind our back, so let's check it is not in
	 * already
	 */
	db = its->vgic_its_base + GITS_TRANSLATER;
	if (__vgic_its_check_cache(dist, db, devid, eventid))
		goto out;

	/* Always reuse the last entry (LRU policy) */
	cte = list_last_entry(&dist->lpi_translation_cache,
			      typeof(*cte), entry);

	/*
	 * Caching the translation implies having an extra reference
	 * to the interrupt, so drop the potential reference on what
	 * was in the cache, and increment it on the new interrupt.
	 */
	if (cte->irq)
		__vgic_put_lpi_locked(kvm, cte->irq);

	vgic_get_irq_kref(irq);

	cte->db = db;
	cte->devid = devid;
	cte->eventid = eventid;
	cte->irq = irq;

	/* Move the new translation to the head of the list */
	list_move(&cte->entry, &dist->lpi_translation_cache);

out:
	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
}

/* Drop every cached translation (and the references they hold). */
void vgic_its_invalidate_cache(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_translation_cache_entry *cte;
	unsigned long flags;

	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);

	list_for_each_entry(cte, &dist->lpi_translation_cache, entry) {
		/*
		 * If we hit a NULL entry, there is nothing after this
		 * point.
		 */
		if (!cte->irq)
			break;

		__vgic_put_lpi_locked(kvm, cte->irq);
		cte->irq = NULL;
	}

	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
}

/*
 * Translate a devid/eventid pair on @its into the struct vgic_irq it maps
 * to, caching the translation on success. Returns 0 and sets *irq on
 * success, a positive ITS error code for unmapped translations, or -EBUSY
 * when the ITS or the target vcpu's LPIs are disabled.
 */
int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
			 u32 devid, u32 eventid, struct vgic_irq **irq)
{
	struct kvm_vcpu *vcpu;
	struct its_ite *ite;

	if (!its->enabled)
		return -EBUSY;

	ite = find_ite(its, devid, eventid);
	if (!ite || !its_is_collection_mapped(ite->collection))
		return E_ITS_INT_UNMAPPED_INTERRUPT;

	vcpu = kvm_get_vcpu(kvm, ite->collection->target_addr);
	if (!vcpu)
		return E_ITS_INT_UNMAPPED_INTERRUPT;

	if (!vgic_lpis_enabled(vcpu))
		return -EBUSY;

	vgic_its_cache_translation(kvm, its, devid, eventid, ite->irq);

	*irq = ite->irq;
	return 0;
}

/*
 * Map an MSI (address + DevID) back to the vgic ITS device registered at
 * that doorbell address, or an ERR_PTR() on any mismatch.
 */
struct vgic_its *vgic_msi_to_its(struct kvm *kvm, struct kvm_msi *msi)
{
	u64 address;
	struct kvm_io_device *kvm_io_dev;
	struct vgic_io_device *iodev;

	if (!vgic_has_its(kvm))
		return ERR_PTR(-ENODEV);

	if (!(msi->flags & KVM_MSI_VALID_DEVID))
		return ERR_PTR(-EINVAL);

	address = (u64)msi->address_hi << 32 | msi->address_lo;

	kvm_io_dev = kvm_io_bus_get_dev(kvm, KVM_MMIO_BUS, address);
	if (!kvm_io_dev)
		return ERR_PTR(-EINVAL);

	if (kvm_io_dev->ops != &kvm_io_gic_ops)
		return ERR_PTR(-EINVAL);

	iodev = container_of(kvm_io_dev, struct vgic_io_device, dev);
	if (iodev->iodev_type != IODEV_ITS)
		return ERR_PTR(-EINVAL);

	return iodev->its;
}

/*
 * Find the target VCPU and the LPI number for a given devid/eventid pair
 * and make this IRQ pending, possibly injecting it.
 * Must be called with the its_lock mutex held.
 * Returns 0 on success, a positive error value for any ITS mapping
 * related errors and negative error values for generic errors.
7339ed24f4bSMarc Zyngier */ 7349ed24f4bSMarc Zyngier static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its, 7359ed24f4bSMarc Zyngier u32 devid, u32 eventid) 7369ed24f4bSMarc Zyngier { 7379ed24f4bSMarc Zyngier struct vgic_irq *irq = NULL; 7389ed24f4bSMarc Zyngier unsigned long flags; 7399ed24f4bSMarc Zyngier int err; 7409ed24f4bSMarc Zyngier 7419ed24f4bSMarc Zyngier err = vgic_its_resolve_lpi(kvm, its, devid, eventid, &irq); 7429ed24f4bSMarc Zyngier if (err) 7439ed24f4bSMarc Zyngier return err; 7449ed24f4bSMarc Zyngier 7459ed24f4bSMarc Zyngier if (irq->hw) 7469ed24f4bSMarc Zyngier return irq_set_irqchip_state(irq->host_irq, 7479ed24f4bSMarc Zyngier IRQCHIP_STATE_PENDING, true); 7489ed24f4bSMarc Zyngier 7499ed24f4bSMarc Zyngier raw_spin_lock_irqsave(&irq->irq_lock, flags); 7509ed24f4bSMarc Zyngier irq->pending_latch = true; 7519ed24f4bSMarc Zyngier vgic_queue_irq_unlock(kvm, irq, flags); 7529ed24f4bSMarc Zyngier 7539ed24f4bSMarc Zyngier return 0; 7549ed24f4bSMarc Zyngier } 7559ed24f4bSMarc Zyngier 7569ed24f4bSMarc Zyngier int vgic_its_inject_cached_translation(struct kvm *kvm, struct kvm_msi *msi) 7579ed24f4bSMarc Zyngier { 7589ed24f4bSMarc Zyngier struct vgic_irq *irq; 7599ed24f4bSMarc Zyngier unsigned long flags; 7609ed24f4bSMarc Zyngier phys_addr_t db; 7619ed24f4bSMarc Zyngier 7629ed24f4bSMarc Zyngier db = (u64)msi->address_hi << 32 | msi->address_lo; 7639ed24f4bSMarc Zyngier irq = vgic_its_check_cache(kvm, db, msi->devid, msi->data); 7649ed24f4bSMarc Zyngier if (!irq) 765a47dee55SMarc Zyngier return -EWOULDBLOCK; 7669ed24f4bSMarc Zyngier 7679ed24f4bSMarc Zyngier raw_spin_lock_irqsave(&irq->irq_lock, flags); 7689ed24f4bSMarc Zyngier irq->pending_latch = true; 7699ed24f4bSMarc Zyngier vgic_queue_irq_unlock(kvm, irq, flags); 77065b201bfSOliver Upton vgic_put_irq(kvm, irq); 7719ed24f4bSMarc Zyngier 7729ed24f4bSMarc Zyngier return 0; 7739ed24f4bSMarc Zyngier } 7749ed24f4bSMarc Zyngier 7759ed24f4bSMarc Zyngier /* 7769ed24f4bSMarc Zyngier * Queries the 
KVM IO bus framework to get the ITS pointer from the given 7779ed24f4bSMarc Zyngier * doorbell address. 7789ed24f4bSMarc Zyngier * We then call vgic_its_trigger_msi() with the decoded data. 7799ed24f4bSMarc Zyngier * According to the KVM_SIGNAL_MSI API description returns 1 on success. 7809ed24f4bSMarc Zyngier */ 7819ed24f4bSMarc Zyngier int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi) 7829ed24f4bSMarc Zyngier { 7839ed24f4bSMarc Zyngier struct vgic_its *its; 7849ed24f4bSMarc Zyngier int ret; 7859ed24f4bSMarc Zyngier 7869ed24f4bSMarc Zyngier if (!vgic_its_inject_cached_translation(kvm, msi)) 7879ed24f4bSMarc Zyngier return 1; 7889ed24f4bSMarc Zyngier 7899ed24f4bSMarc Zyngier its = vgic_msi_to_its(kvm, msi); 7909ed24f4bSMarc Zyngier if (IS_ERR(its)) 7919ed24f4bSMarc Zyngier return PTR_ERR(its); 7929ed24f4bSMarc Zyngier 7939ed24f4bSMarc Zyngier mutex_lock(&its->its_lock); 7949ed24f4bSMarc Zyngier ret = vgic_its_trigger_msi(kvm, its, msi->devid, msi->data); 7959ed24f4bSMarc Zyngier mutex_unlock(&its->its_lock); 7969ed24f4bSMarc Zyngier 7979ed24f4bSMarc Zyngier if (ret < 0) 7989ed24f4bSMarc Zyngier return ret; 7999ed24f4bSMarc Zyngier 8009ed24f4bSMarc Zyngier /* 8019ed24f4bSMarc Zyngier * KVM_SIGNAL_MSI demands a return value > 0 for success and 0 8029ed24f4bSMarc Zyngier * if the guest has blocked the MSI. So we map any LPI mapping 8039ed24f4bSMarc Zyngier * related error to that. 8049ed24f4bSMarc Zyngier */ 8059ed24f4bSMarc Zyngier if (ret) 8069ed24f4bSMarc Zyngier return 0; 8079ed24f4bSMarc Zyngier else 8089ed24f4bSMarc Zyngier return 1; 8099ed24f4bSMarc Zyngier } 8109ed24f4bSMarc Zyngier 8119ed24f4bSMarc Zyngier /* Requires the its_lock to be held. */ 8129ed24f4bSMarc Zyngier static void its_free_ite(struct kvm *kvm, struct its_ite *ite) 8139ed24f4bSMarc Zyngier { 8149ed24f4bSMarc Zyngier list_del(&ite->ite_list); 8159ed24f4bSMarc Zyngier 8169ed24f4bSMarc Zyngier /* This put matches the get in vgic_add_lpi. 
*/ 8179ed24f4bSMarc Zyngier if (ite->irq) { 8189ed24f4bSMarc Zyngier if (ite->irq->hw) 8199ed24f4bSMarc Zyngier WARN_ON(its_unmap_vlpi(ite->irq->host_irq)); 8209ed24f4bSMarc Zyngier 8219ed24f4bSMarc Zyngier vgic_put_irq(kvm, ite->irq); 8229ed24f4bSMarc Zyngier } 8239ed24f4bSMarc Zyngier 8249ed24f4bSMarc Zyngier kfree(ite); 8259ed24f4bSMarc Zyngier } 8269ed24f4bSMarc Zyngier 8279ed24f4bSMarc Zyngier static u64 its_cmd_mask_field(u64 *its_cmd, int word, int shift, int size) 8289ed24f4bSMarc Zyngier { 8299ed24f4bSMarc Zyngier return (le64_to_cpu(its_cmd[word]) >> shift) & (BIT_ULL(size) - 1); 8309ed24f4bSMarc Zyngier } 8319ed24f4bSMarc Zyngier 8329ed24f4bSMarc Zyngier #define its_cmd_get_command(cmd) its_cmd_mask_field(cmd, 0, 0, 8) 8339ed24f4bSMarc Zyngier #define its_cmd_get_deviceid(cmd) its_cmd_mask_field(cmd, 0, 32, 32) 8349ed24f4bSMarc Zyngier #define its_cmd_get_size(cmd) (its_cmd_mask_field(cmd, 1, 0, 5) + 1) 8359ed24f4bSMarc Zyngier #define its_cmd_get_id(cmd) its_cmd_mask_field(cmd, 1, 0, 32) 8369ed24f4bSMarc Zyngier #define its_cmd_get_physical_id(cmd) its_cmd_mask_field(cmd, 1, 32, 32) 8379ed24f4bSMarc Zyngier #define its_cmd_get_collection(cmd) its_cmd_mask_field(cmd, 2, 0, 16) 8389ed24f4bSMarc Zyngier #define its_cmd_get_ittaddr(cmd) (its_cmd_mask_field(cmd, 2, 8, 44) << 8) 8399ed24f4bSMarc Zyngier #define its_cmd_get_target_addr(cmd) its_cmd_mask_field(cmd, 2, 16, 32) 8409ed24f4bSMarc Zyngier #define its_cmd_get_validbit(cmd) its_cmd_mask_field(cmd, 2, 63, 1) 8419ed24f4bSMarc Zyngier 8429ed24f4bSMarc Zyngier /* 8439ed24f4bSMarc Zyngier * The DISCARD command frees an Interrupt Translation Table Entry (ITTE). 8449ed24f4bSMarc Zyngier * Must be called with the its_lock mutex held. 
8459ed24f4bSMarc Zyngier */ 8469ed24f4bSMarc Zyngier static int vgic_its_cmd_handle_discard(struct kvm *kvm, struct vgic_its *its, 8479ed24f4bSMarc Zyngier u64 *its_cmd) 8489ed24f4bSMarc Zyngier { 8499ed24f4bSMarc Zyngier u32 device_id = its_cmd_get_deviceid(its_cmd); 8509ed24f4bSMarc Zyngier u32 event_id = its_cmd_get_id(its_cmd); 8519ed24f4bSMarc Zyngier struct its_ite *ite; 8529ed24f4bSMarc Zyngier 8539ed24f4bSMarc Zyngier ite = find_ite(its, device_id, event_id); 8549ed24f4bSMarc Zyngier if (ite && its_is_collection_mapped(ite->collection)) { 8559ed24f4bSMarc Zyngier /* 8569ed24f4bSMarc Zyngier * Though the spec talks about removing the pending state, we 8579ed24f4bSMarc Zyngier * don't bother here since we clear the ITTE anyway and the 8589ed24f4bSMarc Zyngier * pending state is a property of the ITTE struct. 8599ed24f4bSMarc Zyngier */ 8609ed24f4bSMarc Zyngier vgic_its_invalidate_cache(kvm); 8619ed24f4bSMarc Zyngier 8629ed24f4bSMarc Zyngier its_free_ite(kvm, ite); 8639ed24f4bSMarc Zyngier return 0; 8649ed24f4bSMarc Zyngier } 8659ed24f4bSMarc Zyngier 8669ed24f4bSMarc Zyngier return E_ITS_DISCARD_UNMAPPED_INTERRUPT; 8679ed24f4bSMarc Zyngier } 8689ed24f4bSMarc Zyngier 8699ed24f4bSMarc Zyngier /* 8709ed24f4bSMarc Zyngier * The MOVI command moves an ITTE to a different collection. 8719ed24f4bSMarc Zyngier * Must be called with the its_lock mutex held. 
8729ed24f4bSMarc Zyngier */ 8739ed24f4bSMarc Zyngier static int vgic_its_cmd_handle_movi(struct kvm *kvm, struct vgic_its *its, 8749ed24f4bSMarc Zyngier u64 *its_cmd) 8759ed24f4bSMarc Zyngier { 8769ed24f4bSMarc Zyngier u32 device_id = its_cmd_get_deviceid(its_cmd); 8779ed24f4bSMarc Zyngier u32 event_id = its_cmd_get_id(its_cmd); 8789ed24f4bSMarc Zyngier u32 coll_id = its_cmd_get_collection(its_cmd); 8799ed24f4bSMarc Zyngier struct kvm_vcpu *vcpu; 8809ed24f4bSMarc Zyngier struct its_ite *ite; 8819ed24f4bSMarc Zyngier struct its_collection *collection; 8829ed24f4bSMarc Zyngier 8839ed24f4bSMarc Zyngier ite = find_ite(its, device_id, event_id); 8849ed24f4bSMarc Zyngier if (!ite) 8859ed24f4bSMarc Zyngier return E_ITS_MOVI_UNMAPPED_INTERRUPT; 8869ed24f4bSMarc Zyngier 8879ed24f4bSMarc Zyngier if (!its_is_collection_mapped(ite->collection)) 8889ed24f4bSMarc Zyngier return E_ITS_MOVI_UNMAPPED_COLLECTION; 8899ed24f4bSMarc Zyngier 8909ed24f4bSMarc Zyngier collection = find_collection(its, coll_id); 8919ed24f4bSMarc Zyngier if (!its_is_collection_mapped(collection)) 8929ed24f4bSMarc Zyngier return E_ITS_MOVI_UNMAPPED_COLLECTION; 8939ed24f4bSMarc Zyngier 8949ed24f4bSMarc Zyngier ite->collection = collection; 8959ed24f4bSMarc Zyngier vcpu = kvm_get_vcpu(kvm, collection->target_addr); 8969ed24f4bSMarc Zyngier 8979ed24f4bSMarc Zyngier vgic_its_invalidate_cache(kvm); 8989ed24f4bSMarc Zyngier 8999ed24f4bSMarc Zyngier return update_affinity(ite->irq, vcpu); 9009ed24f4bSMarc Zyngier } 9019ed24f4bSMarc Zyngier 902cafe7e54SRicardo Koller static bool __is_visible_gfn_locked(struct vgic_its *its, gpa_t gpa) 903cafe7e54SRicardo Koller { 904cafe7e54SRicardo Koller gfn_t gfn = gpa >> PAGE_SHIFT; 905cafe7e54SRicardo Koller int idx; 906cafe7e54SRicardo Koller bool ret; 907cafe7e54SRicardo Koller 908cafe7e54SRicardo Koller idx = srcu_read_lock(&its->dev->kvm->srcu); 909cafe7e54SRicardo Koller ret = kvm_is_visible_gfn(its->dev->kvm, gfn); 910cafe7e54SRicardo Koller 
srcu_read_unlock(&its->dev->kvm->srcu, idx); 911cafe7e54SRicardo Koller return ret; 912cafe7e54SRicardo Koller } 913cafe7e54SRicardo Koller 9149ed24f4bSMarc Zyngier /* 9159ed24f4bSMarc Zyngier * Check whether an ID can be stored into the corresponding guest table. 9169ed24f4bSMarc Zyngier * For a direct table this is pretty easy, but gets a bit nasty for 9179ed24f4bSMarc Zyngier * indirect tables. We check whether the resulting guest physical address 9189ed24f4bSMarc Zyngier * is actually valid (covered by a memslot and guest accessible). 9199ed24f4bSMarc Zyngier * For this we have to read the respective first level entry. 9209ed24f4bSMarc Zyngier */ 9219ed24f4bSMarc Zyngier static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id, 9229ed24f4bSMarc Zyngier gpa_t *eaddr) 9239ed24f4bSMarc Zyngier { 9249ed24f4bSMarc Zyngier int l1_tbl_size = GITS_BASER_NR_PAGES(baser) * SZ_64K; 9259ed24f4bSMarc Zyngier u64 indirect_ptr, type = GITS_BASER_TYPE(baser); 9269ed24f4bSMarc Zyngier phys_addr_t base = GITS_BASER_ADDR_48_to_52(baser); 9279ed24f4bSMarc Zyngier int esz = GITS_BASER_ENTRY_SIZE(baser); 928cafe7e54SRicardo Koller int index; 9299ed24f4bSMarc Zyngier 9309ed24f4bSMarc Zyngier switch (type) { 9319ed24f4bSMarc Zyngier case GITS_BASER_TYPE_DEVICE: 9329ed24f4bSMarc Zyngier if (id >= BIT_ULL(VITS_TYPER_DEVBITS)) 9339ed24f4bSMarc Zyngier return false; 9349ed24f4bSMarc Zyngier break; 9359ed24f4bSMarc Zyngier case GITS_BASER_TYPE_COLLECTION: 9369ed24f4bSMarc Zyngier /* as GITS_TYPER.CIL == 0, ITS supports 16-bit collection ID */ 9379ed24f4bSMarc Zyngier if (id >= BIT_ULL(16)) 9389ed24f4bSMarc Zyngier return false; 9399ed24f4bSMarc Zyngier break; 9409ed24f4bSMarc Zyngier default: 9419ed24f4bSMarc Zyngier return false; 9429ed24f4bSMarc Zyngier } 9439ed24f4bSMarc Zyngier 9449ed24f4bSMarc Zyngier if (!(baser & GITS_BASER_INDIRECT)) { 9459ed24f4bSMarc Zyngier phys_addr_t addr; 9469ed24f4bSMarc Zyngier 9479ed24f4bSMarc Zyngier if (id >= (l1_tbl_size / esz)) 
9489ed24f4bSMarc Zyngier return false; 9499ed24f4bSMarc Zyngier 9509ed24f4bSMarc Zyngier addr = base + id * esz; 9519ed24f4bSMarc Zyngier 9529ed24f4bSMarc Zyngier if (eaddr) 9539ed24f4bSMarc Zyngier *eaddr = addr; 9549ed24f4bSMarc Zyngier 955cafe7e54SRicardo Koller return __is_visible_gfn_locked(its, addr); 9569ed24f4bSMarc Zyngier } 9579ed24f4bSMarc Zyngier 9589ed24f4bSMarc Zyngier /* calculate and check the index into the 1st level */ 9599ed24f4bSMarc Zyngier index = id / (SZ_64K / esz); 9609ed24f4bSMarc Zyngier if (index >= (l1_tbl_size / sizeof(u64))) 9619ed24f4bSMarc Zyngier return false; 9629ed24f4bSMarc Zyngier 9639ed24f4bSMarc Zyngier /* Each 1st level entry is represented by a 64-bit value. */ 9649ed24f4bSMarc Zyngier if (kvm_read_guest_lock(its->dev->kvm, 9659ed24f4bSMarc Zyngier base + index * sizeof(indirect_ptr), 9669ed24f4bSMarc Zyngier &indirect_ptr, sizeof(indirect_ptr))) 9679ed24f4bSMarc Zyngier return false; 9689ed24f4bSMarc Zyngier 9699ed24f4bSMarc Zyngier indirect_ptr = le64_to_cpu(indirect_ptr); 9709ed24f4bSMarc Zyngier 9719ed24f4bSMarc Zyngier /* check the valid bit of the first level entry */ 9729ed24f4bSMarc Zyngier if (!(indirect_ptr & BIT_ULL(63))) 9739ed24f4bSMarc Zyngier return false; 9749ed24f4bSMarc Zyngier 9759ed24f4bSMarc Zyngier /* Mask the guest physical address and calculate the frame number. 
*/ 9769ed24f4bSMarc Zyngier indirect_ptr &= GENMASK_ULL(51, 16); 9779ed24f4bSMarc Zyngier 9789ed24f4bSMarc Zyngier /* Find the address of the actual entry */ 9799ed24f4bSMarc Zyngier index = id % (SZ_64K / esz); 9809ed24f4bSMarc Zyngier indirect_ptr += index * esz; 9819ed24f4bSMarc Zyngier 9829ed24f4bSMarc Zyngier if (eaddr) 9839ed24f4bSMarc Zyngier *eaddr = indirect_ptr; 9849ed24f4bSMarc Zyngier 985cafe7e54SRicardo Koller return __is_visible_gfn_locked(its, indirect_ptr); 9869ed24f4bSMarc Zyngier } 9879ed24f4bSMarc Zyngier 988cafe7e54SRicardo Koller /* 989cafe7e54SRicardo Koller * Check whether an event ID can be stored in the corresponding Interrupt 990cafe7e54SRicardo Koller * Translation Table, which starts at device->itt_addr. 991cafe7e54SRicardo Koller */ 992cafe7e54SRicardo Koller static bool vgic_its_check_event_id(struct vgic_its *its, struct its_device *device, 993cafe7e54SRicardo Koller u32 event_id) 994cafe7e54SRicardo Koller { 995cafe7e54SRicardo Koller const struct vgic_its_abi *abi = vgic_its_get_abi(its); 996cafe7e54SRicardo Koller int ite_esz = abi->ite_esz; 997cafe7e54SRicardo Koller gpa_t gpa; 998cafe7e54SRicardo Koller 999cafe7e54SRicardo Koller /* max table size is: BIT_ULL(device->num_eventid_bits) * ite_esz */ 1000cafe7e54SRicardo Koller if (event_id >= BIT_ULL(device->num_eventid_bits)) 1001cafe7e54SRicardo Koller return false; 1002cafe7e54SRicardo Koller 1003cafe7e54SRicardo Koller gpa = device->itt_addr + event_id * ite_esz; 1004cafe7e54SRicardo Koller return __is_visible_gfn_locked(its, gpa); 10059ed24f4bSMarc Zyngier } 10069ed24f4bSMarc Zyngier 1007a1ccfd6fSRicardo Koller /* 1008a1ccfd6fSRicardo Koller * Add a new collection into the ITS collection table. 1009a1ccfd6fSRicardo Koller * Returns 0 on success, and a negative error value for generic errors. 
1010a1ccfd6fSRicardo Koller */ 10119ed24f4bSMarc Zyngier static int vgic_its_alloc_collection(struct vgic_its *its, 10129ed24f4bSMarc Zyngier struct its_collection **colp, 10139ed24f4bSMarc Zyngier u32 coll_id) 10149ed24f4bSMarc Zyngier { 10159ed24f4bSMarc Zyngier struct its_collection *collection; 10169ed24f4bSMarc Zyngier 10173ef23167SJia He collection = kzalloc(sizeof(*collection), GFP_KERNEL_ACCOUNT); 10189ed24f4bSMarc Zyngier if (!collection) 10199ed24f4bSMarc Zyngier return -ENOMEM; 10209ed24f4bSMarc Zyngier 10219ed24f4bSMarc Zyngier collection->collection_id = coll_id; 10229ed24f4bSMarc Zyngier collection->target_addr = COLLECTION_NOT_MAPPED; 10239ed24f4bSMarc Zyngier 10249ed24f4bSMarc Zyngier list_add_tail(&collection->coll_list, &its->collection_list); 10259ed24f4bSMarc Zyngier *colp = collection; 10269ed24f4bSMarc Zyngier 10279ed24f4bSMarc Zyngier return 0; 10289ed24f4bSMarc Zyngier } 10299ed24f4bSMarc Zyngier 10309ed24f4bSMarc Zyngier static void vgic_its_free_collection(struct vgic_its *its, u32 coll_id) 10319ed24f4bSMarc Zyngier { 10329ed24f4bSMarc Zyngier struct its_collection *collection; 10339ed24f4bSMarc Zyngier struct its_device *device; 10349ed24f4bSMarc Zyngier struct its_ite *ite; 10359ed24f4bSMarc Zyngier 10369ed24f4bSMarc Zyngier /* 10379ed24f4bSMarc Zyngier * Clearing the mapping for that collection ID removes the 10389ed24f4bSMarc Zyngier * entry from the list. If there wasn't any before, we can 10399ed24f4bSMarc Zyngier * go home early. 
10409ed24f4bSMarc Zyngier */ 10419ed24f4bSMarc Zyngier collection = find_collection(its, coll_id); 10429ed24f4bSMarc Zyngier if (!collection) 10439ed24f4bSMarc Zyngier return; 10449ed24f4bSMarc Zyngier 10459ed24f4bSMarc Zyngier for_each_lpi_its(device, ite, its) 10469ed24f4bSMarc Zyngier if (ite->collection && 10479ed24f4bSMarc Zyngier ite->collection->collection_id == coll_id) 10489ed24f4bSMarc Zyngier ite->collection = NULL; 10499ed24f4bSMarc Zyngier 10509ed24f4bSMarc Zyngier list_del(&collection->coll_list); 10519ed24f4bSMarc Zyngier kfree(collection); 10529ed24f4bSMarc Zyngier } 10539ed24f4bSMarc Zyngier 10549ed24f4bSMarc Zyngier /* Must be called with its_lock mutex held */ 10559ed24f4bSMarc Zyngier static struct its_ite *vgic_its_alloc_ite(struct its_device *device, 10569ed24f4bSMarc Zyngier struct its_collection *collection, 10579ed24f4bSMarc Zyngier u32 event_id) 10589ed24f4bSMarc Zyngier { 10599ed24f4bSMarc Zyngier struct its_ite *ite; 10609ed24f4bSMarc Zyngier 10613ef23167SJia He ite = kzalloc(sizeof(*ite), GFP_KERNEL_ACCOUNT); 10629ed24f4bSMarc Zyngier if (!ite) 10639ed24f4bSMarc Zyngier return ERR_PTR(-ENOMEM); 10649ed24f4bSMarc Zyngier 10659ed24f4bSMarc Zyngier ite->event_id = event_id; 10669ed24f4bSMarc Zyngier ite->collection = collection; 10679ed24f4bSMarc Zyngier 10689ed24f4bSMarc Zyngier list_add_tail(&ite->ite_list, &device->itt_head); 10699ed24f4bSMarc Zyngier return ite; 10709ed24f4bSMarc Zyngier } 10719ed24f4bSMarc Zyngier 10729ed24f4bSMarc Zyngier /* 10739ed24f4bSMarc Zyngier * The MAPTI and MAPI commands map LPIs to ITTEs. 10749ed24f4bSMarc Zyngier * Must be called with its_lock mutex held. 
10759ed24f4bSMarc Zyngier */ 10769ed24f4bSMarc Zyngier static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its, 10779ed24f4bSMarc Zyngier u64 *its_cmd) 10789ed24f4bSMarc Zyngier { 10799ed24f4bSMarc Zyngier u32 device_id = its_cmd_get_deviceid(its_cmd); 10809ed24f4bSMarc Zyngier u32 event_id = its_cmd_get_id(its_cmd); 10819ed24f4bSMarc Zyngier u32 coll_id = its_cmd_get_collection(its_cmd); 10829ed24f4bSMarc Zyngier struct its_ite *ite; 10839ed24f4bSMarc Zyngier struct kvm_vcpu *vcpu = NULL; 10849ed24f4bSMarc Zyngier struct its_device *device; 10859ed24f4bSMarc Zyngier struct its_collection *collection, *new_coll = NULL; 10869ed24f4bSMarc Zyngier struct vgic_irq *irq; 10879ed24f4bSMarc Zyngier int lpi_nr; 10889ed24f4bSMarc Zyngier 10899ed24f4bSMarc Zyngier device = find_its_device(its, device_id); 10909ed24f4bSMarc Zyngier if (!device) 10919ed24f4bSMarc Zyngier return E_ITS_MAPTI_UNMAPPED_DEVICE; 10929ed24f4bSMarc Zyngier 1093cafe7e54SRicardo Koller if (!vgic_its_check_event_id(its, device, event_id)) 10949ed24f4bSMarc Zyngier return E_ITS_MAPTI_ID_OOR; 10959ed24f4bSMarc Zyngier 10969ed24f4bSMarc Zyngier if (its_cmd_get_command(its_cmd) == GITS_CMD_MAPTI) 10979ed24f4bSMarc Zyngier lpi_nr = its_cmd_get_physical_id(its_cmd); 10989ed24f4bSMarc Zyngier else 10999ed24f4bSMarc Zyngier lpi_nr = event_id; 11009ed24f4bSMarc Zyngier if (lpi_nr < GIC_LPI_OFFSET || 11019ed24f4bSMarc Zyngier lpi_nr >= max_lpis_propbaser(kvm->arch.vgic.propbaser)) 11029ed24f4bSMarc Zyngier return E_ITS_MAPTI_PHYSICALID_OOR; 11039ed24f4bSMarc Zyngier 11049ed24f4bSMarc Zyngier /* If there is an existing mapping, behavior is UNPREDICTABLE. 
*/ 11059ed24f4bSMarc Zyngier if (find_ite(its, device_id, event_id)) 11069ed24f4bSMarc Zyngier return 0; 11079ed24f4bSMarc Zyngier 11089ed24f4bSMarc Zyngier collection = find_collection(its, coll_id); 11099ed24f4bSMarc Zyngier if (!collection) { 1110a1ccfd6fSRicardo Koller int ret; 1111a1ccfd6fSRicardo Koller 1112a1ccfd6fSRicardo Koller if (!vgic_its_check_id(its, its->baser_coll_table, coll_id, NULL)) 1113a1ccfd6fSRicardo Koller return E_ITS_MAPC_COLLECTION_OOR; 1114a1ccfd6fSRicardo Koller 1115a1ccfd6fSRicardo Koller ret = vgic_its_alloc_collection(its, &collection, coll_id); 11169ed24f4bSMarc Zyngier if (ret) 11179ed24f4bSMarc Zyngier return ret; 11189ed24f4bSMarc Zyngier new_coll = collection; 11199ed24f4bSMarc Zyngier } 11209ed24f4bSMarc Zyngier 11219ed24f4bSMarc Zyngier ite = vgic_its_alloc_ite(device, collection, event_id); 11229ed24f4bSMarc Zyngier if (IS_ERR(ite)) { 11239ed24f4bSMarc Zyngier if (new_coll) 11249ed24f4bSMarc Zyngier vgic_its_free_collection(its, coll_id); 11259ed24f4bSMarc Zyngier return PTR_ERR(ite); 11269ed24f4bSMarc Zyngier } 11279ed24f4bSMarc Zyngier 11289ed24f4bSMarc Zyngier if (its_is_collection_mapped(collection)) 11299ed24f4bSMarc Zyngier vcpu = kvm_get_vcpu(kvm, collection->target_addr); 11309ed24f4bSMarc Zyngier 11319ed24f4bSMarc Zyngier irq = vgic_add_lpi(kvm, lpi_nr, vcpu); 11329ed24f4bSMarc Zyngier if (IS_ERR(irq)) { 11339ed24f4bSMarc Zyngier if (new_coll) 11349ed24f4bSMarc Zyngier vgic_its_free_collection(its, coll_id); 11359ed24f4bSMarc Zyngier its_free_ite(kvm, ite); 11369ed24f4bSMarc Zyngier return PTR_ERR(irq); 11379ed24f4bSMarc Zyngier } 11389ed24f4bSMarc Zyngier ite->irq = irq; 11399ed24f4bSMarc Zyngier 11409ed24f4bSMarc Zyngier return 0; 11419ed24f4bSMarc Zyngier } 11429ed24f4bSMarc Zyngier 11439ed24f4bSMarc Zyngier /* Requires the its_lock to be held. 
*/ 11449ed24f4bSMarc Zyngier static void vgic_its_free_device(struct kvm *kvm, struct its_device *device) 11459ed24f4bSMarc Zyngier { 11469ed24f4bSMarc Zyngier struct its_ite *ite, *temp; 11479ed24f4bSMarc Zyngier 11489ed24f4bSMarc Zyngier /* 11499ed24f4bSMarc Zyngier * The spec says that unmapping a device with still valid 11509ed24f4bSMarc Zyngier * ITTEs associated is UNPREDICTABLE. We remove all ITTEs, 11519ed24f4bSMarc Zyngier * since we cannot leave the memory unreferenced. 11529ed24f4bSMarc Zyngier */ 11539ed24f4bSMarc Zyngier list_for_each_entry_safe(ite, temp, &device->itt_head, ite_list) 11549ed24f4bSMarc Zyngier its_free_ite(kvm, ite); 11559ed24f4bSMarc Zyngier 11569ed24f4bSMarc Zyngier vgic_its_invalidate_cache(kvm); 11579ed24f4bSMarc Zyngier 11589ed24f4bSMarc Zyngier list_del(&device->dev_list); 11599ed24f4bSMarc Zyngier kfree(device); 11609ed24f4bSMarc Zyngier } 11619ed24f4bSMarc Zyngier 11629ed24f4bSMarc Zyngier /* its lock must be held */ 11639ed24f4bSMarc Zyngier static void vgic_its_free_device_list(struct kvm *kvm, struct vgic_its *its) 11649ed24f4bSMarc Zyngier { 11659ed24f4bSMarc Zyngier struct its_device *cur, *temp; 11669ed24f4bSMarc Zyngier 11679ed24f4bSMarc Zyngier list_for_each_entry_safe(cur, temp, &its->device_list, dev_list) 11689ed24f4bSMarc Zyngier vgic_its_free_device(kvm, cur); 11699ed24f4bSMarc Zyngier } 11709ed24f4bSMarc Zyngier 11719ed24f4bSMarc Zyngier /* its lock must be held */ 11729ed24f4bSMarc Zyngier static void vgic_its_free_collection_list(struct kvm *kvm, struct vgic_its *its) 11739ed24f4bSMarc Zyngier { 11749ed24f4bSMarc Zyngier struct its_collection *cur, *temp; 11759ed24f4bSMarc Zyngier 11769ed24f4bSMarc Zyngier list_for_each_entry_safe(cur, temp, &its->collection_list, coll_list) 11779ed24f4bSMarc Zyngier vgic_its_free_collection(its, cur->collection_id); 11789ed24f4bSMarc Zyngier } 11799ed24f4bSMarc Zyngier 11809ed24f4bSMarc Zyngier /* Must be called with its_lock mutex held */ 11819ed24f4bSMarc Zyngier static 
struct its_device *vgic_its_alloc_device(struct vgic_its *its, 11829ed24f4bSMarc Zyngier u32 device_id, gpa_t itt_addr, 11839ed24f4bSMarc Zyngier u8 num_eventid_bits) 11849ed24f4bSMarc Zyngier { 11859ed24f4bSMarc Zyngier struct its_device *device; 11869ed24f4bSMarc Zyngier 11873ef23167SJia He device = kzalloc(sizeof(*device), GFP_KERNEL_ACCOUNT); 11889ed24f4bSMarc Zyngier if (!device) 11899ed24f4bSMarc Zyngier return ERR_PTR(-ENOMEM); 11909ed24f4bSMarc Zyngier 11919ed24f4bSMarc Zyngier device->device_id = device_id; 11929ed24f4bSMarc Zyngier device->itt_addr = itt_addr; 11939ed24f4bSMarc Zyngier device->num_eventid_bits = num_eventid_bits; 11949ed24f4bSMarc Zyngier INIT_LIST_HEAD(&device->itt_head); 11959ed24f4bSMarc Zyngier 11969ed24f4bSMarc Zyngier list_add_tail(&device->dev_list, &its->device_list); 11979ed24f4bSMarc Zyngier return device; 11989ed24f4bSMarc Zyngier } 11999ed24f4bSMarc Zyngier 12009ed24f4bSMarc Zyngier /* 12019ed24f4bSMarc Zyngier * MAPD maps or unmaps a device ID to Interrupt Translation Tables (ITTs). 12029ed24f4bSMarc Zyngier * Must be called with the its_lock mutex held. 
12039ed24f4bSMarc Zyngier */ 12049ed24f4bSMarc Zyngier static int vgic_its_cmd_handle_mapd(struct kvm *kvm, struct vgic_its *its, 12059ed24f4bSMarc Zyngier u64 *its_cmd) 12069ed24f4bSMarc Zyngier { 12079ed24f4bSMarc Zyngier u32 device_id = its_cmd_get_deviceid(its_cmd); 12089ed24f4bSMarc Zyngier bool valid = its_cmd_get_validbit(its_cmd); 12099ed24f4bSMarc Zyngier u8 num_eventid_bits = its_cmd_get_size(its_cmd); 12109ed24f4bSMarc Zyngier gpa_t itt_addr = its_cmd_get_ittaddr(its_cmd); 12119ed24f4bSMarc Zyngier struct its_device *device; 12129ed24f4bSMarc Zyngier 12139ed24f4bSMarc Zyngier if (!vgic_its_check_id(its, its->baser_device_table, device_id, NULL)) 12149ed24f4bSMarc Zyngier return E_ITS_MAPD_DEVICE_OOR; 12159ed24f4bSMarc Zyngier 12169ed24f4bSMarc Zyngier if (valid && num_eventid_bits > VITS_TYPER_IDBITS) 12179ed24f4bSMarc Zyngier return E_ITS_MAPD_ITTSIZE_OOR; 12189ed24f4bSMarc Zyngier 12199ed24f4bSMarc Zyngier device = find_its_device(its, device_id); 12209ed24f4bSMarc Zyngier 12219ed24f4bSMarc Zyngier /* 12229ed24f4bSMarc Zyngier * The spec says that calling MAPD on an already mapped device 12239ed24f4bSMarc Zyngier * invalidates all cached data for this device. We implement this 12249ed24f4bSMarc Zyngier * by removing the mapping and re-establishing it. 12259ed24f4bSMarc Zyngier */ 12269ed24f4bSMarc Zyngier if (device) 12279ed24f4bSMarc Zyngier vgic_its_free_device(kvm, device); 12289ed24f4bSMarc Zyngier 12299ed24f4bSMarc Zyngier /* 12309ed24f4bSMarc Zyngier * The spec does not say whether unmapping a not-mapped device 12319ed24f4bSMarc Zyngier * is an error, so we are done in any case. 
12329ed24f4bSMarc Zyngier */ 12339ed24f4bSMarc Zyngier if (!valid) 12349ed24f4bSMarc Zyngier return 0; 12359ed24f4bSMarc Zyngier 12369ed24f4bSMarc Zyngier device = vgic_its_alloc_device(its, device_id, itt_addr, 12379ed24f4bSMarc Zyngier num_eventid_bits); 12389ed24f4bSMarc Zyngier 12399ed24f4bSMarc Zyngier return PTR_ERR_OR_ZERO(device); 12409ed24f4bSMarc Zyngier } 12419ed24f4bSMarc Zyngier 12429ed24f4bSMarc Zyngier /* 12439ed24f4bSMarc Zyngier * The MAPC command maps collection IDs to redistributors. 12449ed24f4bSMarc Zyngier * Must be called with the its_lock mutex held. 12459ed24f4bSMarc Zyngier */ 12469ed24f4bSMarc Zyngier static int vgic_its_cmd_handle_mapc(struct kvm *kvm, struct vgic_its *its, 12479ed24f4bSMarc Zyngier u64 *its_cmd) 12489ed24f4bSMarc Zyngier { 12499ed24f4bSMarc Zyngier u16 coll_id; 12509ed24f4bSMarc Zyngier u32 target_addr; 12519ed24f4bSMarc Zyngier struct its_collection *collection; 12529ed24f4bSMarc Zyngier bool valid; 12539ed24f4bSMarc Zyngier 12549ed24f4bSMarc Zyngier valid = its_cmd_get_validbit(its_cmd); 12559ed24f4bSMarc Zyngier coll_id = its_cmd_get_collection(its_cmd); 12569ed24f4bSMarc Zyngier target_addr = its_cmd_get_target_addr(its_cmd); 12579ed24f4bSMarc Zyngier 12589ed24f4bSMarc Zyngier if (target_addr >= atomic_read(&kvm->online_vcpus)) 12599ed24f4bSMarc Zyngier return E_ITS_MAPC_PROCNUM_OOR; 12609ed24f4bSMarc Zyngier 12619ed24f4bSMarc Zyngier if (!valid) { 12629ed24f4bSMarc Zyngier vgic_its_free_collection(its, coll_id); 12639ed24f4bSMarc Zyngier vgic_its_invalidate_cache(kvm); 12649ed24f4bSMarc Zyngier } else { 12659ed24f4bSMarc Zyngier collection = find_collection(its, coll_id); 12669ed24f4bSMarc Zyngier 12679ed24f4bSMarc Zyngier if (!collection) { 12689ed24f4bSMarc Zyngier int ret; 12699ed24f4bSMarc Zyngier 1270a1ccfd6fSRicardo Koller if (!vgic_its_check_id(its, its->baser_coll_table, 1271a1ccfd6fSRicardo Koller coll_id, NULL)) 1272a1ccfd6fSRicardo Koller return E_ITS_MAPC_COLLECTION_OOR; 1273a1ccfd6fSRicardo Koller 
12749ed24f4bSMarc Zyngier ret = vgic_its_alloc_collection(its, &collection, 12759ed24f4bSMarc Zyngier coll_id); 12769ed24f4bSMarc Zyngier if (ret) 12779ed24f4bSMarc Zyngier return ret; 12789ed24f4bSMarc Zyngier collection->target_addr = target_addr; 12799ed24f4bSMarc Zyngier } else { 12809ed24f4bSMarc Zyngier collection->target_addr = target_addr; 12819ed24f4bSMarc Zyngier update_affinity_collection(kvm, its, collection); 12829ed24f4bSMarc Zyngier } 12839ed24f4bSMarc Zyngier } 12849ed24f4bSMarc Zyngier 12859ed24f4bSMarc Zyngier return 0; 12869ed24f4bSMarc Zyngier } 12879ed24f4bSMarc Zyngier 12889ed24f4bSMarc Zyngier /* 12899ed24f4bSMarc Zyngier * The CLEAR command removes the pending state for a particular LPI. 12909ed24f4bSMarc Zyngier * Must be called with the its_lock mutex held. 12919ed24f4bSMarc Zyngier */ 12929ed24f4bSMarc Zyngier static int vgic_its_cmd_handle_clear(struct kvm *kvm, struct vgic_its *its, 12939ed24f4bSMarc Zyngier u64 *its_cmd) 12949ed24f4bSMarc Zyngier { 12959ed24f4bSMarc Zyngier u32 device_id = its_cmd_get_deviceid(its_cmd); 12969ed24f4bSMarc Zyngier u32 event_id = its_cmd_get_id(its_cmd); 12979ed24f4bSMarc Zyngier struct its_ite *ite; 12989ed24f4bSMarc Zyngier 12999ed24f4bSMarc Zyngier 13009ed24f4bSMarc Zyngier ite = find_ite(its, device_id, event_id); 13019ed24f4bSMarc Zyngier if (!ite) 13029ed24f4bSMarc Zyngier return E_ITS_CLEAR_UNMAPPED_INTERRUPT; 13039ed24f4bSMarc Zyngier 13049ed24f4bSMarc Zyngier ite->irq->pending_latch = false; 13059ed24f4bSMarc Zyngier 13069ed24f4bSMarc Zyngier if (ite->irq->hw) 13079ed24f4bSMarc Zyngier return irq_set_irqchip_state(ite->irq->host_irq, 13089ed24f4bSMarc Zyngier IRQCHIP_STATE_PENDING, false); 13099ed24f4bSMarc Zyngier 13109ed24f4bSMarc Zyngier return 0; 13119ed24f4bSMarc Zyngier } 13129ed24f4bSMarc Zyngier 13134645d11fSMarc Zyngier int vgic_its_inv_lpi(struct kvm *kvm, struct vgic_irq *irq) 13144645d11fSMarc Zyngier { 13154645d11fSMarc Zyngier return update_lpi_config(kvm, irq, NULL, true); 
13164645d11fSMarc Zyngier } 13174645d11fSMarc Zyngier 13189ed24f4bSMarc Zyngier /* 13199ed24f4bSMarc Zyngier * The INV command syncs the configuration bits from the memory table. 13209ed24f4bSMarc Zyngier * Must be called with the its_lock mutex held. 13219ed24f4bSMarc Zyngier */ 13229ed24f4bSMarc Zyngier static int vgic_its_cmd_handle_inv(struct kvm *kvm, struct vgic_its *its, 13239ed24f4bSMarc Zyngier u64 *its_cmd) 13249ed24f4bSMarc Zyngier { 13259ed24f4bSMarc Zyngier u32 device_id = its_cmd_get_deviceid(its_cmd); 13269ed24f4bSMarc Zyngier u32 event_id = its_cmd_get_id(its_cmd); 13279ed24f4bSMarc Zyngier struct its_ite *ite; 13289ed24f4bSMarc Zyngier 13299ed24f4bSMarc Zyngier 13309ed24f4bSMarc Zyngier ite = find_ite(its, device_id, event_id); 13319ed24f4bSMarc Zyngier if (!ite) 13329ed24f4bSMarc Zyngier return E_ITS_INV_UNMAPPED_INTERRUPT; 13339ed24f4bSMarc Zyngier 13344645d11fSMarc Zyngier return vgic_its_inv_lpi(kvm, ite->irq); 13354645d11fSMarc Zyngier } 13364645d11fSMarc Zyngier 13374645d11fSMarc Zyngier /** 13384645d11fSMarc Zyngier * vgic_its_invall - invalidate all LPIs targetting a given vcpu 13394645d11fSMarc Zyngier * @vcpu: the vcpu for which the RD is targetted by an invalidation 13404645d11fSMarc Zyngier * 13414645d11fSMarc Zyngier * Contrary to the INVALL command, this targets a RD instead of a 13424645d11fSMarc Zyngier * collection, and we don't need to hold the its_lock, since no ITS is 13434645d11fSMarc Zyngier * involved here. 
13444645d11fSMarc Zyngier */ 13454645d11fSMarc Zyngier int vgic_its_invall(struct kvm_vcpu *vcpu) 13464645d11fSMarc Zyngier { 13474645d11fSMarc Zyngier struct kvm *kvm = vcpu->kvm; 13484645d11fSMarc Zyngier int irq_count, i = 0; 13494645d11fSMarc Zyngier u32 *intids; 13504645d11fSMarc Zyngier 13514645d11fSMarc Zyngier irq_count = vgic_copy_lpi_list(kvm, vcpu, &intids); 13524645d11fSMarc Zyngier if (irq_count < 0) 13534645d11fSMarc Zyngier return irq_count; 13544645d11fSMarc Zyngier 13554645d11fSMarc Zyngier for (i = 0; i < irq_count; i++) { 13564645d11fSMarc Zyngier struct vgic_irq *irq = vgic_get_irq(kvm, NULL, intids[i]); 13574645d11fSMarc Zyngier if (!irq) 13584645d11fSMarc Zyngier continue; 13594645d11fSMarc Zyngier update_lpi_config(kvm, irq, vcpu, false); 13604645d11fSMarc Zyngier vgic_put_irq(kvm, irq); 13614645d11fSMarc Zyngier } 13624645d11fSMarc Zyngier 13634645d11fSMarc Zyngier kfree(intids); 13644645d11fSMarc Zyngier 13654645d11fSMarc Zyngier if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.its_vm) 13664645d11fSMarc Zyngier its_invall_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe); 13674645d11fSMarc Zyngier 13684645d11fSMarc Zyngier return 0; 13699ed24f4bSMarc Zyngier } 13709ed24f4bSMarc Zyngier 13719ed24f4bSMarc Zyngier /* 13729ed24f4bSMarc Zyngier * The INVALL command requests flushing of all IRQ data in this collection. 13739ed24f4bSMarc Zyngier * Find the VCPU mapped to that collection, then iterate over the VM's list 13749ed24f4bSMarc Zyngier * of mapped LPIs and update the configuration for each IRQ which targets 13759ed24f4bSMarc Zyngier * the specified vcpu. The configuration will be read from the in-memory 13769ed24f4bSMarc Zyngier * configuration table. 13779ed24f4bSMarc Zyngier * Must be called with the its_lock mutex held. 
13789ed24f4bSMarc Zyngier */ 13799ed24f4bSMarc Zyngier static int vgic_its_cmd_handle_invall(struct kvm *kvm, struct vgic_its *its, 13809ed24f4bSMarc Zyngier u64 *its_cmd) 13819ed24f4bSMarc Zyngier { 13829ed24f4bSMarc Zyngier u32 coll_id = its_cmd_get_collection(its_cmd); 13839ed24f4bSMarc Zyngier struct its_collection *collection; 13849ed24f4bSMarc Zyngier struct kvm_vcpu *vcpu; 13859ed24f4bSMarc Zyngier 13869ed24f4bSMarc Zyngier collection = find_collection(its, coll_id); 13879ed24f4bSMarc Zyngier if (!its_is_collection_mapped(collection)) 13889ed24f4bSMarc Zyngier return E_ITS_INVALL_UNMAPPED_COLLECTION; 13899ed24f4bSMarc Zyngier 13909ed24f4bSMarc Zyngier vcpu = kvm_get_vcpu(kvm, collection->target_addr); 13914645d11fSMarc Zyngier vgic_its_invall(vcpu); 13929ed24f4bSMarc Zyngier 13939ed24f4bSMarc Zyngier return 0; 13949ed24f4bSMarc Zyngier } 13959ed24f4bSMarc Zyngier 13969ed24f4bSMarc Zyngier /* 13979ed24f4bSMarc Zyngier * The MOVALL command moves the pending state of all IRQs targeting one 13989ed24f4bSMarc Zyngier * redistributor to another. We don't hold the pending state in the VCPUs, 13999ed24f4bSMarc Zyngier * but in the IRQs instead, so there is really not much to do for us here. 14009ed24f4bSMarc Zyngier * However the spec says that no IRQ must target the old redistributor 14019ed24f4bSMarc Zyngier * afterwards, so we make sure that no LPI is using the associated target_vcpu. 14029ed24f4bSMarc Zyngier * This command affects all LPIs in the system that target that redistributor. 
14039ed24f4bSMarc Zyngier */ 14049ed24f4bSMarc Zyngier static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its, 14059ed24f4bSMarc Zyngier u64 *its_cmd) 14069ed24f4bSMarc Zyngier { 14079ed24f4bSMarc Zyngier u32 target1_addr = its_cmd_get_target_addr(its_cmd); 14089ed24f4bSMarc Zyngier u32 target2_addr = its_cmd_mask_field(its_cmd, 3, 16, 32); 14099ed24f4bSMarc Zyngier struct kvm_vcpu *vcpu1, *vcpu2; 14109ed24f4bSMarc Zyngier struct vgic_irq *irq; 14119ed24f4bSMarc Zyngier u32 *intids; 14129ed24f4bSMarc Zyngier int irq_count, i; 14139ed24f4bSMarc Zyngier 14149ed24f4bSMarc Zyngier if (target1_addr >= atomic_read(&kvm->online_vcpus) || 14159ed24f4bSMarc Zyngier target2_addr >= atomic_read(&kvm->online_vcpus)) 14169ed24f4bSMarc Zyngier return E_ITS_MOVALL_PROCNUM_OOR; 14179ed24f4bSMarc Zyngier 14189ed24f4bSMarc Zyngier if (target1_addr == target2_addr) 14199ed24f4bSMarc Zyngier return 0; 14209ed24f4bSMarc Zyngier 14219ed24f4bSMarc Zyngier vcpu1 = kvm_get_vcpu(kvm, target1_addr); 14229ed24f4bSMarc Zyngier vcpu2 = kvm_get_vcpu(kvm, target2_addr); 14239ed24f4bSMarc Zyngier 14249ed24f4bSMarc Zyngier irq_count = vgic_copy_lpi_list(kvm, vcpu1, &intids); 14259ed24f4bSMarc Zyngier if (irq_count < 0) 14269ed24f4bSMarc Zyngier return irq_count; 14279ed24f4bSMarc Zyngier 14289ed24f4bSMarc Zyngier for (i = 0; i < irq_count; i++) { 14299ed24f4bSMarc Zyngier irq = vgic_get_irq(kvm, NULL, intids[i]); 1430*fcf90b47SOliver Upton if (!irq) 1431*fcf90b47SOliver Upton continue; 14329ed24f4bSMarc Zyngier 14339ed24f4bSMarc Zyngier update_affinity(irq, vcpu2); 14349ed24f4bSMarc Zyngier 14359ed24f4bSMarc Zyngier vgic_put_irq(kvm, irq); 14369ed24f4bSMarc Zyngier } 14379ed24f4bSMarc Zyngier 14389ed24f4bSMarc Zyngier vgic_its_invalidate_cache(kvm); 14399ed24f4bSMarc Zyngier 14409ed24f4bSMarc Zyngier kfree(intids); 14419ed24f4bSMarc Zyngier return 0; 14429ed24f4bSMarc Zyngier } 14439ed24f4bSMarc Zyngier 14449ed24f4bSMarc Zyngier /* 14459ed24f4bSMarc Zyngier * The INT command 
injects the LPI associated with that DevID/EvID pair. 14469ed24f4bSMarc Zyngier * Must be called with the its_lock mutex held. 14479ed24f4bSMarc Zyngier */ 14489ed24f4bSMarc Zyngier static int vgic_its_cmd_handle_int(struct kvm *kvm, struct vgic_its *its, 14499ed24f4bSMarc Zyngier u64 *its_cmd) 14509ed24f4bSMarc Zyngier { 14519ed24f4bSMarc Zyngier u32 msi_data = its_cmd_get_id(its_cmd); 14529ed24f4bSMarc Zyngier u64 msi_devid = its_cmd_get_deviceid(its_cmd); 14539ed24f4bSMarc Zyngier 14549ed24f4bSMarc Zyngier return vgic_its_trigger_msi(kvm, its, msi_devid, msi_data); 14559ed24f4bSMarc Zyngier } 14569ed24f4bSMarc Zyngier 14579ed24f4bSMarc Zyngier /* 14589ed24f4bSMarc Zyngier * This function is called with the its_cmd lock held, but the ITS data 14599ed24f4bSMarc Zyngier * structure lock dropped. 14609ed24f4bSMarc Zyngier */ 14619ed24f4bSMarc Zyngier static int vgic_its_handle_command(struct kvm *kvm, struct vgic_its *its, 14629ed24f4bSMarc Zyngier u64 *its_cmd) 14639ed24f4bSMarc Zyngier { 14649ed24f4bSMarc Zyngier int ret = -ENODEV; 14659ed24f4bSMarc Zyngier 14669ed24f4bSMarc Zyngier mutex_lock(&its->its_lock); 14679ed24f4bSMarc Zyngier switch (its_cmd_get_command(its_cmd)) { 14689ed24f4bSMarc Zyngier case GITS_CMD_MAPD: 14699ed24f4bSMarc Zyngier ret = vgic_its_cmd_handle_mapd(kvm, its, its_cmd); 14709ed24f4bSMarc Zyngier break; 14719ed24f4bSMarc Zyngier case GITS_CMD_MAPC: 14729ed24f4bSMarc Zyngier ret = vgic_its_cmd_handle_mapc(kvm, its, its_cmd); 14739ed24f4bSMarc Zyngier break; 14749ed24f4bSMarc Zyngier case GITS_CMD_MAPI: 14759ed24f4bSMarc Zyngier ret = vgic_its_cmd_handle_mapi(kvm, its, its_cmd); 14769ed24f4bSMarc Zyngier break; 14779ed24f4bSMarc Zyngier case GITS_CMD_MAPTI: 14789ed24f4bSMarc Zyngier ret = vgic_its_cmd_handle_mapi(kvm, its, its_cmd); 14799ed24f4bSMarc Zyngier break; 14809ed24f4bSMarc Zyngier case GITS_CMD_MOVI: 14819ed24f4bSMarc Zyngier ret = vgic_its_cmd_handle_movi(kvm, its, its_cmd); 14829ed24f4bSMarc Zyngier break; 14839ed24f4bSMarc 
Zyngier case GITS_CMD_DISCARD: 14849ed24f4bSMarc Zyngier ret = vgic_its_cmd_handle_discard(kvm, its, its_cmd); 14859ed24f4bSMarc Zyngier break; 14869ed24f4bSMarc Zyngier case GITS_CMD_CLEAR: 14879ed24f4bSMarc Zyngier ret = vgic_its_cmd_handle_clear(kvm, its, its_cmd); 14889ed24f4bSMarc Zyngier break; 14899ed24f4bSMarc Zyngier case GITS_CMD_MOVALL: 14909ed24f4bSMarc Zyngier ret = vgic_its_cmd_handle_movall(kvm, its, its_cmd); 14919ed24f4bSMarc Zyngier break; 14929ed24f4bSMarc Zyngier case GITS_CMD_INT: 14939ed24f4bSMarc Zyngier ret = vgic_its_cmd_handle_int(kvm, its, its_cmd); 14949ed24f4bSMarc Zyngier break; 14959ed24f4bSMarc Zyngier case GITS_CMD_INV: 14969ed24f4bSMarc Zyngier ret = vgic_its_cmd_handle_inv(kvm, its, its_cmd); 14979ed24f4bSMarc Zyngier break; 14989ed24f4bSMarc Zyngier case GITS_CMD_INVALL: 14999ed24f4bSMarc Zyngier ret = vgic_its_cmd_handle_invall(kvm, its, its_cmd); 15009ed24f4bSMarc Zyngier break; 15019ed24f4bSMarc Zyngier case GITS_CMD_SYNC: 15029ed24f4bSMarc Zyngier /* we ignore this command: we are in sync all of the time */ 15039ed24f4bSMarc Zyngier ret = 0; 15049ed24f4bSMarc Zyngier break; 15059ed24f4bSMarc Zyngier } 15069ed24f4bSMarc Zyngier mutex_unlock(&its->its_lock); 15079ed24f4bSMarc Zyngier 15089ed24f4bSMarc Zyngier return ret; 15099ed24f4bSMarc Zyngier } 15109ed24f4bSMarc Zyngier 15119ed24f4bSMarc Zyngier static u64 vgic_sanitise_its_baser(u64 reg) 15129ed24f4bSMarc Zyngier { 15139ed24f4bSMarc Zyngier reg = vgic_sanitise_field(reg, GITS_BASER_SHAREABILITY_MASK, 15149ed24f4bSMarc Zyngier GITS_BASER_SHAREABILITY_SHIFT, 15159ed24f4bSMarc Zyngier vgic_sanitise_shareability); 15169ed24f4bSMarc Zyngier reg = vgic_sanitise_field(reg, GITS_BASER_INNER_CACHEABILITY_MASK, 15179ed24f4bSMarc Zyngier GITS_BASER_INNER_CACHEABILITY_SHIFT, 15189ed24f4bSMarc Zyngier vgic_sanitise_inner_cacheability); 15199ed24f4bSMarc Zyngier reg = vgic_sanitise_field(reg, GITS_BASER_OUTER_CACHEABILITY_MASK, 15209ed24f4bSMarc Zyngier 
GITS_BASER_OUTER_CACHEABILITY_SHIFT, 15219ed24f4bSMarc Zyngier vgic_sanitise_outer_cacheability); 15229ed24f4bSMarc Zyngier 15239ed24f4bSMarc Zyngier /* We support only one (ITS) page size: 64K */ 15249ed24f4bSMarc Zyngier reg = (reg & ~GITS_BASER_PAGE_SIZE_MASK) | GITS_BASER_PAGE_SIZE_64K; 15259ed24f4bSMarc Zyngier 15269ed24f4bSMarc Zyngier return reg; 15279ed24f4bSMarc Zyngier } 15289ed24f4bSMarc Zyngier 15299ed24f4bSMarc Zyngier static u64 vgic_sanitise_its_cbaser(u64 reg) 15309ed24f4bSMarc Zyngier { 15319ed24f4bSMarc Zyngier reg = vgic_sanitise_field(reg, GITS_CBASER_SHAREABILITY_MASK, 15329ed24f4bSMarc Zyngier GITS_CBASER_SHAREABILITY_SHIFT, 15339ed24f4bSMarc Zyngier vgic_sanitise_shareability); 15349ed24f4bSMarc Zyngier reg = vgic_sanitise_field(reg, GITS_CBASER_INNER_CACHEABILITY_MASK, 15359ed24f4bSMarc Zyngier GITS_CBASER_INNER_CACHEABILITY_SHIFT, 15369ed24f4bSMarc Zyngier vgic_sanitise_inner_cacheability); 15379ed24f4bSMarc Zyngier reg = vgic_sanitise_field(reg, GITS_CBASER_OUTER_CACHEABILITY_MASK, 15389ed24f4bSMarc Zyngier GITS_CBASER_OUTER_CACHEABILITY_SHIFT, 15399ed24f4bSMarc Zyngier vgic_sanitise_outer_cacheability); 15409ed24f4bSMarc Zyngier 15419ed24f4bSMarc Zyngier /* Sanitise the physical address to be 64k aligned. 
*/ 15429ed24f4bSMarc Zyngier reg &= ~GENMASK_ULL(15, 12); 15439ed24f4bSMarc Zyngier 15449ed24f4bSMarc Zyngier return reg; 15459ed24f4bSMarc Zyngier } 15469ed24f4bSMarc Zyngier 15479ed24f4bSMarc Zyngier static unsigned long vgic_mmio_read_its_cbaser(struct kvm *kvm, 15489ed24f4bSMarc Zyngier struct vgic_its *its, 15499ed24f4bSMarc Zyngier gpa_t addr, unsigned int len) 15509ed24f4bSMarc Zyngier { 15519ed24f4bSMarc Zyngier return extract_bytes(its->cbaser, addr & 7, len); 15529ed24f4bSMarc Zyngier } 15539ed24f4bSMarc Zyngier 15549ed24f4bSMarc Zyngier static void vgic_mmio_write_its_cbaser(struct kvm *kvm, struct vgic_its *its, 15559ed24f4bSMarc Zyngier gpa_t addr, unsigned int len, 15569ed24f4bSMarc Zyngier unsigned long val) 15579ed24f4bSMarc Zyngier { 15589ed24f4bSMarc Zyngier /* When GITS_CTLR.Enable is 1, this register is RO. */ 15599ed24f4bSMarc Zyngier if (its->enabled) 15609ed24f4bSMarc Zyngier return; 15619ed24f4bSMarc Zyngier 15629ed24f4bSMarc Zyngier mutex_lock(&its->cmd_lock); 15639ed24f4bSMarc Zyngier its->cbaser = update_64bit_reg(its->cbaser, addr & 7, len, val); 15649ed24f4bSMarc Zyngier its->cbaser = vgic_sanitise_its_cbaser(its->cbaser); 15659ed24f4bSMarc Zyngier its->creadr = 0; 15669ed24f4bSMarc Zyngier /* 15679ed24f4bSMarc Zyngier * CWRITER is architecturally UNKNOWN on reset, but we need to reset 15689ed24f4bSMarc Zyngier * it to CREADR to make sure we start with an empty command buffer. 15699ed24f4bSMarc Zyngier */ 15709ed24f4bSMarc Zyngier its->cwriter = its->creadr; 15719ed24f4bSMarc Zyngier mutex_unlock(&its->cmd_lock); 15729ed24f4bSMarc Zyngier } 15739ed24f4bSMarc Zyngier 15749ed24f4bSMarc Zyngier #define ITS_CMD_BUFFER_SIZE(baser) ((((baser) & 0xff) + 1) << 12) 15759ed24f4bSMarc Zyngier #define ITS_CMD_SIZE 32 15769ed24f4bSMarc Zyngier #define ITS_CMD_OFFSET(reg) ((reg) & GENMASK(19, 5)) 15779ed24f4bSMarc Zyngier 15789ed24f4bSMarc Zyngier /* Must be called with the cmd_lock held. 
*/ 15799ed24f4bSMarc Zyngier static void vgic_its_process_commands(struct kvm *kvm, struct vgic_its *its) 15809ed24f4bSMarc Zyngier { 15819ed24f4bSMarc Zyngier gpa_t cbaser; 15829ed24f4bSMarc Zyngier u64 cmd_buf[4]; 15839ed24f4bSMarc Zyngier 15849ed24f4bSMarc Zyngier /* Commands are only processed when the ITS is enabled. */ 15859ed24f4bSMarc Zyngier if (!its->enabled) 15869ed24f4bSMarc Zyngier return; 15879ed24f4bSMarc Zyngier 15889ed24f4bSMarc Zyngier cbaser = GITS_CBASER_ADDRESS(its->cbaser); 15899ed24f4bSMarc Zyngier 15909ed24f4bSMarc Zyngier while (its->cwriter != its->creadr) { 15919ed24f4bSMarc Zyngier int ret = kvm_read_guest_lock(kvm, cbaser + its->creadr, 15929ed24f4bSMarc Zyngier cmd_buf, ITS_CMD_SIZE); 15939ed24f4bSMarc Zyngier /* 15949ed24f4bSMarc Zyngier * If kvm_read_guest() fails, this could be due to the guest 15959ed24f4bSMarc Zyngier * programming a bogus value in CBASER or something else going 15969ed24f4bSMarc Zyngier * wrong from which we cannot easily recover. 15979ed24f4bSMarc Zyngier * According to section 6.3.2 in the GICv3 spec we can just 15989ed24f4bSMarc Zyngier * ignore that command then. 15999ed24f4bSMarc Zyngier */ 16009ed24f4bSMarc Zyngier if (!ret) 16019ed24f4bSMarc Zyngier vgic_its_handle_command(kvm, its, cmd_buf); 16029ed24f4bSMarc Zyngier 16039ed24f4bSMarc Zyngier its->creadr += ITS_CMD_SIZE; 16049ed24f4bSMarc Zyngier if (its->creadr == ITS_CMD_BUFFER_SIZE(its->cbaser)) 16059ed24f4bSMarc Zyngier its->creadr = 0; 16069ed24f4bSMarc Zyngier } 16079ed24f4bSMarc Zyngier } 16089ed24f4bSMarc Zyngier 16099ed24f4bSMarc Zyngier /* 16109ed24f4bSMarc Zyngier * By writing to CWRITER the guest announces new commands to be processed. 16119ed24f4bSMarc Zyngier * To avoid any races in the first place, we take the its_cmd lock, which 16129ed24f4bSMarc Zyngier * protects our ring buffer variables, so that there is only one user 16139ed24f4bSMarc Zyngier * per ITS handling commands at a given time. 
16149ed24f4bSMarc Zyngier */ 16159ed24f4bSMarc Zyngier static void vgic_mmio_write_its_cwriter(struct kvm *kvm, struct vgic_its *its, 16169ed24f4bSMarc Zyngier gpa_t addr, unsigned int len, 16179ed24f4bSMarc Zyngier unsigned long val) 16189ed24f4bSMarc Zyngier { 16199ed24f4bSMarc Zyngier u64 reg; 16209ed24f4bSMarc Zyngier 16219ed24f4bSMarc Zyngier if (!its) 16229ed24f4bSMarc Zyngier return; 16239ed24f4bSMarc Zyngier 16249ed24f4bSMarc Zyngier mutex_lock(&its->cmd_lock); 16259ed24f4bSMarc Zyngier 16269ed24f4bSMarc Zyngier reg = update_64bit_reg(its->cwriter, addr & 7, len, val); 16279ed24f4bSMarc Zyngier reg = ITS_CMD_OFFSET(reg); 16289ed24f4bSMarc Zyngier if (reg >= ITS_CMD_BUFFER_SIZE(its->cbaser)) { 16299ed24f4bSMarc Zyngier mutex_unlock(&its->cmd_lock); 16309ed24f4bSMarc Zyngier return; 16319ed24f4bSMarc Zyngier } 16329ed24f4bSMarc Zyngier its->cwriter = reg; 16339ed24f4bSMarc Zyngier 16349ed24f4bSMarc Zyngier vgic_its_process_commands(kvm, its); 16359ed24f4bSMarc Zyngier 16369ed24f4bSMarc Zyngier mutex_unlock(&its->cmd_lock); 16379ed24f4bSMarc Zyngier } 16389ed24f4bSMarc Zyngier 16399ed24f4bSMarc Zyngier static unsigned long vgic_mmio_read_its_cwriter(struct kvm *kvm, 16409ed24f4bSMarc Zyngier struct vgic_its *its, 16419ed24f4bSMarc Zyngier gpa_t addr, unsigned int len) 16429ed24f4bSMarc Zyngier { 16439ed24f4bSMarc Zyngier return extract_bytes(its->cwriter, addr & 0x7, len); 16449ed24f4bSMarc Zyngier } 16459ed24f4bSMarc Zyngier 16469ed24f4bSMarc Zyngier static unsigned long vgic_mmio_read_its_creadr(struct kvm *kvm, 16479ed24f4bSMarc Zyngier struct vgic_its *its, 16489ed24f4bSMarc Zyngier gpa_t addr, unsigned int len) 16499ed24f4bSMarc Zyngier { 16509ed24f4bSMarc Zyngier return extract_bytes(its->creadr, addr & 0x7, len); 16519ed24f4bSMarc Zyngier } 16529ed24f4bSMarc Zyngier 16539ed24f4bSMarc Zyngier static int vgic_mmio_uaccess_write_its_creadr(struct kvm *kvm, 16549ed24f4bSMarc Zyngier struct vgic_its *its, 16559ed24f4bSMarc Zyngier gpa_t addr, unsigned int 
len, 16569ed24f4bSMarc Zyngier unsigned long val) 16579ed24f4bSMarc Zyngier { 16589ed24f4bSMarc Zyngier u32 cmd_offset; 16599ed24f4bSMarc Zyngier int ret = 0; 16609ed24f4bSMarc Zyngier 16619ed24f4bSMarc Zyngier mutex_lock(&its->cmd_lock); 16629ed24f4bSMarc Zyngier 16639ed24f4bSMarc Zyngier if (its->enabled) { 16649ed24f4bSMarc Zyngier ret = -EBUSY; 16659ed24f4bSMarc Zyngier goto out; 16669ed24f4bSMarc Zyngier } 16679ed24f4bSMarc Zyngier 16689ed24f4bSMarc Zyngier cmd_offset = ITS_CMD_OFFSET(val); 16699ed24f4bSMarc Zyngier if (cmd_offset >= ITS_CMD_BUFFER_SIZE(its->cbaser)) { 16709ed24f4bSMarc Zyngier ret = -EINVAL; 16719ed24f4bSMarc Zyngier goto out; 16729ed24f4bSMarc Zyngier } 16739ed24f4bSMarc Zyngier 16749ed24f4bSMarc Zyngier its->creadr = cmd_offset; 16759ed24f4bSMarc Zyngier out: 16769ed24f4bSMarc Zyngier mutex_unlock(&its->cmd_lock); 16779ed24f4bSMarc Zyngier return ret; 16789ed24f4bSMarc Zyngier } 16799ed24f4bSMarc Zyngier 16809ed24f4bSMarc Zyngier #define BASER_INDEX(addr) (((addr) / sizeof(u64)) & 0x7) 16819ed24f4bSMarc Zyngier static unsigned long vgic_mmio_read_its_baser(struct kvm *kvm, 16829ed24f4bSMarc Zyngier struct vgic_its *its, 16839ed24f4bSMarc Zyngier gpa_t addr, unsigned int len) 16849ed24f4bSMarc Zyngier { 16859ed24f4bSMarc Zyngier u64 reg; 16869ed24f4bSMarc Zyngier 16879ed24f4bSMarc Zyngier switch (BASER_INDEX(addr)) { 16889ed24f4bSMarc Zyngier case 0: 16899ed24f4bSMarc Zyngier reg = its->baser_device_table; 16909ed24f4bSMarc Zyngier break; 16919ed24f4bSMarc Zyngier case 1: 16929ed24f4bSMarc Zyngier reg = its->baser_coll_table; 16939ed24f4bSMarc Zyngier break; 16949ed24f4bSMarc Zyngier default: 16959ed24f4bSMarc Zyngier reg = 0; 16969ed24f4bSMarc Zyngier break; 16979ed24f4bSMarc Zyngier } 16989ed24f4bSMarc Zyngier 16999ed24f4bSMarc Zyngier return extract_bytes(reg, addr & 7, len); 17009ed24f4bSMarc Zyngier } 17019ed24f4bSMarc Zyngier 17029ed24f4bSMarc Zyngier #define GITS_BASER_RO_MASK (GENMASK_ULL(52, 48) | GENMASK_ULL(58, 56)) 
17039ed24f4bSMarc Zyngier static void vgic_mmio_write_its_baser(struct kvm *kvm, 17049ed24f4bSMarc Zyngier struct vgic_its *its, 17059ed24f4bSMarc Zyngier gpa_t addr, unsigned int len, 17069ed24f4bSMarc Zyngier unsigned long val) 17079ed24f4bSMarc Zyngier { 17089ed24f4bSMarc Zyngier const struct vgic_its_abi *abi = vgic_its_get_abi(its); 17099ed24f4bSMarc Zyngier u64 entry_size, table_type; 17109ed24f4bSMarc Zyngier u64 reg, *regptr, clearbits = 0; 17119ed24f4bSMarc Zyngier 17129ed24f4bSMarc Zyngier /* When GITS_CTLR.Enable is 1, we ignore write accesses. */ 17139ed24f4bSMarc Zyngier if (its->enabled) 17149ed24f4bSMarc Zyngier return; 17159ed24f4bSMarc Zyngier 17169ed24f4bSMarc Zyngier switch (BASER_INDEX(addr)) { 17179ed24f4bSMarc Zyngier case 0: 17189ed24f4bSMarc Zyngier regptr = &its->baser_device_table; 17199ed24f4bSMarc Zyngier entry_size = abi->dte_esz; 17209ed24f4bSMarc Zyngier table_type = GITS_BASER_TYPE_DEVICE; 17219ed24f4bSMarc Zyngier break; 17229ed24f4bSMarc Zyngier case 1: 17239ed24f4bSMarc Zyngier regptr = &its->baser_coll_table; 17249ed24f4bSMarc Zyngier entry_size = abi->cte_esz; 17259ed24f4bSMarc Zyngier table_type = GITS_BASER_TYPE_COLLECTION; 17269ed24f4bSMarc Zyngier clearbits = GITS_BASER_INDIRECT; 17279ed24f4bSMarc Zyngier break; 17289ed24f4bSMarc Zyngier default: 17299ed24f4bSMarc Zyngier return; 17309ed24f4bSMarc Zyngier } 17319ed24f4bSMarc Zyngier 17329ed24f4bSMarc Zyngier reg = update_64bit_reg(*regptr, addr & 7, len, val); 17339ed24f4bSMarc Zyngier reg &= ~GITS_BASER_RO_MASK; 17349ed24f4bSMarc Zyngier reg &= ~clearbits; 17359ed24f4bSMarc Zyngier 17369ed24f4bSMarc Zyngier reg |= (entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT; 17379ed24f4bSMarc Zyngier reg |= table_type << GITS_BASER_TYPE_SHIFT; 17389ed24f4bSMarc Zyngier reg = vgic_sanitise_its_baser(reg); 17399ed24f4bSMarc Zyngier 17409ed24f4bSMarc Zyngier *regptr = reg; 17419ed24f4bSMarc Zyngier 17429ed24f4bSMarc Zyngier if (!(reg & GITS_BASER_VALID)) { 17439ed24f4bSMarc Zyngier /* Take 
the its_lock to prevent a race with a save/restore */ 17449ed24f4bSMarc Zyngier mutex_lock(&its->its_lock); 17459ed24f4bSMarc Zyngier switch (table_type) { 17469ed24f4bSMarc Zyngier case GITS_BASER_TYPE_DEVICE: 17479ed24f4bSMarc Zyngier vgic_its_free_device_list(kvm, its); 17489ed24f4bSMarc Zyngier break; 17499ed24f4bSMarc Zyngier case GITS_BASER_TYPE_COLLECTION: 17509ed24f4bSMarc Zyngier vgic_its_free_collection_list(kvm, its); 17519ed24f4bSMarc Zyngier break; 17529ed24f4bSMarc Zyngier } 17539ed24f4bSMarc Zyngier mutex_unlock(&its->its_lock); 17549ed24f4bSMarc Zyngier } 17559ed24f4bSMarc Zyngier } 17569ed24f4bSMarc Zyngier 17579ed24f4bSMarc Zyngier static unsigned long vgic_mmio_read_its_ctlr(struct kvm *vcpu, 17589ed24f4bSMarc Zyngier struct vgic_its *its, 17599ed24f4bSMarc Zyngier gpa_t addr, unsigned int len) 17609ed24f4bSMarc Zyngier { 17619ed24f4bSMarc Zyngier u32 reg = 0; 17629ed24f4bSMarc Zyngier 17639ed24f4bSMarc Zyngier mutex_lock(&its->cmd_lock); 17649ed24f4bSMarc Zyngier if (its->creadr == its->cwriter) 17659ed24f4bSMarc Zyngier reg |= GITS_CTLR_QUIESCENT; 17669ed24f4bSMarc Zyngier if (its->enabled) 17679ed24f4bSMarc Zyngier reg |= GITS_CTLR_ENABLE; 17689ed24f4bSMarc Zyngier mutex_unlock(&its->cmd_lock); 17699ed24f4bSMarc Zyngier 17709ed24f4bSMarc Zyngier return reg; 17719ed24f4bSMarc Zyngier } 17729ed24f4bSMarc Zyngier 17739ed24f4bSMarc Zyngier static void vgic_mmio_write_its_ctlr(struct kvm *kvm, struct vgic_its *its, 17749ed24f4bSMarc Zyngier gpa_t addr, unsigned int len, 17759ed24f4bSMarc Zyngier unsigned long val) 17769ed24f4bSMarc Zyngier { 17779ed24f4bSMarc Zyngier mutex_lock(&its->cmd_lock); 17789ed24f4bSMarc Zyngier 17799ed24f4bSMarc Zyngier /* 17809ed24f4bSMarc Zyngier * It is UNPREDICTABLE to enable the ITS if any of the CBASER or 17819ed24f4bSMarc Zyngier * device/collection BASER are invalid 17829ed24f4bSMarc Zyngier */ 17839ed24f4bSMarc Zyngier if (!its->enabled && (val & GITS_CTLR_ENABLE) && 17849ed24f4bSMarc Zyngier 
(!(its->baser_device_table & GITS_BASER_VALID) || 17859ed24f4bSMarc Zyngier !(its->baser_coll_table & GITS_BASER_VALID) || 17869ed24f4bSMarc Zyngier !(its->cbaser & GITS_CBASER_VALID))) 17879ed24f4bSMarc Zyngier goto out; 17889ed24f4bSMarc Zyngier 17899ed24f4bSMarc Zyngier its->enabled = !!(val & GITS_CTLR_ENABLE); 17909ed24f4bSMarc Zyngier if (!its->enabled) 17919ed24f4bSMarc Zyngier vgic_its_invalidate_cache(kvm); 17929ed24f4bSMarc Zyngier 17939ed24f4bSMarc Zyngier /* 17949ed24f4bSMarc Zyngier * Try to process any pending commands. This function bails out early 17959ed24f4bSMarc Zyngier * if the ITS is disabled or no commands have been queued. 17969ed24f4bSMarc Zyngier */ 17979ed24f4bSMarc Zyngier vgic_its_process_commands(kvm, its); 17989ed24f4bSMarc Zyngier 17999ed24f4bSMarc Zyngier out: 18009ed24f4bSMarc Zyngier mutex_unlock(&its->cmd_lock); 18019ed24f4bSMarc Zyngier } 18029ed24f4bSMarc Zyngier 18039ed24f4bSMarc Zyngier #define REGISTER_ITS_DESC(off, rd, wr, length, acc) \ 18049ed24f4bSMarc Zyngier { \ 18059ed24f4bSMarc Zyngier .reg_offset = off, \ 18069ed24f4bSMarc Zyngier .len = length, \ 18079ed24f4bSMarc Zyngier .access_flags = acc, \ 18089ed24f4bSMarc Zyngier .its_read = rd, \ 18099ed24f4bSMarc Zyngier .its_write = wr, \ 18109ed24f4bSMarc Zyngier } 18119ed24f4bSMarc Zyngier 18129ed24f4bSMarc Zyngier #define REGISTER_ITS_DESC_UACCESS(off, rd, wr, uwr, length, acc)\ 18139ed24f4bSMarc Zyngier { \ 18149ed24f4bSMarc Zyngier .reg_offset = off, \ 18159ed24f4bSMarc Zyngier .len = length, \ 18169ed24f4bSMarc Zyngier .access_flags = acc, \ 18179ed24f4bSMarc Zyngier .its_read = rd, \ 18189ed24f4bSMarc Zyngier .its_write = wr, \ 18199ed24f4bSMarc Zyngier .uaccess_its_write = uwr, \ 18209ed24f4bSMarc Zyngier } 18219ed24f4bSMarc Zyngier 18229ed24f4bSMarc Zyngier static void its_mmio_write_wi(struct kvm *kvm, struct vgic_its *its, 18239ed24f4bSMarc Zyngier gpa_t addr, unsigned int len, unsigned long val) 18249ed24f4bSMarc Zyngier { 18259ed24f4bSMarc Zyngier /* Ignore */ 
18269ed24f4bSMarc Zyngier } 18279ed24f4bSMarc Zyngier 18289ed24f4bSMarc Zyngier static struct vgic_register_region its_registers[] = { 18299ed24f4bSMarc Zyngier REGISTER_ITS_DESC(GITS_CTLR, 18309ed24f4bSMarc Zyngier vgic_mmio_read_its_ctlr, vgic_mmio_write_its_ctlr, 4, 18319ed24f4bSMarc Zyngier VGIC_ACCESS_32bit), 18329ed24f4bSMarc Zyngier REGISTER_ITS_DESC_UACCESS(GITS_IIDR, 18339ed24f4bSMarc Zyngier vgic_mmio_read_its_iidr, its_mmio_write_wi, 18349ed24f4bSMarc Zyngier vgic_mmio_uaccess_write_its_iidr, 4, 18359ed24f4bSMarc Zyngier VGIC_ACCESS_32bit), 18369ed24f4bSMarc Zyngier REGISTER_ITS_DESC(GITS_TYPER, 18379ed24f4bSMarc Zyngier vgic_mmio_read_its_typer, its_mmio_write_wi, 8, 18389ed24f4bSMarc Zyngier VGIC_ACCESS_64bit | VGIC_ACCESS_32bit), 18399ed24f4bSMarc Zyngier REGISTER_ITS_DESC(GITS_CBASER, 18409ed24f4bSMarc Zyngier vgic_mmio_read_its_cbaser, vgic_mmio_write_its_cbaser, 8, 18419ed24f4bSMarc Zyngier VGIC_ACCESS_64bit | VGIC_ACCESS_32bit), 18429ed24f4bSMarc Zyngier REGISTER_ITS_DESC(GITS_CWRITER, 18439ed24f4bSMarc Zyngier vgic_mmio_read_its_cwriter, vgic_mmio_write_its_cwriter, 8, 18449ed24f4bSMarc Zyngier VGIC_ACCESS_64bit | VGIC_ACCESS_32bit), 18459ed24f4bSMarc Zyngier REGISTER_ITS_DESC_UACCESS(GITS_CREADR, 18469ed24f4bSMarc Zyngier vgic_mmio_read_its_creadr, its_mmio_write_wi, 18479ed24f4bSMarc Zyngier vgic_mmio_uaccess_write_its_creadr, 8, 18489ed24f4bSMarc Zyngier VGIC_ACCESS_64bit | VGIC_ACCESS_32bit), 18499ed24f4bSMarc Zyngier REGISTER_ITS_DESC(GITS_BASER, 18509ed24f4bSMarc Zyngier vgic_mmio_read_its_baser, vgic_mmio_write_its_baser, 0x40, 18519ed24f4bSMarc Zyngier VGIC_ACCESS_64bit | VGIC_ACCESS_32bit), 18529ed24f4bSMarc Zyngier REGISTER_ITS_DESC(GITS_IDREGS_BASE, 18539ed24f4bSMarc Zyngier vgic_mmio_read_its_idregs, its_mmio_write_wi, 0x30, 18549ed24f4bSMarc Zyngier VGIC_ACCESS_32bit), 18559ed24f4bSMarc Zyngier }; 18569ed24f4bSMarc Zyngier 18579ed24f4bSMarc Zyngier /* This is called on setting the LPI enable bit in the redistributor. 
*/ 18589ed24f4bSMarc Zyngier void vgic_enable_lpis(struct kvm_vcpu *vcpu) 18599ed24f4bSMarc Zyngier { 18609ed24f4bSMarc Zyngier if (!(vcpu->arch.vgic_cpu.pendbaser & GICR_PENDBASER_PTZ)) 18619ed24f4bSMarc Zyngier its_sync_lpi_pending_table(vcpu); 18629ed24f4bSMarc Zyngier } 18639ed24f4bSMarc Zyngier 18649ed24f4bSMarc Zyngier static int vgic_register_its_iodev(struct kvm *kvm, struct vgic_its *its, 18659ed24f4bSMarc Zyngier u64 addr) 18669ed24f4bSMarc Zyngier { 18679ed24f4bSMarc Zyngier struct vgic_io_device *iodev = &its->iodev; 18689ed24f4bSMarc Zyngier int ret; 18699ed24f4bSMarc Zyngier 18709ed24f4bSMarc Zyngier mutex_lock(&kvm->slots_lock); 18719ed24f4bSMarc Zyngier if (!IS_VGIC_ADDR_UNDEF(its->vgic_its_base)) { 18729ed24f4bSMarc Zyngier ret = -EBUSY; 18739ed24f4bSMarc Zyngier goto out; 18749ed24f4bSMarc Zyngier } 18759ed24f4bSMarc Zyngier 18769ed24f4bSMarc Zyngier its->vgic_its_base = addr; 18779ed24f4bSMarc Zyngier iodev->regions = its_registers; 18789ed24f4bSMarc Zyngier iodev->nr_regions = ARRAY_SIZE(its_registers); 18799ed24f4bSMarc Zyngier kvm_iodevice_init(&iodev->dev, &kvm_io_gic_ops); 18809ed24f4bSMarc Zyngier 18819ed24f4bSMarc Zyngier iodev->base_addr = its->vgic_its_base; 18829ed24f4bSMarc Zyngier iodev->iodev_type = IODEV_ITS; 18839ed24f4bSMarc Zyngier iodev->its = its; 18849ed24f4bSMarc Zyngier ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, iodev->base_addr, 18859ed24f4bSMarc Zyngier KVM_VGIC_V3_ITS_SIZE, &iodev->dev); 18869ed24f4bSMarc Zyngier out: 18879ed24f4bSMarc Zyngier mutex_unlock(&kvm->slots_lock); 18889ed24f4bSMarc Zyngier 18899ed24f4bSMarc Zyngier return ret; 18909ed24f4bSMarc Zyngier } 18919ed24f4bSMarc Zyngier 18929ed24f4bSMarc Zyngier /* Default is 16 cached LPIs per vcpu */ 18939ed24f4bSMarc Zyngier #define LPI_DEFAULT_PCPU_CACHE_SIZE 16 18949ed24f4bSMarc Zyngier 18959ed24f4bSMarc Zyngier void vgic_lpi_translation_cache_init(struct kvm *kvm) 18969ed24f4bSMarc Zyngier { 18979ed24f4bSMarc Zyngier struct vgic_dist *dist = 
&kvm->arch.vgic; 18989ed24f4bSMarc Zyngier unsigned int sz; 18999ed24f4bSMarc Zyngier int i; 19009ed24f4bSMarc Zyngier 19019ed24f4bSMarc Zyngier if (!list_empty(&dist->lpi_translation_cache)) 19029ed24f4bSMarc Zyngier return; 19039ed24f4bSMarc Zyngier 19049ed24f4bSMarc Zyngier sz = atomic_read(&kvm->online_vcpus) * LPI_DEFAULT_PCPU_CACHE_SIZE; 19059ed24f4bSMarc Zyngier 19069ed24f4bSMarc Zyngier for (i = 0; i < sz; i++) { 19079ed24f4bSMarc Zyngier struct vgic_translation_cache_entry *cte; 19089ed24f4bSMarc Zyngier 19099ed24f4bSMarc Zyngier /* An allocation failure is not fatal */ 19103ef23167SJia He cte = kzalloc(sizeof(*cte), GFP_KERNEL_ACCOUNT); 19119ed24f4bSMarc Zyngier if (WARN_ON(!cte)) 19129ed24f4bSMarc Zyngier break; 19139ed24f4bSMarc Zyngier 19149ed24f4bSMarc Zyngier INIT_LIST_HEAD(&cte->entry); 19159ed24f4bSMarc Zyngier list_add(&cte->entry, &dist->lpi_translation_cache); 19169ed24f4bSMarc Zyngier } 19179ed24f4bSMarc Zyngier } 19189ed24f4bSMarc Zyngier 19199ed24f4bSMarc Zyngier void vgic_lpi_translation_cache_destroy(struct kvm *kvm) 19209ed24f4bSMarc Zyngier { 19219ed24f4bSMarc Zyngier struct vgic_dist *dist = &kvm->arch.vgic; 19229ed24f4bSMarc Zyngier struct vgic_translation_cache_entry *cte, *tmp; 19239ed24f4bSMarc Zyngier 19249ed24f4bSMarc Zyngier vgic_its_invalidate_cache(kvm); 19259ed24f4bSMarc Zyngier 19269ed24f4bSMarc Zyngier list_for_each_entry_safe(cte, tmp, 19279ed24f4bSMarc Zyngier &dist->lpi_translation_cache, entry) { 19289ed24f4bSMarc Zyngier list_del(&cte->entry); 19299ed24f4bSMarc Zyngier kfree(cte); 19309ed24f4bSMarc Zyngier } 19319ed24f4bSMarc Zyngier } 19329ed24f4bSMarc Zyngier 19339ed24f4bSMarc Zyngier #define INITIAL_BASER_VALUE \ 19349ed24f4bSMarc Zyngier (GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWb) | \ 19359ed24f4bSMarc Zyngier GIC_BASER_CACHEABILITY(GITS_BASER, OUTER, SameAsInner) | \ 19369ed24f4bSMarc Zyngier GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable) | \ 19379ed24f4bSMarc Zyngier GITS_BASER_PAGE_SIZE_64K) 
/* Reset value for the redistributor GICR_PROPBASER register. */
#define INITIAL_PROPBASER_VALUE						  \
	(GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWb)		| \
	 GIC_BASER_CACHEABILITY(GICR_PROPBASER, OUTER, SameAsInner)	| \
	 GIC_BASER_SHAREABILITY(GICR_PROPBASER, InnerShareable))

/*
 * KVM device .create callback for the ITS: allocate and initialize the
 * vgic_its state, initialize GICv4 and the LPI translation cache if the
 * vgic is already in place, and select the most recent ABI.
 *
 * Returns 0 on success, a negative errno otherwise; on failure the
 * partially-initialized its is freed before returning.
 */
static int vgic_its_create(struct kvm_device *dev, u32 type)
{
	int ret;
	struct vgic_its *its;

	if (type != KVM_DEV_TYPE_ARM_VGIC_ITS)
		return -ENODEV;

	its = kzalloc(sizeof(struct vgic_its), GFP_KERNEL_ACCOUNT);
	if (!its)
		return -ENOMEM;

	mutex_lock(&dev->kvm->arch.config_lock);

	if (vgic_initialized(dev->kvm)) {
		ret = vgic_v4_init(dev->kvm);
		if (ret < 0) {
			mutex_unlock(&dev->kvm->arch.config_lock);
			kfree(its);
			return ret;
		}

		vgic_lpi_translation_cache_init(dev->kvm);
	}

	mutex_init(&its->its_lock);
	mutex_init(&its->cmd_lock);

	/*
	 * Yep, even more trickery for lock ordering...
	 *
	 * Take both mutexes once, in cmd_lock -> its_lock order, so that
	 * lockdep records this as the valid nesting order up front.
	 */
#ifdef CONFIG_LOCKDEP
	mutex_lock(&its->cmd_lock);
	mutex_lock(&its->its_lock);
	mutex_unlock(&its->its_lock);
	mutex_unlock(&its->cmd_lock);
#endif

	its->vgic_its_base = VGIC_ADDR_UNDEF;

	INIT_LIST_HEAD(&its->device_list);
	INIT_LIST_HEAD(&its->collection_list);

	dev->kvm->arch.vgic.msis_require_devid = true;
	dev->kvm->arch.vgic.has_its = true;
	its->enabled = false;
	its->dev = dev;

	its->baser_device_table = INITIAL_BASER_VALUE |
		((u64)GITS_BASER_TYPE_DEVICE << GITS_BASER_TYPE_SHIFT);
	its->baser_coll_table = INITIAL_BASER_VALUE |
		((u64)GITS_BASER_TYPE_COLLECTION << GITS_BASER_TYPE_SHIFT);
	dev->kvm->arch.vgic.propbaser = INITIAL_PROPBASER_VALUE;

	dev->private = its;

	/* Default to the newest ABI; userspace may select an older one. */
	ret = vgic_its_set_abi(its, NR_ITS_ABIS - 1);

	mutex_unlock(&dev->kvm->arch.config_lock);

	return ret;
}

/*
 * KVM device .destroy callback: release all device and collection state
 * under its_lock, then free the its and the kvm_device wrapper itself.
 */
static void vgic_its_destroy(struct kvm_device *kvm_dev)
{
	struct kvm *kvm = kvm_dev->kvm;
	struct vgic_its *its = kvm_dev->private;

	mutex_lock(&its->its_lock);

	vgic_its_free_device_list(kvm, its);
	vgic_its_free_collection_list(kvm, its);

	mutex_unlock(&its->its_lock);
	kfree(its);
	kfree(kvm_dev);/* alloc by kvm_ioctl_create_device, free by .destroy */
}

/*
 * Check whether @attr->attr designates a valid, properly aligned ITS
 * register offset: 4-byte alignment for the 32-bit registers (below
 * GITS_TYPER or from GITS_PIDR4 up), 8-byte alignment otherwise.
 */
static int vgic_its_has_attr_regs(struct kvm_device *dev,
				  struct kvm_device_attr *attr)
{
	const struct vgic_register_region *region;
	gpa_t offset = attr->attr;
	int align;

	align = (offset < GITS_TYPER) || (offset >= GITS_PIDR4) ? 0x3 : 0x7;

	if (offset & align)
		return -EINVAL;

	region = vgic_find_mmio_region(its_registers,
				       ARRAY_SIZE(its_registers),
				       offset);
	if (!region)
		return -ENXIO;

	return 0;
}

/*
 * Userspace read/write access to an ITS register at offset @attr->attr.
 * All vcpus are locked for the duration of the access; lock order is
 * kvm->lock -> (all vcpu locks) -> kvm->arch.config_lock.
 *
 * On a read the register value is stored into *@reg; on a write *@reg
 * is the value to store. Returns 0 on success or a negative errno.
 */
static int vgic_its_attr_regs_access(struct kvm_device *dev,
				     struct kvm_device_attr *attr,
				     u64 *reg, bool is_write)
{
	const struct vgic_register_region *region;
	struct vgic_its *its;
	gpa_t addr, offset;
	unsigned int len;
	int align, ret = 0;

	its = dev->private;
	offset = attr->attr;

	/*
	 * Although the spec supports upper/lower 32-bit accesses to
	 * 64-bit ITS registers, the userspace ABI requires 64-bit
	 * accesses to all 64-bit wide registers. We therefore only
	 * support 32-bit accesses to GITS_CTLR, GITS_IIDR and GITS ID
	 * registers
	 */
	if ((offset < GITS_TYPER) || (offset >= GITS_PIDR4))
		align = 0x3;
	else
		align = 0x7;

	if (offset & align)
		return -EINVAL;

	mutex_lock(&dev->kvm->lock);

	if (!lock_all_vcpus(dev->kvm)) {
		mutex_unlock(&dev->kvm->lock);
		return -EBUSY;
	}

	mutex_lock(&dev->kvm->arch.config_lock);

	/* The ITS base address must have been set by userspace first. */
	if (IS_VGIC_ADDR_UNDEF(its->vgic_its_base)) {
		ret = -ENXIO;
		goto out;
	}

	region = vgic_find_mmio_region(its_registers,
				       ARRAY_SIZE(its_registers),
				       offset);
	if (!region) {
		ret = -ENXIO;
		goto out;
	}

	addr = its->vgic_its_base + offset;

	len = region->access_flags & VGIC_ACCESS_64bit ? 8 : 4;

	if (is_write) {
		/* Prefer the dedicated userspace accessor when present. */
		if (region->uaccess_its_write)
			ret = region->uaccess_its_write(dev->kvm, its, addr,
							len, *reg);
		else
			region->its_write(dev->kvm, its, addr, len, *reg);
	} else {
		*reg = region->its_read(dev->kvm, its, addr, len);
	}
out:
	mutex_unlock(&dev->kvm->arch.config_lock);
	unlock_all_vcpus(dev->kvm);
	mutex_unlock(&dev->kvm->lock);
	return ret;
}

/*
 * Compute the device_id delta from @dev to the next device in list @h,
 * clamped to the maximum encodable offset; 0 if @dev is the last entry.
 * The list is assumed sorted by device_id (see vgic_its_device_cmp).
 */
static u32 compute_next_devid_offset(struct list_head *h,
				     struct its_device *dev)
{
	struct its_device *next;
	u32 next_offset;

	if (list_is_last(&dev->dev_list, h))
		return 0;
	next = list_next_entry(dev, dev_list);
	next_offset = next->device_id - dev->device_id;

	return min_t(u32, next_offset, VITS_DTE_MAX_DEVID_OFFSET);
}

/*
 * Compute the event_id delta from @ite to the next ITE in list @h,
 * clamped to the maximum encodable offset; 0 if @ite is the last entry.
 * The list is assumed sorted by event_id (see vgic_its_ite_cmp).
 */
static u32 compute_next_eventid_offset(struct list_head *h, struct its_ite *ite)
{
	struct its_ite *next;
	u32 next_offset;

	if (list_is_last(&ite->ite_list, h))
		return 0;
	next = list_next_entry(ite, ite_list);
	next_offset = next->event_id - ite->event_id;

	return min_t(u32, next_offset, VITS_ITE_MAX_EVENTID_OFFSET);
}

/**
 * entry_fn_t - Callback called on a table entry restore path
 * @its: its handle
 * @id: id of the entry
 * @entry: pointer to the entry
 * @opaque: pointer to an opaque data
 *
 * Return: < 0 on error, 0 if last element was identified, id offset to next
 * element otherwise
 */
typedef int (*entry_fn_t)(struct vgic_its *its, u32 id, void *entry,
			  void *opaque);

/**
 * scan_its_table - Scan a contiguous table in guest RAM and applies a function
 * to each entry
 *
 * @its: its handle
 * @base: base gpa of the table
 * @size: size of the table in bytes
 * @esz: entry size in bytes
 * @start_id: the ID of the first entry in the table
 * (non zero for 2d level tables)
 * @fn: function to apply on each entry
 *
 * Return: < 0 on error, 0 if last element was identified, 1 otherwise
 * (the last element may not be found on second level tables)
 */
static int scan_its_table(struct vgic_its *its, gpa_t base, int size, u32 esz,
			  int start_id, entry_fn_t fn, void *opaque)
{
	struct kvm *kvm = its->dev->kvm;
	unsigned long len = size;
	int id = start_id;
	gpa_t gpa = base;
	char entry[ESZ_MAX];
	int ret;

	memset(entry, 0, esz);

	while (true) {
		int next_offset;
		size_t byte_offset;

		ret = kvm_read_guest_lock(kvm, gpa, entry, esz);
		if (ret)
			return ret;

		next_offset = fn(its, id, entry, opaque);
		if (next_offset <= 0)
			return next_offset;

		byte_offset = next_offset * esz;
		/* Walked off the end of the table: report "not last". */
		if (byte_offset >= len)
			break;

		id += next_offset;
		gpa += byte_offset;
		len -= byte_offset;
	}
	return 1;
}

/**
 * vgic_its_save_ite - Save an interrupt translation entry at @gpa
 *
 * Packs next-event offset, physical INTID and collection id into one
 * little-endian 64-bit entry and writes it to guest RAM.
 */
static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev,
			     struct its_ite *ite, gpa_t gpa, int ite_esz)
{
	struct kvm *kvm = its->dev->kvm;
	u32 next_offset;
	u64 val;

	next_offset = compute_next_eventid_offset(&dev->itt_head, ite);
	val = ((u64)next_offset << KVM_ITS_ITE_NEXT_SHIFT) |
	      ((u64)ite->irq->intid << KVM_ITS_ITE_PINTID_SHIFT) |
		ite->collection->collection_id;
	val = cpu_to_le64(val);
	return vgic_write_guest_lock(kvm, gpa, &val, ite_esz);
}

/**
 * vgic_its_restore_ite - restore an interrupt translation entry
 * @event_id: id used for indexing
 * @ptr: pointer to the ITE entry
 * @opaque: pointer to the its_device
 *
 * Return: < 0 on error, the offset to the next entry on success,
 * or 1 for an invalid (zero-INTID) entry that should be skipped.
 */
static int vgic_its_restore_ite(struct vgic_its *its, u32 event_id,
				void *ptr, void *opaque)
{
	struct its_device *dev = opaque;
	struct its_collection *collection;
	struct kvm *kvm = its->dev->kvm;
	struct kvm_vcpu *vcpu = NULL;
	u64 val;
	u64 *p = (u64 *)ptr;
	struct vgic_irq *irq;
	u32 coll_id, lpi_id;
	struct its_ite *ite;
	u32 offset;

	val = *p;

	val = le64_to_cpu(val);

	coll_id = val & KVM_ITS_ITE_ICID_MASK;
	lpi_id = (val & KVM_ITS_ITE_PINTID_MASK) >> KVM_ITS_ITE_PINTID_SHIFT;

	if (!lpi_id)
		return 1; /* invalid entry, no choice but to scan next entry */

	if (lpi_id < VGIC_MIN_LPI)
		return -EINVAL;

	/* The next-entry offset must stay within the device's event space. */
	offset = val >> KVM_ITS_ITE_NEXT_SHIFT;
	if (event_id + offset >= BIT_ULL(dev->num_eventid_bits))
		return -EINVAL;

	collection = find_collection(its, coll_id);
	if (!collection)
		return -EINVAL;

	if (!vgic_its_check_event_id(its, dev, event_id))
		return -EINVAL;

	ite = vgic_its_alloc_ite(dev, collection, event_id);
	if (IS_ERR(ite))
		return PTR_ERR(ite);

	if (its_is_collection_mapped(collection))
		vcpu = kvm_get_vcpu(kvm, collection->target_addr);

	irq = vgic_add_lpi(kvm, lpi_id, vcpu);
	if (IS_ERR(irq)) {
		/* Don't leak the ITE allocated above. */
		its_free_ite(kvm, ite);
		return PTR_ERR(irq);
	}
	ite->irq = irq;

	return offset;
}

/* list_sort() comparator: order ITEs by ascending event_id. */
static int vgic_its_ite_cmp(void *priv, const struct list_head *a,
			    const struct list_head *b)
{
	struct its_ite *itea = container_of(a, struct its_ite, ite_list);
	struct its_ite *iteb = container_of(b, struct its_ite, ite_list);

	if (itea->event_id < iteb->event_id)
		return -1;
	else
		return 1;
}

/*
 * Save all ITEs of @device into the guest's ITT, sorted by event_id.
 * Returns 0 on success, a negative errno otherwise.
 */
static int vgic_its_save_itt(struct vgic_its *its, struct its_device *device)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	gpa_t base = device->itt_addr;
	struct its_ite *ite;
	int ret;
	int ite_esz = abi->ite_esz;

	list_sort(NULL, &device->itt_head, vgic_its_ite_cmp);

	list_for_each_entry(ite, &device->itt_head, ite_list) {
		gpa_t gpa = base + ite->event_id * ite_esz;

		/*
		 * If an LPI carries the HW bit, this means that this
		 * interrupt is controlled by GICv4, and we do not
		 * have direct access to that state without GICv4.1.
		 * Let's simply fail the save operation...
		 */
		if (ite->irq->hw && !kvm_vgic_global_state.has_gicv4_1)
			return -EACCES;

		ret = vgic_its_save_ite(its, device, ite, gpa, ite_esz);
		if (ret)
			return ret;
	}
	return 0;
}

/**
 * vgic_its_restore_itt - restore the ITT of a device
 *
 * @its: its handle
 * @dev: device handle
 *
 * Return 0 on success, < 0 on error
 */
static int vgic_its_restore_itt(struct vgic_its *its, struct its_device *dev)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	gpa_t base = dev->itt_addr;
	int ret;
	int ite_esz = abi->ite_esz;
	size_t max_size = BIT_ULL(dev->num_eventid_bits) * ite_esz;

	ret = scan_its_table(its, base, max_size, ite_esz, 0,
			     vgic_its_restore_ite, dev);

	/* scan_its_table returns +1 if all ITEs are invalid */
	if (ret > 0)
		ret = 0;

	return ret;
}

/**
 * vgic_its_save_dte - Save a device table entry at a given GPA
 *
 * @its: ITS handle
 * @dev: ITS device
 * @ptr: GPA
 *
 * Packs the valid bit, next-device offset, ITT address (256-byte
 * aligned, hence the >> 8) and event-id width into one little-endian
 * 64-bit entry and writes it to guest RAM.
 */
static int vgic_its_save_dte(struct vgic_its *its, struct its_device *dev,
			     gpa_t ptr, int dte_esz)
{
	struct kvm *kvm = its->dev->kvm;
	u64 val, itt_addr_field;
	u32 next_offset;

	itt_addr_field = dev->itt_addr >> 8;
	next_offset = compute_next_devid_offset(&its->device_list, dev);
	val = (1ULL << KVM_ITS_DTE_VALID_SHIFT |
	       ((u64)next_offset << KVM_ITS_DTE_NEXT_SHIFT) |
	       (itt_addr_field << KVM_ITS_DTE_ITTADDR_SHIFT) |
		(dev->num_eventid_bits - 1));
	val = cpu_to_le64(val);
	return vgic_write_guest_lock(kvm, ptr, &val, dte_esz);
}

/**
 * vgic_its_restore_dte - restore a device table entry
 *
 * @its: its handle
 * @id: device id the DTE corresponds to
 * @ptr: kernel VA where the 8 byte DTE is located
 * @opaque: unused
 *
 * Return: < 0 on error, 0 if the dte is the last one, id offset to the
 * next dte otherwise
 */
static int vgic_its_restore_dte(struct vgic_its *its, u32 id,
				void *ptr, void *opaque)
{
	struct its_device *dev;
	u64 baser = its->baser_device_table;
	gpa_t itt_addr;
	u8 num_eventid_bits;
	u64 entry = *(u64 *)ptr;
	bool valid;
	u32 offset;
	int ret;

	entry = le64_to_cpu(entry);

	valid = entry >> KVM_ITS_DTE_VALID_SHIFT;
	num_eventid_bits = (entry & KVM_ITS_DTE_SIZE_MASK) + 1;
	itt_addr = ((entry & KVM_ITS_DTE_ITTADDR_MASK)
			>> KVM_ITS_DTE_ITTADDR_SHIFT) << 8;

	if (!valid)
		return 1;

	/* dte entry is valid */
	offset = (entry & KVM_ITS_DTE_NEXT_MASK) >> KVM_ITS_DTE_NEXT_SHIFT;

	/* Reject device ids that don't fit the configured device table. */
	if (!vgic_its_check_id(its, baser, id, NULL))
		return -EINVAL;

	dev = vgic_its_alloc_device(its, id, itt_addr, num_eventid_bits);
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	ret = vgic_its_restore_itt(its, dev);
	if (ret) {
		vgic_its_free_device(its->dev->kvm, dev);
		return ret;
	}

	return offset;
}

/* list_sort() comparator: order devices by ascending device_id. */
static int vgic_its_device_cmp(void *priv, const struct list_head *a,
			       const struct list_head *b)
{
	struct its_device *deva = container_of(a, struct its_device, dev_list);
	struct its_device *devb = container_of(b, struct its_device, dev_list);

	if (deva->device_id < devb->device_id)
		return -1;
	else
		return 1;
}

/**
 * vgic_its_save_device_tables - Save the device table and all ITT
 * into guest RAM
 *
 * L1/L2 handling is hidden by vgic_its_check_id() helper which directly
 * returns the GPA of the device entry
 */
static int vgic_its_save_device_tables(struct vgic_its *its)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	u64 baser = its->baser_device_table;
	struct its_device *dev;
	int dte_esz = abi->dte_esz;

	if (!(baser & GITS_BASER_VALID))
		return 0;

	list_sort(NULL, &its->device_list, vgic_its_device_cmp);

	list_for_each_entry(dev, &its->device_list, dev_list) {
		int ret;
		gpa_t eaddr;

		if (!vgic_its_check_id(its, baser,
				       dev->device_id, &eaddr))
			return -EINVAL;

		/* Save the ITT before the DTE that points at it. */
		ret = vgic_its_save_itt(its, dev);
		if (ret)
			return ret;

		ret = vgic_its_save_dte(its, dev, eaddr, dte_esz);
		if (ret)
			return ret;
	}
	return 0;
}

/**
 * handle_l1_dte - callback used for L1 device table entries (2 stage case)
 *
 * @its: its handle
 * @id: index of the entry in the L1 table
 * @addr: kernel VA
 * @opaque: unused
 *
 * L1 table entries are scanned by steps of 1 entry
 * Return < 0 if error, 0 if last dte was found when scanning the L2
 * table, +1 otherwise (meaning next L1 entry must be scanned)
 */
static int handle_l1_dte(struct vgic_its *its, u32 id, void *addr,
			 void *opaque)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	int l2_start_id = id * (SZ_64K / abi->dte_esz);
	u64 entry = *(u64 *)addr;
	int dte_esz = abi->dte_esz;
	gpa_t gpa;
	int ret;

	entry = le64_to_cpu(entry);

	if (!(entry & KVM_ITS_L1E_VALID_MASK))
		return 1;

	gpa = entry & KVM_ITS_L1E_ADDR_MASK;

	ret = scan_its_table(its, gpa, SZ_64K, dte_esz,
			     l2_start_id, vgic_its_restore_dte, NULL);

	return ret;
}

/**
 * vgic_its_restore_device_tables - Restore the device table and all ITT
 * from guest RAM to internal data structs
 */
static int vgic_its_restore_device_tables(struct vgic_its *its)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	u64 baser = its->baser_device_table;
	int l1_esz, ret;
	int l1_tbl_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
	gpa_t l1_gpa;

	if (!(baser & GITS_BASER_VALID))
		return 0;

	l1_gpa = GITS_BASER_ADDR_48_to_52(baser);

	if (baser & GITS_BASER_INDIRECT) {
		l1_esz = GITS_LVL1_ENTRY_SIZE;
		ret = scan_its_table(its, l1_gpa, l1_tbl_size, l1_esz, 0,
				     handle_l1_dte, NULL);
	} else {
		l1_esz = abi->dte_esz;
		ret = scan_its_table(its, l1_gpa, l1_tbl_size, l1_esz, 0,
				     vgic_its_restore_dte, NULL);
	}

	/* scan_its_table returns +1 if all entries are invalid */
	if (ret > 0)
		ret = 0;

	/* On error, drop any devices restored before the failure. */
	if (ret < 0)
		vgic_its_free_device_list(its->dev->kvm, its);

	return ret;
}

/*
 * Write one little-endian collection table entry (valid bit, target
 * redistributor and collection id) at @gpa in guest RAM.
 */
static int vgic_its_save_cte(struct vgic_its *its,
			     struct its_collection *collection,
			     gpa_t gpa, int esz)
{
	u64 val;

	val = (1ULL << KVM_ITS_CTE_VALID_SHIFT |
	       ((u64)collection->target_addr << KVM_ITS_CTE_RDBASE_SHIFT) |
	       collection->collection_id);
	val = cpu_to_le64(val);
	return vgic_write_guest_lock(its->dev->kvm, gpa, &val, esz);
}

/*
 * Restore a collection entry into the ITS collection table.
 * Return +1 on success, 0 if the entry was invalid (which should be
 * interpreted as end-of-table), and a negative error value for generic errors.
 */
static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz)
{
	struct its_collection *collection;
	struct kvm *kvm = its->dev->kvm;
	u32 target_addr, coll_id;
	u64 val;
	int ret;

	BUG_ON(esz > sizeof(val));
	ret = kvm_read_guest_lock(kvm, gpa, &val, esz);
	if (ret)
		return ret;
	val = le64_to_cpu(val);
	if (!(val & KVM_ITS_CTE_VALID_MASK))
		return 0;

	target_addr = (u32)(val >> KVM_ITS_CTE_RDBASE_SHIFT);
	coll_id = val & KVM_ITS_CTE_ICID_MASK;

	/* The target must be an existing vcpu index (or "not mapped"). */
	if (target_addr != COLLECTION_NOT_MAPPED &&
	    target_addr >= atomic_read(&kvm->online_vcpus))
		return -EINVAL;

	collection = find_collection(its, coll_id);
	if (collection)
		return -EEXIST;

	if (!vgic_its_check_id(its, its->baser_coll_table, coll_id, NULL))
		return -EINVAL;

	ret = vgic_its_alloc_collection(its, &collection, coll_id);
	if (ret)
		return ret;
	collection->target_addr = target_addr;
	return 1;
}

/**
 * vgic_its_save_collection_table - Save the collection table into
 * guest RAM
 */
static int vgic_its_save_collection_table(struct vgic_its *its)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	u64 baser = its->baser_coll_table;
	gpa_t gpa = GITS_BASER_ADDR_48_to_52(baser);
	struct its_collection *collection;
	u64 val;
	size_t max_size, filled = 0;
	int ret, cte_esz = abi->cte_esz;

	if (!(baser & GITS_BASER_VALID))
		return 0;

	max_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;

	list_for_each_entry(collection, &its->collection_list, coll_list) {
		ret = vgic_its_save_cte(its, collection, gpa, cte_esz);
		if (ret)
			return ret;
		gpa += cte_esz;
		filled += cte_esz;
	}

	if (filled == max_size)
		return 0;

	/*
	 * table is not fully filled, add a last dummy element
	 * with valid bit unset
	 */
	val = 0;
	BUG_ON(cte_esz > sizeof(val));
	ret = vgic_write_guest_lock(its->dev->kvm, gpa, &val, cte_esz);
	return ret;
}

/**
 * vgic_its_restore_collection_table - reads the collection table
 * in guest memory and restores the ITS internal state. Requires the
 * BASER registers to be restored before.
 */
static int vgic_its_restore_collection_table(struct vgic_its *its)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	u64 baser = its->baser_coll_table;
	int cte_esz = abi->cte_esz;
	size_t max_size, read = 0;
	gpa_t gpa;
	int ret;

	if (!(baser & GITS_BASER_VALID))
		return 0;

	gpa = GITS_BASER_ADDR_48_to_52(baser);

	max_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;

	while (read < max_size) {
		ret = vgic_its_restore_cte(its, gpa, cte_esz);
		if (ret <= 0)
			break;
		gpa += cte_esz;
		read += cte_esz;
	}

	/* ret > 0 here means the full table was consumed successfully. */
	if (ret > 0)
		return 0;

	/* On error, drop any collections restored before the failure. */
	if (ret < 0)
		vgic_its_free_collection_list(its->dev->kvm, its);

	return ret;
}

/**
 * vgic_its_save_tables_v0 - Save the ITS tables into guest ARM
 * according to v0 ABI
 */
static int vgic_its_save_tables_v0(struct vgic_its *its)
{
	int ret;

	ret = vgic_its_save_device_tables(its);
	if (ret)
		return ret;

	return vgic_its_save_collection_table(its);
}

/**
 * vgic_its_restore_tables_v0 - Restore the ITS tables from guest RAM
 * to internal data structs according to V0 ABI
 *
 * Collections are restored first since the device/ITE restore path
 * looks collections up by id.
 */
static int vgic_its_restore_tables_v0(struct vgic_its *its)
{
	int ret;

	ret = vgic_its_restore_collection_table(its);
	if (ret)
		return ret;

	ret = vgic_its_restore_device_tables(its);
	if (ret)
		vgic_its_free_collection_list(its->dev->kvm, its);
	return ret;
}

/*
 * Commit the v0 ABI selection: re-encode the ABI's entry sizes into the
 * ENTRY_SIZE field of both BASER shadow registers.
 */
static int vgic_its_commit_v0(struct vgic_its *its)
{
	const struct vgic_its_abi *abi;

	abi = vgic_its_get_abi(its);
	its->baser_coll_table &= ~GITS_BASER_ENTRY_SIZE_MASK;
	its->baser_device_table &= ~GITS_BASER_ENTRY_SIZE_MASK;

	its->baser_coll_table |= (GIC_ENCODE_SZ(abi->cte_esz, 5)
					<< GITS_BASER_ENTRY_SIZE_SHIFT);

	its->baser_device_table |= (GIC_ENCODE_SZ(abi->dte_esz, 5)
					<< GITS_BASER_ENTRY_SIZE_SHIFT);
	return 0;
}
static void vgic_its_reset(struct kvm *kvm, struct vgic_its *its)
{
	/* We need to keep the ABI specific field values */
	its->baser_coll_table &= ~GITS_BASER_VALID;
	its->baser_device_table &= ~GITS_BASER_VALID;
	its->cbaser = 0;
	its->creadr = 0;
	its->cwriter = 0;
	its->enabled = 0;
	/* Drop all cached device/ITT/collection state. */
	vgic_its_free_device_list(kvm, its);
	vgic_its_free_collection_list(kvm, its);
}

/*
 * vgic_its_has_attr - KVM_HAS_DEVICE_ATTR handler for the ITS device.
 *
 * Returns 0 for every attribute the device supports (the ITS base
 * address, the INIT/RESET/SAVE/RESTORE controls and the ITS register
 * group), -ENXIO otherwise.
 */
static int vgic_its_has_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR:
		switch (attr->attr) {
		case KVM_VGIC_ITS_ADDR_TYPE:
			return 0;
		}
		break;
	case KVM_DEV_ARM_VGIC_GRP_CTRL:
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			return 0;
		case KVM_DEV_ARM_ITS_CTRL_RESET:
			return 0;
		case KVM_DEV_ARM_ITS_SAVE_TABLES:
			return 0;
		case KVM_DEV_ARM_ITS_RESTORE_TABLES:
			return 0;
		}
		break;
	case KVM_DEV_ARM_VGIC_GRP_ITS_REGS:
		return vgic_its_has_attr_regs(dev, attr);
	}
	return -ENXIO;
}

/*
 * vgic_its_ctrl - Handle the KVM_DEV_ARM_VGIC_GRP_CTRL attributes
 * (RESET, SAVE_TABLES, RESTORE_TABLES; INIT is a no-op here).
 *
 * Lock order as taken below: kvm->lock, then all vCPU mutexes, then
 * kvm->arch.config_lock, then its->its_lock.  Do not reorder.
 *
 * Returns 0 on success, -EBUSY if the vCPUs cannot all be locked, or
 * the error from the ABI save/restore callback.
 */
static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	int ret = 0;

	if (attr == KVM_DEV_ARM_VGIC_CTRL_INIT) /* Nothing to do */
		return 0;

	mutex_lock(&kvm->lock);

	/* Table save/restore must not race with running vCPUs. */
	if (!lock_all_vcpus(kvm)) {
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}

	mutex_lock(&kvm->arch.config_lock);
	mutex_lock(&its->its_lock);

	switch (attr) {
	case KVM_DEV_ARM_ITS_CTRL_RESET:
		vgic_its_reset(kvm, its);
		break;
	case KVM_DEV_ARM_ITS_SAVE_TABLES:
		ret = abi->save_tables(its);
		break;
	case KVM_DEV_ARM_ITS_RESTORE_TABLES:
		ret = abi->restore_tables(its);
		break;
	}

	mutex_unlock(&its->its_lock);
	mutex_unlock(&kvm->arch.config_lock);
	unlock_all_vcpus(kvm);
	mutex_unlock(&kvm->lock);
	return ret;
}

/*
 * kvm_arch_allow_write_without_running_vcpu - allow writing guest memory
 * without the running VCPU when dirty ring is enabled.
 *
 * The running VCPU is required to track dirty guest pages when dirty ring
 * is enabled. Otherwise, the backup bitmap should be used to track the
 * dirty guest pages. When vgic/its tables are being saved, the backup
 * bitmap is used to track the dirty guest pages due to the missed running
 * VCPU in the period.
 */
bool kvm_arch_allow_write_without_running_vcpu(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;

	/* Set for the duration of a vgic/ITS table save. */
	return dist->table_write_in_progress;
}

/*
 * vgic_its_set_attr - KVM_SET_DEVICE_ATTR handler for the ITS device.
 *
 * Handles setting the ITS base address (validated and registered as an
 * MMIO device), the CTRL group commands, and writes to the emulated
 * ITS registers.
 *
 * Returns 0 on success or a negative error code (-ENODEV, -EFAULT,
 * -ENXIO, or an error from the called helper).
 */
static int vgic_its_set_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	struct vgic_its *its = dev->private;
	int ret;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		unsigned long type = (unsigned long)attr->attr;
		u64 addr;

		if (type != KVM_VGIC_ITS_ADDR_TYPE)
			return -ENODEV;

		if (copy_from_user(&addr, uaddr, sizeof(addr)))
			return -EFAULT;

		/* 64K aligned, and must fit in the guest physical space. */
		ret = vgic_check_iorange(dev->kvm, its->vgic_its_base,
					 addr, SZ_64K, KVM_VGIC_V3_ITS_SIZE);
		if (ret)
			return ret;

		return vgic_register_its_iodev(dev->kvm, its, addr);
	}
	case KVM_DEV_ARM_VGIC_GRP_CTRL:
		return vgic_its_ctrl(dev->kvm, its, attr->attr);
	case KVM_DEV_ARM_VGIC_GRP_ITS_REGS: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 reg;

		if (get_user(reg, uaddr))
			return -EFAULT;

		return vgic_its_attr_regs_access(dev, attr, &reg, true);
	}
	}
	return -ENXIO;
}

/*
 * vgic_its_get_attr - KVM_GET_DEVICE_ATTR handler for the ITS device.
 *
 * Reads back the ITS base address or an emulated ITS register into
 * user space.
 *
 * Returns 0 on success or a negative error code (-ENODEV, -EFAULT,
 * -ENXIO, or an error from the register accessor).
 */
static int vgic_its_get_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		struct vgic_its *its = dev->private;
		u64 addr = its->vgic_its_base;
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		unsigned long type = (unsigned long)attr->attr;

		if (type != KVM_VGIC_ITS_ADDR_TYPE)
			return -ENODEV;

		if (copy_to_user(uaddr, &addr, sizeof(addr)))
			return -EFAULT;
		break;
	}
	case KVM_DEV_ARM_VGIC_GRP_ITS_REGS: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 reg;
		int ret;

		ret = vgic_its_attr_regs_access(dev, attr, &reg, false);
		if (ret)
			return ret;
		return put_user(reg, uaddr);
	}
	default:
		return -ENXIO;
	}

	return 0;
}

/* Device ops backing the KVM_DEV_TYPE_ARM_VGIC_ITS device class. */
static struct kvm_device_ops kvm_arm_vgic_its_ops = {
	.name = "kvm-arm-vgic-its",
	.create = vgic_its_create,
	.destroy = vgic_its_destroy,
	.set_attr = vgic_its_set_attr,
	.get_attr = vgic_its_get_attr,
	.has_attr = vgic_its_has_attr,
};

/*
 * kvm_vgic_register_its_device - Register the ITS device class with the
 * KVM device framework so userspace can create ITS instances.
 *
 * Returns 0 on success or a negative error code.
 */
int kvm_vgic_register_its_device(void)
{
	return kvm_register_device_ops(&kvm_arm_vgic_its_ops,
				       KVM_DEV_TYPE_ARM_VGIC_ITS);
}