19ed24f4bSMarc Zyngier // SPDX-License-Identifier: GPL-2.0-only 29ed24f4bSMarc Zyngier /* 39ed24f4bSMarc Zyngier * GICv3 ITS emulation 49ed24f4bSMarc Zyngier * 59ed24f4bSMarc Zyngier * Copyright (C) 2015,2016 ARM Ltd. 69ed24f4bSMarc Zyngier * Author: Andre Przywara <andre.przywara@arm.com> 79ed24f4bSMarc Zyngier */ 89ed24f4bSMarc Zyngier 99ed24f4bSMarc Zyngier #include <linux/cpu.h> 109ed24f4bSMarc Zyngier #include <linux/kvm.h> 119ed24f4bSMarc Zyngier #include <linux/kvm_host.h> 129ed24f4bSMarc Zyngier #include <linux/interrupt.h> 139ed24f4bSMarc Zyngier #include <linux/list.h> 149ed24f4bSMarc Zyngier #include <linux/uaccess.h> 159ed24f4bSMarc Zyngier #include <linux/list_sort.h> 169ed24f4bSMarc Zyngier 179ed24f4bSMarc Zyngier #include <linux/irqchip/arm-gic-v3.h> 189ed24f4bSMarc Zyngier 199ed24f4bSMarc Zyngier #include <asm/kvm_emulate.h> 209ed24f4bSMarc Zyngier #include <asm/kvm_arm.h> 219ed24f4bSMarc Zyngier #include <asm/kvm_mmu.h> 229ed24f4bSMarc Zyngier 239ed24f4bSMarc Zyngier #include "vgic.h" 249ed24f4bSMarc Zyngier #include "vgic-mmio.h" 259ed24f4bSMarc Zyngier 269ed24f4bSMarc Zyngier static int vgic_its_save_tables_v0(struct vgic_its *its); 279ed24f4bSMarc Zyngier static int vgic_its_restore_tables_v0(struct vgic_its *its); 289ed24f4bSMarc Zyngier static int vgic_its_commit_v0(struct vgic_its *its); 299ed24f4bSMarc Zyngier static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq, 309ed24f4bSMarc Zyngier struct kvm_vcpu *filter_vcpu, bool needs_inv); 319ed24f4bSMarc Zyngier 329ed24f4bSMarc Zyngier /* 339ed24f4bSMarc Zyngier * Creates a new (reference to a) struct vgic_irq for a given LPI. 349ed24f4bSMarc Zyngier * If this LPI is already mapped on another ITS, we increase its refcount 359ed24f4bSMarc Zyngier * and return a pointer to the existing structure. 369ed24f4bSMarc Zyngier * If this is a "new" LPI, we allocate and initialize a new struct vgic_irq. 379ed24f4bSMarc Zyngier * This function returns a pointer to the _unlocked_ structure. 
 */
static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
				     struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq = vgic_get_irq(kvm, NULL, intid), *oldirq;
	unsigned long flags;
	int ret;

	/* In this case there is no put, since we keep the reference. */
	if (irq)
		return irq;

	irq = kzalloc(sizeof(struct vgic_irq), GFP_KERNEL_ACCOUNT);
	if (!irq)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&irq->lpi_list);
	INIT_LIST_HEAD(&irq->ap_list);
	raw_spin_lock_init(&irq->irq_lock);

	irq->config = VGIC_CONFIG_EDGE;
	kref_init(&irq->refcount);
	irq->intid = intid;
	irq->target_vcpu = vcpu;
	irq->group = 1;

	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);

	/*
	 * There could be a race with another vgic_add_lpi(), so we need to
	 * check that we don't add a second list entry with the same LPI.
	 */
	list_for_each_entry(oldirq, &dist->lpi_list_head, lpi_list) {
		if (oldirq->intid != intid)
			continue;

		/* Someone was faster with adding this LPI, lets use that. */
		kfree(irq);
		irq = oldirq;

		/*
		 * This increases the refcount, the caller is expected to
		 * call vgic_put_irq() on the returned pointer once it's
		 * finished with the IRQ.
		 */
		vgic_get_irq_kref(irq);

		goto out_unlock;
	}

	list_add_tail(&irq->lpi_list, &dist->lpi_list_head);
	dist->lpi_list_count++;

out_unlock:
	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

	/*
	 * We "cache" the configuration table entries in our struct vgic_irq's.
	 * However we only have those structs for mapped IRQs, so we read in
	 * the respective config data from memory here upon mapping the LPI.
	 *
	 * Should any of these fail, behave as if we couldn't create the LPI
	 * by dropping the refcount and returning the error.
	 */
	ret = update_lpi_config(kvm, irq, NULL, false);
	if (ret) {
		vgic_put_irq(kvm, irq);
		return ERR_PTR(ret);
	}

	ret = vgic_v3_lpi_sync_pending_status(kvm, irq);
	if (ret) {
		vgic_put_irq(kvm, irq);
		return ERR_PTR(ret);
	}

	return irq;
}

/* An emulated ITS device, identified by its device ID, owning a list of ITTEs. */
struct its_device {
	struct list_head dev_list;

	/* the head for the list of ITTEs */
	struct list_head itt_head;
	u32 num_eventid_bits;
	gpa_t itt_addr;
	u32 device_id;
};

#define COLLECTION_NOT_MAPPED ((u32)~0)

/* A collection, mapping a collection ID to a target redistributor. */
struct its_collection {
	struct list_head coll_list;

	u32 collection_id;
	/* linear VCPU number, or COLLECTION_NOT_MAPPED when unmapped */
	u32 target_addr;
};

#define its_is_collection_mapped(coll) ((coll) && \
	((coll)->target_addr != COLLECTION_NOT_MAPPED))

/* An interrupt translation table entry, mapping an event ID to an LPI. */
struct its_ite {
	struct list_head ite_list;

	struct vgic_irq *irq;
	struct its_collection *collection;
	u32 event_id;
};

/*
 * One entry of the LPI translation cache. A NULL irq marks the entry
 * (and everything after it in the LRU list) as unused.
 */
struct vgic_translation_cache_entry {
	struct list_head	entry;
	phys_addr_t		db;
	u32			devid;
	u32			eventid;
	struct vgic_irq		*irq;
};

/**
 * struct vgic_its_abi - ITS abi ops and settings
 * @cte_esz: collection table entry size
 * @dte_esz: device table entry size
 * @ite_esz: interrupt translation table entry size
 * @save_tables: save the ITS tables into guest RAM
 * @restore_tables: restore the ITS internal structs from tables
 *  stored in guest RAM
 * @commit: initialize the registers which expose the ABI settings,
 *  especially the entry sizes
 */
struct vgic_its_abi {
	int cte_esz;
	int dte_esz;
	int ite_esz;
	int (*save_tables)(struct vgic_its *its);
	int (*restore_tables)(struct vgic_its *its);
	int (*commit)(struct vgic_its *its);
};

#define ABI_0_ESZ	8
#define ESZ_MAX		ABI_0_ESZ

static const struct vgic_its_abi its_table_abi_versions[] = {
	[0] = {
	 .cte_esz = ABI_0_ESZ,
	 .dte_esz = ABI_0_ESZ,
	 .ite_esz = ABI_0_ESZ,
	 .save_tables = vgic_its_save_tables_v0,
	 .restore_tables = vgic_its_restore_tables_v0,
	 .commit = vgic_its_commit_v0,
	},
};

#define NR_ITS_ABIS	ARRAY_SIZE(its_table_abi_versions)

/* Return the ABI ops for the revision currently selected on this ITS. */
inline const struct vgic_its_abi *vgic_its_get_abi(struct vgic_its *its)
{
	return &its_table_abi_versions[its->abi_rev];
}

/* Switch the ITS to ABI revision @rev and commit the new register settings. */
static int vgic_its_set_abi(struct vgic_its *its, u32 rev)
{
	const struct vgic_its_abi *abi;

	its->abi_rev = rev;
	abi = vgic_its_get_abi(its);
	return abi->commit(its);
}

/*
 * Find and returns a device in the device table for an ITS.
 * Must be called with the its_lock mutex held.
 */
static struct its_device *find_its_device(struct vgic_its *its, u32 device_id)
{
	struct its_device *device;

	list_for_each_entry(device, &its->device_list, dev_list)
		if (device_id == device->device_id)
			return device;

	return NULL;
}

/*
 * Find and returns an interrupt translation table entry (ITTE) for a given
 * Device ID/Event ID pair on an ITS.
 * Must be called with the its_lock mutex held.
 */
static struct its_ite *find_ite(struct vgic_its *its, u32 device_id,
				u32 event_id)
{
	struct its_device *device;
	struct its_ite *ite;

	device = find_its_device(its, device_id);
	if (device == NULL)
		return NULL;

	list_for_each_entry(ite, &device->itt_head, ite_list)
		if (ite->event_id == event_id)
			return ite;

	return NULL;
}

/* To be used as an iterator this macro misses the enclosing parentheses */
#define for_each_lpi_its(dev, ite, its) \
	list_for_each_entry(dev, &(its)->device_list, dev_list) \
		list_for_each_entry(ite, &(dev)->itt_head, ite_list)

#define GIC_LPI_OFFSET 8192

#define VITS_TYPER_IDBITS 16
#define VITS_TYPER_DEVBITS 16
#define VITS_DTE_MAX_DEVID_OFFSET	(BIT(14) - 1)
#define VITS_ITE_MAX_EVENTID_OFFSET	(BIT(16) - 1)

/*
 * Finds and returns a collection in the ITS collection table.
 * Must be called with the its_lock mutex held.
 */
static struct its_collection *find_collection(struct vgic_its *its, int coll_id)
{
	struct its_collection *collection;

	list_for_each_entry(collection, &its->collection_list, coll_list) {
		if (coll_id == collection->collection_id)
			return collection;
	}

	return NULL;
}

#define LPI_PROP_ENABLE_BIT(p)	((p) & LPI_PROP_ENABLED)
#define LPI_PROP_PRIORITY(p)	((p) & 0xfc)

/*
 * Reads the configuration data for a given LPI from guest memory and
 * updates the fields in struct vgic_irq.
 * If filter_vcpu is not NULL, applies only if the IRQ is targeting this
 * VCPU. Unconditionally applies if filter_vcpu is NULL.
 */
static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
			     struct kvm_vcpu *filter_vcpu, bool needs_inv)
{
	u64 propbase = GICR_PROPBASER_ADDRESS(kvm->arch.vgic.propbaser);
	u8 prop;
	int ret;
	unsigned long flags;

	/* One byte per LPI in the property table, indexed from GIC_LPI_OFFSET. */
	ret = kvm_read_guest_lock(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
				  &prop, 1);

	if (ret)
		return ret;

	raw_spin_lock_irqsave(&irq->irq_lock, flags);

	if (!filter_vcpu || filter_vcpu == irq->target_vcpu) {
		irq->priority = LPI_PROP_PRIORITY(prop);
		irq->enabled = LPI_PROP_ENABLE_BIT(prop);

		/* Software-only LPI: (re)queue it in case it just got enabled. */
		if (!irq->hw) {
			vgic_queue_irq_unlock(kvm, irq, flags);
			return 0;
		}
	}

	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

	/* Hardware-forwarded LPI: push the new property to the physical side. */
	if (irq->hw)
		return its_prop_update_vlpi(irq->host_irq, prop, needs_inv);

	return 0;
}

/*
 * Create a snapshot of the current LPIs targeting @vcpu, so that we can
 * enumerate those LPIs without holding any lock.
 * Returns their number and puts the kmalloc'ed array into intid_ptr.
 */
int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq;
	unsigned long flags;
	u32 *intids;
	int irq_count, i = 0;

	/*
	 * There is an obvious race between allocating the array and LPIs
	 * being mapped/unmapped. If we ended up here as a result of a
	 * command, we're safe (locks are held, preventing another
	 * command). If coming from another path (such as enabling LPIs),
	 * we must be careful not to overrun the array.
	 */
	irq_count = READ_ONCE(dist->lpi_list_count);
	intids = kmalloc_array(irq_count, sizeof(intids[0]), GFP_KERNEL_ACCOUNT);
	if (!intids)
		return -ENOMEM;

	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
		if (i == irq_count)
			break;
		/* We don't need to "get" the IRQ, as we hold the list lock. */
		if (vcpu && irq->target_vcpu != vcpu)
			continue;
		intids[i++] = irq->intid;
	}
	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

	*intid_ptr = intids;
	return i;
}

/*
 * Retarget @irq to @vcpu. For a hardware-forwarded LPI, also remap the
 * VLPI on the physical ITS to the new vcpu's VPE and keep the per-VPE
 * VLPI counts in sync.
 */
static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
{
	int ret = 0;
	unsigned long flags;

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	irq->target_vcpu = vcpu;
	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

	if (irq->hw) {
		struct its_vlpi_map map;

		ret = its_get_vlpi(irq->host_irq, &map);
		if (ret)
			return ret;

		if (map.vpe)
			atomic_dec(&map.vpe->vlpi_count);
		map.vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
		atomic_inc(&map.vpe->vlpi_count);

		ret = its_map_vlpi(irq->host_irq, &map);
	}

	return ret;
}

/*
 * Promotes the ITS view of affinity of an ITTE (which redistributor this LPI
 * is targeting) to the VGIC's view, which deals with target VCPUs.
3849ed24f4bSMarc Zyngier * Needs to be called whenever either the collection for a LPIs has 3859ed24f4bSMarc Zyngier * changed or the collection itself got retargeted. 3869ed24f4bSMarc Zyngier */ 3879ed24f4bSMarc Zyngier static void update_affinity_ite(struct kvm *kvm, struct its_ite *ite) 3889ed24f4bSMarc Zyngier { 3899ed24f4bSMarc Zyngier struct kvm_vcpu *vcpu; 3909ed24f4bSMarc Zyngier 3919ed24f4bSMarc Zyngier if (!its_is_collection_mapped(ite->collection)) 3929ed24f4bSMarc Zyngier return; 3939ed24f4bSMarc Zyngier 3949ed24f4bSMarc Zyngier vcpu = kvm_get_vcpu(kvm, ite->collection->target_addr); 3959ed24f4bSMarc Zyngier update_affinity(ite->irq, vcpu); 3969ed24f4bSMarc Zyngier } 3979ed24f4bSMarc Zyngier 3989ed24f4bSMarc Zyngier /* 3999ed24f4bSMarc Zyngier * Updates the target VCPU for every LPI targeting this collection. 4009ed24f4bSMarc Zyngier * Must be called with the its_lock mutex held. 4019ed24f4bSMarc Zyngier */ 4029ed24f4bSMarc Zyngier static void update_affinity_collection(struct kvm *kvm, struct vgic_its *its, 4039ed24f4bSMarc Zyngier struct its_collection *coll) 4049ed24f4bSMarc Zyngier { 4059ed24f4bSMarc Zyngier struct its_device *device; 4069ed24f4bSMarc Zyngier struct its_ite *ite; 4079ed24f4bSMarc Zyngier 4089ed24f4bSMarc Zyngier for_each_lpi_its(device, ite, its) { 409096560ddSGavin Shan if (ite->collection != coll) 4109ed24f4bSMarc Zyngier continue; 4119ed24f4bSMarc Zyngier 4129ed24f4bSMarc Zyngier update_affinity_ite(kvm, ite); 4139ed24f4bSMarc Zyngier } 4149ed24f4bSMarc Zyngier } 4159ed24f4bSMarc Zyngier 4169ed24f4bSMarc Zyngier static u32 max_lpis_propbaser(u64 propbaser) 4179ed24f4bSMarc Zyngier { 4189ed24f4bSMarc Zyngier int nr_idbits = (propbaser & 0x1f) + 1; 4199ed24f4bSMarc Zyngier 4209ed24f4bSMarc Zyngier return 1U << min(nr_idbits, INTERRUPT_ID_BITS_ITS); 4219ed24f4bSMarc Zyngier } 4229ed24f4bSMarc Zyngier 4239ed24f4bSMarc Zyngier /* 4249ed24f4bSMarc Zyngier * Sync the pending table pending bit of LPIs targeting @vcpu 4259ed24f4bSMarc 
Zyngier * with our own data structures. This relies on the LPI being 4269ed24f4bSMarc Zyngier * mapped before. 4279ed24f4bSMarc Zyngier */ 4289ed24f4bSMarc Zyngier static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu) 4299ed24f4bSMarc Zyngier { 4309ed24f4bSMarc Zyngier gpa_t pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser); 4319ed24f4bSMarc Zyngier struct vgic_irq *irq; 4329ed24f4bSMarc Zyngier int last_byte_offset = -1; 4339ed24f4bSMarc Zyngier int ret = 0; 4349ed24f4bSMarc Zyngier u32 *intids; 4359ed24f4bSMarc Zyngier int nr_irqs, i; 4369ed24f4bSMarc Zyngier unsigned long flags; 4379ed24f4bSMarc Zyngier u8 pendmask; 4389ed24f4bSMarc Zyngier 4399ed24f4bSMarc Zyngier nr_irqs = vgic_copy_lpi_list(vcpu->kvm, vcpu, &intids); 4409ed24f4bSMarc Zyngier if (nr_irqs < 0) 4419ed24f4bSMarc Zyngier return nr_irqs; 4429ed24f4bSMarc Zyngier 4439ed24f4bSMarc Zyngier for (i = 0; i < nr_irqs; i++) { 4449ed24f4bSMarc Zyngier int byte_offset, bit_nr; 4459ed24f4bSMarc Zyngier 4469ed24f4bSMarc Zyngier byte_offset = intids[i] / BITS_PER_BYTE; 4479ed24f4bSMarc Zyngier bit_nr = intids[i] % BITS_PER_BYTE; 4489ed24f4bSMarc Zyngier 4499ed24f4bSMarc Zyngier /* 4509ed24f4bSMarc Zyngier * For contiguously allocated LPIs chances are we just read 4519ed24f4bSMarc Zyngier * this very same byte in the last iteration. Reuse that. 
4529ed24f4bSMarc Zyngier */ 4539ed24f4bSMarc Zyngier if (byte_offset != last_byte_offset) { 4549ed24f4bSMarc Zyngier ret = kvm_read_guest_lock(vcpu->kvm, 4559ed24f4bSMarc Zyngier pendbase + byte_offset, 4569ed24f4bSMarc Zyngier &pendmask, 1); 4579ed24f4bSMarc Zyngier if (ret) { 4589ed24f4bSMarc Zyngier kfree(intids); 4599ed24f4bSMarc Zyngier return ret; 4609ed24f4bSMarc Zyngier } 4619ed24f4bSMarc Zyngier last_byte_offset = byte_offset; 4629ed24f4bSMarc Zyngier } 4639ed24f4bSMarc Zyngier 4649ed24f4bSMarc Zyngier irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]); 4659ed24f4bSMarc Zyngier raw_spin_lock_irqsave(&irq->irq_lock, flags); 4669ed24f4bSMarc Zyngier irq->pending_latch = pendmask & (1U << bit_nr); 4679ed24f4bSMarc Zyngier vgic_queue_irq_unlock(vcpu->kvm, irq, flags); 4689ed24f4bSMarc Zyngier vgic_put_irq(vcpu->kvm, irq); 4699ed24f4bSMarc Zyngier } 4709ed24f4bSMarc Zyngier 4719ed24f4bSMarc Zyngier kfree(intids); 4729ed24f4bSMarc Zyngier 4739ed24f4bSMarc Zyngier return ret; 4749ed24f4bSMarc Zyngier } 4759ed24f4bSMarc Zyngier 4769ed24f4bSMarc Zyngier static unsigned long vgic_mmio_read_its_typer(struct kvm *kvm, 4779ed24f4bSMarc Zyngier struct vgic_its *its, 4789ed24f4bSMarc Zyngier gpa_t addr, unsigned int len) 4799ed24f4bSMarc Zyngier { 4809ed24f4bSMarc Zyngier const struct vgic_its_abi *abi = vgic_its_get_abi(its); 4819ed24f4bSMarc Zyngier u64 reg = GITS_TYPER_PLPIS; 4829ed24f4bSMarc Zyngier 4839ed24f4bSMarc Zyngier /* 4849ed24f4bSMarc Zyngier * We use linear CPU numbers for redistributor addressing, 4859ed24f4bSMarc Zyngier * so GITS_TYPER.PTA is 0. 4869ed24f4bSMarc Zyngier * Also we force all PROPBASER registers to be the same, so 4879ed24f4bSMarc Zyngier * CommonLPIAff is 0 as well. 4889ed24f4bSMarc Zyngier * To avoid memory waste in the guest, we keep the number of IDBits and 4899ed24f4bSMarc Zyngier * DevBits low - as least for the time being. 
4909ed24f4bSMarc Zyngier */ 4919ed24f4bSMarc Zyngier reg |= GIC_ENCODE_SZ(VITS_TYPER_DEVBITS, 5) << GITS_TYPER_DEVBITS_SHIFT; 4929ed24f4bSMarc Zyngier reg |= GIC_ENCODE_SZ(VITS_TYPER_IDBITS, 5) << GITS_TYPER_IDBITS_SHIFT; 4939ed24f4bSMarc Zyngier reg |= GIC_ENCODE_SZ(abi->ite_esz, 4) << GITS_TYPER_ITT_ENTRY_SIZE_SHIFT; 4949ed24f4bSMarc Zyngier 4959ed24f4bSMarc Zyngier return extract_bytes(reg, addr & 7, len); 4969ed24f4bSMarc Zyngier } 4979ed24f4bSMarc Zyngier 4989ed24f4bSMarc Zyngier static unsigned long vgic_mmio_read_its_iidr(struct kvm *kvm, 4999ed24f4bSMarc Zyngier struct vgic_its *its, 5009ed24f4bSMarc Zyngier gpa_t addr, unsigned int len) 5019ed24f4bSMarc Zyngier { 5029ed24f4bSMarc Zyngier u32 val; 5039ed24f4bSMarc Zyngier 5049ed24f4bSMarc Zyngier val = (its->abi_rev << GITS_IIDR_REV_SHIFT) & GITS_IIDR_REV_MASK; 5059ed24f4bSMarc Zyngier val |= (PRODUCT_ID_KVM << GITS_IIDR_PRODUCTID_SHIFT) | IMPLEMENTER_ARM; 5069ed24f4bSMarc Zyngier return val; 5079ed24f4bSMarc Zyngier } 5089ed24f4bSMarc Zyngier 5099ed24f4bSMarc Zyngier static int vgic_mmio_uaccess_write_its_iidr(struct kvm *kvm, 5109ed24f4bSMarc Zyngier struct vgic_its *its, 5119ed24f4bSMarc Zyngier gpa_t addr, unsigned int len, 5129ed24f4bSMarc Zyngier unsigned long val) 5139ed24f4bSMarc Zyngier { 5149ed24f4bSMarc Zyngier u32 rev = GITS_IIDR_REV(val); 5159ed24f4bSMarc Zyngier 5169ed24f4bSMarc Zyngier if (rev >= NR_ITS_ABIS) 5179ed24f4bSMarc Zyngier return -EINVAL; 5189ed24f4bSMarc Zyngier return vgic_its_set_abi(its, rev); 5199ed24f4bSMarc Zyngier } 5209ed24f4bSMarc Zyngier 5219ed24f4bSMarc Zyngier static unsigned long vgic_mmio_read_its_idregs(struct kvm *kvm, 5229ed24f4bSMarc Zyngier struct vgic_its *its, 5239ed24f4bSMarc Zyngier gpa_t addr, unsigned int len) 5249ed24f4bSMarc Zyngier { 5259ed24f4bSMarc Zyngier switch (addr & 0xffff) { 5269ed24f4bSMarc Zyngier case GITS_PIDR0: 5279ed24f4bSMarc Zyngier return 0x92; /* part number, bits[7:0] */ 5289ed24f4bSMarc Zyngier case GITS_PIDR1: 5299ed24f4bSMarc 
Zyngier return 0xb4; /* part number, bits[11:8] */ 5309ed24f4bSMarc Zyngier case GITS_PIDR2: 5319ed24f4bSMarc Zyngier return GIC_PIDR2_ARCH_GICv3 | 0x0b; 5329ed24f4bSMarc Zyngier case GITS_PIDR4: 5339ed24f4bSMarc Zyngier return 0x40; /* This is a 64K software visible page */ 5349ed24f4bSMarc Zyngier /* The following are the ID registers for (any) GIC. */ 5359ed24f4bSMarc Zyngier case GITS_CIDR0: 5369ed24f4bSMarc Zyngier return 0x0d; 5379ed24f4bSMarc Zyngier case GITS_CIDR1: 5389ed24f4bSMarc Zyngier return 0xf0; 5399ed24f4bSMarc Zyngier case GITS_CIDR2: 5409ed24f4bSMarc Zyngier return 0x05; 5419ed24f4bSMarc Zyngier case GITS_CIDR3: 5429ed24f4bSMarc Zyngier return 0xb1; 5439ed24f4bSMarc Zyngier } 5449ed24f4bSMarc Zyngier 5459ed24f4bSMarc Zyngier return 0; 5469ed24f4bSMarc Zyngier } 5479ed24f4bSMarc Zyngier 5489ed24f4bSMarc Zyngier static struct vgic_irq *__vgic_its_check_cache(struct vgic_dist *dist, 5499ed24f4bSMarc Zyngier phys_addr_t db, 5509ed24f4bSMarc Zyngier u32 devid, u32 eventid) 5519ed24f4bSMarc Zyngier { 5529ed24f4bSMarc Zyngier struct vgic_translation_cache_entry *cte; 5539ed24f4bSMarc Zyngier 5549ed24f4bSMarc Zyngier list_for_each_entry(cte, &dist->lpi_translation_cache, entry) { 5559ed24f4bSMarc Zyngier /* 5569ed24f4bSMarc Zyngier * If we hit a NULL entry, there is nothing after this 5579ed24f4bSMarc Zyngier * point. 5589ed24f4bSMarc Zyngier */ 5599ed24f4bSMarc Zyngier if (!cte->irq) 5609ed24f4bSMarc Zyngier break; 5619ed24f4bSMarc Zyngier 5629ed24f4bSMarc Zyngier if (cte->db != db || cte->devid != devid || 5639ed24f4bSMarc Zyngier cte->eventid != eventid) 5649ed24f4bSMarc Zyngier continue; 5659ed24f4bSMarc Zyngier 5669ed24f4bSMarc Zyngier /* 5679ed24f4bSMarc Zyngier * Move this entry to the head, as it is the most 5689ed24f4bSMarc Zyngier * recently used. 
5699ed24f4bSMarc Zyngier */ 5709ed24f4bSMarc Zyngier if (!list_is_first(&cte->entry, &dist->lpi_translation_cache)) 5719ed24f4bSMarc Zyngier list_move(&cte->entry, &dist->lpi_translation_cache); 5729ed24f4bSMarc Zyngier 5739ed24f4bSMarc Zyngier return cte->irq; 5749ed24f4bSMarc Zyngier } 5759ed24f4bSMarc Zyngier 5769ed24f4bSMarc Zyngier return NULL; 5779ed24f4bSMarc Zyngier } 5789ed24f4bSMarc Zyngier 5799ed24f4bSMarc Zyngier static struct vgic_irq *vgic_its_check_cache(struct kvm *kvm, phys_addr_t db, 5809ed24f4bSMarc Zyngier u32 devid, u32 eventid) 5819ed24f4bSMarc Zyngier { 5829ed24f4bSMarc Zyngier struct vgic_dist *dist = &kvm->arch.vgic; 5839ed24f4bSMarc Zyngier struct vgic_irq *irq; 5849ed24f4bSMarc Zyngier unsigned long flags; 5859ed24f4bSMarc Zyngier 5869ed24f4bSMarc Zyngier raw_spin_lock_irqsave(&dist->lpi_list_lock, flags); 5879ed24f4bSMarc Zyngier irq = __vgic_its_check_cache(dist, db, devid, eventid); 5889ed24f4bSMarc Zyngier raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags); 5899ed24f4bSMarc Zyngier 5909ed24f4bSMarc Zyngier return irq; 5919ed24f4bSMarc Zyngier } 5929ed24f4bSMarc Zyngier 5939ed24f4bSMarc Zyngier static void vgic_its_cache_translation(struct kvm *kvm, struct vgic_its *its, 5949ed24f4bSMarc Zyngier u32 devid, u32 eventid, 5959ed24f4bSMarc Zyngier struct vgic_irq *irq) 5969ed24f4bSMarc Zyngier { 5979ed24f4bSMarc Zyngier struct vgic_dist *dist = &kvm->arch.vgic; 5989ed24f4bSMarc Zyngier struct vgic_translation_cache_entry *cte; 5999ed24f4bSMarc Zyngier unsigned long flags; 6009ed24f4bSMarc Zyngier phys_addr_t db; 6019ed24f4bSMarc Zyngier 6029ed24f4bSMarc Zyngier /* Do not cache a directly injected interrupt */ 6039ed24f4bSMarc Zyngier if (irq->hw) 6049ed24f4bSMarc Zyngier return; 6059ed24f4bSMarc Zyngier 6069ed24f4bSMarc Zyngier raw_spin_lock_irqsave(&dist->lpi_list_lock, flags); 6079ed24f4bSMarc Zyngier 6089ed24f4bSMarc Zyngier if (unlikely(list_empty(&dist->lpi_translation_cache))) 6099ed24f4bSMarc Zyngier goto out; 6109ed24f4bSMarc 
	/*
	 * We could have raced with another CPU caching the same
	 * translation behind our back, so let's check it is not in
	 * already
	 */
	db = its->vgic_its_base + GITS_TRANSLATER;
	if (__vgic_its_check_cache(dist, db, devid, eventid))
		goto out;

	/* Always reuse the last entry (LRU policy) */
	cte = list_last_entry(&dist->lpi_translation_cache,
			      typeof(*cte), entry);

	/*
	 * Caching the translation implies having an extra reference
	 * to the interrupt, so drop the potential reference on what
	 * was in the cache, and increment it on the new interrupt.
	 */
	if (cte->irq)
		__vgic_put_lpi_locked(kvm, cte->irq);

	vgic_get_irq_kref(irq);

	cte->db = db;
	cte->devid = devid;
	cte->eventid = eventid;
	cte->irq = irq;

	/* Move the new translation to the head of the list */
	list_move(&cte->entry, &dist->lpi_translation_cache);

out:
	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
}

/*
 * Drop every cached translation and release the IRQ reference each
 * valid entry held (matching the vgic_get_irq_kref() taken when the
 * entry was populated). Entries are NULLed but kept on the list.
 */
void vgic_its_invalidate_cache(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_translation_cache_entry *cte;
	unsigned long flags;

	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);

	list_for_each_entry(cte, &dist->lpi_translation_cache, entry) {
		/*
		 * If we hit a NULL entry, there is nothing after this
		 * point.
		 */
		if (!cte->irq)
			break;

		__vgic_put_lpi_locked(kvm, cte->irq);
		cte->irq = NULL;
	}

	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
}

/*
 * Resolve a (devid, eventid) pair on the given ITS to its vgic_irq and
 * cache the translation on success.
 *
 * Returns 0 and stores the IRQ in *irq on success, -EBUSY if the ITS or
 * the target vcpu's LPIs are disabled, or a positive ITS error code if
 * the event/collection is not mapped.
 * NOTE(review): walks the ITS tables via find_ite(), so the caller is
 * presumably expected to hold the its_lock — confirm against callers.
 */
int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
			 u32 devid, u32 eventid, struct vgic_irq **irq)
{
	struct kvm_vcpu *vcpu;
	struct its_ite *ite;

	if (!its->enabled)
		return -EBUSY;

	ite = find_ite(its, devid, eventid);
	if (!ite || !its_is_collection_mapped(ite->collection))
		return E_ITS_INT_UNMAPPED_INTERRUPT;

	vcpu = kvm_get_vcpu(kvm, ite->collection->target_addr);
	if (!vcpu)
		return E_ITS_INT_UNMAPPED_INTERRUPT;

	if (!vgic_lpis_enabled(vcpu))
		return -EBUSY;

	vgic_its_cache_translation(kvm, its, devid, eventid, ite->irq);

	*irq = ite->irq;
	return 0;
}

/*
 * Map an MSI doorbell address back to the emulated ITS device registered
 * on the KVM MMIO bus. Returns the ITS, or ERR_PTR(-ENODEV/-EINVAL) if
 * there is no ITS, the MSI lacks a device ID, or the address does not
 * belong to an ITS io-device.
 */
struct vgic_its *vgic_msi_to_its(struct kvm *kvm, struct kvm_msi *msi)
{
	u64 address;
	struct kvm_io_device *kvm_io_dev;
	struct vgic_io_device *iodev;

	if (!vgic_has_its(kvm))
		return ERR_PTR(-ENODEV);

	if (!(msi->flags & KVM_MSI_VALID_DEVID))
		return ERR_PTR(-EINVAL);

	address = (u64)msi->address_hi << 32 | msi->address_lo;

	kvm_io_dev = kvm_io_bus_get_dev(kvm, KVM_MMIO_BUS, address);
	if (!kvm_io_dev)
		return ERR_PTR(-EINVAL);

	if (kvm_io_dev->ops != &kvm_io_gic_ops)
		return ERR_PTR(-EINVAL);

	iodev = container_of(kvm_io_dev, struct vgic_io_device, dev);
	if (iodev->iodev_type != IODEV_ITS)
		return ERR_PTR(-EINVAL);

	return iodev->its;
}

/*
 * Find the target VCPU and the LPI number for a given devid/eventid pair
 * and make this IRQ pending, possibly injecting it.
 * Must be called with the its_lock mutex held.
 * Returns 0 on success, a positive error value for any ITS mapping
 * related errors and negative error values for generic errors.
 */
static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
				u32 devid, u32 eventid)
{
	struct vgic_irq *irq = NULL;
	unsigned long flags;
	int err;

	err = vgic_its_resolve_lpi(kvm, its, devid, eventid, &irq);
	if (err)
		return err;

	/*
	 * A hardware-forwarded LPI is made pending directly at the
	 * physical irqchip instead of going through the emulated path.
	 */
	if (irq->hw)
		return irq_set_irqchip_state(irq->host_irq,
					     IRQCHIP_STATE_PENDING, true);

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	irq->pending_latch = true;
	vgic_queue_irq_unlock(kvm, irq, flags);

	return 0;
}

/*
 * Fast path for MSI injection: look the (doorbell, devid, eventid)
 * triplet up in the translation cache and inject directly on a hit.
 * Returns 0 on a cache hit, -EWOULDBLOCK on a miss (caller falls back
 * to the slow path).
 */
int vgic_its_inject_cached_translation(struct kvm *kvm, struct kvm_msi *msi)
{
	struct vgic_irq *irq;
	unsigned long flags;
	phys_addr_t db;

	db = (u64)msi->address_hi << 32 | msi->address_lo;
	irq = vgic_its_check_cache(kvm, db, msi->devid, msi->data);
	if (!irq)
		return -EWOULDBLOCK;

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	irq->pending_latch = true;
	vgic_queue_irq_unlock(kvm, irq, flags);

	return 0;
}

/*
 * Queries the KVM IO bus framework to get the ITS pointer from the given
 * doorbell address.
 * We then call vgic_its_trigger_msi() with the decoded data.
 * According to the KVM_SIGNAL_MSI API description returns 1 on success.
 */
int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
{
	struct vgic_its *its;
	int ret;

	/* Cache hit: already injected, report success. */
	if (!vgic_its_inject_cached_translation(kvm, msi))
		return 1;

	its = vgic_msi_to_its(kvm, msi);
	if (IS_ERR(its))
		return PTR_ERR(its);

	mutex_lock(&its->its_lock);
	ret = vgic_its_trigger_msi(kvm, its, msi->devid, msi->data);
	mutex_unlock(&its->its_lock);

	if (ret < 0)
		return ret;

	/*
	 * KVM_SIGNAL_MSI demands a return value > 0 for success and 0
	 * if the guest has blocked the MSI. So we map any LPI mapping
	 * related error to that.
	 */
	if (ret)
		return 0;
	else
		return 1;
}

/* Requires the its_lock to be held. */
static void its_free_ite(struct kvm *kvm, struct its_ite *ite)
{
	list_del(&ite->ite_list);

	/* This put matches the get in vgic_add_lpi.
 */
	if (ite->irq) {
		/* Tear down any GICv4 forwarding before dropping the ref. */
		if (ite->irq->hw)
			WARN_ON(its_unmap_vlpi(ite->irq->host_irq));

		vgic_put_irq(kvm, ite->irq);
	}

	kfree(ite);
}

/*
 * Extract a bit field from one 64-bit word of an ITS command: take
 * 'size' bits starting at 'shift' from little-endian word 'word'.
 */
static u64 its_cmd_mask_field(u64 *its_cmd, int word, int shift, int size)
{
	return (le64_to_cpu(its_cmd[word]) >> shift) & (BIT_ULL(size) - 1);
}

/* Field accessors for the 4x64-bit ITS command block. */
#define its_cmd_get_command(cmd)	its_cmd_mask_field(cmd, 0, 0, 8)
#define its_cmd_get_deviceid(cmd)	its_cmd_mask_field(cmd, 0, 32, 32)
#define its_cmd_get_size(cmd)		(its_cmd_mask_field(cmd, 1, 0, 5) + 1)
#define its_cmd_get_id(cmd)		its_cmd_mask_field(cmd, 1, 0, 32)
#define its_cmd_get_physical_id(cmd)	its_cmd_mask_field(cmd, 1, 32, 32)
#define its_cmd_get_collection(cmd)	its_cmd_mask_field(cmd, 2, 0, 16)
#define its_cmd_get_ittaddr(cmd)	(its_cmd_mask_field(cmd, 2, 8, 44) << 8)
#define its_cmd_get_target_addr(cmd)	its_cmd_mask_field(cmd, 2, 16, 32)
#define its_cmd_get_validbit(cmd)	its_cmd_mask_field(cmd, 2, 63, 1)

/*
 * The DISCARD command frees an Interrupt Translation Table Entry (ITTE).
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_discard(struct kvm *kvm, struct vgic_its *its,
				       u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	struct its_ite *ite;

	ite = find_ite(its, device_id, event_id);
	if (ite && its_is_collection_mapped(ite->collection)) {
		/*
		 * Though the spec talks about removing the pending state, we
		 * don't bother here since we clear the ITTE anyway and the
		 * pending state is a property of the ITTE struct.
		 */
		vgic_its_invalidate_cache(kvm);

		its_free_ite(kvm, ite);
		return 0;
	}

	return E_ITS_DISCARD_UNMAPPED_INTERRUPT;
}

/*
 * The MOVI command moves an ITTE to a different collection.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_movi(struct kvm *kvm, struct vgic_its *its,
				    u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	u32 coll_id = its_cmd_get_collection(its_cmd);
	struct kvm_vcpu *vcpu;
	struct its_ite *ite;
	struct its_collection *collection;

	ite = find_ite(its, device_id, event_id);
	if (!ite)
		return E_ITS_MOVI_UNMAPPED_INTERRUPT;

	if (!its_is_collection_mapped(ite->collection))
		return E_ITS_MOVI_UNMAPPED_COLLECTION;

	collection = find_collection(its, coll_id);
	if (!its_is_collection_mapped(collection))
		return E_ITS_MOVI_UNMAPPED_COLLECTION;

	ite->collection = collection;
	vcpu = kvm_get_vcpu(kvm, collection->target_addr);

	/* The cached translation may now point at a stale target vcpu. */
	vgic_its_invalidate_cache(kvm);

	return update_affinity(ite->irq, vcpu);
}

/*
 * Check that the guest physical address 'gpa' is covered by a memslot.
 * Takes the SRCU read lock around the memslot lookup; hence "locked".
 */
static bool __is_visible_gfn_locked(struct vgic_its *its, gpa_t gpa)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int idx;
	bool ret;

	idx = srcu_read_lock(&its->dev->kvm->srcu);
	ret = kvm_is_visible_gfn(its->dev->kvm, gfn);
	srcu_read_unlock(&its->dev->kvm->srcu, idx);
	return ret;
}

/*
 * Check whether an ID can be stored into the corresponding guest table.
 * For a direct table this is pretty easy, but gets a bit nasty for
 * indirect tables. We check whether the resulting guest physical address
 * is actually valid (covered by a memslot and guest accessible).
 * For this we have to read the respective first level entry.
 */
static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
			      gpa_t *eaddr)
{
	int l1_tbl_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
	u64 indirect_ptr, type = GITS_BASER_TYPE(baser);
	phys_addr_t base = GITS_BASER_ADDR_48_to_52(baser);
	int esz = GITS_BASER_ENTRY_SIZE(baser);
	int index;

	switch (type) {
	case GITS_BASER_TYPE_DEVICE:
		if (id >= BIT_ULL(VITS_TYPER_DEVBITS))
			return false;
		break;
	case GITS_BASER_TYPE_COLLECTION:
		/* as GITS_TYPER.CIL == 0, ITS supports 16-bit collection ID */
		if (id >= BIT_ULL(16))
			return false;
		break;
	default:
		return false;
	}

	if (!(baser & GITS_BASER_INDIRECT)) {
		phys_addr_t addr;

		if (id >= (l1_tbl_size / esz))
			return false;

		addr = base + id * esz;

		if (eaddr)
			*eaddr = addr;

		return __is_visible_gfn_locked(its, addr);
	}

	/* calculate and check the index into the 1st level */
	index = id / (SZ_64K / esz);
	if (index >= (l1_tbl_size / sizeof(u64)))
		return false;

	/* Each 1st level entry is represented by a 64-bit value. */
	if (kvm_read_guest_lock(its->dev->kvm,
				base + index * sizeof(indirect_ptr),
				&indirect_ptr, sizeof(indirect_ptr)))
		return false;

	indirect_ptr = le64_to_cpu(indirect_ptr);

	/* check the valid bit of the first level entry */
	if (!(indirect_ptr & BIT_ULL(63)))
		return false;

	/* Mask the guest physical address and calculate the frame number. */
	indirect_ptr &= GENMASK_ULL(51, 16);

	/* Find the address of the actual entry */
	index = id % (SZ_64K / esz);
	indirect_ptr += index * esz;

	if (eaddr)
		*eaddr = indirect_ptr;

	return __is_visible_gfn_locked(its, indirect_ptr);
}

/*
 * Check whether an event ID can be stored in the corresponding Interrupt
 * Translation Table, which starts at device->itt_addr.
 */
static bool vgic_its_check_event_id(struct vgic_its *its, struct its_device *device,
				    u32 event_id)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	int ite_esz = abi->ite_esz;
	gpa_t gpa;

	/* max table size is: BIT_ULL(device->num_eventid_bits) * ite_esz */
	if (event_id >= BIT_ULL(device->num_eventid_bits))
		return false;

	gpa = device->itt_addr + event_id * ite_esz;
	return __is_visible_gfn_locked(its, gpa);
}

/*
 * Add a new collection into the ITS collection table.
 * Returns 0 on success, and a negative error value for generic errors.
 */
static int vgic_its_alloc_collection(struct vgic_its *its,
				     struct its_collection **colp,
				     u32 coll_id)
{
	struct its_collection *collection;

	collection = kzalloc(sizeof(*collection), GFP_KERNEL_ACCOUNT);
	if (!collection)
		return -ENOMEM;

	/* New collections start unmapped until a MAPC assigns a target. */
	collection->collection_id = coll_id;
	collection->target_addr = COLLECTION_NOT_MAPPED;

	list_add_tail(&collection->coll_list, &its->collection_list);
	*colp = collection;

	return 0;
}

/*
 * Remove a collection from the ITS: detach it from every ITE that still
 * references it, then unlink and free it. A no-op if the collection ID
 * is not currently mapped.
 */
static void vgic_its_free_collection(struct vgic_its *its, u32 coll_id)
{
	struct its_collection *collection;
	struct its_device *device;
	struct its_ite *ite;

	/*
	 * Clearing the mapping for that collection ID removes the
	 * entry from the list. If there wasn't any before, we can
	 * go home early.
	 */
	collection = find_collection(its, coll_id);
	if (!collection)
		return;

	for_each_lpi_its(device, ite, its)
		if (ite->collection &&
		    ite->collection->collection_id == coll_id)
			ite->collection = NULL;

	list_del(&collection->coll_list);
	kfree(collection);
}

/* Must be called with its_lock mutex held */
static struct its_ite *vgic_its_alloc_ite(struct its_device *device,
					  struct its_collection *collection,
					  u32 event_id)
{
	struct its_ite *ite;

	ite = kzalloc(sizeof(*ite), GFP_KERNEL_ACCOUNT);
	if (!ite)
		return ERR_PTR(-ENOMEM);

	ite->event_id = event_id;
	ite->collection = collection;

	list_add_tail(&ite->ite_list, &device->itt_head);
	return ite;
}

/*
 * The MAPTI and MAPI commands map LPIs to ITTEs.
 * Must be called with its_lock mutex held.
 */
static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its,
				    u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	u32 coll_id = its_cmd_get_collection(its_cmd);
	struct its_ite *ite;
	struct kvm_vcpu *vcpu = NULL;
	struct its_device *device;
	struct its_collection *collection, *new_coll = NULL;
	struct vgic_irq *irq;
	int lpi_nr;

	device = find_its_device(its, device_id);
	if (!device)
		return E_ITS_MAPTI_UNMAPPED_DEVICE;

	if (!vgic_its_check_event_id(its, device, event_id))
		return E_ITS_MAPTI_ID_OOR;

	/* MAPTI carries an explicit pINTID; MAPI reuses the event ID. */
	if (its_cmd_get_command(its_cmd) == GITS_CMD_MAPTI)
		lpi_nr = its_cmd_get_physical_id(its_cmd);
	else
		lpi_nr = event_id;
	if (lpi_nr < GIC_LPI_OFFSET ||
	    lpi_nr >= max_lpis_propbaser(kvm->arch.vgic.propbaser))
		return E_ITS_MAPTI_PHYSICALID_OOR;

	/* If there is an existing mapping, behavior is UNPREDICTABLE. */
	if (find_ite(its, device_id, event_id))
		return 0;

	collection = find_collection(its, coll_id);
	if (!collection) {
		int ret;

		if (!vgic_its_check_id(its, its->baser_coll_table, coll_id, NULL))
			return E_ITS_MAPC_COLLECTION_OOR;

		ret = vgic_its_alloc_collection(its, &collection, coll_id);
		if (ret)
			return ret;
		new_coll = collection;
	}

	ite = vgic_its_alloc_ite(device, collection, event_id);
	if (IS_ERR(ite)) {
		/* Unwind the collection we may have created above. */
		if (new_coll)
			vgic_its_free_collection(its, coll_id);
		return PTR_ERR(ite);
	}

	if (its_is_collection_mapped(collection))
		vcpu = kvm_get_vcpu(kvm, collection->target_addr);

	irq = vgic_add_lpi(kvm, lpi_nr, vcpu);
	if (IS_ERR(irq)) {
		if (new_coll)
			vgic_its_free_collection(its, coll_id);
		its_free_ite(kvm, ite);
		return PTR_ERR(irq);
	}
	ite->irq = irq;

	return 0;
}

/* Requires the its_lock to be held.
 */
static void vgic_its_free_device(struct kvm *kvm, struct its_device *device)
{
	struct its_ite *ite, *temp;

	/*
	 * The spec says that unmapping a device with still valid
	 * ITTEs associated is UNPREDICTABLE. We remove all ITTEs,
	 * since we cannot leave the memory unreferenced.
	 */
	list_for_each_entry_safe(ite, temp, &device->itt_head, ite_list)
		its_free_ite(kvm, ite);

	/* Cached translations may reference the ITEs we just freed. */
	vgic_its_invalidate_cache(kvm);

	list_del(&device->dev_list);
	kfree(device);
}

/* its lock must be held */
static void vgic_its_free_device_list(struct kvm *kvm, struct vgic_its *its)
{
	struct its_device *cur, *temp;

	list_for_each_entry_safe(cur, temp, &its->device_list, dev_list)
		vgic_its_free_device(kvm, cur);
}

/* its lock must be held */
static void vgic_its_free_collection_list(struct kvm *kvm, struct vgic_its *its)
{
	struct its_collection *cur, *temp;

	list_for_each_entry_safe(cur, temp, &its->collection_list, coll_list)
		vgic_its_free_collection(its, cur->collection_id);
}

/* Must be called with its_lock mutex held */
static struct its_device *vgic_its_alloc_device(struct vgic_its *its,
						u32 device_id, gpa_t itt_addr,
						u8 num_eventid_bits)
{
	struct its_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL_ACCOUNT);
	if (!device)
		return ERR_PTR(-ENOMEM);

	device->device_id = device_id;
	device->itt_addr = itt_addr;
	device->num_eventid_bits = num_eventid_bits;
	INIT_LIST_HEAD(&device->itt_head);

	list_add_tail(&device->dev_list, &its->device_list);
	return device;
}

/*
 * MAPD maps or unmaps a device ID to Interrupt Translation Tables (ITTs).
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_mapd(struct kvm *kvm, struct vgic_its *its,
				    u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	bool valid = its_cmd_get_validbit(its_cmd);
	u8 num_eventid_bits = its_cmd_get_size(its_cmd);
	gpa_t itt_addr = its_cmd_get_ittaddr(its_cmd);
	struct its_device *device;

	if (!vgic_its_check_id(its, its->baser_device_table, device_id, NULL))
		return E_ITS_MAPD_DEVICE_OOR;

	if (valid && num_eventid_bits > VITS_TYPER_IDBITS)
		return E_ITS_MAPD_ITTSIZE_OOR;

	device = find_its_device(its, device_id);

	/*
	 * The spec says that calling MAPD on an already mapped device
	 * invalidates all cached data for this device. We implement this
	 * by removing the mapping and re-establishing it.
	 */
	if (device)
		vgic_its_free_device(kvm, device);

	/*
	 * The spec does not say whether unmapping a not-mapped device
	 * is an error, so we are done in any case.
	 */
	if (!valid)
		return 0;

	device = vgic_its_alloc_device(its, device_id, itt_addr,
				       num_eventid_bits);

	return PTR_ERR_OR_ZERO(device);
}

/*
 * The MAPC command maps collection IDs to redistributors.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_mapc(struct kvm *kvm, struct vgic_its *its,
				    u64 *its_cmd)
{
	u16 coll_id;
	u32 target_addr;
	struct its_collection *collection;
	bool valid;

	valid = its_cmd_get_validbit(its_cmd);
	coll_id = its_cmd_get_collection(its_cmd);
	target_addr = its_cmd_get_target_addr(its_cmd);

	if (target_addr >= atomic_read(&kvm->online_vcpus))
		return E_ITS_MAPC_PROCNUM_OOR;

	if (!valid) {
		/* Unmap: drop the collection and any cached translations. */
		vgic_its_free_collection(its, coll_id);
		vgic_its_invalidate_cache(kvm);
	} else {
		collection = find_collection(its, coll_id);

		if (!collection) {
			int ret;

			if (!vgic_its_check_id(its, its->baser_coll_table,
					       coll_id, NULL))
				return E_ITS_MAPC_COLLECTION_OOR;

			ret = vgic_its_alloc_collection(its, &collection,
							coll_id);
			if (ret)
				return ret;
			collection->target_addr = target_addr;
		} else {
			/* Re-map: retarget every ITE in the collection. */
			collection->target_addr = target_addr;
			update_affinity_collection(kvm, its, collection);
		}
	}

	return 0;
}

/*
 * The CLEAR command removes the pending state for a particular LPI.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_clear(struct kvm *kvm, struct vgic_its *its,
				     u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	struct its_ite *ite;

	ite = find_ite(its, device_id, event_id);
	if (!ite)
		return E_ITS_CLEAR_UNMAPPED_INTERRUPT;

	ite->irq->pending_latch = false;

	/* Hardware-forwarded LPIs are cleared at the physical irqchip. */
	if (ite->irq->hw)
		return irq_set_irqchip_state(ite->irq->host_irq,
					     IRQCHIP_STATE_PENDING, false);

	return 0;
}

/* Re-read the configuration for a single LPI and request an INV. */
int vgic_its_inv_lpi(struct kvm *kvm, struct vgic_irq *irq)
{
	return update_lpi_config(kvm, irq, NULL, true);
}

/*
 * The INV command syncs the configuration bits from the memory table.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_inv(struct kvm *kvm, struct vgic_its *its,
				   u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	struct its_ite *ite;

	ite = find_ite(its, device_id, event_id);
	if (!ite)
		return E_ITS_INV_UNMAPPED_INTERRUPT;

	return vgic_its_inv_lpi(kvm, ite->irq);
}

/**
 * vgic_its_invall - invalidate all LPIs targeting a given vcpu
 * @vcpu: the vcpu for which the RD is targeted by an invalidation
 *
 * Contrary to the INVALL command, this targets a RD instead of a
 * collection, and we don't need to hold the its_lock, since no ITS is
 * involved here.
13394645d11fSMarc Zyngier */ 13404645d11fSMarc Zyngier int vgic_its_invall(struct kvm_vcpu *vcpu) 13414645d11fSMarc Zyngier { 13424645d11fSMarc Zyngier struct kvm *kvm = vcpu->kvm; 13434645d11fSMarc Zyngier int irq_count, i = 0; 13444645d11fSMarc Zyngier u32 *intids; 13454645d11fSMarc Zyngier 13464645d11fSMarc Zyngier irq_count = vgic_copy_lpi_list(kvm, vcpu, &intids); 13474645d11fSMarc Zyngier if (irq_count < 0) 13484645d11fSMarc Zyngier return irq_count; 13494645d11fSMarc Zyngier 13504645d11fSMarc Zyngier for (i = 0; i < irq_count; i++) { 13514645d11fSMarc Zyngier struct vgic_irq *irq = vgic_get_irq(kvm, NULL, intids[i]); 13524645d11fSMarc Zyngier if (!irq) 13534645d11fSMarc Zyngier continue; 13544645d11fSMarc Zyngier update_lpi_config(kvm, irq, vcpu, false); 13554645d11fSMarc Zyngier vgic_put_irq(kvm, irq); 13564645d11fSMarc Zyngier } 13574645d11fSMarc Zyngier 13584645d11fSMarc Zyngier kfree(intids); 13594645d11fSMarc Zyngier 13604645d11fSMarc Zyngier if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.its_vm) 13614645d11fSMarc Zyngier its_invall_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe); 13624645d11fSMarc Zyngier 13634645d11fSMarc Zyngier return 0; 13649ed24f4bSMarc Zyngier } 13659ed24f4bSMarc Zyngier 13669ed24f4bSMarc Zyngier /* 13679ed24f4bSMarc Zyngier * The INVALL command requests flushing of all IRQ data in this collection. 13689ed24f4bSMarc Zyngier * Find the VCPU mapped to that collection, then iterate over the VM's list 13699ed24f4bSMarc Zyngier * of mapped LPIs and update the configuration for each IRQ which targets 13709ed24f4bSMarc Zyngier * the specified vcpu. The configuration will be read from the in-memory 13719ed24f4bSMarc Zyngier * configuration table. 13729ed24f4bSMarc Zyngier * Must be called with the its_lock mutex held. 
13739ed24f4bSMarc Zyngier */ 13749ed24f4bSMarc Zyngier static int vgic_its_cmd_handle_invall(struct kvm *kvm, struct vgic_its *its, 13759ed24f4bSMarc Zyngier u64 *its_cmd) 13769ed24f4bSMarc Zyngier { 13779ed24f4bSMarc Zyngier u32 coll_id = its_cmd_get_collection(its_cmd); 13789ed24f4bSMarc Zyngier struct its_collection *collection; 13799ed24f4bSMarc Zyngier struct kvm_vcpu *vcpu; 13809ed24f4bSMarc Zyngier 13819ed24f4bSMarc Zyngier collection = find_collection(its, coll_id); 13829ed24f4bSMarc Zyngier if (!its_is_collection_mapped(collection)) 13839ed24f4bSMarc Zyngier return E_ITS_INVALL_UNMAPPED_COLLECTION; 13849ed24f4bSMarc Zyngier 13859ed24f4bSMarc Zyngier vcpu = kvm_get_vcpu(kvm, collection->target_addr); 13864645d11fSMarc Zyngier vgic_its_invall(vcpu); 13879ed24f4bSMarc Zyngier 13889ed24f4bSMarc Zyngier return 0; 13899ed24f4bSMarc Zyngier } 13909ed24f4bSMarc Zyngier 13919ed24f4bSMarc Zyngier /* 13929ed24f4bSMarc Zyngier * The MOVALL command moves the pending state of all IRQs targeting one 13939ed24f4bSMarc Zyngier * redistributor to another. We don't hold the pending state in the VCPUs, 13949ed24f4bSMarc Zyngier * but in the IRQs instead, so there is really not much to do for us here. 13959ed24f4bSMarc Zyngier * However the spec says that no IRQ must target the old redistributor 13969ed24f4bSMarc Zyngier * afterwards, so we make sure that no LPI is using the associated target_vcpu. 13979ed24f4bSMarc Zyngier * This command affects all LPIs in the system that target that redistributor. 
13989ed24f4bSMarc Zyngier */ 13999ed24f4bSMarc Zyngier static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its, 14009ed24f4bSMarc Zyngier u64 *its_cmd) 14019ed24f4bSMarc Zyngier { 14029ed24f4bSMarc Zyngier u32 target1_addr = its_cmd_get_target_addr(its_cmd); 14039ed24f4bSMarc Zyngier u32 target2_addr = its_cmd_mask_field(its_cmd, 3, 16, 32); 14049ed24f4bSMarc Zyngier struct kvm_vcpu *vcpu1, *vcpu2; 14059ed24f4bSMarc Zyngier struct vgic_irq *irq; 14069ed24f4bSMarc Zyngier u32 *intids; 14079ed24f4bSMarc Zyngier int irq_count, i; 14089ed24f4bSMarc Zyngier 14099ed24f4bSMarc Zyngier if (target1_addr >= atomic_read(&kvm->online_vcpus) || 14109ed24f4bSMarc Zyngier target2_addr >= atomic_read(&kvm->online_vcpus)) 14119ed24f4bSMarc Zyngier return E_ITS_MOVALL_PROCNUM_OOR; 14129ed24f4bSMarc Zyngier 14139ed24f4bSMarc Zyngier if (target1_addr == target2_addr) 14149ed24f4bSMarc Zyngier return 0; 14159ed24f4bSMarc Zyngier 14169ed24f4bSMarc Zyngier vcpu1 = kvm_get_vcpu(kvm, target1_addr); 14179ed24f4bSMarc Zyngier vcpu2 = kvm_get_vcpu(kvm, target2_addr); 14189ed24f4bSMarc Zyngier 14199ed24f4bSMarc Zyngier irq_count = vgic_copy_lpi_list(kvm, vcpu1, &intids); 14209ed24f4bSMarc Zyngier if (irq_count < 0) 14219ed24f4bSMarc Zyngier return irq_count; 14229ed24f4bSMarc Zyngier 14239ed24f4bSMarc Zyngier for (i = 0; i < irq_count; i++) { 14249ed24f4bSMarc Zyngier irq = vgic_get_irq(kvm, NULL, intids[i]); 14259ed24f4bSMarc Zyngier 14269ed24f4bSMarc Zyngier update_affinity(irq, vcpu2); 14279ed24f4bSMarc Zyngier 14289ed24f4bSMarc Zyngier vgic_put_irq(kvm, irq); 14299ed24f4bSMarc Zyngier } 14309ed24f4bSMarc Zyngier 14319ed24f4bSMarc Zyngier vgic_its_invalidate_cache(kvm); 14329ed24f4bSMarc Zyngier 14339ed24f4bSMarc Zyngier kfree(intids); 14349ed24f4bSMarc Zyngier return 0; 14359ed24f4bSMarc Zyngier } 14369ed24f4bSMarc Zyngier 14379ed24f4bSMarc Zyngier /* 14389ed24f4bSMarc Zyngier * The INT command injects the LPI associated with that DevID/EvID pair. 
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_int(struct kvm *kvm, struct vgic_its *its,
				   u64 *its_cmd)
{
	u32 msi_data = its_cmd_get_id(its_cmd);
	u64 msi_devid = its_cmd_get_deviceid(its_cmd);

	/* Reuse the MSI translation/injection path for the INT command. */
	return vgic_its_trigger_msi(kvm, its, msi_devid, msi_data);
}

/*
 * Decode one 32-byte command and dispatch it to its handler.
 * This function is called with the its_cmd lock held, but the ITS data
 * structure lock dropped.
 */
static int vgic_its_handle_command(struct kvm *kvm, struct vgic_its *its,
				   u64 *its_cmd)
{
	int ret = -ENODEV;	/* commands not decoded below report -ENODEV */

	mutex_lock(&its->its_lock);
	switch (its_cmd_get_command(its_cmd)) {
	case GITS_CMD_MAPD:
		ret = vgic_its_cmd_handle_mapd(kvm, its, its_cmd);
		break;
	case GITS_CMD_MAPC:
		ret = vgic_its_cmd_handle_mapc(kvm, its, its_cmd);
		break;
	case GITS_CMD_MAPI:
		ret = vgic_its_cmd_handle_mapi(kvm, its, its_cmd);
		break;
	case GITS_CMD_MAPTI:
		/* MAPI and MAPTI share a handler; it derives the intid. */
		ret = vgic_its_cmd_handle_mapi(kvm, its, its_cmd);
		break;
	case GITS_CMD_MOVI:
		ret = vgic_its_cmd_handle_movi(kvm, its, its_cmd);
		break;
	case GITS_CMD_DISCARD:
		ret = vgic_its_cmd_handle_discard(kvm, its, its_cmd);
		break;
	case GITS_CMD_CLEAR:
		ret = vgic_its_cmd_handle_clear(kvm, its, its_cmd);
		break;
	case GITS_CMD_MOVALL:
		ret = vgic_its_cmd_handle_movall(kvm, its, its_cmd);
		break;
	case GITS_CMD_INT:
		ret = vgic_its_cmd_handle_int(kvm, its, its_cmd);
		break;
	case GITS_CMD_INV:
		ret = vgic_its_cmd_handle_inv(kvm, its, its_cmd);
		break;
	case GITS_CMD_INVALL:
		ret = vgic_its_cmd_handle_invall(kvm, its, its_cmd);
		break;
	case GITS_CMD_SYNC:
		/* we ignore this command: we are in sync all of the time */
		ret = 0;
		break;
	}
	mutex_unlock(&its->its_lock);

	return ret;
}

/* Restrict a guest-written GITS_BASER value to the fields we support. */
static u64 vgic_sanitise_its_baser(u64 reg)
{
	reg = vgic_sanitise_field(reg, GITS_BASER_SHAREABILITY_MASK,
				  GITS_BASER_SHAREABILITY_SHIFT,
				  vgic_sanitise_shareability);
	reg = vgic_sanitise_field(reg, GITS_BASER_INNER_CACHEABILITY_MASK,
				  GITS_BASER_INNER_CACHEABILITY_SHIFT,
				  vgic_sanitise_inner_cacheability);
	reg = vgic_sanitise_field(reg, GITS_BASER_OUTER_CACHEABILITY_MASK,
				  GITS_BASER_OUTER_CACHEABILITY_SHIFT,
				  vgic_sanitise_outer_cacheability);

	/* We support only one (ITS) page size: 64K */
	reg = (reg & ~GITS_BASER_PAGE_SIZE_MASK) | GITS_BASER_PAGE_SIZE_64K;

	return reg;
}

/* Restrict a guest-written GITS_CBASER value to the fields we support. */
static u64 vgic_sanitise_its_cbaser(u64 reg)
{
	reg = vgic_sanitise_field(reg, GITS_CBASER_SHAREABILITY_MASK,
				  GITS_CBASER_SHAREABILITY_SHIFT,
				  vgic_sanitise_shareability);
	reg = vgic_sanitise_field(reg, GITS_CBASER_INNER_CACHEABILITY_MASK,
				  GITS_CBASER_INNER_CACHEABILITY_SHIFT,
				  vgic_sanitise_inner_cacheability);
	reg = vgic_sanitise_field(reg, GITS_CBASER_OUTER_CACHEABILITY_MASK,
				  GITS_CBASER_OUTER_CACHEABILITY_SHIFT,
				  vgic_sanitise_outer_cacheability);

	/* Sanitise the physical address to be 64k aligned. */
	reg &= ~GENMASK_ULL(15, 12);

	return reg;
}

/* MMIO read of GITS_CBASER (any byte/word within the 8-byte register). */
static unsigned long vgic_mmio_read_its_cbaser(struct kvm *kvm,
					       struct vgic_its *its,
					       gpa_t addr, unsigned int len)
{
	return extract_bytes(its->cbaser, addr & 7, len);
}

/* MMIO write of GITS_CBASER; also resets the command queue pointers. */
static void vgic_mmio_write_its_cbaser(struct kvm *kvm, struct vgic_its *its,
				       gpa_t addr, unsigned int len,
				       unsigned long val)
{
	/* When GITS_CTLR.Enable is 1, this register is RO. */
	if (its->enabled)
		return;

	mutex_lock(&its->cmd_lock);
	its->cbaser = update_64bit_reg(its->cbaser, addr & 7, len, val);
	its->cbaser = vgic_sanitise_its_cbaser(its->cbaser);
	its->creadr = 0;
	/*
	 * CWRITER is architecturally UNKNOWN on reset, but we need to reset
	 * it to CREADR to make sure we start with an empty command buffer.
	 */
	its->cwriter = its->creadr;
	mutex_unlock(&its->cmd_lock);
}

/* Command queue size in bytes, as encoded in CBASER.Size (4K pages). */
#define ITS_CMD_BUFFER_SIZE(baser)	((((baser) & 0xff) + 1) << 12)
#define ITS_CMD_SIZE			32
#define ITS_CMD_OFFSET(reg)		((reg) & GENMASK(19, 5))

/* Must be called with the cmd_lock held.
 */
static void vgic_its_process_commands(struct kvm *kvm, struct vgic_its *its)
{
	gpa_t cbaser;
	u64 cmd_buf[4];

	/* Commands are only processed when the ITS is enabled. */
	if (!its->enabled)
		return;

	cbaser = GITS_CBASER_ADDRESS(its->cbaser);

	/* Drain the ring: CREADR chases CWRITER one 32-byte command at a time. */
	while (its->cwriter != its->creadr) {
		int ret = kvm_read_guest_lock(kvm, cbaser + its->creadr,
					      cmd_buf, ITS_CMD_SIZE);
		/*
		 * If kvm_read_guest() fails, this could be due to the guest
		 * programming a bogus value in CBASER or something else going
		 * wrong from which we cannot easily recover.
		 * According to section 6.3.2 in the GICv3 spec we can just
		 * ignore that command then.
		 */
		if (!ret)
			vgic_its_handle_command(kvm, its, cmd_buf);

		its->creadr += ITS_CMD_SIZE;
		/* Wrap around at the end of the command buffer. */
		if (its->creadr == ITS_CMD_BUFFER_SIZE(its->cbaser))
			its->creadr = 0;
	}
}

/*
 * By writing to CWRITER the guest announces new commands to be processed.
 * To avoid any races in the first place, we take the its_cmd lock, which
 * protects our ring buffer variables, so that there is only one user
 * per ITS handling commands at a given time.
 */
static void vgic_mmio_write_its_cwriter(struct kvm *kvm, struct vgic_its *its,
					gpa_t addr, unsigned int len,
					unsigned long val)
{
	u64 reg;

	if (!its)
		return;

	mutex_lock(&its->cmd_lock);

	reg = update_64bit_reg(its->cwriter, addr & 7, len, val);
	reg = ITS_CMD_OFFSET(reg);
	/* Ignore writes pointing outside of the command buffer. */
	if (reg >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
		mutex_unlock(&its->cmd_lock);
		return;
	}
	its->cwriter = reg;

	vgic_its_process_commands(kvm, its);

	mutex_unlock(&its->cmd_lock);
}

/* MMIO read of GITS_CWRITER. */
static unsigned long vgic_mmio_read_its_cwriter(struct kvm *kvm,
						struct vgic_its *its,
						gpa_t addr, unsigned int len)
{
	return extract_bytes(its->cwriter, addr & 0x7, len);
}

/* MMIO read of GITS_CREADR. */
static unsigned long vgic_mmio_read_its_creadr(struct kvm *kvm,
					       struct vgic_its *its,
					       gpa_t addr, unsigned int len)
{
	return extract_bytes(its->creadr, addr & 0x7, len);
}

/*
 * Userspace write of GITS_CREADR (registered as the uaccess_its_write
 * handler, for save/restore): only allowed while the ITS is disabled and
 * for offsets inside the command buffer.
 */
static int vgic_mmio_uaccess_write_its_creadr(struct kvm *kvm,
					      struct vgic_its *its,
					      gpa_t addr, unsigned int len,
					      unsigned long val)
{
	u32 cmd_offset;
	int ret = 0;

	mutex_lock(&its->cmd_lock);

	if (its->enabled) {
		ret = -EBUSY;
		goto out;
	}

	cmd_offset = ITS_CMD_OFFSET(val);
	if (cmd_offset >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
		ret = -EINVAL;
		goto out;
	}

	its->creadr = cmd_offset;
out:
	mutex_unlock(&its->cmd_lock);
	return ret;
}

/* Which of the eight 64-bit GITS_BASERn registers is being accessed. */
#define BASER_INDEX(addr) (((addr) / sizeof(u64)) & 0x7)
static unsigned long vgic_mmio_read_its_baser(struct kvm *kvm,
					      struct vgic_its *its,
					      gpa_t addr, unsigned int len)
{
	u64 reg;

	switch (BASER_INDEX(addr)) {
	case 0:
		reg = its->baser_device_table;
		break;
	case 1:
		reg = its->baser_coll_table;
		break;
	default:
		/* Other GITS_BASERn registers are not implemented: RAZ. */
		reg = 0;
		break;
	}

	return extract_bytes(reg, addr & 7, len);
}

/* Read-only fields in GITS_BASER: entry size (52:48) and type (58:56). */
#define GITS_BASER_RO_MASK	(GENMASK_ULL(52, 48) | GENMASK_ULL(58, 56))
static void vgic_mmio_write_its_baser(struct kvm *kvm,
				      struct vgic_its *its,
				      gpa_t addr, unsigned int len,
				      unsigned long val)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	u64 entry_size, table_type;
	u64 reg, *regptr, clearbits = 0;

	/* When GITS_CTLR.Enable is 1, we ignore write accesses. */
	if (its->enabled)
		return;

	switch (BASER_INDEX(addr)) {
	case 0:
		regptr = &its->baser_device_table;
		entry_size = abi->dte_esz;
		table_type = GITS_BASER_TYPE_DEVICE;
		break;
	case 1:
		regptr = &its->baser_coll_table;
		entry_size = abi->cte_esz;
		table_type = GITS_BASER_TYPE_COLLECTION;
		/* The collection table is always flat: clear INDIRECT. */
		clearbits = GITS_BASER_INDIRECT;
		break;
	default:
		return;
	}

	/* Merge the write, strip RO fields, then re-assert our values. */
	reg = update_64bit_reg(*regptr, addr & 7, len, val);
	reg &= ~GITS_BASER_RO_MASK;
	reg &= ~clearbits;

	reg |= (entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT;
	reg |= table_type << GITS_BASER_TYPE_SHIFT;
	reg = vgic_sanitise_its_baser(reg);

	*regptr = reg;

	/* Clearing the Valid bit drops the corresponding shadow table. */
	if (!(reg & GITS_BASER_VALID)) {
		/* Take the its_lock to prevent a race with a save/restore */
		mutex_lock(&its->its_lock);
		switch (table_type) {
		case GITS_BASER_TYPE_DEVICE:
			vgic_its_free_device_list(kvm, its);
			break;
		case GITS_BASER_TYPE_COLLECTION:
			vgic_its_free_collection_list(kvm, its);
			break;
		}
		mutex_unlock(&its->its_lock);
	}
}

/* MMIO read of GITS_CTLR: Enable plus the QUIESCENT status bit. */
static unsigned long vgic_mmio_read_its_ctlr(struct kvm *vcpu,
					     struct vgic_its *its,
					     gpa_t addr, unsigned int len)
{
	u32 reg = 0;

	mutex_lock(&its->cmd_lock);
	/* QUIESCENT is reported when the command queue has been drained. */
	if (its->creadr == its->cwriter)
		reg |= GITS_CTLR_QUIESCENT;
	if (its->enabled)
		reg |= GITS_CTLR_ENABLE;
	mutex_unlock(&its->cmd_lock);

	return reg;
}

/* MMIO write of GITS_CTLR: toggle the ITS Enable bit. */
static void vgic_mmio_write_its_ctlr(struct kvm *kvm, struct vgic_its *its,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	mutex_lock(&its->cmd_lock);

	/*
	 * It is UNPREDICTABLE to enable the ITS if any of the CBASER or
	 * device/collection BASER are invalid
	 */
	if (!its->enabled && (val & GITS_CTLR_ENABLE) &&
	    (!(its->baser_device_table & GITS_BASER_VALID) ||
	     !(its->baser_coll_table & GITS_BASER_VALID) ||
	     !(its->cbaser & GITS_CBASER_VALID)))
		goto out;

	its->enabled = !!(val & GITS_CTLR_ENABLE);
	/* Disabling the ITS invalidates any cached translations. */
	if (!its->enabled)
		vgic_its_invalidate_cache(kvm);

	/*
	 * Try to process any pending commands. This function bails out early
	 * if the ITS is disabled or no commands have been queued.
	 */
	vgic_its_process_commands(kvm, its);

out:
	mutex_unlock(&its->cmd_lock);
}

#define REGISTER_ITS_DESC(off, rd, wr, length, acc)		\
{								\
	.reg_offset = off,					\
	.len = length,						\
	.access_flags = acc,					\
	.its_read = rd,						\
	.its_write = wr,					\
}

#define REGISTER_ITS_DESC_UACCESS(off, rd, wr, uwr, length, acc)\
{								\
	.reg_offset = off,					\
	.len = length,						\
	.access_flags = acc,					\
	.its_read = rd,						\
	.its_write = wr,					\
	.uaccess_its_write = uwr,				\
}

/* Write handler for registers that are write-ignored. */
static void its_mmio_write_wi(struct kvm *kvm, struct vgic_its *its,
			      gpa_t addr, unsigned int len, unsigned long val)
{
	/* Ignore */
}

/* MMIO register map of the emulated ITS frame. */
static struct vgic_register_region its_registers[] = {
	REGISTER_ITS_DESC(GITS_CTLR,
		vgic_mmio_read_its_ctlr, vgic_mmio_write_its_ctlr, 4,
		VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC_UACCESS(GITS_IIDR,
		vgic_mmio_read_its_iidr, its_mmio_write_wi,
		vgic_mmio_uaccess_write_its_iidr, 4,
		VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_TYPER,
		vgic_mmio_read_its_typer, its_mmio_write_wi, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_CBASER,
		vgic_mmio_read_its_cbaser, vgic_mmio_write_its_cbaser, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_CWRITER,
		vgic_mmio_read_its_cwriter, vgic_mmio_write_its_cwriter, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC_UACCESS(GITS_CREADR,
		vgic_mmio_read_its_creadr, its_mmio_write_wi,
		vgic_mmio_uaccess_write_its_creadr, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_BASER,
		vgic_mmio_read_its_baser, vgic_mmio_write_its_baser, 0x40,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_IDREGS_BASE,
		vgic_mmio_read_its_idregs, its_mmio_write_wi, 0x30,
		VGIC_ACCESS_32bit),
};

/* This is called on setting the LPI enable bit in the redistributor.
 */
void vgic_enable_lpis(struct kvm_vcpu *vcpu)
{
	/*
	 * Unless the guest promised a zeroed pending table via
	 * GICR_PENDBASER.PTZ, read the pending bits from guest memory.
	 */
	if (!(vcpu->arch.vgic_cpu.pendbaser & GICR_PENDBASER_PTZ))
		its_sync_lpi_pending_table(vcpu);
}

/* Register the ITS MMIO frame at @addr on the KVM MMIO bus. */
static int vgic_register_its_iodev(struct kvm *kvm, struct vgic_its *its,
				   u64 addr)
{
	struct vgic_io_device *iodev = &its->iodev;
	int ret;

	mutex_lock(&kvm->slots_lock);
	/* The ITS base address can only be set once. */
	if (!IS_VGIC_ADDR_UNDEF(its->vgic_its_base)) {
		ret = -EBUSY;
		goto out;
	}

	its->vgic_its_base = addr;
	iodev->regions = its_registers;
	iodev->nr_regions = ARRAY_SIZE(its_registers);
	kvm_iodevice_init(&iodev->dev, &kvm_io_gic_ops);

	iodev->base_addr = its->vgic_its_base;
	iodev->iodev_type = IODEV_ITS;
	iodev->its = its;
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, iodev->base_addr,
				      KVM_VGIC_V3_ITS_SIZE, &iodev->dev);
out:
	mutex_unlock(&kvm->slots_lock);

	return ret;
}

/* Default is 16 cached LPIs per vcpu */
#define LPI_DEFAULT_PCPU_CACHE_SIZE	16

/* Pre-allocate the (empty) LPI translation cache entries for this VM. */
void vgic_lpi_translation_cache_init(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	unsigned int sz;
	int i;

	/* Already populated (e.g. by a previously created ITS device). */
	if (!list_empty(&dist->lpi_translation_cache))
		return;

	sz = atomic_read(&kvm->online_vcpus) * LPI_DEFAULT_PCPU_CACHE_SIZE;

	for (i = 0; i < sz; i++) {
		struct vgic_translation_cache_entry *cte;

		/* An allocation failure is not fatal */
		cte = kzalloc(sizeof(*cte), GFP_KERNEL_ACCOUNT);
		if (WARN_ON(!cte))
			break;

		INIT_LIST_HEAD(&cte->entry);
		list_add(&cte->entry, &dist->lpi_translation_cache);
	}
}

/* Tear down the LPI translation cache and free all its entries. */
void vgic_lpi_translation_cache_destroy(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_translation_cache_entry *cte, *tmp;

	/* Drop any cached translations before freeing the entries. */
	vgic_its_invalidate_cache(kvm);

	list_for_each_entry_safe(cte, tmp,
				 &dist->lpi_translation_cache, entry) {
		list_del(&cte->entry);
		kfree(cte);
	}
}

#define INITIAL_BASER_VALUE						  \
	(GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWb)		| \
	 GIC_BASER_CACHEABILITY(GITS_BASER, OUTER, SameAsInner)		| \
	 GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable)		| \
	 GITS_BASER_PAGE_SIZE_64K)

#define INITIAL_PROPBASER_VALUE						  \
	(GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWb)		| \
	 GIC_BASER_CACHEABILITY(GICR_PROPBASER, OUTER, SameAsInner)	| \
	 GIC_BASER_SHAREABILITY(GICR_PROPBASER, InnerShareable))

/* KVM device .create callback: allocate and initialize one ITS instance. */
static int vgic_its_create(struct kvm_device *dev, u32 type)
{
	struct vgic_its *its;

	if (type != KVM_DEV_TYPE_ARM_VGIC_ITS)
		return -ENODEV;

	its = kzalloc(sizeof(struct vgic_its), GFP_KERNEL_ACCOUNT);
	if (!its)
		return -ENOMEM;

	if (vgic_initialized(dev->kvm)) {
		int ret = vgic_v4_init(dev->kvm);
		if (ret < 0) {
			kfree(its);
			return ret;
		}

		vgic_lpi_translation_cache_init(dev->kvm);
	}

	mutex_init(&its->its_lock);
	mutex_init(&its->cmd_lock);

	its->vgic_its_base = VGIC_ADDR_UNDEF;

	INIT_LIST_HEAD(&its->device_list);
	INIT_LIST_HEAD(&its->collection_list);

	/* With an ITS present, MSIs must carry a device ID. */
	dev->kvm->arch.vgic.msis_require_devid = true;
	dev->kvm->arch.vgic.has_its = true;
	its->enabled = false;
	its->dev = dev;

	its->baser_device_table = INITIAL_BASER_VALUE |
		((u64)GITS_BASER_TYPE_DEVICE << GITS_BASER_TYPE_SHIFT);
	its->baser_coll_table = INITIAL_BASER_VALUE |
		((u64)GITS_BASER_TYPE_COLLECTION << GITS_BASER_TYPE_SHIFT);
	dev->kvm->arch.vgic.propbaser = INITIAL_PROPBASER_VALUE;

	dev->private = its;

	/* Default to the most recent ITS ABI. */
	return vgic_its_set_abi(its, NR_ITS_ABIS - 1);
}

/* KVM device .destroy callback: free the tables and the ITS itself. */
static void vgic_its_destroy(struct kvm_device *kvm_dev)
{
	struct kvm *kvm = kvm_dev->kvm;
	struct vgic_its *its = kvm_dev->private;

	mutex_lock(&its->its_lock);

	vgic_its_free_device_list(kvm, its);
	vgic_its_free_collection_list(kvm, its);

	mutex_unlock(&its->its_lock);
	kfree(its);
	kfree(kvm_dev);/* alloc by kvm_ioctl_create_device, free by .destroy */
}

/* Check that @attr names an existing, properly aligned ITS register. */
static int vgic_its_has_attr_regs(struct kvm_device *dev,
				  struct kvm_device_attr *attr)
{
	const struct vgic_register_region *region;
	gpa_t offset = attr->attr;
	int align;

	/* 32-bit registers need 4-byte alignment, 64-bit ones 8-byte. */
	align = (offset < GITS_TYPER) || (offset >= GITS_PIDR4) ?
0x3 : 0x7; 20059ed24f4bSMarc Zyngier 20069ed24f4bSMarc Zyngier if (offset & align) 20079ed24f4bSMarc Zyngier return -EINVAL; 20089ed24f4bSMarc Zyngier 20099ed24f4bSMarc Zyngier region = vgic_find_mmio_region(its_registers, 20109ed24f4bSMarc Zyngier ARRAY_SIZE(its_registers), 20119ed24f4bSMarc Zyngier offset); 20129ed24f4bSMarc Zyngier if (!region) 20139ed24f4bSMarc Zyngier return -ENXIO; 20149ed24f4bSMarc Zyngier 20159ed24f4bSMarc Zyngier return 0; 20169ed24f4bSMarc Zyngier } 20179ed24f4bSMarc Zyngier 20189ed24f4bSMarc Zyngier static int vgic_its_attr_regs_access(struct kvm_device *dev, 20199ed24f4bSMarc Zyngier struct kvm_device_attr *attr, 20209ed24f4bSMarc Zyngier u64 *reg, bool is_write) 20219ed24f4bSMarc Zyngier { 20229ed24f4bSMarc Zyngier const struct vgic_register_region *region; 20239ed24f4bSMarc Zyngier struct vgic_its *its; 20249ed24f4bSMarc Zyngier gpa_t addr, offset; 20259ed24f4bSMarc Zyngier unsigned int len; 20269ed24f4bSMarc Zyngier int align, ret = 0; 20279ed24f4bSMarc Zyngier 20289ed24f4bSMarc Zyngier its = dev->private; 20299ed24f4bSMarc Zyngier offset = attr->attr; 20309ed24f4bSMarc Zyngier 20319ed24f4bSMarc Zyngier /* 20329ed24f4bSMarc Zyngier * Although the spec supports upper/lower 32-bit accesses to 20339ed24f4bSMarc Zyngier * 64-bit ITS registers, the userspace ABI requires 64-bit 20349ed24f4bSMarc Zyngier * accesses to all 64-bit wide registers. 
We therefore only 20359ed24f4bSMarc Zyngier * support 32-bit accesses to GITS_CTLR, GITS_IIDR and GITS ID 20369ed24f4bSMarc Zyngier * registers 20379ed24f4bSMarc Zyngier */ 20389ed24f4bSMarc Zyngier if ((offset < GITS_TYPER) || (offset >= GITS_PIDR4)) 20399ed24f4bSMarc Zyngier align = 0x3; 20409ed24f4bSMarc Zyngier else 20419ed24f4bSMarc Zyngier align = 0x7; 20429ed24f4bSMarc Zyngier 20439ed24f4bSMarc Zyngier if (offset & align) 20449ed24f4bSMarc Zyngier return -EINVAL; 20459ed24f4bSMarc Zyngier 20469ed24f4bSMarc Zyngier mutex_lock(&dev->kvm->lock); 20479ed24f4bSMarc Zyngier 20489ed24f4bSMarc Zyngier if (IS_VGIC_ADDR_UNDEF(its->vgic_its_base)) { 20499ed24f4bSMarc Zyngier ret = -ENXIO; 20509ed24f4bSMarc Zyngier goto out; 20519ed24f4bSMarc Zyngier } 20529ed24f4bSMarc Zyngier 20539ed24f4bSMarc Zyngier region = vgic_find_mmio_region(its_registers, 20549ed24f4bSMarc Zyngier ARRAY_SIZE(its_registers), 20559ed24f4bSMarc Zyngier offset); 20569ed24f4bSMarc Zyngier if (!region) { 20579ed24f4bSMarc Zyngier ret = -ENXIO; 20589ed24f4bSMarc Zyngier goto out; 20599ed24f4bSMarc Zyngier } 20609ed24f4bSMarc Zyngier 20619ed24f4bSMarc Zyngier if (!lock_all_vcpus(dev->kvm)) { 20629ed24f4bSMarc Zyngier ret = -EBUSY; 20639ed24f4bSMarc Zyngier goto out; 20649ed24f4bSMarc Zyngier } 20659ed24f4bSMarc Zyngier 20669ed24f4bSMarc Zyngier addr = its->vgic_its_base + offset; 20679ed24f4bSMarc Zyngier 20689ed24f4bSMarc Zyngier len = region->access_flags & VGIC_ACCESS_64bit ? 
8 : 4; 20699ed24f4bSMarc Zyngier 20709ed24f4bSMarc Zyngier if (is_write) { 20719ed24f4bSMarc Zyngier if (region->uaccess_its_write) 20729ed24f4bSMarc Zyngier ret = region->uaccess_its_write(dev->kvm, its, addr, 20739ed24f4bSMarc Zyngier len, *reg); 20749ed24f4bSMarc Zyngier else 20759ed24f4bSMarc Zyngier region->its_write(dev->kvm, its, addr, len, *reg); 20769ed24f4bSMarc Zyngier } else { 20779ed24f4bSMarc Zyngier *reg = region->its_read(dev->kvm, its, addr, len); 20789ed24f4bSMarc Zyngier } 20799ed24f4bSMarc Zyngier unlock_all_vcpus(dev->kvm); 20809ed24f4bSMarc Zyngier out: 20819ed24f4bSMarc Zyngier mutex_unlock(&dev->kvm->lock); 20829ed24f4bSMarc Zyngier return ret; 20839ed24f4bSMarc Zyngier } 20849ed24f4bSMarc Zyngier 20859ed24f4bSMarc Zyngier static u32 compute_next_devid_offset(struct list_head *h, 20869ed24f4bSMarc Zyngier struct its_device *dev) 20879ed24f4bSMarc Zyngier { 20889ed24f4bSMarc Zyngier struct its_device *next; 20899ed24f4bSMarc Zyngier u32 next_offset; 20909ed24f4bSMarc Zyngier 20919ed24f4bSMarc Zyngier if (list_is_last(&dev->dev_list, h)) 20929ed24f4bSMarc Zyngier return 0; 20939ed24f4bSMarc Zyngier next = list_next_entry(dev, dev_list); 20949ed24f4bSMarc Zyngier next_offset = next->device_id - dev->device_id; 20959ed24f4bSMarc Zyngier 20969ed24f4bSMarc Zyngier return min_t(u32, next_offset, VITS_DTE_MAX_DEVID_OFFSET); 20979ed24f4bSMarc Zyngier } 20989ed24f4bSMarc Zyngier 20999ed24f4bSMarc Zyngier static u32 compute_next_eventid_offset(struct list_head *h, struct its_ite *ite) 21009ed24f4bSMarc Zyngier { 21019ed24f4bSMarc Zyngier struct its_ite *next; 21029ed24f4bSMarc Zyngier u32 next_offset; 21039ed24f4bSMarc Zyngier 21049ed24f4bSMarc Zyngier if (list_is_last(&ite->ite_list, h)) 21059ed24f4bSMarc Zyngier return 0; 21069ed24f4bSMarc Zyngier next = list_next_entry(ite, ite_list); 21079ed24f4bSMarc Zyngier next_offset = next->event_id - ite->event_id; 21089ed24f4bSMarc Zyngier 21099ed24f4bSMarc Zyngier return min_t(u32, next_offset, 
VITS_ITE_MAX_EVENTID_OFFSET); 21109ed24f4bSMarc Zyngier } 21119ed24f4bSMarc Zyngier 21129ed24f4bSMarc Zyngier /** 21139ed24f4bSMarc Zyngier * entry_fn_t - Callback called on a table entry restore path 21149ed24f4bSMarc Zyngier * @its: its handle 21159ed24f4bSMarc Zyngier * @id: id of the entry 21169ed24f4bSMarc Zyngier * @entry: pointer to the entry 21179ed24f4bSMarc Zyngier * @opaque: pointer to an opaque data 21189ed24f4bSMarc Zyngier * 21199ed24f4bSMarc Zyngier * Return: < 0 on error, 0 if last element was identified, id offset to next 21209ed24f4bSMarc Zyngier * element otherwise 21219ed24f4bSMarc Zyngier */ 21229ed24f4bSMarc Zyngier typedef int (*entry_fn_t)(struct vgic_its *its, u32 id, void *entry, 21239ed24f4bSMarc Zyngier void *opaque); 21249ed24f4bSMarc Zyngier 21259ed24f4bSMarc Zyngier /** 21269ed24f4bSMarc Zyngier * scan_its_table - Scan a contiguous table in guest RAM and applies a function 21279ed24f4bSMarc Zyngier * to each entry 21289ed24f4bSMarc Zyngier * 21299ed24f4bSMarc Zyngier * @its: its handle 21309ed24f4bSMarc Zyngier * @base: base gpa of the table 21319ed24f4bSMarc Zyngier * @size: size of the table in bytes 21329ed24f4bSMarc Zyngier * @esz: entry size in bytes 21339ed24f4bSMarc Zyngier * @start_id: the ID of the first entry in the table 21349ed24f4bSMarc Zyngier * (non zero for 2d level tables) 21359ed24f4bSMarc Zyngier * @fn: function to apply on each entry 21369ed24f4bSMarc Zyngier * 21379ed24f4bSMarc Zyngier * Return: < 0 on error, 0 if last element was identified, 1 otherwise 21389ed24f4bSMarc Zyngier * (the last element may not be found on second level tables) 21399ed24f4bSMarc Zyngier */ 21409ed24f4bSMarc Zyngier static int scan_its_table(struct vgic_its *its, gpa_t base, int size, u32 esz, 21419ed24f4bSMarc Zyngier int start_id, entry_fn_t fn, void *opaque) 21429ed24f4bSMarc Zyngier { 21439ed24f4bSMarc Zyngier struct kvm *kvm = its->dev->kvm; 21449ed24f4bSMarc Zyngier unsigned long len = size; 21459ed24f4bSMarc Zyngier int id = 
start_id; 21469ed24f4bSMarc Zyngier gpa_t gpa = base; 21479ed24f4bSMarc Zyngier char entry[ESZ_MAX]; 21489ed24f4bSMarc Zyngier int ret; 21499ed24f4bSMarc Zyngier 21509ed24f4bSMarc Zyngier memset(entry, 0, esz); 21519ed24f4bSMarc Zyngier 2152c000a260SEric Ren while (true) { 21539ed24f4bSMarc Zyngier int next_offset; 21549ed24f4bSMarc Zyngier size_t byte_offset; 21559ed24f4bSMarc Zyngier 21569ed24f4bSMarc Zyngier ret = kvm_read_guest_lock(kvm, gpa, entry, esz); 21579ed24f4bSMarc Zyngier if (ret) 21589ed24f4bSMarc Zyngier return ret; 21599ed24f4bSMarc Zyngier 21609ed24f4bSMarc Zyngier next_offset = fn(its, id, entry, opaque); 21619ed24f4bSMarc Zyngier if (next_offset <= 0) 21629ed24f4bSMarc Zyngier return next_offset; 21639ed24f4bSMarc Zyngier 21649ed24f4bSMarc Zyngier byte_offset = next_offset * esz; 2165c000a260SEric Ren if (byte_offset >= len) 2166c000a260SEric Ren break; 2167c000a260SEric Ren 21689ed24f4bSMarc Zyngier id += next_offset; 21699ed24f4bSMarc Zyngier gpa += byte_offset; 21709ed24f4bSMarc Zyngier len -= byte_offset; 21719ed24f4bSMarc Zyngier } 21729ed24f4bSMarc Zyngier return 1; 21739ed24f4bSMarc Zyngier } 21749ed24f4bSMarc Zyngier 21759ed24f4bSMarc Zyngier /** 21769ed24f4bSMarc Zyngier * vgic_its_save_ite - Save an interrupt translation entry at @gpa 21779ed24f4bSMarc Zyngier */ 21789ed24f4bSMarc Zyngier static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev, 21799ed24f4bSMarc Zyngier struct its_ite *ite, gpa_t gpa, int ite_esz) 21809ed24f4bSMarc Zyngier { 21819ed24f4bSMarc Zyngier struct kvm *kvm = its->dev->kvm; 21829ed24f4bSMarc Zyngier u32 next_offset; 21839ed24f4bSMarc Zyngier u64 val; 21849ed24f4bSMarc Zyngier 21859ed24f4bSMarc Zyngier next_offset = compute_next_eventid_offset(&dev->itt_head, ite); 21869ed24f4bSMarc Zyngier val = ((u64)next_offset << KVM_ITS_ITE_NEXT_SHIFT) | 21879ed24f4bSMarc Zyngier ((u64)ite->irq->intid << KVM_ITS_ITE_PINTID_SHIFT) | 21889ed24f4bSMarc Zyngier ite->collection->collection_id; 21899ed24f4bSMarc 
Zyngier val = cpu_to_le64(val); 2190*a23eaf93SGavin Shan return vgic_write_guest_lock(kvm, gpa, &val, ite_esz); 21919ed24f4bSMarc Zyngier } 21929ed24f4bSMarc Zyngier 21939ed24f4bSMarc Zyngier /** 21949ed24f4bSMarc Zyngier * vgic_its_restore_ite - restore an interrupt translation entry 21959ed24f4bSMarc Zyngier * @event_id: id used for indexing 21969ed24f4bSMarc Zyngier * @ptr: pointer to the ITE entry 21979ed24f4bSMarc Zyngier * @opaque: pointer to the its_device 21989ed24f4bSMarc Zyngier */ 21999ed24f4bSMarc Zyngier static int vgic_its_restore_ite(struct vgic_its *its, u32 event_id, 22009ed24f4bSMarc Zyngier void *ptr, void *opaque) 22019ed24f4bSMarc Zyngier { 2202c707663eSYu Zhe struct its_device *dev = opaque; 22039ed24f4bSMarc Zyngier struct its_collection *collection; 22049ed24f4bSMarc Zyngier struct kvm *kvm = its->dev->kvm; 22059ed24f4bSMarc Zyngier struct kvm_vcpu *vcpu = NULL; 22069ed24f4bSMarc Zyngier u64 val; 22079ed24f4bSMarc Zyngier u64 *p = (u64 *)ptr; 22089ed24f4bSMarc Zyngier struct vgic_irq *irq; 22099ed24f4bSMarc Zyngier u32 coll_id, lpi_id; 22109ed24f4bSMarc Zyngier struct its_ite *ite; 22119ed24f4bSMarc Zyngier u32 offset; 22129ed24f4bSMarc Zyngier 22139ed24f4bSMarc Zyngier val = *p; 22149ed24f4bSMarc Zyngier 22159ed24f4bSMarc Zyngier val = le64_to_cpu(val); 22169ed24f4bSMarc Zyngier 22179ed24f4bSMarc Zyngier coll_id = val & KVM_ITS_ITE_ICID_MASK; 22189ed24f4bSMarc Zyngier lpi_id = (val & KVM_ITS_ITE_PINTID_MASK) >> KVM_ITS_ITE_PINTID_SHIFT; 22199ed24f4bSMarc Zyngier 22209ed24f4bSMarc Zyngier if (!lpi_id) 22219ed24f4bSMarc Zyngier return 1; /* invalid entry, no choice but to scan next entry */ 22229ed24f4bSMarc Zyngier 22239ed24f4bSMarc Zyngier if (lpi_id < VGIC_MIN_LPI) 22249ed24f4bSMarc Zyngier return -EINVAL; 22259ed24f4bSMarc Zyngier 22269ed24f4bSMarc Zyngier offset = val >> KVM_ITS_ITE_NEXT_SHIFT; 22279ed24f4bSMarc Zyngier if (event_id + offset >= BIT_ULL(dev->num_eventid_bits)) 22289ed24f4bSMarc Zyngier return -EINVAL; 22299ed24f4bSMarc 
Zyngier 22309ed24f4bSMarc Zyngier collection = find_collection(its, coll_id); 22319ed24f4bSMarc Zyngier if (!collection) 22329ed24f4bSMarc Zyngier return -EINVAL; 22339ed24f4bSMarc Zyngier 2234243b1f6cSRicardo Koller if (!vgic_its_check_event_id(its, dev, event_id)) 2235243b1f6cSRicardo Koller return -EINVAL; 2236243b1f6cSRicardo Koller 22379ed24f4bSMarc Zyngier ite = vgic_its_alloc_ite(dev, collection, event_id); 22389ed24f4bSMarc Zyngier if (IS_ERR(ite)) 22399ed24f4bSMarc Zyngier return PTR_ERR(ite); 22409ed24f4bSMarc Zyngier 22419ed24f4bSMarc Zyngier if (its_is_collection_mapped(collection)) 22429ed24f4bSMarc Zyngier vcpu = kvm_get_vcpu(kvm, collection->target_addr); 22439ed24f4bSMarc Zyngier 22449ed24f4bSMarc Zyngier irq = vgic_add_lpi(kvm, lpi_id, vcpu); 22458c5e74c9SRicardo Koller if (IS_ERR(irq)) { 22468c5e74c9SRicardo Koller its_free_ite(kvm, ite); 22479ed24f4bSMarc Zyngier return PTR_ERR(irq); 22488c5e74c9SRicardo Koller } 22499ed24f4bSMarc Zyngier ite->irq = irq; 22509ed24f4bSMarc Zyngier 22519ed24f4bSMarc Zyngier return offset; 22529ed24f4bSMarc Zyngier } 22539ed24f4bSMarc Zyngier 22544f0f586bSSami Tolvanen static int vgic_its_ite_cmp(void *priv, const struct list_head *a, 22554f0f586bSSami Tolvanen const struct list_head *b) 22569ed24f4bSMarc Zyngier { 22579ed24f4bSMarc Zyngier struct its_ite *itea = container_of(a, struct its_ite, ite_list); 22589ed24f4bSMarc Zyngier struct its_ite *iteb = container_of(b, struct its_ite, ite_list); 22599ed24f4bSMarc Zyngier 22609ed24f4bSMarc Zyngier if (itea->event_id < iteb->event_id) 22619ed24f4bSMarc Zyngier return -1; 22629ed24f4bSMarc Zyngier else 22639ed24f4bSMarc Zyngier return 1; 22649ed24f4bSMarc Zyngier } 22659ed24f4bSMarc Zyngier 22669ed24f4bSMarc Zyngier static int vgic_its_save_itt(struct vgic_its *its, struct its_device *device) 22679ed24f4bSMarc Zyngier { 22689ed24f4bSMarc Zyngier const struct vgic_its_abi *abi = vgic_its_get_abi(its); 22699ed24f4bSMarc Zyngier gpa_t base = device->itt_addr; 
22709ed24f4bSMarc Zyngier struct its_ite *ite; 22719ed24f4bSMarc Zyngier int ret; 22729ed24f4bSMarc Zyngier int ite_esz = abi->ite_esz; 22739ed24f4bSMarc Zyngier 22749ed24f4bSMarc Zyngier list_sort(NULL, &device->itt_head, vgic_its_ite_cmp); 22759ed24f4bSMarc Zyngier 22769ed24f4bSMarc Zyngier list_for_each_entry(ite, &device->itt_head, ite_list) { 22779ed24f4bSMarc Zyngier gpa_t gpa = base + ite->event_id * ite_esz; 22789ed24f4bSMarc Zyngier 22799ed24f4bSMarc Zyngier /* 22809ed24f4bSMarc Zyngier * If an LPI carries the HW bit, this means that this 22819ed24f4bSMarc Zyngier * interrupt is controlled by GICv4, and we do not 22828082d50fSShenming Lu * have direct access to that state without GICv4.1. 22838082d50fSShenming Lu * Let's simply fail the save operation... 22849ed24f4bSMarc Zyngier */ 22858082d50fSShenming Lu if (ite->irq->hw && !kvm_vgic_global_state.has_gicv4_1) 22869ed24f4bSMarc Zyngier return -EACCES; 22879ed24f4bSMarc Zyngier 22889ed24f4bSMarc Zyngier ret = vgic_its_save_ite(its, device, ite, gpa, ite_esz); 22899ed24f4bSMarc Zyngier if (ret) 22909ed24f4bSMarc Zyngier return ret; 22919ed24f4bSMarc Zyngier } 22929ed24f4bSMarc Zyngier return 0; 22939ed24f4bSMarc Zyngier } 22949ed24f4bSMarc Zyngier 22959ed24f4bSMarc Zyngier /** 22969ed24f4bSMarc Zyngier * vgic_its_restore_itt - restore the ITT of a device 22979ed24f4bSMarc Zyngier * 22989ed24f4bSMarc Zyngier * @its: its handle 22999ed24f4bSMarc Zyngier * @dev: device handle 23009ed24f4bSMarc Zyngier * 23019ed24f4bSMarc Zyngier * Return 0 on success, < 0 on error 23029ed24f4bSMarc Zyngier */ 23039ed24f4bSMarc Zyngier static int vgic_its_restore_itt(struct vgic_its *its, struct its_device *dev) 23049ed24f4bSMarc Zyngier { 23059ed24f4bSMarc Zyngier const struct vgic_its_abi *abi = vgic_its_get_abi(its); 23069ed24f4bSMarc Zyngier gpa_t base = dev->itt_addr; 23079ed24f4bSMarc Zyngier int ret; 23089ed24f4bSMarc Zyngier int ite_esz = abi->ite_esz; 23099ed24f4bSMarc Zyngier size_t max_size = 
BIT_ULL(dev->num_eventid_bits) * ite_esz; 23109ed24f4bSMarc Zyngier 23119ed24f4bSMarc Zyngier ret = scan_its_table(its, base, max_size, ite_esz, 0, 23129ed24f4bSMarc Zyngier vgic_its_restore_ite, dev); 23139ed24f4bSMarc Zyngier 23149ed24f4bSMarc Zyngier /* scan_its_table returns +1 if all ITEs are invalid */ 23159ed24f4bSMarc Zyngier if (ret > 0) 23169ed24f4bSMarc Zyngier ret = 0; 23179ed24f4bSMarc Zyngier 23189ed24f4bSMarc Zyngier return ret; 23199ed24f4bSMarc Zyngier } 23209ed24f4bSMarc Zyngier 23219ed24f4bSMarc Zyngier /** 23229ed24f4bSMarc Zyngier * vgic_its_save_dte - Save a device table entry at a given GPA 23239ed24f4bSMarc Zyngier * 23249ed24f4bSMarc Zyngier * @its: ITS handle 23259ed24f4bSMarc Zyngier * @dev: ITS device 23269ed24f4bSMarc Zyngier * @ptr: GPA 23279ed24f4bSMarc Zyngier */ 23289ed24f4bSMarc Zyngier static int vgic_its_save_dte(struct vgic_its *its, struct its_device *dev, 23299ed24f4bSMarc Zyngier gpa_t ptr, int dte_esz) 23309ed24f4bSMarc Zyngier { 23319ed24f4bSMarc Zyngier struct kvm *kvm = its->dev->kvm; 23329ed24f4bSMarc Zyngier u64 val, itt_addr_field; 23339ed24f4bSMarc Zyngier u32 next_offset; 23349ed24f4bSMarc Zyngier 23359ed24f4bSMarc Zyngier itt_addr_field = dev->itt_addr >> 8; 23369ed24f4bSMarc Zyngier next_offset = compute_next_devid_offset(&its->device_list, dev); 23379ed24f4bSMarc Zyngier val = (1ULL << KVM_ITS_DTE_VALID_SHIFT | 23389ed24f4bSMarc Zyngier ((u64)next_offset << KVM_ITS_DTE_NEXT_SHIFT) | 23399ed24f4bSMarc Zyngier (itt_addr_field << KVM_ITS_DTE_ITTADDR_SHIFT) | 23409ed24f4bSMarc Zyngier (dev->num_eventid_bits - 1)); 23419ed24f4bSMarc Zyngier val = cpu_to_le64(val); 2342*a23eaf93SGavin Shan return vgic_write_guest_lock(kvm, ptr, &val, dte_esz); 23439ed24f4bSMarc Zyngier } 23449ed24f4bSMarc Zyngier 23459ed24f4bSMarc Zyngier /** 23469ed24f4bSMarc Zyngier * vgic_its_restore_dte - restore a device table entry 23479ed24f4bSMarc Zyngier * 23489ed24f4bSMarc Zyngier * @its: its handle 23499ed24f4bSMarc Zyngier * @id: device id 
the DTE corresponds to 23509ed24f4bSMarc Zyngier * @ptr: kernel VA where the 8 byte DTE is located 23519ed24f4bSMarc Zyngier * @opaque: unused 23529ed24f4bSMarc Zyngier * 23539ed24f4bSMarc Zyngier * Return: < 0 on error, 0 if the dte is the last one, id offset to the 23549ed24f4bSMarc Zyngier * next dte otherwise 23559ed24f4bSMarc Zyngier */ 23569ed24f4bSMarc Zyngier static int vgic_its_restore_dte(struct vgic_its *its, u32 id, 23579ed24f4bSMarc Zyngier void *ptr, void *opaque) 23589ed24f4bSMarc Zyngier { 23599ed24f4bSMarc Zyngier struct its_device *dev; 2360243b1f6cSRicardo Koller u64 baser = its->baser_device_table; 23619ed24f4bSMarc Zyngier gpa_t itt_addr; 23629ed24f4bSMarc Zyngier u8 num_eventid_bits; 23639ed24f4bSMarc Zyngier u64 entry = *(u64 *)ptr; 23649ed24f4bSMarc Zyngier bool valid; 23659ed24f4bSMarc Zyngier u32 offset; 23669ed24f4bSMarc Zyngier int ret; 23679ed24f4bSMarc Zyngier 23689ed24f4bSMarc Zyngier entry = le64_to_cpu(entry); 23699ed24f4bSMarc Zyngier 23709ed24f4bSMarc Zyngier valid = entry >> KVM_ITS_DTE_VALID_SHIFT; 23719ed24f4bSMarc Zyngier num_eventid_bits = (entry & KVM_ITS_DTE_SIZE_MASK) + 1; 23729ed24f4bSMarc Zyngier itt_addr = ((entry & KVM_ITS_DTE_ITTADDR_MASK) 23739ed24f4bSMarc Zyngier >> KVM_ITS_DTE_ITTADDR_SHIFT) << 8; 23749ed24f4bSMarc Zyngier 23759ed24f4bSMarc Zyngier if (!valid) 23769ed24f4bSMarc Zyngier return 1; 23779ed24f4bSMarc Zyngier 23789ed24f4bSMarc Zyngier /* dte entry is valid */ 23799ed24f4bSMarc Zyngier offset = (entry & KVM_ITS_DTE_NEXT_MASK) >> KVM_ITS_DTE_NEXT_SHIFT; 23809ed24f4bSMarc Zyngier 2381243b1f6cSRicardo Koller if (!vgic_its_check_id(its, baser, id, NULL)) 2382243b1f6cSRicardo Koller return -EINVAL; 2383243b1f6cSRicardo Koller 23849ed24f4bSMarc Zyngier dev = vgic_its_alloc_device(its, id, itt_addr, num_eventid_bits); 23859ed24f4bSMarc Zyngier if (IS_ERR(dev)) 23869ed24f4bSMarc Zyngier return PTR_ERR(dev); 23879ed24f4bSMarc Zyngier 23889ed24f4bSMarc Zyngier ret = vgic_its_restore_itt(its, dev); 
23899ed24f4bSMarc Zyngier if (ret) { 23909ed24f4bSMarc Zyngier vgic_its_free_device(its->dev->kvm, dev); 23919ed24f4bSMarc Zyngier return ret; 23929ed24f4bSMarc Zyngier } 23939ed24f4bSMarc Zyngier 23949ed24f4bSMarc Zyngier return offset; 23959ed24f4bSMarc Zyngier } 23969ed24f4bSMarc Zyngier 23974f0f586bSSami Tolvanen static int vgic_its_device_cmp(void *priv, const struct list_head *a, 23984f0f586bSSami Tolvanen const struct list_head *b) 23999ed24f4bSMarc Zyngier { 24009ed24f4bSMarc Zyngier struct its_device *deva = container_of(a, struct its_device, dev_list); 24019ed24f4bSMarc Zyngier struct its_device *devb = container_of(b, struct its_device, dev_list); 24029ed24f4bSMarc Zyngier 24039ed24f4bSMarc Zyngier if (deva->device_id < devb->device_id) 24049ed24f4bSMarc Zyngier return -1; 24059ed24f4bSMarc Zyngier else 24069ed24f4bSMarc Zyngier return 1; 24079ed24f4bSMarc Zyngier } 24089ed24f4bSMarc Zyngier 24099ed24f4bSMarc Zyngier /** 24109ed24f4bSMarc Zyngier * vgic_its_save_device_tables - Save the device table and all ITT 24119ed24f4bSMarc Zyngier * into guest RAM 24129ed24f4bSMarc Zyngier * 24139ed24f4bSMarc Zyngier * L1/L2 handling is hidden by vgic_its_check_id() helper which directly 24149ed24f4bSMarc Zyngier * returns the GPA of the device entry 24159ed24f4bSMarc Zyngier */ 24169ed24f4bSMarc Zyngier static int vgic_its_save_device_tables(struct vgic_its *its) 24179ed24f4bSMarc Zyngier { 24189ed24f4bSMarc Zyngier const struct vgic_its_abi *abi = vgic_its_get_abi(its); 24199ed24f4bSMarc Zyngier u64 baser = its->baser_device_table; 24209ed24f4bSMarc Zyngier struct its_device *dev; 24219ed24f4bSMarc Zyngier int dte_esz = abi->dte_esz; 24229ed24f4bSMarc Zyngier 24239ed24f4bSMarc Zyngier if (!(baser & GITS_BASER_VALID)) 24249ed24f4bSMarc Zyngier return 0; 24259ed24f4bSMarc Zyngier 24269ed24f4bSMarc Zyngier list_sort(NULL, &its->device_list, vgic_its_device_cmp); 24279ed24f4bSMarc Zyngier 24289ed24f4bSMarc Zyngier list_for_each_entry(dev, &its->device_list, dev_list) 
{ 24299ed24f4bSMarc Zyngier int ret; 24309ed24f4bSMarc Zyngier gpa_t eaddr; 24319ed24f4bSMarc Zyngier 24329ed24f4bSMarc Zyngier if (!vgic_its_check_id(its, baser, 24339ed24f4bSMarc Zyngier dev->device_id, &eaddr)) 24349ed24f4bSMarc Zyngier return -EINVAL; 24359ed24f4bSMarc Zyngier 24369ed24f4bSMarc Zyngier ret = vgic_its_save_itt(its, dev); 24379ed24f4bSMarc Zyngier if (ret) 24389ed24f4bSMarc Zyngier return ret; 24399ed24f4bSMarc Zyngier 24409ed24f4bSMarc Zyngier ret = vgic_its_save_dte(its, dev, eaddr, dte_esz); 24419ed24f4bSMarc Zyngier if (ret) 24429ed24f4bSMarc Zyngier return ret; 24439ed24f4bSMarc Zyngier } 24449ed24f4bSMarc Zyngier return 0; 24459ed24f4bSMarc Zyngier } 24469ed24f4bSMarc Zyngier 24479ed24f4bSMarc Zyngier /** 24489ed24f4bSMarc Zyngier * handle_l1_dte - callback used for L1 device table entries (2 stage case) 24499ed24f4bSMarc Zyngier * 24509ed24f4bSMarc Zyngier * @its: its handle 24519ed24f4bSMarc Zyngier * @id: index of the entry in the L1 table 24529ed24f4bSMarc Zyngier * @addr: kernel VA 24539ed24f4bSMarc Zyngier * @opaque: unused 24549ed24f4bSMarc Zyngier * 24559ed24f4bSMarc Zyngier * L1 table entries are scanned by steps of 1 entry 24569ed24f4bSMarc Zyngier * Return < 0 if error, 0 if last dte was found when scanning the L2 24579ed24f4bSMarc Zyngier * table, +1 otherwise (meaning next L1 entry must be scanned) 24589ed24f4bSMarc Zyngier */ 24599ed24f4bSMarc Zyngier static int handle_l1_dte(struct vgic_its *its, u32 id, void *addr, 24609ed24f4bSMarc Zyngier void *opaque) 24619ed24f4bSMarc Zyngier { 24629ed24f4bSMarc Zyngier const struct vgic_its_abi *abi = vgic_its_get_abi(its); 24639ed24f4bSMarc Zyngier int l2_start_id = id * (SZ_64K / abi->dte_esz); 24649ed24f4bSMarc Zyngier u64 entry = *(u64 *)addr; 24659ed24f4bSMarc Zyngier int dte_esz = abi->dte_esz; 24669ed24f4bSMarc Zyngier gpa_t gpa; 24679ed24f4bSMarc Zyngier int ret; 24689ed24f4bSMarc Zyngier 24699ed24f4bSMarc Zyngier entry = le64_to_cpu(entry); 24709ed24f4bSMarc Zyngier 
24719ed24f4bSMarc Zyngier if (!(entry & KVM_ITS_L1E_VALID_MASK)) 24729ed24f4bSMarc Zyngier return 1; 24739ed24f4bSMarc Zyngier 24749ed24f4bSMarc Zyngier gpa = entry & KVM_ITS_L1E_ADDR_MASK; 24759ed24f4bSMarc Zyngier 24769ed24f4bSMarc Zyngier ret = scan_its_table(its, gpa, SZ_64K, dte_esz, 24779ed24f4bSMarc Zyngier l2_start_id, vgic_its_restore_dte, NULL); 24789ed24f4bSMarc Zyngier 24799ed24f4bSMarc Zyngier return ret; 24809ed24f4bSMarc Zyngier } 24819ed24f4bSMarc Zyngier 24829ed24f4bSMarc Zyngier /** 24839ed24f4bSMarc Zyngier * vgic_its_restore_device_tables - Restore the device table and all ITT 24849ed24f4bSMarc Zyngier * from guest RAM to internal data structs 24859ed24f4bSMarc Zyngier */ 24869ed24f4bSMarc Zyngier static int vgic_its_restore_device_tables(struct vgic_its *its) 24879ed24f4bSMarc Zyngier { 24889ed24f4bSMarc Zyngier const struct vgic_its_abi *abi = vgic_its_get_abi(its); 24899ed24f4bSMarc Zyngier u64 baser = its->baser_device_table; 24909ed24f4bSMarc Zyngier int l1_esz, ret; 24919ed24f4bSMarc Zyngier int l1_tbl_size = GITS_BASER_NR_PAGES(baser) * SZ_64K; 24929ed24f4bSMarc Zyngier gpa_t l1_gpa; 24939ed24f4bSMarc Zyngier 24949ed24f4bSMarc Zyngier if (!(baser & GITS_BASER_VALID)) 24959ed24f4bSMarc Zyngier return 0; 24969ed24f4bSMarc Zyngier 24979ed24f4bSMarc Zyngier l1_gpa = GITS_BASER_ADDR_48_to_52(baser); 24989ed24f4bSMarc Zyngier 24999ed24f4bSMarc Zyngier if (baser & GITS_BASER_INDIRECT) { 25009ed24f4bSMarc Zyngier l1_esz = GITS_LVL1_ENTRY_SIZE; 25019ed24f4bSMarc Zyngier ret = scan_its_table(its, l1_gpa, l1_tbl_size, l1_esz, 0, 25029ed24f4bSMarc Zyngier handle_l1_dte, NULL); 25039ed24f4bSMarc Zyngier } else { 25049ed24f4bSMarc Zyngier l1_esz = abi->dte_esz; 25059ed24f4bSMarc Zyngier ret = scan_its_table(its, l1_gpa, l1_tbl_size, l1_esz, 0, 25069ed24f4bSMarc Zyngier vgic_its_restore_dte, NULL); 25079ed24f4bSMarc Zyngier } 25089ed24f4bSMarc Zyngier 25099ed24f4bSMarc Zyngier /* scan_its_table returns +1 if all entries are invalid */ 25109ed24f4bSMarc 
Zyngier if (ret > 0) 25119ed24f4bSMarc Zyngier ret = 0; 25129ed24f4bSMarc Zyngier 25138c5e74c9SRicardo Koller if (ret < 0) 25148c5e74c9SRicardo Koller vgic_its_free_device_list(its->dev->kvm, its); 25158c5e74c9SRicardo Koller 25169ed24f4bSMarc Zyngier return ret; 25179ed24f4bSMarc Zyngier } 25189ed24f4bSMarc Zyngier 25199ed24f4bSMarc Zyngier static int vgic_its_save_cte(struct vgic_its *its, 25209ed24f4bSMarc Zyngier struct its_collection *collection, 25219ed24f4bSMarc Zyngier gpa_t gpa, int esz) 25229ed24f4bSMarc Zyngier { 25239ed24f4bSMarc Zyngier u64 val; 25249ed24f4bSMarc Zyngier 25259ed24f4bSMarc Zyngier val = (1ULL << KVM_ITS_CTE_VALID_SHIFT | 25269ed24f4bSMarc Zyngier ((u64)collection->target_addr << KVM_ITS_CTE_RDBASE_SHIFT) | 25279ed24f4bSMarc Zyngier collection->collection_id); 25289ed24f4bSMarc Zyngier val = cpu_to_le64(val); 2529*a23eaf93SGavin Shan return vgic_write_guest_lock(its->dev->kvm, gpa, &val, esz); 25309ed24f4bSMarc Zyngier } 25319ed24f4bSMarc Zyngier 2532a1ccfd6fSRicardo Koller /* 2533a1ccfd6fSRicardo Koller * Restore a collection entry into the ITS collection table. 2534a1ccfd6fSRicardo Koller * Return +1 on success, 0 if the entry was invalid (which should be 2535a1ccfd6fSRicardo Koller * interpreted as end-of-table), and a negative error value for generic errors. 
2536a1ccfd6fSRicardo Koller */ 25379ed24f4bSMarc Zyngier static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz) 25389ed24f4bSMarc Zyngier { 25399ed24f4bSMarc Zyngier struct its_collection *collection; 25409ed24f4bSMarc Zyngier struct kvm *kvm = its->dev->kvm; 25419ed24f4bSMarc Zyngier u32 target_addr, coll_id; 25429ed24f4bSMarc Zyngier u64 val; 25439ed24f4bSMarc Zyngier int ret; 25449ed24f4bSMarc Zyngier 25459ed24f4bSMarc Zyngier BUG_ON(esz > sizeof(val)); 25469ed24f4bSMarc Zyngier ret = kvm_read_guest_lock(kvm, gpa, &val, esz); 25479ed24f4bSMarc Zyngier if (ret) 25489ed24f4bSMarc Zyngier return ret; 25499ed24f4bSMarc Zyngier val = le64_to_cpu(val); 25509ed24f4bSMarc Zyngier if (!(val & KVM_ITS_CTE_VALID_MASK)) 25519ed24f4bSMarc Zyngier return 0; 25529ed24f4bSMarc Zyngier 25539ed24f4bSMarc Zyngier target_addr = (u32)(val >> KVM_ITS_CTE_RDBASE_SHIFT); 25549ed24f4bSMarc Zyngier coll_id = val & KVM_ITS_CTE_ICID_MASK; 25559ed24f4bSMarc Zyngier 25569ed24f4bSMarc Zyngier if (target_addr != COLLECTION_NOT_MAPPED && 25579ed24f4bSMarc Zyngier target_addr >= atomic_read(&kvm->online_vcpus)) 25589ed24f4bSMarc Zyngier return -EINVAL; 25599ed24f4bSMarc Zyngier 25609ed24f4bSMarc Zyngier collection = find_collection(its, coll_id); 25619ed24f4bSMarc Zyngier if (collection) 25629ed24f4bSMarc Zyngier return -EEXIST; 2563a1ccfd6fSRicardo Koller 2564a1ccfd6fSRicardo Koller if (!vgic_its_check_id(its, its->baser_coll_table, coll_id, NULL)) 2565a1ccfd6fSRicardo Koller return -EINVAL; 2566a1ccfd6fSRicardo Koller 25679ed24f4bSMarc Zyngier ret = vgic_its_alloc_collection(its, &collection, coll_id); 25689ed24f4bSMarc Zyngier if (ret) 25699ed24f4bSMarc Zyngier return ret; 25709ed24f4bSMarc Zyngier collection->target_addr = target_addr; 25719ed24f4bSMarc Zyngier return 1; 25729ed24f4bSMarc Zyngier } 25739ed24f4bSMarc Zyngier 25749ed24f4bSMarc Zyngier /** 25759ed24f4bSMarc Zyngier * vgic_its_save_collection_table - Save the collection table into 25769ed24f4bSMarc Zyngier * 
guest RAM 25779ed24f4bSMarc Zyngier */ 25789ed24f4bSMarc Zyngier static int vgic_its_save_collection_table(struct vgic_its *its) 25799ed24f4bSMarc Zyngier { 25809ed24f4bSMarc Zyngier const struct vgic_its_abi *abi = vgic_its_get_abi(its); 25819ed24f4bSMarc Zyngier u64 baser = its->baser_coll_table; 25829ed24f4bSMarc Zyngier gpa_t gpa = GITS_BASER_ADDR_48_to_52(baser); 25839ed24f4bSMarc Zyngier struct its_collection *collection; 25849ed24f4bSMarc Zyngier u64 val; 25859ed24f4bSMarc Zyngier size_t max_size, filled = 0; 25869ed24f4bSMarc Zyngier int ret, cte_esz = abi->cte_esz; 25879ed24f4bSMarc Zyngier 25889ed24f4bSMarc Zyngier if (!(baser & GITS_BASER_VALID)) 25899ed24f4bSMarc Zyngier return 0; 25909ed24f4bSMarc Zyngier 25919ed24f4bSMarc Zyngier max_size = GITS_BASER_NR_PAGES(baser) * SZ_64K; 25929ed24f4bSMarc Zyngier 25939ed24f4bSMarc Zyngier list_for_each_entry(collection, &its->collection_list, coll_list) { 25949ed24f4bSMarc Zyngier ret = vgic_its_save_cte(its, collection, gpa, cte_esz); 25959ed24f4bSMarc Zyngier if (ret) 25969ed24f4bSMarc Zyngier return ret; 25979ed24f4bSMarc Zyngier gpa += cte_esz; 25989ed24f4bSMarc Zyngier filled += cte_esz; 25999ed24f4bSMarc Zyngier } 26009ed24f4bSMarc Zyngier 26019ed24f4bSMarc Zyngier if (filled == max_size) 26029ed24f4bSMarc Zyngier return 0; 26039ed24f4bSMarc Zyngier 26049ed24f4bSMarc Zyngier /* 26059ed24f4bSMarc Zyngier * table is not fully filled, add a last dummy element 26069ed24f4bSMarc Zyngier * with valid bit unset 26079ed24f4bSMarc Zyngier */ 26089ed24f4bSMarc Zyngier val = 0; 26099ed24f4bSMarc Zyngier BUG_ON(cte_esz > sizeof(val)); 2610*a23eaf93SGavin Shan ret = vgic_write_guest_lock(its->dev->kvm, gpa, &val, cte_esz); 26119ed24f4bSMarc Zyngier return ret; 26129ed24f4bSMarc Zyngier } 26139ed24f4bSMarc Zyngier 26149ed24f4bSMarc Zyngier /** 26159ed24f4bSMarc Zyngier * vgic_its_restore_collection_table - reads the collection table 26169ed24f4bSMarc Zyngier * in guest memory and restores the ITS internal state. 
Requires the 26179ed24f4bSMarc Zyngier * BASER registers to be restored before. 26189ed24f4bSMarc Zyngier */ 26199ed24f4bSMarc Zyngier static int vgic_its_restore_collection_table(struct vgic_its *its) 26209ed24f4bSMarc Zyngier { 26219ed24f4bSMarc Zyngier const struct vgic_its_abi *abi = vgic_its_get_abi(its); 26229ed24f4bSMarc Zyngier u64 baser = its->baser_coll_table; 26239ed24f4bSMarc Zyngier int cte_esz = abi->cte_esz; 26249ed24f4bSMarc Zyngier size_t max_size, read = 0; 26259ed24f4bSMarc Zyngier gpa_t gpa; 26269ed24f4bSMarc Zyngier int ret; 26279ed24f4bSMarc Zyngier 26289ed24f4bSMarc Zyngier if (!(baser & GITS_BASER_VALID)) 26299ed24f4bSMarc Zyngier return 0; 26309ed24f4bSMarc Zyngier 26319ed24f4bSMarc Zyngier gpa = GITS_BASER_ADDR_48_to_52(baser); 26329ed24f4bSMarc Zyngier 26339ed24f4bSMarc Zyngier max_size = GITS_BASER_NR_PAGES(baser) * SZ_64K; 26349ed24f4bSMarc Zyngier 26359ed24f4bSMarc Zyngier while (read < max_size) { 26369ed24f4bSMarc Zyngier ret = vgic_its_restore_cte(its, gpa, cte_esz); 26379ed24f4bSMarc Zyngier if (ret <= 0) 26389ed24f4bSMarc Zyngier break; 26399ed24f4bSMarc Zyngier gpa += cte_esz; 26409ed24f4bSMarc Zyngier read += cte_esz; 26419ed24f4bSMarc Zyngier } 26429ed24f4bSMarc Zyngier 26439ed24f4bSMarc Zyngier if (ret > 0) 26449ed24f4bSMarc Zyngier return 0; 26459ed24f4bSMarc Zyngier 26468c5e74c9SRicardo Koller if (ret < 0) 26478c5e74c9SRicardo Koller vgic_its_free_collection_list(its->dev->kvm, its); 26488c5e74c9SRicardo Koller 26499ed24f4bSMarc Zyngier return ret; 26509ed24f4bSMarc Zyngier } 26519ed24f4bSMarc Zyngier 26529ed24f4bSMarc Zyngier /** 26539ed24f4bSMarc Zyngier * vgic_its_save_tables_v0 - Save the ITS tables into guest ARM 26549ed24f4bSMarc Zyngier * according to v0 ABI 26559ed24f4bSMarc Zyngier */ 26569ed24f4bSMarc Zyngier static int vgic_its_save_tables_v0(struct vgic_its *its) 26579ed24f4bSMarc Zyngier { 26589ed24f4bSMarc Zyngier int ret; 26599ed24f4bSMarc Zyngier 26609ed24f4bSMarc Zyngier ret = 
vgic_its_save_device_tables(its); 26619ed24f4bSMarc Zyngier if (ret) 26629ed24f4bSMarc Zyngier return ret; 26639ed24f4bSMarc Zyngier 26649ed24f4bSMarc Zyngier return vgic_its_save_collection_table(its); 26659ed24f4bSMarc Zyngier } 26669ed24f4bSMarc Zyngier 26679ed24f4bSMarc Zyngier /** 26689ed24f4bSMarc Zyngier * vgic_its_restore_tables_v0 - Restore the ITS tables from guest RAM 26699ed24f4bSMarc Zyngier * to internal data structs according to V0 ABI 26709ed24f4bSMarc Zyngier * 26719ed24f4bSMarc Zyngier */ 26729ed24f4bSMarc Zyngier static int vgic_its_restore_tables_v0(struct vgic_its *its) 26739ed24f4bSMarc Zyngier { 26749ed24f4bSMarc Zyngier int ret; 26759ed24f4bSMarc Zyngier 26769ed24f4bSMarc Zyngier ret = vgic_its_restore_collection_table(its); 26779ed24f4bSMarc Zyngier if (ret) 26789ed24f4bSMarc Zyngier return ret; 26799ed24f4bSMarc Zyngier 26808c5e74c9SRicardo Koller ret = vgic_its_restore_device_tables(its); 26818c5e74c9SRicardo Koller if (ret) 26828c5e74c9SRicardo Koller vgic_its_free_collection_list(its->dev->kvm, its); 26838c5e74c9SRicardo Koller return ret; 26849ed24f4bSMarc Zyngier } 26859ed24f4bSMarc Zyngier 26869ed24f4bSMarc Zyngier static int vgic_its_commit_v0(struct vgic_its *its) 26879ed24f4bSMarc Zyngier { 26889ed24f4bSMarc Zyngier const struct vgic_its_abi *abi; 26899ed24f4bSMarc Zyngier 26909ed24f4bSMarc Zyngier abi = vgic_its_get_abi(its); 26919ed24f4bSMarc Zyngier its->baser_coll_table &= ~GITS_BASER_ENTRY_SIZE_MASK; 26929ed24f4bSMarc Zyngier its->baser_device_table &= ~GITS_BASER_ENTRY_SIZE_MASK; 26939ed24f4bSMarc Zyngier 26949ed24f4bSMarc Zyngier its->baser_coll_table |= (GIC_ENCODE_SZ(abi->cte_esz, 5) 26959ed24f4bSMarc Zyngier << GITS_BASER_ENTRY_SIZE_SHIFT); 26969ed24f4bSMarc Zyngier 26979ed24f4bSMarc Zyngier its->baser_device_table |= (GIC_ENCODE_SZ(abi->dte_esz, 5) 26989ed24f4bSMarc Zyngier << GITS_BASER_ENTRY_SIZE_SHIFT); 26999ed24f4bSMarc Zyngier return 0; 27009ed24f4bSMarc Zyngier } 27019ed24f4bSMarc Zyngier 27029ed24f4bSMarc Zyngier 
static void vgic_its_reset(struct kvm *kvm, struct vgic_its *its)
{
	/* We need to keep the ABI specific field values */
	its->baser_coll_table &= ~GITS_BASER_VALID;
	its->baser_device_table &= ~GITS_BASER_VALID;
	its->cbaser = 0;
	its->creadr = 0;
	its->cwriter = 0;
	its->enabled = 0;
	/* Drop all mapped devices/ITEs and collections for this ITS. */
	vgic_its_free_device_list(kvm, its);
	vgic_its_free_collection_list(kvm, its);
}

/*
 * Report which KVM device attributes this ITS device supports:
 * the base address, the CTRL operations (init/reset/save/restore)
 * and direct ITS register access.  Returns 0 if supported, -ENXIO if not.
 */
static int vgic_its_has_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR:
		switch (attr->attr) {
		case KVM_VGIC_ITS_ADDR_TYPE:
			return 0;
		}
		break;
	case KVM_DEV_ARM_VGIC_GRP_CTRL:
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			return 0;
		case KVM_DEV_ARM_ITS_CTRL_RESET:
			return 0;
		case KVM_DEV_ARM_ITS_SAVE_TABLES:
			return 0;
		case KVM_DEV_ARM_ITS_RESTORE_TABLES:
			return 0;
		}
		break;
	case KVM_DEV_ARM_VGIC_GRP_ITS_REGS:
		return vgic_its_has_attr_regs(dev, attr);
	}
	return -ENXIO;
}

static int
vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)
{
	/*
	 * Dispatch a KVM_DEV_ARM_VGIC_GRP_CTRL operation (reset or
	 * table save/restore) with all vcpus stopped.  Lock ordering:
	 * kvm->lock, then its->its_lock, then all vcpu mutexes.
	 */
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	int ret = 0;

	if (attr == KVM_DEV_ARM_VGIC_CTRL_INIT) /* Nothing to do */
		return 0;

	mutex_lock(&kvm->lock);
	mutex_lock(&its->its_lock);

	if (!lock_all_vcpus(kvm)) {
		/* A vcpu is running; drop everything and let userspace retry. */
		mutex_unlock(&its->its_lock);
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}

	switch (attr) {
	case KVM_DEV_ARM_ITS_CTRL_RESET:
		vgic_its_reset(kvm, its);
		break;
	case KVM_DEV_ARM_ITS_SAVE_TABLES:
		ret = abi->save_tables(its);
		break;
	case KVM_DEV_ARM_ITS_RESTORE_TABLES:
		ret = abi->restore_tables(its);
		break;
	}

	unlock_all_vcpus(kvm);
	mutex_unlock(&its->its_lock);
	mutex_unlock(&kvm->lock);
	return ret;
}

/*
 * kvm_arch_allow_write_without_running_vcpu - allow writing guest memory
 * without the running VCPU when dirty ring is enabled.
 *
 * The running VCPU is required to track dirty guest pages when dirty ring
 * is enabled.
Otherwise, the backup bitmap should be used to track the
 * dirty guest pages. When vgic/its tables are being saved, the backup
 * bitmap is used to track the dirty guest pages due to the missed running
 * VCPU in the period.
 */
bool kvm_arch_allow_write_without_running_vcpu(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;

	/* True only while a vgic/ITS table save is writing guest memory. */
	return dist->table_write_in_progress;
}

/*
 * Handle a userspace KVM_SET_DEVICE_ATTR on the ITS device: set the ITS
 * base address (GRP_ADDR), run a CTRL operation, or write an ITS register.
 */
static int vgic_its_set_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	struct vgic_its *its = dev->private;
	int ret;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		unsigned long type = (unsigned long)attr->attr;
		u64 addr;

		if (type != KVM_VGIC_ITS_ADDR_TYPE)
			return -ENODEV;

		if (copy_from_user(&addr, uaddr, sizeof(addr)))
			return -EFAULT;

		/* 64K aligned and within the guest physical address space. */
		ret = vgic_check_iorange(dev->kvm, its->vgic_its_base,
					 addr, SZ_64K, KVM_VGIC_V3_ITS_SIZE);
		if (ret)
			return ret;

		return vgic_register_its_iodev(dev->kvm, its, addr);
	}
	case KVM_DEV_ARM_VGIC_GRP_CTRL:
		return vgic_its_ctrl(dev->kvm, its,
attr->attr); 28229ed24f4bSMarc Zyngier case KVM_DEV_ARM_VGIC_GRP_ITS_REGS: { 28239ed24f4bSMarc Zyngier u64 __user *uaddr = (u64 __user *)(long)attr->addr; 28249ed24f4bSMarc Zyngier u64 reg; 28259ed24f4bSMarc Zyngier 28269ed24f4bSMarc Zyngier if (get_user(reg, uaddr)) 28279ed24f4bSMarc Zyngier return -EFAULT; 28289ed24f4bSMarc Zyngier 28299ed24f4bSMarc Zyngier return vgic_its_attr_regs_access(dev, attr, ®, true); 28309ed24f4bSMarc Zyngier } 28319ed24f4bSMarc Zyngier } 28329ed24f4bSMarc Zyngier return -ENXIO; 28339ed24f4bSMarc Zyngier } 28349ed24f4bSMarc Zyngier 28359ed24f4bSMarc Zyngier static int vgic_its_get_attr(struct kvm_device *dev, 28369ed24f4bSMarc Zyngier struct kvm_device_attr *attr) 28379ed24f4bSMarc Zyngier { 28389ed24f4bSMarc Zyngier switch (attr->group) { 28399ed24f4bSMarc Zyngier case KVM_DEV_ARM_VGIC_GRP_ADDR: { 28409ed24f4bSMarc Zyngier struct vgic_its *its = dev->private; 28419ed24f4bSMarc Zyngier u64 addr = its->vgic_its_base; 28429ed24f4bSMarc Zyngier u64 __user *uaddr = (u64 __user *)(long)attr->addr; 28439ed24f4bSMarc Zyngier unsigned long type = (unsigned long)attr->attr; 28449ed24f4bSMarc Zyngier 28459ed24f4bSMarc Zyngier if (type != KVM_VGIC_ITS_ADDR_TYPE) 28469ed24f4bSMarc Zyngier return -ENODEV; 28479ed24f4bSMarc Zyngier 28489ed24f4bSMarc Zyngier if (copy_to_user(uaddr, &addr, sizeof(addr))) 28499ed24f4bSMarc Zyngier return -EFAULT; 28509ed24f4bSMarc Zyngier break; 28519ed24f4bSMarc Zyngier } 28529ed24f4bSMarc Zyngier case KVM_DEV_ARM_VGIC_GRP_ITS_REGS: { 28539ed24f4bSMarc Zyngier u64 __user *uaddr = (u64 __user *)(long)attr->addr; 28549ed24f4bSMarc Zyngier u64 reg; 28559ed24f4bSMarc Zyngier int ret; 28569ed24f4bSMarc Zyngier 28579ed24f4bSMarc Zyngier ret = vgic_its_attr_regs_access(dev, attr, ®, false); 28589ed24f4bSMarc Zyngier if (ret) 28599ed24f4bSMarc Zyngier return ret; 28609ed24f4bSMarc Zyngier return put_user(reg, uaddr); 28619ed24f4bSMarc Zyngier } 28629ed24f4bSMarc Zyngier default: 28639ed24f4bSMarc Zyngier return -ENXIO; 
	}

	return 0;
}

/* KVM device ops backing the KVM_DEV_TYPE_ARM_VGIC_ITS device class. */
static struct kvm_device_ops kvm_arm_vgic_its_ops = {
	.name = "kvm-arm-vgic-its",
	.create = vgic_its_create,
	.destroy = vgic_its_destroy,
	.set_attr = vgic_its_set_attr,
	.get_attr = vgic_its_get_attr,
	.has_attr = vgic_its_has_attr,
};

/*
 * Register the ITS device class with KVM so userspace can create ITS
 * devices via KVM_CREATE_DEVICE.  Returns 0 on success or a negative
 * error code from kvm_register_device_ops().
 */
int kvm_vgic_register_its_device(void)
{
	return kvm_register_device_ops(&kvm_arm_vgic_its_ops,
				       KVM_DEV_TYPE_ARM_VGIC_ITS);
}