// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/cpu.h>
#include <linux/crash_dump.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/efi.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>

#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/irqchip/arm-gic-v4.h>

#include <asm/cputype.h>
#include <asm/exception.h>

#include "irq-gic-common.h"

/* Per-ITS quirk/feature flags, stored in its_node::flags */
#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING		(1ULL << 0)
#define ITS_FLAGS_WORKAROUND_CAVIUM_22375	(1ULL << 1)
#define ITS_FLAGS_WORKAROUND_CAVIUM_23144	(1ULL << 2)
#define ITS_FLAGS_SAVE_SUSPEND_STATE		(1ULL << 3)

/* Per-redistributor flags */
#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING	(1 << 0)
#define RDIST_FLAGS_RD_TABLES_PREALLOCATED	(1 << 1)

static u32 lpi_id_bits;

/*
 * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to
 * deal with (one configuration byte per interrupt). PENDBASE has to
 * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
 */
#define LPI_NRBITS		lpi_id_bits
#define LPI_PROPBASE_SZ		ALIGN(BIT(LPI_NRBITS), SZ_64K)
#define LPI_PENDBASE_SZ		ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)

#define LPI_PROP_DEFAULT_PRIO	GICD_INT_DEF_PRI

/*
 * Collection structure - just an ID, and a redistributor address to
 * ping. We use one per CPU as a bag of interrupts assigned to this
 * CPU.
 */
struct its_collection {
	u64			target_address;
	u16			col_id;
};

/*
 * The ITS_BASER structure - contains memory information, cached
 * value of BASER register configuration and ITS page size.
 */
struct its_baser {
	void		*base;
	u64		val;
	u32		order;
	u32		psz;
};

struct its_device;

/*
 * The ITS structure - contains most of the infrastructure, with the
 * top-level MSI domain, the command queue, the collections, and the
 * list of devices writing to it.
 *
 * dev_alloc_lock has to be taken for device allocations, while the
 * spinlock must be taken to parse data structures such as the device
 * list.
 */
struct its_node {
	raw_spinlock_t		lock;
	struct mutex		dev_alloc_lock;
	struct list_head	entry;
	void __iomem		*base;
	phys_addr_t		phys_base;
	struct its_cmd_block	*cmd_base;
	struct its_cmd_block	*cmd_write;
	struct its_baser	tables[GITS_BASER_NR_REGS];
	struct its_collection	*collections;
	struct fwnode_handle	*fwnode_handle;
	u64			(*get_msi_base)(struct its_device *its_dev);
	u64			typer;
	u64			cbaser_save;
	u32			ctlr_save;
	u32			mpidr;
	struct list_head	its_device_list;
	u64			flags;
	unsigned long		list_nr;
	int			numa_node;
	unsigned int		msi_domain_flags;
	u32			pre_its_base;	/* for Socionext Synquacer */
	int			vlpi_redist_offset;
};

/* GICv4 / GICv4.1 capability tests, derived from the cached GITS_TYPER */
#define is_v4(its)		(!!((its)->typer & GITS_TYPER_VLPIS))
#define is_v4_1(its)		(!!((its)->typer & GITS_TYPER_VMAPP))
#define device_ids(its)		(FIELD_GET(GITS_TYPER_DEVBITS, (its)->typer) + 1)

#define ITS_ITT_ALIGN		SZ_256

/* The maximum number of VPEID bits supported by VLPI commands */
#define ITS_MAX_VPEID_BITS						\
	({								\
		int nvpeid = 16;					\
		if (gic_rdists->has_rvpeid &&				\
		    gic_rdists->gicd_typer2 & GICD_TYPER2_VIL)		\
			nvpeid = 1 + (gic_rdists->gicd_typer2 &		\
				      GICD_TYPER2_VID);			\
									\
		nvpeid;							\
	})
#define ITS_MAX_VPEID		(1 << (ITS_MAX_VPEID_BITS))

/* Convert page order to size in bytes */
#define PAGE_ORDER_TO_SIZE(o)	(PAGE_SIZE << (o))

/*
 * Per-device event bookkeeping: the LPI allocation bitmap, the
 * event->collection mapping, and (for GICv4) the event->VLPI mapping.
 */
struct event_lpi_map {
	unsigned long		*lpi_map;
	u16			*col_map;
	irq_hw_number_t		lpi_base;
	int			nr_lpis;
	raw_spinlock_t		vlpi_lock;
	struct its_vm		*vm;
	struct its_vlpi_map	*vlpi_maps;
	int			nr_vlpis;
};

/*
 * The ITS view of a device - belongs to an ITS, owns an interrupt
 * translation table, and a list of interrupts. If it some of its
 * LPIs are injected into a guest (GICv4), the event_map.vm field
 * indicates which one.
 */
struct its_device {
	struct list_head	entry;
	struct its_node		*its;
	struct event_lpi_map	event_map;
	void			*itt;
	u32			nr_ites;
	u32			device_id;
	bool			shared;
};

static struct {
	raw_spinlock_t		lock;
	struct its_device	*dev;
	struct its_vpe		**vpes;
	int			next_victim;
} vpe_proxy;

static LIST_HEAD(its_nodes);
static DEFINE_RAW_SPINLOCK(its_lock);
static struct rdists *gic_rdists;
static struct irq_domain *its_parent;

static unsigned long its_list_map;
static u16 vmovp_seq_num;
static DEFINE_RAW_SPINLOCK(vmovp_lock);

static DEFINE_IDA(its_vpeid_ida);

#define gic_data_rdist()		(raw_cpu_ptr(gic_rdists->rdist))
#define gic_data_rdist_cpu(cpu)		(per_cpu_ptr(gic_rdists->rdist, cpu))
#define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
#define gic_data_rdist_vlpi_base()	(gic_data_rdist_rd_base() + SZ_128K)

/*
 * Build a bitmask of the ITS list numbers of all v4 ITSes that
 * currently have VLPIs mapped for @vm.
 */
static u16 get_its_list(struct its_vm *vm)
{
	struct its_node *its;
	unsigned long its_list = 0;

	list_for_each_entry(its, &its_nodes, entry) {
		if (!is_v4(its))
			continue;

		if (vm->vlpi_count[its->list_nr])
			__set_bit(its->list_nr, &its_list);
	}

	return (u16)its_list;
}

/* Translate an irq_data's hwirq into the per-device event number */
static inline u32 its_get_event_id(struct irq_data *d)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	return d->hwirq - its_dev->event_map.lpi_base;
}

static struct its_collection *dev_event_to_col(struct its_device *its_dev,
					       u32 event)
{
	struct its_node *its = its_dev->its;

	return its->collections + its_dev->event_map.col_map[event];
}

/* Returns NULL (with a one-shot warning) on an out-of-range event */
static struct its_vlpi_map *dev_event_to_vlpi_map(struct its_device *its_dev,
					       u32 event)
{
	if (WARN_ON_ONCE(event >= its_dev->event_map.nr_lpis))
		return NULL;

	return &its_dev->event_map.vlpi_maps[event];
}

/*
 * Return the VLPI mapping for an interrupt that is forwarded to a
 * vcpu, or NULL if the interrupt is purely physical.
 */
static struct its_vlpi_map *get_vlpi_map(struct irq_data *d)
{
	if (irqd_is_forwarded_to_vcpu(d)) {
		struct its_device *its_dev = irq_data_get_irq_chip_data(d);
		u32 event = its_get_event_id(d);

		return dev_event_to_vlpi_map(its_dev, event);
	}

	return NULL;
}

static int irq_to_cpuid(struct irq_data *d)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	struct its_vlpi_map *map = get_vlpi_map(d);

	if (map)
		return map->vpe->col_idx;

	return its_dev->event_map.col_map[its_get_event_id(d)];
}

/*
 * A valid collection target address has its low 16 bits clear;
 * warn once and return NULL otherwise.
 */
static struct its_collection *valid_col(struct its_collection *col)
{
	if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0)))
		return NULL;

	return col;
}

static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe)
{
	if (valid_col(its->collections + vpe->col_idx))
		return vpe;

	return NULL;
}

/*
 * ITS command descriptors - parameters to be encoded in a command
 * block.
 */
struct its_cmd_desc {
	union {
		struct {
			struct its_device *dev;
			u32 event_id;
		} its_inv_cmd;

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_clear_cmd;

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_int_cmd;

		struct {
			struct its_device *dev;
			int valid;
		} its_mapd_cmd;

		struct {
			struct its_collection *col;
			int valid;
		} its_mapc_cmd;

		struct {
			struct its_device *dev;
			u32 phys_id;
			u32 event_id;
		} its_mapti_cmd;

		struct {
			struct its_device *dev;
			struct its_collection *col;
			u32 event_id;
		} its_movi_cmd;

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_discard_cmd;

		struct {
			struct its_collection *col;
		} its_invall_cmd;

		struct {
			struct its_vpe *vpe;
		} its_vinvall_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_collection *col;
			bool valid;
		} its_vmapp_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_device *dev;
			u32 virt_id;
			u32 event_id;
			bool db_enabled;
		} its_vmapti_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_device *dev;
			u32 event_id;
			bool db_enabled;
		} its_vmovi_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_collection *col;
			u16 seq_num;
			u16 its_list;
		} its_vmovp_cmd;

		struct {
			struct its_vpe *vpe;
		} its_invdb_cmd;
	};
};

/*
 * The ITS command block, which is what the ITS actually parses.
 */
struct its_cmd_block {
	union {
		u64	raw_cmd[4];
		__le64	raw_cmd_le[4];
	};
};

#define ITS_CMD_QUEUE_SZ		SZ_64K
#define ITS_CMD_QUEUE_NR_ENTRIES	(ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block))

typedef struct its_collection *(*its_cmd_builder_t)(struct its_node *,
						    struct its_cmd_block *,
						    struct its_cmd_desc *);

typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_node *,
					      struct its_cmd_block *,
					      struct its_cmd_desc *);

/* Write @val into bits [h:l] of *raw_cmd, clearing the old contents */
static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l)
{
	u64 mask = GENMASK_ULL(h, l);
	*raw_cmd &= ~mask;
	*raw_cmd |= (val << l) & mask;
}

/*
 * Field encoders: each command field lives at a fixed bit range in one
 * of the four 64-bit words of the command block.
 */
static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
{
	its_mask_encode(&cmd->raw_cmd[0], cmd_nr, 7, 0);
}

static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
{
	its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32);
}

static void its_encode_event_id(struct its_cmd_block *cmd, u32 id)
{
	its_mask_encode(&cmd->raw_cmd[1], id, 31, 0);
}

static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id)
{
	its_mask_encode(&cmd->raw_cmd[1], phys_id, 63, 32);
}

static void its_encode_size(struct its_cmd_block *cmd, u8 size)
{
	its_mask_encode(&cmd->raw_cmd[1], size, 4, 0);
}

static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
{
	its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 51, 8);
}

static void its_encode_valid(struct its_cmd_block *cmd, int valid)
{
	its_mask_encode(&cmd->raw_cmd[2], !!valid, 63, 63);
}

static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
{
	its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 51, 16);
}

static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
{
	its_mask_encode(&cmd->raw_cmd[2], col, 15, 0);
}

static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid)
{
	its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32);
}

static void its_encode_virt_id(struct its_cmd_block *cmd, u32 virt_id)
{
	its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0);
}
static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id)
{
	its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32);
}

static void its_encode_db_valid(struct its_cmd_block *cmd, bool db_valid)
{
	its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0);
}

static void its_encode_seq_num(struct its_cmd_block *cmd, u16 seq_num)
{
	its_mask_encode(&cmd->raw_cmd[0], seq_num, 47, 32);
}

static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list)
{
	its_mask_encode(&cmd->raw_cmd[1], its_list, 15, 0);
}

static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa)
{
	its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 51, 16);
}

static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
{
	its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0);
}

static void its_encode_vconf_addr(struct its_cmd_block *cmd, u64 vconf_pa)
{
	its_mask_encode(&cmd->raw_cmd[0], vconf_pa >> 16, 51, 16);
}

static void its_encode_alloc(struct its_cmd_block *cmd, bool alloc)
{
	its_mask_encode(&cmd->raw_cmd[0], alloc, 8, 8);
}

static void its_encode_ptz(struct its_cmd_block *cmd, bool ptz)
{
	its_mask_encode(&cmd->raw_cmd[0], ptz, 9, 9);
}

static void its_encode_vmapp_default_db(struct its_cmd_block *cmd,
					u32 vpe_db_lpi)
{
	its_mask_encode(&cmd->raw_cmd[1], vpe_db_lpi, 31, 0);
}

static void its_encode_vmovp_default_db(struct its_cmd_block *cmd,
					u32 vpe_db_lpi)
{
	its_mask_encode(&cmd->raw_cmd[3], vpe_db_lpi, 31, 0);
}

static void its_encode_db(struct its_cmd_block *cmd, bool db)
{
	its_mask_encode(&cmd->raw_cmd[2], db, 63, 63);
}

/* Convert the four command words to little-endian before they hit HW */
static inline void its_fixup_cmd(struct its_cmd_block *cmd)
{
	/* Let's fixup BE commands */
	cmd->raw_cmd_le[0] = cpu_to_le64(cmd->raw_cmd[0]);
	cmd->raw_cmd_le[1] = cpu_to_le64(cmd->raw_cmd[1]);
	cmd->raw_cmd_le[2] = cpu_to_le64(cmd->raw_cmd[2]);
	cmd->raw_cmd_le[3] = cpu_to_le64(cmd->raw_cmd[3]);
}

/*
 * Command builders: each fills in @cmd from @desc and returns the
 * collection (checked by valid_col()) or VPE (checked by valid_vpe())
 * the caller is concerned with, or NULL when there is none.
 */
static struct its_collection *its_build_mapd_cmd(struct its_node *its,
						 struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	unsigned long itt_addr;
	u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites);

	itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt);
	itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN);

	its_encode_cmd(cmd, GITS_CMD_MAPD);
	its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id);
	/* The ITT size field is log2(nr_ites) minus one */
	its_encode_size(cmd, size - 1);
	its_encode_itt(cmd, itt_addr);
	its_encode_valid(cmd, desc->its_mapd_cmd.valid);

	its_fixup_cmd(cmd);

	return NULL;
}

static struct its_collection *its_build_mapc_cmd(struct its_node *its,
						 struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_MAPC);
	its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
	its_encode_target(cmd, desc->its_mapc_cmd.col->target_address);
	its_encode_valid(cmd, desc->its_mapc_cmd.valid);

	its_fixup_cmd(cmd);

	return desc->its_mapc_cmd.col;
}

static struct its_collection *its_build_mapti_cmd(struct its_node *its,
						  struct its_cmd_block *cmd,
						  struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_mapti_cmd.dev,
			       desc->its_mapti_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_MAPTI);
	its_encode_devid(cmd, desc->its_mapti_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_mapti_cmd.event_id);
	its_encode_phys_id(cmd, desc->its_mapti_cmd.phys_id);
	its_encode_collection(cmd, col->col_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_movi_cmd(struct its_node *its,
						 struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_movi_cmd.dev,
			       desc->its_movi_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_MOVI);
	its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_movi_cmd.event_id);
	its_encode_collection(cmd, desc->its_movi_cmd.col->col_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_discard_cmd(struct its_node *its,
						    struct its_cmd_block *cmd,
						    struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_discard_cmd.dev,
			       desc->its_discard_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_DISCARD);
	its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_discard_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_inv_cmd(struct its_node *its,
						struct its_cmd_block *cmd,
						struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_inv_cmd.dev,
			       desc->its_inv_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_INV);
	its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_inv_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_int_cmd(struct its_node *its,
						struct its_cmd_block *cmd,
						struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_int_cmd.dev,
			       desc->its_int_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_INT);
	its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_int_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_clear_cmd(struct its_node *its,
						  struct its_cmd_block *cmd,
						  struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_clear_cmd.dev,
			       desc->its_clear_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_CLEAR);
	its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_clear_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_invall_cmd(struct its_node *its,
						   struct its_cmd_block *cmd,
						   struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_INVALL);
	its_encode_collection(cmd, desc->its_invall_cmd.col->col_id);

	its_fixup_cmd(cmd);

	return NULL;
}

static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,
					     struct its_cmd_block *cmd,
					     struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_VINVALL);
	its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vinvall_cmd.vpe);
}

static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
					   struct its_cmd_block *cmd,
					   struct its_cmd_desc *desc)
{
	unsigned long vpt_addr, vconf_addr;
	u64 target;
	bool alloc;

	its_encode_cmd(cmd, GITS_CMD_VMAPP);
	its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id);
	its_encode_valid(cmd, desc->its_vmapp_cmd.valid);

	if (!desc->its_vmapp_cmd.valid) {
		/* Unmap: on v4.1, flag the last unmap via the ALLOC bit */
		if (is_v4_1(its)) {
			alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count);
			its_encode_alloc(cmd, alloc);
		}

		goto out;
	}

	vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
	target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset;

	its_encode_target(cmd, target);
	its_encode_vpt_addr(cmd, vpt_addr);
	its_encode_vpt_size(cmd, LPI_NRBITS - 1);

	if (!is_v4_1(its))
		goto out;

	vconf_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->its_vm->vprop_page));

	alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count);

	its_encode_alloc(cmd, alloc);

	/* We can only signal PTZ when alloc==1. Why do we have two bits? */
	its_encode_ptz(cmd, alloc);
	its_encode_vconf_addr(cmd, vconf_addr);
	its_encode_vmapp_default_db(cmd, desc->its_vmapp_cmd.vpe->vpe_db_lpi);

out:
	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vmapp_cmd.vpe);
}

static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
					    struct its_cmd_block *cmd,
					    struct its_cmd_desc *desc)
{
	u32 db;

	/* 1023 means "no doorbell" (v4.1 doesn't use per-event doorbells) */
	if (!is_v4_1(its) && desc->its_vmapti_cmd.db_enabled)
		db = desc->its_vmapti_cmd.vpe->vpe_db_lpi;
	else
		db = 1023;

	its_encode_cmd(cmd, GITS_CMD_VMAPTI);
	its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id);
	its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id);
	its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id);
	its_encode_db_phys_id(cmd, db);
	its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vmapti_cmd.vpe);
}

static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
					   struct its_cmd_block *cmd,
					   struct its_cmd_desc
*desc) 757d011e4e6SMarc Zyngier { 758d011e4e6SMarc Zyngier u32 db; 759d011e4e6SMarc Zyngier 7603858d4dfSMarc Zyngier if (!is_v4_1(its) && desc->its_vmovi_cmd.db_enabled) 761d011e4e6SMarc Zyngier db = desc->its_vmovi_cmd.vpe->vpe_db_lpi; 762d011e4e6SMarc Zyngier else 763d011e4e6SMarc Zyngier db = 1023; 764d011e4e6SMarc Zyngier 765d011e4e6SMarc Zyngier its_encode_cmd(cmd, GITS_CMD_VMOVI); 766d011e4e6SMarc Zyngier its_encode_devid(cmd, desc->its_vmovi_cmd.dev->device_id); 767d011e4e6SMarc Zyngier its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id); 768d011e4e6SMarc Zyngier its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id); 769d011e4e6SMarc Zyngier its_encode_db_phys_id(cmd, db); 770d011e4e6SMarc Zyngier its_encode_db_valid(cmd, true); 771d011e4e6SMarc Zyngier 772d011e4e6SMarc Zyngier its_fixup_cmd(cmd); 773d011e4e6SMarc Zyngier 774205e065dSMarc Zyngier return valid_vpe(its, desc->its_vmovi_cmd.vpe); 775d011e4e6SMarc Zyngier } 776d011e4e6SMarc Zyngier 77767047f90SMarc Zyngier static struct its_vpe *its_build_vmovp_cmd(struct its_node *its, 77867047f90SMarc Zyngier struct its_cmd_block *cmd, 7793171a47aSMarc Zyngier struct its_cmd_desc *desc) 7803171a47aSMarc Zyngier { 7815c9a882eSMarc Zyngier u64 target; 7825c9a882eSMarc Zyngier 7835c9a882eSMarc Zyngier target = desc->its_vmovp_cmd.col->target_address + its->vlpi_redist_offset; 7843171a47aSMarc Zyngier its_encode_cmd(cmd, GITS_CMD_VMOVP); 7853171a47aSMarc Zyngier its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num); 7863171a47aSMarc Zyngier its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list); 7873171a47aSMarc Zyngier its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id); 7885c9a882eSMarc Zyngier its_encode_target(cmd, target); 7893171a47aSMarc Zyngier 790dd3f050aSMarc Zyngier if (is_v4_1(its)) { 791dd3f050aSMarc Zyngier its_encode_db(cmd, true); 792dd3f050aSMarc Zyngier its_encode_vmovp_default_db(cmd, desc->its_vmovp_cmd.vpe->vpe_db_lpi); 793dd3f050aSMarc Zyngier } 794dd3f050aSMarc Zyngier 7953171a47aSMarc 
Zyngier its_fixup_cmd(cmd); 7963171a47aSMarc Zyngier 797205e065dSMarc Zyngier return valid_vpe(its, desc->its_vmovp_cmd.vpe); 7983171a47aSMarc Zyngier } 7993171a47aSMarc Zyngier 80028614696SMarc Zyngier static struct its_vpe *its_build_vinv_cmd(struct its_node *its, 80128614696SMarc Zyngier struct its_cmd_block *cmd, 80228614696SMarc Zyngier struct its_cmd_desc *desc) 80328614696SMarc Zyngier { 80428614696SMarc Zyngier struct its_vlpi_map *map; 80528614696SMarc Zyngier 80628614696SMarc Zyngier map = dev_event_to_vlpi_map(desc->its_inv_cmd.dev, 80728614696SMarc Zyngier desc->its_inv_cmd.event_id); 80828614696SMarc Zyngier 80928614696SMarc Zyngier its_encode_cmd(cmd, GITS_CMD_INV); 81028614696SMarc Zyngier its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id); 81128614696SMarc Zyngier its_encode_event_id(cmd, desc->its_inv_cmd.event_id); 81228614696SMarc Zyngier 81328614696SMarc Zyngier its_fixup_cmd(cmd); 81428614696SMarc Zyngier 81528614696SMarc Zyngier return valid_vpe(its, map->vpe); 81628614696SMarc Zyngier } 81728614696SMarc Zyngier 818ed0e4aa9SMarc Zyngier static struct its_vpe *its_build_vint_cmd(struct its_node *its, 819ed0e4aa9SMarc Zyngier struct its_cmd_block *cmd, 820ed0e4aa9SMarc Zyngier struct its_cmd_desc *desc) 821ed0e4aa9SMarc Zyngier { 822ed0e4aa9SMarc Zyngier struct its_vlpi_map *map; 823ed0e4aa9SMarc Zyngier 824ed0e4aa9SMarc Zyngier map = dev_event_to_vlpi_map(desc->its_int_cmd.dev, 825ed0e4aa9SMarc Zyngier desc->its_int_cmd.event_id); 826ed0e4aa9SMarc Zyngier 827ed0e4aa9SMarc Zyngier its_encode_cmd(cmd, GITS_CMD_INT); 828ed0e4aa9SMarc Zyngier its_encode_devid(cmd, desc->its_int_cmd.dev->device_id); 829ed0e4aa9SMarc Zyngier its_encode_event_id(cmd, desc->its_int_cmd.event_id); 830ed0e4aa9SMarc Zyngier 831ed0e4aa9SMarc Zyngier its_fixup_cmd(cmd); 832ed0e4aa9SMarc Zyngier 833ed0e4aa9SMarc Zyngier return valid_vpe(its, map->vpe); 834ed0e4aa9SMarc Zyngier } 835ed0e4aa9SMarc Zyngier 836ed0e4aa9SMarc Zyngier static struct its_vpe 
*its_build_vclear_cmd(struct its_node *its, 837ed0e4aa9SMarc Zyngier struct its_cmd_block *cmd, 838ed0e4aa9SMarc Zyngier struct its_cmd_desc *desc) 839ed0e4aa9SMarc Zyngier { 840ed0e4aa9SMarc Zyngier struct its_vlpi_map *map; 841ed0e4aa9SMarc Zyngier 842ed0e4aa9SMarc Zyngier map = dev_event_to_vlpi_map(desc->its_clear_cmd.dev, 843ed0e4aa9SMarc Zyngier desc->its_clear_cmd.event_id); 844ed0e4aa9SMarc Zyngier 845ed0e4aa9SMarc Zyngier its_encode_cmd(cmd, GITS_CMD_CLEAR); 846ed0e4aa9SMarc Zyngier its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id); 847ed0e4aa9SMarc Zyngier its_encode_event_id(cmd, desc->its_clear_cmd.event_id); 848ed0e4aa9SMarc Zyngier 849ed0e4aa9SMarc Zyngier its_fixup_cmd(cmd); 850ed0e4aa9SMarc Zyngier 851ed0e4aa9SMarc Zyngier return valid_vpe(its, map->vpe); 852ed0e4aa9SMarc Zyngier } 853ed0e4aa9SMarc Zyngier 854d97c97baSMarc Zyngier static struct its_vpe *its_build_invdb_cmd(struct its_node *its, 855d97c97baSMarc Zyngier struct its_cmd_block *cmd, 856d97c97baSMarc Zyngier struct its_cmd_desc *desc) 857d97c97baSMarc Zyngier { 858d97c97baSMarc Zyngier if (WARN_ON(!is_v4_1(its))) 859d97c97baSMarc Zyngier return NULL; 860d97c97baSMarc Zyngier 861d97c97baSMarc Zyngier its_encode_cmd(cmd, GITS_CMD_INVDB); 862d97c97baSMarc Zyngier its_encode_vpeid(cmd, desc->its_invdb_cmd.vpe->vpe_id); 863d97c97baSMarc Zyngier 864d97c97baSMarc Zyngier its_fixup_cmd(cmd); 865d97c97baSMarc Zyngier 866d97c97baSMarc Zyngier return valid_vpe(its, desc->its_invdb_cmd.vpe); 867d97c97baSMarc Zyngier } 868d97c97baSMarc Zyngier 869cc2d3216SMarc Zyngier static u64 its_cmd_ptr_to_offset(struct its_node *its, 870cc2d3216SMarc Zyngier struct its_cmd_block *ptr) 871cc2d3216SMarc Zyngier { 872cc2d3216SMarc Zyngier return (ptr - its->cmd_base) * sizeof(*ptr); 873cc2d3216SMarc Zyngier } 874cc2d3216SMarc Zyngier 875cc2d3216SMarc Zyngier static int its_queue_full(struct its_node *its) 876cc2d3216SMarc Zyngier { 877cc2d3216SMarc Zyngier int widx; 878cc2d3216SMarc Zyngier int ridx; 
879cc2d3216SMarc Zyngier 880cc2d3216SMarc Zyngier widx = its->cmd_write - its->cmd_base; 881cc2d3216SMarc Zyngier ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block); 882cc2d3216SMarc Zyngier 883cc2d3216SMarc Zyngier /* This is incredibly unlikely to happen, unless the ITS locks up. */ 884cc2d3216SMarc Zyngier if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx) 885cc2d3216SMarc Zyngier return 1; 886cc2d3216SMarc Zyngier 887cc2d3216SMarc Zyngier return 0; 888cc2d3216SMarc Zyngier } 889cc2d3216SMarc Zyngier 890cc2d3216SMarc Zyngier static struct its_cmd_block *its_allocate_entry(struct its_node *its) 891cc2d3216SMarc Zyngier { 892cc2d3216SMarc Zyngier struct its_cmd_block *cmd; 893cc2d3216SMarc Zyngier u32 count = 1000000; /* 1s! */ 894cc2d3216SMarc Zyngier 895cc2d3216SMarc Zyngier while (its_queue_full(its)) { 896cc2d3216SMarc Zyngier count--; 897cc2d3216SMarc Zyngier if (!count) { 898cc2d3216SMarc Zyngier pr_err_ratelimited("ITS queue not draining\n"); 899cc2d3216SMarc Zyngier return NULL; 900cc2d3216SMarc Zyngier } 901cc2d3216SMarc Zyngier cpu_relax(); 902cc2d3216SMarc Zyngier udelay(1); 903cc2d3216SMarc Zyngier } 904cc2d3216SMarc Zyngier 905cc2d3216SMarc Zyngier cmd = its->cmd_write++; 906cc2d3216SMarc Zyngier 907cc2d3216SMarc Zyngier /* Handle queue wrapping */ 908cc2d3216SMarc Zyngier if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES)) 909cc2d3216SMarc Zyngier its->cmd_write = its->cmd_base; 910cc2d3216SMarc Zyngier 91134d677a9SMarc Zyngier /* Clear command */ 91234d677a9SMarc Zyngier cmd->raw_cmd[0] = 0; 91334d677a9SMarc Zyngier cmd->raw_cmd[1] = 0; 91434d677a9SMarc Zyngier cmd->raw_cmd[2] = 0; 91534d677a9SMarc Zyngier cmd->raw_cmd[3] = 0; 91634d677a9SMarc Zyngier 917cc2d3216SMarc Zyngier return cmd; 918cc2d3216SMarc Zyngier } 919cc2d3216SMarc Zyngier 920cc2d3216SMarc Zyngier static struct its_cmd_block *its_post_commands(struct its_node *its) 921cc2d3216SMarc Zyngier { 922cc2d3216SMarc Zyngier u64 wr = 
its_cmd_ptr_to_offset(its, its->cmd_write); 923cc2d3216SMarc Zyngier 924cc2d3216SMarc Zyngier writel_relaxed(wr, its->base + GITS_CWRITER); 925cc2d3216SMarc Zyngier 926cc2d3216SMarc Zyngier return its->cmd_write; 927cc2d3216SMarc Zyngier } 928cc2d3216SMarc Zyngier 929cc2d3216SMarc Zyngier static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd) 930cc2d3216SMarc Zyngier { 931cc2d3216SMarc Zyngier /* 932cc2d3216SMarc Zyngier * Make sure the commands written to memory are observable by 933cc2d3216SMarc Zyngier * the ITS. 934cc2d3216SMarc Zyngier */ 935cc2d3216SMarc Zyngier if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING) 936328191c0SVladimir Murzin gic_flush_dcache_to_poc(cmd, sizeof(*cmd)); 937cc2d3216SMarc Zyngier else 938cc2d3216SMarc Zyngier dsb(ishst); 939cc2d3216SMarc Zyngier } 940cc2d3216SMarc Zyngier 941a19b462fSMarc Zyngier static int its_wait_for_range_completion(struct its_node *its, 942a050fa54SHeyi Guo u64 prev_idx, 943cc2d3216SMarc Zyngier struct its_cmd_block *to) 944cc2d3216SMarc Zyngier { 945a050fa54SHeyi Guo u64 rd_idx, to_idx, linear_idx; 946cc2d3216SMarc Zyngier u32 count = 1000000; /* 1s! */ 947cc2d3216SMarc Zyngier 948a050fa54SHeyi Guo /* Linearize to_idx if the command set has wrapped around */ 949cc2d3216SMarc Zyngier to_idx = its_cmd_ptr_to_offset(its, to); 950a050fa54SHeyi Guo if (to_idx < prev_idx) 951a050fa54SHeyi Guo to_idx += ITS_CMD_QUEUE_SZ; 952a050fa54SHeyi Guo 953a050fa54SHeyi Guo linear_idx = prev_idx; 954cc2d3216SMarc Zyngier 955cc2d3216SMarc Zyngier while (1) { 956a050fa54SHeyi Guo s64 delta; 957a050fa54SHeyi Guo 958cc2d3216SMarc Zyngier rd_idx = readl_relaxed(its->base + GITS_CREADR); 9599bdd8b1cSMarc Zyngier 960a050fa54SHeyi Guo /* 961a050fa54SHeyi Guo * Compute the read pointer progress, taking the 962a050fa54SHeyi Guo * potential wrap-around into account. 
963a050fa54SHeyi Guo */ 964a050fa54SHeyi Guo delta = rd_idx - prev_idx; 965a050fa54SHeyi Guo if (rd_idx < prev_idx) 966a050fa54SHeyi Guo delta += ITS_CMD_QUEUE_SZ; 9679bdd8b1cSMarc Zyngier 968a050fa54SHeyi Guo linear_idx += delta; 969a050fa54SHeyi Guo if (linear_idx >= to_idx) 970cc2d3216SMarc Zyngier break; 971cc2d3216SMarc Zyngier 972cc2d3216SMarc Zyngier count--; 973cc2d3216SMarc Zyngier if (!count) { 974a050fa54SHeyi Guo pr_err_ratelimited("ITS queue timeout (%llu %llu)\n", 975a050fa54SHeyi Guo to_idx, linear_idx); 976a19b462fSMarc Zyngier return -1; 977cc2d3216SMarc Zyngier } 978a050fa54SHeyi Guo prev_idx = rd_idx; 979cc2d3216SMarc Zyngier cpu_relax(); 980cc2d3216SMarc Zyngier udelay(1); 981cc2d3216SMarc Zyngier } 982a19b462fSMarc Zyngier 983a19b462fSMarc Zyngier return 0; 984cc2d3216SMarc Zyngier } 985cc2d3216SMarc Zyngier 986e4f9094bSMarc Zyngier /* Warning, macro hell follows */ 987e4f9094bSMarc Zyngier #define BUILD_SINGLE_CMD_FUNC(name, buildtype, synctype, buildfn) \ 988e4f9094bSMarc Zyngier void name(struct its_node *its, \ 989e4f9094bSMarc Zyngier buildtype builder, \ 990e4f9094bSMarc Zyngier struct its_cmd_desc *desc) \ 991e4f9094bSMarc Zyngier { \ 992e4f9094bSMarc Zyngier struct its_cmd_block *cmd, *sync_cmd, *next_cmd; \ 993e4f9094bSMarc Zyngier synctype *sync_obj; \ 994e4f9094bSMarc Zyngier unsigned long flags; \ 995a050fa54SHeyi Guo u64 rd_idx; \ 996e4f9094bSMarc Zyngier \ 997e4f9094bSMarc Zyngier raw_spin_lock_irqsave(&its->lock, flags); \ 998e4f9094bSMarc Zyngier \ 999e4f9094bSMarc Zyngier cmd = its_allocate_entry(its); \ 1000e4f9094bSMarc Zyngier if (!cmd) { /* We're soooooo screewed... 
*/ \ 1001e4f9094bSMarc Zyngier raw_spin_unlock_irqrestore(&its->lock, flags); \ 1002e4f9094bSMarc Zyngier return; \ 1003e4f9094bSMarc Zyngier } \ 100467047f90SMarc Zyngier sync_obj = builder(its, cmd, desc); \ 1005e4f9094bSMarc Zyngier its_flush_cmd(its, cmd); \ 1006e4f9094bSMarc Zyngier \ 1007e4f9094bSMarc Zyngier if (sync_obj) { \ 1008e4f9094bSMarc Zyngier sync_cmd = its_allocate_entry(its); \ 1009e4f9094bSMarc Zyngier if (!sync_cmd) \ 1010e4f9094bSMarc Zyngier goto post; \ 1011e4f9094bSMarc Zyngier \ 101267047f90SMarc Zyngier buildfn(its, sync_cmd, sync_obj); \ 1013e4f9094bSMarc Zyngier its_flush_cmd(its, sync_cmd); \ 1014e4f9094bSMarc Zyngier } \ 1015e4f9094bSMarc Zyngier \ 1016e4f9094bSMarc Zyngier post: \ 1017a050fa54SHeyi Guo rd_idx = readl_relaxed(its->base + GITS_CREADR); \ 1018e4f9094bSMarc Zyngier next_cmd = its_post_commands(its); \ 1019e4f9094bSMarc Zyngier raw_spin_unlock_irqrestore(&its->lock, flags); \ 1020e4f9094bSMarc Zyngier \ 1021a050fa54SHeyi Guo if (its_wait_for_range_completion(its, rd_idx, next_cmd)) \ 1022a19b462fSMarc Zyngier pr_err_ratelimited("ITS cmd %ps failed\n", builder); \ 1023e4f9094bSMarc Zyngier } 1024e4f9094bSMarc Zyngier 102567047f90SMarc Zyngier static void its_build_sync_cmd(struct its_node *its, 102667047f90SMarc Zyngier struct its_cmd_block *sync_cmd, 1027e4f9094bSMarc Zyngier struct its_collection *sync_col) 1028cc2d3216SMarc Zyngier { 1029cc2d3216SMarc Zyngier its_encode_cmd(sync_cmd, GITS_CMD_SYNC); 1030cc2d3216SMarc Zyngier its_encode_target(sync_cmd, sync_col->target_address); 1031e4f9094bSMarc Zyngier 1032cc2d3216SMarc Zyngier its_fixup_cmd(sync_cmd); 1033cc2d3216SMarc Zyngier } 1034cc2d3216SMarc Zyngier 1035e4f9094bSMarc Zyngier static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t, 1036e4f9094bSMarc Zyngier struct its_collection, its_build_sync_cmd) 1037cc2d3216SMarc Zyngier 103867047f90SMarc Zyngier static void its_build_vsync_cmd(struct its_node *its, 103967047f90SMarc Zyngier struct 
its_cmd_block *sync_cmd, 1040d011e4e6SMarc Zyngier struct its_vpe *sync_vpe) 1041d011e4e6SMarc Zyngier { 1042d011e4e6SMarc Zyngier its_encode_cmd(sync_cmd, GITS_CMD_VSYNC); 1043d011e4e6SMarc Zyngier its_encode_vpeid(sync_cmd, sync_vpe->vpe_id); 1044d011e4e6SMarc Zyngier 1045d011e4e6SMarc Zyngier its_fixup_cmd(sync_cmd); 1046d011e4e6SMarc Zyngier } 1047d011e4e6SMarc Zyngier 1048d011e4e6SMarc Zyngier static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t, 1049d011e4e6SMarc Zyngier struct its_vpe, its_build_vsync_cmd) 1050d011e4e6SMarc Zyngier 10518d85dcedSMarc Zyngier static void its_send_int(struct its_device *dev, u32 event_id) 10528d85dcedSMarc Zyngier { 10538d85dcedSMarc Zyngier struct its_cmd_desc desc; 10548d85dcedSMarc Zyngier 10558d85dcedSMarc Zyngier desc.its_int_cmd.dev = dev; 10568d85dcedSMarc Zyngier desc.its_int_cmd.event_id = event_id; 10578d85dcedSMarc Zyngier 10588d85dcedSMarc Zyngier its_send_single_command(dev->its, its_build_int_cmd, &desc); 10598d85dcedSMarc Zyngier } 10608d85dcedSMarc Zyngier 10618d85dcedSMarc Zyngier static void its_send_clear(struct its_device *dev, u32 event_id) 10628d85dcedSMarc Zyngier { 10638d85dcedSMarc Zyngier struct its_cmd_desc desc; 10648d85dcedSMarc Zyngier 10658d85dcedSMarc Zyngier desc.its_clear_cmd.dev = dev; 10668d85dcedSMarc Zyngier desc.its_clear_cmd.event_id = event_id; 10678d85dcedSMarc Zyngier 10688d85dcedSMarc Zyngier its_send_single_command(dev->its, its_build_clear_cmd, &desc); 1069cc2d3216SMarc Zyngier } 1070cc2d3216SMarc Zyngier 1071cc2d3216SMarc Zyngier static void its_send_inv(struct its_device *dev, u32 event_id) 1072cc2d3216SMarc Zyngier { 1073cc2d3216SMarc Zyngier struct its_cmd_desc desc; 1074cc2d3216SMarc Zyngier 1075cc2d3216SMarc Zyngier desc.its_inv_cmd.dev = dev; 1076cc2d3216SMarc Zyngier desc.its_inv_cmd.event_id = event_id; 1077cc2d3216SMarc Zyngier 1078cc2d3216SMarc Zyngier its_send_single_command(dev->its, its_build_inv_cmd, &desc); 1079cc2d3216SMarc Zyngier } 
1080cc2d3216SMarc Zyngier 1081cc2d3216SMarc Zyngier static void its_send_mapd(struct its_device *dev, int valid) 1082cc2d3216SMarc Zyngier { 1083cc2d3216SMarc Zyngier struct its_cmd_desc desc; 1084cc2d3216SMarc Zyngier 1085cc2d3216SMarc Zyngier desc.its_mapd_cmd.dev = dev; 1086cc2d3216SMarc Zyngier desc.its_mapd_cmd.valid = !!valid; 1087cc2d3216SMarc Zyngier 1088cc2d3216SMarc Zyngier its_send_single_command(dev->its, its_build_mapd_cmd, &desc); 1089cc2d3216SMarc Zyngier } 1090cc2d3216SMarc Zyngier 1091cc2d3216SMarc Zyngier static void its_send_mapc(struct its_node *its, struct its_collection *col, 1092cc2d3216SMarc Zyngier int valid) 1093cc2d3216SMarc Zyngier { 1094cc2d3216SMarc Zyngier struct its_cmd_desc desc; 1095cc2d3216SMarc Zyngier 1096cc2d3216SMarc Zyngier desc.its_mapc_cmd.col = col; 1097cc2d3216SMarc Zyngier desc.its_mapc_cmd.valid = !!valid; 1098cc2d3216SMarc Zyngier 1099cc2d3216SMarc Zyngier its_send_single_command(its, its_build_mapc_cmd, &desc); 1100cc2d3216SMarc Zyngier } 1101cc2d3216SMarc Zyngier 11026a25ad3aSMarc Zyngier static void its_send_mapti(struct its_device *dev, u32 irq_id, u32 id) 1103cc2d3216SMarc Zyngier { 1104cc2d3216SMarc Zyngier struct its_cmd_desc desc; 1105cc2d3216SMarc Zyngier 11066a25ad3aSMarc Zyngier desc.its_mapti_cmd.dev = dev; 11076a25ad3aSMarc Zyngier desc.its_mapti_cmd.phys_id = irq_id; 11086a25ad3aSMarc Zyngier desc.its_mapti_cmd.event_id = id; 1109cc2d3216SMarc Zyngier 11106a25ad3aSMarc Zyngier its_send_single_command(dev->its, its_build_mapti_cmd, &desc); 1111cc2d3216SMarc Zyngier } 1112cc2d3216SMarc Zyngier 1113cc2d3216SMarc Zyngier static void its_send_movi(struct its_device *dev, 1114cc2d3216SMarc Zyngier struct its_collection *col, u32 id) 1115cc2d3216SMarc Zyngier { 1116cc2d3216SMarc Zyngier struct its_cmd_desc desc; 1117cc2d3216SMarc Zyngier 1118cc2d3216SMarc Zyngier desc.its_movi_cmd.dev = dev; 1119cc2d3216SMarc Zyngier desc.its_movi_cmd.col = col; 1120591e5becSMarc Zyngier desc.its_movi_cmd.event_id = id; 
1121cc2d3216SMarc Zyngier 1122cc2d3216SMarc Zyngier its_send_single_command(dev->its, its_build_movi_cmd, &desc); 1123cc2d3216SMarc Zyngier } 1124cc2d3216SMarc Zyngier 1125cc2d3216SMarc Zyngier static void its_send_discard(struct its_device *dev, u32 id) 1126cc2d3216SMarc Zyngier { 1127cc2d3216SMarc Zyngier struct its_cmd_desc desc; 1128cc2d3216SMarc Zyngier 1129cc2d3216SMarc Zyngier desc.its_discard_cmd.dev = dev; 1130cc2d3216SMarc Zyngier desc.its_discard_cmd.event_id = id; 1131cc2d3216SMarc Zyngier 1132cc2d3216SMarc Zyngier its_send_single_command(dev->its, its_build_discard_cmd, &desc); 1133cc2d3216SMarc Zyngier } 1134cc2d3216SMarc Zyngier 1135cc2d3216SMarc Zyngier static void its_send_invall(struct its_node *its, struct its_collection *col) 1136cc2d3216SMarc Zyngier { 1137cc2d3216SMarc Zyngier struct its_cmd_desc desc; 1138cc2d3216SMarc Zyngier 1139cc2d3216SMarc Zyngier desc.its_invall_cmd.col = col; 1140cc2d3216SMarc Zyngier 1141cc2d3216SMarc Zyngier its_send_single_command(its, its_build_invall_cmd, &desc); 1142cc2d3216SMarc Zyngier } 1143c48ed51cSMarc Zyngier 1144d011e4e6SMarc Zyngier static void its_send_vmapti(struct its_device *dev, u32 id) 1145d011e4e6SMarc Zyngier { 1146c1d4d5cdSMarc Zyngier struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id); 1147d011e4e6SMarc Zyngier struct its_cmd_desc desc; 1148d011e4e6SMarc Zyngier 1149d011e4e6SMarc Zyngier desc.its_vmapti_cmd.vpe = map->vpe; 1150d011e4e6SMarc Zyngier desc.its_vmapti_cmd.dev = dev; 1151d011e4e6SMarc Zyngier desc.its_vmapti_cmd.virt_id = map->vintid; 1152d011e4e6SMarc Zyngier desc.its_vmapti_cmd.event_id = id; 1153d011e4e6SMarc Zyngier desc.its_vmapti_cmd.db_enabled = map->db_enabled; 1154d011e4e6SMarc Zyngier 1155d011e4e6SMarc Zyngier its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc); 1156d011e4e6SMarc Zyngier } 1157d011e4e6SMarc Zyngier 1158d011e4e6SMarc Zyngier static void its_send_vmovi(struct its_device *dev, u32 id) 1159d011e4e6SMarc Zyngier { 1160c1d4d5cdSMarc Zyngier 
struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id); 1161d011e4e6SMarc Zyngier struct its_cmd_desc desc; 1162d011e4e6SMarc Zyngier 1163d011e4e6SMarc Zyngier desc.its_vmovi_cmd.vpe = map->vpe; 1164d011e4e6SMarc Zyngier desc.its_vmovi_cmd.dev = dev; 1165d011e4e6SMarc Zyngier desc.its_vmovi_cmd.event_id = id; 1166d011e4e6SMarc Zyngier desc.its_vmovi_cmd.db_enabled = map->db_enabled; 1167d011e4e6SMarc Zyngier 1168d011e4e6SMarc Zyngier its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc); 1169d011e4e6SMarc Zyngier } 1170d011e4e6SMarc Zyngier 117175fd951bSMarc Zyngier static void its_send_vmapp(struct its_node *its, 117275fd951bSMarc Zyngier struct its_vpe *vpe, bool valid) 1173eb78192bSMarc Zyngier { 1174eb78192bSMarc Zyngier struct its_cmd_desc desc; 1175eb78192bSMarc Zyngier 1176eb78192bSMarc Zyngier desc.its_vmapp_cmd.vpe = vpe; 1177eb78192bSMarc Zyngier desc.its_vmapp_cmd.valid = valid; 1178eb78192bSMarc Zyngier desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx]; 117975fd951bSMarc Zyngier 1180eb78192bSMarc Zyngier its_send_single_vcommand(its, its_build_vmapp_cmd, &desc); 1181eb78192bSMarc Zyngier } 1182eb78192bSMarc Zyngier 11833171a47aSMarc Zyngier static void its_send_vmovp(struct its_vpe *vpe) 11843171a47aSMarc Zyngier { 118584243125SZenghui Yu struct its_cmd_desc desc = {}; 11863171a47aSMarc Zyngier struct its_node *its; 11873171a47aSMarc Zyngier unsigned long flags; 11883171a47aSMarc Zyngier int col_id = vpe->col_idx; 11893171a47aSMarc Zyngier 11903171a47aSMarc Zyngier desc.its_vmovp_cmd.vpe = vpe; 11913171a47aSMarc Zyngier 11923171a47aSMarc Zyngier if (!its_list_map) { 11933171a47aSMarc Zyngier its = list_first_entry(&its_nodes, struct its_node, entry); 11943171a47aSMarc Zyngier desc.its_vmovp_cmd.col = &its->collections[col_id]; 11953171a47aSMarc Zyngier its_send_single_vcommand(its, its_build_vmovp_cmd, &desc); 11963171a47aSMarc Zyngier return; 11973171a47aSMarc Zyngier } 11983171a47aSMarc Zyngier 11993171a47aSMarc Zyngier /* 
12003171a47aSMarc Zyngier * Yet another marvel of the architecture. If using the 12013171a47aSMarc Zyngier * its_list "feature", we need to make sure that all ITSs 12023171a47aSMarc Zyngier * receive all VMOVP commands in the same order. The only way 12033171a47aSMarc Zyngier * to guarantee this is to make vmovp a serialization point. 12043171a47aSMarc Zyngier * 12053171a47aSMarc Zyngier * Wall <-- Head. 12063171a47aSMarc Zyngier */ 12073171a47aSMarc Zyngier raw_spin_lock_irqsave(&vmovp_lock, flags); 12083171a47aSMarc Zyngier 12093171a47aSMarc Zyngier desc.its_vmovp_cmd.seq_num = vmovp_seq_num++; 121084243125SZenghui Yu desc.its_vmovp_cmd.its_list = get_its_list(vpe->its_vm); 12113171a47aSMarc Zyngier 12123171a47aSMarc Zyngier /* Emit VMOVPs */ 12133171a47aSMarc Zyngier list_for_each_entry(its, &its_nodes, entry) { 12140dd57fedSMarc Zyngier if (!is_v4(its)) 12153171a47aSMarc Zyngier continue; 12163171a47aSMarc Zyngier 12172247e1bfSMarc Zyngier if (!vpe->its_vm->vlpi_count[its->list_nr]) 12182247e1bfSMarc Zyngier continue; 12192247e1bfSMarc Zyngier 12203171a47aSMarc Zyngier desc.its_vmovp_cmd.col = &its->collections[col_id]; 12213171a47aSMarc Zyngier its_send_single_vcommand(its, its_build_vmovp_cmd, &desc); 12223171a47aSMarc Zyngier } 12233171a47aSMarc Zyngier 12243171a47aSMarc Zyngier raw_spin_unlock_irqrestore(&vmovp_lock, flags); 12253171a47aSMarc Zyngier } 12263171a47aSMarc Zyngier 122740619a2eSMarc Zyngier static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe) 1228eb78192bSMarc Zyngier { 1229eb78192bSMarc Zyngier struct its_cmd_desc desc; 1230eb78192bSMarc Zyngier 1231eb78192bSMarc Zyngier desc.its_vinvall_cmd.vpe = vpe; 1232eb78192bSMarc Zyngier its_send_single_vcommand(its, its_build_vinvall_cmd, &desc); 1233eb78192bSMarc Zyngier } 1234eb78192bSMarc Zyngier 123528614696SMarc Zyngier static void its_send_vinv(struct its_device *dev, u32 event_id) 123628614696SMarc Zyngier { 123728614696SMarc Zyngier struct its_cmd_desc desc; 123828614696SMarc 
Zyngier 123928614696SMarc Zyngier /* 124028614696SMarc Zyngier * There is no real VINV command. This is just a normal INV, 124128614696SMarc Zyngier * with a VSYNC instead of a SYNC. 124228614696SMarc Zyngier */ 124328614696SMarc Zyngier desc.its_inv_cmd.dev = dev; 124428614696SMarc Zyngier desc.its_inv_cmd.event_id = event_id; 124528614696SMarc Zyngier 124628614696SMarc Zyngier its_send_single_vcommand(dev->its, its_build_vinv_cmd, &desc); 124728614696SMarc Zyngier } 124828614696SMarc Zyngier 1249ed0e4aa9SMarc Zyngier static void its_send_vint(struct its_device *dev, u32 event_id) 1250ed0e4aa9SMarc Zyngier { 1251ed0e4aa9SMarc Zyngier struct its_cmd_desc desc; 1252ed0e4aa9SMarc Zyngier 1253ed0e4aa9SMarc Zyngier /* 1254ed0e4aa9SMarc Zyngier * There is no real VINT command. This is just a normal INT, 1255ed0e4aa9SMarc Zyngier * with a VSYNC instead of a SYNC. 1256ed0e4aa9SMarc Zyngier */ 1257ed0e4aa9SMarc Zyngier desc.its_int_cmd.dev = dev; 1258ed0e4aa9SMarc Zyngier desc.its_int_cmd.event_id = event_id; 1259ed0e4aa9SMarc Zyngier 1260ed0e4aa9SMarc Zyngier its_send_single_vcommand(dev->its, its_build_vint_cmd, &desc); 1261ed0e4aa9SMarc Zyngier } 1262ed0e4aa9SMarc Zyngier 1263ed0e4aa9SMarc Zyngier static void its_send_vclear(struct its_device *dev, u32 event_id) 1264ed0e4aa9SMarc Zyngier { 1265ed0e4aa9SMarc Zyngier struct its_cmd_desc desc; 1266ed0e4aa9SMarc Zyngier 1267ed0e4aa9SMarc Zyngier /* 1268ed0e4aa9SMarc Zyngier * There is no real VCLEAR command. This is just a normal CLEAR, 1269ed0e4aa9SMarc Zyngier * with a VSYNC instead of a SYNC. 
1270ed0e4aa9SMarc Zyngier */ 1271ed0e4aa9SMarc Zyngier desc.its_clear_cmd.dev = dev; 1272ed0e4aa9SMarc Zyngier desc.its_clear_cmd.event_id = event_id; 1273ed0e4aa9SMarc Zyngier 1274ed0e4aa9SMarc Zyngier its_send_single_vcommand(dev->its, its_build_vclear_cmd, &desc); 1275ed0e4aa9SMarc Zyngier } 1276ed0e4aa9SMarc Zyngier 1277d97c97baSMarc Zyngier static void its_send_invdb(struct its_node *its, struct its_vpe *vpe) 1278d97c97baSMarc Zyngier { 1279d97c97baSMarc Zyngier struct its_cmd_desc desc; 1280d97c97baSMarc Zyngier 1281d97c97baSMarc Zyngier desc.its_invdb_cmd.vpe = vpe; 1282d97c97baSMarc Zyngier its_send_single_vcommand(its, its_build_invdb_cmd, &desc); 1283d97c97baSMarc Zyngier } 1284d97c97baSMarc Zyngier 1285c48ed51cSMarc Zyngier /* 1286c48ed51cSMarc Zyngier * irqchip functions - assumes MSI, mostly. 1287c48ed51cSMarc Zyngier */ 1288015ec038SMarc Zyngier static void lpi_write_config(struct irq_data *d, u8 clr, u8 set) 1289c48ed51cSMarc Zyngier { 1290c1d4d5cdSMarc Zyngier struct its_vlpi_map *map = get_vlpi_map(d); 1291015ec038SMarc Zyngier irq_hw_number_t hwirq; 1292e1a2e201SMarc Zyngier void *va; 1293adcdb94eSMarc Zyngier u8 *cfg; 1294c48ed51cSMarc Zyngier 1295c1d4d5cdSMarc Zyngier if (map) { 1296c1d4d5cdSMarc Zyngier va = page_address(map->vm->vprop_page); 1297d4d7b4adSMarc Zyngier hwirq = map->vintid; 1298d4d7b4adSMarc Zyngier 1299d4d7b4adSMarc Zyngier /* Remember the updated property */ 1300d4d7b4adSMarc Zyngier map->properties &= ~clr; 1301d4d7b4adSMarc Zyngier map->properties |= set | LPI_PROP_GROUP1; 1302015ec038SMarc Zyngier } else { 1303e1a2e201SMarc Zyngier va = gic_rdists->prop_table_va; 1304015ec038SMarc Zyngier hwirq = d->hwirq; 1305015ec038SMarc Zyngier } 1306adcdb94eSMarc Zyngier 1307e1a2e201SMarc Zyngier cfg = va + hwirq - 8192; 1308adcdb94eSMarc Zyngier *cfg &= ~clr; 1309015ec038SMarc Zyngier *cfg |= set | LPI_PROP_GROUP1; 1310c48ed51cSMarc Zyngier 1311c48ed51cSMarc Zyngier /* 1312c48ed51cSMarc Zyngier * Make the above write visible to the 
redistributors. 1313c48ed51cSMarc Zyngier * And yes, we're flushing exactly: One. Single. Byte. 1314c48ed51cSMarc Zyngier * Humpf... 1315c48ed51cSMarc Zyngier */ 1316c48ed51cSMarc Zyngier if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING) 1317328191c0SVladimir Murzin gic_flush_dcache_to_poc(cfg, sizeof(*cfg)); 1318c48ed51cSMarc Zyngier else 1319c48ed51cSMarc Zyngier dsb(ishst); 1320015ec038SMarc Zyngier } 1321015ec038SMarc Zyngier 13222f4f064bSMarc Zyngier static void wait_for_syncr(void __iomem *rdbase) 13232f4f064bSMarc Zyngier { 13242f4f064bSMarc Zyngier while (gic_read_lpir(rdbase + GICR_SYNCR) & 1) 13252f4f064bSMarc Zyngier cpu_relax(); 13262f4f064bSMarc Zyngier } 13272f4f064bSMarc Zyngier 1328425c09beSMarc Zyngier static void direct_lpi_inv(struct irq_data *d) 1329425c09beSMarc Zyngier { 1330f4a81f5aSMarc Zyngier struct its_vlpi_map *map = get_vlpi_map(d); 1331425c09beSMarc Zyngier void __iomem *rdbase; 1332f4a81f5aSMarc Zyngier u64 val; 1333f4a81f5aSMarc Zyngier 1334f4a81f5aSMarc Zyngier if (map) { 1335f4a81f5aSMarc Zyngier struct its_device *its_dev = irq_data_get_irq_chip_data(d); 1336f4a81f5aSMarc Zyngier 1337f4a81f5aSMarc Zyngier WARN_ON(!is_v4_1(its_dev->its)); 1338f4a81f5aSMarc Zyngier 1339f4a81f5aSMarc Zyngier val = GICR_INVLPIR_V; 1340f4a81f5aSMarc Zyngier val |= FIELD_PREP(GICR_INVLPIR_VPEID, map->vpe->vpe_id); 1341f4a81f5aSMarc Zyngier val |= FIELD_PREP(GICR_INVLPIR_INTID, map->vintid); 1342f4a81f5aSMarc Zyngier } else { 1343f4a81f5aSMarc Zyngier val = d->hwirq; 1344f4a81f5aSMarc Zyngier } 1345425c09beSMarc Zyngier 1346425c09beSMarc Zyngier /* Target the redistributor this LPI is currently routed to */ 1347f4a81f5aSMarc Zyngier rdbase = per_cpu_ptr(gic_rdists->rdist, irq_to_cpuid(d))->rd_base; 1348f4a81f5aSMarc Zyngier gic_write_lpir(val, rdbase + GICR_INVLPIR); 1349425c09beSMarc Zyngier 1350425c09beSMarc Zyngier wait_for_syncr(rdbase); 1351425c09beSMarc Zyngier } 1352425c09beSMarc Zyngier 1353015ec038SMarc Zyngier static void 
lpi_update_config(struct irq_data *d, u8 clr, u8 set) 1354015ec038SMarc Zyngier { 1355015ec038SMarc Zyngier struct its_device *its_dev = irq_data_get_irq_chip_data(d); 1356015ec038SMarc Zyngier 1357015ec038SMarc Zyngier lpi_write_config(d, clr, set); 1358f4a81f5aSMarc Zyngier if (gic_rdists->has_direct_lpi && 1359f4a81f5aSMarc Zyngier (is_v4_1(its_dev->its) || !irqd_is_forwarded_to_vcpu(d))) 1360425c09beSMarc Zyngier direct_lpi_inv(d); 136128614696SMarc Zyngier else if (!irqd_is_forwarded_to_vcpu(d)) 1362adcdb94eSMarc Zyngier its_send_inv(its_dev, its_get_event_id(d)); 136328614696SMarc Zyngier else 136428614696SMarc Zyngier its_send_vinv(its_dev, its_get_event_id(d)); 1365c48ed51cSMarc Zyngier } 1366c48ed51cSMarc Zyngier 1367015ec038SMarc Zyngier static void its_vlpi_set_doorbell(struct irq_data *d, bool enable) 1368015ec038SMarc Zyngier { 1369015ec038SMarc Zyngier struct its_device *its_dev = irq_data_get_irq_chip_data(d); 1370015ec038SMarc Zyngier u32 event = its_get_event_id(d); 1371c1d4d5cdSMarc Zyngier struct its_vlpi_map *map; 1372015ec038SMarc Zyngier 13733858d4dfSMarc Zyngier /* 13743858d4dfSMarc Zyngier * GICv4.1 does away with the per-LPI nonsense, nothing to do 13753858d4dfSMarc Zyngier * here. 13763858d4dfSMarc Zyngier */ 13773858d4dfSMarc Zyngier if (is_v4_1(its_dev->its)) 13783858d4dfSMarc Zyngier return; 13793858d4dfSMarc Zyngier 1380c1d4d5cdSMarc Zyngier map = dev_event_to_vlpi_map(its_dev, event); 1381c1d4d5cdSMarc Zyngier 1382c1d4d5cdSMarc Zyngier if (map->db_enabled == enable) 1383015ec038SMarc Zyngier return; 1384015ec038SMarc Zyngier 1385c1d4d5cdSMarc Zyngier map->db_enabled = enable; 1386015ec038SMarc Zyngier 1387015ec038SMarc Zyngier /* 1388015ec038SMarc Zyngier * More fun with the architecture: 1389015ec038SMarc Zyngier * 1390015ec038SMarc Zyngier * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI 1391015ec038SMarc Zyngier * value or to 1023, depending on the enable bit. 
But that 1392015ec038SMarc Zyngier * would be issueing a mapping for an /existing/ DevID+EventID 1393015ec038SMarc Zyngier * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI 1394015ec038SMarc Zyngier * to the /same/ vPE, using this opportunity to adjust the 1395015ec038SMarc Zyngier * doorbell. Mouahahahaha. We loves it, Precious. 1396015ec038SMarc Zyngier */ 1397015ec038SMarc Zyngier its_send_vmovi(its_dev, event); 1398c48ed51cSMarc Zyngier } 1399c48ed51cSMarc Zyngier 1400c48ed51cSMarc Zyngier static void its_mask_irq(struct irq_data *d) 1401c48ed51cSMarc Zyngier { 1402015ec038SMarc Zyngier if (irqd_is_forwarded_to_vcpu(d)) 1403015ec038SMarc Zyngier its_vlpi_set_doorbell(d, false); 1404015ec038SMarc Zyngier 1405adcdb94eSMarc Zyngier lpi_update_config(d, LPI_PROP_ENABLED, 0); 1406c48ed51cSMarc Zyngier } 1407c48ed51cSMarc Zyngier 1408c48ed51cSMarc Zyngier static void its_unmask_irq(struct irq_data *d) 1409c48ed51cSMarc Zyngier { 1410015ec038SMarc Zyngier if (irqd_is_forwarded_to_vcpu(d)) 1411015ec038SMarc Zyngier its_vlpi_set_doorbell(d, true); 1412015ec038SMarc Zyngier 1413adcdb94eSMarc Zyngier lpi_update_config(d, 0, LPI_PROP_ENABLED); 1414c48ed51cSMarc Zyngier } 1415c48ed51cSMarc Zyngier 1416c48ed51cSMarc Zyngier static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val, 1417c48ed51cSMarc Zyngier bool force) 1418c48ed51cSMarc Zyngier { 1419fbf8f40eSGanapatrao Kulkarni unsigned int cpu; 1420fbf8f40eSGanapatrao Kulkarni const struct cpumask *cpu_mask = cpu_online_mask; 1421c48ed51cSMarc Zyngier struct its_device *its_dev = irq_data_get_irq_chip_data(d); 1422c48ed51cSMarc Zyngier struct its_collection *target_col; 1423c48ed51cSMarc Zyngier u32 id = its_get_event_id(d); 1424c48ed51cSMarc Zyngier 1425015ec038SMarc Zyngier /* A forwarded interrupt should use irq_set_vcpu_affinity */ 1426015ec038SMarc Zyngier if (irqd_is_forwarded_to_vcpu(d)) 1427015ec038SMarc Zyngier return -EINVAL; 1428015ec038SMarc Zyngier 1429fbf8f40eSGanapatrao 
Kulkarni /* lpi cannot be routed to a redistributor that is on a foreign node */ 1430fbf8f40eSGanapatrao Kulkarni if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) { 1431fbf8f40eSGanapatrao Kulkarni if (its_dev->its->numa_node >= 0) { 1432fbf8f40eSGanapatrao Kulkarni cpu_mask = cpumask_of_node(its_dev->its->numa_node); 1433fbf8f40eSGanapatrao Kulkarni if (!cpumask_intersects(mask_val, cpu_mask)) 1434fbf8f40eSGanapatrao Kulkarni return -EINVAL; 1435fbf8f40eSGanapatrao Kulkarni } 1436fbf8f40eSGanapatrao Kulkarni } 1437fbf8f40eSGanapatrao Kulkarni 1438fbf8f40eSGanapatrao Kulkarni cpu = cpumask_any_and(mask_val, cpu_mask); 1439fbf8f40eSGanapatrao Kulkarni 1440c48ed51cSMarc Zyngier if (cpu >= nr_cpu_ids) 1441c48ed51cSMarc Zyngier return -EINVAL; 1442c48ed51cSMarc Zyngier 14438b8d94a7SMaJun /* don't set the affinity when the target cpu is same as current one */ 14448b8d94a7SMaJun if (cpu != its_dev->event_map.col_map[id]) { 1445c48ed51cSMarc Zyngier target_col = &its_dev->its->collections[cpu]; 1446c48ed51cSMarc Zyngier its_send_movi(its_dev, target_col, id); 1447591e5becSMarc Zyngier its_dev->event_map.col_map[id] = cpu; 14480d224d35SMarc Zyngier irq_data_update_effective_affinity(d, cpumask_of(cpu)); 14498b8d94a7SMaJun } 1450c48ed51cSMarc Zyngier 1451c48ed51cSMarc Zyngier return IRQ_SET_MASK_OK_DONE; 1452c48ed51cSMarc Zyngier } 1453c48ed51cSMarc Zyngier 1454558b0165SArd Biesheuvel static u64 its_irq_get_msi_base(struct its_device *its_dev) 1455558b0165SArd Biesheuvel { 1456558b0165SArd Biesheuvel struct its_node *its = its_dev->its; 1457558b0165SArd Biesheuvel 1458558b0165SArd Biesheuvel return its->phys_base + GITS_TRANSLATER; 1459558b0165SArd Biesheuvel } 1460558b0165SArd Biesheuvel 1461b48ac83dSMarc Zyngier static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg) 1462b48ac83dSMarc Zyngier { 1463b48ac83dSMarc Zyngier struct its_device *its_dev = irq_data_get_irq_chip_data(d); 1464b48ac83dSMarc Zyngier struct its_node *its; 
1465b48ac83dSMarc Zyngier u64 addr; 1466b48ac83dSMarc Zyngier 1467b48ac83dSMarc Zyngier its = its_dev->its; 1468558b0165SArd Biesheuvel addr = its->get_msi_base(its_dev); 1469b48ac83dSMarc Zyngier 1470b11283ebSVladimir Murzin msg->address_lo = lower_32_bits(addr); 1471b11283ebSVladimir Murzin msg->address_hi = upper_32_bits(addr); 1472b48ac83dSMarc Zyngier msg->data = its_get_event_id(d); 147344bb7e24SRobin Murphy 147435ae7df2SJulien Grall iommu_dma_compose_msi_msg(irq_data_get_msi_desc(d), msg); 1475b48ac83dSMarc Zyngier } 1476b48ac83dSMarc Zyngier 14778d85dcedSMarc Zyngier static int its_irq_set_irqchip_state(struct irq_data *d, 14788d85dcedSMarc Zyngier enum irqchip_irq_state which, 14798d85dcedSMarc Zyngier bool state) 14808d85dcedSMarc Zyngier { 14818d85dcedSMarc Zyngier struct its_device *its_dev = irq_data_get_irq_chip_data(d); 14828d85dcedSMarc Zyngier u32 event = its_get_event_id(d); 14838d85dcedSMarc Zyngier 14848d85dcedSMarc Zyngier if (which != IRQCHIP_STATE_PENDING) 14858d85dcedSMarc Zyngier return -EINVAL; 14868d85dcedSMarc Zyngier 1487ed0e4aa9SMarc Zyngier if (irqd_is_forwarded_to_vcpu(d)) { 1488ed0e4aa9SMarc Zyngier if (state) 1489ed0e4aa9SMarc Zyngier its_send_vint(its_dev, event); 1490ed0e4aa9SMarc Zyngier else 1491ed0e4aa9SMarc Zyngier its_send_vclear(its_dev, event); 1492ed0e4aa9SMarc Zyngier } else { 14938d85dcedSMarc Zyngier if (state) 14948d85dcedSMarc Zyngier its_send_int(its_dev, event); 14958d85dcedSMarc Zyngier else 14968d85dcedSMarc Zyngier its_send_clear(its_dev, event); 1497ed0e4aa9SMarc Zyngier } 14988d85dcedSMarc Zyngier 14998d85dcedSMarc Zyngier return 0; 15008d85dcedSMarc Zyngier } 15018d85dcedSMarc Zyngier 15022247e1bfSMarc Zyngier static void its_map_vm(struct its_node *its, struct its_vm *vm) 15032247e1bfSMarc Zyngier { 15042247e1bfSMarc Zyngier unsigned long flags; 15052247e1bfSMarc Zyngier 15062247e1bfSMarc Zyngier /* Not using the ITS list? Everything is always mapped. 
*/ 15072247e1bfSMarc Zyngier if (!its_list_map) 15082247e1bfSMarc Zyngier return; 15092247e1bfSMarc Zyngier 15102247e1bfSMarc Zyngier raw_spin_lock_irqsave(&vmovp_lock, flags); 15112247e1bfSMarc Zyngier 15122247e1bfSMarc Zyngier /* 15132247e1bfSMarc Zyngier * If the VM wasn't mapped yet, iterate over the vpes and get 15142247e1bfSMarc Zyngier * them mapped now. 15152247e1bfSMarc Zyngier */ 15162247e1bfSMarc Zyngier vm->vlpi_count[its->list_nr]++; 15172247e1bfSMarc Zyngier 15182247e1bfSMarc Zyngier if (vm->vlpi_count[its->list_nr] == 1) { 15192247e1bfSMarc Zyngier int i; 15202247e1bfSMarc Zyngier 15212247e1bfSMarc Zyngier for (i = 0; i < vm->nr_vpes; i++) { 15222247e1bfSMarc Zyngier struct its_vpe *vpe = vm->vpes[i]; 152344c4c25eSMarc Zyngier struct irq_data *d = irq_get_irq_data(vpe->irq); 15242247e1bfSMarc Zyngier 15252247e1bfSMarc Zyngier /* Map the VPE to the first possible CPU */ 15262247e1bfSMarc Zyngier vpe->col_idx = cpumask_first(cpu_online_mask); 15272247e1bfSMarc Zyngier its_send_vmapp(its, vpe, true); 15282247e1bfSMarc Zyngier its_send_vinvall(its, vpe); 152944c4c25eSMarc Zyngier irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx)); 15302247e1bfSMarc Zyngier } 15312247e1bfSMarc Zyngier } 15322247e1bfSMarc Zyngier 15332247e1bfSMarc Zyngier raw_spin_unlock_irqrestore(&vmovp_lock, flags); 15342247e1bfSMarc Zyngier } 15352247e1bfSMarc Zyngier 15362247e1bfSMarc Zyngier static void its_unmap_vm(struct its_node *its, struct its_vm *vm) 15372247e1bfSMarc Zyngier { 15382247e1bfSMarc Zyngier unsigned long flags; 15392247e1bfSMarc Zyngier 15402247e1bfSMarc Zyngier /* Not using the ITS list? Everything is always mapped. 
*/ 15412247e1bfSMarc Zyngier if (!its_list_map) 15422247e1bfSMarc Zyngier return; 15432247e1bfSMarc Zyngier 15442247e1bfSMarc Zyngier raw_spin_lock_irqsave(&vmovp_lock, flags); 15452247e1bfSMarc Zyngier 15462247e1bfSMarc Zyngier if (!--vm->vlpi_count[its->list_nr]) { 15472247e1bfSMarc Zyngier int i; 15482247e1bfSMarc Zyngier 15492247e1bfSMarc Zyngier for (i = 0; i < vm->nr_vpes; i++) 15502247e1bfSMarc Zyngier its_send_vmapp(its, vm->vpes[i], false); 15512247e1bfSMarc Zyngier } 15522247e1bfSMarc Zyngier 15532247e1bfSMarc Zyngier raw_spin_unlock_irqrestore(&vmovp_lock, flags); 15542247e1bfSMarc Zyngier } 15552247e1bfSMarc Zyngier 1556d011e4e6SMarc Zyngier static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info) 1557d011e4e6SMarc Zyngier { 1558d011e4e6SMarc Zyngier struct its_device *its_dev = irq_data_get_irq_chip_data(d); 1559d011e4e6SMarc Zyngier u32 event = its_get_event_id(d); 1560d011e4e6SMarc Zyngier int ret = 0; 1561d011e4e6SMarc Zyngier 1562d011e4e6SMarc Zyngier if (!info->map) 1563d011e4e6SMarc Zyngier return -EINVAL; 1564d011e4e6SMarc Zyngier 156511635fa2SMarc Zyngier raw_spin_lock(&its_dev->event_map.vlpi_lock); 1566d011e4e6SMarc Zyngier 1567d011e4e6SMarc Zyngier if (!its_dev->event_map.vm) { 1568d011e4e6SMarc Zyngier struct its_vlpi_map *maps; 1569d011e4e6SMarc Zyngier 15706396bb22SKees Cook maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps), 157111635fa2SMarc Zyngier GFP_ATOMIC); 1572d011e4e6SMarc Zyngier if (!maps) { 1573d011e4e6SMarc Zyngier ret = -ENOMEM; 1574d011e4e6SMarc Zyngier goto out; 1575d011e4e6SMarc Zyngier } 1576d011e4e6SMarc Zyngier 1577d011e4e6SMarc Zyngier its_dev->event_map.vm = info->map->vm; 1578d011e4e6SMarc Zyngier its_dev->event_map.vlpi_maps = maps; 1579d011e4e6SMarc Zyngier } else if (its_dev->event_map.vm != info->map->vm) { 1580d011e4e6SMarc Zyngier ret = -EINVAL; 1581d011e4e6SMarc Zyngier goto out; 1582d011e4e6SMarc Zyngier } 1583d011e4e6SMarc Zyngier 1584d011e4e6SMarc Zyngier /* Get our private copy of the 
mapping information */ 1585d011e4e6SMarc Zyngier its_dev->event_map.vlpi_maps[event] = *info->map; 1586d011e4e6SMarc Zyngier 1587d011e4e6SMarc Zyngier if (irqd_is_forwarded_to_vcpu(d)) { 1588d011e4e6SMarc Zyngier /* Already mapped, move it around */ 1589d011e4e6SMarc Zyngier its_send_vmovi(its_dev, event); 1590d011e4e6SMarc Zyngier } else { 15912247e1bfSMarc Zyngier /* Ensure all the VPEs are mapped on this ITS */ 15922247e1bfSMarc Zyngier its_map_vm(its_dev->its, info->map->vm); 15932247e1bfSMarc Zyngier 1594d4d7b4adSMarc Zyngier /* 1595d4d7b4adSMarc Zyngier * Flag the interrupt as forwarded so that we can 1596d4d7b4adSMarc Zyngier * start poking the virtual property table. 1597d4d7b4adSMarc Zyngier */ 1598d4d7b4adSMarc Zyngier irqd_set_forwarded_to_vcpu(d); 1599d4d7b4adSMarc Zyngier 1600d4d7b4adSMarc Zyngier /* Write out the property to the prop table */ 1601d4d7b4adSMarc Zyngier lpi_write_config(d, 0xff, info->map->properties); 1602d4d7b4adSMarc Zyngier 1603d011e4e6SMarc Zyngier /* Drop the physical mapping */ 1604d011e4e6SMarc Zyngier its_send_discard(its_dev, event); 1605d011e4e6SMarc Zyngier 1606d011e4e6SMarc Zyngier /* and install the virtual one */ 1607d011e4e6SMarc Zyngier its_send_vmapti(its_dev, event); 1608d011e4e6SMarc Zyngier 1609d011e4e6SMarc Zyngier /* Increment the number of VLPIs */ 1610d011e4e6SMarc Zyngier its_dev->event_map.nr_vlpis++; 1611d011e4e6SMarc Zyngier } 1612d011e4e6SMarc Zyngier 1613d011e4e6SMarc Zyngier out: 161411635fa2SMarc Zyngier raw_spin_unlock(&its_dev->event_map.vlpi_lock); 1615d011e4e6SMarc Zyngier return ret; 1616d011e4e6SMarc Zyngier } 1617d011e4e6SMarc Zyngier 1618d011e4e6SMarc Zyngier static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info) 1619d011e4e6SMarc Zyngier { 1620d011e4e6SMarc Zyngier struct its_device *its_dev = irq_data_get_irq_chip_data(d); 1621046b5054SMarc Zyngier struct its_vlpi_map *map; 1622d011e4e6SMarc Zyngier int ret = 0; 1623d011e4e6SMarc Zyngier 162411635fa2SMarc Zyngier 
raw_spin_lock(&its_dev->event_map.vlpi_lock); 1625d011e4e6SMarc Zyngier 1626046b5054SMarc Zyngier map = get_vlpi_map(d); 1627046b5054SMarc Zyngier 1628046b5054SMarc Zyngier if (!its_dev->event_map.vm || !map) { 1629d011e4e6SMarc Zyngier ret = -EINVAL; 1630d011e4e6SMarc Zyngier goto out; 1631d011e4e6SMarc Zyngier } 1632d011e4e6SMarc Zyngier 1633d011e4e6SMarc Zyngier /* Copy our mapping information to the incoming request */ 1634c1d4d5cdSMarc Zyngier *info->map = *map; 1635d011e4e6SMarc Zyngier 1636d011e4e6SMarc Zyngier out: 163711635fa2SMarc Zyngier raw_spin_unlock(&its_dev->event_map.vlpi_lock); 1638d011e4e6SMarc Zyngier return ret; 1639d011e4e6SMarc Zyngier } 1640d011e4e6SMarc Zyngier 1641d011e4e6SMarc Zyngier static int its_vlpi_unmap(struct irq_data *d) 1642d011e4e6SMarc Zyngier { 1643d011e4e6SMarc Zyngier struct its_device *its_dev = irq_data_get_irq_chip_data(d); 1644d011e4e6SMarc Zyngier u32 event = its_get_event_id(d); 1645d011e4e6SMarc Zyngier int ret = 0; 1646d011e4e6SMarc Zyngier 164711635fa2SMarc Zyngier raw_spin_lock(&its_dev->event_map.vlpi_lock); 1648d011e4e6SMarc Zyngier 1649d011e4e6SMarc Zyngier if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) { 1650d011e4e6SMarc Zyngier ret = -EINVAL; 1651d011e4e6SMarc Zyngier goto out; 1652d011e4e6SMarc Zyngier } 1653d011e4e6SMarc Zyngier 1654d011e4e6SMarc Zyngier /* Drop the virtual mapping */ 1655d011e4e6SMarc Zyngier its_send_discard(its_dev, event); 1656d011e4e6SMarc Zyngier 1657d011e4e6SMarc Zyngier /* and restore the physical one */ 1658d011e4e6SMarc Zyngier irqd_clr_forwarded_to_vcpu(d); 1659d011e4e6SMarc Zyngier its_send_mapti(its_dev, d->hwirq, event); 1660d011e4e6SMarc Zyngier lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO | 1661d011e4e6SMarc Zyngier LPI_PROP_ENABLED | 1662d011e4e6SMarc Zyngier LPI_PROP_GROUP1)); 1663d011e4e6SMarc Zyngier 16642247e1bfSMarc Zyngier /* Potentially unmap the VM from this ITS */ 16652247e1bfSMarc Zyngier its_unmap_vm(its_dev->its, its_dev->event_map.vm); 
16662247e1bfSMarc Zyngier 1667d011e4e6SMarc Zyngier /* 1668d011e4e6SMarc Zyngier * Drop the refcount and make the device available again if 1669d011e4e6SMarc Zyngier * this was the last VLPI. 1670d011e4e6SMarc Zyngier */ 1671d011e4e6SMarc Zyngier if (!--its_dev->event_map.nr_vlpis) { 1672d011e4e6SMarc Zyngier its_dev->event_map.vm = NULL; 1673d011e4e6SMarc Zyngier kfree(its_dev->event_map.vlpi_maps); 1674d011e4e6SMarc Zyngier } 1675d011e4e6SMarc Zyngier 1676d011e4e6SMarc Zyngier out: 167711635fa2SMarc Zyngier raw_spin_unlock(&its_dev->event_map.vlpi_lock); 1678d011e4e6SMarc Zyngier return ret; 1679d011e4e6SMarc Zyngier } 1680d011e4e6SMarc Zyngier 1681015ec038SMarc Zyngier static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info) 1682015ec038SMarc Zyngier { 1683015ec038SMarc Zyngier struct its_device *its_dev = irq_data_get_irq_chip_data(d); 1684015ec038SMarc Zyngier 1685015ec038SMarc Zyngier if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) 1686015ec038SMarc Zyngier return -EINVAL; 1687015ec038SMarc Zyngier 1688015ec038SMarc Zyngier if (info->cmd_type == PROP_UPDATE_AND_INV_VLPI) 1689015ec038SMarc Zyngier lpi_update_config(d, 0xff, info->config); 1690015ec038SMarc Zyngier else 1691015ec038SMarc Zyngier lpi_write_config(d, 0xff, info->config); 1692015ec038SMarc Zyngier its_vlpi_set_doorbell(d, !!(info->config & LPI_PROP_ENABLED)); 1693015ec038SMarc Zyngier 1694015ec038SMarc Zyngier return 0; 1695015ec038SMarc Zyngier } 1696015ec038SMarc Zyngier 1697c808eea8SMarc Zyngier static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) 1698c808eea8SMarc Zyngier { 1699c808eea8SMarc Zyngier struct its_device *its_dev = irq_data_get_irq_chip_data(d); 1700c808eea8SMarc Zyngier struct its_cmd_info *info = vcpu_info; 1701c808eea8SMarc Zyngier 1702c808eea8SMarc Zyngier /* Need a v4 ITS */ 17030dd57fedSMarc Zyngier if (!is_v4(its_dev->its)) 1704c808eea8SMarc Zyngier return -EINVAL; 1705c808eea8SMarc Zyngier 1706d011e4e6SMarc Zyngier /* 
Unmap request? */ 1707d011e4e6SMarc Zyngier if (!info) 1708d011e4e6SMarc Zyngier return its_vlpi_unmap(d); 1709d011e4e6SMarc Zyngier 1710c808eea8SMarc Zyngier switch (info->cmd_type) { 1711c808eea8SMarc Zyngier case MAP_VLPI: 1712d011e4e6SMarc Zyngier return its_vlpi_map(d, info); 1713c808eea8SMarc Zyngier 1714c808eea8SMarc Zyngier case GET_VLPI: 1715d011e4e6SMarc Zyngier return its_vlpi_get(d, info); 1716c808eea8SMarc Zyngier 1717c808eea8SMarc Zyngier case PROP_UPDATE_VLPI: 1718c808eea8SMarc Zyngier case PROP_UPDATE_AND_INV_VLPI: 1719015ec038SMarc Zyngier return its_vlpi_prop_update(d, info); 1720c808eea8SMarc Zyngier 1721c808eea8SMarc Zyngier default: 1722c808eea8SMarc Zyngier return -EINVAL; 1723c808eea8SMarc Zyngier } 1724c808eea8SMarc Zyngier } 1725c808eea8SMarc Zyngier 1726c48ed51cSMarc Zyngier static struct irq_chip its_irq_chip = { 1727c48ed51cSMarc Zyngier .name = "ITS", 1728c48ed51cSMarc Zyngier .irq_mask = its_mask_irq, 1729c48ed51cSMarc Zyngier .irq_unmask = its_unmask_irq, 1730004fa08dSAshok Kumar .irq_eoi = irq_chip_eoi_parent, 1731c48ed51cSMarc Zyngier .irq_set_affinity = its_set_affinity, 1732b48ac83dSMarc Zyngier .irq_compose_msi_msg = its_irq_compose_msi_msg, 17338d85dcedSMarc Zyngier .irq_set_irqchip_state = its_irq_set_irqchip_state, 1734c808eea8SMarc Zyngier .irq_set_vcpu_affinity = its_irq_set_vcpu_affinity, 1735b48ac83dSMarc Zyngier }; 1736b48ac83dSMarc Zyngier 1737880cb3cdSMarc Zyngier 1738bf9529f8SMarc Zyngier /* 1739bf9529f8SMarc Zyngier * How we allocate LPIs: 1740bf9529f8SMarc Zyngier * 1741880cb3cdSMarc Zyngier * lpi_range_list contains ranges of LPIs that are to available to 1742880cb3cdSMarc Zyngier * allocate from. To allocate LPIs, just pick the first range that 1743880cb3cdSMarc Zyngier * fits the required allocation, and reduce it by the required 1744880cb3cdSMarc Zyngier * amount. Once empty, remove the range from the list. 
1745bf9529f8SMarc Zyngier * 1746880cb3cdSMarc Zyngier * To free a range of LPIs, add a free range to the list, sort it and 1747880cb3cdSMarc Zyngier * merge the result if the new range happens to be adjacent to an 1748880cb3cdSMarc Zyngier * already free block. 1749880cb3cdSMarc Zyngier * 1750880cb3cdSMarc Zyngier * The consequence of the above is that allocation is cost is low, but 1751880cb3cdSMarc Zyngier * freeing is expensive. We assumes that freeing rarely occurs. 1752880cb3cdSMarc Zyngier */ 17534cb205c0SJia He #define ITS_MAX_LPI_NRBITS 16 /* 64K LPIs */ 1754880cb3cdSMarc Zyngier 1755880cb3cdSMarc Zyngier static DEFINE_MUTEX(lpi_range_lock); 1756880cb3cdSMarc Zyngier static LIST_HEAD(lpi_range_list); 1757bf9529f8SMarc Zyngier 1758880cb3cdSMarc Zyngier struct lpi_range { 1759880cb3cdSMarc Zyngier struct list_head entry; 1760880cb3cdSMarc Zyngier u32 base_id; 1761880cb3cdSMarc Zyngier u32 span; 1762880cb3cdSMarc Zyngier }; 1763880cb3cdSMarc Zyngier 1764880cb3cdSMarc Zyngier static struct lpi_range *mk_lpi_range(u32 base, u32 span) 1765bf9529f8SMarc Zyngier { 1766880cb3cdSMarc Zyngier struct lpi_range *range; 1767880cb3cdSMarc Zyngier 17681c73fac5SRasmus Villemoes range = kmalloc(sizeof(*range), GFP_KERNEL); 1769880cb3cdSMarc Zyngier if (range) { 1770880cb3cdSMarc Zyngier range->base_id = base; 1771880cb3cdSMarc Zyngier range->span = span; 1772bf9529f8SMarc Zyngier } 1773bf9529f8SMarc Zyngier 1774880cb3cdSMarc Zyngier return range; 1775880cb3cdSMarc Zyngier } 1776880cb3cdSMarc Zyngier 1777880cb3cdSMarc Zyngier static int alloc_lpi_range(u32 nr_lpis, u32 *base) 1778880cb3cdSMarc Zyngier { 1779880cb3cdSMarc Zyngier struct lpi_range *range, *tmp; 1780880cb3cdSMarc Zyngier int err = -ENOSPC; 1781880cb3cdSMarc Zyngier 1782880cb3cdSMarc Zyngier mutex_lock(&lpi_range_lock); 1783880cb3cdSMarc Zyngier 1784880cb3cdSMarc Zyngier list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) { 1785880cb3cdSMarc Zyngier if (range->span >= nr_lpis) { 1786880cb3cdSMarc Zyngier 
*base = range->base_id; 1787880cb3cdSMarc Zyngier range->base_id += nr_lpis; 1788880cb3cdSMarc Zyngier range->span -= nr_lpis; 1789880cb3cdSMarc Zyngier 1790880cb3cdSMarc Zyngier if (range->span == 0) { 1791880cb3cdSMarc Zyngier list_del(&range->entry); 1792880cb3cdSMarc Zyngier kfree(range); 1793880cb3cdSMarc Zyngier } 1794880cb3cdSMarc Zyngier 1795880cb3cdSMarc Zyngier err = 0; 1796880cb3cdSMarc Zyngier break; 1797880cb3cdSMarc Zyngier } 1798880cb3cdSMarc Zyngier } 1799880cb3cdSMarc Zyngier 1800880cb3cdSMarc Zyngier mutex_unlock(&lpi_range_lock); 1801880cb3cdSMarc Zyngier 1802880cb3cdSMarc Zyngier pr_debug("ITS: alloc %u:%u\n", *base, nr_lpis); 1803880cb3cdSMarc Zyngier return err; 1804880cb3cdSMarc Zyngier } 1805880cb3cdSMarc Zyngier 180612eade12SRasmus Villemoes static void merge_lpi_ranges(struct lpi_range *a, struct lpi_range *b) 180712eade12SRasmus Villemoes { 180812eade12SRasmus Villemoes if (&a->entry == &lpi_range_list || &b->entry == &lpi_range_list) 180912eade12SRasmus Villemoes return; 181012eade12SRasmus Villemoes if (a->base_id + a->span != b->base_id) 181112eade12SRasmus Villemoes return; 181212eade12SRasmus Villemoes b->base_id = a->base_id; 181312eade12SRasmus Villemoes b->span += a->span; 181412eade12SRasmus Villemoes list_del(&a->entry); 181512eade12SRasmus Villemoes kfree(a); 181612eade12SRasmus Villemoes } 181712eade12SRasmus Villemoes 1818880cb3cdSMarc Zyngier static int free_lpi_range(u32 base, u32 nr_lpis) 1819880cb3cdSMarc Zyngier { 182012eade12SRasmus Villemoes struct lpi_range *new, *old; 1821880cb3cdSMarc Zyngier 1822880cb3cdSMarc Zyngier new = mk_lpi_range(base, nr_lpis); 1823b31a3838SRasmus Villemoes if (!new) 1824b31a3838SRasmus Villemoes return -ENOMEM; 1825880cb3cdSMarc Zyngier 1826880cb3cdSMarc Zyngier mutex_lock(&lpi_range_lock); 1827880cb3cdSMarc Zyngier 182812eade12SRasmus Villemoes list_for_each_entry_reverse(old, &lpi_range_list, entry) { 182912eade12SRasmus Villemoes if (old->base_id < base) 183012eade12SRasmus Villemoes 
break; 1831880cb3cdSMarc Zyngier } 183212eade12SRasmus Villemoes /* 183312eade12SRasmus Villemoes * old is the last element with ->base_id smaller than base, 183412eade12SRasmus Villemoes * so new goes right after it. If there are no elements with 183512eade12SRasmus Villemoes * ->base_id smaller than base, &old->entry ends up pointing 183612eade12SRasmus Villemoes * at the head of the list, and inserting new it the start of 183712eade12SRasmus Villemoes * the list is the right thing to do in that case as well. 183812eade12SRasmus Villemoes */ 183912eade12SRasmus Villemoes list_add(&new->entry, &old->entry); 184012eade12SRasmus Villemoes /* 184112eade12SRasmus Villemoes * Now check if we can merge with the preceding and/or 184212eade12SRasmus Villemoes * following ranges. 184312eade12SRasmus Villemoes */ 184412eade12SRasmus Villemoes merge_lpi_ranges(old, new); 184512eade12SRasmus Villemoes merge_lpi_ranges(new, list_next_entry(new, entry)); 1846880cb3cdSMarc Zyngier 1847880cb3cdSMarc Zyngier mutex_unlock(&lpi_range_lock); 1848b31a3838SRasmus Villemoes return 0; 1849bf9529f8SMarc Zyngier } 1850bf9529f8SMarc Zyngier 185104a0e4deSTomasz Nowicki static int __init its_lpi_init(u32 id_bits) 1852bf9529f8SMarc Zyngier { 1853880cb3cdSMarc Zyngier u32 lpis = (1UL << id_bits) - 8192; 185412b2905aSMarc Zyngier u32 numlpis; 1855880cb3cdSMarc Zyngier int err; 1856bf9529f8SMarc Zyngier 185712b2905aSMarc Zyngier numlpis = 1UL << GICD_TYPER_NUM_LPIS(gic_rdists->gicd_typer); 185812b2905aSMarc Zyngier 185912b2905aSMarc Zyngier if (numlpis > 2 && !WARN_ON(numlpis > lpis)) { 186012b2905aSMarc Zyngier lpis = numlpis; 186112b2905aSMarc Zyngier pr_info("ITS: Using hypervisor restricted LPI range [%u]\n", 186212b2905aSMarc Zyngier lpis); 186312b2905aSMarc Zyngier } 186412b2905aSMarc Zyngier 1865880cb3cdSMarc Zyngier /* 1866880cb3cdSMarc Zyngier * Initializing the allocator is just the same as freeing the 1867880cb3cdSMarc Zyngier * full range of LPIs. 
1868880cb3cdSMarc Zyngier */ 1869880cb3cdSMarc Zyngier err = free_lpi_range(8192, lpis); 1870880cb3cdSMarc Zyngier pr_debug("ITS: Allocator initialized for %u LPIs\n", lpis); 1871880cb3cdSMarc Zyngier return err; 1872bf9529f8SMarc Zyngier } 1873bf9529f8SMarc Zyngier 187438dd7c49SMarc Zyngier static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids) 1875bf9529f8SMarc Zyngier { 1876bf9529f8SMarc Zyngier unsigned long *bitmap = NULL; 1877880cb3cdSMarc Zyngier int err = 0; 1878bf9529f8SMarc Zyngier 1879bf9529f8SMarc Zyngier do { 188038dd7c49SMarc Zyngier err = alloc_lpi_range(nr_irqs, base); 1881880cb3cdSMarc Zyngier if (!err) 1882bf9529f8SMarc Zyngier break; 1883bf9529f8SMarc Zyngier 188438dd7c49SMarc Zyngier nr_irqs /= 2; 188538dd7c49SMarc Zyngier } while (nr_irqs > 0); 1886bf9529f8SMarc Zyngier 188745725e0fSMarc Zyngier if (!nr_irqs) 188845725e0fSMarc Zyngier err = -ENOSPC; 188945725e0fSMarc Zyngier 1890880cb3cdSMarc Zyngier if (err) 1891bf9529f8SMarc Zyngier goto out; 1892bf9529f8SMarc Zyngier 189338dd7c49SMarc Zyngier bitmap = kcalloc(BITS_TO_LONGS(nr_irqs), sizeof (long), GFP_ATOMIC); 1894bf9529f8SMarc Zyngier if (!bitmap) 1895bf9529f8SMarc Zyngier goto out; 1896bf9529f8SMarc Zyngier 189738dd7c49SMarc Zyngier *nr_ids = nr_irqs; 1898bf9529f8SMarc Zyngier 1899bf9529f8SMarc Zyngier out: 1900c8415b94SMarc Zyngier if (!bitmap) 1901c8415b94SMarc Zyngier *base = *nr_ids = 0; 1902c8415b94SMarc Zyngier 1903bf9529f8SMarc Zyngier return bitmap; 1904bf9529f8SMarc Zyngier } 1905bf9529f8SMarc Zyngier 190638dd7c49SMarc Zyngier static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids) 1907bf9529f8SMarc Zyngier { 1908880cb3cdSMarc Zyngier WARN_ON(free_lpi_range(base, nr_ids)); 1909cf2be8baSMarc Zyngier kfree(bitmap); 1910bf9529f8SMarc Zyngier } 19111ac19ca6SMarc Zyngier 1912053be485SMarc Zyngier static void gic_reset_prop_table(void *va) 1913053be485SMarc Zyngier { 1914053be485SMarc Zyngier /* Priority 0xa0, Group-1, disabled */ 1915053be485SMarc 
Zyngier memset(va, LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1, LPI_PROPBASE_SZ); 1916053be485SMarc Zyngier 1917053be485SMarc Zyngier /* Make sure the GIC will observe the written configuration */ 1918053be485SMarc Zyngier gic_flush_dcache_to_poc(va, LPI_PROPBASE_SZ); 1919053be485SMarc Zyngier } 1920053be485SMarc Zyngier 19210e5ccf91SMarc Zyngier static struct page *its_allocate_prop_table(gfp_t gfp_flags) 19220e5ccf91SMarc Zyngier { 19230e5ccf91SMarc Zyngier struct page *prop_page; 19241ac19ca6SMarc Zyngier 19250e5ccf91SMarc Zyngier prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ)); 19260e5ccf91SMarc Zyngier if (!prop_page) 19270e5ccf91SMarc Zyngier return NULL; 19280e5ccf91SMarc Zyngier 1929053be485SMarc Zyngier gic_reset_prop_table(page_address(prop_page)); 19300e5ccf91SMarc Zyngier 19310e5ccf91SMarc Zyngier return prop_page; 19320e5ccf91SMarc Zyngier } 19330e5ccf91SMarc Zyngier 19347d75bbb4SMarc Zyngier static void its_free_prop_table(struct page *prop_page) 19357d75bbb4SMarc Zyngier { 19367d75bbb4SMarc Zyngier free_pages((unsigned long)page_address(prop_page), 19377d75bbb4SMarc Zyngier get_order(LPI_PROPBASE_SZ)); 19387d75bbb4SMarc Zyngier } 19391ac19ca6SMarc Zyngier 19405e2c9f9aSMarc Zyngier static bool gic_check_reserved_range(phys_addr_t addr, unsigned long size) 19415e2c9f9aSMarc Zyngier { 19425e2c9f9aSMarc Zyngier phys_addr_t start, end, addr_end; 19435e2c9f9aSMarc Zyngier u64 i; 19445e2c9f9aSMarc Zyngier 19455e2c9f9aSMarc Zyngier /* 19465e2c9f9aSMarc Zyngier * We don't bother checking for a kdump kernel as by 19475e2c9f9aSMarc Zyngier * construction, the LPI tables are out of this kernel's 19485e2c9f9aSMarc Zyngier * memory map. 
19495e2c9f9aSMarc Zyngier */ 19505e2c9f9aSMarc Zyngier if (is_kdump_kernel()) 19515e2c9f9aSMarc Zyngier return true; 19525e2c9f9aSMarc Zyngier 19535e2c9f9aSMarc Zyngier addr_end = addr + size - 1; 19545e2c9f9aSMarc Zyngier 19555e2c9f9aSMarc Zyngier for_each_reserved_mem_region(i, &start, &end) { 19565e2c9f9aSMarc Zyngier if (addr >= start && addr_end <= end) 19575e2c9f9aSMarc Zyngier return true; 19585e2c9f9aSMarc Zyngier } 19595e2c9f9aSMarc Zyngier 19605e2c9f9aSMarc Zyngier /* Not found, not a good sign... */ 19615e2c9f9aSMarc Zyngier pr_warn("GICv3: Expected reserved range [%pa:%pa], not found\n", 19625e2c9f9aSMarc Zyngier &addr, &addr_end); 19635e2c9f9aSMarc Zyngier add_taint(TAINT_CRAP, LOCKDEP_STILL_OK); 19645e2c9f9aSMarc Zyngier return false; 19655e2c9f9aSMarc Zyngier } 19665e2c9f9aSMarc Zyngier 19673fb68faeSMarc Zyngier static int gic_reserve_range(phys_addr_t addr, unsigned long size) 19683fb68faeSMarc Zyngier { 19693fb68faeSMarc Zyngier if (efi_enabled(EFI_CONFIG_TABLES)) 19703fb68faeSMarc Zyngier return efi_mem_reserve_persistent(addr, size); 19713fb68faeSMarc Zyngier 19723fb68faeSMarc Zyngier return 0; 19733fb68faeSMarc Zyngier } 19743fb68faeSMarc Zyngier 197511e37d35SMarc Zyngier static int __init its_setup_lpi_prop_table(void) 19761ac19ca6SMarc Zyngier { 1977c440a9d9SMarc Zyngier if (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) { 1978c440a9d9SMarc Zyngier u64 val; 1979c440a9d9SMarc Zyngier 1980c440a9d9SMarc Zyngier val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER); 1981c440a9d9SMarc Zyngier lpi_id_bits = (val & GICR_PROPBASER_IDBITS_MASK) + 1; 1982c440a9d9SMarc Zyngier 1983c440a9d9SMarc Zyngier gic_rdists->prop_table_pa = val & GENMASK_ULL(51, 12); 1984c440a9d9SMarc Zyngier gic_rdists->prop_table_va = memremap(gic_rdists->prop_table_pa, 1985c440a9d9SMarc Zyngier LPI_PROPBASE_SZ, 1986c440a9d9SMarc Zyngier MEMREMAP_WB); 1987c440a9d9SMarc Zyngier gic_reset_prop_table(gic_rdists->prop_table_va); 1988c440a9d9SMarc Zyngier } 
else { 1989e1a2e201SMarc Zyngier struct page *page; 19901ac19ca6SMarc Zyngier 1991c440a9d9SMarc Zyngier lpi_id_bits = min_t(u32, 1992c440a9d9SMarc Zyngier GICD_TYPER_ID_BITS(gic_rdists->gicd_typer), 19934cb205c0SJia He ITS_MAX_LPI_NRBITS); 1994e1a2e201SMarc Zyngier page = its_allocate_prop_table(GFP_NOWAIT); 1995e1a2e201SMarc Zyngier if (!page) { 19961ac19ca6SMarc Zyngier pr_err("Failed to allocate PROPBASE\n"); 19971ac19ca6SMarc Zyngier return -ENOMEM; 19981ac19ca6SMarc Zyngier } 19991ac19ca6SMarc Zyngier 2000e1a2e201SMarc Zyngier gic_rdists->prop_table_pa = page_to_phys(page); 2001e1a2e201SMarc Zyngier gic_rdists->prop_table_va = page_address(page); 20023fb68faeSMarc Zyngier WARN_ON(gic_reserve_range(gic_rdists->prop_table_pa, 20033fb68faeSMarc Zyngier LPI_PROPBASE_SZ)); 2004c440a9d9SMarc Zyngier } 2005e1a2e201SMarc Zyngier 2006e1a2e201SMarc Zyngier pr_info("GICv3: using LPI property table @%pa\n", 2007e1a2e201SMarc Zyngier &gic_rdists->prop_table_pa); 20081ac19ca6SMarc Zyngier 20096c31e123SShanker Donthineni return its_lpi_init(lpi_id_bits); 20101ac19ca6SMarc Zyngier } 20111ac19ca6SMarc Zyngier 20121ac19ca6SMarc Zyngier static const char *its_base_type_string[] = { 20131ac19ca6SMarc Zyngier [GITS_BASER_TYPE_DEVICE] = "Devices", 20141ac19ca6SMarc Zyngier [GITS_BASER_TYPE_VCPU] = "Virtual CPUs", 20154f46de9dSMarc Zyngier [GITS_BASER_TYPE_RESERVED3] = "Reserved (3)", 20161ac19ca6SMarc Zyngier [GITS_BASER_TYPE_COLLECTION] = "Interrupt Collections", 20171ac19ca6SMarc Zyngier [GITS_BASER_TYPE_RESERVED5] = "Reserved (5)", 20181ac19ca6SMarc Zyngier [GITS_BASER_TYPE_RESERVED6] = "Reserved (6)", 20191ac19ca6SMarc Zyngier [GITS_BASER_TYPE_RESERVED7] = "Reserved (7)", 20201ac19ca6SMarc Zyngier }; 20211ac19ca6SMarc Zyngier 20222d81d425SShanker Donthineni static u64 its_read_baser(struct its_node *its, struct its_baser *baser) 20232d81d425SShanker Donthineni { 20242d81d425SShanker Donthineni u32 idx = baser - its->tables; 20252d81d425SShanker Donthineni 20260968a619SVladimir 
Murzin return gits_read_baser(its->base + GITS_BASER + (idx << 3)); 20272d81d425SShanker Donthineni } 20282d81d425SShanker Donthineni 20292d81d425SShanker Donthineni static void its_write_baser(struct its_node *its, struct its_baser *baser, 20302d81d425SShanker Donthineni u64 val) 20312d81d425SShanker Donthineni { 20322d81d425SShanker Donthineni u32 idx = baser - its->tables; 20332d81d425SShanker Donthineni 20340968a619SVladimir Murzin gits_write_baser(val, its->base + GITS_BASER + (idx << 3)); 20352d81d425SShanker Donthineni baser->val = its_read_baser(its, baser); 20362d81d425SShanker Donthineni } 20372d81d425SShanker Donthineni 20389347359aSShanker Donthineni static int its_setup_baser(struct its_node *its, struct its_baser *baser, 20393faf24eaSShanker Donthineni u64 cache, u64 shr, u32 psz, u32 order, 20403faf24eaSShanker Donthineni bool indirect) 20419347359aSShanker Donthineni { 20429347359aSShanker Donthineni u64 val = its_read_baser(its, baser); 20439347359aSShanker Donthineni u64 esz = GITS_BASER_ENTRY_SIZE(val); 20449347359aSShanker Donthineni u64 type = GITS_BASER_TYPE(val); 204530ae9610SShanker Donthineni u64 baser_phys, tmp; 20469347359aSShanker Donthineni u32 alloc_pages; 2047539d3782SShanker Donthineni struct page *page; 20489347359aSShanker Donthineni void *base; 20499347359aSShanker Donthineni 20509347359aSShanker Donthineni retry_alloc_baser: 20519347359aSShanker Donthineni alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz); 20529347359aSShanker Donthineni if (alloc_pages > GITS_BASER_PAGES_MAX) { 20539347359aSShanker Donthineni pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n", 20549347359aSShanker Donthineni &its->phys_base, its_base_type_string[type], 20559347359aSShanker Donthineni alloc_pages, GITS_BASER_PAGES_MAX); 20569347359aSShanker Donthineni alloc_pages = GITS_BASER_PAGES_MAX; 20579347359aSShanker Donthineni order = get_order(GITS_BASER_PAGES_MAX * psz); 20589347359aSShanker Donthineni } 20599347359aSShanker Donthineni 
2060539d3782SShanker Donthineni page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order); 2061539d3782SShanker Donthineni if (!page) 20629347359aSShanker Donthineni return -ENOMEM; 20639347359aSShanker Donthineni 2064539d3782SShanker Donthineni base = (void *)page_address(page); 206530ae9610SShanker Donthineni baser_phys = virt_to_phys(base); 206630ae9610SShanker Donthineni 206730ae9610SShanker Donthineni /* Check if the physical address of the memory is above 48bits */ 206830ae9610SShanker Donthineni if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && (baser_phys >> 48)) { 206930ae9610SShanker Donthineni 207030ae9610SShanker Donthineni /* 52bit PA is supported only when PageSize=64K */ 207130ae9610SShanker Donthineni if (psz != SZ_64K) { 207230ae9610SShanker Donthineni pr_err("ITS: no 52bit PA support when psz=%d\n", psz); 207330ae9610SShanker Donthineni free_pages((unsigned long)base, order); 207430ae9610SShanker Donthineni return -ENXIO; 207530ae9610SShanker Donthineni } 207630ae9610SShanker Donthineni 207730ae9610SShanker Donthineni /* Convert 52bit PA to 48bit field */ 207830ae9610SShanker Donthineni baser_phys = GITS_BASER_PHYS_52_to_48(baser_phys); 207930ae9610SShanker Donthineni } 208030ae9610SShanker Donthineni 20819347359aSShanker Donthineni retry_baser: 208230ae9610SShanker Donthineni val = (baser_phys | 20839347359aSShanker Donthineni (type << GITS_BASER_TYPE_SHIFT) | 20849347359aSShanker Donthineni ((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) | 20859347359aSShanker Donthineni ((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT) | 20869347359aSShanker Donthineni cache | 20879347359aSShanker Donthineni shr | 20889347359aSShanker Donthineni GITS_BASER_VALID); 20899347359aSShanker Donthineni 20903faf24eaSShanker Donthineni val |= indirect ? 
GITS_BASER_INDIRECT : 0x0; 20913faf24eaSShanker Donthineni 20929347359aSShanker Donthineni switch (psz) { 20939347359aSShanker Donthineni case SZ_4K: 20949347359aSShanker Donthineni val |= GITS_BASER_PAGE_SIZE_4K; 20959347359aSShanker Donthineni break; 20969347359aSShanker Donthineni case SZ_16K: 20979347359aSShanker Donthineni val |= GITS_BASER_PAGE_SIZE_16K; 20989347359aSShanker Donthineni break; 20999347359aSShanker Donthineni case SZ_64K: 21009347359aSShanker Donthineni val |= GITS_BASER_PAGE_SIZE_64K; 21019347359aSShanker Donthineni break; 21029347359aSShanker Donthineni } 21039347359aSShanker Donthineni 21049347359aSShanker Donthineni its_write_baser(its, baser, val); 21059347359aSShanker Donthineni tmp = baser->val; 21069347359aSShanker Donthineni 21079347359aSShanker Donthineni if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) { 21089347359aSShanker Donthineni /* 21099347359aSShanker Donthineni * Shareability didn't stick. Just use 21109347359aSShanker Donthineni * whatever the read reported, which is likely 21119347359aSShanker Donthineni * to be the only thing this redistributor 21129347359aSShanker Donthineni * supports. If that's zero, make it 21139347359aSShanker Donthineni * non-cacheable as well. 21149347359aSShanker Donthineni */ 21159347359aSShanker Donthineni shr = tmp & GITS_BASER_SHAREABILITY_MASK; 21169347359aSShanker Donthineni if (!shr) { 21179347359aSShanker Donthineni cache = GITS_BASER_nC; 2118328191c0SVladimir Murzin gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order)); 21199347359aSShanker Donthineni } 21209347359aSShanker Donthineni goto retry_baser; 21219347359aSShanker Donthineni } 21229347359aSShanker Donthineni 21239347359aSShanker Donthineni if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) { 21249347359aSShanker Donthineni /* 21259347359aSShanker Donthineni * Page size didn't stick. Let's try a smaller 21269347359aSShanker Donthineni * size and retry. 
If we reach 4K, then 21279347359aSShanker Donthineni * something is horribly wrong... 21289347359aSShanker Donthineni */ 21299347359aSShanker Donthineni free_pages((unsigned long)base, order); 21309347359aSShanker Donthineni baser->base = NULL; 21319347359aSShanker Donthineni 21329347359aSShanker Donthineni switch (psz) { 21339347359aSShanker Donthineni case SZ_16K: 21349347359aSShanker Donthineni psz = SZ_4K; 21359347359aSShanker Donthineni goto retry_alloc_baser; 21369347359aSShanker Donthineni case SZ_64K: 21379347359aSShanker Donthineni psz = SZ_16K; 21389347359aSShanker Donthineni goto retry_alloc_baser; 21399347359aSShanker Donthineni } 21409347359aSShanker Donthineni } 21419347359aSShanker Donthineni 21429347359aSShanker Donthineni if (val != tmp) { 2143b11283ebSVladimir Murzin pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n", 21449347359aSShanker Donthineni &its->phys_base, its_base_type_string[type], 2145b11283ebSVladimir Murzin val, tmp); 21469347359aSShanker Donthineni free_pages((unsigned long)base, order); 21479347359aSShanker Donthineni return -ENXIO; 21489347359aSShanker Donthineni } 21499347359aSShanker Donthineni 21509347359aSShanker Donthineni baser->order = order; 21519347359aSShanker Donthineni baser->base = base; 21529347359aSShanker Donthineni baser->psz = psz; 21533faf24eaSShanker Donthineni tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz; 21549347359aSShanker Donthineni 21553faf24eaSShanker Donthineni pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n", 2156d524eaa2SVladimir Murzin &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp), 21579347359aSShanker Donthineni its_base_type_string[type], 21589347359aSShanker Donthineni (unsigned long)virt_to_phys(base), 21593faf24eaSShanker Donthineni indirect ? 
"indirect" : "flat", (int)esz, 21609347359aSShanker Donthineni psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT); 21619347359aSShanker Donthineni 21629347359aSShanker Donthineni return 0; 21639347359aSShanker Donthineni } 21649347359aSShanker Donthineni 21654cacac57SMarc Zyngier static bool its_parse_indirect_baser(struct its_node *its, 21664cacac57SMarc Zyngier struct its_baser *baser, 216732bd44dcSShanker Donthineni u32 psz, u32 *order, u32 ids) 21684b75c459SShanker Donthineni { 21694cacac57SMarc Zyngier u64 tmp = its_read_baser(its, baser); 21704cacac57SMarc Zyngier u64 type = GITS_BASER_TYPE(tmp); 21714cacac57SMarc Zyngier u64 esz = GITS_BASER_ENTRY_SIZE(tmp); 21722fd632a0SShanker Donthineni u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb; 21734b75c459SShanker Donthineni u32 new_order = *order; 21743faf24eaSShanker Donthineni bool indirect = false; 21753faf24eaSShanker Donthineni 21763faf24eaSShanker Donthineni /* No need to enable Indirection if memory requirement < (psz*2)bytes */ 21773faf24eaSShanker Donthineni if ((esz << ids) > (psz * 2)) { 21783faf24eaSShanker Donthineni /* 21793faf24eaSShanker Donthineni * Find out whether hw supports a single or two-level table by 21803faf24eaSShanker Donthineni * table by reading bit at offset '62' after writing '1' to it. 21813faf24eaSShanker Donthineni */ 21823faf24eaSShanker Donthineni its_write_baser(its, baser, val | GITS_BASER_INDIRECT); 21833faf24eaSShanker Donthineni indirect = !!(baser->val & GITS_BASER_INDIRECT); 21843faf24eaSShanker Donthineni 21853faf24eaSShanker Donthineni if (indirect) { 21863faf24eaSShanker Donthineni /* 21873faf24eaSShanker Donthineni * The size of the lvl2 table is equal to ITS page size 21883faf24eaSShanker Donthineni * which is 'psz'. 
For computing lvl1 table size, 21893faf24eaSShanker Donthineni * subtract ID bits that sparse lvl2 table from 'ids' 21903faf24eaSShanker Donthineni * which is reported by ITS hardware times lvl1 table 21913faf24eaSShanker Donthineni * entry size. 21923faf24eaSShanker Donthineni */ 2193d524eaa2SVladimir Murzin ids -= ilog2(psz / (int)esz); 21943faf24eaSShanker Donthineni esz = GITS_LVL1_ENTRY_SIZE; 21953faf24eaSShanker Donthineni } 21963faf24eaSShanker Donthineni } 21974b75c459SShanker Donthineni 21984b75c459SShanker Donthineni /* 21994b75c459SShanker Donthineni * Allocate as many entries as required to fit the 22004b75c459SShanker Donthineni * range of device IDs that the ITS can grok... The ID 22014b75c459SShanker Donthineni * space being incredibly sparse, this results in a 22023faf24eaSShanker Donthineni * massive waste of memory if two-level device table 22033faf24eaSShanker Donthineni * feature is not supported by hardware. 22044b75c459SShanker Donthineni */ 22054b75c459SShanker Donthineni new_order = max_t(u32, get_order(esz << ids), new_order); 22064b75c459SShanker Donthineni if (new_order >= MAX_ORDER) { 22074b75c459SShanker Donthineni new_order = MAX_ORDER - 1; 2208d524eaa2SVladimir Murzin ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz); 2209576a8342SMarc Zyngier pr_warn("ITS@%pa: %s Table too large, reduce ids %llu->%u\n", 22104cacac57SMarc Zyngier &its->phys_base, its_base_type_string[type], 2211576a8342SMarc Zyngier device_ids(its), ids); 22124b75c459SShanker Donthineni } 22134b75c459SShanker Donthineni 22144b75c459SShanker Donthineni *order = new_order; 22153faf24eaSShanker Donthineni 22163faf24eaSShanker Donthineni return indirect; 22174b75c459SShanker Donthineni } 22184b75c459SShanker Donthineni 22195e516846SMarc Zyngier static u32 compute_common_aff(u64 val) 22205e516846SMarc Zyngier { 22215e516846SMarc Zyngier u32 aff, clpiaff; 22225e516846SMarc Zyngier 22235e516846SMarc Zyngier aff = FIELD_GET(GICR_TYPER_AFFINITY, val); 22245e516846SMarc 
Zyngier clpiaff = FIELD_GET(GICR_TYPER_COMMON_LPI_AFF, val); 22255e516846SMarc Zyngier 22265e516846SMarc Zyngier return aff & ~(GENMASK(31, 0) >> (clpiaff * 8)); 22275e516846SMarc Zyngier } 22285e516846SMarc Zyngier 22295e516846SMarc Zyngier static u32 compute_its_aff(struct its_node *its) 22305e516846SMarc Zyngier { 22315e516846SMarc Zyngier u64 val; 22325e516846SMarc Zyngier u32 svpet; 22335e516846SMarc Zyngier 22345e516846SMarc Zyngier /* 22355e516846SMarc Zyngier * Reencode the ITS SVPET and MPIDR as a GICR_TYPER, and compute 22365e516846SMarc Zyngier * the resulting affinity. We then use that to see if this match 22375e516846SMarc Zyngier * our own affinity. 22385e516846SMarc Zyngier */ 22395e516846SMarc Zyngier svpet = FIELD_GET(GITS_TYPER_SVPET, its->typer); 22405e516846SMarc Zyngier val = FIELD_PREP(GICR_TYPER_COMMON_LPI_AFF, svpet); 22415e516846SMarc Zyngier val |= FIELD_PREP(GICR_TYPER_AFFINITY, its->mpidr); 22425e516846SMarc Zyngier return compute_common_aff(val); 22435e516846SMarc Zyngier } 22445e516846SMarc Zyngier 22455e516846SMarc Zyngier static struct its_node *find_sibling_its(struct its_node *cur_its) 22465e516846SMarc Zyngier { 22475e516846SMarc Zyngier struct its_node *its; 22485e516846SMarc Zyngier u32 aff; 22495e516846SMarc Zyngier 22505e516846SMarc Zyngier if (!FIELD_GET(GITS_TYPER_SVPET, cur_its->typer)) 22515e516846SMarc Zyngier return NULL; 22525e516846SMarc Zyngier 22535e516846SMarc Zyngier aff = compute_its_aff(cur_its); 22545e516846SMarc Zyngier 22555e516846SMarc Zyngier list_for_each_entry(its, &its_nodes, entry) { 22565e516846SMarc Zyngier u64 baser; 22575e516846SMarc Zyngier 22585e516846SMarc Zyngier if (!is_v4_1(its) || its == cur_its) 22595e516846SMarc Zyngier continue; 22605e516846SMarc Zyngier 22615e516846SMarc Zyngier if (!FIELD_GET(GITS_TYPER_SVPET, its->typer)) 22625e516846SMarc Zyngier continue; 22635e516846SMarc Zyngier 22645e516846SMarc Zyngier if (aff != compute_its_aff(its)) 22655e516846SMarc Zyngier continue; 
22665e516846SMarc Zyngier 22675e516846SMarc Zyngier /* GICv4.1 guarantees that the vPE table is GITS_BASER2 */ 22685e516846SMarc Zyngier baser = its->tables[2].val; 22695e516846SMarc Zyngier if (!(baser & GITS_BASER_VALID)) 22705e516846SMarc Zyngier continue; 22715e516846SMarc Zyngier 22725e516846SMarc Zyngier return its; 22735e516846SMarc Zyngier } 22745e516846SMarc Zyngier 22755e516846SMarc Zyngier return NULL; 22765e516846SMarc Zyngier } 22775e516846SMarc Zyngier 22781ac19ca6SMarc Zyngier static void its_free_tables(struct its_node *its) 22791ac19ca6SMarc Zyngier { 22801ac19ca6SMarc Zyngier int i; 22811ac19ca6SMarc Zyngier 22821ac19ca6SMarc Zyngier for (i = 0; i < GITS_BASER_NR_REGS; i++) { 22831a485f4dSShanker Donthineni if (its->tables[i].base) { 22841a485f4dSShanker Donthineni free_pages((unsigned long)its->tables[i].base, 22851a485f4dSShanker Donthineni its->tables[i].order); 22861a485f4dSShanker Donthineni its->tables[i].base = NULL; 22871ac19ca6SMarc Zyngier } 22881ac19ca6SMarc Zyngier } 22891ac19ca6SMarc Zyngier } 22901ac19ca6SMarc Zyngier 22910e0b0f69SShanker Donthineni static int its_alloc_tables(struct its_node *its) 22921ac19ca6SMarc Zyngier { 22931ac19ca6SMarc Zyngier u64 shr = GITS_BASER_InnerShareable; 22942fd632a0SShanker Donthineni u64 cache = GITS_BASER_RaWaWb; 22959347359aSShanker Donthineni u32 psz = SZ_64K; 22969347359aSShanker Donthineni int err, i; 229794100970SRobert Richter 2298fa150019SArd Biesheuvel if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375) 2299fa150019SArd Biesheuvel /* erratum 24313: ignore memory access type */ 23009347359aSShanker Donthineni cache = GITS_BASER_nCnB; 2301466b7d16SShanker Donthineni 23021ac19ca6SMarc Zyngier for (i = 0; i < GITS_BASER_NR_REGS; i++) { 23032d81d425SShanker Donthineni struct its_baser *baser = its->tables + i; 23042d81d425SShanker Donthineni u64 val = its_read_baser(its, baser); 23051ac19ca6SMarc Zyngier u64 type = GITS_BASER_TYPE(val); 23069347359aSShanker Donthineni u32 order = 
get_order(psz); 23073faf24eaSShanker Donthineni bool indirect = false; 23081ac19ca6SMarc Zyngier 23094cacac57SMarc Zyngier switch (type) { 23104cacac57SMarc Zyngier case GITS_BASER_TYPE_NONE: 23111ac19ca6SMarc Zyngier continue; 23121ac19ca6SMarc Zyngier 23134cacac57SMarc Zyngier case GITS_BASER_TYPE_DEVICE: 231432bd44dcSShanker Donthineni indirect = its_parse_indirect_baser(its, baser, 231532bd44dcSShanker Donthineni psz, &order, 2316576a8342SMarc Zyngier device_ids(its)); 23178d565748SZenghui Yu break; 23188d565748SZenghui Yu 23194cacac57SMarc Zyngier case GITS_BASER_TYPE_VCPU: 23205e516846SMarc Zyngier if (is_v4_1(its)) { 23215e516846SMarc Zyngier struct its_node *sibling; 23225e516846SMarc Zyngier 23235e516846SMarc Zyngier WARN_ON(i != 2); 23245e516846SMarc Zyngier if ((sibling = find_sibling_its(its))) { 23255e516846SMarc Zyngier *baser = sibling->tables[2]; 23265e516846SMarc Zyngier its_write_baser(its, baser, baser->val); 23275e516846SMarc Zyngier continue; 23285e516846SMarc Zyngier } 23295e516846SMarc Zyngier } 23305e516846SMarc Zyngier 23314cacac57SMarc Zyngier indirect = its_parse_indirect_baser(its, baser, 233232bd44dcSShanker Donthineni psz, &order, 233332bd44dcSShanker Donthineni ITS_MAX_VPEID_BITS); 23344cacac57SMarc Zyngier break; 23354cacac57SMarc Zyngier } 2336f54b97edSMarc Zyngier 23373faf24eaSShanker Donthineni err = its_setup_baser(its, baser, cache, shr, psz, order, indirect); 23389347359aSShanker Donthineni if (err < 0) { 23399347359aSShanker Donthineni its_free_tables(its); 23409347359aSShanker Donthineni return err; 234130f21363SRobert Richter } 234230f21363SRobert Richter 23439347359aSShanker Donthineni /* Update settings which will be used for next BASERn */ 23449347359aSShanker Donthineni psz = baser->psz; 23459347359aSShanker Donthineni cache = baser->val & GITS_BASER_CACHEABILITY_MASK; 23469347359aSShanker Donthineni shr = baser->val & GITS_BASER_SHAREABILITY_MASK; 23471ac19ca6SMarc Zyngier } 23481ac19ca6SMarc Zyngier 23491ac19ca6SMarc 
Zyngier return 0; 23501ac19ca6SMarc Zyngier } 23511ac19ca6SMarc Zyngier 23525e516846SMarc Zyngier static u64 inherit_vpe_l1_table_from_its(void) 23535e516846SMarc Zyngier { 23545e516846SMarc Zyngier struct its_node *its; 23555e516846SMarc Zyngier u64 val; 23565e516846SMarc Zyngier u32 aff; 23575e516846SMarc Zyngier 23585e516846SMarc Zyngier val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER); 23595e516846SMarc Zyngier aff = compute_common_aff(val); 23605e516846SMarc Zyngier 23615e516846SMarc Zyngier list_for_each_entry(its, &its_nodes, entry) { 23625e516846SMarc Zyngier u64 baser, addr; 23635e516846SMarc Zyngier 23645e516846SMarc Zyngier if (!is_v4_1(its)) 23655e516846SMarc Zyngier continue; 23665e516846SMarc Zyngier 23675e516846SMarc Zyngier if (!FIELD_GET(GITS_TYPER_SVPET, its->typer)) 23685e516846SMarc Zyngier continue; 23695e516846SMarc Zyngier 23705e516846SMarc Zyngier if (aff != compute_its_aff(its)) 23715e516846SMarc Zyngier continue; 23725e516846SMarc Zyngier 23735e516846SMarc Zyngier /* GICv4.1 guarantees that the vPE table is GITS_BASER2 */ 23745e516846SMarc Zyngier baser = its->tables[2].val; 23755e516846SMarc Zyngier if (!(baser & GITS_BASER_VALID)) 23765e516846SMarc Zyngier continue; 23775e516846SMarc Zyngier 23785e516846SMarc Zyngier /* We have a winner! 
*/ 23798b718d40SZenghui Yu gic_data_rdist()->vpe_l1_base = its->tables[2].base; 23808b718d40SZenghui Yu 23815e516846SMarc Zyngier val = GICR_VPROPBASER_4_1_VALID; 23825e516846SMarc Zyngier if (baser & GITS_BASER_INDIRECT) 23835e516846SMarc Zyngier val |= GICR_VPROPBASER_4_1_INDIRECT; 23845e516846SMarc Zyngier val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, 23855e516846SMarc Zyngier FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser)); 23865e516846SMarc Zyngier switch (FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser)) { 23875e516846SMarc Zyngier case GIC_PAGE_SIZE_64K: 23885e516846SMarc Zyngier addr = GITS_BASER_ADDR_48_to_52(baser); 23895e516846SMarc Zyngier break; 23905e516846SMarc Zyngier default: 23915e516846SMarc Zyngier addr = baser & GENMASK_ULL(47, 12); 23925e516846SMarc Zyngier break; 23935e516846SMarc Zyngier } 23945e516846SMarc Zyngier val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, addr >> 12); 23955e516846SMarc Zyngier val |= FIELD_PREP(GICR_VPROPBASER_SHAREABILITY_MASK, 23965e516846SMarc Zyngier FIELD_GET(GITS_BASER_SHAREABILITY_MASK, baser)); 23975e516846SMarc Zyngier val |= FIELD_PREP(GICR_VPROPBASER_INNER_CACHEABILITY_MASK, 23985e516846SMarc Zyngier FIELD_GET(GITS_BASER_INNER_CACHEABILITY_MASK, baser)); 23995e516846SMarc Zyngier val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, GITS_BASER_NR_PAGES(baser) - 1); 24005e516846SMarc Zyngier 24015e516846SMarc Zyngier return val; 24025e516846SMarc Zyngier } 24035e516846SMarc Zyngier 24045e516846SMarc Zyngier return 0; 24055e516846SMarc Zyngier } 24065e516846SMarc Zyngier 24075e516846SMarc Zyngier static u64 inherit_vpe_l1_table_from_rd(cpumask_t **mask) 24085e516846SMarc Zyngier { 24095e516846SMarc Zyngier u32 aff; 24105e516846SMarc Zyngier u64 val; 24115e516846SMarc Zyngier int cpu; 24125e516846SMarc Zyngier 24135e516846SMarc Zyngier val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER); 24145e516846SMarc Zyngier aff = compute_common_aff(val); 24155e516846SMarc Zyngier 24165e516846SMarc Zyngier for_each_possible_cpu(cpu) 
{ 24175e516846SMarc Zyngier void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base; 24185e516846SMarc Zyngier 24195e516846SMarc Zyngier if (!base || cpu == smp_processor_id()) 24205e516846SMarc Zyngier continue; 24215e516846SMarc Zyngier 24225e516846SMarc Zyngier val = gic_read_typer(base + GICR_TYPER); 24234bccf1d7SZenghui Yu if (aff != compute_common_aff(val)) 24245e516846SMarc Zyngier continue; 24255e516846SMarc Zyngier 24265e516846SMarc Zyngier /* 24275e516846SMarc Zyngier * At this point, we have a victim. This particular CPU 24285e516846SMarc Zyngier * has already booted, and has an affinity that matches 24295e516846SMarc Zyngier * ours wrt CommonLPIAff. Let's use its own VPROPBASER. 24305e516846SMarc Zyngier * Make sure we don't write the Z bit in that case. 24315e516846SMarc Zyngier */ 24325186a6ccSZenghui Yu val = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER); 24335e516846SMarc Zyngier val &= ~GICR_VPROPBASER_4_1_Z; 24345e516846SMarc Zyngier 24358b718d40SZenghui Yu gic_data_rdist()->vpe_l1_base = gic_data_rdist_cpu(cpu)->vpe_l1_base; 24365e516846SMarc Zyngier *mask = gic_data_rdist_cpu(cpu)->vpe_table_mask; 24375e516846SMarc Zyngier 24385e516846SMarc Zyngier return val; 24395e516846SMarc Zyngier } 24405e516846SMarc Zyngier 24415e516846SMarc Zyngier return 0; 24425e516846SMarc Zyngier } 24435e516846SMarc Zyngier 24444e6437f1SZenghui Yu static bool allocate_vpe_l2_table(int cpu, u32 id) 24454e6437f1SZenghui Yu { 24464e6437f1SZenghui Yu void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base; 24474e6437f1SZenghui Yu u64 val, gpsz, npg; 24484e6437f1SZenghui Yu unsigned int psz, esz, idx; 24494e6437f1SZenghui Yu struct page *page; 24504e6437f1SZenghui Yu __le64 *table; 24514e6437f1SZenghui Yu 24524e6437f1SZenghui Yu if (!gic_rdists->has_rvpeid) 24534e6437f1SZenghui Yu return true; 24544e6437f1SZenghui Yu 24555186a6ccSZenghui Yu val = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER); 24564e6437f1SZenghui Yu 24574e6437f1SZenghui Yu esz = 
FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val) + 1; 24584e6437f1SZenghui Yu gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val); 24594e6437f1SZenghui Yu npg = FIELD_GET(GICR_VPROPBASER_4_1_SIZE, val) + 1; 24604e6437f1SZenghui Yu 24614e6437f1SZenghui Yu switch (gpsz) { 24624e6437f1SZenghui Yu default: 24634e6437f1SZenghui Yu WARN_ON(1); 24644e6437f1SZenghui Yu /* fall through */ 24654e6437f1SZenghui Yu case GIC_PAGE_SIZE_4K: 24664e6437f1SZenghui Yu psz = SZ_4K; 24674e6437f1SZenghui Yu break; 24684e6437f1SZenghui Yu case GIC_PAGE_SIZE_16K: 24694e6437f1SZenghui Yu psz = SZ_16K; 24704e6437f1SZenghui Yu break; 24714e6437f1SZenghui Yu case GIC_PAGE_SIZE_64K: 24724e6437f1SZenghui Yu psz = SZ_64K; 24734e6437f1SZenghui Yu break; 24744e6437f1SZenghui Yu } 24754e6437f1SZenghui Yu 24764e6437f1SZenghui Yu /* Don't allow vpe_id that exceeds single, flat table limit */ 24774e6437f1SZenghui Yu if (!(val & GICR_VPROPBASER_4_1_INDIRECT)) 24784e6437f1SZenghui Yu return (id < (npg * psz / (esz * SZ_8))); 24794e6437f1SZenghui Yu 24804e6437f1SZenghui Yu /* Compute 1st level table index & check if that exceeds table limit */ 24814e6437f1SZenghui Yu idx = id >> ilog2(psz / (esz * SZ_8)); 24824e6437f1SZenghui Yu if (idx >= (npg * psz / GITS_LVL1_ENTRY_SIZE)) 24834e6437f1SZenghui Yu return false; 24844e6437f1SZenghui Yu 24854e6437f1SZenghui Yu table = gic_data_rdist_cpu(cpu)->vpe_l1_base; 24864e6437f1SZenghui Yu 24874e6437f1SZenghui Yu /* Allocate memory for 2nd level table */ 24884e6437f1SZenghui Yu if (!table[idx]) { 24894e6437f1SZenghui Yu page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(psz)); 24904e6437f1SZenghui Yu if (!page) 24914e6437f1SZenghui Yu return false; 24924e6437f1SZenghui Yu 24934e6437f1SZenghui Yu /* Flush Lvl2 table to PoC if hw doesn't support coherency */ 24944e6437f1SZenghui Yu if (!(val & GICR_VPROPBASER_SHAREABILITY_MASK)) 24954e6437f1SZenghui Yu gic_flush_dcache_to_poc(page_address(page), psz); 24964e6437f1SZenghui Yu 24974e6437f1SZenghui Yu table[idx] = 
cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID); 24984e6437f1SZenghui Yu 24994e6437f1SZenghui Yu /* Flush Lvl1 entry to PoC if hw doesn't support coherency */ 25004e6437f1SZenghui Yu if (!(val & GICR_VPROPBASER_SHAREABILITY_MASK)) 25014e6437f1SZenghui Yu gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE); 25024e6437f1SZenghui Yu 25034e6437f1SZenghui Yu /* Ensure updated table contents are visible to RD hardware */ 25044e6437f1SZenghui Yu dsb(sy); 25054e6437f1SZenghui Yu } 25064e6437f1SZenghui Yu 25074e6437f1SZenghui Yu return true; 25084e6437f1SZenghui Yu } 25094e6437f1SZenghui Yu 25105e516846SMarc Zyngier static int allocate_vpe_l1_table(void) 25115e516846SMarc Zyngier { 25125e516846SMarc Zyngier void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); 25135e516846SMarc Zyngier u64 val, gpsz, npg, pa; 25145e516846SMarc Zyngier unsigned int psz = SZ_64K; 25155e516846SMarc Zyngier unsigned int np, epp, esz; 25165e516846SMarc Zyngier struct page *page; 25175e516846SMarc Zyngier 25185e516846SMarc Zyngier if (!gic_rdists->has_rvpeid) 25195e516846SMarc Zyngier return 0; 25205e516846SMarc Zyngier 25215e516846SMarc Zyngier /* 25225e516846SMarc Zyngier * if VPENDBASER.Valid is set, disable any previously programmed 25235e516846SMarc Zyngier * VPE by setting PendingLast while clearing Valid. This has the 25245e516846SMarc Zyngier * effect of making sure no doorbell will be generated and we can 25255e516846SMarc Zyngier * then safely clear VPROPBASER.Valid. 25265e516846SMarc Zyngier */ 25275186a6ccSZenghui Yu if (gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER) & GICR_VPENDBASER_Valid) 25285186a6ccSZenghui Yu gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast, 25295e516846SMarc Zyngier vlpi_base + GICR_VPENDBASER); 25305e516846SMarc Zyngier 25315e516846SMarc Zyngier /* 25325e516846SMarc Zyngier * If we can inherit the configuration from another RD, let's do 25335e516846SMarc Zyngier * so. Otherwise, we have to go through the allocation process. 
We 25345e516846SMarc Zyngier * assume that all RDs have the exact same requirements, as 25355e516846SMarc Zyngier * nothing will work otherwise. 25365e516846SMarc Zyngier */ 25375e516846SMarc Zyngier val = inherit_vpe_l1_table_from_rd(&gic_data_rdist()->vpe_table_mask); 25385e516846SMarc Zyngier if (val & GICR_VPROPBASER_4_1_VALID) 25395e516846SMarc Zyngier goto out; 25405e516846SMarc Zyngier 25415e516846SMarc Zyngier gic_data_rdist()->vpe_table_mask = kzalloc(sizeof(cpumask_t), GFP_KERNEL); 25425e516846SMarc Zyngier if (!gic_data_rdist()->vpe_table_mask) 25435e516846SMarc Zyngier return -ENOMEM; 25445e516846SMarc Zyngier 25455e516846SMarc Zyngier val = inherit_vpe_l1_table_from_its(); 25465e516846SMarc Zyngier if (val & GICR_VPROPBASER_4_1_VALID) 25475e516846SMarc Zyngier goto out; 25485e516846SMarc Zyngier 25495e516846SMarc Zyngier /* First probe the page size */ 25505e516846SMarc Zyngier val = FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, GIC_PAGE_SIZE_64K); 25515186a6ccSZenghui Yu gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); 25525186a6ccSZenghui Yu val = gicr_read_vpropbaser(vlpi_base + GICR_VPROPBASER); 25535e516846SMarc Zyngier gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val); 25545e516846SMarc Zyngier esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val); 25555e516846SMarc Zyngier 25565e516846SMarc Zyngier switch (gpsz) { 25575e516846SMarc Zyngier default: 25585e516846SMarc Zyngier gpsz = GIC_PAGE_SIZE_4K; 25595e516846SMarc Zyngier /* fall through */ 25605e516846SMarc Zyngier case GIC_PAGE_SIZE_4K: 25615e516846SMarc Zyngier psz = SZ_4K; 25625e516846SMarc Zyngier break; 25635e516846SMarc Zyngier case GIC_PAGE_SIZE_16K: 25645e516846SMarc Zyngier psz = SZ_16K; 25655e516846SMarc Zyngier break; 25665e516846SMarc Zyngier case GIC_PAGE_SIZE_64K: 25675e516846SMarc Zyngier psz = SZ_64K; 25685e516846SMarc Zyngier break; 25695e516846SMarc Zyngier } 25705e516846SMarc Zyngier 25715e516846SMarc Zyngier /* 25725e516846SMarc Zyngier * Start populating the 
register from scratch, including RO fields 25735e516846SMarc Zyngier * (which we want to print in debug cases...) 25745e516846SMarc Zyngier */ 25755e516846SMarc Zyngier val = 0; 25765e516846SMarc Zyngier val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, gpsz); 25775e516846SMarc Zyngier val |= FIELD_PREP(GICR_VPROPBASER_4_1_ENTRY_SIZE, esz); 25785e516846SMarc Zyngier 25795e516846SMarc Zyngier /* How many entries per GIC page? */ 25805e516846SMarc Zyngier esz++; 25815e516846SMarc Zyngier epp = psz / (esz * SZ_8); 25825e516846SMarc Zyngier 25835e516846SMarc Zyngier /* 25845e516846SMarc Zyngier * If we need more than just a single L1 page, flag the table 25855e516846SMarc Zyngier * as indirect and compute the number of required L1 pages. 25865e516846SMarc Zyngier */ 25875e516846SMarc Zyngier if (epp < ITS_MAX_VPEID) { 25885e516846SMarc Zyngier int nl2; 25895e516846SMarc Zyngier 25905e516846SMarc Zyngier val |= GICR_VPROPBASER_4_1_INDIRECT; 25915e516846SMarc Zyngier 25925e516846SMarc Zyngier /* Number of L2 pages required to cover the VPEID space */ 25935e516846SMarc Zyngier nl2 = DIV_ROUND_UP(ITS_MAX_VPEID, epp); 25945e516846SMarc Zyngier 25955e516846SMarc Zyngier /* Number of L1 pages to point to the L2 pages */ 25965e516846SMarc Zyngier npg = DIV_ROUND_UP(nl2 * SZ_8, psz); 25975e516846SMarc Zyngier } else { 25985e516846SMarc Zyngier npg = 1; 25995e516846SMarc Zyngier } 26005e516846SMarc Zyngier 2601e88bd316SZenghui Yu val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, npg - 1); 26025e516846SMarc Zyngier 26035e516846SMarc Zyngier /* Right, that's the number of CPU pages we need for L1 */ 26045e516846SMarc Zyngier np = DIV_ROUND_UP(npg * psz, PAGE_SIZE); 26055e516846SMarc Zyngier 26065e516846SMarc Zyngier pr_debug("np = %d, npg = %lld, psz = %d, epp = %d, esz = %d\n", 26075e516846SMarc Zyngier np, npg, psz, epp, esz); 26085e516846SMarc Zyngier page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(np * PAGE_SIZE)); 26095e516846SMarc Zyngier if (!page) 26105e516846SMarc Zyngier 
		return -ENOMEM;

	gic_data_rdist()->vpe_l1_base = page_address(page);
	pa = virt_to_phys(page_address(page));
	/* The L1 table base must be naturally aligned to the GIC page size */
	WARN_ON(!IS_ALIGNED(pa, psz));

	val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, pa >> 12);
	val |= GICR_VPROPBASER_RaWb;
	val |= GICR_VPROPBASER_InnerShareable;
	val |= GICR_VPROPBASER_4_1_Z;
	val |= GICR_VPROPBASER_4_1_VALID;

out:
	gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
	cpumask_set_cpu(smp_processor_id(), gic_data_rdist()->vpe_table_mask);

	pr_debug("CPU%d: VPROPBASER = %llx %*pbl\n",
		 smp_processor_id(), val,
		 cpumask_pr_args(gic_data_rdist()->vpe_table_mask));

	return 0;
}

/*
 * Allocate the per-ITS collection array (one entry per possible CPU)
 * and mark every entry as untargeted (~0ULL) until the CPU comes up
 * and binds its collection.
 */
static int its_alloc_collections(struct its_node *its)
{
	int i;

	its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections),
				   GFP_KERNEL);
	if (!its->collections)
		return -ENOMEM;

	for (i = 0; i < nr_cpu_ids; i++)
		its->collections[i].target_address = ~0ULL;

	return 0;
}

/*
 * Allocate a zeroed LPI pending table (LPI_PENDBASE_SZ bytes) and make
 * sure the zeroes are visible to the redistributor before it is used.
 */
static struct page *its_allocate_pending_table(gfp_t gfp_flags)
{
	struct page *pend_page;

	pend_page = alloc_pages(gfp_flags | __GFP_ZERO,
				get_order(LPI_PENDBASE_SZ));
	if (!pend_page)
		return NULL;

	/* Make sure the GIC will observe the zero-ed page */
	gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ);

	return pend_page;
}

static void its_free_pending_table(struct page *pt)
{
	free_pages((unsigned long)page_address(pt), get_order(LPI_PENDBASE_SZ));
}

/*
 * Booting with kdump and LPIs enabled is generally fine. Any other
 * case is wrong in the absence of firmware/EFI support.
 */
static bool enabled_lpis_allowed(void)
{
	phys_addr_t addr;
	u64 val;

	/* Check whether the property table is in a reserved region */
	val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER);
	addr = val & GENMASK_ULL(51, 12);

	return gic_check_reserved_range(addr, LPI_PROPBASE_SZ);
}

/*
 * Set up the LPI property table and pre-allocate a pending table for
 * every possible CPU. Runs once, from the boot CPU.
 */
static int __init allocate_lpi_tables(void)
{
	u64 val;
	int err, cpu;

	/*
	 * If LPIs are enabled while we run this from the boot CPU,
	 * flag the RD tables as pre-allocated if the stars do align.
	 */
	val = readl_relaxed(gic_data_rdist_rd_base() + GICR_CTLR);
	if ((val & GICR_CTLR_ENABLE_LPIS) && enabled_lpis_allowed()) {
		gic_rdists->flags |= (RDIST_FLAGS_RD_TABLES_PREALLOCATED |
				      RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING);
		pr_info("GICv3: Using preallocated redistributor tables\n");
	}

	err = its_setup_lpi_prop_table();
	if (err)
		return err;

	/*
	 * We allocate all the pending tables anyway, as we may have a
	 * mix of RDs that have had LPIs enabled, and some that
	 * don't. We'll free the unused ones as each CPU comes online.
	 */
	for_each_possible_cpu(cpu) {
		struct page *pend_page;

		pend_page = its_allocate_pending_table(GFP_NOWAIT);
		if (!pend_page) {
			pr_err("Failed to allocate PENDBASE for CPU%d\n", cpu);
			return -ENOMEM;
		}

		gic_data_rdist_cpu(cpu)->pend_page = pend_page;
	}

	return 0;
}

/*
 * Clear GICR_VPENDBASER.Valid (additionally clearing @clr and setting
 * @set), then poll the Dirty bit for up to ~1s until the redistributor
 * has parked the vPE. Returns the last GICR_VPENDBASER value read; on
 * timeout, PendingLast is forced so pending state isn't lost.
 */
static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set)
{
	u32 count = 1000000;	/* 1s! */
	bool clean;
	u64 val;

	val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
	val &= ~GICR_VPENDBASER_Valid;
	val &= ~clr;
	val |= set;
	gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);

	do {
		val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
		clean = !(val & GICR_VPENDBASER_Dirty);
		if (!clean) {
			count--;
			cpu_relax();
			udelay(1);
		}
	} while (!clean && count);

	if (unlikely(val & GICR_VPENDBASER_Dirty)) {
		pr_err_ratelimited("ITS virtual pending table not cleaning\n");
		val |= GICR_VPENDBASER_PendingLast;
	}

	return val;
}

/*
 * Per-CPU LPI bring-up: program GICR_PROPBASER/GICR_PENDBASER for this
 * redistributor, enable LPIs, and (on GICv4) sanitize the vPE state.
 * If the tables were pre-allocated by firmware/kdump, reuse them and
 * free our own pending table instead.
 */
static void its_cpu_init_lpis(void)
{
	void __iomem *rbase = gic_data_rdist_rd_base();
	struct page *pend_page;
	phys_addr_t paddr;
	u64 val, tmp;

	/* Already initialized for this CPU? Nothing to do. */
	if (gic_data_rdist()->lpi_enabled)
		return;

	val = readl_relaxed(rbase + GICR_CTLR);
	if ((gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) &&
	    (val & GICR_CTLR_ENABLE_LPIS)) {
		/*
		 * Check that we get the same property table on all
		 * RDs. If we don't, this is hopeless.
		 */
		paddr = gicr_read_propbaser(rbase + GICR_PROPBASER);
		paddr &= GENMASK_ULL(51, 12);
		if (WARN_ON(gic_rdists->prop_table_pa != paddr))
			add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);

		paddr = gicr_read_pendbaser(rbase + GICR_PENDBASER);
		paddr &= GENMASK_ULL(51, 16);

		/* The firmware-provided table must live in reserved memory */
		WARN_ON(!gic_check_reserved_range(paddr, LPI_PENDBASE_SZ));
		its_free_pending_table(gic_data_rdist()->pend_page);
		gic_data_rdist()->pend_page = NULL;

		goto out;
	}

	pend_page = gic_data_rdist()->pend_page;
	paddr = page_to_phys(pend_page);
	/* Reserve it so a kexec'd kernel can detect the pre-allocation */
	WARN_ON(gic_reserve_range(paddr, LPI_PENDBASE_SZ));

	/* set PROPBASE */
	val = (gic_rdists->prop_table_pa |
	       GICR_PROPBASER_InnerShareable |
	       GICR_PROPBASER_RaWaWb |
	       ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));

	gicr_write_propbaser(val, rbase + GICR_PROPBASER);
	tmp = gicr_read_propbaser(rbase + GICR_PROPBASER);

	/* Read back: the RD may have downgraded our shareability request */
	if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
		if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
			/*
			 * The HW reports non-shareable, we must
			 * remove the cacheability attributes as
			 * well.
			 */
			val &= ~(GICR_PROPBASER_SHAREABILITY_MASK |
				 GICR_PROPBASER_CACHEABILITY_MASK);
			val |= GICR_PROPBASER_nC;
			gicr_write_propbaser(val, rbase + GICR_PROPBASER);
		}
		pr_info_once("GIC: using cache flushing for LPI property table\n");
		gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
	}

	/* set PENDBASE */
	val = (page_to_phys(pend_page) |
	       GICR_PENDBASER_InnerShareable |
	       GICR_PENDBASER_RaWaWb);

	gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
	tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER);

	if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
		/*
		 * The HW reports non-shareable, we must remove the
		 * cacheability attributes as well.
		 */
		val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
			 GICR_PENDBASER_CACHEABILITY_MASK);
		val |= GICR_PENDBASER_nC;
		gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
	}

	/* Enable LPIs */
	val = readl_relaxed(rbase + GICR_CTLR);
	val |= GICR_CTLR_ENABLE_LPIS;
	writel_relaxed(val, rbase + GICR_CTLR);

	if (gic_rdists->has_vlpis && !gic_rdists->has_rvpeid) {
		void __iomem *vlpi_base = gic_data_rdist_vlpi_base();

		/*
		 * It's possible for CPU to receive VLPIs before it is
		 * scheduled as a vPE, especially for the first CPU, and the
		 * VLPI with INTID larger than 2^(IDbits+1) will be considered
		 * as out of range and dropped by GIC.
		 * So we initialize IDbits to known value to avoid VLPI drop.
		 */
		val = (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
		pr_debug("GICv4: CPU%d: Init IDbits to 0x%llx for GICR_VPROPBASER\n",
			 smp_processor_id(), val);
		gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);

		/*
		 * Also clear Valid bit of GICR_VPENDBASER, in case some
		 * ancient programming gets left in and has possibility of
		 * corrupting memory.
		 */
		val = its_clear_vpend_valid(vlpi_base, 0, 0);
	}

	if (allocate_vpe_l1_table()) {
		/*
		 * If the allocation has failed, we're in massive trouble.
		 * Disable direct injection, and pray that no VM was
		 * already running...
		 */
		gic_rdists->has_rvpeid = false;
		gic_rdists->has_vlpis = false;
	}

	/* Make sure the GIC has seen the above */
	dsb(sy);
out:
	gic_data_rdist()->lpi_enabled = true;
	pr_info("GICv3: CPU%d: using %s LPI pending table @%pa\n",
		smp_processor_id(),
		gic_data_rdist()->pend_page ?
		"allocated" : "reserved",
		&paddr);
}

/*
 * Bind this CPU's collection on one ITS: compute the target (physical
 * redistributor address or linear CPU number, depending on
 * GITS_TYPER.PTA) and issue MAPC + INVALL.
 */
static void its_cpu_init_collection(struct its_node *its)
{
	int cpu = smp_processor_id();
	u64 target;

	/* avoid cross node collections and its mapping */
	if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
		struct device_node *cpu_node;

		cpu_node = of_get_cpu_node(cpu, NULL);
		if (its->numa_node != NUMA_NO_NODE &&
		    its->numa_node != of_node_to_nid(cpu_node))
			return;
	}

	/*
	 * We now have to bind each collection to its target
	 * redistributor.
	 */
	if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
		/*
		 * This ITS wants the physical address of the
		 * redistributor.
		 */
		target = gic_data_rdist()->phys_base;
	} else {
		/* This ITS wants a linear CPU number. */
		target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
		target = GICR_TYPER_CPU_NUMBER(target) << 16;
	}

	/* Perform collection mapping */
	its->collections[cpu].target_address = target;
	its->collections[cpu].col_id = cpu;

	its_send_mapc(its, &its->collections[cpu], 1);
	its_send_invall(its, &its->collections[cpu]);
}

/* Bind this CPU's collection on every discovered ITS. */
static void its_cpu_init_collections(void)
{
	struct its_node *its;

	raw_spin_lock(&its_lock);

	list_for_each_entry(its, &its_nodes, entry)
		its_cpu_init_collection(its);

	raw_spin_unlock(&its_lock);
}

/*
 * Look up an its_device by DeviceID on a given ITS, under its->lock.
 * Returns NULL if the device has not been created yet.
 */
static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
{
	struct its_device *its_dev = NULL, *tmp;
	unsigned long flags;

	raw_spin_lock_irqsave(&its->lock, flags);

	list_for_each_entry(tmp, &its->its_device_list, entry) {
		if (tmp->device_id == dev_id) {
			its_dev = tmp;
			break;
		}
	}

	raw_spin_unlock_irqrestore(&its->lock, flags);

	return its_dev;
}

/* Find the GITS_BASER register of a given table type, or NULL. */
static struct its_baser *its_get_baser(struct its_node *its, u32 type)
{
	int i;

	for (i = 0; i < GITS_BASER_NR_REGS; i++) {
		if (GITS_BASER_TYPE(its->tables[i].val) == type)
			return &its->tables[i];
	}

	return NULL;
}

/*
 * Make sure a table entry for @id exists: for a flat table this is a
 * pure range check; for a two-level table, allocate (and flush, if the
 * ITS is not coherent) the L2 page on demand and hook it into the L1.
 */
static bool its_alloc_table_entry(struct its_node *its,
				  struct its_baser *baser, u32 id)
{
	struct page *page;
	u32 esz, idx;
	__le64 *table;

	/* Don't allow device id that exceeds single, flat table limit */
	esz = GITS_BASER_ENTRY_SIZE(baser->val);
	if (!(baser->val & GITS_BASER_INDIRECT))
		return (id < (PAGE_ORDER_TO_SIZE(baser->order) / esz));

	/* Compute 1st level table index & check if that exceeds table limit */
	idx = id >> ilog2(baser->psz / esz);
	if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE))
		return false;

	table = baser->base;

	/* Allocate memory for 2nd level table */
	if (!table[idx]) {
		page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
					get_order(baser->psz));
		if (!page)
			return false;

		/* Flush Lvl2 table to PoC if hw doesn't support coherency */
		if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
			gic_flush_dcache_to_poc(page_address(page), baser->psz);

		table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);

		/* Flush Lvl1 entry to PoC if hw doesn't support coherency */
		if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
			gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);

		/* Ensure updated table contents are visible to ITS hardware */
		dsb(sy);
	}

	return true;
}

/* Ensure the device table can hold @dev_id, allocating L2 pages if needed. */
static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
{
	struct its_baser *baser;

	baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);

	/* Don't allow device id that exceeds ITS hardware limit */
	if (!baser)
		return (ilog2(dev_id) < device_ids(its));

	return its_alloc_table_entry(its, baser, dev_id);
}

static bool its_alloc_vpe_table(u32 vpe_id)
{
	struct its_node *its;
	int cpu;

	/*
	 * Make sure the L2 tables are allocated on *all* v4 ITSs. We
	 * could try and only do it on ITSs corresponding to devices
	 * that have interrupts targeted at this VPE, but the
	 * complexity becomes crazy (and you have tons of memory
	 * anyway, right?).
	 */
	list_for_each_entry(its, &its_nodes, entry) {
		struct its_baser *baser;

		if (!is_v4(its))
			continue;

		baser = its_get_baser(its, GITS_BASER_TYPE_VCPU);
		if (!baser)
			return false;

		if (!its_alloc_table_entry(its, baser, vpe_id))
			return false;
	}

	/* Non v4.1? No need to iterate RDs and go back early. */
	if (!gic_rdists->has_rvpeid)
		return true;

	/*
	 * Make sure the L2 tables are allocated for all copies of
	 * the L1 table on *all* v4.1 RDs.
	 */
	for_each_possible_cpu(cpu) {
		if (!allocate_vpe_l2_table(cpu, vpe_id))
			return false;
	}

	return true;
}

/*
 * Create an its_device for @dev_id on @its: ensure table space, size
 * and allocate the ITT, optionally allocate an LPI range plus the
 * event->collection map, publish the device and MAPD it to the ITS.
 * Returns NULL on any allocation failure.
 */
static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
					    int nvecs, bool alloc_lpis)
{
	struct its_device *dev;
	unsigned long *lpi_map = NULL;
	unsigned long flags;
	u16 *col_map = NULL;
	void *itt;
	int lpi_base;
	int nr_lpis;
	int nr_ites;
	int sz;

	if (!its_alloc_device_table(its, dev_id))
		return NULL;

	if (WARN_ON(!is_power_of_2(nvecs)))
		nvecs = roundup_pow_of_two(nvecs);

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	/*
	 * Even if the device wants a single LPI, the ITT must be
	 * sized as a power of two (and you need at least one bit...).
	 */
	nr_ites = max(2, nvecs);
	/* ITT size: entry size is GITS_TYPER.ITT_entry_size + 1 bytes */
	sz = nr_ites * (FIELD_GET(GITS_TYPER_ITT_ENTRY_SIZE, its->typer) + 1);
	sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
	itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node);
	if (alloc_lpis) {
		lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis);
		if (lpi_map)
			col_map = kcalloc(nr_lpis, sizeof(*col_map),
					  GFP_KERNEL);
	} else {
		/* No LPIs: col_map is sized by ITT entries instead */
		col_map = kcalloc(nr_ites, sizeof(*col_map), GFP_KERNEL);
		nr_lpis = 0;
		lpi_base = 0;
	}

	/* Single failure check for all of the above (kfree(NULL) is fine) */
	if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) {
		kfree(dev);
		kfree(itt);
		kfree(lpi_map);
		kfree(col_map);
		return NULL;
	}

	/* Make the zeroed ITT visible to a non-coherent ITS */
	gic_flush_dcache_to_poc(itt, sz);

	dev->its = its;
	dev->itt = itt;
	dev->nr_ites = nr_ites;
	dev->event_map.lpi_map = lpi_map;
	dev->event_map.col_map = col_map;
	dev->event_map.lpi_base = lpi_base;
	dev->event_map.nr_lpis = nr_lpis;
	raw_spin_lock_init(&dev->event_map.vlpi_lock);
	dev->device_id = dev_id;
	INIT_LIST_HEAD(&dev->entry);

	raw_spin_lock_irqsave(&its->lock, flags);
	list_add(&dev->entry, &its->its_device_list);
	raw_spin_unlock_irqrestore(&its->lock, flags);

	/* Map device to its ITT */
	its_send_mapd(dev, 1);

	return dev;
}

/*
 * Unlink an its_device and free its resources. The caller is expected
 * to have unmapped it (MAPD with valid=0) beforehand; lpi_map is freed
 * separately via its_lpi_free().
 */
static void its_free_device(struct its_device *its_dev)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&its_dev->its->lock, flags);
	list_del(&its_dev->entry);
	raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
	kfree(its_dev->event_map.col_map);
	kfree(its_dev->itt);
	kfree(its_dev);
}

static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number_t *hwirq)
{
	int idx;

	/* Find a free LPI region in lpi_map and allocate them.
 */
	idx = bitmap_find_free_region(dev->event_map.lpi_map,
				      dev->event_map.nr_lpis,
				      get_count_order(nvecs));
	if (idx < 0)
		return -ENOSPC;

	*hwirq = dev->event_map.lpi_base + idx;

	return 0;
}

/*
 * msi_prepare callback: find or create the its_device identified by the
 * DeviceID stashed in info->scratchpad[0], serialized by
 * its->dev_alloc_lock. The resulting device is passed to the domain
 * alloc path via the same scratchpad slot.
 */
static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
			   int nvec, msi_alloc_info_t *info)
{
	struct its_node *its;
	struct its_device *its_dev;
	struct msi_domain_info *msi_info;
	u32 dev_id;
	int err = 0;

	/*
	 * We ignore "dev" entirely, and rely on the dev_id that has
	 * been passed via the scratchpad. This limits this domain's
	 * usefulness to upper layers that definitely know that they
	 * are built on top of the ITS.
	 */
	dev_id = info->scratchpad[0].ul;

	msi_info = msi_get_domain_info(domain);
	its = msi_info->data;

	if (!gic_rdists->has_direct_lpi &&
	    vpe_proxy.dev &&
	    vpe_proxy.dev->its == its &&
	    dev_id == vpe_proxy.dev->device_id) {
		/* Bad luck. Get yourself a better implementation */
		WARN_ONCE(1, "DevId %x clashes with GICv4 VPE proxy device\n",
			  dev_id);
		return -EINVAL;
	}

	mutex_lock(&its->dev_alloc_lock);
	its_dev = its_find_device(its, dev_id);
	if (its_dev) {
		/*
		 * We already have seen this ID, probably through
		 * another alias (PCI bridge of some sort). No need to
		 * create the device.
		 */
		its_dev->shared = true;
		pr_debug("Reusing ITT for devID %x\n", dev_id);
		goto out;
	}

	its_dev = its_create_device(its, dev_id, nvec, true);
	if (!its_dev) {
		err = -ENOMEM;
		goto out;
	}

	pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec));
out:
	mutex_unlock(&its->dev_alloc_lock);
	/* NULL on failure; consumers must check before use */
	info->scratchpad[0].ptr = its_dev;
	return err;
}

static struct msi_domain_ops its_msi_domain_ops = {
	.msi_prepare	= its_msi_prepare,
};

/*
 * Allocate one interrupt in the parent (GICv3) domain, building the
 * fwspec in whichever format (OF or fwnode) the parent expects.
 */
static int its_irq_gic_domain_alloc(struct irq_domain *domain,
				    unsigned int virq,
				    irq_hw_number_t hwirq)
{
	struct irq_fwspec fwspec;

	if
 (irq_domain_get_of_node(domain->parent)) {
		fwspec.fwnode = domain->parent->fwnode;
		fwspec.param_count = 3;
		fwspec.param[0] = GIC_IRQ_TYPE_LPI;
		fwspec.param[1] = hwirq;
		fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
	} else if (is_fwnode_irqchip(domain->parent->fwnode)) {
		fwspec.fwnode = domain->parent->fwnode;
		fwspec.param_count = 2;
		fwspec.param[0] = hwirq;
		fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
	} else {
		return -EINVAL;
	}

	return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
}

/*
 * Domain .alloc: carve @nr_irqs contiguous events out of the device's
 * LPI map, prepare the MSI doorbell for IOMMU use, then wire each virq
 * to its parent-domain interrupt and the ITS irqchip.
 */
static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs, void *args)
{
	msi_alloc_info_t *info = args;
	struct its_device *its_dev = info->scratchpad[0].ptr;
	struct its_node *its = its_dev->its;
	irq_hw_number_t hwirq;
	int err;
	int i;

	err = its_alloc_device_irq(its_dev, nr_irqs, &hwirq);
	if (err)
		return err;

	err = iommu_dma_prepare_msi(info->desc, its->get_msi_base(its_dev));
	if (err)
		return err;

	for (i = 0; i < nr_irqs; i++) {
		err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
		if (err)
			return err;

		irq_domain_set_hwirq_and_chip(domain, virq + i,
					      hwirq + i, &its_irq_chip, its_dev);
		irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i)));
		pr_debug("ID:%d pID:%d vID:%d\n",
			 (int)(hwirq + i - its_dev->event_map.lpi_base),
			 (int)(hwirq + i), virq + i);
	}

	return 0;
}

/*
 * Domain .activate: pick a target CPU (preferring the ITS's NUMA node),
 * record it in col_map and issue MAPTI to bind the event to the LPI.
 */
static int its_irq_domain_activate(struct irq_domain *domain,
				   struct irq_data *d, bool reserve)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);
	const struct cpumask *cpu_mask = cpu_online_mask;
	int cpu;

	/* get the cpu_mask of local node */
	if (its_dev->its->numa_node >= 0)
		cpu_mask = cpumask_of_node(its_dev->its->numa_node);

	/* Bind the LPI to the first possible CPU */
	cpu = cpumask_first_and(cpu_mask, cpu_online_mask);
	if (cpu >= nr_cpu_ids) {
		/* No online CPU on the local node */
		if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144)
			return -EINVAL;

		cpu = cpumask_first(cpu_online_mask);
	}

	its_dev->event_map.col_map[event] = cpu;

irq_data_update_effective_affinity(d, cpumask_of(cpu)); 3301591e5becSMarc Zyngier 3302aca268dfSMarc Zyngier /* Map the GIC IRQ and event to the device */ 33036a25ad3aSMarc Zyngier its_send_mapti(its_dev, d->hwirq, event); 330472491643SThomas Gleixner return 0; 3305aca268dfSMarc Zyngier } 3306aca268dfSMarc Zyngier 3307aca268dfSMarc Zyngier static void its_irq_domain_deactivate(struct irq_domain *domain, 3308aca268dfSMarc Zyngier struct irq_data *d) 3309aca268dfSMarc Zyngier { 3310aca268dfSMarc Zyngier struct its_device *its_dev = irq_data_get_irq_chip_data(d); 3311aca268dfSMarc Zyngier u32 event = its_get_event_id(d); 3312aca268dfSMarc Zyngier 3313aca268dfSMarc Zyngier /* Stop the delivery of interrupts */ 3314aca268dfSMarc Zyngier its_send_discard(its_dev, event); 3315aca268dfSMarc Zyngier } 3316aca268dfSMarc Zyngier 3317b48ac83dSMarc Zyngier static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq, 3318b48ac83dSMarc Zyngier unsigned int nr_irqs) 3319b48ac83dSMarc Zyngier { 3320b48ac83dSMarc Zyngier struct irq_data *d = irq_domain_get_irq_data(domain, virq); 3321b48ac83dSMarc Zyngier struct its_device *its_dev = irq_data_get_irq_chip_data(d); 33229791ec7dSMarc Zyngier struct its_node *its = its_dev->its; 3323b48ac83dSMarc Zyngier int i; 3324b48ac83dSMarc Zyngier 3325c9c96e30SMarc Zyngier bitmap_release_region(its_dev->event_map.lpi_map, 3326c9c96e30SMarc Zyngier its_get_event_id(irq_domain_get_irq_data(domain, virq)), 3327c9c96e30SMarc Zyngier get_count_order(nr_irqs)); 3328c9c96e30SMarc Zyngier 3329b48ac83dSMarc Zyngier for (i = 0; i < nr_irqs; i++) { 3330b48ac83dSMarc Zyngier struct irq_data *data = irq_domain_get_irq_data(domain, 3331b48ac83dSMarc Zyngier virq + i); 3332b48ac83dSMarc Zyngier /* Nuke the entry in the domain */ 33332da39949SMarc Zyngier irq_domain_reset_irq_data(data); 3334b48ac83dSMarc Zyngier } 3335b48ac83dSMarc Zyngier 33369791ec7dSMarc Zyngier mutex_lock(&its->dev_alloc_lock); 33379791ec7dSMarc Zyngier 33389791ec7dSMarc 
Zyngier /* 33399791ec7dSMarc Zyngier * If all interrupts have been freed, start mopping the 33409791ec7dSMarc Zyngier * floor. This is conditionned on the device not being shared. 33419791ec7dSMarc Zyngier */ 33429791ec7dSMarc Zyngier if (!its_dev->shared && 33439791ec7dSMarc Zyngier bitmap_empty(its_dev->event_map.lpi_map, 3344591e5becSMarc Zyngier its_dev->event_map.nr_lpis)) { 334538dd7c49SMarc Zyngier its_lpi_free(its_dev->event_map.lpi_map, 3346cf2be8baSMarc Zyngier its_dev->event_map.lpi_base, 3347cf2be8baSMarc Zyngier its_dev->event_map.nr_lpis); 3348b48ac83dSMarc Zyngier 3349b48ac83dSMarc Zyngier /* Unmap device/itt */ 3350b48ac83dSMarc Zyngier its_send_mapd(its_dev, 0); 3351b48ac83dSMarc Zyngier its_free_device(its_dev); 3352b48ac83dSMarc Zyngier } 3353b48ac83dSMarc Zyngier 33549791ec7dSMarc Zyngier mutex_unlock(&its->dev_alloc_lock); 33559791ec7dSMarc Zyngier 3356b48ac83dSMarc Zyngier irq_domain_free_irqs_parent(domain, virq, nr_irqs); 3357b48ac83dSMarc Zyngier } 3358b48ac83dSMarc Zyngier 3359b48ac83dSMarc Zyngier static const struct irq_domain_ops its_domain_ops = { 3360b48ac83dSMarc Zyngier .alloc = its_irq_domain_alloc, 3361b48ac83dSMarc Zyngier .free = its_irq_domain_free, 3362aca268dfSMarc Zyngier .activate = its_irq_domain_activate, 3363aca268dfSMarc Zyngier .deactivate = its_irq_domain_deactivate, 3364b48ac83dSMarc Zyngier }; 33654c21f3c2SMarc Zyngier 336620b3d54eSMarc Zyngier /* 336720b3d54eSMarc Zyngier * This is insane. 336820b3d54eSMarc Zyngier * 33690684c704SMarc Zyngier * If a GICv4.0 doesn't implement Direct LPIs (which is extremely 337020b3d54eSMarc Zyngier * likely), the only way to perform an invalidate is to use a fake 337120b3d54eSMarc Zyngier * device to issue an INV command, implying that the LPI has first 337220b3d54eSMarc Zyngier * been mapped to some event on that device. 
Since this is not exactly 337320b3d54eSMarc Zyngier * cheap, we try to keep that mapping around as long as possible, and 337420b3d54eSMarc Zyngier * only issue an UNMAP if we're short on available slots. 337520b3d54eSMarc Zyngier * 337620b3d54eSMarc Zyngier * Broken by design(tm). 33770684c704SMarc Zyngier * 33780684c704SMarc Zyngier * GICv4.1, on the other hand, mandates that we're able to invalidate 33790684c704SMarc Zyngier * by writing to a MMIO register. It doesn't implement the whole of 33800684c704SMarc Zyngier * DirectLPI, but that's good enough. And most of the time, we don't 33810684c704SMarc Zyngier * even have to invalidate anything, as the redistributor can be told 33820684c704SMarc Zyngier * whether to generate a doorbell or not (we thus leave it enabled, 33830684c704SMarc Zyngier * always). 338420b3d54eSMarc Zyngier */ 338520b3d54eSMarc Zyngier static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe) 338620b3d54eSMarc Zyngier { 33870684c704SMarc Zyngier /* GICv4.1 doesn't use a proxy, so nothing to do here */ 33880684c704SMarc Zyngier if (gic_rdists->has_rvpeid) 33890684c704SMarc Zyngier return; 33900684c704SMarc Zyngier 339120b3d54eSMarc Zyngier /* Already unmapped? */ 339220b3d54eSMarc Zyngier if (vpe->vpe_proxy_event == -1) 339320b3d54eSMarc Zyngier return; 339420b3d54eSMarc Zyngier 339520b3d54eSMarc Zyngier its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event); 339620b3d54eSMarc Zyngier vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL; 339720b3d54eSMarc Zyngier 339820b3d54eSMarc Zyngier /* 339920b3d54eSMarc Zyngier * We don't track empty slots at all, so let's move the 340020b3d54eSMarc Zyngier * next_victim pointer if we can quickly reuse that slot 340120b3d54eSMarc Zyngier * instead of nuking an existing entry. Not clear that this is 340220b3d54eSMarc Zyngier * always a win though, and this might just generate a ripple 340320b3d54eSMarc Zyngier * effect... Let's just hope VPEs don't migrate too often. 
340420b3d54eSMarc Zyngier */ 340520b3d54eSMarc Zyngier if (vpe_proxy.vpes[vpe_proxy.next_victim]) 340620b3d54eSMarc Zyngier vpe_proxy.next_victim = vpe->vpe_proxy_event; 340720b3d54eSMarc Zyngier 340820b3d54eSMarc Zyngier vpe->vpe_proxy_event = -1; 340920b3d54eSMarc Zyngier } 341020b3d54eSMarc Zyngier 341120b3d54eSMarc Zyngier static void its_vpe_db_proxy_unmap(struct its_vpe *vpe) 341220b3d54eSMarc Zyngier { 34130684c704SMarc Zyngier /* GICv4.1 doesn't use a proxy, so nothing to do here */ 34140684c704SMarc Zyngier if (gic_rdists->has_rvpeid) 34150684c704SMarc Zyngier return; 34160684c704SMarc Zyngier 341720b3d54eSMarc Zyngier if (!gic_rdists->has_direct_lpi) { 341820b3d54eSMarc Zyngier unsigned long flags; 341920b3d54eSMarc Zyngier 342020b3d54eSMarc Zyngier raw_spin_lock_irqsave(&vpe_proxy.lock, flags); 342120b3d54eSMarc Zyngier its_vpe_db_proxy_unmap_locked(vpe); 342220b3d54eSMarc Zyngier raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags); 342320b3d54eSMarc Zyngier } 342420b3d54eSMarc Zyngier } 342520b3d54eSMarc Zyngier 342620b3d54eSMarc Zyngier static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe) 342720b3d54eSMarc Zyngier { 34280684c704SMarc Zyngier /* GICv4.1 doesn't use a proxy, so nothing to do here */ 34290684c704SMarc Zyngier if (gic_rdists->has_rvpeid) 34300684c704SMarc Zyngier return; 34310684c704SMarc Zyngier 343220b3d54eSMarc Zyngier /* Already mapped? */ 343320b3d54eSMarc Zyngier if (vpe->vpe_proxy_event != -1) 343420b3d54eSMarc Zyngier return; 343520b3d54eSMarc Zyngier 343620b3d54eSMarc Zyngier /* This slot was already allocated. Kick the other VPE out. 
*/ 343720b3d54eSMarc Zyngier if (vpe_proxy.vpes[vpe_proxy.next_victim]) 343820b3d54eSMarc Zyngier its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]); 343920b3d54eSMarc Zyngier 344020b3d54eSMarc Zyngier /* Map the new VPE instead */ 344120b3d54eSMarc Zyngier vpe_proxy.vpes[vpe_proxy.next_victim] = vpe; 344220b3d54eSMarc Zyngier vpe->vpe_proxy_event = vpe_proxy.next_victim; 344320b3d54eSMarc Zyngier vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % vpe_proxy.dev->nr_ites; 344420b3d54eSMarc Zyngier 344520b3d54eSMarc Zyngier vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx; 344620b3d54eSMarc Zyngier its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event); 344720b3d54eSMarc Zyngier } 344820b3d54eSMarc Zyngier 3449958b90d1SMarc Zyngier static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to) 3450958b90d1SMarc Zyngier { 3451958b90d1SMarc Zyngier unsigned long flags; 3452958b90d1SMarc Zyngier struct its_collection *target_col; 3453958b90d1SMarc Zyngier 34540684c704SMarc Zyngier /* GICv4.1 doesn't use a proxy, so nothing to do here */ 34550684c704SMarc Zyngier if (gic_rdists->has_rvpeid) 34560684c704SMarc Zyngier return; 34570684c704SMarc Zyngier 3458958b90d1SMarc Zyngier if (gic_rdists->has_direct_lpi) { 3459958b90d1SMarc Zyngier void __iomem *rdbase; 3460958b90d1SMarc Zyngier 3461958b90d1SMarc Zyngier rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base; 3462958b90d1SMarc Zyngier gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR); 34632f4f064bSMarc Zyngier wait_for_syncr(rdbase); 3464958b90d1SMarc Zyngier 3465958b90d1SMarc Zyngier return; 3466958b90d1SMarc Zyngier } 3467958b90d1SMarc Zyngier 3468958b90d1SMarc Zyngier raw_spin_lock_irqsave(&vpe_proxy.lock, flags); 3469958b90d1SMarc Zyngier 3470958b90d1SMarc Zyngier its_vpe_db_proxy_map_locked(vpe); 3471958b90d1SMarc Zyngier 3472958b90d1SMarc Zyngier target_col = &vpe_proxy.dev->its->collections[to]; 3473958b90d1SMarc Zyngier 
its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event); 3474958b90d1SMarc Zyngier vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to; 3475958b90d1SMarc Zyngier 3476958b90d1SMarc Zyngier raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags); 3477958b90d1SMarc Zyngier } 3478958b90d1SMarc Zyngier 34793171a47aSMarc Zyngier static int its_vpe_set_affinity(struct irq_data *d, 34803171a47aSMarc Zyngier const struct cpumask *mask_val, 34813171a47aSMarc Zyngier bool force) 34823171a47aSMarc Zyngier { 34833171a47aSMarc Zyngier struct its_vpe *vpe = irq_data_get_irq_chip_data(d); 3484dd3f050aSMarc Zyngier int from, cpu = cpumask_first(mask_val); 34853171a47aSMarc Zyngier 34863171a47aSMarc Zyngier /* 34873171a47aSMarc Zyngier * Changing affinity is mega expensive, so let's be as lazy as 348820b3d54eSMarc Zyngier * we can and only do it if we really have to. Also, if mapped 3489958b90d1SMarc Zyngier * into the proxy device, we need to move the doorbell 3490958b90d1SMarc Zyngier * interrupt to its new location. 34913171a47aSMarc Zyngier */ 3492dd3f050aSMarc Zyngier if (vpe->col_idx == cpu) 3493dd3f050aSMarc Zyngier goto out; 3494958b90d1SMarc Zyngier 3495dd3f050aSMarc Zyngier from = vpe->col_idx; 34963171a47aSMarc Zyngier vpe->col_idx = cpu; 3497dd3f050aSMarc Zyngier 3498dd3f050aSMarc Zyngier /* 3499dd3f050aSMarc Zyngier * GICv4.1 allows us to skip VMOVP if moving to a cpu whose RD 3500dd3f050aSMarc Zyngier * is sharing its VPE table with the current one. 
3501dd3f050aSMarc Zyngier */ 3502dd3f050aSMarc Zyngier if (gic_data_rdist_cpu(cpu)->vpe_table_mask && 3503dd3f050aSMarc Zyngier cpumask_test_cpu(from, gic_data_rdist_cpu(cpu)->vpe_table_mask)) 3504dd3f050aSMarc Zyngier goto out; 3505dd3f050aSMarc Zyngier 35063171a47aSMarc Zyngier its_send_vmovp(vpe); 3507958b90d1SMarc Zyngier its_vpe_db_proxy_move(vpe, from, cpu); 35083171a47aSMarc Zyngier 3509dd3f050aSMarc Zyngier out: 351044c4c25eSMarc Zyngier irq_data_update_effective_affinity(d, cpumask_of(cpu)); 351144c4c25eSMarc Zyngier 35123171a47aSMarc Zyngier return IRQ_SET_MASK_OK_DONE; 35133171a47aSMarc Zyngier } 35143171a47aSMarc Zyngier 3515e643d803SMarc Zyngier static void its_vpe_schedule(struct its_vpe *vpe) 3516e643d803SMarc Zyngier { 351750c33097SRobin Murphy void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); 3518e643d803SMarc Zyngier u64 val; 3519e643d803SMarc Zyngier 3520e643d803SMarc Zyngier /* Schedule the VPE */ 3521e643d803SMarc Zyngier val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) & 3522e643d803SMarc Zyngier GENMASK_ULL(51, 12); 3523e643d803SMarc Zyngier val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK; 3524e643d803SMarc Zyngier val |= GICR_VPROPBASER_RaWb; 3525e643d803SMarc Zyngier val |= GICR_VPROPBASER_InnerShareable; 35265186a6ccSZenghui Yu gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); 3527e643d803SMarc Zyngier 3528e643d803SMarc Zyngier val = virt_to_phys(page_address(vpe->vpt_page)) & 3529e643d803SMarc Zyngier GENMASK_ULL(51, 16); 3530e643d803SMarc Zyngier val |= GICR_VPENDBASER_RaWaWb; 3531e643d803SMarc Zyngier val |= GICR_VPENDBASER_NonShareable; 3532e643d803SMarc Zyngier /* 3533e643d803SMarc Zyngier * There is no good way of finding out if the pending table is 3534e643d803SMarc Zyngier * empty as we can race against the doorbell interrupt very 3535e643d803SMarc Zyngier * easily. 
So in the end, vpe->pending_last is only an 3536e643d803SMarc Zyngier * indication that the vcpu has something pending, not one 3537e643d803SMarc Zyngier * that the pending table is empty. A good implementation 3538e643d803SMarc Zyngier * would be able to read its coarse map pretty quickly anyway, 3539e643d803SMarc Zyngier * making this a tolerable issue. 3540e643d803SMarc Zyngier */ 3541e643d803SMarc Zyngier val |= GICR_VPENDBASER_PendingLast; 3542e643d803SMarc Zyngier val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0; 3543e643d803SMarc Zyngier val |= GICR_VPENDBASER_Valid; 35445186a6ccSZenghui Yu gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); 3545e643d803SMarc Zyngier } 3546e643d803SMarc Zyngier 3547e643d803SMarc Zyngier static void its_vpe_deschedule(struct its_vpe *vpe) 3548e643d803SMarc Zyngier { 354950c33097SRobin Murphy void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); 3550e643d803SMarc Zyngier u64 val; 3551e643d803SMarc Zyngier 3552e64fab1aSMarc Zyngier val = its_clear_vpend_valid(vlpi_base, 0, 0); 3553e643d803SMarc Zyngier 3554e643d803SMarc Zyngier vpe->idai = !!(val & GICR_VPENDBASER_IDAI); 3555e643d803SMarc Zyngier vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast); 3556e643d803SMarc Zyngier } 3557e643d803SMarc Zyngier 355840619a2eSMarc Zyngier static void its_vpe_invall(struct its_vpe *vpe) 355940619a2eSMarc Zyngier { 356040619a2eSMarc Zyngier struct its_node *its; 356140619a2eSMarc Zyngier 356240619a2eSMarc Zyngier list_for_each_entry(its, &its_nodes, entry) { 35630dd57fedSMarc Zyngier if (!is_v4(its)) 356440619a2eSMarc Zyngier continue; 356540619a2eSMarc Zyngier 35662247e1bfSMarc Zyngier if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr]) 35672247e1bfSMarc Zyngier continue; 35682247e1bfSMarc Zyngier 35693c1cceebSMarc Zyngier /* 35703c1cceebSMarc Zyngier * Sending a VINVALL to a single ITS is enough, as all 35713c1cceebSMarc Zyngier * we need is to reach the redistributors. 
35723c1cceebSMarc Zyngier */ 357340619a2eSMarc Zyngier its_send_vinvall(its, vpe); 35743c1cceebSMarc Zyngier return; 357540619a2eSMarc Zyngier } 357640619a2eSMarc Zyngier } 357740619a2eSMarc Zyngier 3578e643d803SMarc Zyngier static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) 3579e643d803SMarc Zyngier { 3580e643d803SMarc Zyngier struct its_vpe *vpe = irq_data_get_irq_chip_data(d); 3581e643d803SMarc Zyngier struct its_cmd_info *info = vcpu_info; 3582e643d803SMarc Zyngier 3583e643d803SMarc Zyngier switch (info->cmd_type) { 3584e643d803SMarc Zyngier case SCHEDULE_VPE: 3585e643d803SMarc Zyngier its_vpe_schedule(vpe); 3586e643d803SMarc Zyngier return 0; 3587e643d803SMarc Zyngier 3588e643d803SMarc Zyngier case DESCHEDULE_VPE: 3589e643d803SMarc Zyngier its_vpe_deschedule(vpe); 3590e643d803SMarc Zyngier return 0; 3591e643d803SMarc Zyngier 35925e2f7642SMarc Zyngier case INVALL_VPE: 359340619a2eSMarc Zyngier its_vpe_invall(vpe); 35945e2f7642SMarc Zyngier return 0; 35955e2f7642SMarc Zyngier 3596e643d803SMarc Zyngier default: 3597e643d803SMarc Zyngier return -EINVAL; 3598e643d803SMarc Zyngier } 3599e643d803SMarc Zyngier } 3600e643d803SMarc Zyngier 360120b3d54eSMarc Zyngier static void its_vpe_send_cmd(struct its_vpe *vpe, 360220b3d54eSMarc Zyngier void (*cmd)(struct its_device *, u32)) 360320b3d54eSMarc Zyngier { 360420b3d54eSMarc Zyngier unsigned long flags; 360520b3d54eSMarc Zyngier 360620b3d54eSMarc Zyngier raw_spin_lock_irqsave(&vpe_proxy.lock, flags); 360720b3d54eSMarc Zyngier 360820b3d54eSMarc Zyngier its_vpe_db_proxy_map_locked(vpe); 360920b3d54eSMarc Zyngier cmd(vpe_proxy.dev, vpe->vpe_proxy_event); 361020b3d54eSMarc Zyngier 361120b3d54eSMarc Zyngier raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags); 361220b3d54eSMarc Zyngier } 361320b3d54eSMarc Zyngier 3614f6a91da7SMarc Zyngier static void its_vpe_send_inv(struct irq_data *d) 3615f6a91da7SMarc Zyngier { 3616f6a91da7SMarc Zyngier struct its_vpe *vpe = irq_data_get_irq_chip_data(d); 
361720b3d54eSMarc Zyngier 361820b3d54eSMarc Zyngier if (gic_rdists->has_direct_lpi) { 3619f6a91da7SMarc Zyngier void __iomem *rdbase; 3620f6a91da7SMarc Zyngier 3621425c09beSMarc Zyngier /* Target the redistributor this VPE is currently known on */ 3622f6a91da7SMarc Zyngier rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base; 3623425c09beSMarc Zyngier gic_write_lpir(d->parent_data->hwirq, rdbase + GICR_INVLPIR); 36242f4f064bSMarc Zyngier wait_for_syncr(rdbase); 362520b3d54eSMarc Zyngier } else { 362620b3d54eSMarc Zyngier its_vpe_send_cmd(vpe, its_send_inv); 362720b3d54eSMarc Zyngier } 3628f6a91da7SMarc Zyngier } 3629f6a91da7SMarc Zyngier 3630f6a91da7SMarc Zyngier static void its_vpe_mask_irq(struct irq_data *d) 3631f6a91da7SMarc Zyngier { 3632f6a91da7SMarc Zyngier /* 3633f6a91da7SMarc Zyngier * We need to unmask the LPI, which is described by the parent 3634f6a91da7SMarc Zyngier * irq_data. Instead of calling into the parent (which won't 3635f6a91da7SMarc Zyngier * exactly do the right thing, let's simply use the 3636f6a91da7SMarc Zyngier * parent_data pointer. Yes, I'm naughty. 3637f6a91da7SMarc Zyngier */ 3638f6a91da7SMarc Zyngier lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0); 3639f6a91da7SMarc Zyngier its_vpe_send_inv(d); 3640f6a91da7SMarc Zyngier } 3641f6a91da7SMarc Zyngier 3642f6a91da7SMarc Zyngier static void its_vpe_unmask_irq(struct irq_data *d) 3643f6a91da7SMarc Zyngier { 3644f6a91da7SMarc Zyngier /* Same hack as above... 
*/ 3645f6a91da7SMarc Zyngier lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED); 3646f6a91da7SMarc Zyngier its_vpe_send_inv(d); 3647f6a91da7SMarc Zyngier } 3648f6a91da7SMarc Zyngier 3649e57a3e28SMarc Zyngier static int its_vpe_set_irqchip_state(struct irq_data *d, 3650e57a3e28SMarc Zyngier enum irqchip_irq_state which, 3651e57a3e28SMarc Zyngier bool state) 3652e57a3e28SMarc Zyngier { 3653e57a3e28SMarc Zyngier struct its_vpe *vpe = irq_data_get_irq_chip_data(d); 3654e57a3e28SMarc Zyngier 3655e57a3e28SMarc Zyngier if (which != IRQCHIP_STATE_PENDING) 3656e57a3e28SMarc Zyngier return -EINVAL; 3657e57a3e28SMarc Zyngier 3658e57a3e28SMarc Zyngier if (gic_rdists->has_direct_lpi) { 3659e57a3e28SMarc Zyngier void __iomem *rdbase; 3660e57a3e28SMarc Zyngier 3661e57a3e28SMarc Zyngier rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base; 3662e57a3e28SMarc Zyngier if (state) { 3663e57a3e28SMarc Zyngier gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR); 3664e57a3e28SMarc Zyngier } else { 3665e57a3e28SMarc Zyngier gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR); 36662f4f064bSMarc Zyngier wait_for_syncr(rdbase); 3667e57a3e28SMarc Zyngier } 3668e57a3e28SMarc Zyngier } else { 3669e57a3e28SMarc Zyngier if (state) 3670e57a3e28SMarc Zyngier its_vpe_send_cmd(vpe, its_send_int); 3671e57a3e28SMarc Zyngier else 3672e57a3e28SMarc Zyngier its_vpe_send_cmd(vpe, its_send_clear); 3673e57a3e28SMarc Zyngier } 3674e57a3e28SMarc Zyngier 3675e57a3e28SMarc Zyngier return 0; 3676e57a3e28SMarc Zyngier } 3677e57a3e28SMarc Zyngier 36788fff27aeSMarc Zyngier static struct irq_chip its_vpe_irq_chip = { 36798fff27aeSMarc Zyngier .name = "GICv4-vpe", 3680f6a91da7SMarc Zyngier .irq_mask = its_vpe_mask_irq, 3681f6a91da7SMarc Zyngier .irq_unmask = its_vpe_unmask_irq, 3682f6a91da7SMarc Zyngier .irq_eoi = irq_chip_eoi_parent, 36833171a47aSMarc Zyngier .irq_set_affinity = its_vpe_set_affinity, 3684e57a3e28SMarc Zyngier .irq_set_irqchip_state = its_vpe_set_irqchip_state, 3685e643d803SMarc 
Zyngier .irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity, 36868fff27aeSMarc Zyngier }; 36878fff27aeSMarc Zyngier 3688d97c97baSMarc Zyngier static struct its_node *find_4_1_its(void) 3689d97c97baSMarc Zyngier { 3690d97c97baSMarc Zyngier static struct its_node *its = NULL; 3691d97c97baSMarc Zyngier 3692d97c97baSMarc Zyngier if (!its) { 3693d97c97baSMarc Zyngier list_for_each_entry(its, &its_nodes, entry) { 3694d97c97baSMarc Zyngier if (is_v4_1(its)) 3695d97c97baSMarc Zyngier return its; 3696d97c97baSMarc Zyngier } 3697d97c97baSMarc Zyngier 3698d97c97baSMarc Zyngier /* Oops? */ 3699d97c97baSMarc Zyngier its = NULL; 3700d97c97baSMarc Zyngier } 3701d97c97baSMarc Zyngier 3702d97c97baSMarc Zyngier return its; 3703d97c97baSMarc Zyngier } 3704d97c97baSMarc Zyngier 3705d97c97baSMarc Zyngier static void its_vpe_4_1_send_inv(struct irq_data *d) 3706d97c97baSMarc Zyngier { 3707d97c97baSMarc Zyngier struct its_vpe *vpe = irq_data_get_irq_chip_data(d); 3708d97c97baSMarc Zyngier struct its_node *its; 3709d97c97baSMarc Zyngier 3710d97c97baSMarc Zyngier /* 3711d97c97baSMarc Zyngier * GICv4.1 wants doorbells to be invalidated using the 3712d97c97baSMarc Zyngier * INVDB command in order to be broadcast to all RDs. Send 3713d97c97baSMarc Zyngier * it to the first valid ITS, and let the HW do its magic. 
3714d97c97baSMarc Zyngier */ 3715d97c97baSMarc Zyngier its = find_4_1_its(); 3716d97c97baSMarc Zyngier if (its) 3717d97c97baSMarc Zyngier its_send_invdb(its, vpe); 3718d97c97baSMarc Zyngier } 3719d97c97baSMarc Zyngier 3720d97c97baSMarc Zyngier static void its_vpe_4_1_mask_irq(struct irq_data *d) 3721d97c97baSMarc Zyngier { 3722d97c97baSMarc Zyngier lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0); 3723d97c97baSMarc Zyngier its_vpe_4_1_send_inv(d); 3724d97c97baSMarc Zyngier } 3725d97c97baSMarc Zyngier 3726d97c97baSMarc Zyngier static void its_vpe_4_1_unmask_irq(struct irq_data *d) 3727d97c97baSMarc Zyngier { 3728d97c97baSMarc Zyngier lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED); 3729d97c97baSMarc Zyngier its_vpe_4_1_send_inv(d); 3730d97c97baSMarc Zyngier } 3731d97c97baSMarc Zyngier 373291bf6395SMarc Zyngier static void its_vpe_4_1_schedule(struct its_vpe *vpe, 373391bf6395SMarc Zyngier struct its_cmd_info *info) 373491bf6395SMarc Zyngier { 373591bf6395SMarc Zyngier void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); 373691bf6395SMarc Zyngier u64 val = 0; 373791bf6395SMarc Zyngier 373891bf6395SMarc Zyngier /* Schedule the VPE */ 373991bf6395SMarc Zyngier val |= GICR_VPENDBASER_Valid; 374091bf6395SMarc Zyngier val |= info->g0en ? GICR_VPENDBASER_4_1_VGRP0EN : 0; 374191bf6395SMarc Zyngier val |= info->g1en ? 
GICR_VPENDBASER_4_1_VGRP1EN : 0; 374291bf6395SMarc Zyngier val |= FIELD_PREP(GICR_VPENDBASER_4_1_VPEID, vpe->vpe_id); 374391bf6395SMarc Zyngier 37445186a6ccSZenghui Yu gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); 374591bf6395SMarc Zyngier } 374691bf6395SMarc Zyngier 3747e64fab1aSMarc Zyngier static void its_vpe_4_1_deschedule(struct its_vpe *vpe, 3748e64fab1aSMarc Zyngier struct its_cmd_info *info) 3749e64fab1aSMarc Zyngier { 3750e64fab1aSMarc Zyngier void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); 3751e64fab1aSMarc Zyngier u64 val; 3752e64fab1aSMarc Zyngier 3753e64fab1aSMarc Zyngier if (info->req_db) { 3754e64fab1aSMarc Zyngier /* 3755e64fab1aSMarc Zyngier * vPE is going to block: make the vPE non-resident with 3756e64fab1aSMarc Zyngier * PendingLast clear and DB set. The GIC guarantees that if 3757e64fab1aSMarc Zyngier * we read-back PendingLast clear, then a doorbell will be 3758e64fab1aSMarc Zyngier * delivered when an interrupt comes. 3759e64fab1aSMarc Zyngier */ 3760e64fab1aSMarc Zyngier val = its_clear_vpend_valid(vlpi_base, 3761e64fab1aSMarc Zyngier GICR_VPENDBASER_PendingLast, 3762e64fab1aSMarc Zyngier GICR_VPENDBASER_4_1_DB); 3763e64fab1aSMarc Zyngier vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast); 3764e64fab1aSMarc Zyngier } else { 3765e64fab1aSMarc Zyngier /* 3766e64fab1aSMarc Zyngier * We're not blocking, so just make the vPE non-resident 3767e64fab1aSMarc Zyngier * with PendingLast set, indicating that we'll be back. 
3768e64fab1aSMarc Zyngier */ 3769e64fab1aSMarc Zyngier val = its_clear_vpend_valid(vlpi_base, 3770e64fab1aSMarc Zyngier 0, 3771e64fab1aSMarc Zyngier GICR_VPENDBASER_PendingLast); 3772e64fab1aSMarc Zyngier vpe->pending_last = true; 3773e64fab1aSMarc Zyngier } 3774e64fab1aSMarc Zyngier } 3775e64fab1aSMarc Zyngier 3776b4a4bd0fSMarc Zyngier static void its_vpe_4_1_invall(struct its_vpe *vpe) 3777b4a4bd0fSMarc Zyngier { 3778b4a4bd0fSMarc Zyngier void __iomem *rdbase; 3779b4a4bd0fSMarc Zyngier u64 val; 3780b4a4bd0fSMarc Zyngier 3781b4a4bd0fSMarc Zyngier val = GICR_INVALLR_V; 3782b4a4bd0fSMarc Zyngier val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id); 3783b4a4bd0fSMarc Zyngier 3784b4a4bd0fSMarc Zyngier /* Target the redistributor this vPE is currently known on */ 3785b4a4bd0fSMarc Zyngier rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base; 3786b4a4bd0fSMarc Zyngier gic_write_lpir(val, rdbase + GICR_INVALLR); 3787b4a4bd0fSMarc Zyngier } 3788b4a4bd0fSMarc Zyngier 378929c647f3SMarc Zyngier static int its_vpe_4_1_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) 379029c647f3SMarc Zyngier { 379191bf6395SMarc Zyngier struct its_vpe *vpe = irq_data_get_irq_chip_data(d); 379229c647f3SMarc Zyngier struct its_cmd_info *info = vcpu_info; 379329c647f3SMarc Zyngier 379429c647f3SMarc Zyngier switch (info->cmd_type) { 379529c647f3SMarc Zyngier case SCHEDULE_VPE: 379691bf6395SMarc Zyngier its_vpe_4_1_schedule(vpe, info); 379729c647f3SMarc Zyngier return 0; 379829c647f3SMarc Zyngier 379929c647f3SMarc Zyngier case DESCHEDULE_VPE: 3800e64fab1aSMarc Zyngier its_vpe_4_1_deschedule(vpe, info); 380129c647f3SMarc Zyngier return 0; 380229c647f3SMarc Zyngier 380329c647f3SMarc Zyngier case INVALL_VPE: 3804b4a4bd0fSMarc Zyngier its_vpe_4_1_invall(vpe); 380529c647f3SMarc Zyngier return 0; 380629c647f3SMarc Zyngier 380729c647f3SMarc Zyngier default: 380829c647f3SMarc Zyngier return -EINVAL; 380929c647f3SMarc Zyngier } 381029c647f3SMarc Zyngier } 381129c647f3SMarc Zyngier 
381229c647f3SMarc Zyngier static struct irq_chip its_vpe_4_1_irq_chip = { 381329c647f3SMarc Zyngier .name = "GICv4.1-vpe", 3814d97c97baSMarc Zyngier .irq_mask = its_vpe_4_1_mask_irq, 3815d97c97baSMarc Zyngier .irq_unmask = its_vpe_4_1_unmask_irq, 381629c647f3SMarc Zyngier .irq_eoi = irq_chip_eoi_parent, 381729c647f3SMarc Zyngier .irq_set_affinity = its_vpe_set_affinity, 381829c647f3SMarc Zyngier .irq_set_vcpu_affinity = its_vpe_4_1_set_vcpu_affinity, 381929c647f3SMarc Zyngier }; 382029c647f3SMarc Zyngier 38217d75bbb4SMarc Zyngier static int its_vpe_id_alloc(void) 38227d75bbb4SMarc Zyngier { 382332bd44dcSShanker Donthineni return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL); 38247d75bbb4SMarc Zyngier } 38257d75bbb4SMarc Zyngier 38267d75bbb4SMarc Zyngier static void its_vpe_id_free(u16 id) 38277d75bbb4SMarc Zyngier { 38287d75bbb4SMarc Zyngier ida_simple_remove(&its_vpeid_ida, id); 38297d75bbb4SMarc Zyngier } 38307d75bbb4SMarc Zyngier 38317d75bbb4SMarc Zyngier static int its_vpe_init(struct its_vpe *vpe) 38327d75bbb4SMarc Zyngier { 38337d75bbb4SMarc Zyngier struct page *vpt_page; 38347d75bbb4SMarc Zyngier int vpe_id; 38357d75bbb4SMarc Zyngier 38367d75bbb4SMarc Zyngier /* Allocate vpe_id */ 38377d75bbb4SMarc Zyngier vpe_id = its_vpe_id_alloc(); 38387d75bbb4SMarc Zyngier if (vpe_id < 0) 38397d75bbb4SMarc Zyngier return vpe_id; 38407d75bbb4SMarc Zyngier 38417d75bbb4SMarc Zyngier /* Allocate VPT */ 38427d75bbb4SMarc Zyngier vpt_page = its_allocate_pending_table(GFP_KERNEL); 38437d75bbb4SMarc Zyngier if (!vpt_page) { 38447d75bbb4SMarc Zyngier its_vpe_id_free(vpe_id); 38457d75bbb4SMarc Zyngier return -ENOMEM; 38467d75bbb4SMarc Zyngier } 38477d75bbb4SMarc Zyngier 38487d75bbb4SMarc Zyngier if (!its_alloc_vpe_table(vpe_id)) { 38497d75bbb4SMarc Zyngier its_vpe_id_free(vpe_id); 385034f8eb92SNianyao Tang its_free_pending_table(vpt_page); 38517d75bbb4SMarc Zyngier return -ENOMEM; 38527d75bbb4SMarc Zyngier } 38537d75bbb4SMarc Zyngier 38547d75bbb4SMarc Zyngier 
vpe->vpe_id = vpe_id; 38557d75bbb4SMarc Zyngier vpe->vpt_page = vpt_page; 385664edfaa9SMarc Zyngier if (gic_rdists->has_rvpeid) 385764edfaa9SMarc Zyngier atomic_set(&vpe->vmapp_count, 0); 385864edfaa9SMarc Zyngier else 385920b3d54eSMarc Zyngier vpe->vpe_proxy_event = -1; 38607d75bbb4SMarc Zyngier 38617d75bbb4SMarc Zyngier return 0; 38627d75bbb4SMarc Zyngier } 38637d75bbb4SMarc Zyngier 38647d75bbb4SMarc Zyngier static void its_vpe_teardown(struct its_vpe *vpe) 38657d75bbb4SMarc Zyngier { 386620b3d54eSMarc Zyngier its_vpe_db_proxy_unmap(vpe); 38677d75bbb4SMarc Zyngier its_vpe_id_free(vpe->vpe_id); 38687d75bbb4SMarc Zyngier its_free_pending_table(vpe->vpt_page); 38697d75bbb4SMarc Zyngier } 38707d75bbb4SMarc Zyngier 38717d75bbb4SMarc Zyngier static void its_vpe_irq_domain_free(struct irq_domain *domain, 38727d75bbb4SMarc Zyngier unsigned int virq, 38737d75bbb4SMarc Zyngier unsigned int nr_irqs) 38747d75bbb4SMarc Zyngier { 38757d75bbb4SMarc Zyngier struct its_vm *vm = domain->host_data; 38767d75bbb4SMarc Zyngier int i; 38777d75bbb4SMarc Zyngier 38787d75bbb4SMarc Zyngier irq_domain_free_irqs_parent(domain, virq, nr_irqs); 38797d75bbb4SMarc Zyngier 38807d75bbb4SMarc Zyngier for (i = 0; i < nr_irqs; i++) { 38817d75bbb4SMarc Zyngier struct irq_data *data = irq_domain_get_irq_data(domain, 38827d75bbb4SMarc Zyngier virq + i); 38837d75bbb4SMarc Zyngier struct its_vpe *vpe = irq_data_get_irq_chip_data(data); 38847d75bbb4SMarc Zyngier 38857d75bbb4SMarc Zyngier BUG_ON(vm != vpe->its_vm); 38867d75bbb4SMarc Zyngier 38877d75bbb4SMarc Zyngier clear_bit(data->hwirq, vm->db_bitmap); 38887d75bbb4SMarc Zyngier its_vpe_teardown(vpe); 38897d75bbb4SMarc Zyngier irq_domain_reset_irq_data(data); 38907d75bbb4SMarc Zyngier } 38917d75bbb4SMarc Zyngier 38927d75bbb4SMarc Zyngier if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) { 389338dd7c49SMarc Zyngier its_lpi_free(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis); 38947d75bbb4SMarc Zyngier its_free_prop_table(vm->vprop_page); 38957d75bbb4SMarc 
Zyngier } 38967d75bbb4SMarc Zyngier } 38977d75bbb4SMarc Zyngier 38987d75bbb4SMarc Zyngier static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, 38997d75bbb4SMarc Zyngier unsigned int nr_irqs, void *args) 39007d75bbb4SMarc Zyngier { 390129c647f3SMarc Zyngier struct irq_chip *irqchip = &its_vpe_irq_chip; 39027d75bbb4SMarc Zyngier struct its_vm *vm = args; 39037d75bbb4SMarc Zyngier unsigned long *bitmap; 39047d75bbb4SMarc Zyngier struct page *vprop_page; 39057d75bbb4SMarc Zyngier int base, nr_ids, i, err = 0; 39067d75bbb4SMarc Zyngier 39077d75bbb4SMarc Zyngier BUG_ON(!vm); 39087d75bbb4SMarc Zyngier 390938dd7c49SMarc Zyngier bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids); 39107d75bbb4SMarc Zyngier if (!bitmap) 39117d75bbb4SMarc Zyngier return -ENOMEM; 39127d75bbb4SMarc Zyngier 39137d75bbb4SMarc Zyngier if (nr_ids < nr_irqs) { 391438dd7c49SMarc Zyngier its_lpi_free(bitmap, base, nr_ids); 39157d75bbb4SMarc Zyngier return -ENOMEM; 39167d75bbb4SMarc Zyngier } 39177d75bbb4SMarc Zyngier 39187d75bbb4SMarc Zyngier vprop_page = its_allocate_prop_table(GFP_KERNEL); 39197d75bbb4SMarc Zyngier if (!vprop_page) { 392038dd7c49SMarc Zyngier its_lpi_free(bitmap, base, nr_ids); 39217d75bbb4SMarc Zyngier return -ENOMEM; 39227d75bbb4SMarc Zyngier } 39237d75bbb4SMarc Zyngier 39247d75bbb4SMarc Zyngier vm->db_bitmap = bitmap; 39257d75bbb4SMarc Zyngier vm->db_lpi_base = base; 39267d75bbb4SMarc Zyngier vm->nr_db_lpis = nr_ids; 39277d75bbb4SMarc Zyngier vm->vprop_page = vprop_page; 39287d75bbb4SMarc Zyngier 392929c647f3SMarc Zyngier if (gic_rdists->has_rvpeid) 393029c647f3SMarc Zyngier irqchip = &its_vpe_4_1_irq_chip; 393129c647f3SMarc Zyngier 39327d75bbb4SMarc Zyngier for (i = 0; i < nr_irqs; i++) { 39337d75bbb4SMarc Zyngier vm->vpes[i]->vpe_db_lpi = base + i; 39347d75bbb4SMarc Zyngier err = its_vpe_init(vm->vpes[i]); 39357d75bbb4SMarc Zyngier if (err) 39367d75bbb4SMarc Zyngier break; 39377d75bbb4SMarc Zyngier err = 
its_irq_gic_domain_alloc(domain, virq + i, 39387d75bbb4SMarc Zyngier vm->vpes[i]->vpe_db_lpi); 39397d75bbb4SMarc Zyngier if (err) 39407d75bbb4SMarc Zyngier break; 39417d75bbb4SMarc Zyngier irq_domain_set_hwirq_and_chip(domain, virq + i, i, 394229c647f3SMarc Zyngier irqchip, vm->vpes[i]); 39437d75bbb4SMarc Zyngier set_bit(i, bitmap); 39447d75bbb4SMarc Zyngier } 39457d75bbb4SMarc Zyngier 39467d75bbb4SMarc Zyngier if (err) { 39477d75bbb4SMarc Zyngier if (i > 0) 39487d75bbb4SMarc Zyngier its_vpe_irq_domain_free(domain, virq, i - 1); 39497d75bbb4SMarc Zyngier 395038dd7c49SMarc Zyngier its_lpi_free(bitmap, base, nr_ids); 39517d75bbb4SMarc Zyngier its_free_prop_table(vprop_page); 39527d75bbb4SMarc Zyngier } 39537d75bbb4SMarc Zyngier 39547d75bbb4SMarc Zyngier return err; 39557d75bbb4SMarc Zyngier } 39567d75bbb4SMarc Zyngier 395772491643SThomas Gleixner static int its_vpe_irq_domain_activate(struct irq_domain *domain, 3958702cb0a0SThomas Gleixner struct irq_data *d, bool reserve) 3959eb78192bSMarc Zyngier { 3960eb78192bSMarc Zyngier struct its_vpe *vpe = irq_data_get_irq_chip_data(d); 396140619a2eSMarc Zyngier struct its_node *its; 3962eb78192bSMarc Zyngier 39632247e1bfSMarc Zyngier /* If we use the list map, we issue VMAPP on demand... 
*/ 39642247e1bfSMarc Zyngier if (its_list_map) 39656ef930f2SMarc Zyngier return 0; 3966eb78192bSMarc Zyngier 3967eb78192bSMarc Zyngier /* Map the VPE to the first possible CPU */ 3968eb78192bSMarc Zyngier vpe->col_idx = cpumask_first(cpu_online_mask); 396940619a2eSMarc Zyngier 397040619a2eSMarc Zyngier list_for_each_entry(its, &its_nodes, entry) { 39710dd57fedSMarc Zyngier if (!is_v4(its)) 397240619a2eSMarc Zyngier continue; 397340619a2eSMarc Zyngier 397475fd951bSMarc Zyngier its_send_vmapp(its, vpe, true); 397540619a2eSMarc Zyngier its_send_vinvall(its, vpe); 397640619a2eSMarc Zyngier } 397740619a2eSMarc Zyngier 397844c4c25eSMarc Zyngier irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx)); 397944c4c25eSMarc Zyngier 398072491643SThomas Gleixner return 0; 3981eb78192bSMarc Zyngier } 3982eb78192bSMarc Zyngier 3983eb78192bSMarc Zyngier static void its_vpe_irq_domain_deactivate(struct irq_domain *domain, 3984eb78192bSMarc Zyngier struct irq_data *d) 3985eb78192bSMarc Zyngier { 3986eb78192bSMarc Zyngier struct its_vpe *vpe = irq_data_get_irq_chip_data(d); 398775fd951bSMarc Zyngier struct its_node *its; 3988eb78192bSMarc Zyngier 39892247e1bfSMarc Zyngier /* 39902247e1bfSMarc Zyngier * If we use the list map, we unmap the VPE once no VLPIs are 39912247e1bfSMarc Zyngier * associated with the VM. 
39922247e1bfSMarc Zyngier */ 39932247e1bfSMarc Zyngier if (its_list_map) 39942247e1bfSMarc Zyngier return; 39952247e1bfSMarc Zyngier 399675fd951bSMarc Zyngier list_for_each_entry(its, &its_nodes, entry) { 39970dd57fedSMarc Zyngier if (!is_v4(its)) 399875fd951bSMarc Zyngier continue; 399975fd951bSMarc Zyngier 400075fd951bSMarc Zyngier its_send_vmapp(its, vpe, false); 400175fd951bSMarc Zyngier } 4002eb78192bSMarc Zyngier } 4003eb78192bSMarc Zyngier 40048fff27aeSMarc Zyngier static const struct irq_domain_ops its_vpe_domain_ops = { 40057d75bbb4SMarc Zyngier .alloc = its_vpe_irq_domain_alloc, 40067d75bbb4SMarc Zyngier .free = its_vpe_irq_domain_free, 4007eb78192bSMarc Zyngier .activate = its_vpe_irq_domain_activate, 4008eb78192bSMarc Zyngier .deactivate = its_vpe_irq_domain_deactivate, 40098fff27aeSMarc Zyngier }; 40108fff27aeSMarc Zyngier 40114559fbb3SYun Wu static int its_force_quiescent(void __iomem *base) 40124559fbb3SYun Wu { 40134559fbb3SYun Wu u32 count = 1000000; /* 1s */ 40144559fbb3SYun Wu u32 val; 40154559fbb3SYun Wu 40164559fbb3SYun Wu val = readl_relaxed(base + GITS_CTLR); 40177611da86SDavid Daney /* 40187611da86SDavid Daney * GIC architecture specification requires the ITS to be both 40197611da86SDavid Daney * disabled and quiescent for writes to GITS_BASER<n> or 40207611da86SDavid Daney * GITS_CBASER to not have UNPREDICTABLE results. 
40217611da86SDavid Daney */ 40227611da86SDavid Daney if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE)) 40234559fbb3SYun Wu return 0; 40244559fbb3SYun Wu 40254559fbb3SYun Wu /* Disable the generation of all interrupts to this ITS */ 4026d51c4b4dSMarc Zyngier val &= ~(GITS_CTLR_ENABLE | GITS_CTLR_ImDe); 40274559fbb3SYun Wu writel_relaxed(val, base + GITS_CTLR); 40284559fbb3SYun Wu 40294559fbb3SYun Wu /* Poll GITS_CTLR and wait until ITS becomes quiescent */ 40304559fbb3SYun Wu while (1) { 40314559fbb3SYun Wu val = readl_relaxed(base + GITS_CTLR); 40324559fbb3SYun Wu if (val & GITS_CTLR_QUIESCENT) 40334559fbb3SYun Wu return 0; 40344559fbb3SYun Wu 40354559fbb3SYun Wu count--; 40364559fbb3SYun Wu if (!count) 40374559fbb3SYun Wu return -EBUSY; 40384559fbb3SYun Wu 40394559fbb3SYun Wu cpu_relax(); 40404559fbb3SYun Wu udelay(1); 40414559fbb3SYun Wu } 40424559fbb3SYun Wu } 40434559fbb3SYun Wu 40449d111d49SArd Biesheuvel static bool __maybe_unused its_enable_quirk_cavium_22375(void *data) 404594100970SRobert Richter { 404694100970SRobert Richter struct its_node *its = data; 404794100970SRobert Richter 4048576a8342SMarc Zyngier /* erratum 22375: only alloc 8MB table size (20 bits) */ 4049576a8342SMarc Zyngier its->typer &= ~GITS_TYPER_DEVBITS; 4050576a8342SMarc Zyngier its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, 20 - 1); 405194100970SRobert Richter its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375; 40529d111d49SArd Biesheuvel 40539d111d49SArd Biesheuvel return true; 405494100970SRobert Richter } 405594100970SRobert Richter 40569d111d49SArd Biesheuvel static bool __maybe_unused its_enable_quirk_cavium_23144(void *data) 4057fbf8f40eSGanapatrao Kulkarni { 4058fbf8f40eSGanapatrao Kulkarni struct its_node *its = data; 4059fbf8f40eSGanapatrao Kulkarni 4060fbf8f40eSGanapatrao Kulkarni its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144; 40619d111d49SArd Biesheuvel 40629d111d49SArd Biesheuvel return true; 4063fbf8f40eSGanapatrao Kulkarni } 4064fbf8f40eSGanapatrao Kulkarni 
/* QDF2400 erratum 0065: advertise the real (16-byte) ITE size. */
static bool __maybe_unused its_enable_quirk_qdf2400_e0065(void *data)
{
	struct its_node *its = data;

	/* On QDF2400, the size of the ITE is 16Bytes */
	its->typer &= ~GITS_TYPER_ITT_ENTRY_SIZE;
	its->typer |= FIELD_PREP(GITS_TYPER_ITT_ENTRY_SIZE, 16 - 1);

	return true;
}

/* MSI doorbell address for devices behind the Synquacer 'pre-ITS'. */
static u64 its_irq_get_msi_base_pre_its(struct its_device *its_dev)
{
	struct its_node *its = its_dev->its;

	/*
	 * The Socionext Synquacer SoC has a so-called 'pre-ITS',
	 * which maps 32-bit writes targeted at a separate window of
	 * size '4 << device_id_bits' onto writes to GITS_TRANSLATER
	 * with device ID taken from bits [device_id_bits + 1:2] of
	 * the window offset.
	 */
	return its->pre_its_base + (its_dev->device_id << 2);
}

/*
 * Synquacer pre-ITS quirk: read the pre-ITS window from the firmware
 * node, redirect MSI doorbells through it, and shrink DEVBITS to what
 * the window can encode. Returns false if the property is absent.
 */
static bool __maybe_unused its_enable_quirk_socionext_synquacer(void *data)
{
	struct its_node *its = data;
	u32 pre_its_window[2];
	u32 ids;

	if (!fwnode_property_read_u32_array(its->fwnode_handle,
					    "socionext,synquacer-pre-its",
					    pre_its_window,
					    ARRAY_SIZE(pre_its_window))) {

		its->pre_its_base = pre_its_window[0];
		its->get_msi_base = its_irq_get_msi_base_pre_its;

		/* window size is '4 << device_id_bits' -> ids = log2(size) - 2 */
		ids = ilog2(pre_its_window[1]) - 2;
		if (device_ids(its) > ids) {
			its->typer &= ~GITS_TYPER_DEVBITS;
			its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, ids - 1);
		}

		/* the pre-ITS breaks isolation, so disable MSI remapping */
		its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_MSI_REMAP;
		return true;
	}
	return false;
}

/* Hip07 erratum 161600802: redistributor VLPI page sits 128K off. */
static bool __maybe_unused its_enable_quirk_hip07_161600802(void *data)
{
	struct its_node *its = data;

	/*
	 * Hip07 insists on using the wrong address for the VLPI
	 * page. Trick it into doing the right thing...
	 */
	its->vlpi_redist_offset = SZ_128K;
	return true;
}

/* Quirk table matched against GITS_IIDR at probe time. */
static const struct gic_quirk its_quirks[] = {
#ifdef CONFIG_CAVIUM_ERRATUM_22375
	{
		.desc	= "ITS: Cavium errata 22375, 24313",
		.iidr	= 0xa100034c,	/* ThunderX pass 1.x */
		.mask	= 0xffff0fff,
		.init	= its_enable_quirk_cavium_22375,
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23144
	{
		.desc	= "ITS: Cavium erratum 23144",
		.iidr	= 0xa100034c,	/* ThunderX pass 1.x */
		.mask	= 0xffff0fff,
		.init	= its_enable_quirk_cavium_23144,
	},
#endif
#ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065
	{
		.desc	= "ITS: QDF2400 erratum 0065",
		.iidr	= 0x00001070, /* QDF2400 ITS rev 1.x */
		.mask	= 0xffffffff,
		.init	= its_enable_quirk_qdf2400_e0065,
	},
#endif
#ifdef CONFIG_SOCIONEXT_SYNQUACER_PREITS
	{
		/*
		 * The Socionext Synquacer SoC incorporates ARM's own GIC-500
		 * implementation, but with a 'pre-ITS' added that requires
		 * special handling in software.
		 */
		.desc	= "ITS: Socionext Synquacer pre-ITS",
		.iidr	= 0x0001143b,
		.mask	= 0xffffffff,
		.init	= its_enable_quirk_socionext_synquacer,
	},
#endif
#ifdef CONFIG_HISILICON_ERRATUM_161600802
	{
		.desc	= "ITS: Hip07 erratum 161600802",
		.iidr	= 0x00000004,
		.mask	= 0xffffffff,
		.init	= its_enable_quirk_hip07_161600802,
	},
#endif
	{
	}
};

/* Apply any quirk whose (iidr & mask) matches this ITS implementation. */
static void its_enable_quirks(struct its_node *its)
{
	u32 iidr = readl_relaxed(its->base + GITS_IIDR);

	gic_enable_quirks(iidr, its_quirks, its);
}

/*
 * Syscore suspend hook: save GITS_CTLR/GITS_CBASER and quiesce every
 * ITS flagged for save/restore. On failure, re-enable the ITSes that
 * were already disabled (walked in reverse) and return the error.
 */
static int its_save_disable(void)
{
	struct its_node *its;
	int err = 0;

	raw_spin_lock(&its_lock);
	list_for_each_entry(its, &its_nodes, entry) {
		void __iomem *base;

		if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
			continue;

		base = its->base;
		its->ctlr_save = readl_relaxed(base + GITS_CTLR);
		err = its_force_quiescent(base);
		if (err) {
			pr_err("ITS@%pa: failed to quiesce: %d\n",
			       &its->phys_base, err);
			/* put the control register back before unwinding */
			writel_relaxed(its->ctlr_save, base + GITS_CTLR);
			goto err;
		}

		its->cbaser_save = gits_read_cbaser(base + GITS_CBASER);
	}

err:
	if (err) {
		list_for_each_entry_continue_reverse(its, &its_nodes, entry) {
			void __iomem *base;

			if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
				continue;

			base = its->base;
			writel_relaxed(its->ctlr_save, base + GITS_CTLR);
		}
	}
	raw_spin_unlock(&its_lock);

	return err;
}

/*
 * Syscore resume hook: re-quiesce each saved ITS, then restore CBASER,
 * CWRITER, the cached GITS_BASER<n> values and finally GITS_CTLR, in
 * that order as required by the architecture.
 */
static void its_restore_enable(void)
{
	struct its_node *its;
	int ret;

	raw_spin_lock(&its_lock);
	list_for_each_entry(its, &its_nodes, entry) {
		void __iomem *base;
		int i;

		if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
			continue;

		base = its->base;

		/*
		 * Make sure that the ITS is disabled. If it fails to quiesce,
		 * don't restore it since writing to CBASER or BASER<n>
		 * registers is undefined according to the GIC v3 ITS
		 * Specification.
		 */
		ret = its_force_quiescent(base);
		if (ret) {
			pr_err("ITS@%pa: failed to quiesce on resume: %d\n",
			       &its->phys_base, ret);
			continue;
		}

		gits_write_cbaser(its->cbaser_save, base + GITS_CBASER);

		/*
		 * Writing CBASER resets CREADR to 0, so make CWRITER and
		 * cmd_write line up with it.
		 */
		its->cmd_write = its->cmd_base;
		gits_write_cwriter(0, base + GITS_CWRITER);

		/* Restore GITS_BASER from the value cache. */
		for (i = 0; i < GITS_BASER_NR_REGS; i++) {
			struct its_baser *baser = &its->tables[i];

			if (!(baser->val & GITS_BASER_VALID))
				continue;

			its_write_baser(its, baser, baser->val);
		}
		writel_relaxed(its->ctlr_save, base + GITS_CTLR);

		/*
		 * Reinit the collection if it's stored in the ITS. This is
		 * indicated by the col_id being less than the HCC field.
		 * CID < HCC as specified in the GIC v3 Documentation.
		 */
		if (its->collections[smp_processor_id()].col_id <
		    GITS_TYPER_HCC(gic_read_typer(base + GITS_TYPER)))
			its_cpu_init_collection(its);
	}
	raw_spin_unlock(&its_lock);
}

static struct syscore_ops its_syscore_ops = {
	.suspend = its_save_disable,
	.resume = its_restore_enable,
};

/*
 * Create the per-ITS MSI irq domain (a tree domain parented to the GIC
 * domain) and attach the msi_domain_info that carries the ITS pointer.
 */
static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
{
	struct irq_domain *inner_domain;
	struct msi_domain_info *info;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	inner_domain = irq_domain_create_tree(handle, &its_domain_ops, its);
	if (!inner_domain) {
		kfree(info);
		return -ENOMEM;
	}

	inner_domain->parent = its_parent;
	irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
	inner_domain->flags |= its->msi_domain_flags;
	info->ops = &its_msi_domain_ops;
	info->data = its;
	inner_domain->host_data = info;

	return 0;
}

/*
 * Set up the GICv4 VPE infrastructure. Without DirectLPI support, a
 * proxy ITS device (one slot per possible CPU) is created to perform
 * doorbell invalidations on behalf of VPEs.
 */
static int its_init_vpe_domain(void)
{
	struct its_node *its;
	u32 devid;
	int entries;

	if (gic_rdists->has_direct_lpi) {
		pr_info("ITS: Using DirectLPI for VPE invalidation\n");
		return 0;
	}

	/* Any ITS will do, even if not v4 */
	its = list_first_entry(&its_nodes, struct its_node, entry);

	entries = roundup_pow_of_two(nr_cpu_ids);
	vpe_proxy.vpes = kcalloc(entries, sizeof(*vpe_proxy.vpes),
				 GFP_KERNEL);
	if (!vpe_proxy.vpes) {
		pr_err("ITS: Can't allocate GICv4 proxy device array\n");
		return -ENOMEM;
	}

	/* Use the last possible DevID */
	devid = GENMASK(device_ids(its) - 1, 0);
	vpe_proxy.dev = its_create_device(its, devid, entries, false);
	if (!vpe_proxy.dev) {
		kfree(vpe_proxy.vpes);
		pr_err("ITS: Can't allocate GICv4 proxy device\n");
		return -ENOMEM;
	}

	BUG_ON(entries > vpe_proxy.dev->nr_ites);

	raw_spin_lock_init(&vpe_proxy.lock);
	vpe_proxy.next_victim = 0;
	pr_info("ITS: Allocated DevID %x as GICv4 proxy device (%d slots)\n",
		devid, vpe_proxy.dev->nr_ites);

	return 0;
}

/*
 * Claim an ITSList number for this ITS and program it into GITS_CTLR.
 * If the field reads back differently, the HW has a fixed number; adopt
 * it. Returns the ITS number, or -EINVAL if none is free or the number
 * is already taken.
 */
static int __init its_compute_its_list_map(struct resource *res,
					   void __iomem *its_base)
{
	int its_number;
	u32 ctlr;

	/*
	 * This is assumed to be done early enough that we're
	 * guaranteed to be single-threaded, hence no
	 * locking. Should this change, we should address
	 * this.
	 */
	its_number = find_first_zero_bit(&its_list_map, GICv4_ITS_LIST_MAX);
	if (its_number >= GICv4_ITS_LIST_MAX) {
		pr_err("ITS@%pa: No ITSList entry available!\n",
		       &res->start);
		return -EINVAL;
	}

	ctlr = readl_relaxed(its_base + GITS_CTLR);
	ctlr &= ~GITS_CTLR_ITS_NUMBER;
	ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT;
	writel_relaxed(ctlr, its_base + GITS_CTLR);
	ctlr = readl_relaxed(its_base + GITS_CTLR);
	if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) {
		its_number = ctlr & GITS_CTLR_ITS_NUMBER;
		its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT;
	}

	if (test_and_set_bit(its_number, &its_list_map)) {
		pr_err("ITS@%pa: Duplicate ITSList entry %d\n",
		       &res->start, its_number);
		return -EINVAL;
	}

	return its_number;
}
/*
 * Probe and bring up one ITS instance: map its registers, sanity-check
 * GITS_PIDR2, quiesce it, allocate the command queue and tables,
 * program GITS_CBASER (falling back to non-cacheable + SW flushing if
 * the HW won't honour shareability), enable it, and register its MSI
 * irq domain. On success the ITS is added to its_nodes.
 */
static int __init its_probe_one(struct resource *res,
				struct fwnode_handle *handle, int numa_node)
{
	struct its_node *its;
	void __iomem *its_base;
	u32 val, ctlr;
	u64 baser, tmp, typer;
	struct page *page;
	int err;

	its_base = ioremap(res->start, resource_size(res));
	if (!its_base) {
		pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start);
		return -ENOMEM;
	}

	/* Only GICv3 (0x30) and GICv4 (0x40) architecture revisions */
	val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK;
	if (val != 0x30 && val != 0x40) {
		pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start);
		err = -ENODEV;
		goto out_unmap;
	}

	/* Tables may only be programmed while disabled and quiescent */
	err = its_force_quiescent(its_base);
	if (err) {
		pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start);
		goto out_unmap;
	}

	pr_info("ITS %pR\n", res);

	its = kzalloc(sizeof(*its), GFP_KERNEL);
	if (!its) {
		err = -ENOMEM;
		goto out_unmap;
	}

	raw_spin_lock_init(&its->lock);
	mutex_init(&its->dev_alloc_lock);
	INIT_LIST_HEAD(&its->entry);
	INIT_LIST_HEAD(&its->its_device_list);
	typer = gic_read_typer(its_base + GITS_TYPER);
	its->typer = typer;
	its->base = its_base;
	its->phys_base = res->start;
	if (is_v4(its)) {
		/* Without VMOVP, an ITSList number must be claimed */
		if (!(typer & GITS_TYPER_VMOVP)) {
			err = its_compute_its_list_map(res, its_base);
			if (err < 0)
				goto out_free_its;

			its->list_nr = err;

			pr_info("ITS@%pa: Using ITS number %d\n",
				&res->start, err);
		} else {
			pr_info("ITS@%pa: Single VMOVP capable\n", &res->start);
		}

		if (is_v4_1(its)) {
			u32 svpet = FIELD_GET(GITS_TYPER_SVPET, typer);
			its->mpidr = readl_relaxed(its_base + GITS_MPIDR);

			pr_info("ITS@%pa: Using GICv4.1 mode %08x %08x\n",
				&res->start, its->mpidr, svpet);
		}
	}

	its->numa_node = numa_node;

	/* Command queue, allocated on the ITS's NUMA node */
	page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
				get_order(ITS_CMD_QUEUE_SZ));
	if (!page) {
		err = -ENOMEM;
		goto out_free_its;
	}
	its->cmd_base = (void *)page_address(page);
	its->cmd_write = its->cmd_base;
	its->fwnode_handle = handle;
	its->get_msi_base = its_irq_get_msi_base;
	its->msi_domain_flags = IRQ_DOMAIN_FLAG_MSI_REMAP;

	/* Quirks may rewrite typer/get_msi_base before tables are sized */
	its_enable_quirks(its);

	err = its_alloc_tables(its);
	if (err)
		goto out_free_cmd;

	err = its_alloc_collections(its);
	if (err)
		goto out_free_tables;

	baser = (virt_to_phys(its->cmd_base)	|
		 GITS_CBASER_RaWaWb		|
		 GITS_CBASER_InnerShareable	|
		 (ITS_CMD_QUEUE_SZ / SZ_4K - 1)	|
		 GITS_CBASER_VALID);

	gits_write_cbaser(baser, its->base + GITS_CBASER);
	tmp = gits_read_cbaser(its->base + GITS_CBASER);

	/* Read back: the HW may downgrade the shareability we asked for */
	if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) {
		if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) {
			/*
			 * The HW reports non-shareable, we must
			 * remove the cacheability attributes as
			 * well.
			 */
			baser &= ~(GITS_CBASER_SHAREABILITY_MASK |
				   GITS_CBASER_CACHEABILITY_MASK);
			baser |= GITS_CBASER_nC;
			gits_write_cbaser(baser, its->base + GITS_CBASER);
		}
		pr_info("ITS: using cache flushing for cmd queue\n");
		its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
	}

	gits_write_cwriter(0, its->base + GITS_CWRITER);
	ctlr = readl_relaxed(its->base + GITS_CTLR);
	ctlr |= GITS_CTLR_ENABLE;
	if (is_v4(its))
		ctlr |= GITS_CTLR_ImDe;
	writel_relaxed(ctlr, its->base + GITS_CTLR);

	/* Collections held in HW (HCC != 0) need save/restore on suspend */
	if (GITS_TYPER_HCC(typer))
		its->flags |= ITS_FLAGS_SAVE_SUSPEND_STATE;

	err = its_init_domain(handle, its);
	if (err)
		goto out_free_tables;

	raw_spin_lock(&its_lock);
	list_add(&its->entry, &its_nodes);
	raw_spin_unlock(&its_lock);

	return 0;

out_free_tables:
	its_free_tables(its);
out_free_cmd:
	free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
out_free_its:
	kfree(its);
out_unmap:
	iounmap(its_base);
	pr_err("ITS@%pa: failed probing (%d)\n", &res->start, err);
	return err;
}

/* Does this CPU's redistributor support physical LPIs? */
static bool gic_rdists_supports_plpis(void)
{
	return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS);
}

/*
 * Ensure this CPU's redistributor has LPIs disabled before they are
 * (re)configured. A no-op for hotplug re-entry or preallocated tables;
 * otherwise LPIs were left on by a previous agent and we can only
 * attempt damage control.
 */
static int redist_disable_lpis(void)
{
	void __iomem *rbase = gic_data_rdist_rd_base();
	u64 timeout = USEC_PER_SEC;
	u64 val;

	if (!gic_rdists_supports_plpis()) {
		pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
		return -ENXIO;
	}

	val = readl_relaxed(rbase + GICR_CTLR);
	if (!(val & GICR_CTLR_ENABLE_LPIS))
		return 0;

	/*
	 * If coming via a CPU hotplug event, we don't need to disable
	 * LPIs before trying to re-enable them. They are already
	 * configured and all is well in the world.
	 *
	 * If running with preallocated tables, there is nothing to do.
	 */
	if (gic_data_rdist()->lpi_enabled ||
	    (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED))
		return 0;

	/*
	 * From that point on, we only try to do some damage control.
	 */
	pr_warn("GICv3: CPU%d: Booted with LPIs enabled, memory probably corrupted\n",
		smp_processor_id());
	add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);

	/* Disable LPIs */
	val &= ~GICR_CTLR_ENABLE_LPIS;
	writel_relaxed(val, rbase + GICR_CTLR);

	/* Make sure any change to GICR_CTLR is observable by the GIC */
	dsb(sy);

	/*
	 * Software must observe RWP==0 after clearing GICR_CTLR.EnableLPIs
	 * from 1 to 0 before programming GICR_PEND{PROP}BASER registers.
	 * Error out if we time out waiting for RWP to clear.
	 */
	while (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_RWP) {
		if (!timeout) {
			pr_err("CPU%d: Timeout while disabling LPIs\n",
			       smp_processor_id());
			return -ETIMEDOUT;
		}
		udelay(1);
		timeout--;
	}

	/*
	 * After it has been written to 1, it is IMPLEMENTATION
	 * DEFINED whether GICR_CTLR.EnableLPI becomes RES1 or can be
	 * cleared to 0. Error out if clearing the bit failed.
	 */
	if (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_ENABLE_LPIS) {
		pr_err("CPU%d: Failed to disable LPIs\n", smp_processor_id());
		return -EBUSY;
	}

	return 0;
}

/*
 * Per-CPU ITS bring-up: disable stale LPIs, then initialise this CPU's
 * LPI tables and collection mappings. No-op when no ITS was probed.
 */
int its_cpu_init(void)
{
	if (!list_empty(&its_nodes)) {
		int ret;

		ret = redist_disable_lpis();
		if (ret)
			return ret;

		its_cpu_init_lpis();
		its_cpu_init_collections();
	}

	return 0;
}

static const struct of_device_id its_device_id[] = {
	{	.compatible	= "arm,gic-v3-its",	},
	{},
};

/*
 * DT probe: walk all "arm,gic-v3-its" nodes under @node.
 * (Function continues beyond this chunk of the file.)
 */
static int __init its_of_probe(struct device_node *node)
{
	struct device_node *np;
	struct resource res;

	for (np = of_find_matching_node(node, its_device_id); np;
	     np = of_find_matching_node(np, its_device_id)) {
		if (!of_device_is_available(np))
			continue;
		if (!of_property_read_bool(np, "msi-controller")) {
			pr_warn("%pOF: no msi-controller property, ITS ignored\n",
Herring np); 4652d14ae5e6STomasz Nowicki continue; 4653d14ae5e6STomasz Nowicki } 4654d14ae5e6STomasz Nowicki 4655db40f0a7STomasz Nowicki if (of_address_to_resource(np, 0, &res)) { 4656e81f54c6SRob Herring pr_warn("%pOF: no regs?\n", np); 4657db40f0a7STomasz Nowicki continue; 46584c21f3c2SMarc Zyngier } 46594c21f3c2SMarc Zyngier 4660db40f0a7STomasz Nowicki its_probe_one(&res, &np->fwnode, of_node_to_nid(np)); 4661db40f0a7STomasz Nowicki } 4662db40f0a7STomasz Nowicki return 0; 4663db40f0a7STomasz Nowicki } 4664db40f0a7STomasz Nowicki 46653f010cf1STomasz Nowicki #ifdef CONFIG_ACPI 46663f010cf1STomasz Nowicki 46673f010cf1STomasz Nowicki #define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K) 46683f010cf1STomasz Nowicki 4669d1ce263fSRobert Richter #ifdef CONFIG_ACPI_NUMA 4670dbd2b826SGanapatrao Kulkarni struct its_srat_map { 4671dbd2b826SGanapatrao Kulkarni /* numa node id */ 4672dbd2b826SGanapatrao Kulkarni u32 numa_node; 4673dbd2b826SGanapatrao Kulkarni /* GIC ITS ID */ 4674dbd2b826SGanapatrao Kulkarni u32 its_id; 4675dbd2b826SGanapatrao Kulkarni }; 4676dbd2b826SGanapatrao Kulkarni 4677fdf6e7a8SHanjun Guo static struct its_srat_map *its_srat_maps __initdata; 4678dbd2b826SGanapatrao Kulkarni static int its_in_srat __initdata; 4679dbd2b826SGanapatrao Kulkarni 4680dbd2b826SGanapatrao Kulkarni static int __init acpi_get_its_numa_node(u32 its_id) 4681dbd2b826SGanapatrao Kulkarni { 4682dbd2b826SGanapatrao Kulkarni int i; 4683dbd2b826SGanapatrao Kulkarni 4684dbd2b826SGanapatrao Kulkarni for (i = 0; i < its_in_srat; i++) { 4685dbd2b826SGanapatrao Kulkarni if (its_id == its_srat_maps[i].its_id) 4686dbd2b826SGanapatrao Kulkarni return its_srat_maps[i].numa_node; 4687dbd2b826SGanapatrao Kulkarni } 4688dbd2b826SGanapatrao Kulkarni return NUMA_NO_NODE; 4689dbd2b826SGanapatrao Kulkarni } 4690dbd2b826SGanapatrao Kulkarni 469160574d1eSKeith Busch static int __init gic_acpi_match_srat_its(union acpi_subtable_headers *header, 4692fdf6e7a8SHanjun Guo const unsigned long end) 4693fdf6e7a8SHanjun Guo { 
4694fdf6e7a8SHanjun Guo return 0; 4695fdf6e7a8SHanjun Guo } 4696fdf6e7a8SHanjun Guo 469760574d1eSKeith Busch static int __init gic_acpi_parse_srat_its(union acpi_subtable_headers *header, 4698dbd2b826SGanapatrao Kulkarni const unsigned long end) 4699dbd2b826SGanapatrao Kulkarni { 4700dbd2b826SGanapatrao Kulkarni int node; 4701dbd2b826SGanapatrao Kulkarni struct acpi_srat_gic_its_affinity *its_affinity; 4702dbd2b826SGanapatrao Kulkarni 4703dbd2b826SGanapatrao Kulkarni its_affinity = (struct acpi_srat_gic_its_affinity *)header; 4704dbd2b826SGanapatrao Kulkarni if (!its_affinity) 4705dbd2b826SGanapatrao Kulkarni return -EINVAL; 4706dbd2b826SGanapatrao Kulkarni 4707dbd2b826SGanapatrao Kulkarni if (its_affinity->header.length < sizeof(*its_affinity)) { 4708dbd2b826SGanapatrao Kulkarni pr_err("SRAT: Invalid header length %d in ITS affinity\n", 4709dbd2b826SGanapatrao Kulkarni its_affinity->header.length); 4710dbd2b826SGanapatrao Kulkarni return -EINVAL; 4711dbd2b826SGanapatrao Kulkarni } 4712dbd2b826SGanapatrao Kulkarni 4713dbd2b826SGanapatrao Kulkarni node = acpi_map_pxm_to_node(its_affinity->proximity_domain); 4714dbd2b826SGanapatrao Kulkarni 4715dbd2b826SGanapatrao Kulkarni if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) { 4716dbd2b826SGanapatrao Kulkarni pr_err("SRAT: Invalid NUMA node %d in ITS affinity\n", node); 4717dbd2b826SGanapatrao Kulkarni return 0; 4718dbd2b826SGanapatrao Kulkarni } 4719dbd2b826SGanapatrao Kulkarni 4720dbd2b826SGanapatrao Kulkarni its_srat_maps[its_in_srat].numa_node = node; 4721dbd2b826SGanapatrao Kulkarni its_srat_maps[its_in_srat].its_id = its_affinity->its_id; 4722dbd2b826SGanapatrao Kulkarni its_in_srat++; 4723dbd2b826SGanapatrao Kulkarni pr_info("SRAT: PXM %d -> ITS %d -> Node %d\n", 4724dbd2b826SGanapatrao Kulkarni its_affinity->proximity_domain, its_affinity->its_id, node); 4725dbd2b826SGanapatrao Kulkarni 4726dbd2b826SGanapatrao Kulkarni return 0; 4727dbd2b826SGanapatrao Kulkarni } 4728dbd2b826SGanapatrao Kulkarni 
/*
 * Two-pass SRAT scan: first count the GIC ITS affinity entries (via
 * the no-op gic_acpi_match_srat_its callback), then allocate
 * its_srat_maps and fill it in.  Allocation failure only disables
 * the ITS -> NUMA mapping; it is not fatal.
 */
static void __init acpi_table_parse_srat_its(void)
{
	int count;

	count = acpi_table_parse_entries(ACPI_SIG_SRAT,
			sizeof(struct acpi_table_srat),
			ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
			gic_acpi_match_srat_its, 0);
	if (count <= 0)
		return;

	its_srat_maps = kmalloc_array(count, sizeof(struct its_srat_map),
				      GFP_KERNEL);
	if (!its_srat_maps) {
		pr_warn("SRAT: Failed to allocate memory for its_srat_maps!\n");
		return;
	}

	acpi_table_parse_entries(ACPI_SIG_SRAT,
			sizeof(struct acpi_table_srat),
			ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
			gic_acpi_parse_srat_its, 0);
}

/* free the its_srat_maps after ITS probing */
static void __init acpi_its_srat_maps_free(void)
{
	kfree(its_srat_maps);
}
#else
static void __init acpi_table_parse_srat_its(void)	{ }
static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; }
static void __init acpi_its_srat_maps_free(void) { }
#endif

/*
 * Probe one MADT GIC translator (ITS) entry: carve out its 128K MMIO
 * window, allocate a fwnode domain token, register it with IORT, and
 * hand everything to its_probe_one().  On failure the IORT token and
 * fwnode are unwound in reverse order.
 */
static int __init gic_acpi_parse_madt_its(union acpi_subtable_headers *header,
					  const unsigned long end)
{
	struct acpi_madt_generic_translator *its_entry;
	struct fwnode_handle *dom_handle;
	struct resource res;
	int err;

	its_entry = (struct acpi_madt_generic_translator *)header;
	memset(&res, 0, sizeof(res));
	res.start = its_entry->base_address;
	res.end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1;
	res.flags = IORESOURCE_MEM;

	dom_handle = irq_domain_alloc_fwnode(&res.start);
	if (!dom_handle) {
		pr_err("ITS@%pa: Unable to allocate GICv3 ITS domain token\n",
		       &res.start);
		return -ENOMEM;
	}

	err = iort_register_domain_token(its_entry->translation_id, res.start,
					 dom_handle);
	if (err) {
		pr_err("ITS@%pa: Unable to register GICv3 ITS domain token (ITS ID %d) to IORT\n",
		       &res.start, its_entry->translation_id);
		goto dom_err;
	}

	err = its_probe_one(&res, dom_handle,
			    acpi_get_its_numa_node(its_entry->translation_id));
	if (!err)
		return 0;

	iort_deregister_domain_token(its_entry->translation_id);
dom_err:
	irq_domain_free_fwnode(dom_handle);
	return err;
}

/* ACPI flow: SRAT (NUMA) first, then MADT ITS entries, then free the maps */
static void __init its_acpi_probe(void)
{
	acpi_table_parse_srat_its();
	acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
			      gic_acpi_parse_madt_its, 0);
	acpi_its_srat_maps_free();
}
#else
static void __init its_acpi_probe(void) { }
#endif

/*
 * Top-level ITS init: probe all ITSs via DT or ACPI (depending on
 * what @handle is), then allocate the LPI tables.  GICv4 (VLPI)
 * support is enabled when at least one ITS is v4-capable AND the
 * redistributors report VLPI support; if the v4 domains fail to
 * initialize, v4 is disabled rather than failing the whole init.
 *
 * Returns -ENXIO when no ITS was found, otherwise 0 or the LPI table
 * allocation error.
 */
int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
		    struct irq_domain *parent_domain)
{
	struct device_node *of_node;
	struct its_node *its;
	bool has_v4 = false;
	int err;

	gic_rdists = rdists;

	its_parent = parent_domain;
	of_node = to_of_node(handle);
	if (of_node)
		its_of_probe(of_node);
	else
		its_acpi_probe();

	if (list_empty(&its_nodes)) {
		pr_warn("ITS: No ITS available, not enabling LPIs\n");
		return -ENXIO;
	}

	err = allocate_lpi_tables();
	if (err)
		return err;

	list_for_each_entry(its, &its_nodes, entry)
		has_v4 |= is_v4(its);

	/* bitwise & on two bools is well-defined and upstream-intentional */
	if (has_v4 & rdists->has_vlpis) {
		if (its_init_vpe_domain() ||
		    its_init_v4(parent_domain, &its_vpe_domain_ops)) {
			rdists->has_vlpis = false;
			pr_err("ITS: Disabling GICv4 support\n");
		}
	}

	register_syscore_ops(&its_syscore_ops);

	return 0;
}