1cc2d3216SMarc Zyngier /* 2d7276b80SMarc Zyngier * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved. 3cc2d3216SMarc Zyngier * Author: Marc Zyngier <marc.zyngier@arm.com> 4cc2d3216SMarc Zyngier * 5cc2d3216SMarc Zyngier * This program is free software; you can redistribute it and/or modify 6cc2d3216SMarc Zyngier * it under the terms of the GNU General Public License version 2 as 7cc2d3216SMarc Zyngier * published by the Free Software Foundation. 8cc2d3216SMarc Zyngier * 9cc2d3216SMarc Zyngier * This program is distributed in the hope that it will be useful, 10cc2d3216SMarc Zyngier * but WITHOUT ANY WARRANTY; without even the implied warranty of 11cc2d3216SMarc Zyngier * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12cc2d3216SMarc Zyngier * GNU General Public License for more details. 13cc2d3216SMarc Zyngier * 14cc2d3216SMarc Zyngier * You should have received a copy of the GNU General Public License 15cc2d3216SMarc Zyngier * along with this program. If not, see <http://www.gnu.org/licenses/>. 
 */

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/bitmap.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/percpu.h>
#include <linux/slab.h>

#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/irqchip/arm-gic-v4.h>

#include <asm/cputype.h>
#include <asm/exception.h>

#include "irq-gic-common.h"

/* Per-ITS quirk/feature flags, stored in its_node::flags. */
#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING		(1ULL << 0)
#define ITS_FLAGS_WORKAROUND_CAVIUM_22375	(1ULL << 1)
#define ITS_FLAGS_WORKAROUND_CAVIUM_23144	(1ULL << 2)

/* Per-redistributor flags (usage not visible in this chunk). */
#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING	(1 << 0)

/*
 * Number of implemented LPI ID bits; sizes the PROPBASE/PENDBASE
 * allocations below. NOTE(review): assigned elsewhere in this file.
 */
static u32 lpi_id_bits;

/*
 * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to
 * deal with (one configuration byte per
interrupt). PENDBASE has to
 * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
 */
#define LPI_NRBITS		lpi_id_bits
#define LPI_PROPBASE_SZ		ALIGN(BIT(LPI_NRBITS), SZ_64K)
#define LPI_PENDBASE_SZ		ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)

/* Default LPI priority programmed in the property table */
#define LPI_PROP_DEFAULT_PRIO	0xa0

/*
 * Collection structure - just an ID, and a redistributor address to
 * ping. We use one per CPU as a bag of interrupts assigned to this
 * CPU.
 */
struct its_collection {
	u64			target_address;
	u16			col_id;
};

/*
 * The ITS_BASER structure - contains memory information, cached
 * value of BASER register configuration and ITS page size.
 */
struct its_baser {
	void		*base;	/* virtual address of the table memory */
	u64		val;	/* cached GITS_BASERn value */
	u32		order;	/* allocation order of the table */
	u32		psz;	/* ITS page size used for this table */
};

struct its_device;

/*
 * The ITS structure - contains most of the infrastructure, with the
 * top-level MSI domain, the command queue, the collections, and the
 * list of devices writing to it.
 */
struct its_node {
	raw_spinlock_t		lock;
	struct list_head	entry;
	void __iomem		*base;		/* ITS register frame */
	phys_addr_t		phys_base;
	struct its_cmd_block	*cmd_base;	/* start of the command queue */
	struct its_cmd_block	*cmd_write;	/* next free command slot */
	struct its_baser	tables[GITS_BASER_NR_REGS];
	struct its_collection	*collections;
	struct fwnode_handle	*fwnode_handle;
	u64			(*get_msi_base)(struct its_device *its_dev);
	struct list_head	its_device_list;
	u64			flags;		/* ITS_FLAGS_* above */
	unsigned long		list_nr;
	u32			ite_size;
	u32			device_ids;
	int			numa_node;
	unsigned int		msi_domain_flags;
	u32			pre_its_base;	/* for Socionext Synquacer */
	bool			is_v4;		/* GICv4 (VLPI) capable */
	int			vlpi_redist_offset;
};

#define ITS_ITT_ALIGN		SZ_256

/* Convert page order to size in bytes */
#define PAGE_ORDER_TO_SIZE(o)	(PAGE_SIZE << (o))

/* Per-device event -> LPI/collection/VLPI mapping state */
struct event_lpi_map {
	unsigned long		*lpi_map;	/* bitmap of allocated LPIs */
	u16			*col_map;	/* event -> collection ID */
	irq_hw_number_t		lpi_base;
	int			nr_lpis;
	struct mutex		vlpi_lock;	/* protects the VLPI state below */
	struct its_vm		*vm;
	struct its_vlpi_map	*vlpi_maps;
	int			nr_vlpis;
};

/*
 * The ITS view of a
device - belongs to an ITS, owns an interrupt
 * translation table, and a list of interrupts. If some of its
 * LPIs are injected into a guest (GICv4), the event_map.vm field
 * indicates which one.
 */
struct its_device {
	struct list_head	entry;
	struct its_node		*its;
	struct event_lpi_map	event_map;
	void			*itt;		/* interrupt translation table */
	u32			nr_ites;
	u32			device_id;
};

/*
 * GICv4 doorbell proxy state.
 * NOTE(review): the code using this is not visible in this chunk.
 */
static struct {
	raw_spinlock_t		lock;
	struct its_device	*dev;
	struct its_vpe		**vpes;
	int			next_victim;
} vpe_proxy;

static LIST_HEAD(its_nodes);
static DEFINE_SPINLOCK(its_lock);
static struct rdists *gic_rdists;
static struct irq_domain *its_parent;

/* Bitmap of ITSList IDs in use, and VMOVP sequencing state */
static unsigned long its_list_map;
static u16 vmovp_seq_num;
static DEFINE_RAW_SPINLOCK(vmovp_lock);

static DEFINE_IDA(its_vpeid_ida);

#define gic_data_rdist()		(raw_cpu_ptr(gic_rdists->rdist))
#define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
#define gic_data_rdist_vlpi_base()	(gic_data_rdist_rd_base() + SZ_128K)

/*
 * Translate a (device, event) pair into the collection this event is
 * currently mapped to, via the per-device col_map.
 */
static struct its_collection *dev_event_to_col(struct its_device *its_dev,
					       u32 event)
{
	struct its_node *its = its_dev->its;

	return its->collections + its_dev->event_map.col_map[event];
}

/*
 * ITS command descriptors - parameters to be encoded in a command
 * block.
 */
struct its_cmd_desc {
	union {
		struct {
			struct its_device *dev;
			u32 event_id;
		} its_inv_cmd;

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_clear_cmd;

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_int_cmd;

		struct {
			struct its_device *dev;
			int valid;
		} its_mapd_cmd;

		struct {
			struct its_collection *col;
			int valid;
		} its_mapc_cmd;

		struct {
			struct its_device *dev;
			u32 phys_id;
			u32 event_id;
		} its_mapti_cmd;

		struct {
			struct its_device *dev;
			struct its_collection *col;
			u32 event_id;
		} its_movi_cmd;

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_discard_cmd;

		struct {
			struct its_collection *col;
		} its_invall_cmd;

		struct {
			struct its_vpe *vpe;
		} its_vinvall_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_collection *col;
			bool valid;
		} its_vmapp_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_device *dev;
			u32 virt_id;
			u32 event_id;
			bool db_enabled;
		} its_vmapti_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_device *dev;
			u32 event_id;
			bool db_enabled;
		} its_vmovi_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_collection *col;
			u16 seq_num;
			u16 its_list;
		} its_vmovp_cmd;
	};
};

/*
 * The ITS command block, which is what the ITS actually parses.
 */
struct its_cmd_block {
	u64	raw_cmd[4];	/* 32-byte command, four 64-bit words */
};

#define ITS_CMD_QUEUE_SZ		SZ_64K
#define ITS_CMD_QUEUE_NR_ENTRIES	(ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block))

/*
 * Command builders: encode a descriptor into a command block and
 * return the object (collection or vPE) a subsequent sync should
 * target, or NULL if no sync is needed.
 */
typedef struct its_collection *(*its_cmd_builder_t)(struct its_node *,
						    struct its_cmd_block *,
						    struct its_cmd_desc *);

typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_node *,
					      struct its_cmd_block *,
					      struct its_cmd_desc *);

/* Set bits [h:l] of *raw_cmd to val, leaving the other bits untouched */
static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l)
{
	u64 mask = GENMASK_ULL(h, l);
	*raw_cmd &= ~mask;
	*raw_cmd |= (val << l) & mask;
}

/*
 * Field encoders below: each one programs a single architectural
 * bit-field of the command block.
 */
static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
{
	its_mask_encode(&cmd->raw_cmd[0], cmd_nr, 7, 0);
}

static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
{
	its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32);
}

static void its_encode_event_id(struct its_cmd_block *cmd, u32 id)
{
	its_mask_encode(&cmd->raw_cmd[1], id, 31, 0);
}

static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id)
{
	its_mask_encode(&cmd->raw_cmd[1], phys_id, 63, 32);
}

static void its_encode_size(struct its_cmd_block *cmd, u8 size)
{
	its_mask_encode(&cmd->raw_cmd[1], size, 4, 0);
}

/* ITT address is 256-byte aligned: bits [50:8] of the PA */
static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
{
	its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 50, 8);
}

static void its_encode_valid(struct its_cmd_block *cmd, int valid)
{
	its_mask_encode(&cmd->raw_cmd[2], !!valid, 63, 63);
}

/* Target (redistributor) address is 64kB aligned: bits [50:16] */
static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
{
	its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 50, 16);
}

static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
{
	its_mask_encode(&cmd->raw_cmd[2], col, 15, 0);
}

static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid)
{
	its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32);
}

static void its_encode_virt_id(struct its_cmd_block *cmd, u32 virt_id)
{
	its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0);
}

static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id)
{
	its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32);
}

static void its_encode_db_valid(struct its_cmd_block *cmd, bool db_valid)
{
	its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0);
}

static void its_encode_seq_num(struct its_cmd_block *cmd, u16 seq_num)
{
	its_mask_encode(&cmd->raw_cmd[0], seq_num, 47, 32);
}

static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list)
{
	its_mask_encode(&cmd->raw_cmd[1], its_list, 15, 0);
}

/* vPE pending table address is 64kB aligned: bits [50:16] */
static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa)
{
	its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 50, 16);
}

static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
{
	its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0);
}

/* The ITS consumes commands in little-endian format */
static inline void its_fixup_cmd(struct its_cmd_block *cmd)
{
	/* Let's fixup BE commands */
	cmd->raw_cmd[0] = cpu_to_le64(cmd->raw_cmd[0]);
	cmd->raw_cmd[1] = cpu_to_le64(cmd->raw_cmd[1]);
	cmd->raw_cmd[2] = cpu_to_le64(cmd->raw_cmd[2]);
	cmd->raw_cmd[3] = cpu_to_le64(cmd->raw_cmd[3]);
}

/* MAPD: map a device ID to its interrupt translation table (ITT) */
static struct its_collection *its_build_mapd_cmd(struct its_node *its,
38367047f90SMarc Zyngier struct its_cmd_block *cmd, 384cc2d3216SMarc Zyngier struct its_cmd_desc *desc) 385cc2d3216SMarc Zyngier { 386cc2d3216SMarc Zyngier unsigned long itt_addr; 387c8481267SMarc Zyngier u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites); 388cc2d3216SMarc Zyngier 389cc2d3216SMarc Zyngier itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt); 390cc2d3216SMarc Zyngier itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN); 391cc2d3216SMarc Zyngier 392cc2d3216SMarc Zyngier its_encode_cmd(cmd, GITS_CMD_MAPD); 393cc2d3216SMarc Zyngier its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id); 394cc2d3216SMarc Zyngier its_encode_size(cmd, size - 1); 395cc2d3216SMarc Zyngier its_encode_itt(cmd, itt_addr); 396cc2d3216SMarc Zyngier its_encode_valid(cmd, desc->its_mapd_cmd.valid); 397cc2d3216SMarc Zyngier 398cc2d3216SMarc Zyngier its_fixup_cmd(cmd); 399cc2d3216SMarc Zyngier 400591e5becSMarc Zyngier return NULL; 401cc2d3216SMarc Zyngier } 402cc2d3216SMarc Zyngier 40367047f90SMarc Zyngier static struct its_collection *its_build_mapc_cmd(struct its_node *its, 40467047f90SMarc Zyngier struct its_cmd_block *cmd, 405cc2d3216SMarc Zyngier struct its_cmd_desc *desc) 406cc2d3216SMarc Zyngier { 407cc2d3216SMarc Zyngier its_encode_cmd(cmd, GITS_CMD_MAPC); 408cc2d3216SMarc Zyngier its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id); 409cc2d3216SMarc Zyngier its_encode_target(cmd, desc->its_mapc_cmd.col->target_address); 410cc2d3216SMarc Zyngier its_encode_valid(cmd, desc->its_mapc_cmd.valid); 411cc2d3216SMarc Zyngier 412cc2d3216SMarc Zyngier its_fixup_cmd(cmd); 413cc2d3216SMarc Zyngier 414cc2d3216SMarc Zyngier return desc->its_mapc_cmd.col; 415cc2d3216SMarc Zyngier } 416cc2d3216SMarc Zyngier 41767047f90SMarc Zyngier static struct its_collection *its_build_mapti_cmd(struct its_node *its, 41867047f90SMarc Zyngier struct its_cmd_block *cmd, 419cc2d3216SMarc Zyngier struct its_cmd_desc *desc) 420cc2d3216SMarc Zyngier { 421591e5becSMarc Zyngier struct its_collection *col; 
422591e5becSMarc Zyngier 4236a25ad3aSMarc Zyngier col = dev_event_to_col(desc->its_mapti_cmd.dev, 4246a25ad3aSMarc Zyngier desc->its_mapti_cmd.event_id); 425591e5becSMarc Zyngier 4266a25ad3aSMarc Zyngier its_encode_cmd(cmd, GITS_CMD_MAPTI); 4276a25ad3aSMarc Zyngier its_encode_devid(cmd, desc->its_mapti_cmd.dev->device_id); 4286a25ad3aSMarc Zyngier its_encode_event_id(cmd, desc->its_mapti_cmd.event_id); 4296a25ad3aSMarc Zyngier its_encode_phys_id(cmd, desc->its_mapti_cmd.phys_id); 430591e5becSMarc Zyngier its_encode_collection(cmd, col->col_id); 431cc2d3216SMarc Zyngier 432cc2d3216SMarc Zyngier its_fixup_cmd(cmd); 433cc2d3216SMarc Zyngier 434591e5becSMarc Zyngier return col; 435cc2d3216SMarc Zyngier } 436cc2d3216SMarc Zyngier 43767047f90SMarc Zyngier static struct its_collection *its_build_movi_cmd(struct its_node *its, 43867047f90SMarc Zyngier struct its_cmd_block *cmd, 439cc2d3216SMarc Zyngier struct its_cmd_desc *desc) 440cc2d3216SMarc Zyngier { 441591e5becSMarc Zyngier struct its_collection *col; 442591e5becSMarc Zyngier 443591e5becSMarc Zyngier col = dev_event_to_col(desc->its_movi_cmd.dev, 444591e5becSMarc Zyngier desc->its_movi_cmd.event_id); 445591e5becSMarc Zyngier 446cc2d3216SMarc Zyngier its_encode_cmd(cmd, GITS_CMD_MOVI); 447cc2d3216SMarc Zyngier its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id); 448591e5becSMarc Zyngier its_encode_event_id(cmd, desc->its_movi_cmd.event_id); 449cc2d3216SMarc Zyngier its_encode_collection(cmd, desc->its_movi_cmd.col->col_id); 450cc2d3216SMarc Zyngier 451cc2d3216SMarc Zyngier its_fixup_cmd(cmd); 452cc2d3216SMarc Zyngier 453591e5becSMarc Zyngier return col; 454cc2d3216SMarc Zyngier } 455cc2d3216SMarc Zyngier 45667047f90SMarc Zyngier static struct its_collection *its_build_discard_cmd(struct its_node *its, 45767047f90SMarc Zyngier struct its_cmd_block *cmd, 458cc2d3216SMarc Zyngier struct its_cmd_desc *desc) 459cc2d3216SMarc Zyngier { 460591e5becSMarc Zyngier struct its_collection *col; 461591e5becSMarc Zyngier 
462591e5becSMarc Zyngier col = dev_event_to_col(desc->its_discard_cmd.dev, 463591e5becSMarc Zyngier desc->its_discard_cmd.event_id); 464591e5becSMarc Zyngier 465cc2d3216SMarc Zyngier its_encode_cmd(cmd, GITS_CMD_DISCARD); 466cc2d3216SMarc Zyngier its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id); 467cc2d3216SMarc Zyngier its_encode_event_id(cmd, desc->its_discard_cmd.event_id); 468cc2d3216SMarc Zyngier 469cc2d3216SMarc Zyngier its_fixup_cmd(cmd); 470cc2d3216SMarc Zyngier 471591e5becSMarc Zyngier return col; 472cc2d3216SMarc Zyngier } 473cc2d3216SMarc Zyngier 47467047f90SMarc Zyngier static struct its_collection *its_build_inv_cmd(struct its_node *its, 47567047f90SMarc Zyngier struct its_cmd_block *cmd, 476cc2d3216SMarc Zyngier struct its_cmd_desc *desc) 477cc2d3216SMarc Zyngier { 478591e5becSMarc Zyngier struct its_collection *col; 479591e5becSMarc Zyngier 480591e5becSMarc Zyngier col = dev_event_to_col(desc->its_inv_cmd.dev, 481591e5becSMarc Zyngier desc->its_inv_cmd.event_id); 482591e5becSMarc Zyngier 483cc2d3216SMarc Zyngier its_encode_cmd(cmd, GITS_CMD_INV); 484cc2d3216SMarc Zyngier its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id); 485cc2d3216SMarc Zyngier its_encode_event_id(cmd, desc->its_inv_cmd.event_id); 486cc2d3216SMarc Zyngier 487cc2d3216SMarc Zyngier its_fixup_cmd(cmd); 488cc2d3216SMarc Zyngier 489591e5becSMarc Zyngier return col; 490cc2d3216SMarc Zyngier } 491cc2d3216SMarc Zyngier 49267047f90SMarc Zyngier static struct its_collection *its_build_int_cmd(struct its_node *its, 49367047f90SMarc Zyngier struct its_cmd_block *cmd, 4948d85dcedSMarc Zyngier struct its_cmd_desc *desc) 4958d85dcedSMarc Zyngier { 4968d85dcedSMarc Zyngier struct its_collection *col; 4978d85dcedSMarc Zyngier 4988d85dcedSMarc Zyngier col = dev_event_to_col(desc->its_int_cmd.dev, 4998d85dcedSMarc Zyngier desc->its_int_cmd.event_id); 5008d85dcedSMarc Zyngier 5018d85dcedSMarc Zyngier its_encode_cmd(cmd, GITS_CMD_INT); 5028d85dcedSMarc Zyngier its_encode_devid(cmd, 
desc->its_int_cmd.dev->device_id); 5038d85dcedSMarc Zyngier its_encode_event_id(cmd, desc->its_int_cmd.event_id); 5048d85dcedSMarc Zyngier 5058d85dcedSMarc Zyngier its_fixup_cmd(cmd); 5068d85dcedSMarc Zyngier 5078d85dcedSMarc Zyngier return col; 5088d85dcedSMarc Zyngier } 5098d85dcedSMarc Zyngier 51067047f90SMarc Zyngier static struct its_collection *its_build_clear_cmd(struct its_node *its, 51167047f90SMarc Zyngier struct its_cmd_block *cmd, 5128d85dcedSMarc Zyngier struct its_cmd_desc *desc) 5138d85dcedSMarc Zyngier { 5148d85dcedSMarc Zyngier struct its_collection *col; 5158d85dcedSMarc Zyngier 5168d85dcedSMarc Zyngier col = dev_event_to_col(desc->its_clear_cmd.dev, 5178d85dcedSMarc Zyngier desc->its_clear_cmd.event_id); 5188d85dcedSMarc Zyngier 5198d85dcedSMarc Zyngier its_encode_cmd(cmd, GITS_CMD_CLEAR); 5208d85dcedSMarc Zyngier its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id); 5218d85dcedSMarc Zyngier its_encode_event_id(cmd, desc->its_clear_cmd.event_id); 5228d85dcedSMarc Zyngier 5238d85dcedSMarc Zyngier its_fixup_cmd(cmd); 5248d85dcedSMarc Zyngier 5258d85dcedSMarc Zyngier return col; 5268d85dcedSMarc Zyngier } 5278d85dcedSMarc Zyngier 52867047f90SMarc Zyngier static struct its_collection *its_build_invall_cmd(struct its_node *its, 52967047f90SMarc Zyngier struct its_cmd_block *cmd, 530cc2d3216SMarc Zyngier struct its_cmd_desc *desc) 531cc2d3216SMarc Zyngier { 532cc2d3216SMarc Zyngier its_encode_cmd(cmd, GITS_CMD_INVALL); 533cc2d3216SMarc Zyngier its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id); 534cc2d3216SMarc Zyngier 535cc2d3216SMarc Zyngier its_fixup_cmd(cmd); 536cc2d3216SMarc Zyngier 537cc2d3216SMarc Zyngier return NULL; 538cc2d3216SMarc Zyngier } 539cc2d3216SMarc Zyngier 54067047f90SMarc Zyngier static struct its_vpe *its_build_vinvall_cmd(struct its_node *its, 54167047f90SMarc Zyngier struct its_cmd_block *cmd, 542eb78192bSMarc Zyngier struct its_cmd_desc *desc) 543eb78192bSMarc Zyngier { 544eb78192bSMarc Zyngier its_encode_cmd(cmd, 
GITS_CMD_VINVALL); 545eb78192bSMarc Zyngier its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id); 546eb78192bSMarc Zyngier 547eb78192bSMarc Zyngier its_fixup_cmd(cmd); 548eb78192bSMarc Zyngier 549eb78192bSMarc Zyngier return desc->its_vinvall_cmd.vpe; 550eb78192bSMarc Zyngier } 551eb78192bSMarc Zyngier 55267047f90SMarc Zyngier static struct its_vpe *its_build_vmapp_cmd(struct its_node *its, 55367047f90SMarc Zyngier struct its_cmd_block *cmd, 554eb78192bSMarc Zyngier struct its_cmd_desc *desc) 555eb78192bSMarc Zyngier { 556eb78192bSMarc Zyngier unsigned long vpt_addr; 5575c9a882eSMarc Zyngier u64 target; 558eb78192bSMarc Zyngier 559eb78192bSMarc Zyngier vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page)); 5605c9a882eSMarc Zyngier target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset; 561eb78192bSMarc Zyngier 562eb78192bSMarc Zyngier its_encode_cmd(cmd, GITS_CMD_VMAPP); 563eb78192bSMarc Zyngier its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id); 564eb78192bSMarc Zyngier its_encode_valid(cmd, desc->its_vmapp_cmd.valid); 5655c9a882eSMarc Zyngier its_encode_target(cmd, target); 566eb78192bSMarc Zyngier its_encode_vpt_addr(cmd, vpt_addr); 567eb78192bSMarc Zyngier its_encode_vpt_size(cmd, LPI_NRBITS - 1); 568eb78192bSMarc Zyngier 569eb78192bSMarc Zyngier its_fixup_cmd(cmd); 570eb78192bSMarc Zyngier 571eb78192bSMarc Zyngier return desc->its_vmapp_cmd.vpe; 572eb78192bSMarc Zyngier } 573eb78192bSMarc Zyngier 57467047f90SMarc Zyngier static struct its_vpe *its_build_vmapti_cmd(struct its_node *its, 57567047f90SMarc Zyngier struct its_cmd_block *cmd, 576d011e4e6SMarc Zyngier struct its_cmd_desc *desc) 577d011e4e6SMarc Zyngier { 578d011e4e6SMarc Zyngier u32 db; 579d011e4e6SMarc Zyngier 580d011e4e6SMarc Zyngier if (desc->its_vmapti_cmd.db_enabled) 581d011e4e6SMarc Zyngier db = desc->its_vmapti_cmd.vpe->vpe_db_lpi; 582d011e4e6SMarc Zyngier else 583d011e4e6SMarc Zyngier db = 1023; 584d011e4e6SMarc Zyngier 585d011e4e6SMarc Zyngier 
	its_encode_cmd(cmd, GITS_CMD_VMAPTI);
	its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id);
	its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id);
	its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id);
	its_encode_db_phys_id(cmd, db);
	its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id);

	its_fixup_cmd(cmd);

	return desc->its_vmapti_cmd.vpe;
}

/*
 * Encode a VMOVI command: retarget an already mapped DevID/EventID pair
 * to a vPE, optionally (re)programming its doorbell.  Returns the vPE so
 * that the caller can emit a matching VSYNC.
 */
static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
					   struct its_cmd_block *cmd,
					   struct its_cmd_desc *desc)
{
	u32 db;

	/* 1023 doubles as the "no doorbell" value when delivery is disabled */
	if (desc->its_vmovi_cmd.db_enabled)
		db = desc->its_vmovi_cmd.vpe->vpe_db_lpi;
	else
		db = 1023;

	its_encode_cmd(cmd, GITS_CMD_VMOVI);
	its_encode_devid(cmd, desc->its_vmovi_cmd.dev->device_id);
	its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id);
	its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id);
	its_encode_db_phys_id(cmd, db);
	its_encode_db_valid(cmd, true);

	its_fixup_cmd(cmd);

	return desc->its_vmovi_cmd.vpe;
}

/*
 * Encode a VMOVP command: move a vPE to the redistributor described by
 * the target collection, applying this ITS's redistributor offset.
 */
static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
					   struct its_cmd_block *cmd,
					   struct its_cmd_desc *desc)
{
	u64 target;

	target = desc->its_vmovp_cmd.col->target_address + its->vlpi_redist_offset;
	its_encode_cmd(cmd, GITS_CMD_VMOVP);
	its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num);
	its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list);
	its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id);
	its_encode_target(cmd, target);

	its_fixup_cmd(cmd);

	return desc->its_vmovp_cmd.vpe;
}

/* Byte offset of a command slot from the base of the command queue */
static u64 its_cmd_ptr_to_offset(struct its_node *its,
				 struct its_cmd_block *ptr)
{
	return (ptr - its->cmd_base) * sizeof(*ptr);
}

/*
 * Returns 1 if writing one more command would catch up with the ITS
 * read pointer (GITS_CREADR), i.e. the circular queue is full.
 */
static int its_queue_full(struct its_node *its)
{
	int widx;
	int ridx;

	widx = its->cmd_write - its->cmd_base;
	ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block);

	/* This is incredibly unlikely to happen, unless the ITS locks up. */
	if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx)
		return 1;

	return 0;
}

/*
 * Claim the next free slot in the command queue, spinning (with a ~1s
 * timeout) while the queue is full.  The returned slot is zeroed.
 * Caller must hold its->lock.  Returns NULL on timeout.
 */
static struct its_cmd_block *its_allocate_entry(struct its_node *its)
{
	struct its_cmd_block *cmd;
	u32 count = 1000000;	/* 1s! */

	while (its_queue_full(its)) {
		count--;
		if (!count) {
			pr_err_ratelimited("ITS queue not draining\n");
			return NULL;
		}
		cpu_relax();
		udelay(1);
	}

	cmd = its->cmd_write++;

	/* Handle queue wrapping */
	if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES))
		its->cmd_write = its->cmd_base;

	/* Clear command */
	cmd->raw_cmd[0] = 0;
	cmd->raw_cmd[1] = 0;
	cmd->raw_cmd[2] = 0;
	cmd->raw_cmd[3] = 0;

	return cmd;
}

/*
 * Publish all queued commands to the ITS by updating GITS_CWRITER,
 * and return the new write pointer (first slot NOT yet submitted).
 */
static struct its_cmd_block *its_post_commands(struct its_node *its)
{
	u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write);

	writel_relaxed(wr, its->base + GITS_CWRITER);

	return its->cmd_write;
}

static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
{
	/*
	 * Make sure the commands written to memory are observable by
	 * the ITS.
	 */
	if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
		gic_flush_dcache_to_poc(cmd, sizeof(*cmd));
	else
		dsb(ishst);
}

/*
 * Poll GITS_CREADR until the ITS has consumed every command in
 * [from, to), handling queue wrap-around.  Returns 0 on success,
 * -1 after a ~1s timeout.
 */
static int its_wait_for_range_completion(struct its_node *its,
					 struct its_cmd_block *from,
					 struct its_cmd_block *to)
{
	u64 rd_idx, from_idx, to_idx;
	u32 count = 1000000;	/* 1s! */

	from_idx = its_cmd_ptr_to_offset(its, from);
	to_idx = its_cmd_ptr_to_offset(its, to);

	while (1) {
		rd_idx = readl_relaxed(its->base + GITS_CREADR);

		/* Direct case */
		if (from_idx < to_idx && rd_idx >= to_idx)
			break;

		/* Wrapped case */
		if (from_idx >= to_idx && rd_idx >= to_idx && rd_idx < from_idx)
			break;

		count--;
		if (!count) {
			pr_err_ratelimited("ITS queue timeout (%llu %llu %llu)\n",
					   from_idx, to_idx, rd_idx);
			return -1;
		}
		cpu_relax();
		udelay(1);
	}

	return 0;
}

/* Warning, macro hell follows */
/*
 * Generates a "build one command + its trailing sync, post, and wait"
 * function.  The physical and virtual flavours only differ in the
 * builder callback type, the sync object type, and the sync builder,
 * hence the macro.  The whole sequence runs under its->lock.
 */
#define BUILD_SINGLE_CMD_FUNC(name, buildtype, synctype, buildfn)	\
void name(struct its_node *its,						\
	  buildtype builder,						\
	  struct its_cmd_desc *desc)					\
{									\
	struct its_cmd_block *cmd, *sync_cmd, *next_cmd;		\
	synctype *sync_obj;						\
	unsigned long flags;						\
									\
	raw_spin_lock_irqsave(&its->lock, flags);			\
									\
	cmd = its_allocate_entry(its);					\
	if (!cmd) {		/* We're soooooo screewed... */		\
		raw_spin_unlock_irqrestore(&its->lock, flags);		\
		return;							\
	}								\
	sync_obj = builder(its, cmd, desc);				\
	its_flush_cmd(its, cmd);					\
									\
	if (sync_obj) {							\
		sync_cmd = its_allocate_entry(its);			\
		if (!sync_cmd)						\
			goto post;					\
									\
		buildfn(its, sync_cmd, sync_obj);			\
		its_flush_cmd(its, sync_cmd);				\
	}								\
									\
post:									\
	next_cmd = its_post_commands(its);				\
	raw_spin_unlock_irqrestore(&its->lock, flags);			\
									\
	if (its_wait_for_range_completion(its, cmd, next_cmd))		\
		pr_err_ratelimited("ITS cmd %ps failed\n", builder);	\
}

/* Encode a SYNC command targeting a physical collection */
static void its_build_sync_cmd(struct its_node *its,
			       struct its_cmd_block *sync_cmd,
			       struct its_collection *sync_col)
{
	its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
	its_encode_target(sync_cmd, sync_col->target_address);

	its_fixup_cmd(sync_cmd);
}

static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t,
			     struct its_collection, its_build_sync_cmd)

/* Encode a VSYNC command targeting a vPE */
static void its_build_vsync_cmd(struct its_node *its,
				struct its_cmd_block *sync_cmd,
				struct its_vpe *sync_vpe)
{
	its_encode_cmd(sync_cmd, GITS_CMD_VSYNC);
	its_encode_vpeid(sync_cmd, sync_vpe->vpe_id);

	its_fixup_cmd(sync_cmd);
}

static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t,
			     struct its_vpe, its_build_vsync_cmd)

/* Set the pending state of an event (INT command) */
static void its_send_int(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	desc.its_int_cmd.dev = dev;
	desc.its_int_cmd.event_id = event_id;

	its_send_single_command(dev->its, its_build_int_cmd, &desc);
}

/* Clear the pending state of an event (CLEAR command) */
static void its_send_clear(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	desc.its_clear_cmd.dev = dev;
	desc.its_clear_cmd.event_id = event_id;

	its_send_single_command(dev->its, its_build_clear_cmd, &desc);
}

static void its_send_inv(struct its_device
					 *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	desc.its_inv_cmd.dev = dev;
	desc.its_inv_cmd.event_id = event_id;

	its_send_single_command(dev->its, its_build_inv_cmd, &desc);
}

/* Map (or unmap, when !valid) a device's ITT (MAPD command) */
static void its_send_mapd(struct its_device *dev, int valid)
{
	struct its_cmd_desc desc;

	desc.its_mapd_cmd.dev = dev;
	desc.its_mapd_cmd.valid = !!valid;

	its_send_single_command(dev->its, its_build_mapd_cmd, &desc);
}

/* Map (or unmap) a collection to its redistributor (MAPC command) */
static void its_send_mapc(struct its_node *its, struct its_collection *col,
			  int valid)
{
	struct its_cmd_desc desc;

	desc.its_mapc_cmd.col = col;
	desc.its_mapc_cmd.valid = !!valid;

	its_send_single_command(its, its_build_mapc_cmd, &desc);
}

/* Map an event to a physical LPI (MAPTI command) */
static void its_send_mapti(struct its_device *dev, u32 irq_id, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_mapti_cmd.dev = dev;
	desc.its_mapti_cmd.phys_id = irq_id;
	desc.its_mapti_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_mapti_cmd, &desc);
}

/* Retarget an event to another collection (MOVI command) */
static void its_send_movi(struct its_device *dev,
			  struct its_collection *col, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_movi_cmd.dev = dev;
	desc.its_movi_cmd.col = col;
	desc.its_movi_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_movi_cmd, &desc);
}

/* Remove an event's mapping and pending state (DISCARD command) */
static void its_send_discard(struct its_device *dev, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_discard_cmd.dev = dev;
	desc.its_discard_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_discard_cmd, &desc);
}

/* Invalidate cached config for all LPIs of a collection (INVALL command) */
static void its_send_invall(struct its_node *its, struct its_collection *col)
{
	struct its_cmd_desc desc;

	desc.its_invall_cmd.col = col;

	its_send_single_command(its, its_build_invall_cmd, &desc);
}

/*
 * Map an event to a virtual LPI, using the per-event mapping info
 * previously stashed in dev->event_map.vlpi_maps[] (VMAPTI command).
 */
static void its_send_vmapti(struct its_device *dev, u32 id)
{
	struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id];
	struct its_cmd_desc desc;

	desc.its_vmapti_cmd.vpe = map->vpe;
	desc.its_vmapti_cmd.dev = dev;
	desc.its_vmapti_cmd.virt_id = map->vintid;
	desc.its_vmapti_cmd.event_id = id;
	desc.its_vmapti_cmd.db_enabled = map->db_enabled;

	its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc);
}

/* Re-issue an event's vLPI mapping to the same vPE (VMOVI command) */
static void its_send_vmovi(struct its_device *dev, u32 id)
{
	struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id];
	struct its_cmd_desc desc;

	desc.its_vmovi_cmd.vpe = map->vpe;
	desc.its_vmovi_cmd.dev = dev;
	desc.its_vmovi_cmd.event_id = id;
	desc.its_vmovi_cmd.db_enabled = map->db_enabled;

	its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc);
}

/* Map (or unmap) a vPE on one ITS (VMAPP command) */
static void its_send_vmapp(struct its_node *its,
			   struct its_vpe *vpe, bool valid)
{
	struct its_cmd_desc desc;

	desc.its_vmapp_cmd.vpe = vpe;
	desc.its_vmapp_cmd.valid = valid;
	desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];

	its_send_single_vcommand(its, its_build_vmapp_cmd, &desc);
}

/*
 * Move a vPE to the redistributor identified by vpe->col_idx.
 * Without an ITS list, a single VMOVP on the one ITS is enough.
 * With an ITS list, every v4 ITS the VM is mapped on must see the
 * VMOVPs in the same order, so emission is serialized by vmovp_lock
 * and tagged with a global sequence number.
 */
static void its_send_vmovp(struct its_vpe *vpe)
{
	struct its_cmd_desc desc;
	struct its_node *its;
	unsigned long flags;
	int col_id = vpe->col_idx;

	desc.its_vmovp_cmd.vpe = vpe;
	desc.its_vmovp_cmd.its_list = (u16)its_list_map;

	if (!its_list_map) {
		its = list_first_entry(&its_nodes, struct its_node, entry);
		desc.its_vmovp_cmd.seq_num = 0;
		desc.its_vmovp_cmd.col = &its->collections[col_id];
		its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
		return;
	}

	/*
	 * Yet another marvel of the architecture. If using the
	 * its_list "feature", we need to make sure that all ITSs
	 * receive all VMOVP commands in the same order. The only way
	 * to guarantee this is to make vmovp a serialization point.
	 *
	 * Wall <-- Head.
	 */
	raw_spin_lock_irqsave(&vmovp_lock, flags);

	desc.its_vmovp_cmd.seq_num = vmovp_seq_num++;

	/* Emit VMOVPs */
	list_for_each_entry(its, &its_nodes, entry) {
		if (!its->is_v4)
			continue;

		/* Skip ITSs this VM has no vLPIs mapped on */
		if (!vpe->its_vm->vlpi_count[its->list_nr])
			continue;

		desc.its_vmovp_cmd.col = &its->collections[col_id];
		its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
	}

	raw_spin_unlock_irqrestore(&vmovp_lock, flags);
}

/* Invalidate all cached vLPI config for a vPE (VINVALL command) */
static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe)
{
	struct its_cmd_desc desc;

	desc.its_vinvall_cmd.vpe = vpe;

	its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
}

/*
 * irqchip functions - assumes MSI, mostly.
 */

/* Recover the per-device event number from the Linux hwirq */
static inline u32 its_get_event_id(struct irq_data *d)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	return d->hwirq - its_dev->event_map.lpi_base;
}

/*
 * Update an LPI's configuration byte in the property table (virtual
 * table when the interrupt is forwarded to a vcpu, physical otherwise),
 * clearing 'clr' bits and setting 'set' | GROUP1.  The write is then
 * made visible to the redistributors.
 */
static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
{
	irq_hw_number_t hwirq;
	struct page *prop_page;
	u8 *cfg;

	if (irqd_is_forwarded_to_vcpu(d)) {
		struct its_device *its_dev = irq_data_get_irq_chip_data(d);
		u32 event = its_get_event_id(d);

		prop_page = its_dev->event_map.vm->vprop_page;
		hwirq = its_dev->event_map.vlpi_maps[event].vintid;
	} else {
		prop_page = gic_rdists->prop_page;
		hwirq = d->hwirq;
	}

	/* LPI INTIDs start at 8192; the table is indexed from there */
	cfg = page_address(prop_page) + hwirq - 8192;
	*cfg &= ~clr;
	*cfg |= set | LPI_PROP_GROUP1;

	/*
	 * Make the above write visible to the redistributors.
	 * And yes, we're flushing exactly: One. Single. Byte.
	 * Humpf...
	 */
	if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
		gic_flush_dcache_to_poc(cfg, sizeof(*cfg));
	else
		dsb(ishst);
}

/* Write the config byte, then tell the ITS to invalidate its cached copy */
static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);

	lpi_write_config(d, clr, set);
	its_send_inv(its_dev, its_get_event_id(d));
}

/*
 * Enable/disable the doorbell of a vcpu-forwarded interrupt.  No-op if
 * already in the requested state.
 */
static void its_vlpi_set_doorbell(struct irq_data *d, bool enable)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);

	if (its_dev->event_map.vlpi_maps[event].db_enabled == enable)
		return;

	its_dev->event_map.vlpi_maps[event].db_enabled = enable;

	/*
	 * More fun with the architecture:
	 *
	 * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI
	 * value or to 1023, depending on the enable bit. But that
	 * would be issueing a mapping for an /existing/ DevID+EventID
	 * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI
	 * to the /same/ vPE, using this opportunity to adjust the
	 * doorbell. Mouahahahaha. We loves it, Precious.
	 */
	its_send_vmovi(its_dev, event);
}

static void its_mask_irq(struct irq_data *d)
{
	/* For a forwarded LPI, masking means disabling its doorbell */
	if (irqd_is_forwarded_to_vcpu(d))
		its_vlpi_set_doorbell(d, false);

	lpi_update_config(d, LPI_PROP_ENABLED, 0);
}

static void its_unmask_irq(struct irq_data *d)
{
	/* For a forwarded LPI, unmasking means enabling its doorbell */
	if (irqd_is_forwarded_to_vcpu(d))
		its_vlpi_set_doorbell(d, true);

	lpi_update_config(d, 0, LPI_PROP_ENABLED);
}

/*
 * Route the LPI to a CPU picked from mask_val (restricted to the ITS's
 * NUMA node under the Cavium 23144 erratum) by moving its event to the
 * corresponding collection.
 */
static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	unsigned int cpu;
	const struct cpumask *cpu_mask = cpu_online_mask;
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	struct its_collection *target_col;
	u32 id = its_get_event_id(d);

	/* A forwarded interrupt should use irq_set_vcpu_affinity */
	if (irqd_is_forwarded_to_vcpu(d))
		return -EINVAL;

	/* lpi cannot be routed to a redistributor that is on a foreign node */
	if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
		if (its_dev->its->numa_node >= 0) {
			cpu_mask = cpumask_of_node(its_dev->its->numa_node);
			if (!cpumask_intersects(mask_val, cpu_mask))
				return -EINVAL;
		}
	}

	cpu = cpumask_any_and(mask_val, cpu_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	/* don't set the affinity when the target cpu is same as current one */
	if (cpu != its_dev->event_map.col_map[id]) {
		target_col = &its_dev->its->collections[cpu];
		its_send_movi(its_dev, target_col, id);
		its_dev->event_map.col_map[id] = cpu;
		irq_data_update_effective_affinity(d, cpumask_of(cpu));
	}

	return IRQ_SET_MASK_OK_DONE;
}

/* Default MSI doorbell address: this ITS's GITS_TRANSLATER register */
static u64 its_irq_get_msi_base(struct its_device *its_dev)
{
	struct its_node *its = its_dev->its;

	return its->phys_base + GITS_TRANSLATER;
}

/* Compose the MSI message (doorbell address + event id) for an LPI */
static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	struct its_node *its;
	u64 addr;

	its = its_dev->its;
	addr = its->get_msi_base(its_dev);

	msg->address_lo = lower_32_bits(addr);
	msg->address_hi = upper_32_bits(addr);
1137b48ac83dSMarc Zyngier msg->data = its_get_event_id(d); 113844bb7e24SRobin Murphy 113944bb7e24SRobin Murphy iommu_dma_map_msi_msg(d->irq, msg); 1140b48ac83dSMarc Zyngier } 1141b48ac83dSMarc Zyngier 11428d85dcedSMarc Zyngier static int its_irq_set_irqchip_state(struct irq_data *d, 11438d85dcedSMarc Zyngier enum irqchip_irq_state which, 11448d85dcedSMarc Zyngier bool state) 11458d85dcedSMarc Zyngier { 11468d85dcedSMarc Zyngier struct its_device *its_dev = irq_data_get_irq_chip_data(d); 11478d85dcedSMarc Zyngier u32 event = its_get_event_id(d); 11488d85dcedSMarc Zyngier 11498d85dcedSMarc Zyngier if (which != IRQCHIP_STATE_PENDING) 11508d85dcedSMarc Zyngier return -EINVAL; 11518d85dcedSMarc Zyngier 11528d85dcedSMarc Zyngier if (state) 11538d85dcedSMarc Zyngier its_send_int(its_dev, event); 11548d85dcedSMarc Zyngier else 11558d85dcedSMarc Zyngier its_send_clear(its_dev, event); 11568d85dcedSMarc Zyngier 11578d85dcedSMarc Zyngier return 0; 11588d85dcedSMarc Zyngier } 11598d85dcedSMarc Zyngier 1160*2247e1bfSMarc Zyngier static void its_map_vm(struct its_node *its, struct its_vm *vm) 1161*2247e1bfSMarc Zyngier { 1162*2247e1bfSMarc Zyngier unsigned long flags; 1163*2247e1bfSMarc Zyngier 1164*2247e1bfSMarc Zyngier /* Not using the ITS list? Everything is always mapped. */ 1165*2247e1bfSMarc Zyngier if (!its_list_map) 1166*2247e1bfSMarc Zyngier return; 1167*2247e1bfSMarc Zyngier 1168*2247e1bfSMarc Zyngier raw_spin_lock_irqsave(&vmovp_lock, flags); 1169*2247e1bfSMarc Zyngier 1170*2247e1bfSMarc Zyngier /* 1171*2247e1bfSMarc Zyngier * If the VM wasn't mapped yet, iterate over the vpes and get 1172*2247e1bfSMarc Zyngier * them mapped now. 
1173*2247e1bfSMarc Zyngier */ 1174*2247e1bfSMarc Zyngier vm->vlpi_count[its->list_nr]++; 1175*2247e1bfSMarc Zyngier 1176*2247e1bfSMarc Zyngier if (vm->vlpi_count[its->list_nr] == 1) { 1177*2247e1bfSMarc Zyngier int i; 1178*2247e1bfSMarc Zyngier 1179*2247e1bfSMarc Zyngier for (i = 0; i < vm->nr_vpes; i++) { 1180*2247e1bfSMarc Zyngier struct its_vpe *vpe = vm->vpes[i]; 1181*2247e1bfSMarc Zyngier 1182*2247e1bfSMarc Zyngier /* Map the VPE to the first possible CPU */ 1183*2247e1bfSMarc Zyngier vpe->col_idx = cpumask_first(cpu_online_mask); 1184*2247e1bfSMarc Zyngier its_send_vmapp(its, vpe, true); 1185*2247e1bfSMarc Zyngier its_send_vinvall(its, vpe); 1186*2247e1bfSMarc Zyngier } 1187*2247e1bfSMarc Zyngier } 1188*2247e1bfSMarc Zyngier 1189*2247e1bfSMarc Zyngier raw_spin_unlock_irqrestore(&vmovp_lock, flags); 1190*2247e1bfSMarc Zyngier } 1191*2247e1bfSMarc Zyngier 1192*2247e1bfSMarc Zyngier static void its_unmap_vm(struct its_node *its, struct its_vm *vm) 1193*2247e1bfSMarc Zyngier { 1194*2247e1bfSMarc Zyngier unsigned long flags; 1195*2247e1bfSMarc Zyngier 1196*2247e1bfSMarc Zyngier /* Not using the ITS list? Everything is always mapped. 
*/ 1197*2247e1bfSMarc Zyngier if (!its_list_map) 1198*2247e1bfSMarc Zyngier return; 1199*2247e1bfSMarc Zyngier 1200*2247e1bfSMarc Zyngier raw_spin_lock_irqsave(&vmovp_lock, flags); 1201*2247e1bfSMarc Zyngier 1202*2247e1bfSMarc Zyngier if (!--vm->vlpi_count[its->list_nr]) { 1203*2247e1bfSMarc Zyngier int i; 1204*2247e1bfSMarc Zyngier 1205*2247e1bfSMarc Zyngier for (i = 0; i < vm->nr_vpes; i++) 1206*2247e1bfSMarc Zyngier its_send_vmapp(its, vm->vpes[i], false); 1207*2247e1bfSMarc Zyngier } 1208*2247e1bfSMarc Zyngier 1209*2247e1bfSMarc Zyngier raw_spin_unlock_irqrestore(&vmovp_lock, flags); 1210*2247e1bfSMarc Zyngier } 1211*2247e1bfSMarc Zyngier 1212d011e4e6SMarc Zyngier static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info) 1213d011e4e6SMarc Zyngier { 1214d011e4e6SMarc Zyngier struct its_device *its_dev = irq_data_get_irq_chip_data(d); 1215d011e4e6SMarc Zyngier u32 event = its_get_event_id(d); 1216d011e4e6SMarc Zyngier int ret = 0; 1217d011e4e6SMarc Zyngier 1218d011e4e6SMarc Zyngier if (!info->map) 1219d011e4e6SMarc Zyngier return -EINVAL; 1220d011e4e6SMarc Zyngier 1221d011e4e6SMarc Zyngier mutex_lock(&its_dev->event_map.vlpi_lock); 1222d011e4e6SMarc Zyngier 1223d011e4e6SMarc Zyngier if (!its_dev->event_map.vm) { 1224d011e4e6SMarc Zyngier struct its_vlpi_map *maps; 1225d011e4e6SMarc Zyngier 1226d011e4e6SMarc Zyngier maps = kzalloc(sizeof(*maps) * its_dev->event_map.nr_lpis, 1227d011e4e6SMarc Zyngier GFP_KERNEL); 1228d011e4e6SMarc Zyngier if (!maps) { 1229d011e4e6SMarc Zyngier ret = -ENOMEM; 1230d011e4e6SMarc Zyngier goto out; 1231d011e4e6SMarc Zyngier } 1232d011e4e6SMarc Zyngier 1233d011e4e6SMarc Zyngier its_dev->event_map.vm = info->map->vm; 1234d011e4e6SMarc Zyngier its_dev->event_map.vlpi_maps = maps; 1235d011e4e6SMarc Zyngier } else if (its_dev->event_map.vm != info->map->vm) { 1236d011e4e6SMarc Zyngier ret = -EINVAL; 1237d011e4e6SMarc Zyngier goto out; 1238d011e4e6SMarc Zyngier } 1239d011e4e6SMarc Zyngier 1240d011e4e6SMarc Zyngier /* Get our 
private copy of the mapping information */ 1241d011e4e6SMarc Zyngier its_dev->event_map.vlpi_maps[event] = *info->map; 1242d011e4e6SMarc Zyngier 1243d011e4e6SMarc Zyngier if (irqd_is_forwarded_to_vcpu(d)) { 1244d011e4e6SMarc Zyngier /* Already mapped, move it around */ 1245d011e4e6SMarc Zyngier its_send_vmovi(its_dev, event); 1246d011e4e6SMarc Zyngier } else { 1247*2247e1bfSMarc Zyngier /* Ensure all the VPEs are mapped on this ITS */ 1248*2247e1bfSMarc Zyngier its_map_vm(its_dev->its, info->map->vm); 1249*2247e1bfSMarc Zyngier 1250d011e4e6SMarc Zyngier /* Drop the physical mapping */ 1251d011e4e6SMarc Zyngier its_send_discard(its_dev, event); 1252d011e4e6SMarc Zyngier 1253d011e4e6SMarc Zyngier /* and install the virtual one */ 1254d011e4e6SMarc Zyngier its_send_vmapti(its_dev, event); 1255d011e4e6SMarc Zyngier irqd_set_forwarded_to_vcpu(d); 1256d011e4e6SMarc Zyngier 1257d011e4e6SMarc Zyngier /* Increment the number of VLPIs */ 1258d011e4e6SMarc Zyngier its_dev->event_map.nr_vlpis++; 1259d011e4e6SMarc Zyngier } 1260d011e4e6SMarc Zyngier 1261d011e4e6SMarc Zyngier out: 1262d011e4e6SMarc Zyngier mutex_unlock(&its_dev->event_map.vlpi_lock); 1263d011e4e6SMarc Zyngier return ret; 1264d011e4e6SMarc Zyngier } 1265d011e4e6SMarc Zyngier 1266d011e4e6SMarc Zyngier static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info) 1267d011e4e6SMarc Zyngier { 1268d011e4e6SMarc Zyngier struct its_device *its_dev = irq_data_get_irq_chip_data(d); 1269d011e4e6SMarc Zyngier u32 event = its_get_event_id(d); 1270d011e4e6SMarc Zyngier int ret = 0; 1271d011e4e6SMarc Zyngier 1272d011e4e6SMarc Zyngier mutex_lock(&its_dev->event_map.vlpi_lock); 1273d011e4e6SMarc Zyngier 1274d011e4e6SMarc Zyngier if (!its_dev->event_map.vm || 1275d011e4e6SMarc Zyngier !its_dev->event_map.vlpi_maps[event].vm) { 1276d011e4e6SMarc Zyngier ret = -EINVAL; 1277d011e4e6SMarc Zyngier goto out; 1278d011e4e6SMarc Zyngier } 1279d011e4e6SMarc Zyngier 1280d011e4e6SMarc Zyngier /* Copy our mapping information to the 
incoming request */ 1281d011e4e6SMarc Zyngier *info->map = its_dev->event_map.vlpi_maps[event]; 1282d011e4e6SMarc Zyngier 1283d011e4e6SMarc Zyngier out: 1284d011e4e6SMarc Zyngier mutex_unlock(&its_dev->event_map.vlpi_lock); 1285d011e4e6SMarc Zyngier return ret; 1286d011e4e6SMarc Zyngier } 1287d011e4e6SMarc Zyngier 1288d011e4e6SMarc Zyngier static int its_vlpi_unmap(struct irq_data *d) 1289d011e4e6SMarc Zyngier { 1290d011e4e6SMarc Zyngier struct its_device *its_dev = irq_data_get_irq_chip_data(d); 1291d011e4e6SMarc Zyngier u32 event = its_get_event_id(d); 1292d011e4e6SMarc Zyngier int ret = 0; 1293d011e4e6SMarc Zyngier 1294d011e4e6SMarc Zyngier mutex_lock(&its_dev->event_map.vlpi_lock); 1295d011e4e6SMarc Zyngier 1296d011e4e6SMarc Zyngier if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) { 1297d011e4e6SMarc Zyngier ret = -EINVAL; 1298d011e4e6SMarc Zyngier goto out; 1299d011e4e6SMarc Zyngier } 1300d011e4e6SMarc Zyngier 1301d011e4e6SMarc Zyngier /* Drop the virtual mapping */ 1302d011e4e6SMarc Zyngier its_send_discard(its_dev, event); 1303d011e4e6SMarc Zyngier 1304d011e4e6SMarc Zyngier /* and restore the physical one */ 1305d011e4e6SMarc Zyngier irqd_clr_forwarded_to_vcpu(d); 1306d011e4e6SMarc Zyngier its_send_mapti(its_dev, d->hwirq, event); 1307d011e4e6SMarc Zyngier lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO | 1308d011e4e6SMarc Zyngier LPI_PROP_ENABLED | 1309d011e4e6SMarc Zyngier LPI_PROP_GROUP1)); 1310d011e4e6SMarc Zyngier 1311*2247e1bfSMarc Zyngier /* Potentially unmap the VM from this ITS */ 1312*2247e1bfSMarc Zyngier its_unmap_vm(its_dev->its, its_dev->event_map.vm); 1313*2247e1bfSMarc Zyngier 1314d011e4e6SMarc Zyngier /* 1315d011e4e6SMarc Zyngier * Drop the refcount and make the device available again if 1316d011e4e6SMarc Zyngier * this was the last VLPI. 
1317d011e4e6SMarc Zyngier */ 1318d011e4e6SMarc Zyngier if (!--its_dev->event_map.nr_vlpis) { 1319d011e4e6SMarc Zyngier its_dev->event_map.vm = NULL; 1320d011e4e6SMarc Zyngier kfree(its_dev->event_map.vlpi_maps); 1321d011e4e6SMarc Zyngier } 1322d011e4e6SMarc Zyngier 1323d011e4e6SMarc Zyngier out: 1324d011e4e6SMarc Zyngier mutex_unlock(&its_dev->event_map.vlpi_lock); 1325d011e4e6SMarc Zyngier return ret; 1326d011e4e6SMarc Zyngier } 1327d011e4e6SMarc Zyngier 1328015ec038SMarc Zyngier static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info) 1329015ec038SMarc Zyngier { 1330015ec038SMarc Zyngier struct its_device *its_dev = irq_data_get_irq_chip_data(d); 1331015ec038SMarc Zyngier 1332015ec038SMarc Zyngier if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) 1333015ec038SMarc Zyngier return -EINVAL; 1334015ec038SMarc Zyngier 1335015ec038SMarc Zyngier if (info->cmd_type == PROP_UPDATE_AND_INV_VLPI) 1336015ec038SMarc Zyngier lpi_update_config(d, 0xff, info->config); 1337015ec038SMarc Zyngier else 1338015ec038SMarc Zyngier lpi_write_config(d, 0xff, info->config); 1339015ec038SMarc Zyngier its_vlpi_set_doorbell(d, !!(info->config & LPI_PROP_ENABLED)); 1340015ec038SMarc Zyngier 1341015ec038SMarc Zyngier return 0; 1342015ec038SMarc Zyngier } 1343015ec038SMarc Zyngier 1344c808eea8SMarc Zyngier static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) 1345c808eea8SMarc Zyngier { 1346c808eea8SMarc Zyngier struct its_device *its_dev = irq_data_get_irq_chip_data(d); 1347c808eea8SMarc Zyngier struct its_cmd_info *info = vcpu_info; 1348c808eea8SMarc Zyngier 1349c808eea8SMarc Zyngier /* Need a v4 ITS */ 1350d011e4e6SMarc Zyngier if (!its_dev->its->is_v4) 1351c808eea8SMarc Zyngier return -EINVAL; 1352c808eea8SMarc Zyngier 1353d011e4e6SMarc Zyngier /* Unmap request? 
*/ 1354d011e4e6SMarc Zyngier if (!info) 1355d011e4e6SMarc Zyngier return its_vlpi_unmap(d); 1356d011e4e6SMarc Zyngier 1357c808eea8SMarc Zyngier switch (info->cmd_type) { 1358c808eea8SMarc Zyngier case MAP_VLPI: 1359d011e4e6SMarc Zyngier return its_vlpi_map(d, info); 1360c808eea8SMarc Zyngier 1361c808eea8SMarc Zyngier case GET_VLPI: 1362d011e4e6SMarc Zyngier return its_vlpi_get(d, info); 1363c808eea8SMarc Zyngier 1364c808eea8SMarc Zyngier case PROP_UPDATE_VLPI: 1365c808eea8SMarc Zyngier case PROP_UPDATE_AND_INV_VLPI: 1366015ec038SMarc Zyngier return its_vlpi_prop_update(d, info); 1367c808eea8SMarc Zyngier 1368c808eea8SMarc Zyngier default: 1369c808eea8SMarc Zyngier return -EINVAL; 1370c808eea8SMarc Zyngier } 1371c808eea8SMarc Zyngier } 1372c808eea8SMarc Zyngier 1373c48ed51cSMarc Zyngier static struct irq_chip its_irq_chip = { 1374c48ed51cSMarc Zyngier .name = "ITS", 1375c48ed51cSMarc Zyngier .irq_mask = its_mask_irq, 1376c48ed51cSMarc Zyngier .irq_unmask = its_unmask_irq, 1377004fa08dSAshok Kumar .irq_eoi = irq_chip_eoi_parent, 1378c48ed51cSMarc Zyngier .irq_set_affinity = its_set_affinity, 1379b48ac83dSMarc Zyngier .irq_compose_msi_msg = its_irq_compose_msi_msg, 13808d85dcedSMarc Zyngier .irq_set_irqchip_state = its_irq_set_irqchip_state, 1381c808eea8SMarc Zyngier .irq_set_vcpu_affinity = its_irq_set_vcpu_affinity, 1382b48ac83dSMarc Zyngier }; 1383b48ac83dSMarc Zyngier 1384bf9529f8SMarc Zyngier /* 1385bf9529f8SMarc Zyngier * How we allocate LPIs: 1386bf9529f8SMarc Zyngier * 1387bf9529f8SMarc Zyngier * The GIC has id_bits bits for interrupt identifiers. From there, we 1388bf9529f8SMarc Zyngier * must subtract 8192 which are reserved for SGIs/PPIs/SPIs. Then, as 1389bf9529f8SMarc Zyngier * we allocate LPIs by chunks of 32, we can shift the whole thing by 5 1390bf9529f8SMarc Zyngier * bits to the right. 1391bf9529f8SMarc Zyngier * 1392bf9529f8SMarc Zyngier * This gives us (((1UL << id_bits) - 8192) >> 5) possible allocations. 
*/
#define IRQS_PER_CHUNK_SHIFT	5
#define IRQS_PER_CHUNK		(1 << IRQS_PER_CHUNK_SHIFT)
#define ITS_MAX_LPI_NRBITS	16 /* 64K LPIs */

/* Bitmap of allocated LPI chunks; lpi_lock protects both fields. */
static unsigned long *lpi_bitmap;
static u32 lpi_chunks;
static DEFINE_SPINLOCK(lpi_lock);

/* Convert an LPI number to its chunk index (LPIs start at ID 8192). */
static int its_lpi_to_chunk(int lpi)
{
	return (lpi - 8192) >> IRQS_PER_CHUNK_SHIFT;
}

/* Convert a chunk index back to the first LPI number it covers. */
static int its_chunk_to_lpi(int chunk)
{
	return (chunk << IRQS_PER_CHUNK_SHIFT) + 8192;
}

/*
 * Size and allocate the global chunk bitmap from the number of
 * implemented interrupt ID bits.
 * Returns 0 on success, -ENOMEM if the bitmap cannot be allocated.
 */
static int __init its_lpi_init(u32 id_bits)
{
	lpi_chunks = its_lpi_to_chunk(1UL << id_bits);

	lpi_bitmap = kzalloc(BITS_TO_LONGS(lpi_chunks) * sizeof(long),
			     GFP_KERNEL);
	if (!lpi_bitmap) {
		lpi_chunks = 0;
		return -ENOMEM;
	}

	pr_info("ITS: Allocated %d chunks for LPIs\n", (int)lpi_chunks);
	return 0;
}

/*
 * Reserve a contiguous run of LPI chunks covering at least nr_irqs
 * interrupts, retrying with progressively fewer chunks if no large
 * enough free area exists. On success, returns a freshly allocated
 * per-device bitmap and reports the first LPI and the number of IDs
 * actually reserved via *base and *nr_ids. On failure, returns NULL
 * and zeroes *base and *nr_ids.
 */
static unsigned long *its_lpi_alloc_chunks(int nr_irqs, int *base, int *nr_ids)
{
	unsigned long *bitmap = NULL;
	int chunk_id;
	int nr_chunks;
	int i;

	nr_chunks = DIV_ROUND_UP(nr_irqs, IRQS_PER_CHUNK);

	spin_lock(&lpi_lock);

	do {
		chunk_id = bitmap_find_next_zero_area(lpi_bitmap, lpi_chunks,
						      0, nr_chunks, 0);
		if (chunk_id < lpi_chunks)
			break;

		nr_chunks--;
	} while (nr_chunks > 0);

	if (!nr_chunks)
		goto out;

	/* GFP_ATOMIC: may be called with a raw spinlock held by callers */
	bitmap = kzalloc(BITS_TO_LONGS(nr_chunks * IRQS_PER_CHUNK) * sizeof (long),
			 GFP_ATOMIC);
	if (!bitmap)
		goto out;

	for (i = 0; i < nr_chunks; i++)
		set_bit(chunk_id + i, lpi_bitmap);

	*base = its_chunk_to_lpi(chunk_id);
	*nr_ids = nr_chunks * IRQS_PER_CHUNK;

out:
	spin_unlock(&lpi_lock);

	if (!bitmap)
		*base = *nr_ids = 0;

	return bitmap;
}

/* Return the chunks backing [base, base + nr_ids) to the global pool. */
static void its_lpi_free_chunks(unsigned long *bitmap, int base, int nr_ids)
{
	int lpi;

	spin_lock(&lpi_lock);

	for (lpi = base; lpi < (base + nr_ids); lpi += IRQS_PER_CHUNK) {
		int chunk = its_lpi_to_chunk(lpi);

		BUG_ON(chunk > lpi_chunks);
		if (test_bit(chunk, lpi_bitmap)) {
			clear_bit(chunk, lpi_bitmap);
		} else {
			pr_err("Bad LPI chunk %d\n", chunk);
		}
	}

	spin_unlock(&lpi_lock);

	kfree(bitmap);
}

/*
 * Allocate the LPI property table shared by all redistributors and
 * pre-set every entry: priority 0xa0, Group-1, disabled.
 */
static struct page *its_allocate_prop_table(gfp_t gfp_flags)
{
	struct page *prop_page;

	prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ));
	if (!prop_page)
		return NULL;

	/* Priority 0xa0, Group-1, disabled */
	memset(page_address(prop_page),
	       LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1,
	       LPI_PROPBASE_SZ);

	/* Make sure the GIC will observe the written configuration */
	gic_flush_dcache_to_poc(page_address(prop_page), LPI_PROPBASE_SZ);

	return prop_page;
}

static void its_free_prop_table(struct page *prop_page)
{
	free_pages((unsigned long)page_address(prop_page),
		   get_order(LPI_PROPBASE_SZ));
}

/*
 * One-time boot allocation of the LPI property table, followed by the
 * chunk allocator initialisation sized by the same number of ID bits.
 */
static int __init its_alloc_lpi_tables(void)
{
	phys_addr_t paddr;

	lpi_id_bits = min_t(u32, gic_rdists->id_bits, ITS_MAX_LPI_NRBITS);
	gic_rdists->prop_page =
its_allocate_prop_table(GFP_NOWAIT);
	if (!gic_rdists->prop_page) {
		pr_err("Failed to allocate PROPBASE\n");
		return -ENOMEM;
	}

	paddr = page_to_phys(gic_rdists->prop_page);
	pr_info("GIC: using LPI property table @%pa\n", &paddr);

	return its_lpi_init(lpi_id_bits);
}

/* Human-readable names for the GITS_BASERn table types. */
static const char *its_base_type_string[] = {
	[GITS_BASER_TYPE_DEVICE]	= "Devices",
	[GITS_BASER_TYPE_VCPU]		= "Virtual CPUs",
	[GITS_BASER_TYPE_RESERVED3]	= "Reserved (3)",
	[GITS_BASER_TYPE_COLLECTION]	= "Interrupt Collections",
	[GITS_BASER_TYPE_RESERVED5]	= "Reserved (5)",
	[GITS_BASER_TYPE_RESERVED6]	= "Reserved (6)",
	[GITS_BASER_TYPE_RESERVED7]	= "Reserved (7)",
};

static u64 its_read_baser(struct its_node *its, struct its_baser *baser)
{
	u32 idx = baser - its->tables;

	return gits_read_baser(its->base + GITS_BASER + (idx << 3));
}

/* Write a BASER and cache the value the hardware actually accepted. */
static void its_write_baser(struct its_node *its, struct its_baser *baser,
			    u64 val)
{
	u32 idx = baser - its->tables;

	gits_write_baser(val, its->base + GITS_BASER + (idx << 3));
	baser->val = its_read_baser(its, baser);
}

/*
 * Allocate backing memory for one ITS table and program the matching
 * GITS_BASERn register, negotiating shareability, cacheability and
 * page size with the hardware by re-reading what stuck after each
 * write. Returns 0 on success, -ENOMEM on allocation failure, -ENXIO
 * if the register value refuses to stick at all.
 */
static int its_setup_baser(struct its_node *its, struct its_baser *baser,
			   u64 cache, u64 shr, u32 psz, u32 order,
			   bool indirect)
{
	u64 val = its_read_baser(its, baser);
	u64 esz = GITS_BASER_ENTRY_SIZE(val);
	u64 type = GITS_BASER_TYPE(val);
	u32 alloc_pages;
	void *base;
	u64 tmp;

retry_alloc_baser:
	/* Clamp to the architectural maximum number of ITS pages */
	alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
	if (alloc_pages > GITS_BASER_PAGES_MAX) {
		pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n",
			&its->phys_base, its_base_type_string[type],
			alloc_pages, GITS_BASER_PAGES_MAX);
		alloc_pages = GITS_BASER_PAGES_MAX;
		order = get_order(GITS_BASER_PAGES_MAX * psz);
	}

	base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!base)
		return -ENOMEM;

retry_baser:
	val = (virt_to_phys(base) |
	       (type << GITS_BASER_TYPE_SHIFT) |
	       ((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) |
	       ((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT) |
	       cache |
	       shr |
	       GITS_BASER_VALID);

	val |= indirect ? GITS_BASER_INDIRECT : 0x0;

	switch (psz) {
	case SZ_4K:
		val |= GITS_BASER_PAGE_SIZE_4K;
		break;
	case SZ_16K:
		val |= GITS_BASER_PAGE_SIZE_16K;
		break;
	case SZ_64K:
		val |= GITS_BASER_PAGE_SIZE_64K;
		break;
	}

	its_write_baser(its, baser, val);
	tmp = baser->val;

	if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
		/*
		 * Shareability didn't stick. Just use
		 * whatever the read reported, which is likely
		 * to be the only thing this redistributor
		 * supports. If that's zero, make it
		 * non-cacheable as well.
		 */
		shr = tmp & GITS_BASER_SHAREABILITY_MASK;
		if (!shr) {
			cache = GITS_BASER_nC;
			gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));
		}
		goto retry_baser;
	}

	if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) {
		/*
		 * Page size didn't stick. Let's try a smaller
		 * size and retry. If we reach 4K, then
		 * something is horribly wrong...
		 */
		free_pages((unsigned long)base, order);
		baser->base = NULL;

		switch (psz) {
		case SZ_16K:
			psz = SZ_4K;
			goto retry_alloc_baser;
		case SZ_64K:
			psz = SZ_16K;
			goto retry_alloc_baser;
		}
	}

	if (val != tmp) {
		pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n",
		       &its->phys_base, its_base_type_string[type],
		       val, tmp);
		free_pages((unsigned long)base, order);
		return -ENXIO;
	}

	baser->order = order;
	baser->base = base;
	baser->psz = psz;
	tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz;

	pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n",
		&its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp),
		its_base_type_string[type],
		(unsigned long)virt_to_phys(base),
		indirect ? "indirect" : "flat", (int)esz,
		psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);

	return 0;
}

/*
 * Decide whether a Device/VCPU table should be two-level (indirect)
 * and compute the allocation order needed for its first level.
 */
static bool its_parse_indirect_baser(struct its_node *its,
				     struct its_baser *baser,
				     u32 psz, u32 *order)
{
	u64 tmp = its_read_baser(its, baser);
	u64 type = GITS_BASER_TYPE(tmp);
	u64 esz = GITS_BASER_ENTRY_SIZE(tmp);
	u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb;
	u32 ids = its->device_ids;
	u32 new_order = *order;
	bool indirect = false;

	/* No need to enable Indirection if memory requirement < (psz*2)bytes */
	if ((esz << ids) > (psz * 2)) {
		/*
		 * Find out whether hw supports a single or two-level
		 * table by reading bit at offset '62' after writing '1' to it.
		 */
		its_write_baser(its, baser, val | GITS_BASER_INDIRECT);
		indirect = !!(baser->val & GITS_BASER_INDIRECT);

		if (indirect) {
			/*
			 * The size of the lvl2 table is equal to ITS page size
			 * which is 'psz'.
For computing lvl1 table size,
			 * subtract ID bits that sparse lvl2 table from 'ids'
			 * which is reported by ITS hardware times lvl1 table
			 * entry size.
			 */
			ids -= ilog2(psz / (int)esz);
			esz = GITS_LVL1_ENTRY_SIZE;
		}
	}

	/*
	 * Allocate as many entries as required to fit the
	 * range of device IDs that the ITS can grok... The ID
	 * space being incredibly sparse, this results in a
	 * massive waste of memory if two-level device table
	 * feature is not supported by hardware.
	 */
	new_order = max_t(u32, get_order(esz << ids), new_order);
	if (new_order >= MAX_ORDER) {
		new_order = MAX_ORDER - 1;
		ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz);
		pr_warn("ITS@%pa: %s Table too large, reduce ids %u->%u\n",
			&its->phys_base, its_base_type_string[type],
			its->device_ids, ids);
	}

	*order = new_order;

	return indirect;
}

/* Release every table previously allocated by its_setup_baser(). */
static void its_free_tables(struct its_node *its)
{
	int i;

	for (i = 0; i < GITS_BASER_NR_REGS; i++) {
		if (its->tables[i].base) {
			free_pages((unsigned long)its->tables[i].base,
				   its->tables[i].order);
			its->tables[i].base = NULL;
		}
	}
}

/*
 * Walk all GITS_BASERn registers and allocate the tables the ITS asks
 * for, feeding the page size/cacheability/shareability that one BASER
 * negotiated into the next. Frees everything and returns the error if
 * any single table fails to set up.
 */
static int its_alloc_tables(struct its_node *its)
{
	u64 shr = GITS_BASER_InnerShareable;
	u64 cache = GITS_BASER_RaWaWb;
	u32 psz = SZ_64K;
	int err, i;

	if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375)
		/* erratum 24313: ignore memory access type */
		cache = GITS_BASER_nCnB;

	for (i = 0; i < GITS_BASER_NR_REGS; i++) {
		struct its_baser *baser = its->tables + i;
		u64 val = its_read_baser(its, baser);
		u64 type = GITS_BASER_TYPE(val);
		u32 order = get_order(psz);
		bool indirect = false;

		switch (type) {
		case GITS_BASER_TYPE_NONE:
			continue;

		case GITS_BASER_TYPE_DEVICE:
		case GITS_BASER_TYPE_VCPU:
			indirect = its_parse_indirect_baser(its, baser,
							    psz, &order);
			break;
		}

		err = its_setup_baser(its, baser, cache, shr, psz, order, indirect);
		if (err < 0) {
			its_free_tables(its);
			return err;
		}

		/* Update settings which will be used for next BASERn */
		psz = baser->psz;
		cache = baser->val & GITS_BASER_CACHEABILITY_MASK;
		shr = baser->val & GITS_BASER_SHAREABILITY_MASK;
	}

	return 0;
}

/* One collection slot per possible CPU. */
static int its_alloc_collections(struct its_node *its)
{
	its->collections = kzalloc(nr_cpu_ids * sizeof(*its->collections),
				   GFP_KERNEL);
	if (!its->collections)
		return -ENOMEM;

	return 0;
}

/* Allocate a zeroed, suitably aligned LPI pending table page. */
static struct page *its_allocate_pending_table(gfp_t gfp_flags)
{
	struct page *pend_page;
	/*
	 * The pending pages have to be at least 64kB aligned,
	 * hence the 'max(LPI_PENDBASE_SZ, SZ_64K)' below.
*/
	pend_page = alloc_pages(gfp_flags | __GFP_ZERO,
				get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K)));
	if (!pend_page)
		return NULL;

	/* Make sure the GIC will observe the zero-ed page */
	gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ);

	return pend_page;
}

static void its_free_pending_table(struct page *pt)
{
	free_pages((unsigned long)page_address(pt),
		   get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K)));
}

/*
 * Per-CPU LPI bring-up: allocate this redistributor's pending table if
 * it does not exist yet, program GICR_PROPBASER/GICR_PENDBASER
 * (falling back to non-cacheable + software cache flushing when the
 * redistributor reports non-shareable), and finally enable LPI
 * delivery. The register programming order and the dsb() barriers are
 * intentional and must not be reordered.
 */
static void its_cpu_init_lpis(void)
{
	void __iomem *rbase = gic_data_rdist_rd_base();
	struct page *pend_page;
	u64 val, tmp;

	/* If we didn't allocate the pending table yet, do it now */
	pend_page = gic_data_rdist()->pend_page;
	if (!pend_page) {
		phys_addr_t paddr;

		pend_page = its_allocate_pending_table(GFP_NOWAIT);
		if (!pend_page) {
			pr_err("Failed to allocate PENDBASE for CPU%d\n",
			       smp_processor_id());
			return;
		}

		paddr = page_to_phys(pend_page);
		pr_info("CPU%d: using LPI pending table @%pa\n",
			smp_processor_id(), &paddr);
		gic_data_rdist()->pend_page = pend_page;
	}

	/* Disable LPIs */
	val = readl_relaxed(rbase + GICR_CTLR);
	val &= ~GICR_CTLR_ENABLE_LPIS;
	writel_relaxed(val, rbase + GICR_CTLR);

	/*
	 * Make sure any change to the table is observable by the GIC.
	 */
	dsb(sy);

	/* set PROPBASE */
	val = (page_to_phys(gic_rdists->prop_page) |
	       GICR_PROPBASER_InnerShareable |
	       GICR_PROPBASER_RaWaWb |
	       ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));

	gicr_write_propbaser(val, rbase + GICR_PROPBASER);
	tmp = gicr_read_propbaser(rbase + GICR_PROPBASER);

	if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
		if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
			/*
			 * The HW reports non-shareable, we must
			 * remove the cacheability attributes as
			 * well.
			 */
			val &= ~(GICR_PROPBASER_SHAREABILITY_MASK |
				 GICR_PROPBASER_CACHEABILITY_MASK);
			val |= GICR_PROPBASER_nC;
			gicr_write_propbaser(val, rbase + GICR_PROPBASER);
		}
		pr_info_once("GIC: using cache flushing for LPI property table\n");
		gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
	}

	/* set PENDBASE */
	val = (page_to_phys(pend_page) |
	       GICR_PENDBASER_InnerShareable |
	       GICR_PENDBASER_RaWaWb);

	gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
	tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER);

	if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
		/*
		 * The HW reports non-shareable, we must remove the
		 * cacheability attributes as well.
		 */
		val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
			 GICR_PENDBASER_CACHEABILITY_MASK);
		val |= GICR_PENDBASER_nC;
		gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
	}

	/* Enable LPIs */
	val = readl_relaxed(rbase + GICR_CTLR);
	val |= GICR_CTLR_ENABLE_LPIS;
	writel_relaxed(val, rbase + GICR_CTLR);

	/* Make sure the GIC has seen the above */
	dsb(sy);
}

/*
 * Map this CPU's collection on every registered ITS and flush the
 * mapping with an INVALL, honouring the Cavium 23144 cross-node
 * restriction where applicable.
 */
static void its_cpu_init_collection(void)
{
	struct its_node *its;
	int cpu;

	spin_lock(&its_lock);
	cpu = smp_processor_id();

	list_for_each_entry(its, &its_nodes, entry) {
		u64 target;

		/* avoid cross node collections and its mapping */
		if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
			struct device_node *cpu_node;

			cpu_node = of_get_cpu_node(cpu, NULL);
			if (its->numa_node != NUMA_NO_NODE &&
			    its->numa_node != of_node_to_nid(cpu_node))
				continue;
		}

		/*
		 * We now have to bind each collection to its target
		 * redistributor.
19261ac19ca6SMarc Zyngier */ 1927589ce5f4SMarc Zyngier if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) { 19281ac19ca6SMarc Zyngier /* 19291ac19ca6SMarc Zyngier * This ITS wants the physical address of the 19301ac19ca6SMarc Zyngier * redistributor. 19311ac19ca6SMarc Zyngier */ 19321ac19ca6SMarc Zyngier target = gic_data_rdist()->phys_base; 19331ac19ca6SMarc Zyngier } else { 19341ac19ca6SMarc Zyngier /* 19351ac19ca6SMarc Zyngier * This ITS wants a linear CPU number. 19361ac19ca6SMarc Zyngier */ 1937589ce5f4SMarc Zyngier target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER); 1938263fcd31SMarc Zyngier target = GICR_TYPER_CPU_NUMBER(target) << 16; 19391ac19ca6SMarc Zyngier } 19401ac19ca6SMarc Zyngier 19411ac19ca6SMarc Zyngier /* Perform collection mapping */ 19421ac19ca6SMarc Zyngier its->collections[cpu].target_address = target; 19431ac19ca6SMarc Zyngier its->collections[cpu].col_id = cpu; 19441ac19ca6SMarc Zyngier 19451ac19ca6SMarc Zyngier its_send_mapc(its, &its->collections[cpu], 1); 19461ac19ca6SMarc Zyngier its_send_invall(its, &its->collections[cpu]); 19471ac19ca6SMarc Zyngier } 19481ac19ca6SMarc Zyngier 19491ac19ca6SMarc Zyngier spin_unlock(&its_lock); 19501ac19ca6SMarc Zyngier } 195184a6a2e7SMarc Zyngier 195284a6a2e7SMarc Zyngier static struct its_device *its_find_device(struct its_node *its, u32 dev_id) 195384a6a2e7SMarc Zyngier { 195484a6a2e7SMarc Zyngier struct its_device *its_dev = NULL, *tmp; 19553e39e8f5SMarc Zyngier unsigned long flags; 195684a6a2e7SMarc Zyngier 19573e39e8f5SMarc Zyngier raw_spin_lock_irqsave(&its->lock, flags); 195884a6a2e7SMarc Zyngier 195984a6a2e7SMarc Zyngier list_for_each_entry(tmp, &its->its_device_list, entry) { 196084a6a2e7SMarc Zyngier if (tmp->device_id == dev_id) { 196184a6a2e7SMarc Zyngier its_dev = tmp; 196284a6a2e7SMarc Zyngier break; 196384a6a2e7SMarc Zyngier } 196484a6a2e7SMarc Zyngier } 196584a6a2e7SMarc Zyngier 19663e39e8f5SMarc Zyngier raw_spin_unlock_irqrestore(&its->lock, flags); 
196784a6a2e7SMarc Zyngier 196884a6a2e7SMarc Zyngier return its_dev; 196984a6a2e7SMarc Zyngier } 197084a6a2e7SMarc Zyngier 1971466b7d16SShanker Donthineni static struct its_baser *its_get_baser(struct its_node *its, u32 type) 1972466b7d16SShanker Donthineni { 1973466b7d16SShanker Donthineni int i; 1974466b7d16SShanker Donthineni 1975466b7d16SShanker Donthineni for (i = 0; i < GITS_BASER_NR_REGS; i++) { 1976466b7d16SShanker Donthineni if (GITS_BASER_TYPE(its->tables[i].val) == type) 1977466b7d16SShanker Donthineni return &its->tables[i]; 1978466b7d16SShanker Donthineni } 1979466b7d16SShanker Donthineni 1980466b7d16SShanker Donthineni return NULL; 1981466b7d16SShanker Donthineni } 1982466b7d16SShanker Donthineni 198370cc81edSMarc Zyngier static bool its_alloc_table_entry(struct its_baser *baser, u32 id) 19843faf24eaSShanker Donthineni { 19853faf24eaSShanker Donthineni struct page *page; 19863faf24eaSShanker Donthineni u32 esz, idx; 19873faf24eaSShanker Donthineni __le64 *table; 19883faf24eaSShanker Donthineni 19893faf24eaSShanker Donthineni /* Don't allow device id that exceeds single, flat table limit */ 19903faf24eaSShanker Donthineni esz = GITS_BASER_ENTRY_SIZE(baser->val); 19913faf24eaSShanker Donthineni if (!(baser->val & GITS_BASER_INDIRECT)) 199270cc81edSMarc Zyngier return (id < (PAGE_ORDER_TO_SIZE(baser->order) / esz)); 19933faf24eaSShanker Donthineni 19943faf24eaSShanker Donthineni /* Compute 1st level table index & check if that exceeds table limit */ 199570cc81edSMarc Zyngier idx = id >> ilog2(baser->psz / esz); 19963faf24eaSShanker Donthineni if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE)) 19973faf24eaSShanker Donthineni return false; 19983faf24eaSShanker Donthineni 19993faf24eaSShanker Donthineni table = baser->base; 20003faf24eaSShanker Donthineni 20013faf24eaSShanker Donthineni /* Allocate memory for 2nd level table */ 20023faf24eaSShanker Donthineni if (!table[idx]) { 20033faf24eaSShanker Donthineni page = alloc_pages(GFP_KERNEL 
| __GFP_ZERO, get_order(baser->psz)); 20043faf24eaSShanker Donthineni if (!page) 20053faf24eaSShanker Donthineni return false; 20063faf24eaSShanker Donthineni 20073faf24eaSShanker Donthineni /* Flush Lvl2 table to PoC if hw doesn't support coherency */ 20083faf24eaSShanker Donthineni if (!(baser->val & GITS_BASER_SHAREABILITY_MASK)) 2009328191c0SVladimir Murzin gic_flush_dcache_to_poc(page_address(page), baser->psz); 20103faf24eaSShanker Donthineni 20113faf24eaSShanker Donthineni table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID); 20123faf24eaSShanker Donthineni 20133faf24eaSShanker Donthineni /* Flush Lvl1 entry to PoC if hw doesn't support coherency */ 20143faf24eaSShanker Donthineni if (!(baser->val & GITS_BASER_SHAREABILITY_MASK)) 2015328191c0SVladimir Murzin gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE); 20163faf24eaSShanker Donthineni 20173faf24eaSShanker Donthineni /* Ensure updated table contents are visible to ITS hardware */ 20183faf24eaSShanker Donthineni dsb(sy); 20193faf24eaSShanker Donthineni } 20203faf24eaSShanker Donthineni 20213faf24eaSShanker Donthineni return true; 20223faf24eaSShanker Donthineni } 20233faf24eaSShanker Donthineni 202470cc81edSMarc Zyngier static bool its_alloc_device_table(struct its_node *its, u32 dev_id) 202570cc81edSMarc Zyngier { 202670cc81edSMarc Zyngier struct its_baser *baser; 202770cc81edSMarc Zyngier 202870cc81edSMarc Zyngier baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE); 202970cc81edSMarc Zyngier 203070cc81edSMarc Zyngier /* Don't allow device id that exceeds ITS hardware limit */ 203170cc81edSMarc Zyngier if (!baser) 203270cc81edSMarc Zyngier return (ilog2(dev_id) < its->device_ids); 203370cc81edSMarc Zyngier 203470cc81edSMarc Zyngier return its_alloc_table_entry(baser, dev_id); 203570cc81edSMarc Zyngier } 203670cc81edSMarc Zyngier 20377d75bbb4SMarc Zyngier static bool its_alloc_vpe_table(u32 vpe_id) 20387d75bbb4SMarc Zyngier { 20397d75bbb4SMarc Zyngier struct its_node *its; 
20407d75bbb4SMarc Zyngier 20417d75bbb4SMarc Zyngier /* 20427d75bbb4SMarc Zyngier * Make sure the L2 tables are allocated on *all* v4 ITSs. We 20437d75bbb4SMarc Zyngier * could try and only do it on ITSs corresponding to devices 20447d75bbb4SMarc Zyngier * that have interrupts targeted at this VPE, but the 20457d75bbb4SMarc Zyngier * complexity becomes crazy (and you have tons of memory 20467d75bbb4SMarc Zyngier * anyway, right?). 20477d75bbb4SMarc Zyngier */ 20487d75bbb4SMarc Zyngier list_for_each_entry(its, &its_nodes, entry) { 20497d75bbb4SMarc Zyngier struct its_baser *baser; 20507d75bbb4SMarc Zyngier 20517d75bbb4SMarc Zyngier if (!its->is_v4) 20527d75bbb4SMarc Zyngier continue; 20537d75bbb4SMarc Zyngier 20547d75bbb4SMarc Zyngier baser = its_get_baser(its, GITS_BASER_TYPE_VCPU); 20557d75bbb4SMarc Zyngier if (!baser) 20567d75bbb4SMarc Zyngier return false; 20577d75bbb4SMarc Zyngier 20587d75bbb4SMarc Zyngier if (!its_alloc_table_entry(baser, vpe_id)) 20597d75bbb4SMarc Zyngier return false; 20607d75bbb4SMarc Zyngier } 20617d75bbb4SMarc Zyngier 20627d75bbb4SMarc Zyngier return true; 20637d75bbb4SMarc Zyngier } 20647d75bbb4SMarc Zyngier 206584a6a2e7SMarc Zyngier static struct its_device *its_create_device(struct its_node *its, u32 dev_id, 206693f94ea0SMarc Zyngier int nvecs, bool alloc_lpis) 206784a6a2e7SMarc Zyngier { 206884a6a2e7SMarc Zyngier struct its_device *dev; 206993f94ea0SMarc Zyngier unsigned long *lpi_map = NULL; 20703e39e8f5SMarc Zyngier unsigned long flags; 2071591e5becSMarc Zyngier u16 *col_map = NULL; 207284a6a2e7SMarc Zyngier void *itt; 207384a6a2e7SMarc Zyngier int lpi_base; 207484a6a2e7SMarc Zyngier int nr_lpis; 2075c8481267SMarc Zyngier int nr_ites; 207684a6a2e7SMarc Zyngier int sz; 207784a6a2e7SMarc Zyngier 20783faf24eaSShanker Donthineni if (!its_alloc_device_table(its, dev_id)) 2079466b7d16SShanker Donthineni return NULL; 2080466b7d16SShanker Donthineni 208184a6a2e7SMarc Zyngier dev = kzalloc(sizeof(*dev), GFP_KERNEL); 2082c8481267SMarc Zyngier 
/* 2083c8481267SMarc Zyngier * At least one bit of EventID is being used, hence a minimum 2084c8481267SMarc Zyngier * of two entries. No, the architecture doesn't let you 2085c8481267SMarc Zyngier * express an ITT with a single entry. 2086c8481267SMarc Zyngier */ 208796555c47SWill Deacon nr_ites = max(2UL, roundup_pow_of_two(nvecs)); 2088c8481267SMarc Zyngier sz = nr_ites * its->ite_size; 208984a6a2e7SMarc Zyngier sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; 20906c834125SYun Wu itt = kzalloc(sz, GFP_KERNEL); 209193f94ea0SMarc Zyngier if (alloc_lpis) { 209284a6a2e7SMarc Zyngier lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis); 2093591e5becSMarc Zyngier if (lpi_map) 209493f94ea0SMarc Zyngier col_map = kzalloc(sizeof(*col_map) * nr_lpis, 209593f94ea0SMarc Zyngier GFP_KERNEL); 209693f94ea0SMarc Zyngier } else { 209793f94ea0SMarc Zyngier col_map = kzalloc(sizeof(*col_map) * nr_ites, GFP_KERNEL); 209893f94ea0SMarc Zyngier nr_lpis = 0; 209993f94ea0SMarc Zyngier lpi_base = 0; 210093f94ea0SMarc Zyngier } 210184a6a2e7SMarc Zyngier 210293f94ea0SMarc Zyngier if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) { 210384a6a2e7SMarc Zyngier kfree(dev); 210484a6a2e7SMarc Zyngier kfree(itt); 210584a6a2e7SMarc Zyngier kfree(lpi_map); 2106591e5becSMarc Zyngier kfree(col_map); 210784a6a2e7SMarc Zyngier return NULL; 210884a6a2e7SMarc Zyngier } 210984a6a2e7SMarc Zyngier 2110328191c0SVladimir Murzin gic_flush_dcache_to_poc(itt, sz); 21115a9a8915SMarc Zyngier 211284a6a2e7SMarc Zyngier dev->its = its; 211384a6a2e7SMarc Zyngier dev->itt = itt; 2114c8481267SMarc Zyngier dev->nr_ites = nr_ites; 2115591e5becSMarc Zyngier dev->event_map.lpi_map = lpi_map; 2116591e5becSMarc Zyngier dev->event_map.col_map = col_map; 2117591e5becSMarc Zyngier dev->event_map.lpi_base = lpi_base; 2118591e5becSMarc Zyngier dev->event_map.nr_lpis = nr_lpis; 2119d011e4e6SMarc Zyngier mutex_init(&dev->event_map.vlpi_lock); 212084a6a2e7SMarc Zyngier dev->device_id = dev_id; 212184a6a2e7SMarc Zyngier 
INIT_LIST_HEAD(&dev->entry); 212284a6a2e7SMarc Zyngier 21233e39e8f5SMarc Zyngier raw_spin_lock_irqsave(&its->lock, flags); 212484a6a2e7SMarc Zyngier list_add(&dev->entry, &its->its_device_list); 21253e39e8f5SMarc Zyngier raw_spin_unlock_irqrestore(&its->lock, flags); 212684a6a2e7SMarc Zyngier 212784a6a2e7SMarc Zyngier /* Map device to its ITT */ 212884a6a2e7SMarc Zyngier its_send_mapd(dev, 1); 212984a6a2e7SMarc Zyngier 213084a6a2e7SMarc Zyngier return dev; 213184a6a2e7SMarc Zyngier } 213284a6a2e7SMarc Zyngier 213384a6a2e7SMarc Zyngier static void its_free_device(struct its_device *its_dev) 213484a6a2e7SMarc Zyngier { 21353e39e8f5SMarc Zyngier unsigned long flags; 21363e39e8f5SMarc Zyngier 21373e39e8f5SMarc Zyngier raw_spin_lock_irqsave(&its_dev->its->lock, flags); 213884a6a2e7SMarc Zyngier list_del(&its_dev->entry); 21393e39e8f5SMarc Zyngier raw_spin_unlock_irqrestore(&its_dev->its->lock, flags); 214084a6a2e7SMarc Zyngier kfree(its_dev->itt); 214184a6a2e7SMarc Zyngier kfree(its_dev); 214284a6a2e7SMarc Zyngier } 2143b48ac83dSMarc Zyngier 2144b48ac83dSMarc Zyngier static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq) 2145b48ac83dSMarc Zyngier { 2146b48ac83dSMarc Zyngier int idx; 2147b48ac83dSMarc Zyngier 2148591e5becSMarc Zyngier idx = find_first_zero_bit(dev->event_map.lpi_map, 2149591e5becSMarc Zyngier dev->event_map.nr_lpis); 2150591e5becSMarc Zyngier if (idx == dev->event_map.nr_lpis) 2151b48ac83dSMarc Zyngier return -ENOSPC; 2152b48ac83dSMarc Zyngier 2153591e5becSMarc Zyngier *hwirq = dev->event_map.lpi_base + idx; 2154591e5becSMarc Zyngier set_bit(idx, dev->event_map.lpi_map); 2155b48ac83dSMarc Zyngier 2156b48ac83dSMarc Zyngier return 0; 2157b48ac83dSMarc Zyngier } 2158b48ac83dSMarc Zyngier 215954456db9SMarc Zyngier static int its_msi_prepare(struct irq_domain *domain, struct device *dev, 2160b48ac83dSMarc Zyngier int nvec, msi_alloc_info_t *info) 2161b48ac83dSMarc Zyngier { 2162b48ac83dSMarc Zyngier struct its_node *its; 
2163b48ac83dSMarc Zyngier struct its_device *its_dev; 216454456db9SMarc Zyngier struct msi_domain_info *msi_info; 216554456db9SMarc Zyngier u32 dev_id; 2166b48ac83dSMarc Zyngier 216754456db9SMarc Zyngier /* 216854456db9SMarc Zyngier * We ignore "dev" entierely, and rely on the dev_id that has 216954456db9SMarc Zyngier * been passed via the scratchpad. This limits this domain's 217054456db9SMarc Zyngier * usefulness to upper layers that definitely know that they 217154456db9SMarc Zyngier * are built on top of the ITS. 217254456db9SMarc Zyngier */ 217354456db9SMarc Zyngier dev_id = info->scratchpad[0].ul; 217454456db9SMarc Zyngier 217554456db9SMarc Zyngier msi_info = msi_get_domain_info(domain); 217654456db9SMarc Zyngier its = msi_info->data; 217754456db9SMarc Zyngier 217820b3d54eSMarc Zyngier if (!gic_rdists->has_direct_lpi && 217920b3d54eSMarc Zyngier vpe_proxy.dev && 218020b3d54eSMarc Zyngier vpe_proxy.dev->its == its && 218120b3d54eSMarc Zyngier dev_id == vpe_proxy.dev->device_id) { 218220b3d54eSMarc Zyngier /* Bad luck. Get yourself a better implementation */ 218320b3d54eSMarc Zyngier WARN_ONCE(1, "DevId %x clashes with GICv4 VPE proxy device\n", 218420b3d54eSMarc Zyngier dev_id); 218520b3d54eSMarc Zyngier return -EINVAL; 218620b3d54eSMarc Zyngier } 218720b3d54eSMarc Zyngier 2188f130420eSMarc Zyngier its_dev = its_find_device(its, dev_id); 2189e8137f4fSMarc Zyngier if (its_dev) { 2190e8137f4fSMarc Zyngier /* 2191e8137f4fSMarc Zyngier * We already have seen this ID, probably through 2192e8137f4fSMarc Zyngier * another alias (PCI bridge of some sort). No need to 2193e8137f4fSMarc Zyngier * create the device. 
2194e8137f4fSMarc Zyngier */ 2195f130420eSMarc Zyngier pr_debug("Reusing ITT for devID %x\n", dev_id); 2196e8137f4fSMarc Zyngier goto out; 2197e8137f4fSMarc Zyngier } 2198b48ac83dSMarc Zyngier 219993f94ea0SMarc Zyngier its_dev = its_create_device(its, dev_id, nvec, true); 2200b48ac83dSMarc Zyngier if (!its_dev) 2201b48ac83dSMarc Zyngier return -ENOMEM; 2202b48ac83dSMarc Zyngier 2203f130420eSMarc Zyngier pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec)); 2204e8137f4fSMarc Zyngier out: 2205b48ac83dSMarc Zyngier info->scratchpad[0].ptr = its_dev; 2206b48ac83dSMarc Zyngier return 0; 2207b48ac83dSMarc Zyngier } 2208b48ac83dSMarc Zyngier 220954456db9SMarc Zyngier static struct msi_domain_ops its_msi_domain_ops = { 221054456db9SMarc Zyngier .msi_prepare = its_msi_prepare, 221154456db9SMarc Zyngier }; 221254456db9SMarc Zyngier 2213b48ac83dSMarc Zyngier static int its_irq_gic_domain_alloc(struct irq_domain *domain, 2214b48ac83dSMarc Zyngier unsigned int virq, 2215b48ac83dSMarc Zyngier irq_hw_number_t hwirq) 2216b48ac83dSMarc Zyngier { 2217f833f57fSMarc Zyngier struct irq_fwspec fwspec; 2218b48ac83dSMarc Zyngier 2219f833f57fSMarc Zyngier if (irq_domain_get_of_node(domain->parent)) { 2220f833f57fSMarc Zyngier fwspec.fwnode = domain->parent->fwnode; 2221f833f57fSMarc Zyngier fwspec.param_count = 3; 2222f833f57fSMarc Zyngier fwspec.param[0] = GIC_IRQ_TYPE_LPI; 2223f833f57fSMarc Zyngier fwspec.param[1] = hwirq; 2224f833f57fSMarc Zyngier fwspec.param[2] = IRQ_TYPE_EDGE_RISING; 22253f010cf1STomasz Nowicki } else if (is_fwnode_irqchip(domain->parent->fwnode)) { 22263f010cf1STomasz Nowicki fwspec.fwnode = domain->parent->fwnode; 22273f010cf1STomasz Nowicki fwspec.param_count = 2; 22283f010cf1STomasz Nowicki fwspec.param[0] = hwirq; 22293f010cf1STomasz Nowicki fwspec.param[1] = IRQ_TYPE_EDGE_RISING; 2230f833f57fSMarc Zyngier } else { 2231f833f57fSMarc Zyngier return -EINVAL; 2232f833f57fSMarc Zyngier } 2233b48ac83dSMarc Zyngier 2234f833f57fSMarc Zyngier return 
irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec); 2235b48ac83dSMarc Zyngier } 2236b48ac83dSMarc Zyngier 2237b48ac83dSMarc Zyngier static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, 2238b48ac83dSMarc Zyngier unsigned int nr_irqs, void *args) 2239b48ac83dSMarc Zyngier { 2240b48ac83dSMarc Zyngier msi_alloc_info_t *info = args; 2241b48ac83dSMarc Zyngier struct its_device *its_dev = info->scratchpad[0].ptr; 2242b48ac83dSMarc Zyngier irq_hw_number_t hwirq; 2243b48ac83dSMarc Zyngier int err; 2244b48ac83dSMarc Zyngier int i; 2245b48ac83dSMarc Zyngier 2246b48ac83dSMarc Zyngier for (i = 0; i < nr_irqs; i++) { 2247b48ac83dSMarc Zyngier err = its_alloc_device_irq(its_dev, &hwirq); 2248b48ac83dSMarc Zyngier if (err) 2249b48ac83dSMarc Zyngier return err; 2250b48ac83dSMarc Zyngier 2251b48ac83dSMarc Zyngier err = its_irq_gic_domain_alloc(domain, virq + i, hwirq); 2252b48ac83dSMarc Zyngier if (err) 2253b48ac83dSMarc Zyngier return err; 2254b48ac83dSMarc Zyngier 2255b48ac83dSMarc Zyngier irq_domain_set_hwirq_and_chip(domain, virq + i, 2256b48ac83dSMarc Zyngier hwirq, &its_irq_chip, its_dev); 22570d224d35SMarc Zyngier irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i))); 2258f130420eSMarc Zyngier pr_debug("ID:%d pID:%d vID:%d\n", 2259591e5becSMarc Zyngier (int)(hwirq - its_dev->event_map.lpi_base), 2260591e5becSMarc Zyngier (int) hwirq, virq + i); 2261b48ac83dSMarc Zyngier } 2262b48ac83dSMarc Zyngier 2263b48ac83dSMarc Zyngier return 0; 2264b48ac83dSMarc Zyngier } 2265b48ac83dSMarc Zyngier 226672491643SThomas Gleixner static int its_irq_domain_activate(struct irq_domain *domain, 226772491643SThomas Gleixner struct irq_data *d, bool early) 2268aca268dfSMarc Zyngier { 2269aca268dfSMarc Zyngier struct its_device *its_dev = irq_data_get_irq_chip_data(d); 2270aca268dfSMarc Zyngier u32 event = its_get_event_id(d); 2271fbf8f40eSGanapatrao Kulkarni const struct cpumask *cpu_mask = cpu_online_mask; 22720d224d35SMarc Zyngier int cpu; 
2273fbf8f40eSGanapatrao Kulkarni 2274fbf8f40eSGanapatrao Kulkarni /* get the cpu_mask of local node */ 2275fbf8f40eSGanapatrao Kulkarni if (its_dev->its->numa_node >= 0) 2276fbf8f40eSGanapatrao Kulkarni cpu_mask = cpumask_of_node(its_dev->its->numa_node); 2277aca268dfSMarc Zyngier 2278591e5becSMarc Zyngier /* Bind the LPI to the first possible CPU */ 22790d224d35SMarc Zyngier cpu = cpumask_first(cpu_mask); 22800d224d35SMarc Zyngier its_dev->event_map.col_map[event] = cpu; 22810d224d35SMarc Zyngier irq_data_update_effective_affinity(d, cpumask_of(cpu)); 2282591e5becSMarc Zyngier 2283aca268dfSMarc Zyngier /* Map the GIC IRQ and event to the device */ 22846a25ad3aSMarc Zyngier its_send_mapti(its_dev, d->hwirq, event); 228572491643SThomas Gleixner return 0; 2286aca268dfSMarc Zyngier } 2287aca268dfSMarc Zyngier 2288aca268dfSMarc Zyngier static void its_irq_domain_deactivate(struct irq_domain *domain, 2289aca268dfSMarc Zyngier struct irq_data *d) 2290aca268dfSMarc Zyngier { 2291aca268dfSMarc Zyngier struct its_device *its_dev = irq_data_get_irq_chip_data(d); 2292aca268dfSMarc Zyngier u32 event = its_get_event_id(d); 2293aca268dfSMarc Zyngier 2294aca268dfSMarc Zyngier /* Stop the delivery of interrupts */ 2295aca268dfSMarc Zyngier its_send_discard(its_dev, event); 2296aca268dfSMarc Zyngier } 2297aca268dfSMarc Zyngier 2298b48ac83dSMarc Zyngier static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq, 2299b48ac83dSMarc Zyngier unsigned int nr_irqs) 2300b48ac83dSMarc Zyngier { 2301b48ac83dSMarc Zyngier struct irq_data *d = irq_domain_get_irq_data(domain, virq); 2302b48ac83dSMarc Zyngier struct its_device *its_dev = irq_data_get_irq_chip_data(d); 2303b48ac83dSMarc Zyngier int i; 2304b48ac83dSMarc Zyngier 2305b48ac83dSMarc Zyngier for (i = 0; i < nr_irqs; i++) { 2306b48ac83dSMarc Zyngier struct irq_data *data = irq_domain_get_irq_data(domain, 2307b48ac83dSMarc Zyngier virq + i); 2308aca268dfSMarc Zyngier u32 event = its_get_event_id(data); 2309b48ac83dSMarc 
Zyngier 2310b48ac83dSMarc Zyngier /* Mark interrupt index as unused */ 2311591e5becSMarc Zyngier clear_bit(event, its_dev->event_map.lpi_map); 2312b48ac83dSMarc Zyngier 2313b48ac83dSMarc Zyngier /* Nuke the entry in the domain */ 23142da39949SMarc Zyngier irq_domain_reset_irq_data(data); 2315b48ac83dSMarc Zyngier } 2316b48ac83dSMarc Zyngier 2317b48ac83dSMarc Zyngier /* If all interrupts have been freed, start mopping the floor */ 2318591e5becSMarc Zyngier if (bitmap_empty(its_dev->event_map.lpi_map, 2319591e5becSMarc Zyngier its_dev->event_map.nr_lpis)) { 2320cf2be8baSMarc Zyngier its_lpi_free_chunks(its_dev->event_map.lpi_map, 2321cf2be8baSMarc Zyngier its_dev->event_map.lpi_base, 2322cf2be8baSMarc Zyngier its_dev->event_map.nr_lpis); 2323cf2be8baSMarc Zyngier kfree(its_dev->event_map.col_map); 2324b48ac83dSMarc Zyngier 2325b48ac83dSMarc Zyngier /* Unmap device/itt */ 2326b48ac83dSMarc Zyngier its_send_mapd(its_dev, 0); 2327b48ac83dSMarc Zyngier its_free_device(its_dev); 2328b48ac83dSMarc Zyngier } 2329b48ac83dSMarc Zyngier 2330b48ac83dSMarc Zyngier irq_domain_free_irqs_parent(domain, virq, nr_irqs); 2331b48ac83dSMarc Zyngier } 2332b48ac83dSMarc Zyngier 2333b48ac83dSMarc Zyngier static const struct irq_domain_ops its_domain_ops = { 2334b48ac83dSMarc Zyngier .alloc = its_irq_domain_alloc, 2335b48ac83dSMarc Zyngier .free = its_irq_domain_free, 2336aca268dfSMarc Zyngier .activate = its_irq_domain_activate, 2337aca268dfSMarc Zyngier .deactivate = its_irq_domain_deactivate, 2338b48ac83dSMarc Zyngier }; 23394c21f3c2SMarc Zyngier 234020b3d54eSMarc Zyngier /* 234120b3d54eSMarc Zyngier * This is insane. 234220b3d54eSMarc Zyngier * 234320b3d54eSMarc Zyngier * If a GICv4 doesn't implement Direct LPIs (which is extremely 234420b3d54eSMarc Zyngier * likely), the only way to perform an invalidate is to use a fake 234520b3d54eSMarc Zyngier * device to issue an INV command, implying that the LPI has first 234620b3d54eSMarc Zyngier * been mapped to some event on that device. 
Since this is not exactly 234720b3d54eSMarc Zyngier * cheap, we try to keep that mapping around as long as possible, and 234820b3d54eSMarc Zyngier * only issue an UNMAP if we're short on available slots. 234920b3d54eSMarc Zyngier * 235020b3d54eSMarc Zyngier * Broken by design(tm). 235120b3d54eSMarc Zyngier */ 235220b3d54eSMarc Zyngier static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe) 235320b3d54eSMarc Zyngier { 235420b3d54eSMarc Zyngier /* Already unmapped? */ 235520b3d54eSMarc Zyngier if (vpe->vpe_proxy_event == -1) 235620b3d54eSMarc Zyngier return; 235720b3d54eSMarc Zyngier 235820b3d54eSMarc Zyngier its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event); 235920b3d54eSMarc Zyngier vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL; 236020b3d54eSMarc Zyngier 236120b3d54eSMarc Zyngier /* 236220b3d54eSMarc Zyngier * We don't track empty slots at all, so let's move the 236320b3d54eSMarc Zyngier * next_victim pointer if we can quickly reuse that slot 236420b3d54eSMarc Zyngier * instead of nuking an existing entry. Not clear that this is 236520b3d54eSMarc Zyngier * always a win though, and this might just generate a ripple 236620b3d54eSMarc Zyngier * effect... Let's just hope VPEs don't migrate too often. 
236720b3d54eSMarc Zyngier */ 236820b3d54eSMarc Zyngier if (vpe_proxy.vpes[vpe_proxy.next_victim]) 236920b3d54eSMarc Zyngier vpe_proxy.next_victim = vpe->vpe_proxy_event; 237020b3d54eSMarc Zyngier 237120b3d54eSMarc Zyngier vpe->vpe_proxy_event = -1; 237220b3d54eSMarc Zyngier } 237320b3d54eSMarc Zyngier 237420b3d54eSMarc Zyngier static void its_vpe_db_proxy_unmap(struct its_vpe *vpe) 237520b3d54eSMarc Zyngier { 237620b3d54eSMarc Zyngier if (!gic_rdists->has_direct_lpi) { 237720b3d54eSMarc Zyngier unsigned long flags; 237820b3d54eSMarc Zyngier 237920b3d54eSMarc Zyngier raw_spin_lock_irqsave(&vpe_proxy.lock, flags); 238020b3d54eSMarc Zyngier its_vpe_db_proxy_unmap_locked(vpe); 238120b3d54eSMarc Zyngier raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags); 238220b3d54eSMarc Zyngier } 238320b3d54eSMarc Zyngier } 238420b3d54eSMarc Zyngier 238520b3d54eSMarc Zyngier static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe) 238620b3d54eSMarc Zyngier { 238720b3d54eSMarc Zyngier /* Already mapped? */ 238820b3d54eSMarc Zyngier if (vpe->vpe_proxy_event != -1) 238920b3d54eSMarc Zyngier return; 239020b3d54eSMarc Zyngier 239120b3d54eSMarc Zyngier /* This slot was already allocated. Kick the other VPE out. 
*/ 239220b3d54eSMarc Zyngier if (vpe_proxy.vpes[vpe_proxy.next_victim]) 239320b3d54eSMarc Zyngier its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]); 239420b3d54eSMarc Zyngier 239520b3d54eSMarc Zyngier /* Map the new VPE instead */ 239620b3d54eSMarc Zyngier vpe_proxy.vpes[vpe_proxy.next_victim] = vpe; 239720b3d54eSMarc Zyngier vpe->vpe_proxy_event = vpe_proxy.next_victim; 239820b3d54eSMarc Zyngier vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % vpe_proxy.dev->nr_ites; 239920b3d54eSMarc Zyngier 240020b3d54eSMarc Zyngier vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx; 240120b3d54eSMarc Zyngier its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event); 240220b3d54eSMarc Zyngier } 240320b3d54eSMarc Zyngier 2404958b90d1SMarc Zyngier static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to) 2405958b90d1SMarc Zyngier { 2406958b90d1SMarc Zyngier unsigned long flags; 2407958b90d1SMarc Zyngier struct its_collection *target_col; 2408958b90d1SMarc Zyngier 2409958b90d1SMarc Zyngier if (gic_rdists->has_direct_lpi) { 2410958b90d1SMarc Zyngier void __iomem *rdbase; 2411958b90d1SMarc Zyngier 2412958b90d1SMarc Zyngier rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base; 2413958b90d1SMarc Zyngier gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR); 2414958b90d1SMarc Zyngier while (gic_read_lpir(rdbase + GICR_SYNCR) & 1) 2415958b90d1SMarc Zyngier cpu_relax(); 2416958b90d1SMarc Zyngier 2417958b90d1SMarc Zyngier return; 2418958b90d1SMarc Zyngier } 2419958b90d1SMarc Zyngier 2420958b90d1SMarc Zyngier raw_spin_lock_irqsave(&vpe_proxy.lock, flags); 2421958b90d1SMarc Zyngier 2422958b90d1SMarc Zyngier its_vpe_db_proxy_map_locked(vpe); 2423958b90d1SMarc Zyngier 2424958b90d1SMarc Zyngier target_col = &vpe_proxy.dev->its->collections[to]; 2425958b90d1SMarc Zyngier its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event); 2426958b90d1SMarc Zyngier vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to; 
2427958b90d1SMarc Zyngier 2428958b90d1SMarc Zyngier raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags); 2429958b90d1SMarc Zyngier } 2430958b90d1SMarc Zyngier 24313171a47aSMarc Zyngier static int its_vpe_set_affinity(struct irq_data *d, 24323171a47aSMarc Zyngier const struct cpumask *mask_val, 24333171a47aSMarc Zyngier bool force) 24343171a47aSMarc Zyngier { 24353171a47aSMarc Zyngier struct its_vpe *vpe = irq_data_get_irq_chip_data(d); 24363171a47aSMarc Zyngier int cpu = cpumask_first(mask_val); 24373171a47aSMarc Zyngier 24383171a47aSMarc Zyngier /* 24393171a47aSMarc Zyngier * Changing affinity is mega expensive, so let's be as lazy as 244020b3d54eSMarc Zyngier * we can and only do it if we really have to. Also, if mapped 2441958b90d1SMarc Zyngier * into the proxy device, we need to move the doorbell 2442958b90d1SMarc Zyngier * interrupt to its new location. 24433171a47aSMarc Zyngier */ 24443171a47aSMarc Zyngier if (vpe->col_idx != cpu) { 2445958b90d1SMarc Zyngier int from = vpe->col_idx; 2446958b90d1SMarc Zyngier 24473171a47aSMarc Zyngier vpe->col_idx = cpu; 24483171a47aSMarc Zyngier its_send_vmovp(vpe); 2449958b90d1SMarc Zyngier its_vpe_db_proxy_move(vpe, from, cpu); 24503171a47aSMarc Zyngier } 24513171a47aSMarc Zyngier 24523171a47aSMarc Zyngier return IRQ_SET_MASK_OK_DONE; 24533171a47aSMarc Zyngier } 24543171a47aSMarc Zyngier 2455e643d803SMarc Zyngier static void its_vpe_schedule(struct its_vpe *vpe) 2456e643d803SMarc Zyngier { 2457e643d803SMarc Zyngier void * __iomem vlpi_base = gic_data_rdist_vlpi_base(); 2458e643d803SMarc Zyngier u64 val; 2459e643d803SMarc Zyngier 2460e643d803SMarc Zyngier /* Schedule the VPE */ 2461e643d803SMarc Zyngier val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) & 2462e643d803SMarc Zyngier GENMASK_ULL(51, 12); 2463e643d803SMarc Zyngier val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK; 2464e643d803SMarc Zyngier val |= GICR_VPROPBASER_RaWb; 2465e643d803SMarc Zyngier val |= GICR_VPROPBASER_InnerShareable; 2466e643d803SMarc 
Zyngier gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); 2467e643d803SMarc Zyngier 2468e643d803SMarc Zyngier val = virt_to_phys(page_address(vpe->vpt_page)) & 2469e643d803SMarc Zyngier GENMASK_ULL(51, 16); 2470e643d803SMarc Zyngier val |= GICR_VPENDBASER_RaWaWb; 2471e643d803SMarc Zyngier val |= GICR_VPENDBASER_NonShareable; 2472e643d803SMarc Zyngier /* 2473e643d803SMarc Zyngier * There is no good way of finding out if the pending table is 2474e643d803SMarc Zyngier * empty as we can race against the doorbell interrupt very 2475e643d803SMarc Zyngier * easily. So in the end, vpe->pending_last is only an 2476e643d803SMarc Zyngier * indication that the vcpu has something pending, not one 2477e643d803SMarc Zyngier * that the pending table is empty. A good implementation 2478e643d803SMarc Zyngier * would be able to read its coarse map pretty quickly anyway, 2479e643d803SMarc Zyngier * making this a tolerable issue. 2480e643d803SMarc Zyngier */ 2481e643d803SMarc Zyngier val |= GICR_VPENDBASER_PendingLast; 2482e643d803SMarc Zyngier val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0; 2483e643d803SMarc Zyngier val |= GICR_VPENDBASER_Valid; 2484e643d803SMarc Zyngier gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); 2485e643d803SMarc Zyngier } 2486e643d803SMarc Zyngier 2487e643d803SMarc Zyngier static void its_vpe_deschedule(struct its_vpe *vpe) 2488e643d803SMarc Zyngier { 2489e643d803SMarc Zyngier void * __iomem vlpi_base = gic_data_rdist_vlpi_base(); 2490e643d803SMarc Zyngier u32 count = 1000000; /* 1s! 
*/ 2491e643d803SMarc Zyngier bool clean; 2492e643d803SMarc Zyngier u64 val; 2493e643d803SMarc Zyngier 2494e643d803SMarc Zyngier /* We're being scheduled out */ 2495e643d803SMarc Zyngier val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER); 2496e643d803SMarc Zyngier val &= ~GICR_VPENDBASER_Valid; 2497e643d803SMarc Zyngier gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); 2498e643d803SMarc Zyngier 2499e643d803SMarc Zyngier do { 2500e643d803SMarc Zyngier val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER); 2501e643d803SMarc Zyngier clean = !(val & GICR_VPENDBASER_Dirty); 2502e643d803SMarc Zyngier if (!clean) { 2503e643d803SMarc Zyngier count--; 2504e643d803SMarc Zyngier cpu_relax(); 2505e643d803SMarc Zyngier udelay(1); 2506e643d803SMarc Zyngier } 2507e643d803SMarc Zyngier } while (!clean && count); 2508e643d803SMarc Zyngier 2509e643d803SMarc Zyngier if (unlikely(!clean && !count)) { 2510e643d803SMarc Zyngier pr_err_ratelimited("ITS virtual pending table not cleaning\n"); 2511e643d803SMarc Zyngier vpe->idai = false; 2512e643d803SMarc Zyngier vpe->pending_last = true; 2513e643d803SMarc Zyngier } else { 2514e643d803SMarc Zyngier vpe->idai = !!(val & GICR_VPENDBASER_IDAI); 2515e643d803SMarc Zyngier vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast); 2516e643d803SMarc Zyngier } 2517e643d803SMarc Zyngier } 2518e643d803SMarc Zyngier 251940619a2eSMarc Zyngier static void its_vpe_invall(struct its_vpe *vpe) 252040619a2eSMarc Zyngier { 252140619a2eSMarc Zyngier struct its_node *its; 252240619a2eSMarc Zyngier 252340619a2eSMarc Zyngier list_for_each_entry(its, &its_nodes, entry) { 252440619a2eSMarc Zyngier if (!its->is_v4) 252540619a2eSMarc Zyngier continue; 252640619a2eSMarc Zyngier 2527*2247e1bfSMarc Zyngier if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr]) 2528*2247e1bfSMarc Zyngier continue; 2529*2247e1bfSMarc Zyngier 253040619a2eSMarc Zyngier its_send_vinvall(its, vpe); 253140619a2eSMarc Zyngier } 253240619a2eSMarc Zyngier } 253340619a2eSMarc 
Zyngier 2534e643d803SMarc Zyngier static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) 2535e643d803SMarc Zyngier { 2536e643d803SMarc Zyngier struct its_vpe *vpe = irq_data_get_irq_chip_data(d); 2537e643d803SMarc Zyngier struct its_cmd_info *info = vcpu_info; 2538e643d803SMarc Zyngier 2539e643d803SMarc Zyngier switch (info->cmd_type) { 2540e643d803SMarc Zyngier case SCHEDULE_VPE: 2541e643d803SMarc Zyngier its_vpe_schedule(vpe); 2542e643d803SMarc Zyngier return 0; 2543e643d803SMarc Zyngier 2544e643d803SMarc Zyngier case DESCHEDULE_VPE: 2545e643d803SMarc Zyngier its_vpe_deschedule(vpe); 2546e643d803SMarc Zyngier return 0; 2547e643d803SMarc Zyngier 25485e2f7642SMarc Zyngier case INVALL_VPE: 254940619a2eSMarc Zyngier its_vpe_invall(vpe); 25505e2f7642SMarc Zyngier return 0; 25515e2f7642SMarc Zyngier 2552e643d803SMarc Zyngier default: 2553e643d803SMarc Zyngier return -EINVAL; 2554e643d803SMarc Zyngier } 2555e643d803SMarc Zyngier } 2556e643d803SMarc Zyngier 255720b3d54eSMarc Zyngier static void its_vpe_send_cmd(struct its_vpe *vpe, 255820b3d54eSMarc Zyngier void (*cmd)(struct its_device *, u32)) 255920b3d54eSMarc Zyngier { 256020b3d54eSMarc Zyngier unsigned long flags; 256120b3d54eSMarc Zyngier 256220b3d54eSMarc Zyngier raw_spin_lock_irqsave(&vpe_proxy.lock, flags); 256320b3d54eSMarc Zyngier 256420b3d54eSMarc Zyngier its_vpe_db_proxy_map_locked(vpe); 256520b3d54eSMarc Zyngier cmd(vpe_proxy.dev, vpe->vpe_proxy_event); 256620b3d54eSMarc Zyngier 256720b3d54eSMarc Zyngier raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags); 256820b3d54eSMarc Zyngier } 256920b3d54eSMarc Zyngier 2570f6a91da7SMarc Zyngier static void its_vpe_send_inv(struct irq_data *d) 2571f6a91da7SMarc Zyngier { 2572f6a91da7SMarc Zyngier struct its_vpe *vpe = irq_data_get_irq_chip_data(d); 257320b3d54eSMarc Zyngier 257420b3d54eSMarc Zyngier if (gic_rdists->has_direct_lpi) { 2575f6a91da7SMarc Zyngier void __iomem *rdbase; 2576f6a91da7SMarc Zyngier 2577f6a91da7SMarc Zyngier rdbase = 
per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base; 2578f6a91da7SMarc Zyngier gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_INVLPIR); 2579f6a91da7SMarc Zyngier while (gic_read_lpir(rdbase + GICR_SYNCR) & 1) 2580f6a91da7SMarc Zyngier cpu_relax(); 258120b3d54eSMarc Zyngier } else { 258220b3d54eSMarc Zyngier its_vpe_send_cmd(vpe, its_send_inv); 258320b3d54eSMarc Zyngier } 2584f6a91da7SMarc Zyngier } 2585f6a91da7SMarc Zyngier 2586f6a91da7SMarc Zyngier static void its_vpe_mask_irq(struct irq_data *d) 2587f6a91da7SMarc Zyngier { 2588f6a91da7SMarc Zyngier /* 2589f6a91da7SMarc Zyngier * We need to unmask the LPI, which is described by the parent 2590f6a91da7SMarc Zyngier * irq_data. Instead of calling into the parent (which won't 2591f6a91da7SMarc Zyngier * exactly do the right thing, let's simply use the 2592f6a91da7SMarc Zyngier * parent_data pointer. Yes, I'm naughty. 2593f6a91da7SMarc Zyngier */ 2594f6a91da7SMarc Zyngier lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0); 2595f6a91da7SMarc Zyngier its_vpe_send_inv(d); 2596f6a91da7SMarc Zyngier } 2597f6a91da7SMarc Zyngier 2598f6a91da7SMarc Zyngier static void its_vpe_unmask_irq(struct irq_data *d) 2599f6a91da7SMarc Zyngier { 2600f6a91da7SMarc Zyngier /* Same hack as above... 
*/ 2601f6a91da7SMarc Zyngier lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED); 2602f6a91da7SMarc Zyngier its_vpe_send_inv(d); 2603f6a91da7SMarc Zyngier } 2604f6a91da7SMarc Zyngier 2605e57a3e28SMarc Zyngier static int its_vpe_set_irqchip_state(struct irq_data *d, 2606e57a3e28SMarc Zyngier enum irqchip_irq_state which, 2607e57a3e28SMarc Zyngier bool state) 2608e57a3e28SMarc Zyngier { 2609e57a3e28SMarc Zyngier struct its_vpe *vpe = irq_data_get_irq_chip_data(d); 2610e57a3e28SMarc Zyngier 2611e57a3e28SMarc Zyngier if (which != IRQCHIP_STATE_PENDING) 2612e57a3e28SMarc Zyngier return -EINVAL; 2613e57a3e28SMarc Zyngier 2614e57a3e28SMarc Zyngier if (gic_rdists->has_direct_lpi) { 2615e57a3e28SMarc Zyngier void __iomem *rdbase; 2616e57a3e28SMarc Zyngier 2617e57a3e28SMarc Zyngier rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base; 2618e57a3e28SMarc Zyngier if (state) { 2619e57a3e28SMarc Zyngier gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR); 2620e57a3e28SMarc Zyngier } else { 2621e57a3e28SMarc Zyngier gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR); 2622e57a3e28SMarc Zyngier while (gic_read_lpir(rdbase + GICR_SYNCR) & 1) 2623e57a3e28SMarc Zyngier cpu_relax(); 2624e57a3e28SMarc Zyngier } 2625e57a3e28SMarc Zyngier } else { 2626e57a3e28SMarc Zyngier if (state) 2627e57a3e28SMarc Zyngier its_vpe_send_cmd(vpe, its_send_int); 2628e57a3e28SMarc Zyngier else 2629e57a3e28SMarc Zyngier its_vpe_send_cmd(vpe, its_send_clear); 2630e57a3e28SMarc Zyngier } 2631e57a3e28SMarc Zyngier 2632e57a3e28SMarc Zyngier return 0; 2633e57a3e28SMarc Zyngier } 2634e57a3e28SMarc Zyngier 26358fff27aeSMarc Zyngier static struct irq_chip its_vpe_irq_chip = { 26368fff27aeSMarc Zyngier .name = "GICv4-vpe", 2637f6a91da7SMarc Zyngier .irq_mask = its_vpe_mask_irq, 2638f6a91da7SMarc Zyngier .irq_unmask = its_vpe_unmask_irq, 2639f6a91da7SMarc Zyngier .irq_eoi = irq_chip_eoi_parent, 26403171a47aSMarc Zyngier .irq_set_affinity = its_vpe_set_affinity, 2641e57a3e28SMarc Zyngier 
.irq_set_irqchip_state = its_vpe_set_irqchip_state, 2642e643d803SMarc Zyngier .irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity, 26438fff27aeSMarc Zyngier }; 26448fff27aeSMarc Zyngier 26457d75bbb4SMarc Zyngier static int its_vpe_id_alloc(void) 26467d75bbb4SMarc Zyngier { 26477d75bbb4SMarc Zyngier return ida_simple_get(&its_vpeid_ida, 0, 1 << 16, GFP_KERNEL); 26487d75bbb4SMarc Zyngier } 26497d75bbb4SMarc Zyngier 26507d75bbb4SMarc Zyngier static void its_vpe_id_free(u16 id) 26517d75bbb4SMarc Zyngier { 26527d75bbb4SMarc Zyngier ida_simple_remove(&its_vpeid_ida, id); 26537d75bbb4SMarc Zyngier } 26547d75bbb4SMarc Zyngier 26557d75bbb4SMarc Zyngier static int its_vpe_init(struct its_vpe *vpe) 26567d75bbb4SMarc Zyngier { 26577d75bbb4SMarc Zyngier struct page *vpt_page; 26587d75bbb4SMarc Zyngier int vpe_id; 26597d75bbb4SMarc Zyngier 26607d75bbb4SMarc Zyngier /* Allocate vpe_id */ 26617d75bbb4SMarc Zyngier vpe_id = its_vpe_id_alloc(); 26627d75bbb4SMarc Zyngier if (vpe_id < 0) 26637d75bbb4SMarc Zyngier return vpe_id; 26647d75bbb4SMarc Zyngier 26657d75bbb4SMarc Zyngier /* Allocate VPT */ 26667d75bbb4SMarc Zyngier vpt_page = its_allocate_pending_table(GFP_KERNEL); 26677d75bbb4SMarc Zyngier if (!vpt_page) { 26687d75bbb4SMarc Zyngier its_vpe_id_free(vpe_id); 26697d75bbb4SMarc Zyngier return -ENOMEM; 26707d75bbb4SMarc Zyngier } 26717d75bbb4SMarc Zyngier 26727d75bbb4SMarc Zyngier if (!its_alloc_vpe_table(vpe_id)) { 26737d75bbb4SMarc Zyngier its_vpe_id_free(vpe_id); 26747d75bbb4SMarc Zyngier its_free_pending_table(vpe->vpt_page); 26757d75bbb4SMarc Zyngier return -ENOMEM; 26767d75bbb4SMarc Zyngier } 26777d75bbb4SMarc Zyngier 26787d75bbb4SMarc Zyngier vpe->vpe_id = vpe_id; 26797d75bbb4SMarc Zyngier vpe->vpt_page = vpt_page; 268020b3d54eSMarc Zyngier vpe->vpe_proxy_event = -1; 26817d75bbb4SMarc Zyngier 26827d75bbb4SMarc Zyngier return 0; 26837d75bbb4SMarc Zyngier } 26847d75bbb4SMarc Zyngier 26857d75bbb4SMarc Zyngier static void its_vpe_teardown(struct its_vpe *vpe) 26867d75bbb4SMarc 
Zyngier { 268720b3d54eSMarc Zyngier its_vpe_db_proxy_unmap(vpe); 26887d75bbb4SMarc Zyngier its_vpe_id_free(vpe->vpe_id); 26897d75bbb4SMarc Zyngier its_free_pending_table(vpe->vpt_page); 26907d75bbb4SMarc Zyngier } 26917d75bbb4SMarc Zyngier 26927d75bbb4SMarc Zyngier static void its_vpe_irq_domain_free(struct irq_domain *domain, 26937d75bbb4SMarc Zyngier unsigned int virq, 26947d75bbb4SMarc Zyngier unsigned int nr_irqs) 26957d75bbb4SMarc Zyngier { 26967d75bbb4SMarc Zyngier struct its_vm *vm = domain->host_data; 26977d75bbb4SMarc Zyngier int i; 26987d75bbb4SMarc Zyngier 26997d75bbb4SMarc Zyngier irq_domain_free_irqs_parent(domain, virq, nr_irqs); 27007d75bbb4SMarc Zyngier 27017d75bbb4SMarc Zyngier for (i = 0; i < nr_irqs; i++) { 27027d75bbb4SMarc Zyngier struct irq_data *data = irq_domain_get_irq_data(domain, 27037d75bbb4SMarc Zyngier virq + i); 27047d75bbb4SMarc Zyngier struct its_vpe *vpe = irq_data_get_irq_chip_data(data); 27057d75bbb4SMarc Zyngier 27067d75bbb4SMarc Zyngier BUG_ON(vm != vpe->its_vm); 27077d75bbb4SMarc Zyngier 27087d75bbb4SMarc Zyngier clear_bit(data->hwirq, vm->db_bitmap); 27097d75bbb4SMarc Zyngier its_vpe_teardown(vpe); 27107d75bbb4SMarc Zyngier irq_domain_reset_irq_data(data); 27117d75bbb4SMarc Zyngier } 27127d75bbb4SMarc Zyngier 27137d75bbb4SMarc Zyngier if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) { 27147d75bbb4SMarc Zyngier its_lpi_free_chunks(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis); 27157d75bbb4SMarc Zyngier its_free_prop_table(vm->vprop_page); 27167d75bbb4SMarc Zyngier } 27177d75bbb4SMarc Zyngier } 27187d75bbb4SMarc Zyngier 27197d75bbb4SMarc Zyngier static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, 27207d75bbb4SMarc Zyngier unsigned int nr_irqs, void *args) 27217d75bbb4SMarc Zyngier { 27227d75bbb4SMarc Zyngier struct its_vm *vm = args; 27237d75bbb4SMarc Zyngier unsigned long *bitmap; 27247d75bbb4SMarc Zyngier struct page *vprop_page; 27257d75bbb4SMarc Zyngier int base, nr_ids, i, err = 0; 
27267d75bbb4SMarc Zyngier 27277d75bbb4SMarc Zyngier BUG_ON(!vm); 27287d75bbb4SMarc Zyngier 27297d75bbb4SMarc Zyngier bitmap = its_lpi_alloc_chunks(nr_irqs, &base, &nr_ids); 27307d75bbb4SMarc Zyngier if (!bitmap) 27317d75bbb4SMarc Zyngier return -ENOMEM; 27327d75bbb4SMarc Zyngier 27337d75bbb4SMarc Zyngier if (nr_ids < nr_irqs) { 27347d75bbb4SMarc Zyngier its_lpi_free_chunks(bitmap, base, nr_ids); 27357d75bbb4SMarc Zyngier return -ENOMEM; 27367d75bbb4SMarc Zyngier } 27377d75bbb4SMarc Zyngier 27387d75bbb4SMarc Zyngier vprop_page = its_allocate_prop_table(GFP_KERNEL); 27397d75bbb4SMarc Zyngier if (!vprop_page) { 27407d75bbb4SMarc Zyngier its_lpi_free_chunks(bitmap, base, nr_ids); 27417d75bbb4SMarc Zyngier return -ENOMEM; 27427d75bbb4SMarc Zyngier } 27437d75bbb4SMarc Zyngier 27447d75bbb4SMarc Zyngier vm->db_bitmap = bitmap; 27457d75bbb4SMarc Zyngier vm->db_lpi_base = base; 27467d75bbb4SMarc Zyngier vm->nr_db_lpis = nr_ids; 27477d75bbb4SMarc Zyngier vm->vprop_page = vprop_page; 27487d75bbb4SMarc Zyngier 27497d75bbb4SMarc Zyngier for (i = 0; i < nr_irqs; i++) { 27507d75bbb4SMarc Zyngier vm->vpes[i]->vpe_db_lpi = base + i; 27517d75bbb4SMarc Zyngier err = its_vpe_init(vm->vpes[i]); 27527d75bbb4SMarc Zyngier if (err) 27537d75bbb4SMarc Zyngier break; 27547d75bbb4SMarc Zyngier err = its_irq_gic_domain_alloc(domain, virq + i, 27557d75bbb4SMarc Zyngier vm->vpes[i]->vpe_db_lpi); 27567d75bbb4SMarc Zyngier if (err) 27577d75bbb4SMarc Zyngier break; 27587d75bbb4SMarc Zyngier irq_domain_set_hwirq_and_chip(domain, virq + i, i, 27597d75bbb4SMarc Zyngier &its_vpe_irq_chip, vm->vpes[i]); 27607d75bbb4SMarc Zyngier set_bit(i, bitmap); 27617d75bbb4SMarc Zyngier } 27627d75bbb4SMarc Zyngier 27637d75bbb4SMarc Zyngier if (err) { 27647d75bbb4SMarc Zyngier if (i > 0) 27657d75bbb4SMarc Zyngier its_vpe_irq_domain_free(domain, virq, i - 1); 27667d75bbb4SMarc Zyngier 27677d75bbb4SMarc Zyngier its_lpi_free_chunks(bitmap, base, nr_ids); 27687d75bbb4SMarc Zyngier its_free_prop_table(vprop_page); 
27697d75bbb4SMarc Zyngier } 27707d75bbb4SMarc Zyngier 27717d75bbb4SMarc Zyngier return err; 27727d75bbb4SMarc Zyngier } 27737d75bbb4SMarc Zyngier 277472491643SThomas Gleixner static int its_vpe_irq_domain_activate(struct irq_domain *domain, 277572491643SThomas Gleixner struct irq_data *d, bool early) 2776eb78192bSMarc Zyngier { 2777eb78192bSMarc Zyngier struct its_vpe *vpe = irq_data_get_irq_chip_data(d); 277840619a2eSMarc Zyngier struct its_node *its; 2779eb78192bSMarc Zyngier 2780*2247e1bfSMarc Zyngier /* If we use the list map, we issue VMAPP on demand... */ 2781*2247e1bfSMarc Zyngier if (its_list_map) 2782*2247e1bfSMarc Zyngier return true; 2783*2247e1bfSMarc Zyngier 2784eb78192bSMarc Zyngier /* Map the VPE to the first possible CPU */ 2785eb78192bSMarc Zyngier vpe->col_idx = cpumask_first(cpu_online_mask); 278640619a2eSMarc Zyngier 278740619a2eSMarc Zyngier list_for_each_entry(its, &its_nodes, entry) { 278840619a2eSMarc Zyngier if (!its->is_v4) 278940619a2eSMarc Zyngier continue; 279040619a2eSMarc Zyngier 279175fd951bSMarc Zyngier its_send_vmapp(its, vpe, true); 279240619a2eSMarc Zyngier its_send_vinvall(its, vpe); 279340619a2eSMarc Zyngier } 279440619a2eSMarc Zyngier 279572491643SThomas Gleixner return 0; 2796eb78192bSMarc Zyngier } 2797eb78192bSMarc Zyngier 2798eb78192bSMarc Zyngier static void its_vpe_irq_domain_deactivate(struct irq_domain *domain, 2799eb78192bSMarc Zyngier struct irq_data *d) 2800eb78192bSMarc Zyngier { 2801eb78192bSMarc Zyngier struct its_vpe *vpe = irq_data_get_irq_chip_data(d); 280275fd951bSMarc Zyngier struct its_node *its; 2803eb78192bSMarc Zyngier 2804*2247e1bfSMarc Zyngier /* 2805*2247e1bfSMarc Zyngier * If we use the list map, we unmap the VPE once no VLPIs are 2806*2247e1bfSMarc Zyngier * associated with the VM. 
2807*2247e1bfSMarc Zyngier */ 2808*2247e1bfSMarc Zyngier if (its_list_map) 2809*2247e1bfSMarc Zyngier return; 2810*2247e1bfSMarc Zyngier 281175fd951bSMarc Zyngier list_for_each_entry(its, &its_nodes, entry) { 281275fd951bSMarc Zyngier if (!its->is_v4) 281375fd951bSMarc Zyngier continue; 281475fd951bSMarc Zyngier 281575fd951bSMarc Zyngier its_send_vmapp(its, vpe, false); 281675fd951bSMarc Zyngier } 2817eb78192bSMarc Zyngier } 2818eb78192bSMarc Zyngier 28198fff27aeSMarc Zyngier static const struct irq_domain_ops its_vpe_domain_ops = { 28207d75bbb4SMarc Zyngier .alloc = its_vpe_irq_domain_alloc, 28217d75bbb4SMarc Zyngier .free = its_vpe_irq_domain_free, 2822eb78192bSMarc Zyngier .activate = its_vpe_irq_domain_activate, 2823eb78192bSMarc Zyngier .deactivate = its_vpe_irq_domain_deactivate, 28248fff27aeSMarc Zyngier }; 28258fff27aeSMarc Zyngier 28264559fbb3SYun Wu static int its_force_quiescent(void __iomem *base) 28274559fbb3SYun Wu { 28284559fbb3SYun Wu u32 count = 1000000; /* 1s */ 28294559fbb3SYun Wu u32 val; 28304559fbb3SYun Wu 28314559fbb3SYun Wu val = readl_relaxed(base + GITS_CTLR); 28327611da86SDavid Daney /* 28337611da86SDavid Daney * GIC architecture specification requires the ITS to be both 28347611da86SDavid Daney * disabled and quiescent for writes to GITS_BASER<n> or 28357611da86SDavid Daney * GITS_CBASER to not have UNPREDICTABLE results. 
28367611da86SDavid Daney */ 28377611da86SDavid Daney if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE)) 28384559fbb3SYun Wu return 0; 28394559fbb3SYun Wu 28404559fbb3SYun Wu /* Disable the generation of all interrupts to this ITS */ 2841d51c4b4dSMarc Zyngier val &= ~(GITS_CTLR_ENABLE | GITS_CTLR_ImDe); 28424559fbb3SYun Wu writel_relaxed(val, base + GITS_CTLR); 28434559fbb3SYun Wu 28444559fbb3SYun Wu /* Poll GITS_CTLR and wait until ITS becomes quiescent */ 28454559fbb3SYun Wu while (1) { 28464559fbb3SYun Wu val = readl_relaxed(base + GITS_CTLR); 28474559fbb3SYun Wu if (val & GITS_CTLR_QUIESCENT) 28484559fbb3SYun Wu return 0; 28494559fbb3SYun Wu 28504559fbb3SYun Wu count--; 28514559fbb3SYun Wu if (!count) 28524559fbb3SYun Wu return -EBUSY; 28534559fbb3SYun Wu 28544559fbb3SYun Wu cpu_relax(); 28554559fbb3SYun Wu udelay(1); 28564559fbb3SYun Wu } 28574559fbb3SYun Wu } 28584559fbb3SYun Wu 28599d111d49SArd Biesheuvel static bool __maybe_unused its_enable_quirk_cavium_22375(void *data) 286094100970SRobert Richter { 286194100970SRobert Richter struct its_node *its = data; 286294100970SRobert Richter 2863fa150019SArd Biesheuvel /* erratum 22375: only alloc 8MB table size */ 2864fa150019SArd Biesheuvel its->device_ids = 0x14; /* 20 bits, 8MB */ 286594100970SRobert Richter its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375; 28669d111d49SArd Biesheuvel 28679d111d49SArd Biesheuvel return true; 286894100970SRobert Richter } 286994100970SRobert Richter 28709d111d49SArd Biesheuvel static bool __maybe_unused its_enable_quirk_cavium_23144(void *data) 2871fbf8f40eSGanapatrao Kulkarni { 2872fbf8f40eSGanapatrao Kulkarni struct its_node *its = data; 2873fbf8f40eSGanapatrao Kulkarni 2874fbf8f40eSGanapatrao Kulkarni its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144; 28759d111d49SArd Biesheuvel 28769d111d49SArd Biesheuvel return true; 2877fbf8f40eSGanapatrao Kulkarni } 2878fbf8f40eSGanapatrao Kulkarni 28799d111d49SArd Biesheuvel static bool __maybe_unused 
its_enable_quirk_qdf2400_e0065(void *data) 288090922a2dSShanker Donthineni { 288190922a2dSShanker Donthineni struct its_node *its = data; 288290922a2dSShanker Donthineni 288390922a2dSShanker Donthineni /* On QDF2400, the size of the ITE is 16Bytes */ 288490922a2dSShanker Donthineni its->ite_size = 16; 28859d111d49SArd Biesheuvel 28869d111d49SArd Biesheuvel return true; 288790922a2dSShanker Donthineni } 288890922a2dSShanker Donthineni 2889558b0165SArd Biesheuvel static u64 its_irq_get_msi_base_pre_its(struct its_device *its_dev) 2890558b0165SArd Biesheuvel { 2891558b0165SArd Biesheuvel struct its_node *its = its_dev->its; 2892558b0165SArd Biesheuvel 2893558b0165SArd Biesheuvel /* 2894558b0165SArd Biesheuvel * The Socionext Synquacer SoC has a so-called 'pre-ITS', 2895558b0165SArd Biesheuvel * which maps 32-bit writes targeted at a separate window of 2896558b0165SArd Biesheuvel * size '4 << device_id_bits' onto writes to GITS_TRANSLATER 2897558b0165SArd Biesheuvel * with device ID taken from bits [device_id_bits + 1:2] of 2898558b0165SArd Biesheuvel * the window offset. 
2899558b0165SArd Biesheuvel */ 2900558b0165SArd Biesheuvel return its->pre_its_base + (its_dev->device_id << 2); 2901558b0165SArd Biesheuvel } 2902558b0165SArd Biesheuvel 2903558b0165SArd Biesheuvel static bool __maybe_unused its_enable_quirk_socionext_synquacer(void *data) 2904558b0165SArd Biesheuvel { 2905558b0165SArd Biesheuvel struct its_node *its = data; 2906558b0165SArd Biesheuvel u32 pre_its_window[2]; 2907558b0165SArd Biesheuvel u32 ids; 2908558b0165SArd Biesheuvel 2909558b0165SArd Biesheuvel if (!fwnode_property_read_u32_array(its->fwnode_handle, 2910558b0165SArd Biesheuvel "socionext,synquacer-pre-its", 2911558b0165SArd Biesheuvel pre_its_window, 2912558b0165SArd Biesheuvel ARRAY_SIZE(pre_its_window))) { 2913558b0165SArd Biesheuvel 2914558b0165SArd Biesheuvel its->pre_its_base = pre_its_window[0]; 2915558b0165SArd Biesheuvel its->get_msi_base = its_irq_get_msi_base_pre_its; 2916558b0165SArd Biesheuvel 2917558b0165SArd Biesheuvel ids = ilog2(pre_its_window[1]) - 2; 2918558b0165SArd Biesheuvel if (its->device_ids > ids) 2919558b0165SArd Biesheuvel its->device_ids = ids; 2920558b0165SArd Biesheuvel 2921558b0165SArd Biesheuvel /* the pre-ITS breaks isolation, so disable MSI remapping */ 2922558b0165SArd Biesheuvel its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_MSI_REMAP; 2923558b0165SArd Biesheuvel return true; 2924558b0165SArd Biesheuvel } 2925558b0165SArd Biesheuvel return false; 2926558b0165SArd Biesheuvel } 2927558b0165SArd Biesheuvel 29285c9a882eSMarc Zyngier static bool __maybe_unused its_enable_quirk_hip07_161600802(void *data) 29295c9a882eSMarc Zyngier { 29305c9a882eSMarc Zyngier struct its_node *its = data; 29315c9a882eSMarc Zyngier 29325c9a882eSMarc Zyngier /* 29335c9a882eSMarc Zyngier * Hip07 insists on using the wrong address for the VLPI 29345c9a882eSMarc Zyngier * page. Trick it into doing the right thing... 
29355c9a882eSMarc Zyngier */ 29365c9a882eSMarc Zyngier its->vlpi_redist_offset = SZ_128K; 29375c9a882eSMarc Zyngier return true; 29385c9a882eSMarc Zyngier } 29395c9a882eSMarc Zyngier 294067510ccaSRobert Richter static const struct gic_quirk its_quirks[] = { 294194100970SRobert Richter #ifdef CONFIG_CAVIUM_ERRATUM_22375 294294100970SRobert Richter { 294394100970SRobert Richter .desc = "ITS: Cavium errata 22375, 24313", 294494100970SRobert Richter .iidr = 0xa100034c, /* ThunderX pass 1.x */ 294594100970SRobert Richter .mask = 0xffff0fff, 294694100970SRobert Richter .init = its_enable_quirk_cavium_22375, 294794100970SRobert Richter }, 294894100970SRobert Richter #endif 2949fbf8f40eSGanapatrao Kulkarni #ifdef CONFIG_CAVIUM_ERRATUM_23144 2950fbf8f40eSGanapatrao Kulkarni { 2951fbf8f40eSGanapatrao Kulkarni .desc = "ITS: Cavium erratum 23144", 2952fbf8f40eSGanapatrao Kulkarni .iidr = 0xa100034c, /* ThunderX pass 1.x */ 2953fbf8f40eSGanapatrao Kulkarni .mask = 0xffff0fff, 2954fbf8f40eSGanapatrao Kulkarni .init = its_enable_quirk_cavium_23144, 2955fbf8f40eSGanapatrao Kulkarni }, 2956fbf8f40eSGanapatrao Kulkarni #endif 295790922a2dSShanker Donthineni #ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065 295890922a2dSShanker Donthineni { 295990922a2dSShanker Donthineni .desc = "ITS: QDF2400 erratum 0065", 296090922a2dSShanker Donthineni .iidr = 0x00001070, /* QDF2400 ITS rev 1.x */ 296190922a2dSShanker Donthineni .mask = 0xffffffff, 296290922a2dSShanker Donthineni .init = its_enable_quirk_qdf2400_e0065, 296390922a2dSShanker Donthineni }, 296490922a2dSShanker Donthineni #endif 2965558b0165SArd Biesheuvel #ifdef CONFIG_SOCIONEXT_SYNQUACER_PREITS 2966558b0165SArd Biesheuvel { 2967558b0165SArd Biesheuvel /* 2968558b0165SArd Biesheuvel * The Socionext Synquacer SoC incorporates ARM's own GIC-500 2969558b0165SArd Biesheuvel * implementation, but with a 'pre-ITS' added that requires 2970558b0165SArd Biesheuvel * special handling in software. 
2971558b0165SArd Biesheuvel */ 2972558b0165SArd Biesheuvel .desc = "ITS: Socionext Synquacer pre-ITS", 2973558b0165SArd Biesheuvel .iidr = 0x0001143b, 2974558b0165SArd Biesheuvel .mask = 0xffffffff, 2975558b0165SArd Biesheuvel .init = its_enable_quirk_socionext_synquacer, 2976558b0165SArd Biesheuvel }, 2977558b0165SArd Biesheuvel #endif 29785c9a882eSMarc Zyngier #ifdef CONFIG_HISILICON_ERRATUM_161600802 29795c9a882eSMarc Zyngier { 29805c9a882eSMarc Zyngier .desc = "ITS: Hip07 erratum 161600802", 29815c9a882eSMarc Zyngier .iidr = 0x00000004, 29825c9a882eSMarc Zyngier .mask = 0xffffffff, 29835c9a882eSMarc Zyngier .init = its_enable_quirk_hip07_161600802, 29845c9a882eSMarc Zyngier }, 29855c9a882eSMarc Zyngier #endif 298667510ccaSRobert Richter { 298767510ccaSRobert Richter } 298867510ccaSRobert Richter }; 298967510ccaSRobert Richter 299067510ccaSRobert Richter static void its_enable_quirks(struct its_node *its) 299167510ccaSRobert Richter { 299267510ccaSRobert Richter u32 iidr = readl_relaxed(its->base + GITS_IIDR); 299367510ccaSRobert Richter 299467510ccaSRobert Richter gic_enable_quirks(iidr, its_quirks, its); 299567510ccaSRobert Richter } 299667510ccaSRobert Richter 2997db40f0a7STomasz Nowicki static int its_init_domain(struct fwnode_handle *handle, struct its_node *its) 2998d14ae5e6STomasz Nowicki { 2999d14ae5e6STomasz Nowicki struct irq_domain *inner_domain; 3000d14ae5e6STomasz Nowicki struct msi_domain_info *info; 3001d14ae5e6STomasz Nowicki 3002d14ae5e6STomasz Nowicki info = kzalloc(sizeof(*info), GFP_KERNEL); 3003d14ae5e6STomasz Nowicki if (!info) 3004d14ae5e6STomasz Nowicki return -ENOMEM; 3005d14ae5e6STomasz Nowicki 3006db40f0a7STomasz Nowicki inner_domain = irq_domain_create_tree(handle, &its_domain_ops, its); 3007d14ae5e6STomasz Nowicki if (!inner_domain) { 3008d14ae5e6STomasz Nowicki kfree(info); 3009d14ae5e6STomasz Nowicki return -ENOMEM; 3010d14ae5e6STomasz Nowicki } 3011d14ae5e6STomasz Nowicki 3012db40f0a7STomasz Nowicki inner_domain->parent = 
its_parent; 301396f0d93aSMarc Zyngier irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS); 3014558b0165SArd Biesheuvel inner_domain->flags |= its->msi_domain_flags; 3015d14ae5e6STomasz Nowicki info->ops = &its_msi_domain_ops; 3016d14ae5e6STomasz Nowicki info->data = its; 3017d14ae5e6STomasz Nowicki inner_domain->host_data = info; 3018d14ae5e6STomasz Nowicki 3019d14ae5e6STomasz Nowicki return 0; 3020d14ae5e6STomasz Nowicki } 3021d14ae5e6STomasz Nowicki 30228fff27aeSMarc Zyngier static int its_init_vpe_domain(void) 30238fff27aeSMarc Zyngier { 302420b3d54eSMarc Zyngier struct its_node *its; 302520b3d54eSMarc Zyngier u32 devid; 302620b3d54eSMarc Zyngier int entries; 302720b3d54eSMarc Zyngier 302820b3d54eSMarc Zyngier if (gic_rdists->has_direct_lpi) { 302920b3d54eSMarc Zyngier pr_info("ITS: Using DirectLPI for VPE invalidation\n"); 303020b3d54eSMarc Zyngier return 0; 303120b3d54eSMarc Zyngier } 303220b3d54eSMarc Zyngier 303320b3d54eSMarc Zyngier /* Any ITS will do, even if not v4 */ 303420b3d54eSMarc Zyngier its = list_first_entry(&its_nodes, struct its_node, entry); 303520b3d54eSMarc Zyngier 303620b3d54eSMarc Zyngier entries = roundup_pow_of_two(nr_cpu_ids); 303720b3d54eSMarc Zyngier vpe_proxy.vpes = kzalloc(sizeof(*vpe_proxy.vpes) * entries, 303820b3d54eSMarc Zyngier GFP_KERNEL); 303920b3d54eSMarc Zyngier if (!vpe_proxy.vpes) { 304020b3d54eSMarc Zyngier pr_err("ITS: Can't allocate GICv4 proxy device array\n"); 304120b3d54eSMarc Zyngier return -ENOMEM; 304220b3d54eSMarc Zyngier } 304320b3d54eSMarc Zyngier 304420b3d54eSMarc Zyngier /* Use the last possible DevID */ 304520b3d54eSMarc Zyngier devid = GENMASK(its->device_ids - 1, 0); 304620b3d54eSMarc Zyngier vpe_proxy.dev = its_create_device(its, devid, entries, false); 304720b3d54eSMarc Zyngier if (!vpe_proxy.dev) { 304820b3d54eSMarc Zyngier kfree(vpe_proxy.vpes); 304920b3d54eSMarc Zyngier pr_err("ITS: Can't allocate GICv4 proxy device\n"); 305020b3d54eSMarc Zyngier return -ENOMEM; 305120b3d54eSMarc Zyngier } 
305220b3d54eSMarc Zyngier 305320b3d54eSMarc Zyngier BUG_ON(entries != vpe_proxy.dev->nr_ites); 305420b3d54eSMarc Zyngier 305520b3d54eSMarc Zyngier raw_spin_lock_init(&vpe_proxy.lock); 305620b3d54eSMarc Zyngier vpe_proxy.next_victim = 0; 305720b3d54eSMarc Zyngier pr_info("ITS: Allocated DevID %x as GICv4 proxy device (%d slots)\n", 305820b3d54eSMarc Zyngier devid, vpe_proxy.dev->nr_ites); 305920b3d54eSMarc Zyngier 30608fff27aeSMarc Zyngier return 0; 30618fff27aeSMarc Zyngier } 30628fff27aeSMarc Zyngier 30633dfa576bSMarc Zyngier static int __init its_compute_its_list_map(struct resource *res, 30643dfa576bSMarc Zyngier void __iomem *its_base) 30653dfa576bSMarc Zyngier { 30663dfa576bSMarc Zyngier int its_number; 30673dfa576bSMarc Zyngier u32 ctlr; 30683dfa576bSMarc Zyngier 30693dfa576bSMarc Zyngier /* 30703dfa576bSMarc Zyngier * This is assumed to be done early enough that we're 30713dfa576bSMarc Zyngier * guaranteed to be single-threaded, hence no 30723dfa576bSMarc Zyngier * locking. Should this change, we should address 30733dfa576bSMarc Zyngier * this. 
30743dfa576bSMarc Zyngier */ 3075ab60491eSMarc Zyngier its_number = find_first_zero_bit(&its_list_map, GICv4_ITS_LIST_MAX); 3076ab60491eSMarc Zyngier if (its_number >= GICv4_ITS_LIST_MAX) { 30773dfa576bSMarc Zyngier pr_err("ITS@%pa: No ITSList entry available!\n", 30783dfa576bSMarc Zyngier &res->start); 30793dfa576bSMarc Zyngier return -EINVAL; 30803dfa576bSMarc Zyngier } 30813dfa576bSMarc Zyngier 30823dfa576bSMarc Zyngier ctlr = readl_relaxed(its_base + GITS_CTLR); 30833dfa576bSMarc Zyngier ctlr &= ~GITS_CTLR_ITS_NUMBER; 30843dfa576bSMarc Zyngier ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT; 30853dfa576bSMarc Zyngier writel_relaxed(ctlr, its_base + GITS_CTLR); 30863dfa576bSMarc Zyngier ctlr = readl_relaxed(its_base + GITS_CTLR); 30873dfa576bSMarc Zyngier if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) { 30883dfa576bSMarc Zyngier its_number = ctlr & GITS_CTLR_ITS_NUMBER; 30893dfa576bSMarc Zyngier its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT; 30903dfa576bSMarc Zyngier } 30913dfa576bSMarc Zyngier 30923dfa576bSMarc Zyngier if (test_and_set_bit(its_number, &its_list_map)) { 30933dfa576bSMarc Zyngier pr_err("ITS@%pa: Duplicate ITSList entry %d\n", 30943dfa576bSMarc Zyngier &res->start, its_number); 30953dfa576bSMarc Zyngier return -EINVAL; 30963dfa576bSMarc Zyngier } 30973dfa576bSMarc Zyngier 30983dfa576bSMarc Zyngier return its_number; 30993dfa576bSMarc Zyngier } 31003dfa576bSMarc Zyngier 3101db40f0a7STomasz Nowicki static int __init its_probe_one(struct resource *res, 3102db40f0a7STomasz Nowicki struct fwnode_handle *handle, int numa_node) 31034c21f3c2SMarc Zyngier { 31044c21f3c2SMarc Zyngier struct its_node *its; 31054c21f3c2SMarc Zyngier void __iomem *its_base; 31063dfa576bSMarc Zyngier u32 val, ctlr; 31073dfa576bSMarc Zyngier u64 baser, tmp, typer; 31084c21f3c2SMarc Zyngier int err; 31094c21f3c2SMarc Zyngier 3110db40f0a7STomasz Nowicki its_base = ioremap(res->start, resource_size(res)); 31114c21f3c2SMarc Zyngier if (!its_base) { 
3112db40f0a7STomasz Nowicki pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start); 31134c21f3c2SMarc Zyngier return -ENOMEM; 31144c21f3c2SMarc Zyngier } 31154c21f3c2SMarc Zyngier 31164c21f3c2SMarc Zyngier val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK; 31174c21f3c2SMarc Zyngier if (val != 0x30 && val != 0x40) { 3118db40f0a7STomasz Nowicki pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start); 31194c21f3c2SMarc Zyngier err = -ENODEV; 31204c21f3c2SMarc Zyngier goto out_unmap; 31214c21f3c2SMarc Zyngier } 31224c21f3c2SMarc Zyngier 31234559fbb3SYun Wu err = its_force_quiescent(its_base); 31244559fbb3SYun Wu if (err) { 3125db40f0a7STomasz Nowicki pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start); 31264559fbb3SYun Wu goto out_unmap; 31274559fbb3SYun Wu } 31284559fbb3SYun Wu 3129db40f0a7STomasz Nowicki pr_info("ITS %pR\n", res); 31304c21f3c2SMarc Zyngier 31314c21f3c2SMarc Zyngier its = kzalloc(sizeof(*its), GFP_KERNEL); 31324c21f3c2SMarc Zyngier if (!its) { 31334c21f3c2SMarc Zyngier err = -ENOMEM; 31344c21f3c2SMarc Zyngier goto out_unmap; 31354c21f3c2SMarc Zyngier } 31364c21f3c2SMarc Zyngier 31374c21f3c2SMarc Zyngier raw_spin_lock_init(&its->lock); 31384c21f3c2SMarc Zyngier INIT_LIST_HEAD(&its->entry); 31394c21f3c2SMarc Zyngier INIT_LIST_HEAD(&its->its_device_list); 31403dfa576bSMarc Zyngier typer = gic_read_typer(its_base + GITS_TYPER); 31414c21f3c2SMarc Zyngier its->base = its_base; 3142db40f0a7STomasz Nowicki its->phys_base = res->start; 31433dfa576bSMarc Zyngier its->ite_size = GITS_TYPER_ITT_ENTRY_SIZE(typer); 3144fa150019SArd Biesheuvel its->device_ids = GITS_TYPER_DEVBITS(typer); 31453dfa576bSMarc Zyngier its->is_v4 = !!(typer & GITS_TYPER_VLPIS); 31463dfa576bSMarc Zyngier if (its->is_v4) { 31473dfa576bSMarc Zyngier if (!(typer & GITS_TYPER_VMOVP)) { 31483dfa576bSMarc Zyngier err = its_compute_its_list_map(res, its_base); 31493dfa576bSMarc Zyngier if (err < 0) 31503dfa576bSMarc Zyngier goto out_free_its; 
31513dfa576bSMarc Zyngier 3152debf6d02SMarc Zyngier its->list_nr = err; 3153debf6d02SMarc Zyngier 31543dfa576bSMarc Zyngier pr_info("ITS@%pa: Using ITS number %d\n", 31553dfa576bSMarc Zyngier &res->start, err); 31563dfa576bSMarc Zyngier } else { 31573dfa576bSMarc Zyngier pr_info("ITS@%pa: Single VMOVP capable\n", &res->start); 31583dfa576bSMarc Zyngier } 31593dfa576bSMarc Zyngier } 31603dfa576bSMarc Zyngier 3161db40f0a7STomasz Nowicki its->numa_node = numa_node; 31624c21f3c2SMarc Zyngier 31635bc13c2cSRobert Richter its->cmd_base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 31645bc13c2cSRobert Richter get_order(ITS_CMD_QUEUE_SZ)); 31654c21f3c2SMarc Zyngier if (!its->cmd_base) { 31664c21f3c2SMarc Zyngier err = -ENOMEM; 31674c21f3c2SMarc Zyngier goto out_free_its; 31684c21f3c2SMarc Zyngier } 31694c21f3c2SMarc Zyngier its->cmd_write = its->cmd_base; 3170558b0165SArd Biesheuvel its->fwnode_handle = handle; 3171558b0165SArd Biesheuvel its->get_msi_base = its_irq_get_msi_base; 3172558b0165SArd Biesheuvel its->msi_domain_flags = IRQ_DOMAIN_FLAG_MSI_REMAP; 31734c21f3c2SMarc Zyngier 317467510ccaSRobert Richter its_enable_quirks(its); 317567510ccaSRobert Richter 31760e0b0f69SShanker Donthineni err = its_alloc_tables(its); 31774c21f3c2SMarc Zyngier if (err) 31784c21f3c2SMarc Zyngier goto out_free_cmd; 31794c21f3c2SMarc Zyngier 31804c21f3c2SMarc Zyngier err = its_alloc_collections(its); 31814c21f3c2SMarc Zyngier if (err) 31824c21f3c2SMarc Zyngier goto out_free_tables; 31834c21f3c2SMarc Zyngier 31844c21f3c2SMarc Zyngier baser = (virt_to_phys(its->cmd_base) | 31852fd632a0SShanker Donthineni GITS_CBASER_RaWaWb | 31864c21f3c2SMarc Zyngier GITS_CBASER_InnerShareable | 31874c21f3c2SMarc Zyngier (ITS_CMD_QUEUE_SZ / SZ_4K - 1) | 31884c21f3c2SMarc Zyngier GITS_CBASER_VALID); 31894c21f3c2SMarc Zyngier 31900968a619SVladimir Murzin gits_write_cbaser(baser, its->base + GITS_CBASER); 31910968a619SVladimir Murzin tmp = gits_read_cbaser(its->base + GITS_CBASER); 31924c21f3c2SMarc 
Zyngier 31934ad3e363SMarc Zyngier if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) { 3194241a386cSMarc Zyngier if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) { 3195241a386cSMarc Zyngier /* 3196241a386cSMarc Zyngier * The HW reports non-shareable, we must 3197241a386cSMarc Zyngier * remove the cacheability attributes as 3198241a386cSMarc Zyngier * well. 3199241a386cSMarc Zyngier */ 3200241a386cSMarc Zyngier baser &= ~(GITS_CBASER_SHAREABILITY_MASK | 3201241a386cSMarc Zyngier GITS_CBASER_CACHEABILITY_MASK); 3202241a386cSMarc Zyngier baser |= GITS_CBASER_nC; 32030968a619SVladimir Murzin gits_write_cbaser(baser, its->base + GITS_CBASER); 3204241a386cSMarc Zyngier } 32054c21f3c2SMarc Zyngier pr_info("ITS: using cache flushing for cmd queue\n"); 32064c21f3c2SMarc Zyngier its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING; 32074c21f3c2SMarc Zyngier } 32084c21f3c2SMarc Zyngier 32090968a619SVladimir Murzin gits_write_cwriter(0, its->base + GITS_CWRITER); 32103dfa576bSMarc Zyngier ctlr = readl_relaxed(its->base + GITS_CTLR); 3211d51c4b4dSMarc Zyngier ctlr |= GITS_CTLR_ENABLE; 3212d51c4b4dSMarc Zyngier if (its->is_v4) 3213d51c4b4dSMarc Zyngier ctlr |= GITS_CTLR_ImDe; 3214d51c4b4dSMarc Zyngier writel_relaxed(ctlr, its->base + GITS_CTLR); 3215241a386cSMarc Zyngier 3216db40f0a7STomasz Nowicki err = its_init_domain(handle, its); 3217d14ae5e6STomasz Nowicki if (err) 321854456db9SMarc Zyngier goto out_free_tables; 32194c21f3c2SMarc Zyngier 32204c21f3c2SMarc Zyngier spin_lock(&its_lock); 32214c21f3c2SMarc Zyngier list_add(&its->entry, &its_nodes); 32224c21f3c2SMarc Zyngier spin_unlock(&its_lock); 32234c21f3c2SMarc Zyngier 32244c21f3c2SMarc Zyngier return 0; 32254c21f3c2SMarc Zyngier 32264c21f3c2SMarc Zyngier out_free_tables: 32274c21f3c2SMarc Zyngier its_free_tables(its); 32284c21f3c2SMarc Zyngier out_free_cmd: 32295bc13c2cSRobert Richter free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ)); 32304c21f3c2SMarc Zyngier out_free_its: 32314c21f3c2SMarc Zyngier kfree(its); 
32324c21f3c2SMarc Zyngier out_unmap: 32334c21f3c2SMarc Zyngier iounmap(its_base); 3234db40f0a7STomasz Nowicki pr_err("ITS@%pa: failed probing (%d)\n", &res->start, err); 32354c21f3c2SMarc Zyngier return err; 32364c21f3c2SMarc Zyngier } 32374c21f3c2SMarc Zyngier 32384c21f3c2SMarc Zyngier static bool gic_rdists_supports_plpis(void) 32394c21f3c2SMarc Zyngier { 3240589ce5f4SMarc Zyngier return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS); 32414c21f3c2SMarc Zyngier } 32424c21f3c2SMarc Zyngier 32434c21f3c2SMarc Zyngier int its_cpu_init(void) 32444c21f3c2SMarc Zyngier { 324516acae72SVladimir Murzin if (!list_empty(&its_nodes)) { 32464c21f3c2SMarc Zyngier if (!gic_rdists_supports_plpis()) { 32474c21f3c2SMarc Zyngier pr_info("CPU%d: LPIs not supported\n", smp_processor_id()); 32484c21f3c2SMarc Zyngier return -ENXIO; 32494c21f3c2SMarc Zyngier } 32504c21f3c2SMarc Zyngier its_cpu_init_lpis(); 32514c21f3c2SMarc Zyngier its_cpu_init_collection(); 32524c21f3c2SMarc Zyngier } 32534c21f3c2SMarc Zyngier 32544c21f3c2SMarc Zyngier return 0; 32554c21f3c2SMarc Zyngier } 32564c21f3c2SMarc Zyngier 3257935bba7cSArvind Yadav static const struct of_device_id its_device_id[] = { 32584c21f3c2SMarc Zyngier { .compatible = "arm,gic-v3-its", }, 32594c21f3c2SMarc Zyngier {}, 32604c21f3c2SMarc Zyngier }; 32614c21f3c2SMarc Zyngier 3262db40f0a7STomasz Nowicki static int __init its_of_probe(struct device_node *node) 32634c21f3c2SMarc Zyngier { 32644c21f3c2SMarc Zyngier struct device_node *np; 3265db40f0a7STomasz Nowicki struct resource res; 32664c21f3c2SMarc Zyngier 32674c21f3c2SMarc Zyngier for (np = of_find_matching_node(node, its_device_id); np; 32684c21f3c2SMarc Zyngier np = of_find_matching_node(np, its_device_id)) { 3269d14ae5e6STomasz Nowicki if (!of_property_read_bool(np, "msi-controller")) { 3270e81f54c6SRob Herring pr_warn("%pOF: no msi-controller property, ITS ignored\n", 3271e81f54c6SRob Herring np); 3272d14ae5e6STomasz Nowicki continue; 3273d14ae5e6STomasz 
Nowicki } 3274d14ae5e6STomasz Nowicki 3275db40f0a7STomasz Nowicki if (of_address_to_resource(np, 0, &res)) { 3276e81f54c6SRob Herring pr_warn("%pOF: no regs?\n", np); 3277db40f0a7STomasz Nowicki continue; 32784c21f3c2SMarc Zyngier } 32794c21f3c2SMarc Zyngier 3280db40f0a7STomasz Nowicki its_probe_one(&res, &np->fwnode, of_node_to_nid(np)); 3281db40f0a7STomasz Nowicki } 3282db40f0a7STomasz Nowicki return 0; 3283db40f0a7STomasz Nowicki } 3284db40f0a7STomasz Nowicki 32853f010cf1STomasz Nowicki #ifdef CONFIG_ACPI 32863f010cf1STomasz Nowicki 32873f010cf1STomasz Nowicki #define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K) 32883f010cf1STomasz Nowicki 3289d1ce263fSRobert Richter #ifdef CONFIG_ACPI_NUMA 3290dbd2b826SGanapatrao Kulkarni struct its_srat_map { 3291dbd2b826SGanapatrao Kulkarni /* numa node id */ 3292dbd2b826SGanapatrao Kulkarni u32 numa_node; 3293dbd2b826SGanapatrao Kulkarni /* GIC ITS ID */ 3294dbd2b826SGanapatrao Kulkarni u32 its_id; 3295dbd2b826SGanapatrao Kulkarni }; 3296dbd2b826SGanapatrao Kulkarni 3297fdf6e7a8SHanjun Guo static struct its_srat_map *its_srat_maps __initdata; 3298dbd2b826SGanapatrao Kulkarni static int its_in_srat __initdata; 3299dbd2b826SGanapatrao Kulkarni 3300dbd2b826SGanapatrao Kulkarni static int __init acpi_get_its_numa_node(u32 its_id) 3301dbd2b826SGanapatrao Kulkarni { 3302dbd2b826SGanapatrao Kulkarni int i; 3303dbd2b826SGanapatrao Kulkarni 3304dbd2b826SGanapatrao Kulkarni for (i = 0; i < its_in_srat; i++) { 3305dbd2b826SGanapatrao Kulkarni if (its_id == its_srat_maps[i].its_id) 3306dbd2b826SGanapatrao Kulkarni return its_srat_maps[i].numa_node; 3307dbd2b826SGanapatrao Kulkarni } 3308dbd2b826SGanapatrao Kulkarni return NUMA_NO_NODE; 3309dbd2b826SGanapatrao Kulkarni } 3310dbd2b826SGanapatrao Kulkarni 3311fdf6e7a8SHanjun Guo static int __init gic_acpi_match_srat_its(struct acpi_subtable_header *header, 3312fdf6e7a8SHanjun Guo const unsigned long end) 3313fdf6e7a8SHanjun Guo { 3314fdf6e7a8SHanjun Guo return 0; 3315fdf6e7a8SHanjun Guo } 
3316fdf6e7a8SHanjun Guo 3317dbd2b826SGanapatrao Kulkarni static int __init gic_acpi_parse_srat_its(struct acpi_subtable_header *header, 3318dbd2b826SGanapatrao Kulkarni const unsigned long end) 3319dbd2b826SGanapatrao Kulkarni { 3320dbd2b826SGanapatrao Kulkarni int node; 3321dbd2b826SGanapatrao Kulkarni struct acpi_srat_gic_its_affinity *its_affinity; 3322dbd2b826SGanapatrao Kulkarni 3323dbd2b826SGanapatrao Kulkarni its_affinity = (struct acpi_srat_gic_its_affinity *)header; 3324dbd2b826SGanapatrao Kulkarni if (!its_affinity) 3325dbd2b826SGanapatrao Kulkarni return -EINVAL; 3326dbd2b826SGanapatrao Kulkarni 3327dbd2b826SGanapatrao Kulkarni if (its_affinity->header.length < sizeof(*its_affinity)) { 3328dbd2b826SGanapatrao Kulkarni pr_err("SRAT: Invalid header length %d in ITS affinity\n", 3329dbd2b826SGanapatrao Kulkarni its_affinity->header.length); 3330dbd2b826SGanapatrao Kulkarni return -EINVAL; 3331dbd2b826SGanapatrao Kulkarni } 3332dbd2b826SGanapatrao Kulkarni 3333dbd2b826SGanapatrao Kulkarni node = acpi_map_pxm_to_node(its_affinity->proximity_domain); 3334dbd2b826SGanapatrao Kulkarni 3335dbd2b826SGanapatrao Kulkarni if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) { 3336dbd2b826SGanapatrao Kulkarni pr_err("SRAT: Invalid NUMA node %d in ITS affinity\n", node); 3337dbd2b826SGanapatrao Kulkarni return 0; 3338dbd2b826SGanapatrao Kulkarni } 3339dbd2b826SGanapatrao Kulkarni 3340dbd2b826SGanapatrao Kulkarni its_srat_maps[its_in_srat].numa_node = node; 3341dbd2b826SGanapatrao Kulkarni its_srat_maps[its_in_srat].its_id = its_affinity->its_id; 3342dbd2b826SGanapatrao Kulkarni its_in_srat++; 3343dbd2b826SGanapatrao Kulkarni pr_info("SRAT: PXM %d -> ITS %d -> Node %d\n", 3344dbd2b826SGanapatrao Kulkarni its_affinity->proximity_domain, its_affinity->its_id, node); 3345dbd2b826SGanapatrao Kulkarni 3346dbd2b826SGanapatrao Kulkarni return 0; 3347dbd2b826SGanapatrao Kulkarni } 3348dbd2b826SGanapatrao Kulkarni 3349dbd2b826SGanapatrao Kulkarni static void __init 
acpi_table_parse_srat_its(void) 3350dbd2b826SGanapatrao Kulkarni { 3351fdf6e7a8SHanjun Guo int count; 3352fdf6e7a8SHanjun Guo 3353fdf6e7a8SHanjun Guo count = acpi_table_parse_entries(ACPI_SIG_SRAT, 3354fdf6e7a8SHanjun Guo sizeof(struct acpi_table_srat), 3355fdf6e7a8SHanjun Guo ACPI_SRAT_TYPE_GIC_ITS_AFFINITY, 3356fdf6e7a8SHanjun Guo gic_acpi_match_srat_its, 0); 3357fdf6e7a8SHanjun Guo if (count <= 0) 3358fdf6e7a8SHanjun Guo return; 3359fdf6e7a8SHanjun Guo 3360fdf6e7a8SHanjun Guo its_srat_maps = kmalloc(count * sizeof(struct its_srat_map), 3361fdf6e7a8SHanjun Guo GFP_KERNEL); 3362fdf6e7a8SHanjun Guo if (!its_srat_maps) { 3363fdf6e7a8SHanjun Guo pr_warn("SRAT: Failed to allocate memory for its_srat_maps!\n"); 3364fdf6e7a8SHanjun Guo return; 3365fdf6e7a8SHanjun Guo } 3366fdf6e7a8SHanjun Guo 3367dbd2b826SGanapatrao Kulkarni acpi_table_parse_entries(ACPI_SIG_SRAT, 3368dbd2b826SGanapatrao Kulkarni sizeof(struct acpi_table_srat), 3369dbd2b826SGanapatrao Kulkarni ACPI_SRAT_TYPE_GIC_ITS_AFFINITY, 3370dbd2b826SGanapatrao Kulkarni gic_acpi_parse_srat_its, 0); 3371dbd2b826SGanapatrao Kulkarni } 3372fdf6e7a8SHanjun Guo 3373fdf6e7a8SHanjun Guo /* free the its_srat_maps after ITS probing */ 3374fdf6e7a8SHanjun Guo static void __init acpi_its_srat_maps_free(void) 3375fdf6e7a8SHanjun Guo { 3376fdf6e7a8SHanjun Guo kfree(its_srat_maps); 3377fdf6e7a8SHanjun Guo } 3378dbd2b826SGanapatrao Kulkarni #else 3379dbd2b826SGanapatrao Kulkarni static void __init acpi_table_parse_srat_its(void) { } 3380dbd2b826SGanapatrao Kulkarni static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; } 3381fdf6e7a8SHanjun Guo static void __init acpi_its_srat_maps_free(void) { } 3382dbd2b826SGanapatrao Kulkarni #endif 3383dbd2b826SGanapatrao Kulkarni 33843f010cf1STomasz Nowicki static int __init gic_acpi_parse_madt_its(struct acpi_subtable_header *header, 33853f010cf1STomasz Nowicki const unsigned long end) 33863f010cf1STomasz Nowicki { 33873f010cf1STomasz Nowicki struct 
acpi_madt_generic_translator *its_entry; 33883f010cf1STomasz Nowicki struct fwnode_handle *dom_handle; 33893f010cf1STomasz Nowicki struct resource res; 33903f010cf1STomasz Nowicki int err; 33913f010cf1STomasz Nowicki 33923f010cf1STomasz Nowicki its_entry = (struct acpi_madt_generic_translator *)header; 33933f010cf1STomasz Nowicki memset(&res, 0, sizeof(res)); 33943f010cf1STomasz Nowicki res.start = its_entry->base_address; 33953f010cf1STomasz Nowicki res.end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1; 33963f010cf1STomasz Nowicki res.flags = IORESOURCE_MEM; 33973f010cf1STomasz Nowicki 33983f010cf1STomasz Nowicki dom_handle = irq_domain_alloc_fwnode((void *)its_entry->base_address); 33993f010cf1STomasz Nowicki if (!dom_handle) { 34003f010cf1STomasz Nowicki pr_err("ITS@%pa: Unable to allocate GICv3 ITS domain token\n", 34013f010cf1STomasz Nowicki &res.start); 34023f010cf1STomasz Nowicki return -ENOMEM; 34033f010cf1STomasz Nowicki } 34043f010cf1STomasz Nowicki 34053f010cf1STomasz Nowicki err = iort_register_domain_token(its_entry->translation_id, dom_handle); 34063f010cf1STomasz Nowicki if (err) { 34073f010cf1STomasz Nowicki pr_err("ITS@%pa: Unable to register GICv3 ITS domain token (ITS ID %d) to IORT\n", 34083f010cf1STomasz Nowicki &res.start, its_entry->translation_id); 34093f010cf1STomasz Nowicki goto dom_err; 34103f010cf1STomasz Nowicki } 34113f010cf1STomasz Nowicki 3412dbd2b826SGanapatrao Kulkarni err = its_probe_one(&res, dom_handle, 3413dbd2b826SGanapatrao Kulkarni acpi_get_its_numa_node(its_entry->translation_id)); 34143f010cf1STomasz Nowicki if (!err) 34153f010cf1STomasz Nowicki return 0; 34163f010cf1STomasz Nowicki 34173f010cf1STomasz Nowicki iort_deregister_domain_token(its_entry->translation_id); 34183f010cf1STomasz Nowicki dom_err: 34193f010cf1STomasz Nowicki irq_domain_free_fwnode(dom_handle); 34203f010cf1STomasz Nowicki return err; 34213f010cf1STomasz Nowicki } 34223f010cf1STomasz Nowicki 34233f010cf1STomasz Nowicki static void __init 
its_acpi_probe(void) 34243f010cf1STomasz Nowicki { 3425dbd2b826SGanapatrao Kulkarni acpi_table_parse_srat_its(); 34263f010cf1STomasz Nowicki acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR, 34273f010cf1STomasz Nowicki gic_acpi_parse_madt_its, 0); 3428fdf6e7a8SHanjun Guo acpi_its_srat_maps_free(); 34293f010cf1STomasz Nowicki } 34303f010cf1STomasz Nowicki #else 34313f010cf1STomasz Nowicki static void __init its_acpi_probe(void) { } 34323f010cf1STomasz Nowicki #endif 34333f010cf1STomasz Nowicki 3434db40f0a7STomasz Nowicki int __init its_init(struct fwnode_handle *handle, struct rdists *rdists, 3435db40f0a7STomasz Nowicki struct irq_domain *parent_domain) 3436db40f0a7STomasz Nowicki { 3437db40f0a7STomasz Nowicki struct device_node *of_node; 34388fff27aeSMarc Zyngier struct its_node *its; 34398fff27aeSMarc Zyngier bool has_v4 = false; 34408fff27aeSMarc Zyngier int err; 3441db40f0a7STomasz Nowicki 3442db40f0a7STomasz Nowicki its_parent = parent_domain; 3443db40f0a7STomasz Nowicki of_node = to_of_node(handle); 3444db40f0a7STomasz Nowicki if (of_node) 3445db40f0a7STomasz Nowicki its_of_probe(of_node); 3446db40f0a7STomasz Nowicki else 34473f010cf1STomasz Nowicki its_acpi_probe(); 3448db40f0a7STomasz Nowicki 34494c21f3c2SMarc Zyngier if (list_empty(&its_nodes)) { 34504c21f3c2SMarc Zyngier pr_warn("ITS: No ITS available, not enabling LPIs\n"); 34514c21f3c2SMarc Zyngier return -ENXIO; 34524c21f3c2SMarc Zyngier } 34534c21f3c2SMarc Zyngier 34544c21f3c2SMarc Zyngier gic_rdists = rdists; 34558fff27aeSMarc Zyngier err = its_alloc_lpi_tables(); 34568fff27aeSMarc Zyngier if (err) 34578fff27aeSMarc Zyngier return err; 34588fff27aeSMarc Zyngier 34598fff27aeSMarc Zyngier list_for_each_entry(its, &its_nodes, entry) 34608fff27aeSMarc Zyngier has_v4 |= its->is_v4; 34618fff27aeSMarc Zyngier 34628fff27aeSMarc Zyngier if (has_v4 & rdists->has_vlpis) { 34633d63cb53SMarc Zyngier if (its_init_vpe_domain() || 34643d63cb53SMarc Zyngier its_init_v4(parent_domain, &its_vpe_domain_ops)) { 
34658fff27aeSMarc Zyngier rdists->has_vlpis = false; 34668fff27aeSMarc Zyngier pr_err("ITS: Disabling GICv4 support\n"); 34678fff27aeSMarc Zyngier } 34688fff27aeSMarc Zyngier } 34698fff27aeSMarc Zyngier 34708fff27aeSMarc Zyngier return 0; 34714c21f3c2SMarc Zyngier } 3472