1cc2d3216SMarc Zyngier /* 2d7276b80SMarc Zyngier * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved. 3cc2d3216SMarc Zyngier * Author: Marc Zyngier <marc.zyngier@arm.com> 4cc2d3216SMarc Zyngier * 5cc2d3216SMarc Zyngier * This program is free software; you can redistribute it and/or modify 6cc2d3216SMarc Zyngier * it under the terms of the GNU General Public License version 2 as 7cc2d3216SMarc Zyngier * published by the Free Software Foundation. 8cc2d3216SMarc Zyngier * 9cc2d3216SMarc Zyngier * This program is distributed in the hope that it will be useful, 10cc2d3216SMarc Zyngier * but WITHOUT ANY WARRANTY; without even the implied warranty of 11cc2d3216SMarc Zyngier * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12cc2d3216SMarc Zyngier * GNU General Public License for more details. 13cc2d3216SMarc Zyngier * 14cc2d3216SMarc Zyngier * You should have received a copy of the GNU General Public License 15cc2d3216SMarc Zyngier * along with this program. If not, see <http://www.gnu.org/licenses/>. 
16cc2d3216SMarc Zyngier */ 17cc2d3216SMarc Zyngier 183f010cf1STomasz Nowicki #include <linux/acpi.h> 198d3554b8SHanjun Guo #include <linux/acpi_iort.h> 20cc2d3216SMarc Zyngier #include <linux/bitmap.h> 21cc2d3216SMarc Zyngier #include <linux/cpu.h> 22cc2d3216SMarc Zyngier #include <linux/delay.h> 2344bb7e24SRobin Murphy #include <linux/dma-iommu.h> 24cc2d3216SMarc Zyngier #include <linux/interrupt.h> 253f010cf1STomasz Nowicki #include <linux/irqdomain.h> 26cc2d3216SMarc Zyngier #include <linux/log2.h> 27cc2d3216SMarc Zyngier #include <linux/mm.h> 28cc2d3216SMarc Zyngier #include <linux/msi.h> 29cc2d3216SMarc Zyngier #include <linux/of.h> 30cc2d3216SMarc Zyngier #include <linux/of_address.h> 31cc2d3216SMarc Zyngier #include <linux/of_irq.h> 32cc2d3216SMarc Zyngier #include <linux/of_pci.h> 33cc2d3216SMarc Zyngier #include <linux/of_platform.h> 34cc2d3216SMarc Zyngier #include <linux/percpu.h> 35cc2d3216SMarc Zyngier #include <linux/slab.h> 36cc2d3216SMarc Zyngier 3741a83e06SJoel Porquet #include <linux/irqchip.h> 38cc2d3216SMarc Zyngier #include <linux/irqchip/arm-gic-v3.h> 39c808eea8SMarc Zyngier #include <linux/irqchip/arm-gic-v4.h> 40cc2d3216SMarc Zyngier 41cc2d3216SMarc Zyngier #include <asm/cputype.h> 42cc2d3216SMarc Zyngier #include <asm/exception.h> 43cc2d3216SMarc Zyngier 4467510ccaSRobert Richter #include "irq-gic-common.h" 4567510ccaSRobert Richter 4694100970SRobert Richter #define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0) 4794100970SRobert Richter #define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1) 48fbf8f40eSGanapatrao Kulkarni #define ITS_FLAGS_WORKAROUND_CAVIUM_23144 (1ULL << 2) 49cc2d3216SMarc Zyngier 50c48ed51cSMarc Zyngier #define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0) 51c48ed51cSMarc Zyngier 52a13b0404SMarc Zyngier static u32 lpi_id_bits; 53a13b0404SMarc Zyngier 54a13b0404SMarc Zyngier /* 55a13b0404SMarc Zyngier * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to 56a13b0404SMarc Zyngier * deal with (one configuration byte per 
interrupt). PENDBASE has to 57a13b0404SMarc Zyngier * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI). 58a13b0404SMarc Zyngier */ 59a13b0404SMarc Zyngier #define LPI_NRBITS lpi_id_bits 60a13b0404SMarc Zyngier #define LPI_PROPBASE_SZ ALIGN(BIT(LPI_NRBITS), SZ_64K) 61a13b0404SMarc Zyngier #define LPI_PENDBASE_SZ ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K) 62a13b0404SMarc Zyngier 63a13b0404SMarc Zyngier #define LPI_PROP_DEFAULT_PRIO 0xa0 64a13b0404SMarc Zyngier 65cc2d3216SMarc Zyngier /* 66cc2d3216SMarc Zyngier * Collection structure - just an ID, and a redistributor address to 67cc2d3216SMarc Zyngier * ping. We use one per CPU as a bag of interrupts assigned to this 68cc2d3216SMarc Zyngier * CPU. 69cc2d3216SMarc Zyngier */ 70cc2d3216SMarc Zyngier struct its_collection { 71cc2d3216SMarc Zyngier u64 target_address; 72cc2d3216SMarc Zyngier u16 col_id; 73cc2d3216SMarc Zyngier }; 74cc2d3216SMarc Zyngier 75cc2d3216SMarc Zyngier /* 769347359aSShanker Donthineni * The ITS_BASER structure - contains memory information, cached 779347359aSShanker Donthineni * value of BASER register configuration and ITS page size. 78466b7d16SShanker Donthineni */ 79466b7d16SShanker Donthineni struct its_baser { 80466b7d16SShanker Donthineni void *base; 81466b7d16SShanker Donthineni u64 val; 82466b7d16SShanker Donthineni u32 order; 839347359aSShanker Donthineni u32 psz; 84466b7d16SShanker Donthineni }; 85466b7d16SShanker Donthineni 86466b7d16SShanker Donthineni /* 87cc2d3216SMarc Zyngier * The ITS structure - contains most of the infrastructure, with the 88841514abSMarc Zyngier * top-level MSI domain, the command queue, the collections, and the 89841514abSMarc Zyngier * list of devices writing to it. 
90cc2d3216SMarc Zyngier */ 91cc2d3216SMarc Zyngier struct its_node { 92cc2d3216SMarc Zyngier raw_spinlock_t lock; 93cc2d3216SMarc Zyngier struct list_head entry; 94cc2d3216SMarc Zyngier void __iomem *base; 95db40f0a7STomasz Nowicki phys_addr_t phys_base; 96cc2d3216SMarc Zyngier struct its_cmd_block *cmd_base; 97cc2d3216SMarc Zyngier struct its_cmd_block *cmd_write; 98466b7d16SShanker Donthineni struct its_baser tables[GITS_BASER_NR_REGS]; 99cc2d3216SMarc Zyngier struct its_collection *collections; 100cc2d3216SMarc Zyngier struct list_head its_device_list; 101cc2d3216SMarc Zyngier u64 flags; 102cc2d3216SMarc Zyngier u32 ite_size; 103466b7d16SShanker Donthineni u32 device_ids; 104fbf8f40eSGanapatrao Kulkarni int numa_node; 1053dfa576bSMarc Zyngier bool is_v4; 106cc2d3216SMarc Zyngier }; 107cc2d3216SMarc Zyngier 108cc2d3216SMarc Zyngier #define ITS_ITT_ALIGN SZ_256 109cc2d3216SMarc Zyngier 1102eca0d6cSShanker Donthineni /* Convert page order to size in bytes */ 1112eca0d6cSShanker Donthineni #define PAGE_ORDER_TO_SIZE(o) (PAGE_SIZE << (o)) 1122eca0d6cSShanker Donthineni 113591e5becSMarc Zyngier struct event_lpi_map { 114591e5becSMarc Zyngier unsigned long *lpi_map; 115591e5becSMarc Zyngier u16 *col_map; 116591e5becSMarc Zyngier irq_hw_number_t lpi_base; 117591e5becSMarc Zyngier int nr_lpis; 118d011e4e6SMarc Zyngier struct mutex vlpi_lock; 119d011e4e6SMarc Zyngier struct its_vm *vm; 120d011e4e6SMarc Zyngier struct its_vlpi_map *vlpi_maps; 121d011e4e6SMarc Zyngier int nr_vlpis; 122591e5becSMarc Zyngier }; 123591e5becSMarc Zyngier 124cc2d3216SMarc Zyngier /* 125d011e4e6SMarc Zyngier * The ITS view of a device - belongs to an ITS, owns an interrupt 126d011e4e6SMarc Zyngier * translation table, and a list of interrupts. If it some of its 127d011e4e6SMarc Zyngier * LPIs are injected into a guest (GICv4), the event_map.vm field 128d011e4e6SMarc Zyngier * indicates which one. 
129cc2d3216SMarc Zyngier */ 130cc2d3216SMarc Zyngier struct its_device { 131cc2d3216SMarc Zyngier struct list_head entry; 132cc2d3216SMarc Zyngier struct its_node *its; 133591e5becSMarc Zyngier struct event_lpi_map event_map; 134cc2d3216SMarc Zyngier void *itt; 135cc2d3216SMarc Zyngier u32 nr_ites; 136cc2d3216SMarc Zyngier u32 device_id; 137cc2d3216SMarc Zyngier }; 138cc2d3216SMarc Zyngier 13920b3d54eSMarc Zyngier static struct { 14020b3d54eSMarc Zyngier raw_spinlock_t lock; 14120b3d54eSMarc Zyngier struct its_device *dev; 14220b3d54eSMarc Zyngier struct its_vpe **vpes; 14320b3d54eSMarc Zyngier int next_victim; 14420b3d54eSMarc Zyngier } vpe_proxy; 14520b3d54eSMarc Zyngier 1461ac19ca6SMarc Zyngier static LIST_HEAD(its_nodes); 1471ac19ca6SMarc Zyngier static DEFINE_SPINLOCK(its_lock); 1481ac19ca6SMarc Zyngier static struct rdists *gic_rdists; 149db40f0a7STomasz Nowicki static struct irq_domain *its_parent; 1501ac19ca6SMarc Zyngier 1513dfa576bSMarc Zyngier /* 1523dfa576bSMarc Zyngier * We have a maximum number of 16 ITSs in the whole system if we're 1533dfa576bSMarc Zyngier * using the ITSList mechanism 1543dfa576bSMarc Zyngier */ 1553dfa576bSMarc Zyngier #define ITS_LIST_MAX 16 1563dfa576bSMarc Zyngier 1573dfa576bSMarc Zyngier static unsigned long its_list_map; 1583171a47aSMarc Zyngier static u16 vmovp_seq_num; 1593171a47aSMarc Zyngier static DEFINE_RAW_SPINLOCK(vmovp_lock); 1603171a47aSMarc Zyngier 1617d75bbb4SMarc Zyngier static DEFINE_IDA(its_vpeid_ida); 1623dfa576bSMarc Zyngier 1631ac19ca6SMarc Zyngier #define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist)) 1641ac19ca6SMarc Zyngier #define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base) 165e643d803SMarc Zyngier #define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K) 1661ac19ca6SMarc Zyngier 167591e5becSMarc Zyngier static struct its_collection *dev_event_to_col(struct its_device *its_dev, 168591e5becSMarc Zyngier u32 event) 169591e5becSMarc Zyngier { 170591e5becSMarc Zyngier struct 
its_node *its = its_dev->its; 171591e5becSMarc Zyngier 172591e5becSMarc Zyngier return its->collections + its_dev->event_map.col_map[event]; 173591e5becSMarc Zyngier } 174591e5becSMarc Zyngier 175cc2d3216SMarc Zyngier /* 176cc2d3216SMarc Zyngier * ITS command descriptors - parameters to be encoded in a command 177cc2d3216SMarc Zyngier * block. 178cc2d3216SMarc Zyngier */ 179cc2d3216SMarc Zyngier struct its_cmd_desc { 180cc2d3216SMarc Zyngier union { 181cc2d3216SMarc Zyngier struct { 182cc2d3216SMarc Zyngier struct its_device *dev; 183cc2d3216SMarc Zyngier u32 event_id; 184cc2d3216SMarc Zyngier } its_inv_cmd; 185cc2d3216SMarc Zyngier 186cc2d3216SMarc Zyngier struct { 187cc2d3216SMarc Zyngier struct its_device *dev; 188cc2d3216SMarc Zyngier u32 event_id; 1898d85dcedSMarc Zyngier } its_clear_cmd; 1908d85dcedSMarc Zyngier 1918d85dcedSMarc Zyngier struct { 1928d85dcedSMarc Zyngier struct its_device *dev; 1938d85dcedSMarc Zyngier u32 event_id; 194cc2d3216SMarc Zyngier } its_int_cmd; 195cc2d3216SMarc Zyngier 196cc2d3216SMarc Zyngier struct { 197cc2d3216SMarc Zyngier struct its_device *dev; 198cc2d3216SMarc Zyngier int valid; 199cc2d3216SMarc Zyngier } its_mapd_cmd; 200cc2d3216SMarc Zyngier 201cc2d3216SMarc Zyngier struct { 202cc2d3216SMarc Zyngier struct its_collection *col; 203cc2d3216SMarc Zyngier int valid; 204cc2d3216SMarc Zyngier } its_mapc_cmd; 205cc2d3216SMarc Zyngier 206cc2d3216SMarc Zyngier struct { 207cc2d3216SMarc Zyngier struct its_device *dev; 208cc2d3216SMarc Zyngier u32 phys_id; 209cc2d3216SMarc Zyngier u32 event_id; 2106a25ad3aSMarc Zyngier } its_mapti_cmd; 211cc2d3216SMarc Zyngier 212cc2d3216SMarc Zyngier struct { 213cc2d3216SMarc Zyngier struct its_device *dev; 214cc2d3216SMarc Zyngier struct its_collection *col; 215591e5becSMarc Zyngier u32 event_id; 216cc2d3216SMarc Zyngier } its_movi_cmd; 217cc2d3216SMarc Zyngier 218cc2d3216SMarc Zyngier struct { 219cc2d3216SMarc Zyngier struct its_device *dev; 220cc2d3216SMarc Zyngier u32 event_id; 221cc2d3216SMarc 
Zyngier } its_discard_cmd; 222cc2d3216SMarc Zyngier 223cc2d3216SMarc Zyngier struct { 224cc2d3216SMarc Zyngier struct its_collection *col; 225cc2d3216SMarc Zyngier } its_invall_cmd; 226d011e4e6SMarc Zyngier 227d011e4e6SMarc Zyngier struct { 228d011e4e6SMarc Zyngier struct its_vpe *vpe; 229eb78192bSMarc Zyngier } its_vinvall_cmd; 230eb78192bSMarc Zyngier 231eb78192bSMarc Zyngier struct { 232eb78192bSMarc Zyngier struct its_vpe *vpe; 233eb78192bSMarc Zyngier struct its_collection *col; 234eb78192bSMarc Zyngier bool valid; 235eb78192bSMarc Zyngier } its_vmapp_cmd; 236eb78192bSMarc Zyngier 237eb78192bSMarc Zyngier struct { 238eb78192bSMarc Zyngier struct its_vpe *vpe; 239d011e4e6SMarc Zyngier struct its_device *dev; 240d011e4e6SMarc Zyngier u32 virt_id; 241d011e4e6SMarc Zyngier u32 event_id; 242d011e4e6SMarc Zyngier bool db_enabled; 243d011e4e6SMarc Zyngier } its_vmapti_cmd; 244d011e4e6SMarc Zyngier 245d011e4e6SMarc Zyngier struct { 246d011e4e6SMarc Zyngier struct its_vpe *vpe; 247d011e4e6SMarc Zyngier struct its_device *dev; 248d011e4e6SMarc Zyngier u32 event_id; 249d011e4e6SMarc Zyngier bool db_enabled; 250d011e4e6SMarc Zyngier } its_vmovi_cmd; 2513171a47aSMarc Zyngier 2523171a47aSMarc Zyngier struct { 2533171a47aSMarc Zyngier struct its_vpe *vpe; 2543171a47aSMarc Zyngier struct its_collection *col; 2553171a47aSMarc Zyngier u16 seq_num; 2563171a47aSMarc Zyngier u16 its_list; 2573171a47aSMarc Zyngier } its_vmovp_cmd; 258cc2d3216SMarc Zyngier }; 259cc2d3216SMarc Zyngier }; 260cc2d3216SMarc Zyngier 261cc2d3216SMarc Zyngier /* 262cc2d3216SMarc Zyngier * The ITS command block, which is what the ITS actually parses. 
263cc2d3216SMarc Zyngier */ 264cc2d3216SMarc Zyngier struct its_cmd_block { 265cc2d3216SMarc Zyngier u64 raw_cmd[4]; 266cc2d3216SMarc Zyngier }; 267cc2d3216SMarc Zyngier 268cc2d3216SMarc Zyngier #define ITS_CMD_QUEUE_SZ SZ_64K 269cc2d3216SMarc Zyngier #define ITS_CMD_QUEUE_NR_ENTRIES (ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block)) 270cc2d3216SMarc Zyngier 271cc2d3216SMarc Zyngier typedef struct its_collection *(*its_cmd_builder_t)(struct its_cmd_block *, 272cc2d3216SMarc Zyngier struct its_cmd_desc *); 273cc2d3216SMarc Zyngier 274d011e4e6SMarc Zyngier typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_cmd_block *, 275d011e4e6SMarc Zyngier struct its_cmd_desc *); 276d011e4e6SMarc Zyngier 2774d36f136SMarc Zyngier static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l) 2784d36f136SMarc Zyngier { 2794d36f136SMarc Zyngier u64 mask = GENMASK_ULL(h, l); 2804d36f136SMarc Zyngier *raw_cmd &= ~mask; 2814d36f136SMarc Zyngier *raw_cmd |= (val << l) & mask; 2824d36f136SMarc Zyngier } 2834d36f136SMarc Zyngier 284cc2d3216SMarc Zyngier static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr) 285cc2d3216SMarc Zyngier { 2864d36f136SMarc Zyngier its_mask_encode(&cmd->raw_cmd[0], cmd_nr, 7, 0); 287cc2d3216SMarc Zyngier } 288cc2d3216SMarc Zyngier 289cc2d3216SMarc Zyngier static void its_encode_devid(struct its_cmd_block *cmd, u32 devid) 290cc2d3216SMarc Zyngier { 2914d36f136SMarc Zyngier its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32); 292cc2d3216SMarc Zyngier } 293cc2d3216SMarc Zyngier 294cc2d3216SMarc Zyngier static void its_encode_event_id(struct its_cmd_block *cmd, u32 id) 295cc2d3216SMarc Zyngier { 2964d36f136SMarc Zyngier its_mask_encode(&cmd->raw_cmd[1], id, 31, 0); 297cc2d3216SMarc Zyngier } 298cc2d3216SMarc Zyngier 299cc2d3216SMarc Zyngier static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id) 300cc2d3216SMarc Zyngier { 3014d36f136SMarc Zyngier its_mask_encode(&cmd->raw_cmd[1], phys_id, 63, 32); 302cc2d3216SMarc Zyngier } 
303cc2d3216SMarc Zyngier 304cc2d3216SMarc Zyngier static void its_encode_size(struct its_cmd_block *cmd, u8 size) 305cc2d3216SMarc Zyngier { 3064d36f136SMarc Zyngier its_mask_encode(&cmd->raw_cmd[1], size, 4, 0); 307cc2d3216SMarc Zyngier } 308cc2d3216SMarc Zyngier 309cc2d3216SMarc Zyngier static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr) 310cc2d3216SMarc Zyngier { 3114d36f136SMarc Zyngier its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 50, 8); 312cc2d3216SMarc Zyngier } 313cc2d3216SMarc Zyngier 314cc2d3216SMarc Zyngier static void its_encode_valid(struct its_cmd_block *cmd, int valid) 315cc2d3216SMarc Zyngier { 3164d36f136SMarc Zyngier its_mask_encode(&cmd->raw_cmd[2], !!valid, 63, 63); 317cc2d3216SMarc Zyngier } 318cc2d3216SMarc Zyngier 319cc2d3216SMarc Zyngier static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr) 320cc2d3216SMarc Zyngier { 3214d36f136SMarc Zyngier its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 50, 16); 322cc2d3216SMarc Zyngier } 323cc2d3216SMarc Zyngier 324cc2d3216SMarc Zyngier static void its_encode_collection(struct its_cmd_block *cmd, u16 col) 325cc2d3216SMarc Zyngier { 3264d36f136SMarc Zyngier its_mask_encode(&cmd->raw_cmd[2], col, 15, 0); 327cc2d3216SMarc Zyngier } 328cc2d3216SMarc Zyngier 329d011e4e6SMarc Zyngier static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid) 330d011e4e6SMarc Zyngier { 331d011e4e6SMarc Zyngier its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32); 332d011e4e6SMarc Zyngier } 333d011e4e6SMarc Zyngier 334d011e4e6SMarc Zyngier static void its_encode_virt_id(struct its_cmd_block *cmd, u32 virt_id) 335d011e4e6SMarc Zyngier { 336d011e4e6SMarc Zyngier its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0); 337d011e4e6SMarc Zyngier } 338d011e4e6SMarc Zyngier 339d011e4e6SMarc Zyngier static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id) 340d011e4e6SMarc Zyngier { 341d011e4e6SMarc Zyngier its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32); 
342d011e4e6SMarc Zyngier } 343d011e4e6SMarc Zyngier 344d011e4e6SMarc Zyngier static void its_encode_db_valid(struct its_cmd_block *cmd, bool db_valid) 345d011e4e6SMarc Zyngier { 346d011e4e6SMarc Zyngier its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0); 347d011e4e6SMarc Zyngier } 348d011e4e6SMarc Zyngier 3493171a47aSMarc Zyngier static void its_encode_seq_num(struct its_cmd_block *cmd, u16 seq_num) 3503171a47aSMarc Zyngier { 3513171a47aSMarc Zyngier its_mask_encode(&cmd->raw_cmd[0], seq_num, 47, 32); 3523171a47aSMarc Zyngier } 3533171a47aSMarc Zyngier 3543171a47aSMarc Zyngier static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list) 3553171a47aSMarc Zyngier { 3563171a47aSMarc Zyngier its_mask_encode(&cmd->raw_cmd[1], its_list, 15, 0); 3573171a47aSMarc Zyngier } 3583171a47aSMarc Zyngier 359eb78192bSMarc Zyngier static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa) 360eb78192bSMarc Zyngier { 361eb78192bSMarc Zyngier its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 50, 16); 362eb78192bSMarc Zyngier } 363eb78192bSMarc Zyngier 364eb78192bSMarc Zyngier static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size) 365eb78192bSMarc Zyngier { 366eb78192bSMarc Zyngier its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0); 367eb78192bSMarc Zyngier } 368eb78192bSMarc Zyngier 369cc2d3216SMarc Zyngier static inline void its_fixup_cmd(struct its_cmd_block *cmd) 370cc2d3216SMarc Zyngier { 371cc2d3216SMarc Zyngier /* Let's fixup BE commands */ 372cc2d3216SMarc Zyngier cmd->raw_cmd[0] = cpu_to_le64(cmd->raw_cmd[0]); 373cc2d3216SMarc Zyngier cmd->raw_cmd[1] = cpu_to_le64(cmd->raw_cmd[1]); 374cc2d3216SMarc Zyngier cmd->raw_cmd[2] = cpu_to_le64(cmd->raw_cmd[2]); 375cc2d3216SMarc Zyngier cmd->raw_cmd[3] = cpu_to_le64(cmd->raw_cmd[3]); 376cc2d3216SMarc Zyngier } 377cc2d3216SMarc Zyngier 378cc2d3216SMarc Zyngier static struct its_collection *its_build_mapd_cmd(struct its_cmd_block *cmd, 379cc2d3216SMarc Zyngier struct its_cmd_desc *desc) 
380cc2d3216SMarc Zyngier { 381cc2d3216SMarc Zyngier unsigned long itt_addr; 382c8481267SMarc Zyngier u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites); 383cc2d3216SMarc Zyngier 384cc2d3216SMarc Zyngier itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt); 385cc2d3216SMarc Zyngier itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN); 386cc2d3216SMarc Zyngier 387cc2d3216SMarc Zyngier its_encode_cmd(cmd, GITS_CMD_MAPD); 388cc2d3216SMarc Zyngier its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id); 389cc2d3216SMarc Zyngier its_encode_size(cmd, size - 1); 390cc2d3216SMarc Zyngier its_encode_itt(cmd, itt_addr); 391cc2d3216SMarc Zyngier its_encode_valid(cmd, desc->its_mapd_cmd.valid); 392cc2d3216SMarc Zyngier 393cc2d3216SMarc Zyngier its_fixup_cmd(cmd); 394cc2d3216SMarc Zyngier 395591e5becSMarc Zyngier return NULL; 396cc2d3216SMarc Zyngier } 397cc2d3216SMarc Zyngier 398cc2d3216SMarc Zyngier static struct its_collection *its_build_mapc_cmd(struct its_cmd_block *cmd, 399cc2d3216SMarc Zyngier struct its_cmd_desc *desc) 400cc2d3216SMarc Zyngier { 401cc2d3216SMarc Zyngier its_encode_cmd(cmd, GITS_CMD_MAPC); 402cc2d3216SMarc Zyngier its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id); 403cc2d3216SMarc Zyngier its_encode_target(cmd, desc->its_mapc_cmd.col->target_address); 404cc2d3216SMarc Zyngier its_encode_valid(cmd, desc->its_mapc_cmd.valid); 405cc2d3216SMarc Zyngier 406cc2d3216SMarc Zyngier its_fixup_cmd(cmd); 407cc2d3216SMarc Zyngier 408cc2d3216SMarc Zyngier return desc->its_mapc_cmd.col; 409cc2d3216SMarc Zyngier } 410cc2d3216SMarc Zyngier 4116a25ad3aSMarc Zyngier static struct its_collection *its_build_mapti_cmd(struct its_cmd_block *cmd, 412cc2d3216SMarc Zyngier struct its_cmd_desc *desc) 413cc2d3216SMarc Zyngier { 414591e5becSMarc Zyngier struct its_collection *col; 415591e5becSMarc Zyngier 4166a25ad3aSMarc Zyngier col = dev_event_to_col(desc->its_mapti_cmd.dev, 4176a25ad3aSMarc Zyngier desc->its_mapti_cmd.event_id); 418591e5becSMarc Zyngier 4196a25ad3aSMarc Zyngier 
its_encode_cmd(cmd, GITS_CMD_MAPTI); 4206a25ad3aSMarc Zyngier its_encode_devid(cmd, desc->its_mapti_cmd.dev->device_id); 4216a25ad3aSMarc Zyngier its_encode_event_id(cmd, desc->its_mapti_cmd.event_id); 4226a25ad3aSMarc Zyngier its_encode_phys_id(cmd, desc->its_mapti_cmd.phys_id); 423591e5becSMarc Zyngier its_encode_collection(cmd, col->col_id); 424cc2d3216SMarc Zyngier 425cc2d3216SMarc Zyngier its_fixup_cmd(cmd); 426cc2d3216SMarc Zyngier 427591e5becSMarc Zyngier return col; 428cc2d3216SMarc Zyngier } 429cc2d3216SMarc Zyngier 430cc2d3216SMarc Zyngier static struct its_collection *its_build_movi_cmd(struct its_cmd_block *cmd, 431cc2d3216SMarc Zyngier struct its_cmd_desc *desc) 432cc2d3216SMarc Zyngier { 433591e5becSMarc Zyngier struct its_collection *col; 434591e5becSMarc Zyngier 435591e5becSMarc Zyngier col = dev_event_to_col(desc->its_movi_cmd.dev, 436591e5becSMarc Zyngier desc->its_movi_cmd.event_id); 437591e5becSMarc Zyngier 438cc2d3216SMarc Zyngier its_encode_cmd(cmd, GITS_CMD_MOVI); 439cc2d3216SMarc Zyngier its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id); 440591e5becSMarc Zyngier its_encode_event_id(cmd, desc->its_movi_cmd.event_id); 441cc2d3216SMarc Zyngier its_encode_collection(cmd, desc->its_movi_cmd.col->col_id); 442cc2d3216SMarc Zyngier 443cc2d3216SMarc Zyngier its_fixup_cmd(cmd); 444cc2d3216SMarc Zyngier 445591e5becSMarc Zyngier return col; 446cc2d3216SMarc Zyngier } 447cc2d3216SMarc Zyngier 448cc2d3216SMarc Zyngier static struct its_collection *its_build_discard_cmd(struct its_cmd_block *cmd, 449cc2d3216SMarc Zyngier struct its_cmd_desc *desc) 450cc2d3216SMarc Zyngier { 451591e5becSMarc Zyngier struct its_collection *col; 452591e5becSMarc Zyngier 453591e5becSMarc Zyngier col = dev_event_to_col(desc->its_discard_cmd.dev, 454591e5becSMarc Zyngier desc->its_discard_cmd.event_id); 455591e5becSMarc Zyngier 456cc2d3216SMarc Zyngier its_encode_cmd(cmd, GITS_CMD_DISCARD); 457cc2d3216SMarc Zyngier its_encode_devid(cmd, 
desc->its_discard_cmd.dev->device_id); 458cc2d3216SMarc Zyngier its_encode_event_id(cmd, desc->its_discard_cmd.event_id); 459cc2d3216SMarc Zyngier 460cc2d3216SMarc Zyngier its_fixup_cmd(cmd); 461cc2d3216SMarc Zyngier 462591e5becSMarc Zyngier return col; 463cc2d3216SMarc Zyngier } 464cc2d3216SMarc Zyngier 465cc2d3216SMarc Zyngier static struct its_collection *its_build_inv_cmd(struct its_cmd_block *cmd, 466cc2d3216SMarc Zyngier struct its_cmd_desc *desc) 467cc2d3216SMarc Zyngier { 468591e5becSMarc Zyngier struct its_collection *col; 469591e5becSMarc Zyngier 470591e5becSMarc Zyngier col = dev_event_to_col(desc->its_inv_cmd.dev, 471591e5becSMarc Zyngier desc->its_inv_cmd.event_id); 472591e5becSMarc Zyngier 473cc2d3216SMarc Zyngier its_encode_cmd(cmd, GITS_CMD_INV); 474cc2d3216SMarc Zyngier its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id); 475cc2d3216SMarc Zyngier its_encode_event_id(cmd, desc->its_inv_cmd.event_id); 476cc2d3216SMarc Zyngier 477cc2d3216SMarc Zyngier its_fixup_cmd(cmd); 478cc2d3216SMarc Zyngier 479591e5becSMarc Zyngier return col; 480cc2d3216SMarc Zyngier } 481cc2d3216SMarc Zyngier 4828d85dcedSMarc Zyngier static struct its_collection *its_build_int_cmd(struct its_cmd_block *cmd, 4838d85dcedSMarc Zyngier struct its_cmd_desc *desc) 4848d85dcedSMarc Zyngier { 4858d85dcedSMarc Zyngier struct its_collection *col; 4868d85dcedSMarc Zyngier 4878d85dcedSMarc Zyngier col = dev_event_to_col(desc->its_int_cmd.dev, 4888d85dcedSMarc Zyngier desc->its_int_cmd.event_id); 4898d85dcedSMarc Zyngier 4908d85dcedSMarc Zyngier its_encode_cmd(cmd, GITS_CMD_INT); 4918d85dcedSMarc Zyngier its_encode_devid(cmd, desc->its_int_cmd.dev->device_id); 4928d85dcedSMarc Zyngier its_encode_event_id(cmd, desc->its_int_cmd.event_id); 4938d85dcedSMarc Zyngier 4948d85dcedSMarc Zyngier its_fixup_cmd(cmd); 4958d85dcedSMarc Zyngier 4968d85dcedSMarc Zyngier return col; 4978d85dcedSMarc Zyngier } 4988d85dcedSMarc Zyngier 4998d85dcedSMarc Zyngier static struct its_collection 
*its_build_clear_cmd(struct its_cmd_block *cmd, 5008d85dcedSMarc Zyngier struct its_cmd_desc *desc) 5018d85dcedSMarc Zyngier { 5028d85dcedSMarc Zyngier struct its_collection *col; 5038d85dcedSMarc Zyngier 5048d85dcedSMarc Zyngier col = dev_event_to_col(desc->its_clear_cmd.dev, 5058d85dcedSMarc Zyngier desc->its_clear_cmd.event_id); 5068d85dcedSMarc Zyngier 5078d85dcedSMarc Zyngier its_encode_cmd(cmd, GITS_CMD_CLEAR); 5088d85dcedSMarc Zyngier its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id); 5098d85dcedSMarc Zyngier its_encode_event_id(cmd, desc->its_clear_cmd.event_id); 5108d85dcedSMarc Zyngier 5118d85dcedSMarc Zyngier its_fixup_cmd(cmd); 5128d85dcedSMarc Zyngier 5138d85dcedSMarc Zyngier return col; 5148d85dcedSMarc Zyngier } 5158d85dcedSMarc Zyngier 516cc2d3216SMarc Zyngier static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd, 517cc2d3216SMarc Zyngier struct its_cmd_desc *desc) 518cc2d3216SMarc Zyngier { 519cc2d3216SMarc Zyngier its_encode_cmd(cmd, GITS_CMD_INVALL); 520cc2d3216SMarc Zyngier its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id); 521cc2d3216SMarc Zyngier 522cc2d3216SMarc Zyngier its_fixup_cmd(cmd); 523cc2d3216SMarc Zyngier 524cc2d3216SMarc Zyngier return NULL; 525cc2d3216SMarc Zyngier } 526cc2d3216SMarc Zyngier 527eb78192bSMarc Zyngier static struct its_vpe *its_build_vinvall_cmd(struct its_cmd_block *cmd, 528eb78192bSMarc Zyngier struct its_cmd_desc *desc) 529eb78192bSMarc Zyngier { 530eb78192bSMarc Zyngier its_encode_cmd(cmd, GITS_CMD_VINVALL); 531eb78192bSMarc Zyngier its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id); 532eb78192bSMarc Zyngier 533eb78192bSMarc Zyngier its_fixup_cmd(cmd); 534eb78192bSMarc Zyngier 535eb78192bSMarc Zyngier return desc->its_vinvall_cmd.vpe; 536eb78192bSMarc Zyngier } 537eb78192bSMarc Zyngier 538eb78192bSMarc Zyngier static struct its_vpe *its_build_vmapp_cmd(struct its_cmd_block *cmd, 539eb78192bSMarc Zyngier struct its_cmd_desc *desc) 540eb78192bSMarc Zyngier { 
541eb78192bSMarc Zyngier unsigned long vpt_addr; 542eb78192bSMarc Zyngier 543eb78192bSMarc Zyngier vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page)); 544eb78192bSMarc Zyngier 545eb78192bSMarc Zyngier its_encode_cmd(cmd, GITS_CMD_VMAPP); 546eb78192bSMarc Zyngier its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id); 547eb78192bSMarc Zyngier its_encode_valid(cmd, desc->its_vmapp_cmd.valid); 548eb78192bSMarc Zyngier its_encode_target(cmd, desc->its_vmapp_cmd.col->target_address); 549eb78192bSMarc Zyngier its_encode_vpt_addr(cmd, vpt_addr); 550eb78192bSMarc Zyngier its_encode_vpt_size(cmd, LPI_NRBITS - 1); 551eb78192bSMarc Zyngier 552eb78192bSMarc Zyngier its_fixup_cmd(cmd); 553eb78192bSMarc Zyngier 554eb78192bSMarc Zyngier return desc->its_vmapp_cmd.vpe; 555eb78192bSMarc Zyngier } 556eb78192bSMarc Zyngier 557d011e4e6SMarc Zyngier static struct its_vpe *its_build_vmapti_cmd(struct its_cmd_block *cmd, 558d011e4e6SMarc Zyngier struct its_cmd_desc *desc) 559d011e4e6SMarc Zyngier { 560d011e4e6SMarc Zyngier u32 db; 561d011e4e6SMarc Zyngier 562d011e4e6SMarc Zyngier if (desc->its_vmapti_cmd.db_enabled) 563d011e4e6SMarc Zyngier db = desc->its_vmapti_cmd.vpe->vpe_db_lpi; 564d011e4e6SMarc Zyngier else 565d011e4e6SMarc Zyngier db = 1023; 566d011e4e6SMarc Zyngier 567d011e4e6SMarc Zyngier its_encode_cmd(cmd, GITS_CMD_VMAPTI); 568d011e4e6SMarc Zyngier its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id); 569d011e4e6SMarc Zyngier its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id); 570d011e4e6SMarc Zyngier its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id); 571d011e4e6SMarc Zyngier its_encode_db_phys_id(cmd, db); 572d011e4e6SMarc Zyngier its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id); 573d011e4e6SMarc Zyngier 574d011e4e6SMarc Zyngier its_fixup_cmd(cmd); 575d011e4e6SMarc Zyngier 576d011e4e6SMarc Zyngier return desc->its_vmapti_cmd.vpe; 577d011e4e6SMarc Zyngier } 578d011e4e6SMarc Zyngier 579d011e4e6SMarc Zyngier static struct its_vpe 
*its_build_vmovi_cmd(struct its_cmd_block *cmd, 580d011e4e6SMarc Zyngier struct its_cmd_desc *desc) 581d011e4e6SMarc Zyngier { 582d011e4e6SMarc Zyngier u32 db; 583d011e4e6SMarc Zyngier 584d011e4e6SMarc Zyngier if (desc->its_vmovi_cmd.db_enabled) 585d011e4e6SMarc Zyngier db = desc->its_vmovi_cmd.vpe->vpe_db_lpi; 586d011e4e6SMarc Zyngier else 587d011e4e6SMarc Zyngier db = 1023; 588d011e4e6SMarc Zyngier 589d011e4e6SMarc Zyngier its_encode_cmd(cmd, GITS_CMD_VMOVI); 590d011e4e6SMarc Zyngier its_encode_devid(cmd, desc->its_vmovi_cmd.dev->device_id); 591d011e4e6SMarc Zyngier its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id); 592d011e4e6SMarc Zyngier its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id); 593d011e4e6SMarc Zyngier its_encode_db_phys_id(cmd, db); 594d011e4e6SMarc Zyngier its_encode_db_valid(cmd, true); 595d011e4e6SMarc Zyngier 596d011e4e6SMarc Zyngier its_fixup_cmd(cmd); 597d011e4e6SMarc Zyngier 598d011e4e6SMarc Zyngier return desc->its_vmovi_cmd.vpe; 599d011e4e6SMarc Zyngier } 600d011e4e6SMarc Zyngier 6013171a47aSMarc Zyngier static struct its_vpe *its_build_vmovp_cmd(struct its_cmd_block *cmd, 6023171a47aSMarc Zyngier struct its_cmd_desc *desc) 6033171a47aSMarc Zyngier { 6043171a47aSMarc Zyngier its_encode_cmd(cmd, GITS_CMD_VMOVP); 6053171a47aSMarc Zyngier its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num); 6063171a47aSMarc Zyngier its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list); 6073171a47aSMarc Zyngier its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id); 6083171a47aSMarc Zyngier its_encode_target(cmd, desc->its_vmovp_cmd.col->target_address); 6093171a47aSMarc Zyngier 6103171a47aSMarc Zyngier its_fixup_cmd(cmd); 6113171a47aSMarc Zyngier 6123171a47aSMarc Zyngier return desc->its_vmovp_cmd.vpe; 6133171a47aSMarc Zyngier } 6143171a47aSMarc Zyngier 615cc2d3216SMarc Zyngier static u64 its_cmd_ptr_to_offset(struct its_node *its, 616cc2d3216SMarc Zyngier struct its_cmd_block *ptr) 617cc2d3216SMarc Zyngier { 618cc2d3216SMarc Zyngier return (ptr - 
its->cmd_base) * sizeof(*ptr);
}

/*
 * Returns 1 when writing one more entry would catch up with the ITS
 * read pointer (GITS_CREADR), i.e. the command queue is full.
 */
static int its_queue_full(struct its_node *its)
{
	int widx;
	int ridx;

	widx = its->cmd_write - its->cmd_base;
	ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block);

	/* This is incredibly unlikely to happen, unless the ITS locks up. */
	if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx)
		return 1;

	return 0;
}

/*
 * Claim the next free slot in the command queue, spinning (up to ~1s)
 * while the queue is full. Returns a zeroed command slot, or NULL if
 * the ITS never drained. Caller must hold its->lock.
 */
static struct its_cmd_block *its_allocate_entry(struct its_node *its)
{
	struct its_cmd_block *cmd;
	u32 count = 1000000;	/* 1s! */

	while (its_queue_full(its)) {
		count--;
		if (!count) {
			pr_err_ratelimited("ITS queue not draining\n");
			return NULL;
		}
		cpu_relax();
		udelay(1);
	}

	cmd = its->cmd_write++;

	/* Handle queue wrapping */
	if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES))
		its->cmd_write = its->cmd_base;

	/* Clear command */
	cmd->raw_cmd[0] = 0;
	cmd->raw_cmd[1] = 0;
	cmd->raw_cmd[2] = 0;
	cmd->raw_cmd[3] = 0;

	return cmd;
}

/*
 * Publish all queued commands to the ITS by advancing GITS_CWRITER,
 * and return the new write pointer (the end of the posted range).
 */
static struct its_cmd_block *its_post_commands(struct its_node *its)
{
	u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write);

	writel_relaxed(wr, its->base + GITS_CWRITER);

	return its->cmd_write;
}

static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
{
	/*
	 * Make sure the commands written to memory are observable by
	 * the ITS: either clean to PoC on non-coherent ITSs, or a
	 * simple store barrier otherwise.
	 */
	if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
		gic_flush_dcache_to_poc(cmd, sizeof(*cmd));
	else
		dsb(ishst);
}

/*
 * Poll GITS_CREADR until the ITS has consumed the [from, to) command
 * range, handling the queue-wrap case. Gives up (with an error
 * message) after ~1s.
 */
static void its_wait_for_range_completion(struct its_node *its,
					  struct its_cmd_block *from,
					  struct its_cmd_block *to)
{
	u64 rd_idx, from_idx, to_idx;
	u32 count = 1000000;	/* 1s! */

	from_idx = its_cmd_ptr_to_offset(its, from);
	to_idx = its_cmd_ptr_to_offset(its, to);

	while (1) {
		rd_idx = readl_relaxed(its->base + GITS_CREADR);

		/* Direct case */
		if (from_idx < to_idx && rd_idx >= to_idx)
			break;

		/* Wrapped case */
		if (from_idx >= to_idx && rd_idx >= to_idx && rd_idx < from_idx)
			break;

		count--;
		if (!count) {
			pr_err_ratelimited("ITS queue timeout\n");
			return;
		}
		cpu_relax();
		udelay(1);
	}
}

/* Warning, macro hell follows */
/*
 * Generates a function that queues one command (built by 'builder'),
 * optionally follows it with a sync command (built by 'buildfn'),
 * posts both, and waits for the ITS to consume them.
 */
#define BUILD_SINGLE_CMD_FUNC(name, buildtype, synctype, buildfn)	\
void name(struct its_node *its,						\
	  buildtype builder,						\
	  struct its_cmd_desc *desc)					\
{									\
	struct its_cmd_block *cmd, *sync_cmd, *next_cmd;		\
	synctype *sync_obj;						\
	unsigned long flags;						\
									\
	raw_spin_lock_irqsave(&its->lock, flags);			\
									\
	cmd = its_allocate_entry(its);					\
	if (!cmd) {		/* We're soooooo screewed... */		\
		raw_spin_unlock_irqrestore(&its->lock, flags);		\
		return;							\
	}								\
	sync_obj = builder(cmd, desc);					\
	its_flush_cmd(its, cmd);					\
									\
	/* A non-NULL sync object requests a trailing (V)SYNC */	\
	if (sync_obj) {							\
		sync_cmd = its_allocate_entry(its);			\
		if (!sync_cmd)						\
			goto post;					\
									\
		buildfn(sync_cmd, sync_obj);				\
		its_flush_cmd(its, sync_cmd);				\
	}								\
									\
post:									\
	next_cmd = its_post_commands(its);				\
	raw_spin_unlock_irqrestore(&its->lock, flags);			\
									\
	/* Wait outside the lock: only CREADR polling remains */	\
	its_wait_for_range_completion(its, cmd, next_cmd);		\
}

/* Encode a SYNC command targeting the given collection's redistributor */
static void its_build_sync_cmd(struct its_cmd_block *sync_cmd,
			       struct its_collection *sync_col)
{
	its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
	its_encode_target(sync_cmd, sync_col->target_address);

	its_fixup_cmd(sync_cmd);
}

static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t,
			     struct its_collection, its_build_sync_cmd)

/* Encode a VSYNC command for the given vPE (GICv4) */
static void its_build_vsync_cmd(struct its_cmd_block *sync_cmd,
				struct its_vpe *sync_vpe)
{
	its_encode_cmd(sync_cmd, GITS_CMD_VSYNC);
	its_encode_vpeid(sync_cmd, sync_vpe->vpe_id);

	its_fixup_cmd(sync_cmd);
}

static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t,
			     struct its_vpe, its_build_vsync_cmd)

/* Set the pending state for (dev, event_id) via an INT command */
static void its_send_int(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	desc.its_int_cmd.dev = dev;
	desc.its_int_cmd.event_id = event_id;

	its_send_single_command(dev->its, its_build_int_cmd, &desc);
}

/* Clear the pending state for (dev, event_id) via a CLEAR command */
static void its_send_clear(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	desc.its_clear_cmd.dev = dev;
	desc.its_clear_cmd.event_id = event_id;

	its_send_single_command(dev->its, its_build_clear_cmd, &desc);
}

/* Ask the ITS to reload the configuration for (dev, event_id) */
static void its_send_inv(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	desc.its_inv_cmd.dev = dev;
	desc.its_inv_cmd.event_id = event_id;

	its_send_single_command(dev->its, its_build_inv_cmd, &desc);
}

/* Map (valid) or unmap (!valid) the device's ITT via a MAPD command */
static void its_send_mapd(struct its_device *dev, int valid)
{
	struct its_cmd_desc desc;

	desc.its_mapd_cmd.dev = dev;
	desc.its_mapd_cmd.valid = !!valid;

	its_send_single_command(dev->its, its_build_mapd_cmd, &desc);
}

/* Map or unmap a collection to its target redistributor (MAPC) */
static void its_send_mapc(struct its_node *its, struct its_collection *col,
			  int valid)
{
	struct its_cmd_desc desc;

	desc.its_mapc_cmd.col = col;
	desc.its_mapc_cmd.valid = !!valid;

	its_send_single_command(its, its_build_mapc_cmd, &desc);
}

/* Bind event 'id' on this device to physical interrupt irq_id (MAPTI) */
static void its_send_mapti(struct its_device *dev, u32 irq_id, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_mapti_cmd.dev = dev;
	desc.its_mapti_cmd.phys_id = irq_id;
	desc.its_mapti_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_mapti_cmd, &desc);
}

/* Retarget event 'id' to a different collection (MOVI) */
static void its_send_movi(struct its_device *dev,
			  struct its_collection *col, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_movi_cmd.dev = dev;
	desc.its_movi_cmd.col = col;
	desc.its_movi_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_movi_cmd, &desc);
}

/* Remove the mapping of event 'id' on this device (DISCARD) */
static void its_send_discard(struct its_device *dev, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_discard_cmd.dev = dev;
	desc.its_discard_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_discard_cmd, &desc);
}

/* Invalidate all cached LPI configuration for a collection (INVALL) */
static void its_send_invall(struct its_node *its, struct its_collection *col)
{
	struct its_cmd_desc desc;

	desc.its_invall_cmd.col = col;

	its_send_single_command(its, its_build_invall_cmd, &desc);
}

/*
 * GICv4: map event 'id' to the virtual LPI recorded in the device's
 * vlpi_maps[] entry (VMAPTI), including the doorbell state.
 */
static void its_send_vmapti(struct its_device *dev, u32 id)
{
	struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id];
	struct its_cmd_desc desc;

	desc.its_vmapti_cmd.vpe = map->vpe;
	desc.its_vmapti_cmd.dev = dev;
	desc.its_vmapti_cmd.virt_id = map->vintid;
	desc.its_vmapti_cmd.event_id = id;
	desc.its_vmapti_cmd.db_enabled = map->db_enabled;

	its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc);
}

/* GICv4: re-issue the virtual mapping for event 'id' (VMOVI) */
static void its_send_vmovi(struct its_device *dev, u32 id)
{
	struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id];
	struct its_cmd_desc desc;

	desc.its_vmovi_cmd.vpe = map->vpe;
	desc.its_vmovi_cmd.dev = dev;
	desc.its_vmovi_cmd.event_id = id;
	desc.its_vmovi_cmd.db_enabled = map->db_enabled;

	its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc);
}

/* GICv4: map/unmap a vPE on every v4-capable ITS in the system (VMAPP) */
static void its_send_vmapp(struct its_vpe *vpe, bool valid)
{
	struct its_cmd_desc desc;
	struct its_node *its;

	desc.its_vmapp_cmd.vpe = vpe;
	desc.its_vmapp_cmd.valid = valid;

	list_for_each_entry(its, &its_nodes, entry) {
		if (!its->is_v4)
			continue;

		desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];
		its_send_single_vcommand(its, its_build_vmapp_cmd, &desc);
	}
}

/* GICv4: move a vPE to its (new) target collection on all v4 ITSs */
static void its_send_vmovp(struct its_vpe *vpe)
{
	struct its_cmd_desc desc;
	struct its_node *its;
	unsigned long flags;
	int col_id = vpe->col_idx;

	desc.its_vmovp_cmd.vpe = vpe;
	desc.its_vmovp_cmd.its_list = (u16)its_list_map;

	/* Without an its_list map, a single VMOVP to the first ITS suffices */
	if (!its_list_map) {
		its = list_first_entry(&its_nodes, struct its_node, entry);
		desc.its_vmovp_cmd.seq_num = 0;
		desc.its_vmovp_cmd.col = &its->collections[col_id];
		its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
		return;
	}

	/*
	 * Yet another marvel of the architecture. If using the
	 * its_list "feature", we need to make sure that all ITSs
	 * receive all VMOVP commands in the same order. The only way
	 * to guarantee this is to make vmovp a serialization point.
	 *
	 * Wall <-- Head.
	 */
	raw_spin_lock_irqsave(&vmovp_lock, flags);

	/* Global sequence number ties this batch together across ITSs */
	desc.its_vmovp_cmd.seq_num = vmovp_seq_num++;

	/* Emit VMOVPs */
	list_for_each_entry(its, &its_nodes, entry) {
		if (!its->is_v4)
			continue;

		desc.its_vmovp_cmd.col = &its->collections[col_id];
		its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
	}

	raw_spin_unlock_irqrestore(&vmovp_lock, flags);
}

/* GICv4: invalidate all cached VLPI configuration for a vPE (VINVALL) */
static void its_send_vinvall(struct its_vpe *vpe)
{
	struct its_cmd_desc desc;
	struct its_node *its;

	desc.its_vinvall_cmd.vpe = vpe;

	list_for_each_entry(its, &its_nodes, entry) {
		if (!its->is_v4)
			continue;
		its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
	}
}

/*
 * irqchip functions - assumes MSI, mostly.
 */

/* Translate an irq_data back into its per-device event number */
static inline u32 its_get_event_id(struct irq_data *d)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	return d->hwirq - its_dev->event_map.lpi_base;
}

/*
 * Update the one-byte configuration entry for this LPI (or VLPI, if
 * the interrupt is forwarded to a vcpu) in the relevant property
 * table: clear the 'clr' bits, set the 'set' bits, and make the
 * change visible to the redistributors. Does NOT issue an INV.
 */
static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
{
	irq_hw_number_t hwirq;
	struct page *prop_page;
	u8 *cfg;

	if (irqd_is_forwarded_to_vcpu(d)) {
		/* Forwarded: use the VM's vprop page and the virtual INTID */
		struct its_device *its_dev = irq_data_get_irq_chip_data(d);
		u32 event = its_get_event_id(d);

		prop_page = its_dev->event_map.vm->vprop_page;
		hwirq = its_dev->event_map.vlpi_maps[event].vintid;
	} else {
		prop_page = gic_rdists->prop_page;
		hwirq = d->hwirq;
	}

	/* LPI INTIDs start at 8192; the table is indexed from there */
	cfg = page_address(prop_page) + hwirq - 8192;
	*cfg &= ~clr;
	*cfg |= set | LPI_PROP_GROUP1;

	/*
	 * Make the above write visible to the redistributors.
	 * And yes, we're flushing exactly: One. Single. Byte.
	 * Humpf...
	 */
	if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
		gic_flush_dcache_to_poc(cfg, sizeof(*cfg));
	else
		dsb(ishst);
}

/* Write the config byte, then force the ITS to re-read it via INV */
static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);

	lpi_write_config(d, clr, set);
	its_send_inv(its_dev, its_get_event_id(d));
}

/* GICv4: enable/disable the doorbell for a forwarded interrupt */
static void its_vlpi_set_doorbell(struct irq_data *d, bool enable)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);

	/* Nothing to do if the doorbell state is unchanged */
	if (its_dev->event_map.vlpi_maps[event].db_enabled == enable)
		return;

	its_dev->event_map.vlpi_maps[event].db_enabled = enable;

	/*
	 * More fun with the architecture:
	 *
	 * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI
	 * value or to 1023, depending on the enable bit. But that
	 * would be issueing a mapping for an /existing/ DevID+EventID
	 * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI
	 * to the /same/ vPE, using this opportunity to adjust the
	 * doorbell. Mouahahahaha. We loves it, Precious.
	 */
	its_send_vmovi(its_dev, event);
}

static void its_mask_irq(struct irq_data *d)
{
	/* Forwarded interrupts mask via the doorbell instead */
	if (irqd_is_forwarded_to_vcpu(d))
		its_vlpi_set_doorbell(d, false);

	lpi_update_config(d, LPI_PROP_ENABLED, 0);
}

static void its_unmask_irq(struct irq_data *d)
{
	/* Forwarded interrupts unmask via the doorbell instead */
	if (irqd_is_forwarded_to_vcpu(d))
		its_vlpi_set_doorbell(d, true);

	lpi_update_config(d, 0, LPI_PROP_ENABLED);
}

/*
 * Move an LPI to (a collection mapped to) one of the CPUs in
 * mask_val, honouring the Cavium 23144 NUMA erratum restriction.
 */
static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	unsigned int cpu;
	const struct cpumask *cpu_mask = cpu_online_mask;
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	struct its_collection *target_col;
	u32 id = its_get_event_id(d);

	/* A forwarded interrupt should use irq_set_vcpu_affinity */
	if (irqd_is_forwarded_to_vcpu(d))
		return -EINVAL;

	/* lpi cannot be routed to a redistributor that is on a foreign node */
	if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
		if (its_dev->its->numa_node >= 0) {
			cpu_mask = cpumask_of_node(its_dev->its->numa_node);
1077fbf8f40eSGanapatrao Kulkarni if (!cpumask_intersects(mask_val, cpu_mask)) 1078fbf8f40eSGanapatrao Kulkarni return -EINVAL; 1079fbf8f40eSGanapatrao Kulkarni } 1080fbf8f40eSGanapatrao Kulkarni } 1081fbf8f40eSGanapatrao Kulkarni 1082fbf8f40eSGanapatrao Kulkarni cpu = cpumask_any_and(mask_val, cpu_mask); 1083fbf8f40eSGanapatrao Kulkarni 1084c48ed51cSMarc Zyngier if (cpu >= nr_cpu_ids) 1085c48ed51cSMarc Zyngier return -EINVAL; 1086c48ed51cSMarc Zyngier 10878b8d94a7SMaJun /* don't set the affinity when the target cpu is same as current one */ 10888b8d94a7SMaJun if (cpu != its_dev->event_map.col_map[id]) { 1089c48ed51cSMarc Zyngier target_col = &its_dev->its->collections[cpu]; 1090c48ed51cSMarc Zyngier its_send_movi(its_dev, target_col, id); 1091591e5becSMarc Zyngier its_dev->event_map.col_map[id] = cpu; 10920d224d35SMarc Zyngier irq_data_update_effective_affinity(d, cpumask_of(cpu)); 10938b8d94a7SMaJun } 1094c48ed51cSMarc Zyngier 1095c48ed51cSMarc Zyngier return IRQ_SET_MASK_OK_DONE; 1096c48ed51cSMarc Zyngier } 1097c48ed51cSMarc Zyngier 1098b48ac83dSMarc Zyngier static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg) 1099b48ac83dSMarc Zyngier { 1100b48ac83dSMarc Zyngier struct its_device *its_dev = irq_data_get_irq_chip_data(d); 1101b48ac83dSMarc Zyngier struct its_node *its; 1102b48ac83dSMarc Zyngier u64 addr; 1103b48ac83dSMarc Zyngier 1104b48ac83dSMarc Zyngier its = its_dev->its; 1105b48ac83dSMarc Zyngier addr = its->phys_base + GITS_TRANSLATER; 1106b48ac83dSMarc Zyngier 1107b11283ebSVladimir Murzin msg->address_lo = lower_32_bits(addr); 1108b11283ebSVladimir Murzin msg->address_hi = upper_32_bits(addr); 1109b48ac83dSMarc Zyngier msg->data = its_get_event_id(d); 111044bb7e24SRobin Murphy 111144bb7e24SRobin Murphy iommu_dma_map_msi_msg(d->irq, msg); 1112b48ac83dSMarc Zyngier } 1113b48ac83dSMarc Zyngier 11148d85dcedSMarc Zyngier static int its_irq_set_irqchip_state(struct irq_data *d, 11158d85dcedSMarc Zyngier enum irqchip_irq_state which, 
11168d85dcedSMarc Zyngier bool state) 11178d85dcedSMarc Zyngier { 11188d85dcedSMarc Zyngier struct its_device *its_dev = irq_data_get_irq_chip_data(d); 11198d85dcedSMarc Zyngier u32 event = its_get_event_id(d); 11208d85dcedSMarc Zyngier 11218d85dcedSMarc Zyngier if (which != IRQCHIP_STATE_PENDING) 11228d85dcedSMarc Zyngier return -EINVAL; 11238d85dcedSMarc Zyngier 11248d85dcedSMarc Zyngier if (state) 11258d85dcedSMarc Zyngier its_send_int(its_dev, event); 11268d85dcedSMarc Zyngier else 11278d85dcedSMarc Zyngier its_send_clear(its_dev, event); 11288d85dcedSMarc Zyngier 11298d85dcedSMarc Zyngier return 0; 11308d85dcedSMarc Zyngier } 11318d85dcedSMarc Zyngier 1132d011e4e6SMarc Zyngier static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info) 1133d011e4e6SMarc Zyngier { 1134d011e4e6SMarc Zyngier struct its_device *its_dev = irq_data_get_irq_chip_data(d); 1135d011e4e6SMarc Zyngier u32 event = its_get_event_id(d); 1136d011e4e6SMarc Zyngier int ret = 0; 1137d011e4e6SMarc Zyngier 1138d011e4e6SMarc Zyngier if (!info->map) 1139d011e4e6SMarc Zyngier return -EINVAL; 1140d011e4e6SMarc Zyngier 1141d011e4e6SMarc Zyngier mutex_lock(&its_dev->event_map.vlpi_lock); 1142d011e4e6SMarc Zyngier 1143d011e4e6SMarc Zyngier if (!its_dev->event_map.vm) { 1144d011e4e6SMarc Zyngier struct its_vlpi_map *maps; 1145d011e4e6SMarc Zyngier 1146d011e4e6SMarc Zyngier maps = kzalloc(sizeof(*maps) * its_dev->event_map.nr_lpis, 1147d011e4e6SMarc Zyngier GFP_KERNEL); 1148d011e4e6SMarc Zyngier if (!maps) { 1149d011e4e6SMarc Zyngier ret = -ENOMEM; 1150d011e4e6SMarc Zyngier goto out; 1151d011e4e6SMarc Zyngier } 1152d011e4e6SMarc Zyngier 1153d011e4e6SMarc Zyngier its_dev->event_map.vm = info->map->vm; 1154d011e4e6SMarc Zyngier its_dev->event_map.vlpi_maps = maps; 1155d011e4e6SMarc Zyngier } else if (its_dev->event_map.vm != info->map->vm) { 1156d011e4e6SMarc Zyngier ret = -EINVAL; 1157d011e4e6SMarc Zyngier goto out; 1158d011e4e6SMarc Zyngier } 1159d011e4e6SMarc Zyngier 1160d011e4e6SMarc Zyngier 
/* Get our private copy of the mapping information */ 1161d011e4e6SMarc Zyngier its_dev->event_map.vlpi_maps[event] = *info->map; 1162d011e4e6SMarc Zyngier 1163d011e4e6SMarc Zyngier if (irqd_is_forwarded_to_vcpu(d)) { 1164d011e4e6SMarc Zyngier /* Already mapped, move it around */ 1165d011e4e6SMarc Zyngier its_send_vmovi(its_dev, event); 1166d011e4e6SMarc Zyngier } else { 1167d011e4e6SMarc Zyngier /* Drop the physical mapping */ 1168d011e4e6SMarc Zyngier its_send_discard(its_dev, event); 1169d011e4e6SMarc Zyngier 1170d011e4e6SMarc Zyngier /* and install the virtual one */ 1171d011e4e6SMarc Zyngier its_send_vmapti(its_dev, event); 1172d011e4e6SMarc Zyngier irqd_set_forwarded_to_vcpu(d); 1173d011e4e6SMarc Zyngier 1174d011e4e6SMarc Zyngier /* Increment the number of VLPIs */ 1175d011e4e6SMarc Zyngier its_dev->event_map.nr_vlpis++; 1176d011e4e6SMarc Zyngier } 1177d011e4e6SMarc Zyngier 1178d011e4e6SMarc Zyngier out: 1179d011e4e6SMarc Zyngier mutex_unlock(&its_dev->event_map.vlpi_lock); 1180d011e4e6SMarc Zyngier return ret; 1181d011e4e6SMarc Zyngier } 1182d011e4e6SMarc Zyngier 1183d011e4e6SMarc Zyngier static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info) 1184d011e4e6SMarc Zyngier { 1185d011e4e6SMarc Zyngier struct its_device *its_dev = irq_data_get_irq_chip_data(d); 1186d011e4e6SMarc Zyngier u32 event = its_get_event_id(d); 1187d011e4e6SMarc Zyngier int ret = 0; 1188d011e4e6SMarc Zyngier 1189d011e4e6SMarc Zyngier mutex_lock(&its_dev->event_map.vlpi_lock); 1190d011e4e6SMarc Zyngier 1191d011e4e6SMarc Zyngier if (!its_dev->event_map.vm || 1192d011e4e6SMarc Zyngier !its_dev->event_map.vlpi_maps[event].vm) { 1193d011e4e6SMarc Zyngier ret = -EINVAL; 1194d011e4e6SMarc Zyngier goto out; 1195d011e4e6SMarc Zyngier } 1196d011e4e6SMarc Zyngier 1197d011e4e6SMarc Zyngier /* Copy our mapping information to the incoming request */ 1198d011e4e6SMarc Zyngier *info->map = its_dev->event_map.vlpi_maps[event]; 1199d011e4e6SMarc Zyngier 1200d011e4e6SMarc Zyngier out: 
	mutex_unlock(&its_dev->event_map.vlpi_lock);
	return ret;
}

/*
 * GICv4: tear down the virtual mapping for this event and restore the
 * physical one (DISCARD + MAPTI), re-enabling the LPI with default
 * priority. Frees the per-device vlpi_maps[] array when the last VLPI
 * of the device goes away.
 */
static int its_vlpi_unmap(struct irq_data *d)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);
	int ret = 0;

	mutex_lock(&its_dev->event_map.vlpi_lock);

	if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) {
		ret = -EINVAL;
		goto out;
	}

	/* Drop the virtual mapping */
	its_send_discard(its_dev, event);

	/* and restore the physical one */
	irqd_clr_forwarded_to_vcpu(d);
	its_send_mapti(its_dev, d->hwirq, event);
	lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO |
				    LPI_PROP_ENABLED |
				    LPI_PROP_GROUP1));

	/*
	 * Drop the refcount and make the device available again if
	 * this was the last VLPI.
	 */
	if (!--its_dev->event_map.nr_vlpis) {
		its_dev->event_map.vm = NULL;
		kfree(its_dev->event_map.vlpi_maps);
	}

out:
	mutex_unlock(&its_dev->event_map.vlpi_lock);
	return ret;
}

/*
 * GICv4: update the VLPI config byte; the _AND_INV variant also
 * forces the ITS to re-read it. The doorbell tracks the enable bit.
 */
static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);

	if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
		return -EINVAL;

	if (info->cmd_type == PROP_UPDATE_AND_INV_VLPI)
		lpi_update_config(d, 0xff, info->config);
	else
		lpi_write_config(d, 0xff, info->config);
	its_vlpi_set_doorbell(d, !!(info->config & LPI_PROP_ENABLED));

	return 0;
}

/*
 * Dispatch vcpu-affinity requests (GICv4 only): NULL info means
 * unmap, otherwise route on the command type.
 */
static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	struct its_cmd_info *info = vcpu_info;

	/* Need a v4 ITS */
	if (!its_dev->its->is_v4)
		return -EINVAL;

	/* Unmap request? */
	if (!info)
		return its_vlpi_unmap(d);

	switch (info->cmd_type) {
	case MAP_VLPI:
		return its_vlpi_map(d, info);

	case GET_VLPI:
		return its_vlpi_get(d, info);

	case PROP_UPDATE_VLPI:
	case PROP_UPDATE_AND_INV_VLPI:
		return its_vlpi_prop_update(d, info);

	default:
		return -EINVAL;
	}
}

static struct irq_chip its_irq_chip = {
	.name			= "ITS",
	.irq_mask		= its_mask_irq,
	.irq_unmask		= its_unmask_irq,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_affinity	= its_set_affinity,
	.irq_compose_msi_msg	= its_irq_compose_msi_msg,
	.irq_set_irqchip_state	= its_irq_set_irqchip_state,
	.irq_set_vcpu_affinity	= its_irq_set_vcpu_affinity,
};

/*
 * How we allocate LPIs:
 *
 * The GIC has id_bits bits for interrupt identifiers. From there, we
 * must subtract 8192 which are reserved for SGIs/PPIs/SPIs. Then, as
 * we allocate LPIs by chunks of 32, we can shift the whole thing by 5
 * bits to the right.
 *
 * This gives us (((1UL << id_bits) - 8192) >> 5) possible allocations.
1307bf9529f8SMarc Zyngier */ 1308bf9529f8SMarc Zyngier #define IRQS_PER_CHUNK_SHIFT 5 1309bf9529f8SMarc Zyngier #define IRQS_PER_CHUNK (1 << IRQS_PER_CHUNK_SHIFT) 13106c31e123SShanker Donthineni #define ITS_MAX_LPI_NRBITS 16 /* 64K LPIs */ 1311bf9529f8SMarc Zyngier 1312bf9529f8SMarc Zyngier static unsigned long *lpi_bitmap; 1313bf9529f8SMarc Zyngier static u32 lpi_chunks; 1314bf9529f8SMarc Zyngier static DEFINE_SPINLOCK(lpi_lock); 1315bf9529f8SMarc Zyngier 1316bf9529f8SMarc Zyngier static int its_lpi_to_chunk(int lpi) 1317bf9529f8SMarc Zyngier { 1318bf9529f8SMarc Zyngier return (lpi - 8192) >> IRQS_PER_CHUNK_SHIFT; 1319bf9529f8SMarc Zyngier } 1320bf9529f8SMarc Zyngier 1321bf9529f8SMarc Zyngier static int its_chunk_to_lpi(int chunk) 1322bf9529f8SMarc Zyngier { 1323bf9529f8SMarc Zyngier return (chunk << IRQS_PER_CHUNK_SHIFT) + 8192; 1324bf9529f8SMarc Zyngier } 1325bf9529f8SMarc Zyngier 132604a0e4deSTomasz Nowicki static int __init its_lpi_init(u32 id_bits) 1327bf9529f8SMarc Zyngier { 1328bf9529f8SMarc Zyngier lpi_chunks = its_lpi_to_chunk(1UL << id_bits); 1329bf9529f8SMarc Zyngier 1330bf9529f8SMarc Zyngier lpi_bitmap = kzalloc(BITS_TO_LONGS(lpi_chunks) * sizeof(long), 1331bf9529f8SMarc Zyngier GFP_KERNEL); 1332bf9529f8SMarc Zyngier if (!lpi_bitmap) { 1333bf9529f8SMarc Zyngier lpi_chunks = 0; 1334bf9529f8SMarc Zyngier return -ENOMEM; 1335bf9529f8SMarc Zyngier } 1336bf9529f8SMarc Zyngier 1337bf9529f8SMarc Zyngier pr_info("ITS: Allocated %d chunks for LPIs\n", (int)lpi_chunks); 1338bf9529f8SMarc Zyngier return 0; 1339bf9529f8SMarc Zyngier } 1340bf9529f8SMarc Zyngier 1341bf9529f8SMarc Zyngier static unsigned long *its_lpi_alloc_chunks(int nr_irqs, int *base, int *nr_ids) 1342bf9529f8SMarc Zyngier { 1343bf9529f8SMarc Zyngier unsigned long *bitmap = NULL; 1344bf9529f8SMarc Zyngier int chunk_id; 1345bf9529f8SMarc Zyngier int nr_chunks; 1346bf9529f8SMarc Zyngier int i; 1347bf9529f8SMarc Zyngier 1348bf9529f8SMarc Zyngier nr_chunks = DIV_ROUND_UP(nr_irqs, IRQS_PER_CHUNK); 
1349bf9529f8SMarc Zyngier 1350bf9529f8SMarc Zyngier spin_lock(&lpi_lock); 1351bf9529f8SMarc Zyngier 1352bf9529f8SMarc Zyngier do { 1353bf9529f8SMarc Zyngier chunk_id = bitmap_find_next_zero_area(lpi_bitmap, lpi_chunks, 1354bf9529f8SMarc Zyngier 0, nr_chunks, 0); 1355bf9529f8SMarc Zyngier if (chunk_id < lpi_chunks) 1356bf9529f8SMarc Zyngier break; 1357bf9529f8SMarc Zyngier 1358bf9529f8SMarc Zyngier nr_chunks--; 1359bf9529f8SMarc Zyngier } while (nr_chunks > 0); 1360bf9529f8SMarc Zyngier 1361bf9529f8SMarc Zyngier if (!nr_chunks) 1362bf9529f8SMarc Zyngier goto out; 1363bf9529f8SMarc Zyngier 1364bf9529f8SMarc Zyngier bitmap = kzalloc(BITS_TO_LONGS(nr_chunks * IRQS_PER_CHUNK) * sizeof (long), 1365bf9529f8SMarc Zyngier GFP_ATOMIC); 1366bf9529f8SMarc Zyngier if (!bitmap) 1367bf9529f8SMarc Zyngier goto out; 1368bf9529f8SMarc Zyngier 1369bf9529f8SMarc Zyngier for (i = 0; i < nr_chunks; i++) 1370bf9529f8SMarc Zyngier set_bit(chunk_id + i, lpi_bitmap); 1371bf9529f8SMarc Zyngier 1372bf9529f8SMarc Zyngier *base = its_chunk_to_lpi(chunk_id); 1373bf9529f8SMarc Zyngier *nr_ids = nr_chunks * IRQS_PER_CHUNK; 1374bf9529f8SMarc Zyngier 1375bf9529f8SMarc Zyngier out: 1376bf9529f8SMarc Zyngier spin_unlock(&lpi_lock); 1377bf9529f8SMarc Zyngier 1378c8415b94SMarc Zyngier if (!bitmap) 1379c8415b94SMarc Zyngier *base = *nr_ids = 0; 1380c8415b94SMarc Zyngier 1381bf9529f8SMarc Zyngier return bitmap; 1382bf9529f8SMarc Zyngier } 1383bf9529f8SMarc Zyngier 1384cf2be8baSMarc Zyngier static void its_lpi_free_chunks(unsigned long *bitmap, int base, int nr_ids) 1385bf9529f8SMarc Zyngier { 1386bf9529f8SMarc Zyngier int lpi; 1387bf9529f8SMarc Zyngier 1388bf9529f8SMarc Zyngier spin_lock(&lpi_lock); 1389bf9529f8SMarc Zyngier 1390bf9529f8SMarc Zyngier for (lpi = base; lpi < (base + nr_ids); lpi += IRQS_PER_CHUNK) { 1391bf9529f8SMarc Zyngier int chunk = its_lpi_to_chunk(lpi); 1392cf2be8baSMarc Zyngier 1393bf9529f8SMarc Zyngier BUG_ON(chunk > lpi_chunks); 1394bf9529f8SMarc Zyngier if (test_bit(chunk, 
lpi_bitmap)) { 1395bf9529f8SMarc Zyngier clear_bit(chunk, lpi_bitmap); 1396bf9529f8SMarc Zyngier } else { 1397bf9529f8SMarc Zyngier pr_err("Bad LPI chunk %d\n", chunk); 1398bf9529f8SMarc Zyngier } 1399bf9529f8SMarc Zyngier } 1400bf9529f8SMarc Zyngier 1401bf9529f8SMarc Zyngier spin_unlock(&lpi_lock); 1402bf9529f8SMarc Zyngier 1403cf2be8baSMarc Zyngier kfree(bitmap); 1404bf9529f8SMarc Zyngier } 14051ac19ca6SMarc Zyngier 14060e5ccf91SMarc Zyngier static struct page *its_allocate_prop_table(gfp_t gfp_flags) 14070e5ccf91SMarc Zyngier { 14080e5ccf91SMarc Zyngier struct page *prop_page; 14091ac19ca6SMarc Zyngier 14100e5ccf91SMarc Zyngier prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ)); 14110e5ccf91SMarc Zyngier if (!prop_page) 14120e5ccf91SMarc Zyngier return NULL; 14130e5ccf91SMarc Zyngier 14140e5ccf91SMarc Zyngier /* Priority 0xa0, Group-1, disabled */ 14150e5ccf91SMarc Zyngier memset(page_address(prop_page), 14160e5ccf91SMarc Zyngier LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1, 14170e5ccf91SMarc Zyngier LPI_PROPBASE_SZ); 14180e5ccf91SMarc Zyngier 14190e5ccf91SMarc Zyngier /* Make sure the GIC will observe the written configuration */ 14200e5ccf91SMarc Zyngier gic_flush_dcache_to_poc(page_address(prop_page), LPI_PROPBASE_SZ); 14210e5ccf91SMarc Zyngier 14220e5ccf91SMarc Zyngier return prop_page; 14230e5ccf91SMarc Zyngier } 14240e5ccf91SMarc Zyngier 14257d75bbb4SMarc Zyngier static void its_free_prop_table(struct page *prop_page) 14267d75bbb4SMarc Zyngier { 14277d75bbb4SMarc Zyngier free_pages((unsigned long)page_address(prop_page), 14287d75bbb4SMarc Zyngier get_order(LPI_PROPBASE_SZ)); 14297d75bbb4SMarc Zyngier } 14301ac19ca6SMarc Zyngier 14311ac19ca6SMarc Zyngier static int __init its_alloc_lpi_tables(void) 14321ac19ca6SMarc Zyngier { 14331ac19ca6SMarc Zyngier phys_addr_t paddr; 14341ac19ca6SMarc Zyngier 14356c31e123SShanker Donthineni lpi_id_bits = min_t(u32, gic_rdists->id_bits, ITS_MAX_LPI_NRBITS); 14360e5ccf91SMarc Zyngier gic_rdists->prop_page = 
its_allocate_prop_table(GFP_NOWAIT); 14371ac19ca6SMarc Zyngier if (!gic_rdists->prop_page) { 14381ac19ca6SMarc Zyngier pr_err("Failed to allocate PROPBASE\n"); 14391ac19ca6SMarc Zyngier return -ENOMEM; 14401ac19ca6SMarc Zyngier } 14411ac19ca6SMarc Zyngier 14421ac19ca6SMarc Zyngier paddr = page_to_phys(gic_rdists->prop_page); 14431ac19ca6SMarc Zyngier pr_info("GIC: using LPI property table @%pa\n", &paddr); 14441ac19ca6SMarc Zyngier 14456c31e123SShanker Donthineni return its_lpi_init(lpi_id_bits); 14461ac19ca6SMarc Zyngier } 14471ac19ca6SMarc Zyngier 14481ac19ca6SMarc Zyngier static const char *its_base_type_string[] = { 14491ac19ca6SMarc Zyngier [GITS_BASER_TYPE_DEVICE] = "Devices", 14501ac19ca6SMarc Zyngier [GITS_BASER_TYPE_VCPU] = "Virtual CPUs", 14514f46de9dSMarc Zyngier [GITS_BASER_TYPE_RESERVED3] = "Reserved (3)", 14521ac19ca6SMarc Zyngier [GITS_BASER_TYPE_COLLECTION] = "Interrupt Collections", 14531ac19ca6SMarc Zyngier [GITS_BASER_TYPE_RESERVED5] = "Reserved (5)", 14541ac19ca6SMarc Zyngier [GITS_BASER_TYPE_RESERVED6] = "Reserved (6)", 14551ac19ca6SMarc Zyngier [GITS_BASER_TYPE_RESERVED7] = "Reserved (7)", 14561ac19ca6SMarc Zyngier }; 14571ac19ca6SMarc Zyngier 14582d81d425SShanker Donthineni static u64 its_read_baser(struct its_node *its, struct its_baser *baser) 14592d81d425SShanker Donthineni { 14602d81d425SShanker Donthineni u32 idx = baser - its->tables; 14612d81d425SShanker Donthineni 14620968a619SVladimir Murzin return gits_read_baser(its->base + GITS_BASER + (idx << 3)); 14632d81d425SShanker Donthineni } 14642d81d425SShanker Donthineni 14652d81d425SShanker Donthineni static void its_write_baser(struct its_node *its, struct its_baser *baser, 14662d81d425SShanker Donthineni u64 val) 14672d81d425SShanker Donthineni { 14682d81d425SShanker Donthineni u32 idx = baser - its->tables; 14692d81d425SShanker Donthineni 14700968a619SVladimir Murzin gits_write_baser(val, its->base + GITS_BASER + (idx << 3)); 14712d81d425SShanker Donthineni baser->val = 
its_read_baser(its, baser); 14722d81d425SShanker Donthineni } 14732d81d425SShanker Donthineni 14749347359aSShanker Donthineni static int its_setup_baser(struct its_node *its, struct its_baser *baser, 14753faf24eaSShanker Donthineni u64 cache, u64 shr, u32 psz, u32 order, 14763faf24eaSShanker Donthineni bool indirect) 14779347359aSShanker Donthineni { 14789347359aSShanker Donthineni u64 val = its_read_baser(its, baser); 14799347359aSShanker Donthineni u64 esz = GITS_BASER_ENTRY_SIZE(val); 14809347359aSShanker Donthineni u64 type = GITS_BASER_TYPE(val); 14819347359aSShanker Donthineni u32 alloc_pages; 14829347359aSShanker Donthineni void *base; 14839347359aSShanker Donthineni u64 tmp; 14849347359aSShanker Donthineni 14859347359aSShanker Donthineni retry_alloc_baser: 14869347359aSShanker Donthineni alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz); 14879347359aSShanker Donthineni if (alloc_pages > GITS_BASER_PAGES_MAX) { 14889347359aSShanker Donthineni pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n", 14899347359aSShanker Donthineni &its->phys_base, its_base_type_string[type], 14909347359aSShanker Donthineni alloc_pages, GITS_BASER_PAGES_MAX); 14919347359aSShanker Donthineni alloc_pages = GITS_BASER_PAGES_MAX; 14929347359aSShanker Donthineni order = get_order(GITS_BASER_PAGES_MAX * psz); 14939347359aSShanker Donthineni } 14949347359aSShanker Donthineni 14959347359aSShanker Donthineni base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order); 14969347359aSShanker Donthineni if (!base) 14979347359aSShanker Donthineni return -ENOMEM; 14989347359aSShanker Donthineni 14999347359aSShanker Donthineni retry_baser: 15009347359aSShanker Donthineni val = (virt_to_phys(base) | 15019347359aSShanker Donthineni (type << GITS_BASER_TYPE_SHIFT) | 15029347359aSShanker Donthineni ((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) | 15039347359aSShanker Donthineni ((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT) | 15049347359aSShanker Donthineni cache | 15059347359aSShanker Donthineni 
shr | 15069347359aSShanker Donthineni GITS_BASER_VALID); 15079347359aSShanker Donthineni 15083faf24eaSShanker Donthineni val |= indirect ? GITS_BASER_INDIRECT : 0x0; 15093faf24eaSShanker Donthineni 15109347359aSShanker Donthineni switch (psz) { 15119347359aSShanker Donthineni case SZ_4K: 15129347359aSShanker Donthineni val |= GITS_BASER_PAGE_SIZE_4K; 15139347359aSShanker Donthineni break; 15149347359aSShanker Donthineni case SZ_16K: 15159347359aSShanker Donthineni val |= GITS_BASER_PAGE_SIZE_16K; 15169347359aSShanker Donthineni break; 15179347359aSShanker Donthineni case SZ_64K: 15189347359aSShanker Donthineni val |= GITS_BASER_PAGE_SIZE_64K; 15199347359aSShanker Donthineni break; 15209347359aSShanker Donthineni } 15219347359aSShanker Donthineni 15229347359aSShanker Donthineni its_write_baser(its, baser, val); 15239347359aSShanker Donthineni tmp = baser->val; 15249347359aSShanker Donthineni 15259347359aSShanker Donthineni if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) { 15269347359aSShanker Donthineni /* 15279347359aSShanker Donthineni * Shareability didn't stick. Just use 15289347359aSShanker Donthineni * whatever the read reported, which is likely 15299347359aSShanker Donthineni * to be the only thing this redistributor 15309347359aSShanker Donthineni * supports. If that's zero, make it 15319347359aSShanker Donthineni * non-cacheable as well. 15329347359aSShanker Donthineni */ 15339347359aSShanker Donthineni shr = tmp & GITS_BASER_SHAREABILITY_MASK; 15349347359aSShanker Donthineni if (!shr) { 15359347359aSShanker Donthineni cache = GITS_BASER_nC; 1536328191c0SVladimir Murzin gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order)); 15379347359aSShanker Donthineni } 15389347359aSShanker Donthineni goto retry_baser; 15399347359aSShanker Donthineni } 15409347359aSShanker Donthineni 15419347359aSShanker Donthineni if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) { 15429347359aSShanker Donthineni /* 15439347359aSShanker Donthineni * Page size didn't stick. 
Let's try a smaller 15449347359aSShanker Donthineni * size and retry. If we reach 4K, then 15459347359aSShanker Donthineni * something is horribly wrong... 15469347359aSShanker Donthineni */ 15479347359aSShanker Donthineni free_pages((unsigned long)base, order); 15489347359aSShanker Donthineni baser->base = NULL; 15499347359aSShanker Donthineni 15509347359aSShanker Donthineni switch (psz) { 15519347359aSShanker Donthineni case SZ_16K: 15529347359aSShanker Donthineni psz = SZ_4K; 15539347359aSShanker Donthineni goto retry_alloc_baser; 15549347359aSShanker Donthineni case SZ_64K: 15559347359aSShanker Donthineni psz = SZ_16K; 15569347359aSShanker Donthineni goto retry_alloc_baser; 15579347359aSShanker Donthineni } 15589347359aSShanker Donthineni } 15599347359aSShanker Donthineni 15609347359aSShanker Donthineni if (val != tmp) { 1561b11283ebSVladimir Murzin pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n", 15629347359aSShanker Donthineni &its->phys_base, its_base_type_string[type], 1563b11283ebSVladimir Murzin val, tmp); 15649347359aSShanker Donthineni free_pages((unsigned long)base, order); 15659347359aSShanker Donthineni return -ENXIO; 15669347359aSShanker Donthineni } 15679347359aSShanker Donthineni 15689347359aSShanker Donthineni baser->order = order; 15699347359aSShanker Donthineni baser->base = base; 15709347359aSShanker Donthineni baser->psz = psz; 15713faf24eaSShanker Donthineni tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz; 15729347359aSShanker Donthineni 15733faf24eaSShanker Donthineni pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n", 1574d524eaa2SVladimir Murzin &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp), 15759347359aSShanker Donthineni its_base_type_string[type], 15769347359aSShanker Donthineni (unsigned long)virt_to_phys(base), 15773faf24eaSShanker Donthineni indirect ? 
"indirect" : "flat", (int)esz, 15789347359aSShanker Donthineni psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT); 15799347359aSShanker Donthineni 15809347359aSShanker Donthineni return 0; 15819347359aSShanker Donthineni } 15829347359aSShanker Donthineni 15834cacac57SMarc Zyngier static bool its_parse_indirect_baser(struct its_node *its, 15844cacac57SMarc Zyngier struct its_baser *baser, 15853faf24eaSShanker Donthineni u32 psz, u32 *order) 15864b75c459SShanker Donthineni { 15874cacac57SMarc Zyngier u64 tmp = its_read_baser(its, baser); 15884cacac57SMarc Zyngier u64 type = GITS_BASER_TYPE(tmp); 15894cacac57SMarc Zyngier u64 esz = GITS_BASER_ENTRY_SIZE(tmp); 15902fd632a0SShanker Donthineni u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb; 15914b75c459SShanker Donthineni u32 ids = its->device_ids; 15924b75c459SShanker Donthineni u32 new_order = *order; 15933faf24eaSShanker Donthineni bool indirect = false; 15943faf24eaSShanker Donthineni 15953faf24eaSShanker Donthineni /* No need to enable Indirection if memory requirement < (psz*2)bytes */ 15963faf24eaSShanker Donthineni if ((esz << ids) > (psz * 2)) { 15973faf24eaSShanker Donthineni /* 15983faf24eaSShanker Donthineni * Find out whether hw supports a single or two-level table by 15993faf24eaSShanker Donthineni * table by reading bit at offset '62' after writing '1' to it. 16003faf24eaSShanker Donthineni */ 16013faf24eaSShanker Donthineni its_write_baser(its, baser, val | GITS_BASER_INDIRECT); 16023faf24eaSShanker Donthineni indirect = !!(baser->val & GITS_BASER_INDIRECT); 16033faf24eaSShanker Donthineni 16043faf24eaSShanker Donthineni if (indirect) { 16053faf24eaSShanker Donthineni /* 16063faf24eaSShanker Donthineni * The size of the lvl2 table is equal to ITS page size 16073faf24eaSShanker Donthineni * which is 'psz'. 
For computing lvl1 table size, 16083faf24eaSShanker Donthineni * subtract ID bits that sparse lvl2 table from 'ids' 16093faf24eaSShanker Donthineni * which is reported by ITS hardware times lvl1 table 16103faf24eaSShanker Donthineni * entry size. 16113faf24eaSShanker Donthineni */ 1612d524eaa2SVladimir Murzin ids -= ilog2(psz / (int)esz); 16133faf24eaSShanker Donthineni esz = GITS_LVL1_ENTRY_SIZE; 16143faf24eaSShanker Donthineni } 16153faf24eaSShanker Donthineni } 16164b75c459SShanker Donthineni 16174b75c459SShanker Donthineni /* 16184b75c459SShanker Donthineni * Allocate as many entries as required to fit the 16194b75c459SShanker Donthineni * range of device IDs that the ITS can grok... The ID 16204b75c459SShanker Donthineni * space being incredibly sparse, this results in a 16213faf24eaSShanker Donthineni * massive waste of memory if two-level device table 16223faf24eaSShanker Donthineni * feature is not supported by hardware. 16234b75c459SShanker Donthineni */ 16244b75c459SShanker Donthineni new_order = max_t(u32, get_order(esz << ids), new_order); 16254b75c459SShanker Donthineni if (new_order >= MAX_ORDER) { 16264b75c459SShanker Donthineni new_order = MAX_ORDER - 1; 1627d524eaa2SVladimir Murzin ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz); 16284cacac57SMarc Zyngier pr_warn("ITS@%pa: %s Table too large, reduce ids %u->%u\n", 16294cacac57SMarc Zyngier &its->phys_base, its_base_type_string[type], 16304cacac57SMarc Zyngier its->device_ids, ids); 16314b75c459SShanker Donthineni } 16324b75c459SShanker Donthineni 16334b75c459SShanker Donthineni *order = new_order; 16343faf24eaSShanker Donthineni 16353faf24eaSShanker Donthineni return indirect; 16364b75c459SShanker Donthineni } 16374b75c459SShanker Donthineni 16381ac19ca6SMarc Zyngier static void its_free_tables(struct its_node *its) 16391ac19ca6SMarc Zyngier { 16401ac19ca6SMarc Zyngier int i; 16411ac19ca6SMarc Zyngier 16421ac19ca6SMarc Zyngier for (i = 0; i < GITS_BASER_NR_REGS; i++) { 16431a485f4dSShanker 
Donthineni if (its->tables[i].base) { 16441a485f4dSShanker Donthineni free_pages((unsigned long)its->tables[i].base, 16451a485f4dSShanker Donthineni its->tables[i].order); 16461a485f4dSShanker Donthineni its->tables[i].base = NULL; 16471ac19ca6SMarc Zyngier } 16481ac19ca6SMarc Zyngier } 16491ac19ca6SMarc Zyngier } 16501ac19ca6SMarc Zyngier 16510e0b0f69SShanker Donthineni static int its_alloc_tables(struct its_node *its) 16521ac19ca6SMarc Zyngier { 1653589ce5f4SMarc Zyngier u64 typer = gic_read_typer(its->base + GITS_TYPER); 16549347359aSShanker Donthineni u32 ids = GITS_TYPER_DEVBITS(typer); 16551ac19ca6SMarc Zyngier u64 shr = GITS_BASER_InnerShareable; 16562fd632a0SShanker Donthineni u64 cache = GITS_BASER_RaWaWb; 16579347359aSShanker Donthineni u32 psz = SZ_64K; 16589347359aSShanker Donthineni int err, i; 165994100970SRobert Richter 166094100970SRobert Richter if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375) { 166194100970SRobert Richter /* 166294100970SRobert Richter * erratum 22375: only alloc 8MB table size 166394100970SRobert Richter * erratum 24313: ignore memory access type 166494100970SRobert Richter */ 16659347359aSShanker Donthineni cache = GITS_BASER_nCnB; 166694100970SRobert Richter ids = 0x14; /* 20 bits, 8MB */ 166794100970SRobert Richter } 16681ac19ca6SMarc Zyngier 1669466b7d16SShanker Donthineni its->device_ids = ids; 1670466b7d16SShanker Donthineni 16711ac19ca6SMarc Zyngier for (i = 0; i < GITS_BASER_NR_REGS; i++) { 16722d81d425SShanker Donthineni struct its_baser *baser = its->tables + i; 16732d81d425SShanker Donthineni u64 val = its_read_baser(its, baser); 16741ac19ca6SMarc Zyngier u64 type = GITS_BASER_TYPE(val); 16759347359aSShanker Donthineni u32 order = get_order(psz); 16763faf24eaSShanker Donthineni bool indirect = false; 16771ac19ca6SMarc Zyngier 16784cacac57SMarc Zyngier switch (type) { 16794cacac57SMarc Zyngier case GITS_BASER_TYPE_NONE: 16801ac19ca6SMarc Zyngier continue; 16811ac19ca6SMarc Zyngier 16824cacac57SMarc Zyngier case 
GITS_BASER_TYPE_DEVICE: 16834cacac57SMarc Zyngier case GITS_BASER_TYPE_VCPU: 16844cacac57SMarc Zyngier indirect = its_parse_indirect_baser(its, baser, 16854cacac57SMarc Zyngier psz, &order); 16864cacac57SMarc Zyngier break; 16874cacac57SMarc Zyngier } 1688f54b97edSMarc Zyngier 16893faf24eaSShanker Donthineni err = its_setup_baser(its, baser, cache, shr, psz, order, indirect); 16909347359aSShanker Donthineni if (err < 0) { 16919347359aSShanker Donthineni its_free_tables(its); 16929347359aSShanker Donthineni return err; 169330f21363SRobert Richter } 169430f21363SRobert Richter 16959347359aSShanker Donthineni /* Update settings which will be used for next BASERn */ 16969347359aSShanker Donthineni psz = baser->psz; 16979347359aSShanker Donthineni cache = baser->val & GITS_BASER_CACHEABILITY_MASK; 16989347359aSShanker Donthineni shr = baser->val & GITS_BASER_SHAREABILITY_MASK; 16991ac19ca6SMarc Zyngier } 17001ac19ca6SMarc Zyngier 17011ac19ca6SMarc Zyngier return 0; 17021ac19ca6SMarc Zyngier } 17031ac19ca6SMarc Zyngier 17041ac19ca6SMarc Zyngier static int its_alloc_collections(struct its_node *its) 17051ac19ca6SMarc Zyngier { 17061ac19ca6SMarc Zyngier its->collections = kzalloc(nr_cpu_ids * sizeof(*its->collections), 17071ac19ca6SMarc Zyngier GFP_KERNEL); 17081ac19ca6SMarc Zyngier if (!its->collections) 17091ac19ca6SMarc Zyngier return -ENOMEM; 17101ac19ca6SMarc Zyngier 17111ac19ca6SMarc Zyngier return 0; 17121ac19ca6SMarc Zyngier } 17131ac19ca6SMarc Zyngier 17147c297a2dSMarc Zyngier static struct page *its_allocate_pending_table(gfp_t gfp_flags) 17157c297a2dSMarc Zyngier { 17167c297a2dSMarc Zyngier struct page *pend_page; 17177c297a2dSMarc Zyngier /* 17187c297a2dSMarc Zyngier * The pending pages have to be at least 64kB aligned, 17197c297a2dSMarc Zyngier * hence the 'max(LPI_PENDBASE_SZ, SZ_64K)' below. 
17207c297a2dSMarc Zyngier */ 17217c297a2dSMarc Zyngier pend_page = alloc_pages(gfp_flags | __GFP_ZERO, 17227c297a2dSMarc Zyngier get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K))); 17237c297a2dSMarc Zyngier if (!pend_page) 17247c297a2dSMarc Zyngier return NULL; 17257c297a2dSMarc Zyngier 17267c297a2dSMarc Zyngier /* Make sure the GIC will observe the zero-ed page */ 17277c297a2dSMarc Zyngier gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ); 17287c297a2dSMarc Zyngier 17297c297a2dSMarc Zyngier return pend_page; 17307c297a2dSMarc Zyngier } 17317c297a2dSMarc Zyngier 17327d75bbb4SMarc Zyngier static void its_free_pending_table(struct page *pt) 17337d75bbb4SMarc Zyngier { 17347d75bbb4SMarc Zyngier free_pages((unsigned long)page_address(pt), 17357d75bbb4SMarc Zyngier get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K))); 17367d75bbb4SMarc Zyngier } 17377d75bbb4SMarc Zyngier 17381ac19ca6SMarc Zyngier static void its_cpu_init_lpis(void) 17391ac19ca6SMarc Zyngier { 17401ac19ca6SMarc Zyngier void __iomem *rbase = gic_data_rdist_rd_base(); 17411ac19ca6SMarc Zyngier struct page *pend_page; 17421ac19ca6SMarc Zyngier u64 val, tmp; 17431ac19ca6SMarc Zyngier 17441ac19ca6SMarc Zyngier /* If we didn't allocate the pending table yet, do it now */ 17451ac19ca6SMarc Zyngier pend_page = gic_data_rdist()->pend_page; 17461ac19ca6SMarc Zyngier if (!pend_page) { 17471ac19ca6SMarc Zyngier phys_addr_t paddr; 17487c297a2dSMarc Zyngier 17497c297a2dSMarc Zyngier pend_page = its_allocate_pending_table(GFP_NOWAIT); 17501ac19ca6SMarc Zyngier if (!pend_page) { 17511ac19ca6SMarc Zyngier pr_err("Failed to allocate PENDBASE for CPU%d\n", 17521ac19ca6SMarc Zyngier smp_processor_id()); 17531ac19ca6SMarc Zyngier return; 17541ac19ca6SMarc Zyngier } 17551ac19ca6SMarc Zyngier 17561ac19ca6SMarc Zyngier paddr = page_to_phys(pend_page); 17571ac19ca6SMarc Zyngier pr_info("CPU%d: using LPI pending table @%pa\n", 17581ac19ca6SMarc Zyngier smp_processor_id(), &paddr); 17591ac19ca6SMarc Zyngier 
gic_data_rdist()->pend_page = pend_page; 17601ac19ca6SMarc Zyngier } 17611ac19ca6SMarc Zyngier 17621ac19ca6SMarc Zyngier /* Disable LPIs */ 17631ac19ca6SMarc Zyngier val = readl_relaxed(rbase + GICR_CTLR); 17641ac19ca6SMarc Zyngier val &= ~GICR_CTLR_ENABLE_LPIS; 17651ac19ca6SMarc Zyngier writel_relaxed(val, rbase + GICR_CTLR); 17661ac19ca6SMarc Zyngier 17671ac19ca6SMarc Zyngier /* 17681ac19ca6SMarc Zyngier * Make sure any change to the table is observable by the GIC. 17691ac19ca6SMarc Zyngier */ 17701ac19ca6SMarc Zyngier dsb(sy); 17711ac19ca6SMarc Zyngier 17721ac19ca6SMarc Zyngier /* set PROPBASE */ 17731ac19ca6SMarc Zyngier val = (page_to_phys(gic_rdists->prop_page) | 17741ac19ca6SMarc Zyngier GICR_PROPBASER_InnerShareable | 17752fd632a0SShanker Donthineni GICR_PROPBASER_RaWaWb | 17761ac19ca6SMarc Zyngier ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK)); 17771ac19ca6SMarc Zyngier 17780968a619SVladimir Murzin gicr_write_propbaser(val, rbase + GICR_PROPBASER); 17790968a619SVladimir Murzin tmp = gicr_read_propbaser(rbase + GICR_PROPBASER); 17801ac19ca6SMarc Zyngier 17811ac19ca6SMarc Zyngier if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) { 1782241a386cSMarc Zyngier if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) { 1783241a386cSMarc Zyngier /* 1784241a386cSMarc Zyngier * The HW reports non-shareable, we must 1785241a386cSMarc Zyngier * remove the cacheability attributes as 1786241a386cSMarc Zyngier * well. 
1787241a386cSMarc Zyngier */ 1788241a386cSMarc Zyngier val &= ~(GICR_PROPBASER_SHAREABILITY_MASK | 1789241a386cSMarc Zyngier GICR_PROPBASER_CACHEABILITY_MASK); 1790241a386cSMarc Zyngier val |= GICR_PROPBASER_nC; 17910968a619SVladimir Murzin gicr_write_propbaser(val, rbase + GICR_PROPBASER); 1792241a386cSMarc Zyngier } 17931ac19ca6SMarc Zyngier pr_info_once("GIC: using cache flushing for LPI property table\n"); 17941ac19ca6SMarc Zyngier gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING; 17951ac19ca6SMarc Zyngier } 17961ac19ca6SMarc Zyngier 17971ac19ca6SMarc Zyngier /* set PENDBASE */ 17981ac19ca6SMarc Zyngier val = (page_to_phys(pend_page) | 17994ad3e363SMarc Zyngier GICR_PENDBASER_InnerShareable | 18002fd632a0SShanker Donthineni GICR_PENDBASER_RaWaWb); 18011ac19ca6SMarc Zyngier 18020968a619SVladimir Murzin gicr_write_pendbaser(val, rbase + GICR_PENDBASER); 18030968a619SVladimir Murzin tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER); 1804241a386cSMarc Zyngier 1805241a386cSMarc Zyngier if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) { 1806241a386cSMarc Zyngier /* 1807241a386cSMarc Zyngier * The HW reports non-shareable, we must remove the 1808241a386cSMarc Zyngier * cacheability attributes as well. 
1809241a386cSMarc Zyngier */ 1810241a386cSMarc Zyngier val &= ~(GICR_PENDBASER_SHAREABILITY_MASK | 1811241a386cSMarc Zyngier GICR_PENDBASER_CACHEABILITY_MASK); 1812241a386cSMarc Zyngier val |= GICR_PENDBASER_nC; 18130968a619SVladimir Murzin gicr_write_pendbaser(val, rbase + GICR_PENDBASER); 1814241a386cSMarc Zyngier } 18151ac19ca6SMarc Zyngier 18161ac19ca6SMarc Zyngier /* Enable LPIs */ 18171ac19ca6SMarc Zyngier val = readl_relaxed(rbase + GICR_CTLR); 18181ac19ca6SMarc Zyngier val |= GICR_CTLR_ENABLE_LPIS; 18191ac19ca6SMarc Zyngier writel_relaxed(val, rbase + GICR_CTLR); 18201ac19ca6SMarc Zyngier 18211ac19ca6SMarc Zyngier /* Make sure the GIC has seen the above */ 18221ac19ca6SMarc Zyngier dsb(sy); 18231ac19ca6SMarc Zyngier } 18241ac19ca6SMarc Zyngier 18251ac19ca6SMarc Zyngier static void its_cpu_init_collection(void) 18261ac19ca6SMarc Zyngier { 18271ac19ca6SMarc Zyngier struct its_node *its; 18281ac19ca6SMarc Zyngier int cpu; 18291ac19ca6SMarc Zyngier 18301ac19ca6SMarc Zyngier spin_lock(&its_lock); 18311ac19ca6SMarc Zyngier cpu = smp_processor_id(); 18321ac19ca6SMarc Zyngier 18331ac19ca6SMarc Zyngier list_for_each_entry(its, &its_nodes, entry) { 18341ac19ca6SMarc Zyngier u64 target; 18351ac19ca6SMarc Zyngier 1836fbf8f40eSGanapatrao Kulkarni /* avoid cross node collections and its mapping */ 1837fbf8f40eSGanapatrao Kulkarni if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) { 1838fbf8f40eSGanapatrao Kulkarni struct device_node *cpu_node; 1839fbf8f40eSGanapatrao Kulkarni 1840fbf8f40eSGanapatrao Kulkarni cpu_node = of_get_cpu_node(cpu, NULL); 1841fbf8f40eSGanapatrao Kulkarni if (its->numa_node != NUMA_NO_NODE && 1842fbf8f40eSGanapatrao Kulkarni its->numa_node != of_node_to_nid(cpu_node)) 1843fbf8f40eSGanapatrao Kulkarni continue; 1844fbf8f40eSGanapatrao Kulkarni } 1845fbf8f40eSGanapatrao Kulkarni 18461ac19ca6SMarc Zyngier /* 18471ac19ca6SMarc Zyngier * We now have to bind each collection to its target 18481ac19ca6SMarc Zyngier * redistributor. 
18491ac19ca6SMarc Zyngier */ 1850589ce5f4SMarc Zyngier if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) { 18511ac19ca6SMarc Zyngier /* 18521ac19ca6SMarc Zyngier * This ITS wants the physical address of the 18531ac19ca6SMarc Zyngier * redistributor. 18541ac19ca6SMarc Zyngier */ 18551ac19ca6SMarc Zyngier target = gic_data_rdist()->phys_base; 18561ac19ca6SMarc Zyngier } else { 18571ac19ca6SMarc Zyngier /* 18581ac19ca6SMarc Zyngier * This ITS wants a linear CPU number. 18591ac19ca6SMarc Zyngier */ 1860589ce5f4SMarc Zyngier target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER); 1861263fcd31SMarc Zyngier target = GICR_TYPER_CPU_NUMBER(target) << 16; 18621ac19ca6SMarc Zyngier } 18631ac19ca6SMarc Zyngier 18641ac19ca6SMarc Zyngier /* Perform collection mapping */ 18651ac19ca6SMarc Zyngier its->collections[cpu].target_address = target; 18661ac19ca6SMarc Zyngier its->collections[cpu].col_id = cpu; 18671ac19ca6SMarc Zyngier 18681ac19ca6SMarc Zyngier its_send_mapc(its, &its->collections[cpu], 1); 18691ac19ca6SMarc Zyngier its_send_invall(its, &its->collections[cpu]); 18701ac19ca6SMarc Zyngier } 18711ac19ca6SMarc Zyngier 18721ac19ca6SMarc Zyngier spin_unlock(&its_lock); 18731ac19ca6SMarc Zyngier } 187484a6a2e7SMarc Zyngier 187584a6a2e7SMarc Zyngier static struct its_device *its_find_device(struct its_node *its, u32 dev_id) 187684a6a2e7SMarc Zyngier { 187784a6a2e7SMarc Zyngier struct its_device *its_dev = NULL, *tmp; 18783e39e8f5SMarc Zyngier unsigned long flags; 187984a6a2e7SMarc Zyngier 18803e39e8f5SMarc Zyngier raw_spin_lock_irqsave(&its->lock, flags); 188184a6a2e7SMarc Zyngier 188284a6a2e7SMarc Zyngier list_for_each_entry(tmp, &its->its_device_list, entry) { 188384a6a2e7SMarc Zyngier if (tmp->device_id == dev_id) { 188484a6a2e7SMarc Zyngier its_dev = tmp; 188584a6a2e7SMarc Zyngier break; 188684a6a2e7SMarc Zyngier } 188784a6a2e7SMarc Zyngier } 188884a6a2e7SMarc Zyngier 18893e39e8f5SMarc Zyngier raw_spin_unlock_irqrestore(&its->lock, flags); 
189084a6a2e7SMarc Zyngier 189184a6a2e7SMarc Zyngier return its_dev; 189284a6a2e7SMarc Zyngier } 189384a6a2e7SMarc Zyngier 1894466b7d16SShanker Donthineni static struct its_baser *its_get_baser(struct its_node *its, u32 type) 1895466b7d16SShanker Donthineni { 1896466b7d16SShanker Donthineni int i; 1897466b7d16SShanker Donthineni 1898466b7d16SShanker Donthineni for (i = 0; i < GITS_BASER_NR_REGS; i++) { 1899466b7d16SShanker Donthineni if (GITS_BASER_TYPE(its->tables[i].val) == type) 1900466b7d16SShanker Donthineni return &its->tables[i]; 1901466b7d16SShanker Donthineni } 1902466b7d16SShanker Donthineni 1903466b7d16SShanker Donthineni return NULL; 1904466b7d16SShanker Donthineni } 1905466b7d16SShanker Donthineni 190670cc81edSMarc Zyngier static bool its_alloc_table_entry(struct its_baser *baser, u32 id) 19073faf24eaSShanker Donthineni { 19083faf24eaSShanker Donthineni struct page *page; 19093faf24eaSShanker Donthineni u32 esz, idx; 19103faf24eaSShanker Donthineni __le64 *table; 19113faf24eaSShanker Donthineni 19123faf24eaSShanker Donthineni /* Don't allow device id that exceeds single, flat table limit */ 19133faf24eaSShanker Donthineni esz = GITS_BASER_ENTRY_SIZE(baser->val); 19143faf24eaSShanker Donthineni if (!(baser->val & GITS_BASER_INDIRECT)) 191570cc81edSMarc Zyngier return (id < (PAGE_ORDER_TO_SIZE(baser->order) / esz)); 19163faf24eaSShanker Donthineni 19173faf24eaSShanker Donthineni /* Compute 1st level table index & check if that exceeds table limit */ 191870cc81edSMarc Zyngier idx = id >> ilog2(baser->psz / esz); 19193faf24eaSShanker Donthineni if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE)) 19203faf24eaSShanker Donthineni return false; 19213faf24eaSShanker Donthineni 19223faf24eaSShanker Donthineni table = baser->base; 19233faf24eaSShanker Donthineni 19243faf24eaSShanker Donthineni /* Allocate memory for 2nd level table */ 19253faf24eaSShanker Donthineni if (!table[idx]) { 19263faf24eaSShanker Donthineni page = alloc_pages(GFP_KERNEL 
| __GFP_ZERO, get_order(baser->psz)); 19273faf24eaSShanker Donthineni if (!page) 19283faf24eaSShanker Donthineni return false; 19293faf24eaSShanker Donthineni 19303faf24eaSShanker Donthineni /* Flush Lvl2 table to PoC if hw doesn't support coherency */ 19313faf24eaSShanker Donthineni if (!(baser->val & GITS_BASER_SHAREABILITY_MASK)) 1932328191c0SVladimir Murzin gic_flush_dcache_to_poc(page_address(page), baser->psz); 19333faf24eaSShanker Donthineni 19343faf24eaSShanker Donthineni table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID); 19353faf24eaSShanker Donthineni 19363faf24eaSShanker Donthineni /* Flush Lvl1 entry to PoC if hw doesn't support coherency */ 19373faf24eaSShanker Donthineni if (!(baser->val & GITS_BASER_SHAREABILITY_MASK)) 1938328191c0SVladimir Murzin gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE); 19393faf24eaSShanker Donthineni 19403faf24eaSShanker Donthineni /* Ensure updated table contents are visible to ITS hardware */ 19413faf24eaSShanker Donthineni dsb(sy); 19423faf24eaSShanker Donthineni } 19433faf24eaSShanker Donthineni 19443faf24eaSShanker Donthineni return true; 19453faf24eaSShanker Donthineni } 19463faf24eaSShanker Donthineni 194770cc81edSMarc Zyngier static bool its_alloc_device_table(struct its_node *its, u32 dev_id) 194870cc81edSMarc Zyngier { 194970cc81edSMarc Zyngier struct its_baser *baser; 195070cc81edSMarc Zyngier 195170cc81edSMarc Zyngier baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE); 195270cc81edSMarc Zyngier 195370cc81edSMarc Zyngier /* Don't allow device id that exceeds ITS hardware limit */ 195470cc81edSMarc Zyngier if (!baser) 195570cc81edSMarc Zyngier return (ilog2(dev_id) < its->device_ids); 195670cc81edSMarc Zyngier 195770cc81edSMarc Zyngier return its_alloc_table_entry(baser, dev_id); 195870cc81edSMarc Zyngier } 195970cc81edSMarc Zyngier 19607d75bbb4SMarc Zyngier static bool its_alloc_vpe_table(u32 vpe_id) 19617d75bbb4SMarc Zyngier { 19627d75bbb4SMarc Zyngier struct its_node *its; 
19637d75bbb4SMarc Zyngier 19647d75bbb4SMarc Zyngier /* 19657d75bbb4SMarc Zyngier * Make sure the L2 tables are allocated on *all* v4 ITSs. We 19667d75bbb4SMarc Zyngier * could try and only do it on ITSs corresponding to devices 19677d75bbb4SMarc Zyngier * that have interrupts targeted at this VPE, but the 19687d75bbb4SMarc Zyngier * complexity becomes crazy (and you have tons of memory 19697d75bbb4SMarc Zyngier * anyway, right?). 19707d75bbb4SMarc Zyngier */ 19717d75bbb4SMarc Zyngier list_for_each_entry(its, &its_nodes, entry) { 19727d75bbb4SMarc Zyngier struct its_baser *baser; 19737d75bbb4SMarc Zyngier 19747d75bbb4SMarc Zyngier if (!its->is_v4) 19757d75bbb4SMarc Zyngier continue; 19767d75bbb4SMarc Zyngier 19777d75bbb4SMarc Zyngier baser = its_get_baser(its, GITS_BASER_TYPE_VCPU); 19787d75bbb4SMarc Zyngier if (!baser) 19797d75bbb4SMarc Zyngier return false; 19807d75bbb4SMarc Zyngier 19817d75bbb4SMarc Zyngier if (!its_alloc_table_entry(baser, vpe_id)) 19827d75bbb4SMarc Zyngier return false; 19837d75bbb4SMarc Zyngier } 19847d75bbb4SMarc Zyngier 19857d75bbb4SMarc Zyngier return true; 19867d75bbb4SMarc Zyngier } 19877d75bbb4SMarc Zyngier 198884a6a2e7SMarc Zyngier static struct its_device *its_create_device(struct its_node *its, u32 dev_id, 198993f94ea0SMarc Zyngier int nvecs, bool alloc_lpis) 199084a6a2e7SMarc Zyngier { 199184a6a2e7SMarc Zyngier struct its_device *dev; 199293f94ea0SMarc Zyngier unsigned long *lpi_map = NULL; 19933e39e8f5SMarc Zyngier unsigned long flags; 1994591e5becSMarc Zyngier u16 *col_map = NULL; 199584a6a2e7SMarc Zyngier void *itt; 199684a6a2e7SMarc Zyngier int lpi_base; 199784a6a2e7SMarc Zyngier int nr_lpis; 1998c8481267SMarc Zyngier int nr_ites; 199984a6a2e7SMarc Zyngier int sz; 200084a6a2e7SMarc Zyngier 20013faf24eaSShanker Donthineni if (!its_alloc_device_table(its, dev_id)) 2002466b7d16SShanker Donthineni return NULL; 2003466b7d16SShanker Donthineni 200484a6a2e7SMarc Zyngier dev = kzalloc(sizeof(*dev), GFP_KERNEL); 2005c8481267SMarc Zyngier 
/* 2006c8481267SMarc Zyngier * At least one bit of EventID is being used, hence a minimum 2007c8481267SMarc Zyngier * of two entries. No, the architecture doesn't let you 2008c8481267SMarc Zyngier * express an ITT with a single entry. 2009c8481267SMarc Zyngier */ 201096555c47SWill Deacon nr_ites = max(2UL, roundup_pow_of_two(nvecs)); 2011c8481267SMarc Zyngier sz = nr_ites * its->ite_size; 201284a6a2e7SMarc Zyngier sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; 20136c834125SYun Wu itt = kzalloc(sz, GFP_KERNEL); 201493f94ea0SMarc Zyngier if (alloc_lpis) { 201584a6a2e7SMarc Zyngier lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis); 2016591e5becSMarc Zyngier if (lpi_map) 201793f94ea0SMarc Zyngier col_map = kzalloc(sizeof(*col_map) * nr_lpis, 201893f94ea0SMarc Zyngier GFP_KERNEL); 201993f94ea0SMarc Zyngier } else { 202093f94ea0SMarc Zyngier col_map = kzalloc(sizeof(*col_map) * nr_ites, GFP_KERNEL); 202193f94ea0SMarc Zyngier nr_lpis = 0; 202293f94ea0SMarc Zyngier lpi_base = 0; 202393f94ea0SMarc Zyngier } 202484a6a2e7SMarc Zyngier 202593f94ea0SMarc Zyngier if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) { 202684a6a2e7SMarc Zyngier kfree(dev); 202784a6a2e7SMarc Zyngier kfree(itt); 202884a6a2e7SMarc Zyngier kfree(lpi_map); 2029591e5becSMarc Zyngier kfree(col_map); 203084a6a2e7SMarc Zyngier return NULL; 203184a6a2e7SMarc Zyngier } 203284a6a2e7SMarc Zyngier 2033328191c0SVladimir Murzin gic_flush_dcache_to_poc(itt, sz); 20345a9a8915SMarc Zyngier 203584a6a2e7SMarc Zyngier dev->its = its; 203684a6a2e7SMarc Zyngier dev->itt = itt; 2037c8481267SMarc Zyngier dev->nr_ites = nr_ites; 2038591e5becSMarc Zyngier dev->event_map.lpi_map = lpi_map; 2039591e5becSMarc Zyngier dev->event_map.col_map = col_map; 2040591e5becSMarc Zyngier dev->event_map.lpi_base = lpi_base; 2041591e5becSMarc Zyngier dev->event_map.nr_lpis = nr_lpis; 2042d011e4e6SMarc Zyngier mutex_init(&dev->event_map.vlpi_lock); 204384a6a2e7SMarc Zyngier dev->device_id = dev_id; 204484a6a2e7SMarc Zyngier 
INIT_LIST_HEAD(&dev->entry); 204584a6a2e7SMarc Zyngier 20463e39e8f5SMarc Zyngier raw_spin_lock_irqsave(&its->lock, flags); 204784a6a2e7SMarc Zyngier list_add(&dev->entry, &its->its_device_list); 20483e39e8f5SMarc Zyngier raw_spin_unlock_irqrestore(&its->lock, flags); 204984a6a2e7SMarc Zyngier 205084a6a2e7SMarc Zyngier /* Map device to its ITT */ 205184a6a2e7SMarc Zyngier its_send_mapd(dev, 1); 205284a6a2e7SMarc Zyngier 205384a6a2e7SMarc Zyngier return dev; 205484a6a2e7SMarc Zyngier } 205584a6a2e7SMarc Zyngier 205684a6a2e7SMarc Zyngier static void its_free_device(struct its_device *its_dev) 205784a6a2e7SMarc Zyngier { 20583e39e8f5SMarc Zyngier unsigned long flags; 20593e39e8f5SMarc Zyngier 20603e39e8f5SMarc Zyngier raw_spin_lock_irqsave(&its_dev->its->lock, flags); 206184a6a2e7SMarc Zyngier list_del(&its_dev->entry); 20623e39e8f5SMarc Zyngier raw_spin_unlock_irqrestore(&its_dev->its->lock, flags); 206384a6a2e7SMarc Zyngier kfree(its_dev->itt); 206484a6a2e7SMarc Zyngier kfree(its_dev); 206584a6a2e7SMarc Zyngier } 2066b48ac83dSMarc Zyngier 2067b48ac83dSMarc Zyngier static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq) 2068b48ac83dSMarc Zyngier { 2069b48ac83dSMarc Zyngier int idx; 2070b48ac83dSMarc Zyngier 2071591e5becSMarc Zyngier idx = find_first_zero_bit(dev->event_map.lpi_map, 2072591e5becSMarc Zyngier dev->event_map.nr_lpis); 2073591e5becSMarc Zyngier if (idx == dev->event_map.nr_lpis) 2074b48ac83dSMarc Zyngier return -ENOSPC; 2075b48ac83dSMarc Zyngier 2076591e5becSMarc Zyngier *hwirq = dev->event_map.lpi_base + idx; 2077591e5becSMarc Zyngier set_bit(idx, dev->event_map.lpi_map); 2078b48ac83dSMarc Zyngier 2079b48ac83dSMarc Zyngier return 0; 2080b48ac83dSMarc Zyngier } 2081b48ac83dSMarc Zyngier 208254456db9SMarc Zyngier static int its_msi_prepare(struct irq_domain *domain, struct device *dev, 2083b48ac83dSMarc Zyngier int nvec, msi_alloc_info_t *info) 2084b48ac83dSMarc Zyngier { 2085b48ac83dSMarc Zyngier struct its_node *its; 
2086b48ac83dSMarc Zyngier struct its_device *its_dev; 208754456db9SMarc Zyngier struct msi_domain_info *msi_info; 208854456db9SMarc Zyngier u32 dev_id; 2089b48ac83dSMarc Zyngier 209054456db9SMarc Zyngier /* 209154456db9SMarc Zyngier * We ignore "dev" entierely, and rely on the dev_id that has 209254456db9SMarc Zyngier * been passed via the scratchpad. This limits this domain's 209354456db9SMarc Zyngier * usefulness to upper layers that definitely know that they 209454456db9SMarc Zyngier * are built on top of the ITS. 209554456db9SMarc Zyngier */ 209654456db9SMarc Zyngier dev_id = info->scratchpad[0].ul; 209754456db9SMarc Zyngier 209854456db9SMarc Zyngier msi_info = msi_get_domain_info(domain); 209954456db9SMarc Zyngier its = msi_info->data; 210054456db9SMarc Zyngier 210120b3d54eSMarc Zyngier if (!gic_rdists->has_direct_lpi && 210220b3d54eSMarc Zyngier vpe_proxy.dev && 210320b3d54eSMarc Zyngier vpe_proxy.dev->its == its && 210420b3d54eSMarc Zyngier dev_id == vpe_proxy.dev->device_id) { 210520b3d54eSMarc Zyngier /* Bad luck. Get yourself a better implementation */ 210620b3d54eSMarc Zyngier WARN_ONCE(1, "DevId %x clashes with GICv4 VPE proxy device\n", 210720b3d54eSMarc Zyngier dev_id); 210820b3d54eSMarc Zyngier return -EINVAL; 210920b3d54eSMarc Zyngier } 211020b3d54eSMarc Zyngier 2111f130420eSMarc Zyngier its_dev = its_find_device(its, dev_id); 2112e8137f4fSMarc Zyngier if (its_dev) { 2113e8137f4fSMarc Zyngier /* 2114e8137f4fSMarc Zyngier * We already have seen this ID, probably through 2115e8137f4fSMarc Zyngier * another alias (PCI bridge of some sort). No need to 2116e8137f4fSMarc Zyngier * create the device. 
2117e8137f4fSMarc Zyngier */ 2118f130420eSMarc Zyngier pr_debug("Reusing ITT for devID %x\n", dev_id); 2119e8137f4fSMarc Zyngier goto out; 2120e8137f4fSMarc Zyngier } 2121b48ac83dSMarc Zyngier 212293f94ea0SMarc Zyngier its_dev = its_create_device(its, dev_id, nvec, true); 2123b48ac83dSMarc Zyngier if (!its_dev) 2124b48ac83dSMarc Zyngier return -ENOMEM; 2125b48ac83dSMarc Zyngier 2126f130420eSMarc Zyngier pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec)); 2127e8137f4fSMarc Zyngier out: 2128b48ac83dSMarc Zyngier info->scratchpad[0].ptr = its_dev; 2129b48ac83dSMarc Zyngier return 0; 2130b48ac83dSMarc Zyngier } 2131b48ac83dSMarc Zyngier 213254456db9SMarc Zyngier static struct msi_domain_ops its_msi_domain_ops = { 213354456db9SMarc Zyngier .msi_prepare = its_msi_prepare, 213454456db9SMarc Zyngier }; 213554456db9SMarc Zyngier 2136b48ac83dSMarc Zyngier static int its_irq_gic_domain_alloc(struct irq_domain *domain, 2137b48ac83dSMarc Zyngier unsigned int virq, 2138b48ac83dSMarc Zyngier irq_hw_number_t hwirq) 2139b48ac83dSMarc Zyngier { 2140f833f57fSMarc Zyngier struct irq_fwspec fwspec; 2141b48ac83dSMarc Zyngier 2142f833f57fSMarc Zyngier if (irq_domain_get_of_node(domain->parent)) { 2143f833f57fSMarc Zyngier fwspec.fwnode = domain->parent->fwnode; 2144f833f57fSMarc Zyngier fwspec.param_count = 3; 2145f833f57fSMarc Zyngier fwspec.param[0] = GIC_IRQ_TYPE_LPI; 2146f833f57fSMarc Zyngier fwspec.param[1] = hwirq; 2147f833f57fSMarc Zyngier fwspec.param[2] = IRQ_TYPE_EDGE_RISING; 21483f010cf1STomasz Nowicki } else if (is_fwnode_irqchip(domain->parent->fwnode)) { 21493f010cf1STomasz Nowicki fwspec.fwnode = domain->parent->fwnode; 21503f010cf1STomasz Nowicki fwspec.param_count = 2; 21513f010cf1STomasz Nowicki fwspec.param[0] = hwirq; 21523f010cf1STomasz Nowicki fwspec.param[1] = IRQ_TYPE_EDGE_RISING; 2153f833f57fSMarc Zyngier } else { 2154f833f57fSMarc Zyngier return -EINVAL; 2155f833f57fSMarc Zyngier } 2156b48ac83dSMarc Zyngier 2157f833f57fSMarc Zyngier return 
irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec); 2158b48ac83dSMarc Zyngier } 2159b48ac83dSMarc Zyngier 2160b48ac83dSMarc Zyngier static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, 2161b48ac83dSMarc Zyngier unsigned int nr_irqs, void *args) 2162b48ac83dSMarc Zyngier { 2163b48ac83dSMarc Zyngier msi_alloc_info_t *info = args; 2164b48ac83dSMarc Zyngier struct its_device *its_dev = info->scratchpad[0].ptr; 2165b48ac83dSMarc Zyngier irq_hw_number_t hwirq; 2166b48ac83dSMarc Zyngier int err; 2167b48ac83dSMarc Zyngier int i; 2168b48ac83dSMarc Zyngier 2169b48ac83dSMarc Zyngier for (i = 0; i < nr_irqs; i++) { 2170b48ac83dSMarc Zyngier err = its_alloc_device_irq(its_dev, &hwirq); 2171b48ac83dSMarc Zyngier if (err) 2172b48ac83dSMarc Zyngier return err; 2173b48ac83dSMarc Zyngier 2174b48ac83dSMarc Zyngier err = its_irq_gic_domain_alloc(domain, virq + i, hwirq); 2175b48ac83dSMarc Zyngier if (err) 2176b48ac83dSMarc Zyngier return err; 2177b48ac83dSMarc Zyngier 2178b48ac83dSMarc Zyngier irq_domain_set_hwirq_and_chip(domain, virq + i, 2179b48ac83dSMarc Zyngier hwirq, &its_irq_chip, its_dev); 21800d224d35SMarc Zyngier irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i))); 2181f130420eSMarc Zyngier pr_debug("ID:%d pID:%d vID:%d\n", 2182591e5becSMarc Zyngier (int)(hwirq - its_dev->event_map.lpi_base), 2183591e5becSMarc Zyngier (int) hwirq, virq + i); 2184b48ac83dSMarc Zyngier } 2185b48ac83dSMarc Zyngier 2186b48ac83dSMarc Zyngier return 0; 2187b48ac83dSMarc Zyngier } 2188b48ac83dSMarc Zyngier 2189aca268dfSMarc Zyngier static void its_irq_domain_activate(struct irq_domain *domain, 2190aca268dfSMarc Zyngier struct irq_data *d) 2191aca268dfSMarc Zyngier { 2192aca268dfSMarc Zyngier struct its_device *its_dev = irq_data_get_irq_chip_data(d); 2193aca268dfSMarc Zyngier u32 event = its_get_event_id(d); 2194fbf8f40eSGanapatrao Kulkarni const struct cpumask *cpu_mask = cpu_online_mask; 21950d224d35SMarc Zyngier int cpu; 2196fbf8f40eSGanapatrao 
Kulkarni 2197fbf8f40eSGanapatrao Kulkarni /* get the cpu_mask of local node */ 2198fbf8f40eSGanapatrao Kulkarni if (its_dev->its->numa_node >= 0) 2199fbf8f40eSGanapatrao Kulkarni cpu_mask = cpumask_of_node(its_dev->its->numa_node); 2200aca268dfSMarc Zyngier 2201591e5becSMarc Zyngier /* Bind the LPI to the first possible CPU */ 22020d224d35SMarc Zyngier cpu = cpumask_first(cpu_mask); 22030d224d35SMarc Zyngier its_dev->event_map.col_map[event] = cpu; 22040d224d35SMarc Zyngier irq_data_update_effective_affinity(d, cpumask_of(cpu)); 2205591e5becSMarc Zyngier 2206aca268dfSMarc Zyngier /* Map the GIC IRQ and event to the device */ 22076a25ad3aSMarc Zyngier its_send_mapti(its_dev, d->hwirq, event); 2208aca268dfSMarc Zyngier } 2209aca268dfSMarc Zyngier 2210aca268dfSMarc Zyngier static void its_irq_domain_deactivate(struct irq_domain *domain, 2211aca268dfSMarc Zyngier struct irq_data *d) 2212aca268dfSMarc Zyngier { 2213aca268dfSMarc Zyngier struct its_device *its_dev = irq_data_get_irq_chip_data(d); 2214aca268dfSMarc Zyngier u32 event = its_get_event_id(d); 2215aca268dfSMarc Zyngier 2216aca268dfSMarc Zyngier /* Stop the delivery of interrupts */ 2217aca268dfSMarc Zyngier its_send_discard(its_dev, event); 2218aca268dfSMarc Zyngier } 2219aca268dfSMarc Zyngier 2220b48ac83dSMarc Zyngier static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq, 2221b48ac83dSMarc Zyngier unsigned int nr_irqs) 2222b48ac83dSMarc Zyngier { 2223b48ac83dSMarc Zyngier struct irq_data *d = irq_domain_get_irq_data(domain, virq); 2224b48ac83dSMarc Zyngier struct its_device *its_dev = irq_data_get_irq_chip_data(d); 2225b48ac83dSMarc Zyngier int i; 2226b48ac83dSMarc Zyngier 2227b48ac83dSMarc Zyngier for (i = 0; i < nr_irqs; i++) { 2228b48ac83dSMarc Zyngier struct irq_data *data = irq_domain_get_irq_data(domain, 2229b48ac83dSMarc Zyngier virq + i); 2230aca268dfSMarc Zyngier u32 event = its_get_event_id(data); 2231b48ac83dSMarc Zyngier 2232b48ac83dSMarc Zyngier /* Mark interrupt index as 
unused */ 2233591e5becSMarc Zyngier clear_bit(event, its_dev->event_map.lpi_map); 2234b48ac83dSMarc Zyngier 2235b48ac83dSMarc Zyngier /* Nuke the entry in the domain */ 22362da39949SMarc Zyngier irq_domain_reset_irq_data(data); 2237b48ac83dSMarc Zyngier } 2238b48ac83dSMarc Zyngier 2239b48ac83dSMarc Zyngier /* If all interrupts have been freed, start mopping the floor */ 2240591e5becSMarc Zyngier if (bitmap_empty(its_dev->event_map.lpi_map, 2241591e5becSMarc Zyngier its_dev->event_map.nr_lpis)) { 2242cf2be8baSMarc Zyngier its_lpi_free_chunks(its_dev->event_map.lpi_map, 2243cf2be8baSMarc Zyngier its_dev->event_map.lpi_base, 2244cf2be8baSMarc Zyngier its_dev->event_map.nr_lpis); 2245cf2be8baSMarc Zyngier kfree(its_dev->event_map.col_map); 2246b48ac83dSMarc Zyngier 2247b48ac83dSMarc Zyngier /* Unmap device/itt */ 2248b48ac83dSMarc Zyngier its_send_mapd(its_dev, 0); 2249b48ac83dSMarc Zyngier its_free_device(its_dev); 2250b48ac83dSMarc Zyngier } 2251b48ac83dSMarc Zyngier 2252b48ac83dSMarc Zyngier irq_domain_free_irqs_parent(domain, virq, nr_irqs); 2253b48ac83dSMarc Zyngier } 2254b48ac83dSMarc Zyngier 2255b48ac83dSMarc Zyngier static const struct irq_domain_ops its_domain_ops = { 2256b48ac83dSMarc Zyngier .alloc = its_irq_domain_alloc, 2257b48ac83dSMarc Zyngier .free = its_irq_domain_free, 2258aca268dfSMarc Zyngier .activate = its_irq_domain_activate, 2259aca268dfSMarc Zyngier .deactivate = its_irq_domain_deactivate, 2260b48ac83dSMarc Zyngier }; 22614c21f3c2SMarc Zyngier 226220b3d54eSMarc Zyngier /* 226320b3d54eSMarc Zyngier * This is insane. 226420b3d54eSMarc Zyngier * 226520b3d54eSMarc Zyngier * If a GICv4 doesn't implement Direct LPIs (which is extremely 226620b3d54eSMarc Zyngier * likely), the only way to perform an invalidate is to use a fake 226720b3d54eSMarc Zyngier * device to issue an INV command, implying that the LPI has first 226820b3d54eSMarc Zyngier * been mapped to some event on that device. 
Since this is not exactly 226920b3d54eSMarc Zyngier * cheap, we try to keep that mapping around as long as possible, and 227020b3d54eSMarc Zyngier * only issue an UNMAP if we're short on available slots. 227120b3d54eSMarc Zyngier * 227220b3d54eSMarc Zyngier * Broken by design(tm). 227320b3d54eSMarc Zyngier */ 227420b3d54eSMarc Zyngier static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe) 227520b3d54eSMarc Zyngier { 227620b3d54eSMarc Zyngier /* Already unmapped? */ 227720b3d54eSMarc Zyngier if (vpe->vpe_proxy_event == -1) 227820b3d54eSMarc Zyngier return; 227920b3d54eSMarc Zyngier 228020b3d54eSMarc Zyngier its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event); 228120b3d54eSMarc Zyngier vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL; 228220b3d54eSMarc Zyngier 228320b3d54eSMarc Zyngier /* 228420b3d54eSMarc Zyngier * We don't track empty slots at all, so let's move the 228520b3d54eSMarc Zyngier * next_victim pointer if we can quickly reuse that slot 228620b3d54eSMarc Zyngier * instead of nuking an existing entry. Not clear that this is 228720b3d54eSMarc Zyngier * always a win though, and this might just generate a ripple 228820b3d54eSMarc Zyngier * effect... Let's just hope VPEs don't migrate too often. 
228920b3d54eSMarc Zyngier */ 229020b3d54eSMarc Zyngier if (vpe_proxy.vpes[vpe_proxy.next_victim]) 229120b3d54eSMarc Zyngier vpe_proxy.next_victim = vpe->vpe_proxy_event; 229220b3d54eSMarc Zyngier 229320b3d54eSMarc Zyngier vpe->vpe_proxy_event = -1; 229420b3d54eSMarc Zyngier } 229520b3d54eSMarc Zyngier 229620b3d54eSMarc Zyngier static void its_vpe_db_proxy_unmap(struct its_vpe *vpe) 229720b3d54eSMarc Zyngier { 229820b3d54eSMarc Zyngier if (!gic_rdists->has_direct_lpi) { 229920b3d54eSMarc Zyngier unsigned long flags; 230020b3d54eSMarc Zyngier 230120b3d54eSMarc Zyngier raw_spin_lock_irqsave(&vpe_proxy.lock, flags); 230220b3d54eSMarc Zyngier its_vpe_db_proxy_unmap_locked(vpe); 230320b3d54eSMarc Zyngier raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags); 230420b3d54eSMarc Zyngier } 230520b3d54eSMarc Zyngier } 230620b3d54eSMarc Zyngier 230720b3d54eSMarc Zyngier static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe) 230820b3d54eSMarc Zyngier { 230920b3d54eSMarc Zyngier /* Already mapped? */ 231020b3d54eSMarc Zyngier if (vpe->vpe_proxy_event != -1) 231120b3d54eSMarc Zyngier return; 231220b3d54eSMarc Zyngier 231320b3d54eSMarc Zyngier /* This slot was already allocated. Kick the other VPE out. 
*/ 231420b3d54eSMarc Zyngier if (vpe_proxy.vpes[vpe_proxy.next_victim]) 231520b3d54eSMarc Zyngier its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]); 231620b3d54eSMarc Zyngier 231720b3d54eSMarc Zyngier /* Map the new VPE instead */ 231820b3d54eSMarc Zyngier vpe_proxy.vpes[vpe_proxy.next_victim] = vpe; 231920b3d54eSMarc Zyngier vpe->vpe_proxy_event = vpe_proxy.next_victim; 232020b3d54eSMarc Zyngier vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % vpe_proxy.dev->nr_ites; 232120b3d54eSMarc Zyngier 232220b3d54eSMarc Zyngier vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx; 232320b3d54eSMarc Zyngier its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event); 232420b3d54eSMarc Zyngier } 232520b3d54eSMarc Zyngier 2326958b90d1SMarc Zyngier static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to) 2327958b90d1SMarc Zyngier { 2328958b90d1SMarc Zyngier unsigned long flags; 2329958b90d1SMarc Zyngier struct its_collection *target_col; 2330958b90d1SMarc Zyngier 2331958b90d1SMarc Zyngier if (gic_rdists->has_direct_lpi) { 2332958b90d1SMarc Zyngier void __iomem *rdbase; 2333958b90d1SMarc Zyngier 2334958b90d1SMarc Zyngier rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base; 2335958b90d1SMarc Zyngier gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR); 2336958b90d1SMarc Zyngier while (gic_read_lpir(rdbase + GICR_SYNCR) & 1) 2337958b90d1SMarc Zyngier cpu_relax(); 2338958b90d1SMarc Zyngier 2339958b90d1SMarc Zyngier return; 2340958b90d1SMarc Zyngier } 2341958b90d1SMarc Zyngier 2342958b90d1SMarc Zyngier raw_spin_lock_irqsave(&vpe_proxy.lock, flags); 2343958b90d1SMarc Zyngier 2344958b90d1SMarc Zyngier its_vpe_db_proxy_map_locked(vpe); 2345958b90d1SMarc Zyngier 2346958b90d1SMarc Zyngier target_col = &vpe_proxy.dev->its->collections[to]; 2347958b90d1SMarc Zyngier its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event); 2348958b90d1SMarc Zyngier vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to; 
2349958b90d1SMarc Zyngier 2350958b90d1SMarc Zyngier raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags); 2351958b90d1SMarc Zyngier } 2352958b90d1SMarc Zyngier 23533171a47aSMarc Zyngier static int its_vpe_set_affinity(struct irq_data *d, 23543171a47aSMarc Zyngier const struct cpumask *mask_val, 23553171a47aSMarc Zyngier bool force) 23563171a47aSMarc Zyngier { 23573171a47aSMarc Zyngier struct its_vpe *vpe = irq_data_get_irq_chip_data(d); 23583171a47aSMarc Zyngier int cpu = cpumask_first(mask_val); 23593171a47aSMarc Zyngier 23603171a47aSMarc Zyngier /* 23613171a47aSMarc Zyngier * Changing affinity is mega expensive, so let's be as lazy as 236220b3d54eSMarc Zyngier * we can and only do it if we really have to. Also, if mapped 2363958b90d1SMarc Zyngier * into the proxy device, we need to move the doorbell 2364958b90d1SMarc Zyngier * interrupt to its new location. 23653171a47aSMarc Zyngier */ 23663171a47aSMarc Zyngier if (vpe->col_idx != cpu) { 2367958b90d1SMarc Zyngier int from = vpe->col_idx; 2368958b90d1SMarc Zyngier 23693171a47aSMarc Zyngier vpe->col_idx = cpu; 23703171a47aSMarc Zyngier its_send_vmovp(vpe); 2371958b90d1SMarc Zyngier its_vpe_db_proxy_move(vpe, from, cpu); 23723171a47aSMarc Zyngier } 23733171a47aSMarc Zyngier 23743171a47aSMarc Zyngier return IRQ_SET_MASK_OK_DONE; 23753171a47aSMarc Zyngier } 23763171a47aSMarc Zyngier 2377e643d803SMarc Zyngier static void its_vpe_schedule(struct its_vpe *vpe) 2378e643d803SMarc Zyngier { 2379e643d803SMarc Zyngier void * __iomem vlpi_base = gic_data_rdist_vlpi_base(); 2380e643d803SMarc Zyngier u64 val; 2381e643d803SMarc Zyngier 2382e643d803SMarc Zyngier /* Schedule the VPE */ 2383e643d803SMarc Zyngier val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) & 2384e643d803SMarc Zyngier GENMASK_ULL(51, 12); 2385e643d803SMarc Zyngier val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK; 2386e643d803SMarc Zyngier val |= GICR_VPROPBASER_RaWb; 2387e643d803SMarc Zyngier val |= GICR_VPROPBASER_InnerShareable; 2388e643d803SMarc 
Zyngier gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); 2389e643d803SMarc Zyngier 2390e643d803SMarc Zyngier val = virt_to_phys(page_address(vpe->vpt_page)) & 2391e643d803SMarc Zyngier GENMASK_ULL(51, 16); 2392e643d803SMarc Zyngier val |= GICR_VPENDBASER_RaWaWb; 2393e643d803SMarc Zyngier val |= GICR_VPENDBASER_NonShareable; 2394e643d803SMarc Zyngier /* 2395e643d803SMarc Zyngier * There is no good way of finding out if the pending table is 2396e643d803SMarc Zyngier * empty as we can race against the doorbell interrupt very 2397e643d803SMarc Zyngier * easily. So in the end, vpe->pending_last is only an 2398e643d803SMarc Zyngier * indication that the vcpu has something pending, not one 2399e643d803SMarc Zyngier * that the pending table is empty. A good implementation 2400e643d803SMarc Zyngier * would be able to read its coarse map pretty quickly anyway, 2401e643d803SMarc Zyngier * making this a tolerable issue. 2402e643d803SMarc Zyngier */ 2403e643d803SMarc Zyngier val |= GICR_VPENDBASER_PendingLast; 2404e643d803SMarc Zyngier val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0; 2405e643d803SMarc Zyngier val |= GICR_VPENDBASER_Valid; 2406e643d803SMarc Zyngier gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); 2407e643d803SMarc Zyngier } 2408e643d803SMarc Zyngier 2409e643d803SMarc Zyngier static void its_vpe_deschedule(struct its_vpe *vpe) 2410e643d803SMarc Zyngier { 2411e643d803SMarc Zyngier void * __iomem vlpi_base = gic_data_rdist_vlpi_base(); 2412e643d803SMarc Zyngier u32 count = 1000000; /* 1s! 
*/ 2413e643d803SMarc Zyngier bool clean; 2414e643d803SMarc Zyngier u64 val; 2415e643d803SMarc Zyngier 2416e643d803SMarc Zyngier /* We're being scheduled out */ 2417e643d803SMarc Zyngier val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER); 2418e643d803SMarc Zyngier val &= ~GICR_VPENDBASER_Valid; 2419e643d803SMarc Zyngier gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); 2420e643d803SMarc Zyngier 2421e643d803SMarc Zyngier do { 2422e643d803SMarc Zyngier val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER); 2423e643d803SMarc Zyngier clean = !(val & GICR_VPENDBASER_Dirty); 2424e643d803SMarc Zyngier if (!clean) { 2425e643d803SMarc Zyngier count--; 2426e643d803SMarc Zyngier cpu_relax(); 2427e643d803SMarc Zyngier udelay(1); 2428e643d803SMarc Zyngier } 2429e643d803SMarc Zyngier } while (!clean && count); 2430e643d803SMarc Zyngier 2431e643d803SMarc Zyngier if (unlikely(!clean && !count)) { 2432e643d803SMarc Zyngier pr_err_ratelimited("ITS virtual pending table not cleaning\n"); 2433e643d803SMarc Zyngier vpe->idai = false; 2434e643d803SMarc Zyngier vpe->pending_last = true; 2435e643d803SMarc Zyngier } else { 2436e643d803SMarc Zyngier vpe->idai = !!(val & GICR_VPENDBASER_IDAI); 2437e643d803SMarc Zyngier vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast); 2438e643d803SMarc Zyngier } 2439e643d803SMarc Zyngier } 2440e643d803SMarc Zyngier 2441e643d803SMarc Zyngier static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) 2442e643d803SMarc Zyngier { 2443e643d803SMarc Zyngier struct its_vpe *vpe = irq_data_get_irq_chip_data(d); 2444e643d803SMarc Zyngier struct its_cmd_info *info = vcpu_info; 2445e643d803SMarc Zyngier 2446e643d803SMarc Zyngier switch (info->cmd_type) { 2447e643d803SMarc Zyngier case SCHEDULE_VPE: 2448e643d803SMarc Zyngier its_vpe_schedule(vpe); 2449e643d803SMarc Zyngier return 0; 2450e643d803SMarc Zyngier 2451e643d803SMarc Zyngier case DESCHEDULE_VPE: 2452e643d803SMarc Zyngier its_vpe_deschedule(vpe); 2453e643d803SMarc Zyngier 
return 0; 2454e643d803SMarc Zyngier 24555e2f7642SMarc Zyngier case INVALL_VPE: 24565e2f7642SMarc Zyngier its_send_vinvall(vpe); 24575e2f7642SMarc Zyngier return 0; 24585e2f7642SMarc Zyngier 2459e643d803SMarc Zyngier default: 2460e643d803SMarc Zyngier return -EINVAL; 2461e643d803SMarc Zyngier } 2462e643d803SMarc Zyngier } 2463e643d803SMarc Zyngier 246420b3d54eSMarc Zyngier static void its_vpe_send_cmd(struct its_vpe *vpe, 246520b3d54eSMarc Zyngier void (*cmd)(struct its_device *, u32)) 246620b3d54eSMarc Zyngier { 246720b3d54eSMarc Zyngier unsigned long flags; 246820b3d54eSMarc Zyngier 246920b3d54eSMarc Zyngier raw_spin_lock_irqsave(&vpe_proxy.lock, flags); 247020b3d54eSMarc Zyngier 247120b3d54eSMarc Zyngier its_vpe_db_proxy_map_locked(vpe); 247220b3d54eSMarc Zyngier cmd(vpe_proxy.dev, vpe->vpe_proxy_event); 247320b3d54eSMarc Zyngier 247420b3d54eSMarc Zyngier raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags); 247520b3d54eSMarc Zyngier } 247620b3d54eSMarc Zyngier 2477f6a91da7SMarc Zyngier static void its_vpe_send_inv(struct irq_data *d) 2478f6a91da7SMarc Zyngier { 2479f6a91da7SMarc Zyngier struct its_vpe *vpe = irq_data_get_irq_chip_data(d); 248020b3d54eSMarc Zyngier 248120b3d54eSMarc Zyngier if (gic_rdists->has_direct_lpi) { 2482f6a91da7SMarc Zyngier void __iomem *rdbase; 2483f6a91da7SMarc Zyngier 2484f6a91da7SMarc Zyngier rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base; 2485f6a91da7SMarc Zyngier gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_INVLPIR); 2486f6a91da7SMarc Zyngier while (gic_read_lpir(rdbase + GICR_SYNCR) & 1) 2487f6a91da7SMarc Zyngier cpu_relax(); 248820b3d54eSMarc Zyngier } else { 248920b3d54eSMarc Zyngier its_vpe_send_cmd(vpe, its_send_inv); 249020b3d54eSMarc Zyngier } 2491f6a91da7SMarc Zyngier } 2492f6a91da7SMarc Zyngier 2493f6a91da7SMarc Zyngier static void its_vpe_mask_irq(struct irq_data *d) 2494f6a91da7SMarc Zyngier { 2495f6a91da7SMarc Zyngier /* 2496f6a91da7SMarc Zyngier * We need to unmask the LPI, which is described by the 
parent 2497f6a91da7SMarc Zyngier * irq_data. Instead of calling into the parent (which won't 2498f6a91da7SMarc Zyngier * exactly do the right thing, let's simply use the 2499f6a91da7SMarc Zyngier * parent_data pointer. Yes, I'm naughty. 2500f6a91da7SMarc Zyngier */ 2501f6a91da7SMarc Zyngier lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0); 2502f6a91da7SMarc Zyngier its_vpe_send_inv(d); 2503f6a91da7SMarc Zyngier } 2504f6a91da7SMarc Zyngier 2505f6a91da7SMarc Zyngier static void its_vpe_unmask_irq(struct irq_data *d) 2506f6a91da7SMarc Zyngier { 2507f6a91da7SMarc Zyngier /* Same hack as above... */ 2508f6a91da7SMarc Zyngier lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED); 2509f6a91da7SMarc Zyngier its_vpe_send_inv(d); 2510f6a91da7SMarc Zyngier } 2511f6a91da7SMarc Zyngier 2512e57a3e28SMarc Zyngier static int its_vpe_set_irqchip_state(struct irq_data *d, 2513e57a3e28SMarc Zyngier enum irqchip_irq_state which, 2514e57a3e28SMarc Zyngier bool state) 2515e57a3e28SMarc Zyngier { 2516e57a3e28SMarc Zyngier struct its_vpe *vpe = irq_data_get_irq_chip_data(d); 2517e57a3e28SMarc Zyngier 2518e57a3e28SMarc Zyngier if (which != IRQCHIP_STATE_PENDING) 2519e57a3e28SMarc Zyngier return -EINVAL; 2520e57a3e28SMarc Zyngier 2521e57a3e28SMarc Zyngier if (gic_rdists->has_direct_lpi) { 2522e57a3e28SMarc Zyngier void __iomem *rdbase; 2523e57a3e28SMarc Zyngier 2524e57a3e28SMarc Zyngier rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base; 2525e57a3e28SMarc Zyngier if (state) { 2526e57a3e28SMarc Zyngier gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR); 2527e57a3e28SMarc Zyngier } else { 2528e57a3e28SMarc Zyngier gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR); 2529e57a3e28SMarc Zyngier while (gic_read_lpir(rdbase + GICR_SYNCR) & 1) 2530e57a3e28SMarc Zyngier cpu_relax(); 2531e57a3e28SMarc Zyngier } 2532e57a3e28SMarc Zyngier } else { 2533e57a3e28SMarc Zyngier if (state) 2534e57a3e28SMarc Zyngier its_vpe_send_cmd(vpe, its_send_int); 2535e57a3e28SMarc Zyngier else 
2536e57a3e28SMarc Zyngier its_vpe_send_cmd(vpe, its_send_clear); 2537e57a3e28SMarc Zyngier } 2538e57a3e28SMarc Zyngier 2539e57a3e28SMarc Zyngier return 0; 2540e57a3e28SMarc Zyngier } 2541e57a3e28SMarc Zyngier 25428fff27aeSMarc Zyngier static struct irq_chip its_vpe_irq_chip = { 25438fff27aeSMarc Zyngier .name = "GICv4-vpe", 2544f6a91da7SMarc Zyngier .irq_mask = its_vpe_mask_irq, 2545f6a91da7SMarc Zyngier .irq_unmask = its_vpe_unmask_irq, 2546f6a91da7SMarc Zyngier .irq_eoi = irq_chip_eoi_parent, 25473171a47aSMarc Zyngier .irq_set_affinity = its_vpe_set_affinity, 2548e57a3e28SMarc Zyngier .irq_set_irqchip_state = its_vpe_set_irqchip_state, 2549e643d803SMarc Zyngier .irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity, 25508fff27aeSMarc Zyngier }; 25518fff27aeSMarc Zyngier 25527d75bbb4SMarc Zyngier static int its_vpe_id_alloc(void) 25537d75bbb4SMarc Zyngier { 25547d75bbb4SMarc Zyngier return ida_simple_get(&its_vpeid_ida, 0, 1 << 16, GFP_KERNEL); 25557d75bbb4SMarc Zyngier } 25567d75bbb4SMarc Zyngier 25577d75bbb4SMarc Zyngier static void its_vpe_id_free(u16 id) 25587d75bbb4SMarc Zyngier { 25597d75bbb4SMarc Zyngier ida_simple_remove(&its_vpeid_ida, id); 25607d75bbb4SMarc Zyngier } 25617d75bbb4SMarc Zyngier 25627d75bbb4SMarc Zyngier static int its_vpe_init(struct its_vpe *vpe) 25637d75bbb4SMarc Zyngier { 25647d75bbb4SMarc Zyngier struct page *vpt_page; 25657d75bbb4SMarc Zyngier int vpe_id; 25667d75bbb4SMarc Zyngier 25677d75bbb4SMarc Zyngier /* Allocate vpe_id */ 25687d75bbb4SMarc Zyngier vpe_id = its_vpe_id_alloc(); 25697d75bbb4SMarc Zyngier if (vpe_id < 0) 25707d75bbb4SMarc Zyngier return vpe_id; 25717d75bbb4SMarc Zyngier 25727d75bbb4SMarc Zyngier /* Allocate VPT */ 25737d75bbb4SMarc Zyngier vpt_page = its_allocate_pending_table(GFP_KERNEL); 25747d75bbb4SMarc Zyngier if (!vpt_page) { 25757d75bbb4SMarc Zyngier its_vpe_id_free(vpe_id); 25767d75bbb4SMarc Zyngier return -ENOMEM; 25777d75bbb4SMarc Zyngier } 25787d75bbb4SMarc Zyngier 25797d75bbb4SMarc Zyngier if 
(!its_alloc_vpe_table(vpe_id)) { 25807d75bbb4SMarc Zyngier its_vpe_id_free(vpe_id); 25817d75bbb4SMarc Zyngier its_free_pending_table(vpe->vpt_page); 25827d75bbb4SMarc Zyngier return -ENOMEM; 25837d75bbb4SMarc Zyngier } 25847d75bbb4SMarc Zyngier 25857d75bbb4SMarc Zyngier vpe->vpe_id = vpe_id; 25867d75bbb4SMarc Zyngier vpe->vpt_page = vpt_page; 258720b3d54eSMarc Zyngier vpe->vpe_proxy_event = -1; 25887d75bbb4SMarc Zyngier 25897d75bbb4SMarc Zyngier return 0; 25907d75bbb4SMarc Zyngier } 25917d75bbb4SMarc Zyngier 25927d75bbb4SMarc Zyngier static void its_vpe_teardown(struct its_vpe *vpe) 25937d75bbb4SMarc Zyngier { 259420b3d54eSMarc Zyngier its_vpe_db_proxy_unmap(vpe); 25957d75bbb4SMarc Zyngier its_vpe_id_free(vpe->vpe_id); 25967d75bbb4SMarc Zyngier its_free_pending_table(vpe->vpt_page); 25977d75bbb4SMarc Zyngier } 25987d75bbb4SMarc Zyngier 25997d75bbb4SMarc Zyngier static void its_vpe_irq_domain_free(struct irq_domain *domain, 26007d75bbb4SMarc Zyngier unsigned int virq, 26017d75bbb4SMarc Zyngier unsigned int nr_irqs) 26027d75bbb4SMarc Zyngier { 26037d75bbb4SMarc Zyngier struct its_vm *vm = domain->host_data; 26047d75bbb4SMarc Zyngier int i; 26057d75bbb4SMarc Zyngier 26067d75bbb4SMarc Zyngier irq_domain_free_irqs_parent(domain, virq, nr_irqs); 26077d75bbb4SMarc Zyngier 26087d75bbb4SMarc Zyngier for (i = 0; i < nr_irqs; i++) { 26097d75bbb4SMarc Zyngier struct irq_data *data = irq_domain_get_irq_data(domain, 26107d75bbb4SMarc Zyngier virq + i); 26117d75bbb4SMarc Zyngier struct its_vpe *vpe = irq_data_get_irq_chip_data(data); 26127d75bbb4SMarc Zyngier 26137d75bbb4SMarc Zyngier BUG_ON(vm != vpe->its_vm); 26147d75bbb4SMarc Zyngier 26157d75bbb4SMarc Zyngier clear_bit(data->hwirq, vm->db_bitmap); 26167d75bbb4SMarc Zyngier its_vpe_teardown(vpe); 26177d75bbb4SMarc Zyngier irq_domain_reset_irq_data(data); 26187d75bbb4SMarc Zyngier } 26197d75bbb4SMarc Zyngier 26207d75bbb4SMarc Zyngier if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) { 26217d75bbb4SMarc Zyngier 
its_lpi_free_chunks(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis); 26227d75bbb4SMarc Zyngier its_free_prop_table(vm->vprop_page); 26237d75bbb4SMarc Zyngier } 26247d75bbb4SMarc Zyngier } 26257d75bbb4SMarc Zyngier 26267d75bbb4SMarc Zyngier static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, 26277d75bbb4SMarc Zyngier unsigned int nr_irqs, void *args) 26287d75bbb4SMarc Zyngier { 26297d75bbb4SMarc Zyngier struct its_vm *vm = args; 26307d75bbb4SMarc Zyngier unsigned long *bitmap; 26317d75bbb4SMarc Zyngier struct page *vprop_page; 26327d75bbb4SMarc Zyngier int base, nr_ids, i, err = 0; 26337d75bbb4SMarc Zyngier 26347d75bbb4SMarc Zyngier BUG_ON(!vm); 26357d75bbb4SMarc Zyngier 26367d75bbb4SMarc Zyngier bitmap = its_lpi_alloc_chunks(nr_irqs, &base, &nr_ids); 26377d75bbb4SMarc Zyngier if (!bitmap) 26387d75bbb4SMarc Zyngier return -ENOMEM; 26397d75bbb4SMarc Zyngier 26407d75bbb4SMarc Zyngier if (nr_ids < nr_irqs) { 26417d75bbb4SMarc Zyngier its_lpi_free_chunks(bitmap, base, nr_ids); 26427d75bbb4SMarc Zyngier return -ENOMEM; 26437d75bbb4SMarc Zyngier } 26447d75bbb4SMarc Zyngier 26457d75bbb4SMarc Zyngier vprop_page = its_allocate_prop_table(GFP_KERNEL); 26467d75bbb4SMarc Zyngier if (!vprop_page) { 26477d75bbb4SMarc Zyngier its_lpi_free_chunks(bitmap, base, nr_ids); 26487d75bbb4SMarc Zyngier return -ENOMEM; 26497d75bbb4SMarc Zyngier } 26507d75bbb4SMarc Zyngier 26517d75bbb4SMarc Zyngier vm->db_bitmap = bitmap; 26527d75bbb4SMarc Zyngier vm->db_lpi_base = base; 26537d75bbb4SMarc Zyngier vm->nr_db_lpis = nr_ids; 26547d75bbb4SMarc Zyngier vm->vprop_page = vprop_page; 26557d75bbb4SMarc Zyngier 26567d75bbb4SMarc Zyngier for (i = 0; i < nr_irqs; i++) { 26577d75bbb4SMarc Zyngier vm->vpes[i]->vpe_db_lpi = base + i; 26587d75bbb4SMarc Zyngier err = its_vpe_init(vm->vpes[i]); 26597d75bbb4SMarc Zyngier if (err) 26607d75bbb4SMarc Zyngier break; 26617d75bbb4SMarc Zyngier err = its_irq_gic_domain_alloc(domain, virq + i, 26627d75bbb4SMarc Zyngier 
vm->vpes[i]->vpe_db_lpi); 26637d75bbb4SMarc Zyngier if (err) 26647d75bbb4SMarc Zyngier break; 26657d75bbb4SMarc Zyngier irq_domain_set_hwirq_and_chip(domain, virq + i, i, 26667d75bbb4SMarc Zyngier &its_vpe_irq_chip, vm->vpes[i]); 26677d75bbb4SMarc Zyngier set_bit(i, bitmap); 26687d75bbb4SMarc Zyngier } 26697d75bbb4SMarc Zyngier 26707d75bbb4SMarc Zyngier if (err) { 26717d75bbb4SMarc Zyngier if (i > 0) 26727d75bbb4SMarc Zyngier its_vpe_irq_domain_free(domain, virq, i - 1); 26737d75bbb4SMarc Zyngier 26747d75bbb4SMarc Zyngier its_lpi_free_chunks(bitmap, base, nr_ids); 26757d75bbb4SMarc Zyngier its_free_prop_table(vprop_page); 26767d75bbb4SMarc Zyngier } 26777d75bbb4SMarc Zyngier 26787d75bbb4SMarc Zyngier return err; 26797d75bbb4SMarc Zyngier } 26807d75bbb4SMarc Zyngier 2681eb78192bSMarc Zyngier static void its_vpe_irq_domain_activate(struct irq_domain *domain, 2682eb78192bSMarc Zyngier struct irq_data *d) 2683eb78192bSMarc Zyngier { 2684eb78192bSMarc Zyngier struct its_vpe *vpe = irq_data_get_irq_chip_data(d); 2685eb78192bSMarc Zyngier 2686eb78192bSMarc Zyngier /* Map the VPE to the first possible CPU */ 2687eb78192bSMarc Zyngier vpe->col_idx = cpumask_first(cpu_online_mask); 2688eb78192bSMarc Zyngier its_send_vmapp(vpe, true); 2689eb78192bSMarc Zyngier its_send_vinvall(vpe); 2690eb78192bSMarc Zyngier } 2691eb78192bSMarc Zyngier 2692eb78192bSMarc Zyngier static void its_vpe_irq_domain_deactivate(struct irq_domain *domain, 2693eb78192bSMarc Zyngier struct irq_data *d) 2694eb78192bSMarc Zyngier { 2695eb78192bSMarc Zyngier struct its_vpe *vpe = irq_data_get_irq_chip_data(d); 2696eb78192bSMarc Zyngier 2697eb78192bSMarc Zyngier its_send_vmapp(vpe, false); 2698eb78192bSMarc Zyngier } 2699eb78192bSMarc Zyngier 27008fff27aeSMarc Zyngier static const struct irq_domain_ops its_vpe_domain_ops = { 27017d75bbb4SMarc Zyngier .alloc = its_vpe_irq_domain_alloc, 27027d75bbb4SMarc Zyngier .free = its_vpe_irq_domain_free, 2703eb78192bSMarc Zyngier .activate = its_vpe_irq_domain_activate, 
2704eb78192bSMarc Zyngier .deactivate = its_vpe_irq_domain_deactivate, 27058fff27aeSMarc Zyngier }; 27068fff27aeSMarc Zyngier 27074559fbb3SYun Wu static int its_force_quiescent(void __iomem *base) 27084559fbb3SYun Wu { 27094559fbb3SYun Wu u32 count = 1000000; /* 1s */ 27104559fbb3SYun Wu u32 val; 27114559fbb3SYun Wu 27124559fbb3SYun Wu val = readl_relaxed(base + GITS_CTLR); 27137611da86SDavid Daney /* 27147611da86SDavid Daney * GIC architecture specification requires the ITS to be both 27157611da86SDavid Daney * disabled and quiescent for writes to GITS_BASER<n> or 27167611da86SDavid Daney * GITS_CBASER to not have UNPREDICTABLE results. 27177611da86SDavid Daney */ 27187611da86SDavid Daney if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE)) 27194559fbb3SYun Wu return 0; 27204559fbb3SYun Wu 27214559fbb3SYun Wu /* Disable the generation of all interrupts to this ITS */ 2722d51c4b4dSMarc Zyngier val &= ~(GITS_CTLR_ENABLE | GITS_CTLR_ImDe); 27234559fbb3SYun Wu writel_relaxed(val, base + GITS_CTLR); 27244559fbb3SYun Wu 27254559fbb3SYun Wu /* Poll GITS_CTLR and wait until ITS becomes quiescent */ 27264559fbb3SYun Wu while (1) { 27274559fbb3SYun Wu val = readl_relaxed(base + GITS_CTLR); 27284559fbb3SYun Wu if (val & GITS_CTLR_QUIESCENT) 27294559fbb3SYun Wu return 0; 27304559fbb3SYun Wu 27314559fbb3SYun Wu count--; 27324559fbb3SYun Wu if (!count) 27334559fbb3SYun Wu return -EBUSY; 27344559fbb3SYun Wu 27354559fbb3SYun Wu cpu_relax(); 27364559fbb3SYun Wu udelay(1); 27374559fbb3SYun Wu } 27384559fbb3SYun Wu } 27394559fbb3SYun Wu 274094100970SRobert Richter static void __maybe_unused its_enable_quirk_cavium_22375(void *data) 274194100970SRobert Richter { 274294100970SRobert Richter struct its_node *its = data; 274394100970SRobert Richter 274494100970SRobert Richter its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375; 274594100970SRobert Richter } 274694100970SRobert Richter 2747fbf8f40eSGanapatrao Kulkarni static void __maybe_unused its_enable_quirk_cavium_23144(void 
*data) 2748fbf8f40eSGanapatrao Kulkarni { 2749fbf8f40eSGanapatrao Kulkarni struct its_node *its = data; 2750fbf8f40eSGanapatrao Kulkarni 2751fbf8f40eSGanapatrao Kulkarni its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144; 2752fbf8f40eSGanapatrao Kulkarni } 2753fbf8f40eSGanapatrao Kulkarni 275490922a2dSShanker Donthineni static void __maybe_unused its_enable_quirk_qdf2400_e0065(void *data) 275590922a2dSShanker Donthineni { 275690922a2dSShanker Donthineni struct its_node *its = data; 275790922a2dSShanker Donthineni 275890922a2dSShanker Donthineni /* On QDF2400, the size of the ITE is 16Bytes */ 275990922a2dSShanker Donthineni its->ite_size = 16; 276090922a2dSShanker Donthineni } 276190922a2dSShanker Donthineni 276267510ccaSRobert Richter static const struct gic_quirk its_quirks[] = { 276394100970SRobert Richter #ifdef CONFIG_CAVIUM_ERRATUM_22375 276494100970SRobert Richter { 276594100970SRobert Richter .desc = "ITS: Cavium errata 22375, 24313", 276694100970SRobert Richter .iidr = 0xa100034c, /* ThunderX pass 1.x */ 276794100970SRobert Richter .mask = 0xffff0fff, 276894100970SRobert Richter .init = its_enable_quirk_cavium_22375, 276994100970SRobert Richter }, 277094100970SRobert Richter #endif 2771fbf8f40eSGanapatrao Kulkarni #ifdef CONFIG_CAVIUM_ERRATUM_23144 2772fbf8f40eSGanapatrao Kulkarni { 2773fbf8f40eSGanapatrao Kulkarni .desc = "ITS: Cavium erratum 23144", 2774fbf8f40eSGanapatrao Kulkarni .iidr = 0xa100034c, /* ThunderX pass 1.x */ 2775fbf8f40eSGanapatrao Kulkarni .mask = 0xffff0fff, 2776fbf8f40eSGanapatrao Kulkarni .init = its_enable_quirk_cavium_23144, 2777fbf8f40eSGanapatrao Kulkarni }, 2778fbf8f40eSGanapatrao Kulkarni #endif 277990922a2dSShanker Donthineni #ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065 278090922a2dSShanker Donthineni { 278190922a2dSShanker Donthineni .desc = "ITS: QDF2400 erratum 0065", 278290922a2dSShanker Donthineni .iidr = 0x00001070, /* QDF2400 ITS rev 1.x */ 278390922a2dSShanker Donthineni .mask = 0xffffffff, 278490922a2dSShanker Donthineni 
.init = its_enable_quirk_qdf2400_e0065, 278590922a2dSShanker Donthineni }, 278690922a2dSShanker Donthineni #endif 278767510ccaSRobert Richter { 278867510ccaSRobert Richter } 278967510ccaSRobert Richter }; 279067510ccaSRobert Richter 279167510ccaSRobert Richter static void its_enable_quirks(struct its_node *its) 279267510ccaSRobert Richter { 279367510ccaSRobert Richter u32 iidr = readl_relaxed(its->base + GITS_IIDR); 279467510ccaSRobert Richter 279567510ccaSRobert Richter gic_enable_quirks(iidr, its_quirks, its); 279667510ccaSRobert Richter } 279767510ccaSRobert Richter 2798db40f0a7STomasz Nowicki static int its_init_domain(struct fwnode_handle *handle, struct its_node *its) 2799d14ae5e6STomasz Nowicki { 2800d14ae5e6STomasz Nowicki struct irq_domain *inner_domain; 2801d14ae5e6STomasz Nowicki struct msi_domain_info *info; 2802d14ae5e6STomasz Nowicki 2803d14ae5e6STomasz Nowicki info = kzalloc(sizeof(*info), GFP_KERNEL); 2804d14ae5e6STomasz Nowicki if (!info) 2805d14ae5e6STomasz Nowicki return -ENOMEM; 2806d14ae5e6STomasz Nowicki 2807db40f0a7STomasz Nowicki inner_domain = irq_domain_create_tree(handle, &its_domain_ops, its); 2808d14ae5e6STomasz Nowicki if (!inner_domain) { 2809d14ae5e6STomasz Nowicki kfree(info); 2810d14ae5e6STomasz Nowicki return -ENOMEM; 2811d14ae5e6STomasz Nowicki } 2812d14ae5e6STomasz Nowicki 2813db40f0a7STomasz Nowicki inner_domain->parent = its_parent; 281496f0d93aSMarc Zyngier irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS); 281559768527SEric Auger inner_domain->flags |= IRQ_DOMAIN_FLAG_MSI_REMAP; 2816d14ae5e6STomasz Nowicki info->ops = &its_msi_domain_ops; 2817d14ae5e6STomasz Nowicki info->data = its; 2818d14ae5e6STomasz Nowicki inner_domain->host_data = info; 2819d14ae5e6STomasz Nowicki 2820d14ae5e6STomasz Nowicki return 0; 2821d14ae5e6STomasz Nowicki } 2822d14ae5e6STomasz Nowicki 28238fff27aeSMarc Zyngier static int its_init_vpe_domain(void) 28248fff27aeSMarc Zyngier { 282520b3d54eSMarc Zyngier struct its_node *its; 
282620b3d54eSMarc Zyngier u32 devid; 282720b3d54eSMarc Zyngier int entries; 282820b3d54eSMarc Zyngier 282920b3d54eSMarc Zyngier if (gic_rdists->has_direct_lpi) { 283020b3d54eSMarc Zyngier pr_info("ITS: Using DirectLPI for VPE invalidation\n"); 283120b3d54eSMarc Zyngier return 0; 283220b3d54eSMarc Zyngier } 283320b3d54eSMarc Zyngier 283420b3d54eSMarc Zyngier /* Any ITS will do, even if not v4 */ 283520b3d54eSMarc Zyngier its = list_first_entry(&its_nodes, struct its_node, entry); 283620b3d54eSMarc Zyngier 283720b3d54eSMarc Zyngier entries = roundup_pow_of_two(nr_cpu_ids); 283820b3d54eSMarc Zyngier vpe_proxy.vpes = kzalloc(sizeof(*vpe_proxy.vpes) * entries, 283920b3d54eSMarc Zyngier GFP_KERNEL); 284020b3d54eSMarc Zyngier if (!vpe_proxy.vpes) { 284120b3d54eSMarc Zyngier pr_err("ITS: Can't allocate GICv4 proxy device array\n"); 284220b3d54eSMarc Zyngier return -ENOMEM; 284320b3d54eSMarc Zyngier } 284420b3d54eSMarc Zyngier 284520b3d54eSMarc Zyngier /* Use the last possible DevID */ 284620b3d54eSMarc Zyngier devid = GENMASK(its->device_ids - 1, 0); 284720b3d54eSMarc Zyngier vpe_proxy.dev = its_create_device(its, devid, entries, false); 284820b3d54eSMarc Zyngier if (!vpe_proxy.dev) { 284920b3d54eSMarc Zyngier kfree(vpe_proxy.vpes); 285020b3d54eSMarc Zyngier pr_err("ITS: Can't allocate GICv4 proxy device\n"); 285120b3d54eSMarc Zyngier return -ENOMEM; 285220b3d54eSMarc Zyngier } 285320b3d54eSMarc Zyngier 2854*c427a475SShanker Donthineni BUG_ON(entries > vpe_proxy.dev->nr_ites); 285520b3d54eSMarc Zyngier 285620b3d54eSMarc Zyngier raw_spin_lock_init(&vpe_proxy.lock); 285720b3d54eSMarc Zyngier vpe_proxy.next_victim = 0; 285820b3d54eSMarc Zyngier pr_info("ITS: Allocated DevID %x as GICv4 proxy device (%d slots)\n", 285920b3d54eSMarc Zyngier devid, vpe_proxy.dev->nr_ites); 286020b3d54eSMarc Zyngier 28618fff27aeSMarc Zyngier return 0; 28628fff27aeSMarc Zyngier } 28638fff27aeSMarc Zyngier 28643dfa576bSMarc Zyngier static int __init its_compute_its_list_map(struct resource *res, 
28653dfa576bSMarc Zyngier void __iomem *its_base) 28663dfa576bSMarc Zyngier { 28673dfa576bSMarc Zyngier int its_number; 28683dfa576bSMarc Zyngier u32 ctlr; 28693dfa576bSMarc Zyngier 28703dfa576bSMarc Zyngier /* 28713dfa576bSMarc Zyngier * This is assumed to be done early enough that we're 28723dfa576bSMarc Zyngier * guaranteed to be single-threaded, hence no 28733dfa576bSMarc Zyngier * locking. Should this change, we should address 28743dfa576bSMarc Zyngier * this. 28753dfa576bSMarc Zyngier */ 28763dfa576bSMarc Zyngier its_number = find_first_zero_bit(&its_list_map, ITS_LIST_MAX); 28773dfa576bSMarc Zyngier if (its_number >= ITS_LIST_MAX) { 28783dfa576bSMarc Zyngier pr_err("ITS@%pa: No ITSList entry available!\n", 28793dfa576bSMarc Zyngier &res->start); 28803dfa576bSMarc Zyngier return -EINVAL; 28813dfa576bSMarc Zyngier } 28823dfa576bSMarc Zyngier 28833dfa576bSMarc Zyngier ctlr = readl_relaxed(its_base + GITS_CTLR); 28843dfa576bSMarc Zyngier ctlr &= ~GITS_CTLR_ITS_NUMBER; 28853dfa576bSMarc Zyngier ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT; 28863dfa576bSMarc Zyngier writel_relaxed(ctlr, its_base + GITS_CTLR); 28873dfa576bSMarc Zyngier ctlr = readl_relaxed(its_base + GITS_CTLR); 28883dfa576bSMarc Zyngier if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) { 28893dfa576bSMarc Zyngier its_number = ctlr & GITS_CTLR_ITS_NUMBER; 28903dfa576bSMarc Zyngier its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT; 28913dfa576bSMarc Zyngier } 28923dfa576bSMarc Zyngier 28933dfa576bSMarc Zyngier if (test_and_set_bit(its_number, &its_list_map)) { 28943dfa576bSMarc Zyngier pr_err("ITS@%pa: Duplicate ITSList entry %d\n", 28953dfa576bSMarc Zyngier &res->start, its_number); 28963dfa576bSMarc Zyngier return -EINVAL; 28973dfa576bSMarc Zyngier } 28983dfa576bSMarc Zyngier 28993dfa576bSMarc Zyngier return its_number; 29003dfa576bSMarc Zyngier } 29013dfa576bSMarc Zyngier 2902db40f0a7STomasz Nowicki static int __init its_probe_one(struct resource *res, 
2903db40f0a7STomasz Nowicki struct fwnode_handle *handle, int numa_node) 29044c21f3c2SMarc Zyngier { 29054c21f3c2SMarc Zyngier struct its_node *its; 29064c21f3c2SMarc Zyngier void __iomem *its_base; 29073dfa576bSMarc Zyngier u32 val, ctlr; 29083dfa576bSMarc Zyngier u64 baser, tmp, typer; 29094c21f3c2SMarc Zyngier int err; 29104c21f3c2SMarc Zyngier 2911db40f0a7STomasz Nowicki its_base = ioremap(res->start, resource_size(res)); 29124c21f3c2SMarc Zyngier if (!its_base) { 2913db40f0a7STomasz Nowicki pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start); 29144c21f3c2SMarc Zyngier return -ENOMEM; 29154c21f3c2SMarc Zyngier } 29164c21f3c2SMarc Zyngier 29174c21f3c2SMarc Zyngier val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK; 29184c21f3c2SMarc Zyngier if (val != 0x30 && val != 0x40) { 2919db40f0a7STomasz Nowicki pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start); 29204c21f3c2SMarc Zyngier err = -ENODEV; 29214c21f3c2SMarc Zyngier goto out_unmap; 29224c21f3c2SMarc Zyngier } 29234c21f3c2SMarc Zyngier 29244559fbb3SYun Wu err = its_force_quiescent(its_base); 29254559fbb3SYun Wu if (err) { 2926db40f0a7STomasz Nowicki pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start); 29274559fbb3SYun Wu goto out_unmap; 29284559fbb3SYun Wu } 29294559fbb3SYun Wu 2930db40f0a7STomasz Nowicki pr_info("ITS %pR\n", res); 29314c21f3c2SMarc Zyngier 29324c21f3c2SMarc Zyngier its = kzalloc(sizeof(*its), GFP_KERNEL); 29334c21f3c2SMarc Zyngier if (!its) { 29344c21f3c2SMarc Zyngier err = -ENOMEM; 29354c21f3c2SMarc Zyngier goto out_unmap; 29364c21f3c2SMarc Zyngier } 29374c21f3c2SMarc Zyngier 29384c21f3c2SMarc Zyngier raw_spin_lock_init(&its->lock); 29394c21f3c2SMarc Zyngier INIT_LIST_HEAD(&its->entry); 29404c21f3c2SMarc Zyngier INIT_LIST_HEAD(&its->its_device_list); 29413dfa576bSMarc Zyngier typer = gic_read_typer(its_base + GITS_TYPER); 29424c21f3c2SMarc Zyngier its->base = its_base; 2943db40f0a7STomasz Nowicki its->phys_base = res->start; 29443dfa576bSMarc 
Zyngier its->ite_size = GITS_TYPER_ITT_ENTRY_SIZE(typer); 29453dfa576bSMarc Zyngier its->is_v4 = !!(typer & GITS_TYPER_VLPIS); 29463dfa576bSMarc Zyngier if (its->is_v4) { 29473dfa576bSMarc Zyngier if (!(typer & GITS_TYPER_VMOVP)) { 29483dfa576bSMarc Zyngier err = its_compute_its_list_map(res, its_base); 29493dfa576bSMarc Zyngier if (err < 0) 29503dfa576bSMarc Zyngier goto out_free_its; 29513dfa576bSMarc Zyngier 29523dfa576bSMarc Zyngier pr_info("ITS@%pa: Using ITS number %d\n", 29533dfa576bSMarc Zyngier &res->start, err); 29543dfa576bSMarc Zyngier } else { 29553dfa576bSMarc Zyngier pr_info("ITS@%pa: Single VMOVP capable\n", &res->start); 29563dfa576bSMarc Zyngier } 29573dfa576bSMarc Zyngier } 29583dfa576bSMarc Zyngier 2959db40f0a7STomasz Nowicki its->numa_node = numa_node; 29604c21f3c2SMarc Zyngier 29615bc13c2cSRobert Richter its->cmd_base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 29625bc13c2cSRobert Richter get_order(ITS_CMD_QUEUE_SZ)); 29634c21f3c2SMarc Zyngier if (!its->cmd_base) { 29644c21f3c2SMarc Zyngier err = -ENOMEM; 29654c21f3c2SMarc Zyngier goto out_free_its; 29664c21f3c2SMarc Zyngier } 29674c21f3c2SMarc Zyngier its->cmd_write = its->cmd_base; 29684c21f3c2SMarc Zyngier 296967510ccaSRobert Richter its_enable_quirks(its); 297067510ccaSRobert Richter 29710e0b0f69SShanker Donthineni err = its_alloc_tables(its); 29724c21f3c2SMarc Zyngier if (err) 29734c21f3c2SMarc Zyngier goto out_free_cmd; 29744c21f3c2SMarc Zyngier 29754c21f3c2SMarc Zyngier err = its_alloc_collections(its); 29764c21f3c2SMarc Zyngier if (err) 29774c21f3c2SMarc Zyngier goto out_free_tables; 29784c21f3c2SMarc Zyngier 29794c21f3c2SMarc Zyngier baser = (virt_to_phys(its->cmd_base) | 29802fd632a0SShanker Donthineni GITS_CBASER_RaWaWb | 29814c21f3c2SMarc Zyngier GITS_CBASER_InnerShareable | 29824c21f3c2SMarc Zyngier (ITS_CMD_QUEUE_SZ / SZ_4K - 1) | 29834c21f3c2SMarc Zyngier GITS_CBASER_VALID); 29844c21f3c2SMarc Zyngier 29850968a619SVladimir Murzin gits_write_cbaser(baser, its->base + 
GITS_CBASER); 29860968a619SVladimir Murzin tmp = gits_read_cbaser(its->base + GITS_CBASER); 29874c21f3c2SMarc Zyngier 29884ad3e363SMarc Zyngier if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) { 2989241a386cSMarc Zyngier if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) { 2990241a386cSMarc Zyngier /* 2991241a386cSMarc Zyngier * The HW reports non-shareable, we must 2992241a386cSMarc Zyngier * remove the cacheability attributes as 2993241a386cSMarc Zyngier * well. 2994241a386cSMarc Zyngier */ 2995241a386cSMarc Zyngier baser &= ~(GITS_CBASER_SHAREABILITY_MASK | 2996241a386cSMarc Zyngier GITS_CBASER_CACHEABILITY_MASK); 2997241a386cSMarc Zyngier baser |= GITS_CBASER_nC; 29980968a619SVladimir Murzin gits_write_cbaser(baser, its->base + GITS_CBASER); 2999241a386cSMarc Zyngier } 30004c21f3c2SMarc Zyngier pr_info("ITS: using cache flushing for cmd queue\n"); 30014c21f3c2SMarc Zyngier its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING; 30024c21f3c2SMarc Zyngier } 30034c21f3c2SMarc Zyngier 30040968a619SVladimir Murzin gits_write_cwriter(0, its->base + GITS_CWRITER); 30053dfa576bSMarc Zyngier ctlr = readl_relaxed(its->base + GITS_CTLR); 3006d51c4b4dSMarc Zyngier ctlr |= GITS_CTLR_ENABLE; 3007d51c4b4dSMarc Zyngier if (its->is_v4) 3008d51c4b4dSMarc Zyngier ctlr |= GITS_CTLR_ImDe; 3009d51c4b4dSMarc Zyngier writel_relaxed(ctlr, its->base + GITS_CTLR); 3010241a386cSMarc Zyngier 3011db40f0a7STomasz Nowicki err = its_init_domain(handle, its); 3012d14ae5e6STomasz Nowicki if (err) 301354456db9SMarc Zyngier goto out_free_tables; 30144c21f3c2SMarc Zyngier 30154c21f3c2SMarc Zyngier spin_lock(&its_lock); 30164c21f3c2SMarc Zyngier list_add(&its->entry, &its_nodes); 30174c21f3c2SMarc Zyngier spin_unlock(&its_lock); 30184c21f3c2SMarc Zyngier 30194c21f3c2SMarc Zyngier return 0; 30204c21f3c2SMarc Zyngier 30214c21f3c2SMarc Zyngier out_free_tables: 30224c21f3c2SMarc Zyngier its_free_tables(its); 30234c21f3c2SMarc Zyngier out_free_cmd: 30245bc13c2cSRobert Richter free_pages((unsigned long)its->cmd_base, 
get_order(ITS_CMD_QUEUE_SZ)); 30254c21f3c2SMarc Zyngier out_free_its: 30264c21f3c2SMarc Zyngier kfree(its); 30274c21f3c2SMarc Zyngier out_unmap: 30284c21f3c2SMarc Zyngier iounmap(its_base); 3029db40f0a7STomasz Nowicki pr_err("ITS@%pa: failed probing (%d)\n", &res->start, err); 30304c21f3c2SMarc Zyngier return err; 30314c21f3c2SMarc Zyngier } 30324c21f3c2SMarc Zyngier 30334c21f3c2SMarc Zyngier static bool gic_rdists_supports_plpis(void) 30344c21f3c2SMarc Zyngier { 3035589ce5f4SMarc Zyngier return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS); 30364c21f3c2SMarc Zyngier } 30374c21f3c2SMarc Zyngier 30384c21f3c2SMarc Zyngier int its_cpu_init(void) 30394c21f3c2SMarc Zyngier { 304016acae72SVladimir Murzin if (!list_empty(&its_nodes)) { 30414c21f3c2SMarc Zyngier if (!gic_rdists_supports_plpis()) { 30424c21f3c2SMarc Zyngier pr_info("CPU%d: LPIs not supported\n", smp_processor_id()); 30434c21f3c2SMarc Zyngier return -ENXIO; 30444c21f3c2SMarc Zyngier } 30454c21f3c2SMarc Zyngier its_cpu_init_lpis(); 30464c21f3c2SMarc Zyngier its_cpu_init_collection(); 30474c21f3c2SMarc Zyngier } 30484c21f3c2SMarc Zyngier 30494c21f3c2SMarc Zyngier return 0; 30504c21f3c2SMarc Zyngier } 30514c21f3c2SMarc Zyngier 3052935bba7cSArvind Yadav static const struct of_device_id its_device_id[] = { 30534c21f3c2SMarc Zyngier { .compatible = "arm,gic-v3-its", }, 30544c21f3c2SMarc Zyngier {}, 30554c21f3c2SMarc Zyngier }; 30564c21f3c2SMarc Zyngier 3057db40f0a7STomasz Nowicki static int __init its_of_probe(struct device_node *node) 30584c21f3c2SMarc Zyngier { 30594c21f3c2SMarc Zyngier struct device_node *np; 3060db40f0a7STomasz Nowicki struct resource res; 30614c21f3c2SMarc Zyngier 30624c21f3c2SMarc Zyngier for (np = of_find_matching_node(node, its_device_id); np; 30634c21f3c2SMarc Zyngier np = of_find_matching_node(np, its_device_id)) { 3064d14ae5e6STomasz Nowicki if (!of_property_read_bool(np, "msi-controller")) { 3065e81f54c6SRob Herring pr_warn("%pOF: no msi-controller 
property, ITS ignored\n", 3066e81f54c6SRob Herring np); 3067d14ae5e6STomasz Nowicki continue; 3068d14ae5e6STomasz Nowicki } 3069d14ae5e6STomasz Nowicki 3070db40f0a7STomasz Nowicki if (of_address_to_resource(np, 0, &res)) { 3071e81f54c6SRob Herring pr_warn("%pOF: no regs?\n", np); 3072db40f0a7STomasz Nowicki continue; 30734c21f3c2SMarc Zyngier } 30744c21f3c2SMarc Zyngier 3075db40f0a7STomasz Nowicki its_probe_one(&res, &np->fwnode, of_node_to_nid(np)); 3076db40f0a7STomasz Nowicki } 3077db40f0a7STomasz Nowicki return 0; 3078db40f0a7STomasz Nowicki } 3079db40f0a7STomasz Nowicki 30803f010cf1STomasz Nowicki #ifdef CONFIG_ACPI 30813f010cf1STomasz Nowicki 30823f010cf1STomasz Nowicki #define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K) 30833f010cf1STomasz Nowicki 3084d1ce263fSRobert Richter #ifdef CONFIG_ACPI_NUMA 3085dbd2b826SGanapatrao Kulkarni struct its_srat_map { 3086dbd2b826SGanapatrao Kulkarni /* numa node id */ 3087dbd2b826SGanapatrao Kulkarni u32 numa_node; 3088dbd2b826SGanapatrao Kulkarni /* GIC ITS ID */ 3089dbd2b826SGanapatrao Kulkarni u32 its_id; 3090dbd2b826SGanapatrao Kulkarni }; 3091dbd2b826SGanapatrao Kulkarni 3092fdf6e7a8SHanjun Guo static struct its_srat_map *its_srat_maps __initdata; 3093dbd2b826SGanapatrao Kulkarni static int its_in_srat __initdata; 3094dbd2b826SGanapatrao Kulkarni 3095dbd2b826SGanapatrao Kulkarni static int __init acpi_get_its_numa_node(u32 its_id) 3096dbd2b826SGanapatrao Kulkarni { 3097dbd2b826SGanapatrao Kulkarni int i; 3098dbd2b826SGanapatrao Kulkarni 3099dbd2b826SGanapatrao Kulkarni for (i = 0; i < its_in_srat; i++) { 3100dbd2b826SGanapatrao Kulkarni if (its_id == its_srat_maps[i].its_id) 3101dbd2b826SGanapatrao Kulkarni return its_srat_maps[i].numa_node; 3102dbd2b826SGanapatrao Kulkarni } 3103dbd2b826SGanapatrao Kulkarni return NUMA_NO_NODE; 3104dbd2b826SGanapatrao Kulkarni } 3105dbd2b826SGanapatrao Kulkarni 3106fdf6e7a8SHanjun Guo static int __init gic_acpi_match_srat_its(struct acpi_subtable_header *header, 3107fdf6e7a8SHanjun Guo const 
unsigned long end) 3108fdf6e7a8SHanjun Guo { 3109fdf6e7a8SHanjun Guo return 0; 3110fdf6e7a8SHanjun Guo } 3111fdf6e7a8SHanjun Guo 3112dbd2b826SGanapatrao Kulkarni static int __init gic_acpi_parse_srat_its(struct acpi_subtable_header *header, 3113dbd2b826SGanapatrao Kulkarni const unsigned long end) 3114dbd2b826SGanapatrao Kulkarni { 3115dbd2b826SGanapatrao Kulkarni int node; 3116dbd2b826SGanapatrao Kulkarni struct acpi_srat_gic_its_affinity *its_affinity; 3117dbd2b826SGanapatrao Kulkarni 3118dbd2b826SGanapatrao Kulkarni its_affinity = (struct acpi_srat_gic_its_affinity *)header; 3119dbd2b826SGanapatrao Kulkarni if (!its_affinity) 3120dbd2b826SGanapatrao Kulkarni return -EINVAL; 3121dbd2b826SGanapatrao Kulkarni 3122dbd2b826SGanapatrao Kulkarni if (its_affinity->header.length < sizeof(*its_affinity)) { 3123dbd2b826SGanapatrao Kulkarni pr_err("SRAT: Invalid header length %d in ITS affinity\n", 3124dbd2b826SGanapatrao Kulkarni its_affinity->header.length); 3125dbd2b826SGanapatrao Kulkarni return -EINVAL; 3126dbd2b826SGanapatrao Kulkarni } 3127dbd2b826SGanapatrao Kulkarni 3128dbd2b826SGanapatrao Kulkarni node = acpi_map_pxm_to_node(its_affinity->proximity_domain); 3129dbd2b826SGanapatrao Kulkarni 3130dbd2b826SGanapatrao Kulkarni if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) { 3131dbd2b826SGanapatrao Kulkarni pr_err("SRAT: Invalid NUMA node %d in ITS affinity\n", node); 3132dbd2b826SGanapatrao Kulkarni return 0; 3133dbd2b826SGanapatrao Kulkarni } 3134dbd2b826SGanapatrao Kulkarni 3135dbd2b826SGanapatrao Kulkarni its_srat_maps[its_in_srat].numa_node = node; 3136dbd2b826SGanapatrao Kulkarni its_srat_maps[its_in_srat].its_id = its_affinity->its_id; 3137dbd2b826SGanapatrao Kulkarni its_in_srat++; 3138dbd2b826SGanapatrao Kulkarni pr_info("SRAT: PXM %d -> ITS %d -> Node %d\n", 3139dbd2b826SGanapatrao Kulkarni its_affinity->proximity_domain, its_affinity->its_id, node); 3140dbd2b826SGanapatrao Kulkarni 3141dbd2b826SGanapatrao Kulkarni return 0; 3142dbd2b826SGanapatrao 
Kulkarni } 3143dbd2b826SGanapatrao Kulkarni 3144dbd2b826SGanapatrao Kulkarni static void __init acpi_table_parse_srat_its(void) 3145dbd2b826SGanapatrao Kulkarni { 3146fdf6e7a8SHanjun Guo int count; 3147fdf6e7a8SHanjun Guo 3148fdf6e7a8SHanjun Guo count = acpi_table_parse_entries(ACPI_SIG_SRAT, 3149fdf6e7a8SHanjun Guo sizeof(struct acpi_table_srat), 3150fdf6e7a8SHanjun Guo ACPI_SRAT_TYPE_GIC_ITS_AFFINITY, 3151fdf6e7a8SHanjun Guo gic_acpi_match_srat_its, 0); 3152fdf6e7a8SHanjun Guo if (count <= 0) 3153fdf6e7a8SHanjun Guo return; 3154fdf6e7a8SHanjun Guo 3155fdf6e7a8SHanjun Guo its_srat_maps = kmalloc(count * sizeof(struct its_srat_map), 3156fdf6e7a8SHanjun Guo GFP_KERNEL); 3157fdf6e7a8SHanjun Guo if (!its_srat_maps) { 3158fdf6e7a8SHanjun Guo pr_warn("SRAT: Failed to allocate memory for its_srat_maps!\n"); 3159fdf6e7a8SHanjun Guo return; 3160fdf6e7a8SHanjun Guo } 3161fdf6e7a8SHanjun Guo 3162dbd2b826SGanapatrao Kulkarni acpi_table_parse_entries(ACPI_SIG_SRAT, 3163dbd2b826SGanapatrao Kulkarni sizeof(struct acpi_table_srat), 3164dbd2b826SGanapatrao Kulkarni ACPI_SRAT_TYPE_GIC_ITS_AFFINITY, 3165dbd2b826SGanapatrao Kulkarni gic_acpi_parse_srat_its, 0); 3166dbd2b826SGanapatrao Kulkarni } 3167fdf6e7a8SHanjun Guo 3168fdf6e7a8SHanjun Guo /* free the its_srat_maps after ITS probing */ 3169fdf6e7a8SHanjun Guo static void __init acpi_its_srat_maps_free(void) 3170fdf6e7a8SHanjun Guo { 3171fdf6e7a8SHanjun Guo kfree(its_srat_maps); 3172fdf6e7a8SHanjun Guo } 3173dbd2b826SGanapatrao Kulkarni #else 3174dbd2b826SGanapatrao Kulkarni static void __init acpi_table_parse_srat_its(void) { } 3175dbd2b826SGanapatrao Kulkarni static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; } 3176fdf6e7a8SHanjun Guo static void __init acpi_its_srat_maps_free(void) { } 3177dbd2b826SGanapatrao Kulkarni #endif 3178dbd2b826SGanapatrao Kulkarni 31793f010cf1STomasz Nowicki static int __init gic_acpi_parse_madt_its(struct acpi_subtable_header *header, 31803f010cf1STomasz Nowicki const unsigned 
long end) 31813f010cf1STomasz Nowicki { 31823f010cf1STomasz Nowicki struct acpi_madt_generic_translator *its_entry; 31833f010cf1STomasz Nowicki struct fwnode_handle *dom_handle; 31843f010cf1STomasz Nowicki struct resource res; 31853f010cf1STomasz Nowicki int err; 31863f010cf1STomasz Nowicki 31873f010cf1STomasz Nowicki its_entry = (struct acpi_madt_generic_translator *)header; 31883f010cf1STomasz Nowicki memset(&res, 0, sizeof(res)); 31893f010cf1STomasz Nowicki res.start = its_entry->base_address; 31903f010cf1STomasz Nowicki res.end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1; 31913f010cf1STomasz Nowicki res.flags = IORESOURCE_MEM; 31923f010cf1STomasz Nowicki 31933f010cf1STomasz Nowicki dom_handle = irq_domain_alloc_fwnode((void *)its_entry->base_address); 31943f010cf1STomasz Nowicki if (!dom_handle) { 31953f010cf1STomasz Nowicki pr_err("ITS@%pa: Unable to allocate GICv3 ITS domain token\n", 31963f010cf1STomasz Nowicki &res.start); 31973f010cf1STomasz Nowicki return -ENOMEM; 31983f010cf1STomasz Nowicki } 31993f010cf1STomasz Nowicki 32003f010cf1STomasz Nowicki err = iort_register_domain_token(its_entry->translation_id, dom_handle); 32013f010cf1STomasz Nowicki if (err) { 32023f010cf1STomasz Nowicki pr_err("ITS@%pa: Unable to register GICv3 ITS domain token (ITS ID %d) to IORT\n", 32033f010cf1STomasz Nowicki &res.start, its_entry->translation_id); 32043f010cf1STomasz Nowicki goto dom_err; 32053f010cf1STomasz Nowicki } 32063f010cf1STomasz Nowicki 3207dbd2b826SGanapatrao Kulkarni err = its_probe_one(&res, dom_handle, 3208dbd2b826SGanapatrao Kulkarni acpi_get_its_numa_node(its_entry->translation_id)); 32093f010cf1STomasz Nowicki if (!err) 32103f010cf1STomasz Nowicki return 0; 32113f010cf1STomasz Nowicki 32123f010cf1STomasz Nowicki iort_deregister_domain_token(its_entry->translation_id); 32133f010cf1STomasz Nowicki dom_err: 32143f010cf1STomasz Nowicki irq_domain_free_fwnode(dom_handle); 32153f010cf1STomasz Nowicki return err; 32163f010cf1STomasz Nowicki } 
32173f010cf1STomasz Nowicki 32183f010cf1STomasz Nowicki static void __init its_acpi_probe(void) 32193f010cf1STomasz Nowicki { 3220dbd2b826SGanapatrao Kulkarni acpi_table_parse_srat_its(); 32213f010cf1STomasz Nowicki acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR, 32223f010cf1STomasz Nowicki gic_acpi_parse_madt_its, 0); 3223fdf6e7a8SHanjun Guo acpi_its_srat_maps_free(); 32243f010cf1STomasz Nowicki } 32253f010cf1STomasz Nowicki #else 32263f010cf1STomasz Nowicki static void __init its_acpi_probe(void) { } 32273f010cf1STomasz Nowicki #endif 32283f010cf1STomasz Nowicki 3229db40f0a7STomasz Nowicki int __init its_init(struct fwnode_handle *handle, struct rdists *rdists, 3230db40f0a7STomasz Nowicki struct irq_domain *parent_domain) 3231db40f0a7STomasz Nowicki { 3232db40f0a7STomasz Nowicki struct device_node *of_node; 32338fff27aeSMarc Zyngier struct its_node *its; 32348fff27aeSMarc Zyngier bool has_v4 = false; 32358fff27aeSMarc Zyngier int err; 3236db40f0a7STomasz Nowicki 3237db40f0a7STomasz Nowicki its_parent = parent_domain; 3238db40f0a7STomasz Nowicki of_node = to_of_node(handle); 3239db40f0a7STomasz Nowicki if (of_node) 3240db40f0a7STomasz Nowicki its_of_probe(of_node); 3241db40f0a7STomasz Nowicki else 32423f010cf1STomasz Nowicki its_acpi_probe(); 3243db40f0a7STomasz Nowicki 32444c21f3c2SMarc Zyngier if (list_empty(&its_nodes)) { 32454c21f3c2SMarc Zyngier pr_warn("ITS: No ITS available, not enabling LPIs\n"); 32464c21f3c2SMarc Zyngier return -ENXIO; 32474c21f3c2SMarc Zyngier } 32484c21f3c2SMarc Zyngier 32494c21f3c2SMarc Zyngier gic_rdists = rdists; 32508fff27aeSMarc Zyngier err = its_alloc_lpi_tables(); 32518fff27aeSMarc Zyngier if (err) 32528fff27aeSMarc Zyngier return err; 32538fff27aeSMarc Zyngier 32548fff27aeSMarc Zyngier list_for_each_entry(its, &its_nodes, entry) 32558fff27aeSMarc Zyngier has_v4 |= its->is_v4; 32568fff27aeSMarc Zyngier 32578fff27aeSMarc Zyngier if (has_v4 & rdists->has_vlpis) { 32583d63cb53SMarc Zyngier if (its_init_vpe_domain() || 
32593d63cb53SMarc Zyngier its_init_v4(parent_domain, &its_vpe_domain_ops)) { 32608fff27aeSMarc Zyngier rdists->has_vlpis = false; 32618fff27aeSMarc Zyngier pr_err("ITS: Disabling GICv4 support\n"); 32628fff27aeSMarc Zyngier } 32638fff27aeSMarc Zyngier } 32648fff27aeSMarc Zyngier 32658fff27aeSMarc Zyngier return 0; 32664c21f3c2SMarc Zyngier } 3267