1cc2d3216SMarc Zyngier /* 2cc2d3216SMarc Zyngier * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved. 3cc2d3216SMarc Zyngier * Author: Marc Zyngier <marc.zyngier@arm.com> 4cc2d3216SMarc Zyngier * 5cc2d3216SMarc Zyngier * This program is free software; you can redistribute it and/or modify 6cc2d3216SMarc Zyngier * it under the terms of the GNU General Public License version 2 as 7cc2d3216SMarc Zyngier * published by the Free Software Foundation. 8cc2d3216SMarc Zyngier * 9cc2d3216SMarc Zyngier * This program is distributed in the hope that it will be useful, 10cc2d3216SMarc Zyngier * but WITHOUT ANY WARRANTY; without even the implied warranty of 11cc2d3216SMarc Zyngier * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12cc2d3216SMarc Zyngier * GNU General Public License for more details. 13cc2d3216SMarc Zyngier * 14cc2d3216SMarc Zyngier * You should have received a copy of the GNU General Public License 15cc2d3216SMarc Zyngier * along with this program. If not, see <http://www.gnu.org/licenses/>. 
16cc2d3216SMarc Zyngier */ 17cc2d3216SMarc Zyngier 183f010cf1STomasz Nowicki #include <linux/acpi.h> 19cc2d3216SMarc Zyngier #include <linux/bitmap.h> 20cc2d3216SMarc Zyngier #include <linux/cpu.h> 21cc2d3216SMarc Zyngier #include <linux/delay.h> 2244bb7e24SRobin Murphy #include <linux/dma-iommu.h> 23cc2d3216SMarc Zyngier #include <linux/interrupt.h> 243f010cf1STomasz Nowicki #include <linux/irqdomain.h> 253f010cf1STomasz Nowicki #include <linux/acpi_iort.h> 26cc2d3216SMarc Zyngier #include <linux/log2.h> 27cc2d3216SMarc Zyngier #include <linux/mm.h> 28cc2d3216SMarc Zyngier #include <linux/msi.h> 29cc2d3216SMarc Zyngier #include <linux/of.h> 30cc2d3216SMarc Zyngier #include <linux/of_address.h> 31cc2d3216SMarc Zyngier #include <linux/of_irq.h> 32cc2d3216SMarc Zyngier #include <linux/of_pci.h> 33cc2d3216SMarc Zyngier #include <linux/of_platform.h> 34cc2d3216SMarc Zyngier #include <linux/percpu.h> 35cc2d3216SMarc Zyngier #include <linux/slab.h> 36cc2d3216SMarc Zyngier 3741a83e06SJoel Porquet #include <linux/irqchip.h> 38cc2d3216SMarc Zyngier #include <linux/irqchip/arm-gic-v3.h> 39cc2d3216SMarc Zyngier 40cc2d3216SMarc Zyngier #include <asm/cputype.h> 41cc2d3216SMarc Zyngier #include <asm/exception.h> 42cc2d3216SMarc Zyngier 4367510ccaSRobert Richter #include "irq-gic-common.h" 4467510ccaSRobert Richter 4594100970SRobert Richter #define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0) 4694100970SRobert Richter #define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1) 47fbf8f40eSGanapatrao Kulkarni #define ITS_FLAGS_WORKAROUND_CAVIUM_23144 (1ULL << 2) 48cc2d3216SMarc Zyngier 49c48ed51cSMarc Zyngier #define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0) 50c48ed51cSMarc Zyngier 51cc2d3216SMarc Zyngier /* 52cc2d3216SMarc Zyngier * Collection structure - just an ID, and a redistributor address to 53cc2d3216SMarc Zyngier * ping. We use one per CPU as a bag of interrupts assigned to this 54cc2d3216SMarc Zyngier * CPU. 
55cc2d3216SMarc Zyngier */ 56cc2d3216SMarc Zyngier struct its_collection { 57cc2d3216SMarc Zyngier u64 target_address; 58cc2d3216SMarc Zyngier u16 col_id; 59cc2d3216SMarc Zyngier }; 60cc2d3216SMarc Zyngier 61cc2d3216SMarc Zyngier /* 629347359aSShanker Donthineni * The ITS_BASER structure - contains memory information, cached 639347359aSShanker Donthineni * value of BASER register configuration and ITS page size. 64466b7d16SShanker Donthineni */ 65466b7d16SShanker Donthineni struct its_baser { 66466b7d16SShanker Donthineni void *base; 67466b7d16SShanker Donthineni u64 val; 68466b7d16SShanker Donthineni u32 order; 699347359aSShanker Donthineni u32 psz; 70466b7d16SShanker Donthineni }; 71466b7d16SShanker Donthineni 72466b7d16SShanker Donthineni /* 73cc2d3216SMarc Zyngier * The ITS structure - contains most of the infrastructure, with the 74841514abSMarc Zyngier * top-level MSI domain, the command queue, the collections, and the 75841514abSMarc Zyngier * list of devices writing to it. 76cc2d3216SMarc Zyngier */ 77cc2d3216SMarc Zyngier struct its_node { 78cc2d3216SMarc Zyngier raw_spinlock_t lock; 79cc2d3216SMarc Zyngier struct list_head entry; 80cc2d3216SMarc Zyngier void __iomem *base; 81db40f0a7STomasz Nowicki phys_addr_t phys_base; 82cc2d3216SMarc Zyngier struct its_cmd_block *cmd_base; 83cc2d3216SMarc Zyngier struct its_cmd_block *cmd_write; 84466b7d16SShanker Donthineni struct its_baser tables[GITS_BASER_NR_REGS]; 85cc2d3216SMarc Zyngier struct its_collection *collections; 86cc2d3216SMarc Zyngier struct list_head its_device_list; 87cc2d3216SMarc Zyngier u64 flags; 88cc2d3216SMarc Zyngier u32 ite_size; 89466b7d16SShanker Donthineni u32 device_ids; 90fbf8f40eSGanapatrao Kulkarni int numa_node; 91cc2d3216SMarc Zyngier }; 92cc2d3216SMarc Zyngier 93cc2d3216SMarc Zyngier #define ITS_ITT_ALIGN SZ_256 94cc2d3216SMarc Zyngier 952eca0d6cSShanker Donthineni /* Convert page order to size in bytes */ 962eca0d6cSShanker Donthineni #define PAGE_ORDER_TO_SIZE(o) (PAGE_SIZE << 
(o)) 972eca0d6cSShanker Donthineni 98591e5becSMarc Zyngier struct event_lpi_map { 99591e5becSMarc Zyngier unsigned long *lpi_map; 100591e5becSMarc Zyngier u16 *col_map; 101591e5becSMarc Zyngier irq_hw_number_t lpi_base; 102591e5becSMarc Zyngier int nr_lpis; 103591e5becSMarc Zyngier }; 104591e5becSMarc Zyngier 105cc2d3216SMarc Zyngier /* 106cc2d3216SMarc Zyngier * The ITS view of a device - belongs to an ITS, a collection, owns an 107cc2d3216SMarc Zyngier * interrupt translation table, and a list of interrupts. 108cc2d3216SMarc Zyngier */ 109cc2d3216SMarc Zyngier struct its_device { 110cc2d3216SMarc Zyngier struct list_head entry; 111cc2d3216SMarc Zyngier struct its_node *its; 112591e5becSMarc Zyngier struct event_lpi_map event_map; 113cc2d3216SMarc Zyngier void *itt; 114cc2d3216SMarc Zyngier u32 nr_ites; 115cc2d3216SMarc Zyngier u32 device_id; 116cc2d3216SMarc Zyngier }; 117cc2d3216SMarc Zyngier 1181ac19ca6SMarc Zyngier static LIST_HEAD(its_nodes); 1191ac19ca6SMarc Zyngier static DEFINE_SPINLOCK(its_lock); 1201ac19ca6SMarc Zyngier static struct rdists *gic_rdists; 121db40f0a7STomasz Nowicki static struct irq_domain *its_parent; 1221ac19ca6SMarc Zyngier 1231ac19ca6SMarc Zyngier #define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist)) 1241ac19ca6SMarc Zyngier #define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base) 1251ac19ca6SMarc Zyngier 126591e5becSMarc Zyngier static struct its_collection *dev_event_to_col(struct its_device *its_dev, 127591e5becSMarc Zyngier u32 event) 128591e5becSMarc Zyngier { 129591e5becSMarc Zyngier struct its_node *its = its_dev->its; 130591e5becSMarc Zyngier 131591e5becSMarc Zyngier return its->collections + its_dev->event_map.col_map[event]; 132591e5becSMarc Zyngier } 133591e5becSMarc Zyngier 134cc2d3216SMarc Zyngier /* 135cc2d3216SMarc Zyngier * ITS command descriptors - parameters to be encoded in a command 136cc2d3216SMarc Zyngier * block. 
137cc2d3216SMarc Zyngier */ 138cc2d3216SMarc Zyngier struct its_cmd_desc { 139cc2d3216SMarc Zyngier union { 140cc2d3216SMarc Zyngier struct { 141cc2d3216SMarc Zyngier struct its_device *dev; 142cc2d3216SMarc Zyngier u32 event_id; 143cc2d3216SMarc Zyngier } its_inv_cmd; 144cc2d3216SMarc Zyngier 145cc2d3216SMarc Zyngier struct { 146cc2d3216SMarc Zyngier struct its_device *dev; 147cc2d3216SMarc Zyngier u32 event_id; 148cc2d3216SMarc Zyngier } its_int_cmd; 149cc2d3216SMarc Zyngier 150cc2d3216SMarc Zyngier struct { 151cc2d3216SMarc Zyngier struct its_device *dev; 152cc2d3216SMarc Zyngier int valid; 153cc2d3216SMarc Zyngier } its_mapd_cmd; 154cc2d3216SMarc Zyngier 155cc2d3216SMarc Zyngier struct { 156cc2d3216SMarc Zyngier struct its_collection *col; 157cc2d3216SMarc Zyngier int valid; 158cc2d3216SMarc Zyngier } its_mapc_cmd; 159cc2d3216SMarc Zyngier 160cc2d3216SMarc Zyngier struct { 161cc2d3216SMarc Zyngier struct its_device *dev; 162cc2d3216SMarc Zyngier u32 phys_id; 163cc2d3216SMarc Zyngier u32 event_id; 164cc2d3216SMarc Zyngier } its_mapvi_cmd; 165cc2d3216SMarc Zyngier 166cc2d3216SMarc Zyngier struct { 167cc2d3216SMarc Zyngier struct its_device *dev; 168cc2d3216SMarc Zyngier struct its_collection *col; 169591e5becSMarc Zyngier u32 event_id; 170cc2d3216SMarc Zyngier } its_movi_cmd; 171cc2d3216SMarc Zyngier 172cc2d3216SMarc Zyngier struct { 173cc2d3216SMarc Zyngier struct its_device *dev; 174cc2d3216SMarc Zyngier u32 event_id; 175cc2d3216SMarc Zyngier } its_discard_cmd; 176cc2d3216SMarc Zyngier 177cc2d3216SMarc Zyngier struct { 178cc2d3216SMarc Zyngier struct its_collection *col; 179cc2d3216SMarc Zyngier } its_invall_cmd; 180cc2d3216SMarc Zyngier }; 181cc2d3216SMarc Zyngier }; 182cc2d3216SMarc Zyngier 183cc2d3216SMarc Zyngier /* 184cc2d3216SMarc Zyngier * The ITS command block, which is what the ITS actually parses. 
185cc2d3216SMarc Zyngier */ 186cc2d3216SMarc Zyngier struct its_cmd_block { 187cc2d3216SMarc Zyngier u64 raw_cmd[4]; 188cc2d3216SMarc Zyngier }; 189cc2d3216SMarc Zyngier 190cc2d3216SMarc Zyngier #define ITS_CMD_QUEUE_SZ SZ_64K 191cc2d3216SMarc Zyngier #define ITS_CMD_QUEUE_NR_ENTRIES (ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block)) 192cc2d3216SMarc Zyngier 193cc2d3216SMarc Zyngier typedef struct its_collection *(*its_cmd_builder_t)(struct its_cmd_block *, 194cc2d3216SMarc Zyngier struct its_cmd_desc *); 195cc2d3216SMarc Zyngier 196cc2d3216SMarc Zyngier static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr) 197cc2d3216SMarc Zyngier { 198b11283ebSVladimir Murzin cmd->raw_cmd[0] &= ~0xffULL; 199cc2d3216SMarc Zyngier cmd->raw_cmd[0] |= cmd_nr; 200cc2d3216SMarc Zyngier } 201cc2d3216SMarc Zyngier 202cc2d3216SMarc Zyngier static void its_encode_devid(struct its_cmd_block *cmd, u32 devid) 203cc2d3216SMarc Zyngier { 2047e195ba0SAndre Przywara cmd->raw_cmd[0] &= BIT_ULL(32) - 1; 205cc2d3216SMarc Zyngier cmd->raw_cmd[0] |= ((u64)devid) << 32; 206cc2d3216SMarc Zyngier } 207cc2d3216SMarc Zyngier 208cc2d3216SMarc Zyngier static void its_encode_event_id(struct its_cmd_block *cmd, u32 id) 209cc2d3216SMarc Zyngier { 210b11283ebSVladimir Murzin cmd->raw_cmd[1] &= ~0xffffffffULL; 211cc2d3216SMarc Zyngier cmd->raw_cmd[1] |= id; 212cc2d3216SMarc Zyngier } 213cc2d3216SMarc Zyngier 214cc2d3216SMarc Zyngier static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id) 215cc2d3216SMarc Zyngier { 216b11283ebSVladimir Murzin cmd->raw_cmd[1] &= 0xffffffffULL; 217cc2d3216SMarc Zyngier cmd->raw_cmd[1] |= ((u64)phys_id) << 32; 218cc2d3216SMarc Zyngier } 219cc2d3216SMarc Zyngier 220cc2d3216SMarc Zyngier static void its_encode_size(struct its_cmd_block *cmd, u8 size) 221cc2d3216SMarc Zyngier { 222b11283ebSVladimir Murzin cmd->raw_cmd[1] &= ~0x1fULL; 223cc2d3216SMarc Zyngier cmd->raw_cmd[1] |= size & 0x1f; 224cc2d3216SMarc Zyngier } 225cc2d3216SMarc Zyngier 226cc2d3216SMarc 
Zyngier static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr) 227cc2d3216SMarc Zyngier { 228b11283ebSVladimir Murzin cmd->raw_cmd[2] &= ~0xffffffffffffULL; 229b11283ebSVladimir Murzin cmd->raw_cmd[2] |= itt_addr & 0xffffffffff00ULL; 230cc2d3216SMarc Zyngier } 231cc2d3216SMarc Zyngier 232cc2d3216SMarc Zyngier static void its_encode_valid(struct its_cmd_block *cmd, int valid) 233cc2d3216SMarc Zyngier { 234b11283ebSVladimir Murzin cmd->raw_cmd[2] &= ~(1ULL << 63); 235cc2d3216SMarc Zyngier cmd->raw_cmd[2] |= ((u64)!!valid) << 63; 236cc2d3216SMarc Zyngier } 237cc2d3216SMarc Zyngier 238cc2d3216SMarc Zyngier static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr) 239cc2d3216SMarc Zyngier { 240b11283ebSVladimir Murzin cmd->raw_cmd[2] &= ~(0xffffffffULL << 16); 241b11283ebSVladimir Murzin cmd->raw_cmd[2] |= (target_addr & (0xffffffffULL << 16)); 242cc2d3216SMarc Zyngier } 243cc2d3216SMarc Zyngier 244cc2d3216SMarc Zyngier static void its_encode_collection(struct its_cmd_block *cmd, u16 col) 245cc2d3216SMarc Zyngier { 246b11283ebSVladimir Murzin cmd->raw_cmd[2] &= ~0xffffULL; 247cc2d3216SMarc Zyngier cmd->raw_cmd[2] |= col; 248cc2d3216SMarc Zyngier } 249cc2d3216SMarc Zyngier 250cc2d3216SMarc Zyngier static inline void its_fixup_cmd(struct its_cmd_block *cmd) 251cc2d3216SMarc Zyngier { 252cc2d3216SMarc Zyngier /* Let's fixup BE commands */ 253cc2d3216SMarc Zyngier cmd->raw_cmd[0] = cpu_to_le64(cmd->raw_cmd[0]); 254cc2d3216SMarc Zyngier cmd->raw_cmd[1] = cpu_to_le64(cmd->raw_cmd[1]); 255cc2d3216SMarc Zyngier cmd->raw_cmd[2] = cpu_to_le64(cmd->raw_cmd[2]); 256cc2d3216SMarc Zyngier cmd->raw_cmd[3] = cpu_to_le64(cmd->raw_cmd[3]); 257cc2d3216SMarc Zyngier } 258cc2d3216SMarc Zyngier 259cc2d3216SMarc Zyngier static struct its_collection *its_build_mapd_cmd(struct its_cmd_block *cmd, 260cc2d3216SMarc Zyngier struct its_cmd_desc *desc) 261cc2d3216SMarc Zyngier { 262cc2d3216SMarc Zyngier unsigned long itt_addr; 263c8481267SMarc Zyngier u8 size = 
ilog2(desc->its_mapd_cmd.dev->nr_ites); 264cc2d3216SMarc Zyngier 265cc2d3216SMarc Zyngier itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt); 266cc2d3216SMarc Zyngier itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN); 267cc2d3216SMarc Zyngier 268cc2d3216SMarc Zyngier its_encode_cmd(cmd, GITS_CMD_MAPD); 269cc2d3216SMarc Zyngier its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id); 270cc2d3216SMarc Zyngier its_encode_size(cmd, size - 1); 271cc2d3216SMarc Zyngier its_encode_itt(cmd, itt_addr); 272cc2d3216SMarc Zyngier its_encode_valid(cmd, desc->its_mapd_cmd.valid); 273cc2d3216SMarc Zyngier 274cc2d3216SMarc Zyngier its_fixup_cmd(cmd); 275cc2d3216SMarc Zyngier 276591e5becSMarc Zyngier return NULL; 277cc2d3216SMarc Zyngier } 278cc2d3216SMarc Zyngier 279cc2d3216SMarc Zyngier static struct its_collection *its_build_mapc_cmd(struct its_cmd_block *cmd, 280cc2d3216SMarc Zyngier struct its_cmd_desc *desc) 281cc2d3216SMarc Zyngier { 282cc2d3216SMarc Zyngier its_encode_cmd(cmd, GITS_CMD_MAPC); 283cc2d3216SMarc Zyngier its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id); 284cc2d3216SMarc Zyngier its_encode_target(cmd, desc->its_mapc_cmd.col->target_address); 285cc2d3216SMarc Zyngier its_encode_valid(cmd, desc->its_mapc_cmd.valid); 286cc2d3216SMarc Zyngier 287cc2d3216SMarc Zyngier its_fixup_cmd(cmd); 288cc2d3216SMarc Zyngier 289cc2d3216SMarc Zyngier return desc->its_mapc_cmd.col; 290cc2d3216SMarc Zyngier } 291cc2d3216SMarc Zyngier 292cc2d3216SMarc Zyngier static struct its_collection *its_build_mapvi_cmd(struct its_cmd_block *cmd, 293cc2d3216SMarc Zyngier struct its_cmd_desc *desc) 294cc2d3216SMarc Zyngier { 295591e5becSMarc Zyngier struct its_collection *col; 296591e5becSMarc Zyngier 297591e5becSMarc Zyngier col = dev_event_to_col(desc->its_mapvi_cmd.dev, 298591e5becSMarc Zyngier desc->its_mapvi_cmd.event_id); 299591e5becSMarc Zyngier 300cc2d3216SMarc Zyngier its_encode_cmd(cmd, GITS_CMD_MAPVI); 301cc2d3216SMarc Zyngier its_encode_devid(cmd, 
desc->its_mapvi_cmd.dev->device_id); 302cc2d3216SMarc Zyngier its_encode_event_id(cmd, desc->its_mapvi_cmd.event_id); 303cc2d3216SMarc Zyngier its_encode_phys_id(cmd, desc->its_mapvi_cmd.phys_id); 304591e5becSMarc Zyngier its_encode_collection(cmd, col->col_id); 305cc2d3216SMarc Zyngier 306cc2d3216SMarc Zyngier its_fixup_cmd(cmd); 307cc2d3216SMarc Zyngier 308591e5becSMarc Zyngier return col; 309cc2d3216SMarc Zyngier } 310cc2d3216SMarc Zyngier 311cc2d3216SMarc Zyngier static struct its_collection *its_build_movi_cmd(struct its_cmd_block *cmd, 312cc2d3216SMarc Zyngier struct its_cmd_desc *desc) 313cc2d3216SMarc Zyngier { 314591e5becSMarc Zyngier struct its_collection *col; 315591e5becSMarc Zyngier 316591e5becSMarc Zyngier col = dev_event_to_col(desc->its_movi_cmd.dev, 317591e5becSMarc Zyngier desc->its_movi_cmd.event_id); 318591e5becSMarc Zyngier 319cc2d3216SMarc Zyngier its_encode_cmd(cmd, GITS_CMD_MOVI); 320cc2d3216SMarc Zyngier its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id); 321591e5becSMarc Zyngier its_encode_event_id(cmd, desc->its_movi_cmd.event_id); 322cc2d3216SMarc Zyngier its_encode_collection(cmd, desc->its_movi_cmd.col->col_id); 323cc2d3216SMarc Zyngier 324cc2d3216SMarc Zyngier its_fixup_cmd(cmd); 325cc2d3216SMarc Zyngier 326591e5becSMarc Zyngier return col; 327cc2d3216SMarc Zyngier } 328cc2d3216SMarc Zyngier 329cc2d3216SMarc Zyngier static struct its_collection *its_build_discard_cmd(struct its_cmd_block *cmd, 330cc2d3216SMarc Zyngier struct its_cmd_desc *desc) 331cc2d3216SMarc Zyngier { 332591e5becSMarc Zyngier struct its_collection *col; 333591e5becSMarc Zyngier 334591e5becSMarc Zyngier col = dev_event_to_col(desc->its_discard_cmd.dev, 335591e5becSMarc Zyngier desc->its_discard_cmd.event_id); 336591e5becSMarc Zyngier 337cc2d3216SMarc Zyngier its_encode_cmd(cmd, GITS_CMD_DISCARD); 338cc2d3216SMarc Zyngier its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id); 339cc2d3216SMarc Zyngier its_encode_event_id(cmd, 
desc->its_discard_cmd.event_id); 340cc2d3216SMarc Zyngier 341cc2d3216SMarc Zyngier its_fixup_cmd(cmd); 342cc2d3216SMarc Zyngier 343591e5becSMarc Zyngier return col; 344cc2d3216SMarc Zyngier } 345cc2d3216SMarc Zyngier 346cc2d3216SMarc Zyngier static struct its_collection *its_build_inv_cmd(struct its_cmd_block *cmd, 347cc2d3216SMarc Zyngier struct its_cmd_desc *desc) 348cc2d3216SMarc Zyngier { 349591e5becSMarc Zyngier struct its_collection *col; 350591e5becSMarc Zyngier 351591e5becSMarc Zyngier col = dev_event_to_col(desc->its_inv_cmd.dev, 352591e5becSMarc Zyngier desc->its_inv_cmd.event_id); 353591e5becSMarc Zyngier 354cc2d3216SMarc Zyngier its_encode_cmd(cmd, GITS_CMD_INV); 355cc2d3216SMarc Zyngier its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id); 356cc2d3216SMarc Zyngier its_encode_event_id(cmd, desc->its_inv_cmd.event_id); 357cc2d3216SMarc Zyngier 358cc2d3216SMarc Zyngier its_fixup_cmd(cmd); 359cc2d3216SMarc Zyngier 360591e5becSMarc Zyngier return col; 361cc2d3216SMarc Zyngier } 362cc2d3216SMarc Zyngier 363cc2d3216SMarc Zyngier static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd, 364cc2d3216SMarc Zyngier struct its_cmd_desc *desc) 365cc2d3216SMarc Zyngier { 366cc2d3216SMarc Zyngier its_encode_cmd(cmd, GITS_CMD_INVALL); 367cc2d3216SMarc Zyngier its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id); 368cc2d3216SMarc Zyngier 369cc2d3216SMarc Zyngier its_fixup_cmd(cmd); 370cc2d3216SMarc Zyngier 371cc2d3216SMarc Zyngier return NULL; 372cc2d3216SMarc Zyngier } 373cc2d3216SMarc Zyngier 374cc2d3216SMarc Zyngier static u64 its_cmd_ptr_to_offset(struct its_node *its, 375cc2d3216SMarc Zyngier struct its_cmd_block *ptr) 376cc2d3216SMarc Zyngier { 377cc2d3216SMarc Zyngier return (ptr - its->cmd_base) * sizeof(*ptr); 378cc2d3216SMarc Zyngier } 379cc2d3216SMarc Zyngier 380cc2d3216SMarc Zyngier static int its_queue_full(struct its_node *its) 381cc2d3216SMarc Zyngier { 382cc2d3216SMarc Zyngier int widx; 383cc2d3216SMarc Zyngier int ridx; 
384cc2d3216SMarc Zyngier 385cc2d3216SMarc Zyngier widx = its->cmd_write - its->cmd_base; 386cc2d3216SMarc Zyngier ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block); 387cc2d3216SMarc Zyngier 388cc2d3216SMarc Zyngier /* This is incredibly unlikely to happen, unless the ITS locks up. */ 389cc2d3216SMarc Zyngier if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx) 390cc2d3216SMarc Zyngier return 1; 391cc2d3216SMarc Zyngier 392cc2d3216SMarc Zyngier return 0; 393cc2d3216SMarc Zyngier } 394cc2d3216SMarc Zyngier 395cc2d3216SMarc Zyngier static struct its_cmd_block *its_allocate_entry(struct its_node *its) 396cc2d3216SMarc Zyngier { 397cc2d3216SMarc Zyngier struct its_cmd_block *cmd; 398cc2d3216SMarc Zyngier u32 count = 1000000; /* 1s! */ 399cc2d3216SMarc Zyngier 400cc2d3216SMarc Zyngier while (its_queue_full(its)) { 401cc2d3216SMarc Zyngier count--; 402cc2d3216SMarc Zyngier if (!count) { 403cc2d3216SMarc Zyngier pr_err_ratelimited("ITS queue not draining\n"); 404cc2d3216SMarc Zyngier return NULL; 405cc2d3216SMarc Zyngier } 406cc2d3216SMarc Zyngier cpu_relax(); 407cc2d3216SMarc Zyngier udelay(1); 408cc2d3216SMarc Zyngier } 409cc2d3216SMarc Zyngier 410cc2d3216SMarc Zyngier cmd = its->cmd_write++; 411cc2d3216SMarc Zyngier 412cc2d3216SMarc Zyngier /* Handle queue wrapping */ 413cc2d3216SMarc Zyngier if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES)) 414cc2d3216SMarc Zyngier its->cmd_write = its->cmd_base; 415cc2d3216SMarc Zyngier 416cc2d3216SMarc Zyngier return cmd; 417cc2d3216SMarc Zyngier } 418cc2d3216SMarc Zyngier 419cc2d3216SMarc Zyngier static struct its_cmd_block *its_post_commands(struct its_node *its) 420cc2d3216SMarc Zyngier { 421cc2d3216SMarc Zyngier u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write); 422cc2d3216SMarc Zyngier 423cc2d3216SMarc Zyngier writel_relaxed(wr, its->base + GITS_CWRITER); 424cc2d3216SMarc Zyngier 425cc2d3216SMarc Zyngier return its->cmd_write; 426cc2d3216SMarc Zyngier } 427cc2d3216SMarc Zyngier 
428cc2d3216SMarc Zyngier static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd) 429cc2d3216SMarc Zyngier { 430cc2d3216SMarc Zyngier /* 431cc2d3216SMarc Zyngier * Make sure the commands written to memory are observable by 432cc2d3216SMarc Zyngier * the ITS. 433cc2d3216SMarc Zyngier */ 434cc2d3216SMarc Zyngier if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING) 435328191c0SVladimir Murzin gic_flush_dcache_to_poc(cmd, sizeof(*cmd)); 436cc2d3216SMarc Zyngier else 437cc2d3216SMarc Zyngier dsb(ishst); 438cc2d3216SMarc Zyngier } 439cc2d3216SMarc Zyngier 440cc2d3216SMarc Zyngier static void its_wait_for_range_completion(struct its_node *its, 441cc2d3216SMarc Zyngier struct its_cmd_block *from, 442cc2d3216SMarc Zyngier struct its_cmd_block *to) 443cc2d3216SMarc Zyngier { 444cc2d3216SMarc Zyngier u64 rd_idx, from_idx, to_idx; 445cc2d3216SMarc Zyngier u32 count = 1000000; /* 1s! */ 446cc2d3216SMarc Zyngier 447cc2d3216SMarc Zyngier from_idx = its_cmd_ptr_to_offset(its, from); 448cc2d3216SMarc Zyngier to_idx = its_cmd_ptr_to_offset(its, to); 449cc2d3216SMarc Zyngier 450cc2d3216SMarc Zyngier while (1) { 451cc2d3216SMarc Zyngier rd_idx = readl_relaxed(its->base + GITS_CREADR); 452cc2d3216SMarc Zyngier if (rd_idx >= to_idx || rd_idx < from_idx) 453cc2d3216SMarc Zyngier break; 454cc2d3216SMarc Zyngier 455cc2d3216SMarc Zyngier count--; 456cc2d3216SMarc Zyngier if (!count) { 457cc2d3216SMarc Zyngier pr_err_ratelimited("ITS queue timeout\n"); 458cc2d3216SMarc Zyngier return; 459cc2d3216SMarc Zyngier } 460cc2d3216SMarc Zyngier cpu_relax(); 461cc2d3216SMarc Zyngier udelay(1); 462cc2d3216SMarc Zyngier } 463cc2d3216SMarc Zyngier } 464cc2d3216SMarc Zyngier 465cc2d3216SMarc Zyngier static void its_send_single_command(struct its_node *its, 466cc2d3216SMarc Zyngier its_cmd_builder_t builder, 467cc2d3216SMarc Zyngier struct its_cmd_desc *desc) 468cc2d3216SMarc Zyngier { 469cc2d3216SMarc Zyngier struct its_cmd_block *cmd, *sync_cmd, *next_cmd; 470cc2d3216SMarc Zyngier struct 
its_collection *sync_col; 4713e39e8f5SMarc Zyngier unsigned long flags; 472cc2d3216SMarc Zyngier 4733e39e8f5SMarc Zyngier raw_spin_lock_irqsave(&its->lock, flags); 474cc2d3216SMarc Zyngier 475cc2d3216SMarc Zyngier cmd = its_allocate_entry(its); 476cc2d3216SMarc Zyngier if (!cmd) { /* We're soooooo screewed... */ 477cc2d3216SMarc Zyngier pr_err_ratelimited("ITS can't allocate, dropping command\n"); 4783e39e8f5SMarc Zyngier raw_spin_unlock_irqrestore(&its->lock, flags); 479cc2d3216SMarc Zyngier return; 480cc2d3216SMarc Zyngier } 481cc2d3216SMarc Zyngier sync_col = builder(cmd, desc); 482cc2d3216SMarc Zyngier its_flush_cmd(its, cmd); 483cc2d3216SMarc Zyngier 484cc2d3216SMarc Zyngier if (sync_col) { 485cc2d3216SMarc Zyngier sync_cmd = its_allocate_entry(its); 486cc2d3216SMarc Zyngier if (!sync_cmd) { 487cc2d3216SMarc Zyngier pr_err_ratelimited("ITS can't SYNC, skipping\n"); 488cc2d3216SMarc Zyngier goto post; 489cc2d3216SMarc Zyngier } 490cc2d3216SMarc Zyngier its_encode_cmd(sync_cmd, GITS_CMD_SYNC); 491cc2d3216SMarc Zyngier its_encode_target(sync_cmd, sync_col->target_address); 492cc2d3216SMarc Zyngier its_fixup_cmd(sync_cmd); 493cc2d3216SMarc Zyngier its_flush_cmd(its, sync_cmd); 494cc2d3216SMarc Zyngier } 495cc2d3216SMarc Zyngier 496cc2d3216SMarc Zyngier post: 497cc2d3216SMarc Zyngier next_cmd = its_post_commands(its); 4983e39e8f5SMarc Zyngier raw_spin_unlock_irqrestore(&its->lock, flags); 499cc2d3216SMarc Zyngier 500cc2d3216SMarc Zyngier its_wait_for_range_completion(its, cmd, next_cmd); 501cc2d3216SMarc Zyngier } 502cc2d3216SMarc Zyngier 503cc2d3216SMarc Zyngier static void its_send_inv(struct its_device *dev, u32 event_id) 504cc2d3216SMarc Zyngier { 505cc2d3216SMarc Zyngier struct its_cmd_desc desc; 506cc2d3216SMarc Zyngier 507cc2d3216SMarc Zyngier desc.its_inv_cmd.dev = dev; 508cc2d3216SMarc Zyngier desc.its_inv_cmd.event_id = event_id; 509cc2d3216SMarc Zyngier 510cc2d3216SMarc Zyngier its_send_single_command(dev->its, its_build_inv_cmd, &desc); 511cc2d3216SMarc 
Zyngier } 512cc2d3216SMarc Zyngier 513cc2d3216SMarc Zyngier static void its_send_mapd(struct its_device *dev, int valid) 514cc2d3216SMarc Zyngier { 515cc2d3216SMarc Zyngier struct its_cmd_desc desc; 516cc2d3216SMarc Zyngier 517cc2d3216SMarc Zyngier desc.its_mapd_cmd.dev = dev; 518cc2d3216SMarc Zyngier desc.its_mapd_cmd.valid = !!valid; 519cc2d3216SMarc Zyngier 520cc2d3216SMarc Zyngier its_send_single_command(dev->its, its_build_mapd_cmd, &desc); 521cc2d3216SMarc Zyngier } 522cc2d3216SMarc Zyngier 523cc2d3216SMarc Zyngier static void its_send_mapc(struct its_node *its, struct its_collection *col, 524cc2d3216SMarc Zyngier int valid) 525cc2d3216SMarc Zyngier { 526cc2d3216SMarc Zyngier struct its_cmd_desc desc; 527cc2d3216SMarc Zyngier 528cc2d3216SMarc Zyngier desc.its_mapc_cmd.col = col; 529cc2d3216SMarc Zyngier desc.its_mapc_cmd.valid = !!valid; 530cc2d3216SMarc Zyngier 531cc2d3216SMarc Zyngier its_send_single_command(its, its_build_mapc_cmd, &desc); 532cc2d3216SMarc Zyngier } 533cc2d3216SMarc Zyngier 534cc2d3216SMarc Zyngier static void its_send_mapvi(struct its_device *dev, u32 irq_id, u32 id) 535cc2d3216SMarc Zyngier { 536cc2d3216SMarc Zyngier struct its_cmd_desc desc; 537cc2d3216SMarc Zyngier 538cc2d3216SMarc Zyngier desc.its_mapvi_cmd.dev = dev; 539cc2d3216SMarc Zyngier desc.its_mapvi_cmd.phys_id = irq_id; 540cc2d3216SMarc Zyngier desc.its_mapvi_cmd.event_id = id; 541cc2d3216SMarc Zyngier 542cc2d3216SMarc Zyngier its_send_single_command(dev->its, its_build_mapvi_cmd, &desc); 543cc2d3216SMarc Zyngier } 544cc2d3216SMarc Zyngier 545cc2d3216SMarc Zyngier static void its_send_movi(struct its_device *dev, 546cc2d3216SMarc Zyngier struct its_collection *col, u32 id) 547cc2d3216SMarc Zyngier { 548cc2d3216SMarc Zyngier struct its_cmd_desc desc; 549cc2d3216SMarc Zyngier 550cc2d3216SMarc Zyngier desc.its_movi_cmd.dev = dev; 551cc2d3216SMarc Zyngier desc.its_movi_cmd.col = col; 552591e5becSMarc Zyngier desc.its_movi_cmd.event_id = id; 553cc2d3216SMarc Zyngier 
554cc2d3216SMarc Zyngier its_send_single_command(dev->its, its_build_movi_cmd, &desc); 555cc2d3216SMarc Zyngier } 556cc2d3216SMarc Zyngier 557cc2d3216SMarc Zyngier static void its_send_discard(struct its_device *dev, u32 id) 558cc2d3216SMarc Zyngier { 559cc2d3216SMarc Zyngier struct its_cmd_desc desc; 560cc2d3216SMarc Zyngier 561cc2d3216SMarc Zyngier desc.its_discard_cmd.dev = dev; 562cc2d3216SMarc Zyngier desc.its_discard_cmd.event_id = id; 563cc2d3216SMarc Zyngier 564cc2d3216SMarc Zyngier its_send_single_command(dev->its, its_build_discard_cmd, &desc); 565cc2d3216SMarc Zyngier } 566cc2d3216SMarc Zyngier 567cc2d3216SMarc Zyngier static void its_send_invall(struct its_node *its, struct its_collection *col) 568cc2d3216SMarc Zyngier { 569cc2d3216SMarc Zyngier struct its_cmd_desc desc; 570cc2d3216SMarc Zyngier 571cc2d3216SMarc Zyngier desc.its_invall_cmd.col = col; 572cc2d3216SMarc Zyngier 573cc2d3216SMarc Zyngier its_send_single_command(its, its_build_invall_cmd, &desc); 574cc2d3216SMarc Zyngier } 575c48ed51cSMarc Zyngier 576c48ed51cSMarc Zyngier /* 577c48ed51cSMarc Zyngier * irqchip functions - assumes MSI, mostly. 
578c48ed51cSMarc Zyngier */ 579c48ed51cSMarc Zyngier 580c48ed51cSMarc Zyngier static inline u32 its_get_event_id(struct irq_data *d) 581c48ed51cSMarc Zyngier { 582c48ed51cSMarc Zyngier struct its_device *its_dev = irq_data_get_irq_chip_data(d); 583591e5becSMarc Zyngier return d->hwirq - its_dev->event_map.lpi_base; 584c48ed51cSMarc Zyngier } 585c48ed51cSMarc Zyngier 586c48ed51cSMarc Zyngier static void lpi_set_config(struct irq_data *d, bool enable) 587c48ed51cSMarc Zyngier { 588c48ed51cSMarc Zyngier struct its_device *its_dev = irq_data_get_irq_chip_data(d); 589c48ed51cSMarc Zyngier irq_hw_number_t hwirq = d->hwirq; 590c48ed51cSMarc Zyngier u32 id = its_get_event_id(d); 591c48ed51cSMarc Zyngier u8 *cfg = page_address(gic_rdists->prop_page) + hwirq - 8192; 592c48ed51cSMarc Zyngier 593c48ed51cSMarc Zyngier if (enable) 594c48ed51cSMarc Zyngier *cfg |= LPI_PROP_ENABLED; 595c48ed51cSMarc Zyngier else 596c48ed51cSMarc Zyngier *cfg &= ~LPI_PROP_ENABLED; 597c48ed51cSMarc Zyngier 598c48ed51cSMarc Zyngier /* 599c48ed51cSMarc Zyngier * Make the above write visible to the redistributors. 600c48ed51cSMarc Zyngier * And yes, we're flushing exactly: One. Single. Byte. 601c48ed51cSMarc Zyngier * Humpf... 
602c48ed51cSMarc Zyngier */ 603c48ed51cSMarc Zyngier if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING) 604328191c0SVladimir Murzin gic_flush_dcache_to_poc(cfg, sizeof(*cfg)); 605c48ed51cSMarc Zyngier else 606c48ed51cSMarc Zyngier dsb(ishst); 607c48ed51cSMarc Zyngier its_send_inv(its_dev, id); 608c48ed51cSMarc Zyngier } 609c48ed51cSMarc Zyngier 610c48ed51cSMarc Zyngier static void its_mask_irq(struct irq_data *d) 611c48ed51cSMarc Zyngier { 612c48ed51cSMarc Zyngier lpi_set_config(d, false); 613c48ed51cSMarc Zyngier } 614c48ed51cSMarc Zyngier 615c48ed51cSMarc Zyngier static void its_unmask_irq(struct irq_data *d) 616c48ed51cSMarc Zyngier { 617c48ed51cSMarc Zyngier lpi_set_config(d, true); 618c48ed51cSMarc Zyngier } 619c48ed51cSMarc Zyngier 620c48ed51cSMarc Zyngier static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val, 621c48ed51cSMarc Zyngier bool force) 622c48ed51cSMarc Zyngier { 623fbf8f40eSGanapatrao Kulkarni unsigned int cpu; 624fbf8f40eSGanapatrao Kulkarni const struct cpumask *cpu_mask = cpu_online_mask; 625c48ed51cSMarc Zyngier struct its_device *its_dev = irq_data_get_irq_chip_data(d); 626c48ed51cSMarc Zyngier struct its_collection *target_col; 627c48ed51cSMarc Zyngier u32 id = its_get_event_id(d); 628c48ed51cSMarc Zyngier 629fbf8f40eSGanapatrao Kulkarni /* lpi cannot be routed to a redistributor that is on a foreign node */ 630fbf8f40eSGanapatrao Kulkarni if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) { 631fbf8f40eSGanapatrao Kulkarni if (its_dev->its->numa_node >= 0) { 632fbf8f40eSGanapatrao Kulkarni cpu_mask = cpumask_of_node(its_dev->its->numa_node); 633fbf8f40eSGanapatrao Kulkarni if (!cpumask_intersects(mask_val, cpu_mask)) 634fbf8f40eSGanapatrao Kulkarni return -EINVAL; 635fbf8f40eSGanapatrao Kulkarni } 636fbf8f40eSGanapatrao Kulkarni } 637fbf8f40eSGanapatrao Kulkarni 638fbf8f40eSGanapatrao Kulkarni cpu = cpumask_any_and(mask_val, cpu_mask); 639fbf8f40eSGanapatrao Kulkarni 640c48ed51cSMarc Zyngier if 
(cpu >= nr_cpu_ids) 641c48ed51cSMarc Zyngier return -EINVAL; 642c48ed51cSMarc Zyngier 643c48ed51cSMarc Zyngier target_col = &its_dev->its->collections[cpu]; 644c48ed51cSMarc Zyngier its_send_movi(its_dev, target_col, id); 645591e5becSMarc Zyngier its_dev->event_map.col_map[id] = cpu; 646c48ed51cSMarc Zyngier 647c48ed51cSMarc Zyngier return IRQ_SET_MASK_OK_DONE; 648c48ed51cSMarc Zyngier } 649c48ed51cSMarc Zyngier 650b48ac83dSMarc Zyngier static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg) 651b48ac83dSMarc Zyngier { 652b48ac83dSMarc Zyngier struct its_device *its_dev = irq_data_get_irq_chip_data(d); 653b48ac83dSMarc Zyngier struct its_node *its; 654b48ac83dSMarc Zyngier u64 addr; 655b48ac83dSMarc Zyngier 656b48ac83dSMarc Zyngier its = its_dev->its; 657b48ac83dSMarc Zyngier addr = its->phys_base + GITS_TRANSLATER; 658b48ac83dSMarc Zyngier 659b11283ebSVladimir Murzin msg->address_lo = lower_32_bits(addr); 660b11283ebSVladimir Murzin msg->address_hi = upper_32_bits(addr); 661b48ac83dSMarc Zyngier msg->data = its_get_event_id(d); 66244bb7e24SRobin Murphy 66344bb7e24SRobin Murphy iommu_dma_map_msi_msg(d->irq, msg); 664b48ac83dSMarc Zyngier } 665b48ac83dSMarc Zyngier 666c48ed51cSMarc Zyngier static struct irq_chip its_irq_chip = { 667c48ed51cSMarc Zyngier .name = "ITS", 668c48ed51cSMarc Zyngier .irq_mask = its_mask_irq, 669c48ed51cSMarc Zyngier .irq_unmask = its_unmask_irq, 670004fa08dSAshok Kumar .irq_eoi = irq_chip_eoi_parent, 671c48ed51cSMarc Zyngier .irq_set_affinity = its_set_affinity, 672b48ac83dSMarc Zyngier .irq_compose_msi_msg = its_irq_compose_msi_msg, 673b48ac83dSMarc Zyngier }; 674b48ac83dSMarc Zyngier 675bf9529f8SMarc Zyngier /* 676bf9529f8SMarc Zyngier * How we allocate LPIs: 677bf9529f8SMarc Zyngier * 678bf9529f8SMarc Zyngier * The GIC has id_bits bits for interrupt identifiers. From there, we 679bf9529f8SMarc Zyngier * must subtract 8192 which are reserved for SGIs/PPIs/SPIs. 
Then, as 680bf9529f8SMarc Zyngier * we allocate LPIs by chunks of 32, we can shift the whole thing by 5 681bf9529f8SMarc Zyngier * bits to the right. 682bf9529f8SMarc Zyngier * 683bf9529f8SMarc Zyngier * This gives us (((1UL << id_bits) - 8192) >> 5) possible allocations. 684bf9529f8SMarc Zyngier */ 685bf9529f8SMarc Zyngier #define IRQS_PER_CHUNK_SHIFT 5 686bf9529f8SMarc Zyngier #define IRQS_PER_CHUNK (1 << IRQS_PER_CHUNK_SHIFT) 687bf9529f8SMarc Zyngier 688bf9529f8SMarc Zyngier static unsigned long *lpi_bitmap; 689bf9529f8SMarc Zyngier static u32 lpi_chunks; 690bf9529f8SMarc Zyngier static DEFINE_SPINLOCK(lpi_lock); 691bf9529f8SMarc Zyngier 692bf9529f8SMarc Zyngier static int its_lpi_to_chunk(int lpi) 693bf9529f8SMarc Zyngier { 694bf9529f8SMarc Zyngier return (lpi - 8192) >> IRQS_PER_CHUNK_SHIFT; 695bf9529f8SMarc Zyngier } 696bf9529f8SMarc Zyngier 697bf9529f8SMarc Zyngier static int its_chunk_to_lpi(int chunk) 698bf9529f8SMarc Zyngier { 699bf9529f8SMarc Zyngier return (chunk << IRQS_PER_CHUNK_SHIFT) + 8192; 700bf9529f8SMarc Zyngier } 701bf9529f8SMarc Zyngier 70204a0e4deSTomasz Nowicki static int __init its_lpi_init(u32 id_bits) 703bf9529f8SMarc Zyngier { 704bf9529f8SMarc Zyngier lpi_chunks = its_lpi_to_chunk(1UL << id_bits); 705bf9529f8SMarc Zyngier 706bf9529f8SMarc Zyngier lpi_bitmap = kzalloc(BITS_TO_LONGS(lpi_chunks) * sizeof(long), 707bf9529f8SMarc Zyngier GFP_KERNEL); 708bf9529f8SMarc Zyngier if (!lpi_bitmap) { 709bf9529f8SMarc Zyngier lpi_chunks = 0; 710bf9529f8SMarc Zyngier return -ENOMEM; 711bf9529f8SMarc Zyngier } 712bf9529f8SMarc Zyngier 713bf9529f8SMarc Zyngier pr_info("ITS: Allocated %d chunks for LPIs\n", (int)lpi_chunks); 714bf9529f8SMarc Zyngier return 0; 715bf9529f8SMarc Zyngier } 716bf9529f8SMarc Zyngier 717bf9529f8SMarc Zyngier static unsigned long *its_lpi_alloc_chunks(int nr_irqs, int *base, int *nr_ids) 718bf9529f8SMarc Zyngier { 719bf9529f8SMarc Zyngier unsigned long *bitmap = NULL; 720bf9529f8SMarc Zyngier int chunk_id; 721bf9529f8SMarc 
Zyngier int nr_chunks; 722bf9529f8SMarc Zyngier int i; 723bf9529f8SMarc Zyngier 724bf9529f8SMarc Zyngier nr_chunks = DIV_ROUND_UP(nr_irqs, IRQS_PER_CHUNK); 725bf9529f8SMarc Zyngier 726bf9529f8SMarc Zyngier spin_lock(&lpi_lock); 727bf9529f8SMarc Zyngier 728bf9529f8SMarc Zyngier do { 729bf9529f8SMarc Zyngier chunk_id = bitmap_find_next_zero_area(lpi_bitmap, lpi_chunks, 730bf9529f8SMarc Zyngier 0, nr_chunks, 0); 731bf9529f8SMarc Zyngier if (chunk_id < lpi_chunks) 732bf9529f8SMarc Zyngier break; 733bf9529f8SMarc Zyngier 734bf9529f8SMarc Zyngier nr_chunks--; 735bf9529f8SMarc Zyngier } while (nr_chunks > 0); 736bf9529f8SMarc Zyngier 737bf9529f8SMarc Zyngier if (!nr_chunks) 738bf9529f8SMarc Zyngier goto out; 739bf9529f8SMarc Zyngier 740bf9529f8SMarc Zyngier bitmap = kzalloc(BITS_TO_LONGS(nr_chunks * IRQS_PER_CHUNK) * sizeof (long), 741bf9529f8SMarc Zyngier GFP_ATOMIC); 742bf9529f8SMarc Zyngier if (!bitmap) 743bf9529f8SMarc Zyngier goto out; 744bf9529f8SMarc Zyngier 745bf9529f8SMarc Zyngier for (i = 0; i < nr_chunks; i++) 746bf9529f8SMarc Zyngier set_bit(chunk_id + i, lpi_bitmap); 747bf9529f8SMarc Zyngier 748bf9529f8SMarc Zyngier *base = its_chunk_to_lpi(chunk_id); 749bf9529f8SMarc Zyngier *nr_ids = nr_chunks * IRQS_PER_CHUNK; 750bf9529f8SMarc Zyngier 751bf9529f8SMarc Zyngier out: 752bf9529f8SMarc Zyngier spin_unlock(&lpi_lock); 753bf9529f8SMarc Zyngier 754c8415b94SMarc Zyngier if (!bitmap) 755c8415b94SMarc Zyngier *base = *nr_ids = 0; 756c8415b94SMarc Zyngier 757bf9529f8SMarc Zyngier return bitmap; 758bf9529f8SMarc Zyngier } 759bf9529f8SMarc Zyngier 760591e5becSMarc Zyngier static void its_lpi_free(struct event_lpi_map *map) 761bf9529f8SMarc Zyngier { 762591e5becSMarc Zyngier int base = map->lpi_base; 763591e5becSMarc Zyngier int nr_ids = map->nr_lpis; 764bf9529f8SMarc Zyngier int lpi; 765bf9529f8SMarc Zyngier 766bf9529f8SMarc Zyngier spin_lock(&lpi_lock); 767bf9529f8SMarc Zyngier 768bf9529f8SMarc Zyngier for (lpi = base; lpi < (base + nr_ids); lpi += IRQS_PER_CHUNK) { 
769bf9529f8SMarc Zyngier int chunk = its_lpi_to_chunk(lpi); 770bf9529f8SMarc Zyngier BUG_ON(chunk > lpi_chunks); 771bf9529f8SMarc Zyngier if (test_bit(chunk, lpi_bitmap)) { 772bf9529f8SMarc Zyngier clear_bit(chunk, lpi_bitmap); 773bf9529f8SMarc Zyngier } else { 774bf9529f8SMarc Zyngier pr_err("Bad LPI chunk %d\n", chunk); 775bf9529f8SMarc Zyngier } 776bf9529f8SMarc Zyngier } 777bf9529f8SMarc Zyngier 778bf9529f8SMarc Zyngier spin_unlock(&lpi_lock); 779bf9529f8SMarc Zyngier 780591e5becSMarc Zyngier kfree(map->lpi_map); 781591e5becSMarc Zyngier kfree(map->col_map); 782bf9529f8SMarc Zyngier } 7831ac19ca6SMarc Zyngier 7841ac19ca6SMarc Zyngier /* 7851ac19ca6SMarc Zyngier * We allocate 64kB for PROPBASE. That gives us at most 64K LPIs to 7861ac19ca6SMarc Zyngier * deal with (one configuration byte per interrupt). PENDBASE has to 7871ac19ca6SMarc Zyngier * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI). 7881ac19ca6SMarc Zyngier */ 7891ac19ca6SMarc Zyngier #define LPI_PROPBASE_SZ SZ_64K 7901ac19ca6SMarc Zyngier #define LPI_PENDBASE_SZ (LPI_PROPBASE_SZ / 8 + SZ_1K) 7911ac19ca6SMarc Zyngier 7921ac19ca6SMarc Zyngier /* 7931ac19ca6SMarc Zyngier * This is how many bits of ID we need, including the useless ones. 
 */
#define LPI_NRBITS		ilog2(LPI_PROPBASE_SZ + SZ_8K)

/* Default LPI priority programmed into the property table */
#define LPI_PROP_DEFAULT_PRIO	0xa0

/*
 * Allocate and initialise the global LPI property table (shared by all
 * redistributors): every LPI starts at default priority, Group-1,
 * disabled. Returns 0 or -ENOMEM.
 */
static int __init its_alloc_lpi_tables(void)
{
	phys_addr_t paddr;

	gic_rdists->prop_page = alloc_pages(GFP_NOWAIT,
					    get_order(LPI_PROPBASE_SZ));
	if (!gic_rdists->prop_page) {
		pr_err("Failed to allocate PROPBASE\n");
		return -ENOMEM;
	}

	paddr = page_to_phys(gic_rdists->prop_page);
	pr_info("GIC: using LPI property table @%pa\n", &paddr);

	/* Priority 0xa0, Group-1, disabled */
	memset(page_address(gic_rdists->prop_page),
	       LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1,
	       LPI_PROPBASE_SZ);

	/* Make sure the GIC will observe the written configuration */
	gic_flush_dcache_to_poc(page_address(gic_rdists->prop_page), LPI_PROPBASE_SZ);

	return 0;
}

/* Human-readable names for the GITS_BASERn table types, for log messages */
static const char *its_base_type_string[] = {
	[GITS_BASER_TYPE_DEVICE]	= "Devices",
	[GITS_BASER_TYPE_VCPU]		= "Virtual CPUs",
	[GITS_BASER_TYPE_CPU]		= "Physical CPUs",
	[GITS_BASER_TYPE_COLLECTION]	= "Interrupt Collections",
	[GITS_BASER_TYPE_RESERVED5]	= "Reserved (5)",
	[GITS_BASER_TYPE_RESERVED6]	= "Reserved (6)",
	[GITS_BASER_TYPE_RESERVED7]	= "Reserved (7)",
};

/* Read the GITS_BASERn register corresponding to this baser slot */
static u64 its_read_baser(struct its_node *its, struct its_baser *baser)
{
	u32 idx = baser - its->tables;

	return gits_read_baser(its->base + GITS_BASER + (idx << 3));
}

/*
 * Write GITS_BASERn and read it back into baser->val, since the HW may
 * silently refuse some of the requested attribute bits.
 */
static void its_write_baser(struct its_node *its, struct its_baser *baser,
			    u64 val)
{
	u32 idx = baser - its->tables;

	gits_write_baser(val, its->base + GITS_BASER + (idx << 3));
	baser->val = its_read_baser(its, baser);
}

/*
 * Allocate backing memory for one GITS_BASERn table and program the
 * register, negotiating with the HW: retries with downgraded
 * shareability/cacheability or smaller page size if the written
 * attributes don't stick. Returns 0, -ENOMEM, or -ENXIO if the
 * register never accepts our settings.
 */
static int its_setup_baser(struct its_node *its, struct its_baser *baser,
			   u64 cache, u64 shr, u32 psz, u32 order,
			   bool indirect)
{
	u64 val = its_read_baser(its, baser);
	u64 esz = GITS_BASER_ENTRY_SIZE(val);
	u64 type = GITS_BASER_TYPE(val);
	u32 alloc_pages;
	void *base;
	u64 tmp;

retry_alloc_baser:
	/* Convert our allocation order into ITS pages of size psz */
	alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
	if (alloc_pages > GITS_BASER_PAGES_MAX) {
		pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n",
			&its->phys_base, its_base_type_string[type],
			alloc_pages, GITS_BASER_PAGES_MAX);
		alloc_pages = GITS_BASER_PAGES_MAX;
		order = get_order(GITS_BASER_PAGES_MAX * psz);
	}

	base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!base)
		return -ENOMEM;

retry_baser:
	val = (virt_to_phys(base)				 |
		(type << GITS_BASER_TYPE_SHIFT)			 |
		((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT)	 |
		((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT)	 |
		cache						 |
		shr						 |
		GITS_BASER_VALID);

	val |= indirect ? GITS_BASER_INDIRECT : 0x0;

	switch (psz) {
	case SZ_4K:
		val |= GITS_BASER_PAGE_SIZE_4K;
		break;
	case SZ_16K:
		val |= GITS_BASER_PAGE_SIZE_16K;
		break;
	case SZ_64K:
		val |= GITS_BASER_PAGE_SIZE_64K;
		break;
	}

	its_write_baser(its, baser, val);
	tmp = baser->val;

	if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
		/*
		 * Shareability didn't stick. Just use
		 * whatever the read reported, which is likely
		 * to be the only thing this redistributor
		 * supports. If that's zero, make it
		 * non-cacheable as well.
		 */
		shr = tmp & GITS_BASER_SHAREABILITY_MASK;
		if (!shr) {
			cache = GITS_BASER_nC;
			/* Non-coherent HW: push the zeroed table to PoC */
			gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));
		}
		goto retry_baser;
	}

	if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) {
		/*
		 * Page size didn't stick. Let's try a smaller
		 * size and retry. If we reach 4K, then
		 * something is horribly wrong...
		 */
		free_pages((unsigned long)base, order);
		baser->base = NULL;

		switch (psz) {
		case SZ_16K:
			psz = SZ_4K;
			goto retry_alloc_baser;
		case SZ_64K:
			psz = SZ_16K;
			goto retry_alloc_baser;
		}
	}

	if (val != tmp) {
		pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n",
		       &its->phys_base, its_base_type_string[type],
		       val, tmp);
		free_pages((unsigned long)base, order);
		return -ENXIO;
	}

	/* Record the negotiated settings for later BASERs and teardown */
	baser->order = order;
	baser->base = base;
	baser->psz = psz;
	tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz;

	pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n",
		&its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp),
		its_base_type_string[type],
		(unsigned long)virt_to_phys(base),
		indirect ? "indirect" : "flat", (int)esz,
		psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);

	return 0;
}

/*
 * Decide whether the device table should be two-level (indirect) and
 * compute the level-1 allocation order accordingly. Returns true if
 * the HW accepted the indirect bit, false for a flat table.
 */
static bool its_parse_baser_device(struct its_node *its, struct its_baser *baser,
				   u32 psz, u32 *order)
{
	u64 esz = GITS_BASER_ENTRY_SIZE(its_read_baser(its, baser));
	u64 val = GITS_BASER_InnerShareable | GITS_BASER_WaWb;
	u32 ids = its->device_ids;
	u32 new_order = *order;
	bool indirect = false;

	/* No need to enable Indirection if memory requirement < (psz*2)bytes */
	if ((esz << ids) > (psz * 2)) {
		/*
		 * Find out whether hw supports a single or two-level
		 * table by reading bit at offset '62' after writing '1' to it.
		 */
		its_write_baser(its, baser, val | GITS_BASER_INDIRECT);
		indirect = !!(baser->val & GITS_BASER_INDIRECT);

		if (indirect) {
			/*
			 * The size of the lvl2 table is equal to ITS page size
			 * which is 'psz'. For computing lvl1 table size,
			 * subtract ID bits that sparse lvl2 table from 'ids'
			 * which is reported by ITS hardware times lvl1 table
			 * entry size.
			 */
			ids -= ilog2(psz / (int)esz);
			esz = GITS_LVL1_ENTRY_SIZE;
		}
	}

	/*
	 * Allocate as many entries as required to fit the
	 * range of device IDs that the ITS can grok... The ID
	 * space being incredibly sparse, this results in a
	 * massive waste of memory if two-level device table
	 * feature is not supported by hardware.
	 */
	new_order = max_t(u32, get_order(esz << ids), new_order);
	if (new_order >= MAX_ORDER) {
		new_order = MAX_ORDER - 1;
		ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz);
		pr_warn("ITS@%pa: Device Table too large, reduce ids %u->%u\n",
			&its->phys_base, its->device_ids, ids);
	}

	*order = new_order;

	return indirect;
}

/* Free every BASER backing allocation made by its_setup_baser() */
static void its_free_tables(struct its_node *its)
{
	int i;

	for (i = 0; i < GITS_BASER_NR_REGS; i++) {
		if (its->tables[i].base) {
			free_pages((unsigned long)its->tables[i].base,
				   its->tables[i].order);
			its->tables[i].base = NULL;
		}
	}
}

/*
 * Walk all GITS_BASERn registers and allocate/program each implemented
 * table, propagating the page size and cache/shareability attributes
 * that the HW actually accepted from one BASER to the next.
 * Returns 0 or a negative errno (with all tables freed on failure).
 */
static int its_alloc_tables(struct its_node *its)
{
	u64 typer = gic_read_typer(its->base + GITS_TYPER);
	u32 ids = GITS_TYPER_DEVBITS(typer);
	u64 shr = GITS_BASER_InnerShareable;
	u64 cache = GITS_BASER_WaWb;
	u32 psz = SZ_64K;
	int err, i;

	if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375) {
		/*
		 * erratum 22375: only alloc 8MB table size
		 * erratum 24313: ignore memory access type
		 */
		cache = GITS_BASER_nCnB;
		ids = 0x14;		/* 20 bits, 8MB */
	}

	its->device_ids = ids;

	for (i = 0; i < GITS_BASER_NR_REGS; i++) {
		struct its_baser *baser = its->tables + i;
		u64 val = its_read_baser(its, baser);
		u64 type = GITS_BASER_TYPE(val);
		u32 order = get_order(psz);
		bool indirect = false;

		if (type == GITS_BASER_TYPE_NONE)
			continue;

		if (type == GITS_BASER_TYPE_DEVICE)
			indirect = its_parse_baser_device(its, baser, psz, &order);

		err = its_setup_baser(its, baser, cache, shr, psz, order, indirect);
		if (err < 0) {
			its_free_tables(its);
			return err;
		}

		/* Update settings which will be used for next BASERn */
		psz = baser->psz;
		cache = baser->val & GITS_BASER_CACHEABILITY_MASK;
		shr = baser->val & GITS_BASER_SHAREABILITY_MASK;
	}

	return 0;
}

/* Allocate one collection slot per possible CPU for this ITS */
static int its_alloc_collections(struct its_node *its)
{
	its->collections = kzalloc(nr_cpu_ids * sizeof(*its->collections),
				   GFP_KERNEL);
	if (!its->collections)
		return -ENOMEM;

	return 0;
}

/*
 * Per-CPU LPI bring-up: allocate this CPU's pending table (once),
 * program the redistributor's PROPBASER/PENDBASER — downgrading to
 * non-shareable/non-cacheable if the HW refuses our attributes — and
 * enable LPIs. Runs on the CPU being initialised.
 */
static void its_cpu_init_lpis(void)
{
	void __iomem *rbase = gic_data_rdist_rd_base();
	struct page *pend_page;
	u64 val, tmp;

	/* If we didn't allocate the pending table yet, do it now */
	pend_page = gic_data_rdist()->pend_page;
	if (!pend_page) {
		phys_addr_t paddr;
		/*
		 * The pending pages have to be at least 64kB aligned,
		 * hence the 'max(LPI_PENDBASE_SZ, SZ_64K)' below.
		 */
		pend_page = alloc_pages(GFP_NOWAIT | __GFP_ZERO,
					get_order(max(LPI_PENDBASE_SZ, SZ_64K)));
		if (!pend_page) {
			pr_err("Failed to allocate PENDBASE for CPU%d\n",
			       smp_processor_id());
			return;
		}

		/* Make sure the GIC will observe the zero-ed page */
		gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ);

		paddr = page_to_phys(pend_page);
		pr_info("CPU%d: using LPI pending table @%pa\n",
			smp_processor_id(), &paddr);
		gic_data_rdist()->pend_page = pend_page;
	}

	/* Disable LPIs */
	val = readl_relaxed(rbase + GICR_CTLR);
	val &= ~GICR_CTLR_ENABLE_LPIS;
	writel_relaxed(val, rbase + GICR_CTLR);

	/*
	 * Make sure any change to the table is observable by the GIC.
	 */
	dsb(sy);

	/* set PROPBASE */
	val = (page_to_phys(gic_rdists->prop_page) |
	       GICR_PROPBASER_InnerShareable |
	       GICR_PROPBASER_WaWb |
	       ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));

	gicr_write_propbaser(val, rbase + GICR_PROPBASER);
	tmp = gicr_read_propbaser(rbase + GICR_PROPBASER);

	if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
		if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
			/*
			 * The HW reports non-shareable, we must
			 * remove the cacheability attributes as
			 * well.
			 */
			val &= ~(GICR_PROPBASER_SHAREABILITY_MASK |
				 GICR_PROPBASER_CACHEABILITY_MASK);
			val |= GICR_PROPBASER_nC;
			gicr_write_propbaser(val, rbase + GICR_PROPBASER);
		}
		pr_info_once("GIC: using cache flushing for LPI property table\n");
		gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
	}

	/* set PENDBASE */
	val = (page_to_phys(pend_page) |
	       GICR_PENDBASER_InnerShareable |
	       GICR_PENDBASER_WaWb);

	gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
	tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER);

	if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
		/*
		 * The HW reports non-shareable, we must remove the
		 * cacheability attributes as well.
		 */
		val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
			 GICR_PENDBASER_CACHEABILITY_MASK);
		val |= GICR_PENDBASER_nC;
		gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
	}

	/* Enable LPIs */
	val = readl_relaxed(rbase + GICR_CTLR);
	val |= GICR_CTLR_ENABLE_LPIS;
	writel_relaxed(val, rbase + GICR_CTLR);

	/* Make sure the GIC has seen the above */
	dsb(sy);
}

/*
 * Map this CPU's collection on every ITS (MAPC) and invalidate any
 * cached configuration for it (INVALL). The target is either the
 * redistributor's physical address or its linear CPU number, depending
 * on what GITS_TYPER.PTA says the ITS wants.
 */
static void its_cpu_init_collection(void)
{
	struct its_node *its;
	int cpu;

	spin_lock(&its_lock);
	cpu = smp_processor_id();

	list_for_each_entry(its, &its_nodes, entry) {
		u64 target;

		/* avoid cross node collections and its mapping */
		if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
			struct device_node *cpu_node;

			cpu_node = of_get_cpu_node(cpu, NULL);
			if (its->numa_node != NUMA_NO_NODE &&
				its->numa_node != of_node_to_nid(cpu_node))
				continue;
		}

		/*
		 * We now have to bind each collection to its target
		 * redistributor.
		 */
		if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
			/*
			 * This ITS wants the physical address of the
			 * redistributor.
			 */
			target = gic_data_rdist()->phys_base;
		} else {
			/*
			 * This ITS wants a linear CPU number.
			 */
			target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
			target = GICR_TYPER_CPU_NUMBER(target) << 16;
		}

		/* Perform collection mapping */
		its->collections[cpu].target_address = target;
		its->collections[cpu].col_id = cpu;

		its_send_mapc(its, &its->collections[cpu], 1);
		its_send_invall(its, &its->collections[cpu]);
	}

	spin_unlock(&its_lock);
}

/* Look up an already-created its_device by device ID; NULL if absent */
static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
{
	struct its_device *its_dev = NULL, *tmp;
	unsigned long flags;

	raw_spin_lock_irqsave(&its->lock, flags);

	list_for_each_entry(tmp, &its->its_device_list, entry) {
		if (tmp->device_id == dev_id) {
			its_dev = tmp;
			break;
		}
	}

	raw_spin_unlock_irqrestore(&its->lock, flags);

	return its_dev;
}

/* Find the baser slot of the given GITS_BASER type; NULL if none */
static struct its_baser *its_get_baser(struct its_node *its, u32 type)
{
	int i;

	for (i = 0; i < GITS_BASER_NR_REGS; i++) {
		if (GITS_BASER_TYPE(its->tables[i].val) == type)
			return &its->tables[i];
	}

	return NULL;
}

/*
 * Make sure the device table can hold an entry for dev_id, allocating
 * a level-2 page on demand for indirect tables. Returns true if dev_id
 * is usable, false if it is out of range or allocation failed.
 */
static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
{
	struct its_baser *baser;
	struct page *page;
	u32 esz, idx;
	__le64 *table;

	baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);

	/* Don't allow device id that exceeds ITS hardware limit */
	if (!baser)
		return (ilog2(dev_id) < its->device_ids);

	/* Don't allow device id that exceeds single, flat table limit */
	esz = GITS_BASER_ENTRY_SIZE(baser->val);
	if (!(baser->val & GITS_BASER_INDIRECT))
		return (dev_id < (PAGE_ORDER_TO_SIZE(baser->order) / esz));

	/* Compute 1st level table index & check if that exceeds table limit */
	idx = dev_id >> ilog2(baser->psz / esz);
	if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE))
		return false;

	table = baser->base;

	/* Allocate memory for 2nd level table */
	if (!table[idx]) {
		page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(baser->psz));
		if (!page)
			return false;

		/* Flush Lvl2 table to PoC if hw doesn't support coherency */
		if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
			gic_flush_dcache_to_poc(page_address(page), baser->psz);

		table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);

		/* Flush Lvl1 entry to PoC if hw doesn't support coherency */
		if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
			gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);

		/* Ensure updated table contents are visible to ITS hardware */
		dsb(sy);
	}

	return true;
}

/*
 * Create an its_device for dev_id with room for nvecs vectors: allocate
 * the ITT, an LPI chunk range and the event->collection map, link the
 * device into the ITS list and issue MAPD. Returns NULL on any failure.
 */
static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
					    int nvecs)
{
	struct its_device *dev;
	unsigned long *lpi_map;
	unsigned long flags;
	u16 *col_map = NULL;
	void *itt;
	int lpi_base;
	int nr_lpis;
	int nr_ites;
	int sz;

	if (!its_alloc_device_table(its, dev_id))
		return NULL;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	/*
	 * At least one bit of EventID is being used, hence a minimum
	 * of two entries. No, the architecture doesn't let you
	 * express an ITT with a single entry.
	 */
	nr_ites = max(2UL, roundup_pow_of_two(nvecs));
	sz = nr_ites * its->ite_size;
	/* Over-allocate so the ITT can be aligned to ITS_ITT_ALIGN */
	sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
	itt = kzalloc(sz, GFP_KERNEL);
	lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis);
	if (lpi_map)
		col_map = kzalloc(sizeof(*col_map) * nr_lpis, GFP_KERNEL);

	if (!dev || !itt || !lpi_map || !col_map) {
		/* kfree(NULL) is fine, so no individual checks needed */
		kfree(dev);
		kfree(itt);
		kfree(lpi_map);
		kfree(col_map);
		return NULL;
	}

	/* The ITS reads the ITT directly from memory */
	gic_flush_dcache_to_poc(itt, sz);

	dev->its = its;
	dev->itt = itt;
	dev->nr_ites = nr_ites;
	dev->event_map.lpi_map = lpi_map;
	dev->event_map.col_map = col_map;
	dev->event_map.lpi_base = lpi_base;
	dev->event_map.nr_lpis = nr_lpis;
	dev->device_id = dev_id;
	INIT_LIST_HEAD(&dev->entry);

	raw_spin_lock_irqsave(&its->lock, flags);
	list_add(&dev->entry, &its->its_device_list);
	raw_spin_unlock_irqrestore(&its->lock, flags);

	/* Map device to its ITT */
	its_send_mapd(dev, 1);

	return dev;
}

/* Unlink a device from its ITS and free its ITT and descriptor */
static void its_free_device(struct its_device *its_dev)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&its_dev->its->lock, flags);
	list_del(&its_dev->entry);
	raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
	kfree(its_dev->itt);
	kfree(its_dev);
}

/*
 * Claim the first free event slot of a device and return its hwirq
 * (LPI number) through *hwirq. Returns 0 or -ENOSPC when the device's
 * LPI range is exhausted.
 */
static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq)
{
	int idx;

	idx = find_first_zero_bit(dev->event_map.lpi_map,
				  dev->event_map.nr_lpis);
	if (idx == dev->event_map.nr_lpis)
		return -ENOSPC;

	*hwirq = dev->event_map.lpi_base + idx;
	set_bit(idx, dev->event_map.lpi_map);

	return 0;
}
139054456db9SMarc Zyngier static int its_msi_prepare(struct irq_domain *domain, struct device *dev, 1391b48ac83dSMarc Zyngier int nvec, msi_alloc_info_t *info) 1392b48ac83dSMarc Zyngier { 1393b48ac83dSMarc Zyngier struct its_node *its; 1394b48ac83dSMarc Zyngier struct its_device *its_dev; 139554456db9SMarc Zyngier struct msi_domain_info *msi_info; 139654456db9SMarc Zyngier u32 dev_id; 1397b48ac83dSMarc Zyngier 139854456db9SMarc Zyngier /* 139954456db9SMarc Zyngier * We ignore "dev" entierely, and rely on the dev_id that has 140054456db9SMarc Zyngier * been passed via the scratchpad. This limits this domain's 140154456db9SMarc Zyngier * usefulness to upper layers that definitely know that they 140254456db9SMarc Zyngier * are built on top of the ITS. 140354456db9SMarc Zyngier */ 140454456db9SMarc Zyngier dev_id = info->scratchpad[0].ul; 140554456db9SMarc Zyngier 140654456db9SMarc Zyngier msi_info = msi_get_domain_info(domain); 140754456db9SMarc Zyngier its = msi_info->data; 140854456db9SMarc Zyngier 1409f130420eSMarc Zyngier its_dev = its_find_device(its, dev_id); 1410e8137f4fSMarc Zyngier if (its_dev) { 1411e8137f4fSMarc Zyngier /* 1412e8137f4fSMarc Zyngier * We already have seen this ID, probably through 1413e8137f4fSMarc Zyngier * another alias (PCI bridge of some sort). No need to 1414e8137f4fSMarc Zyngier * create the device. 
1415e8137f4fSMarc Zyngier */ 1416f130420eSMarc Zyngier pr_debug("Reusing ITT for devID %x\n", dev_id); 1417e8137f4fSMarc Zyngier goto out; 1418e8137f4fSMarc Zyngier } 1419b48ac83dSMarc Zyngier 1420f130420eSMarc Zyngier its_dev = its_create_device(its, dev_id, nvec); 1421b48ac83dSMarc Zyngier if (!its_dev) 1422b48ac83dSMarc Zyngier return -ENOMEM; 1423b48ac83dSMarc Zyngier 1424f130420eSMarc Zyngier pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec)); 1425e8137f4fSMarc Zyngier out: 1426b48ac83dSMarc Zyngier info->scratchpad[0].ptr = its_dev; 1427b48ac83dSMarc Zyngier return 0; 1428b48ac83dSMarc Zyngier } 1429b48ac83dSMarc Zyngier 143054456db9SMarc Zyngier static struct msi_domain_ops its_msi_domain_ops = { 143154456db9SMarc Zyngier .msi_prepare = its_msi_prepare, 143254456db9SMarc Zyngier }; 143354456db9SMarc Zyngier 1434b48ac83dSMarc Zyngier static int its_irq_gic_domain_alloc(struct irq_domain *domain, 1435b48ac83dSMarc Zyngier unsigned int virq, 1436b48ac83dSMarc Zyngier irq_hw_number_t hwirq) 1437b48ac83dSMarc Zyngier { 1438f833f57fSMarc Zyngier struct irq_fwspec fwspec; 1439b48ac83dSMarc Zyngier 1440f833f57fSMarc Zyngier if (irq_domain_get_of_node(domain->parent)) { 1441f833f57fSMarc Zyngier fwspec.fwnode = domain->parent->fwnode; 1442f833f57fSMarc Zyngier fwspec.param_count = 3; 1443f833f57fSMarc Zyngier fwspec.param[0] = GIC_IRQ_TYPE_LPI; 1444f833f57fSMarc Zyngier fwspec.param[1] = hwirq; 1445f833f57fSMarc Zyngier fwspec.param[2] = IRQ_TYPE_EDGE_RISING; 14463f010cf1STomasz Nowicki } else if (is_fwnode_irqchip(domain->parent->fwnode)) { 14473f010cf1STomasz Nowicki fwspec.fwnode = domain->parent->fwnode; 14483f010cf1STomasz Nowicki fwspec.param_count = 2; 14493f010cf1STomasz Nowicki fwspec.param[0] = hwirq; 14503f010cf1STomasz Nowicki fwspec.param[1] = IRQ_TYPE_EDGE_RISING; 1451f833f57fSMarc Zyngier } else { 1452f833f57fSMarc Zyngier return -EINVAL; 1453f833f57fSMarc Zyngier } 1454b48ac83dSMarc Zyngier 1455f833f57fSMarc Zyngier return 
irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec); 1456b48ac83dSMarc Zyngier } 1457b48ac83dSMarc Zyngier 1458b48ac83dSMarc Zyngier static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, 1459b48ac83dSMarc Zyngier unsigned int nr_irqs, void *args) 1460b48ac83dSMarc Zyngier { 1461b48ac83dSMarc Zyngier msi_alloc_info_t *info = args; 1462b48ac83dSMarc Zyngier struct its_device *its_dev = info->scratchpad[0].ptr; 1463b48ac83dSMarc Zyngier irq_hw_number_t hwirq; 1464b48ac83dSMarc Zyngier int err; 1465b48ac83dSMarc Zyngier int i; 1466b48ac83dSMarc Zyngier 1467b48ac83dSMarc Zyngier for (i = 0; i < nr_irqs; i++) { 1468b48ac83dSMarc Zyngier err = its_alloc_device_irq(its_dev, &hwirq); 1469b48ac83dSMarc Zyngier if (err) 1470b48ac83dSMarc Zyngier return err; 1471b48ac83dSMarc Zyngier 1472b48ac83dSMarc Zyngier err = its_irq_gic_domain_alloc(domain, virq + i, hwirq); 1473b48ac83dSMarc Zyngier if (err) 1474b48ac83dSMarc Zyngier return err; 1475b48ac83dSMarc Zyngier 1476b48ac83dSMarc Zyngier irq_domain_set_hwirq_and_chip(domain, virq + i, 1477b48ac83dSMarc Zyngier hwirq, &its_irq_chip, its_dev); 1478f130420eSMarc Zyngier pr_debug("ID:%d pID:%d vID:%d\n", 1479591e5becSMarc Zyngier (int)(hwirq - its_dev->event_map.lpi_base), 1480591e5becSMarc Zyngier (int) hwirq, virq + i); 1481b48ac83dSMarc Zyngier } 1482b48ac83dSMarc Zyngier 1483b48ac83dSMarc Zyngier return 0; 1484b48ac83dSMarc Zyngier } 1485b48ac83dSMarc Zyngier 1486aca268dfSMarc Zyngier static void its_irq_domain_activate(struct irq_domain *domain, 1487aca268dfSMarc Zyngier struct irq_data *d) 1488aca268dfSMarc Zyngier { 1489aca268dfSMarc Zyngier struct its_device *its_dev = irq_data_get_irq_chip_data(d); 1490aca268dfSMarc Zyngier u32 event = its_get_event_id(d); 1491fbf8f40eSGanapatrao Kulkarni const struct cpumask *cpu_mask = cpu_online_mask; 1492fbf8f40eSGanapatrao Kulkarni 1493fbf8f40eSGanapatrao Kulkarni /* get the cpu_mask of local node */ 1494fbf8f40eSGanapatrao Kulkarni if 
(its_dev->its->numa_node >= 0) 1495fbf8f40eSGanapatrao Kulkarni cpu_mask = cpumask_of_node(its_dev->its->numa_node); 1496aca268dfSMarc Zyngier 1497591e5becSMarc Zyngier /* Bind the LPI to the first possible CPU */ 1498fbf8f40eSGanapatrao Kulkarni its_dev->event_map.col_map[event] = cpumask_first(cpu_mask); 1499591e5becSMarc Zyngier 1500aca268dfSMarc Zyngier /* Map the GIC IRQ and event to the device */ 1501aca268dfSMarc Zyngier its_send_mapvi(its_dev, d->hwirq, event); 1502aca268dfSMarc Zyngier } 1503aca268dfSMarc Zyngier 1504aca268dfSMarc Zyngier static void its_irq_domain_deactivate(struct irq_domain *domain, 1505aca268dfSMarc Zyngier struct irq_data *d) 1506aca268dfSMarc Zyngier { 1507aca268dfSMarc Zyngier struct its_device *its_dev = irq_data_get_irq_chip_data(d); 1508aca268dfSMarc Zyngier u32 event = its_get_event_id(d); 1509aca268dfSMarc Zyngier 1510aca268dfSMarc Zyngier /* Stop the delivery of interrupts */ 1511aca268dfSMarc Zyngier its_send_discard(its_dev, event); 1512aca268dfSMarc Zyngier } 1513aca268dfSMarc Zyngier 1514b48ac83dSMarc Zyngier static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq, 1515b48ac83dSMarc Zyngier unsigned int nr_irqs) 1516b48ac83dSMarc Zyngier { 1517b48ac83dSMarc Zyngier struct irq_data *d = irq_domain_get_irq_data(domain, virq); 1518b48ac83dSMarc Zyngier struct its_device *its_dev = irq_data_get_irq_chip_data(d); 1519b48ac83dSMarc Zyngier int i; 1520b48ac83dSMarc Zyngier 1521b48ac83dSMarc Zyngier for (i = 0; i < nr_irqs; i++) { 1522b48ac83dSMarc Zyngier struct irq_data *data = irq_domain_get_irq_data(domain, 1523b48ac83dSMarc Zyngier virq + i); 1524aca268dfSMarc Zyngier u32 event = its_get_event_id(data); 1525b48ac83dSMarc Zyngier 1526b48ac83dSMarc Zyngier /* Mark interrupt index as unused */ 1527591e5becSMarc Zyngier clear_bit(event, its_dev->event_map.lpi_map); 1528b48ac83dSMarc Zyngier 1529b48ac83dSMarc Zyngier /* Nuke the entry in the domain */ 15302da39949SMarc Zyngier irq_domain_reset_irq_data(data); 
1531b48ac83dSMarc Zyngier } 1532b48ac83dSMarc Zyngier 1533b48ac83dSMarc Zyngier /* If all interrupts have been freed, start mopping the floor */ 1534591e5becSMarc Zyngier if (bitmap_empty(its_dev->event_map.lpi_map, 1535591e5becSMarc Zyngier its_dev->event_map.nr_lpis)) { 1536591e5becSMarc Zyngier its_lpi_free(&its_dev->event_map); 1537b48ac83dSMarc Zyngier 1538b48ac83dSMarc Zyngier /* Unmap device/itt */ 1539b48ac83dSMarc Zyngier its_send_mapd(its_dev, 0); 1540b48ac83dSMarc Zyngier its_free_device(its_dev); 1541b48ac83dSMarc Zyngier } 1542b48ac83dSMarc Zyngier 1543b48ac83dSMarc Zyngier irq_domain_free_irqs_parent(domain, virq, nr_irqs); 1544b48ac83dSMarc Zyngier } 1545b48ac83dSMarc Zyngier 1546b48ac83dSMarc Zyngier static const struct irq_domain_ops its_domain_ops = { 1547b48ac83dSMarc Zyngier .alloc = its_irq_domain_alloc, 1548b48ac83dSMarc Zyngier .free = its_irq_domain_free, 1549aca268dfSMarc Zyngier .activate = its_irq_domain_activate, 1550aca268dfSMarc Zyngier .deactivate = its_irq_domain_deactivate, 1551b48ac83dSMarc Zyngier }; 15524c21f3c2SMarc Zyngier 15534559fbb3SYun Wu static int its_force_quiescent(void __iomem *base) 15544559fbb3SYun Wu { 15554559fbb3SYun Wu u32 count = 1000000; /* 1s */ 15564559fbb3SYun Wu u32 val; 15574559fbb3SYun Wu 15584559fbb3SYun Wu val = readl_relaxed(base + GITS_CTLR); 15597611da86SDavid Daney /* 15607611da86SDavid Daney * GIC architecture specification requires the ITS to be both 15617611da86SDavid Daney * disabled and quiescent for writes to GITS_BASER<n> or 15627611da86SDavid Daney * GITS_CBASER to not have UNPREDICTABLE results. 
15637611da86SDavid Daney */ 15647611da86SDavid Daney if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE)) 15654559fbb3SYun Wu return 0; 15664559fbb3SYun Wu 15674559fbb3SYun Wu /* Disable the generation of all interrupts to this ITS */ 15684559fbb3SYun Wu val &= ~GITS_CTLR_ENABLE; 15694559fbb3SYun Wu writel_relaxed(val, base + GITS_CTLR); 15704559fbb3SYun Wu 15714559fbb3SYun Wu /* Poll GITS_CTLR and wait until ITS becomes quiescent */ 15724559fbb3SYun Wu while (1) { 15734559fbb3SYun Wu val = readl_relaxed(base + GITS_CTLR); 15744559fbb3SYun Wu if (val & GITS_CTLR_QUIESCENT) 15754559fbb3SYun Wu return 0; 15764559fbb3SYun Wu 15774559fbb3SYun Wu count--; 15784559fbb3SYun Wu if (!count) 15794559fbb3SYun Wu return -EBUSY; 15804559fbb3SYun Wu 15814559fbb3SYun Wu cpu_relax(); 15824559fbb3SYun Wu udelay(1); 15834559fbb3SYun Wu } 15844559fbb3SYun Wu } 15854559fbb3SYun Wu 158694100970SRobert Richter static void __maybe_unused its_enable_quirk_cavium_22375(void *data) 158794100970SRobert Richter { 158894100970SRobert Richter struct its_node *its = data; 158994100970SRobert Richter 159094100970SRobert Richter its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375; 159194100970SRobert Richter } 159294100970SRobert Richter 1593fbf8f40eSGanapatrao Kulkarni static void __maybe_unused its_enable_quirk_cavium_23144(void *data) 1594fbf8f40eSGanapatrao Kulkarni { 1595fbf8f40eSGanapatrao Kulkarni struct its_node *its = data; 1596fbf8f40eSGanapatrao Kulkarni 1597fbf8f40eSGanapatrao Kulkarni its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144; 1598fbf8f40eSGanapatrao Kulkarni } 1599fbf8f40eSGanapatrao Kulkarni 160067510ccaSRobert Richter static const struct gic_quirk its_quirks[] = { 160194100970SRobert Richter #ifdef CONFIG_CAVIUM_ERRATUM_22375 160294100970SRobert Richter { 160394100970SRobert Richter .desc = "ITS: Cavium errata 22375, 24313", 160494100970SRobert Richter .iidr = 0xa100034c, /* ThunderX pass 1.x */ 160594100970SRobert Richter .mask = 0xffff0fff, 160694100970SRobert Richter 
.init = its_enable_quirk_cavium_22375, 160794100970SRobert Richter }, 160894100970SRobert Richter #endif 1609fbf8f40eSGanapatrao Kulkarni #ifdef CONFIG_CAVIUM_ERRATUM_23144 1610fbf8f40eSGanapatrao Kulkarni { 1611fbf8f40eSGanapatrao Kulkarni .desc = "ITS: Cavium erratum 23144", 1612fbf8f40eSGanapatrao Kulkarni .iidr = 0xa100034c, /* ThunderX pass 1.x */ 1613fbf8f40eSGanapatrao Kulkarni .mask = 0xffff0fff, 1614fbf8f40eSGanapatrao Kulkarni .init = its_enable_quirk_cavium_23144, 1615fbf8f40eSGanapatrao Kulkarni }, 1616fbf8f40eSGanapatrao Kulkarni #endif 161767510ccaSRobert Richter { 161867510ccaSRobert Richter } 161967510ccaSRobert Richter }; 162067510ccaSRobert Richter 162167510ccaSRobert Richter static void its_enable_quirks(struct its_node *its) 162267510ccaSRobert Richter { 162367510ccaSRobert Richter u32 iidr = readl_relaxed(its->base + GITS_IIDR); 162467510ccaSRobert Richter 162567510ccaSRobert Richter gic_enable_quirks(iidr, its_quirks, its); 162667510ccaSRobert Richter } 162767510ccaSRobert Richter 1628db40f0a7STomasz Nowicki static int its_init_domain(struct fwnode_handle *handle, struct its_node *its) 1629d14ae5e6STomasz Nowicki { 1630d14ae5e6STomasz Nowicki struct irq_domain *inner_domain; 1631d14ae5e6STomasz Nowicki struct msi_domain_info *info; 1632d14ae5e6STomasz Nowicki 1633d14ae5e6STomasz Nowicki info = kzalloc(sizeof(*info), GFP_KERNEL); 1634d14ae5e6STomasz Nowicki if (!info) 1635d14ae5e6STomasz Nowicki return -ENOMEM; 1636d14ae5e6STomasz Nowicki 1637db40f0a7STomasz Nowicki inner_domain = irq_domain_create_tree(handle, &its_domain_ops, its); 1638d14ae5e6STomasz Nowicki if (!inner_domain) { 1639d14ae5e6STomasz Nowicki kfree(info); 1640d14ae5e6STomasz Nowicki return -ENOMEM; 1641d14ae5e6STomasz Nowicki } 1642d14ae5e6STomasz Nowicki 1643db40f0a7STomasz Nowicki inner_domain->parent = its_parent; 1644d14ae5e6STomasz Nowicki inner_domain->bus_token = DOMAIN_BUS_NEXUS; 1645d14ae5e6STomasz Nowicki info->ops = &its_msi_domain_ops; 1646d14ae5e6STomasz Nowicki 
info->data = its; 1647d14ae5e6STomasz Nowicki inner_domain->host_data = info; 1648d14ae5e6STomasz Nowicki 1649d14ae5e6STomasz Nowicki return 0; 1650d14ae5e6STomasz Nowicki } 1651d14ae5e6STomasz Nowicki 1652db40f0a7STomasz Nowicki static int __init its_probe_one(struct resource *res, 1653db40f0a7STomasz Nowicki struct fwnode_handle *handle, int numa_node) 16544c21f3c2SMarc Zyngier { 16554c21f3c2SMarc Zyngier struct its_node *its; 16564c21f3c2SMarc Zyngier void __iomem *its_base; 16574c21f3c2SMarc Zyngier u32 val; 16584c21f3c2SMarc Zyngier u64 baser, tmp; 16594c21f3c2SMarc Zyngier int err; 16604c21f3c2SMarc Zyngier 1661db40f0a7STomasz Nowicki its_base = ioremap(res->start, resource_size(res)); 16624c21f3c2SMarc Zyngier if (!its_base) { 1663db40f0a7STomasz Nowicki pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start); 16644c21f3c2SMarc Zyngier return -ENOMEM; 16654c21f3c2SMarc Zyngier } 16664c21f3c2SMarc Zyngier 16674c21f3c2SMarc Zyngier val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK; 16684c21f3c2SMarc Zyngier if (val != 0x30 && val != 0x40) { 1669db40f0a7STomasz Nowicki pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start); 16704c21f3c2SMarc Zyngier err = -ENODEV; 16714c21f3c2SMarc Zyngier goto out_unmap; 16724c21f3c2SMarc Zyngier } 16734c21f3c2SMarc Zyngier 16744559fbb3SYun Wu err = its_force_quiescent(its_base); 16754559fbb3SYun Wu if (err) { 1676db40f0a7STomasz Nowicki pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start); 16774559fbb3SYun Wu goto out_unmap; 16784559fbb3SYun Wu } 16794559fbb3SYun Wu 1680db40f0a7STomasz Nowicki pr_info("ITS %pR\n", res); 16814c21f3c2SMarc Zyngier 16824c21f3c2SMarc Zyngier its = kzalloc(sizeof(*its), GFP_KERNEL); 16834c21f3c2SMarc Zyngier if (!its) { 16844c21f3c2SMarc Zyngier err = -ENOMEM; 16854c21f3c2SMarc Zyngier goto out_unmap; 16864c21f3c2SMarc Zyngier } 16874c21f3c2SMarc Zyngier 16884c21f3c2SMarc Zyngier raw_spin_lock_init(&its->lock); 16894c21f3c2SMarc Zyngier 
INIT_LIST_HEAD(&its->entry); 16904c21f3c2SMarc Zyngier INIT_LIST_HEAD(&its->its_device_list); 16914c21f3c2SMarc Zyngier its->base = its_base; 1692db40f0a7STomasz Nowicki its->phys_base = res->start; 1693589ce5f4SMarc Zyngier its->ite_size = ((gic_read_typer(its_base + GITS_TYPER) >> 4) & 0xf) + 1; 1694db40f0a7STomasz Nowicki its->numa_node = numa_node; 16954c21f3c2SMarc Zyngier 16964c21f3c2SMarc Zyngier its->cmd_base = kzalloc(ITS_CMD_QUEUE_SZ, GFP_KERNEL); 16974c21f3c2SMarc Zyngier if (!its->cmd_base) { 16984c21f3c2SMarc Zyngier err = -ENOMEM; 16994c21f3c2SMarc Zyngier goto out_free_its; 17004c21f3c2SMarc Zyngier } 17014c21f3c2SMarc Zyngier its->cmd_write = its->cmd_base; 17024c21f3c2SMarc Zyngier 170367510ccaSRobert Richter its_enable_quirks(its); 170467510ccaSRobert Richter 17050e0b0f69SShanker Donthineni err = its_alloc_tables(its); 17064c21f3c2SMarc Zyngier if (err) 17074c21f3c2SMarc Zyngier goto out_free_cmd; 17084c21f3c2SMarc Zyngier 17094c21f3c2SMarc Zyngier err = its_alloc_collections(its); 17104c21f3c2SMarc Zyngier if (err) 17114c21f3c2SMarc Zyngier goto out_free_tables; 17124c21f3c2SMarc Zyngier 17134c21f3c2SMarc Zyngier baser = (virt_to_phys(its->cmd_base) | 17144c21f3c2SMarc Zyngier GITS_CBASER_WaWb | 17154c21f3c2SMarc Zyngier GITS_CBASER_InnerShareable | 17164c21f3c2SMarc Zyngier (ITS_CMD_QUEUE_SZ / SZ_4K - 1) | 17174c21f3c2SMarc Zyngier GITS_CBASER_VALID); 17184c21f3c2SMarc Zyngier 17190968a619SVladimir Murzin gits_write_cbaser(baser, its->base + GITS_CBASER); 17200968a619SVladimir Murzin tmp = gits_read_cbaser(its->base + GITS_CBASER); 17214c21f3c2SMarc Zyngier 17224ad3e363SMarc Zyngier if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) { 1723241a386cSMarc Zyngier if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) { 1724241a386cSMarc Zyngier /* 1725241a386cSMarc Zyngier * The HW reports non-shareable, we must 1726241a386cSMarc Zyngier * remove the cacheability attributes as 1727241a386cSMarc Zyngier * well. 
1728241a386cSMarc Zyngier */ 1729241a386cSMarc Zyngier baser &= ~(GITS_CBASER_SHAREABILITY_MASK | 1730241a386cSMarc Zyngier GITS_CBASER_CACHEABILITY_MASK); 1731241a386cSMarc Zyngier baser |= GITS_CBASER_nC; 17320968a619SVladimir Murzin gits_write_cbaser(baser, its->base + GITS_CBASER); 1733241a386cSMarc Zyngier } 17344c21f3c2SMarc Zyngier pr_info("ITS: using cache flushing for cmd queue\n"); 17354c21f3c2SMarc Zyngier its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING; 17364c21f3c2SMarc Zyngier } 17374c21f3c2SMarc Zyngier 17380968a619SVladimir Murzin gits_write_cwriter(0, its->base + GITS_CWRITER); 1739241a386cSMarc Zyngier writel_relaxed(GITS_CTLR_ENABLE, its->base + GITS_CTLR); 1740241a386cSMarc Zyngier 1741db40f0a7STomasz Nowicki err = its_init_domain(handle, its); 1742d14ae5e6STomasz Nowicki if (err) 174354456db9SMarc Zyngier goto out_free_tables; 17444c21f3c2SMarc Zyngier 17454c21f3c2SMarc Zyngier spin_lock(&its_lock); 17464c21f3c2SMarc Zyngier list_add(&its->entry, &its_nodes); 17474c21f3c2SMarc Zyngier spin_unlock(&its_lock); 17484c21f3c2SMarc Zyngier 17494c21f3c2SMarc Zyngier return 0; 17504c21f3c2SMarc Zyngier 17514c21f3c2SMarc Zyngier out_free_tables: 17524c21f3c2SMarc Zyngier its_free_tables(its); 17534c21f3c2SMarc Zyngier out_free_cmd: 17544c21f3c2SMarc Zyngier kfree(its->cmd_base); 17554c21f3c2SMarc Zyngier out_free_its: 17564c21f3c2SMarc Zyngier kfree(its); 17574c21f3c2SMarc Zyngier out_unmap: 17584c21f3c2SMarc Zyngier iounmap(its_base); 1759db40f0a7STomasz Nowicki pr_err("ITS@%pa: failed probing (%d)\n", &res->start, err); 17604c21f3c2SMarc Zyngier return err; 17614c21f3c2SMarc Zyngier } 17624c21f3c2SMarc Zyngier 17634c21f3c2SMarc Zyngier static bool gic_rdists_supports_plpis(void) 17644c21f3c2SMarc Zyngier { 1765589ce5f4SMarc Zyngier return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS); 17664c21f3c2SMarc Zyngier } 17674c21f3c2SMarc Zyngier 17684c21f3c2SMarc Zyngier int its_cpu_init(void) 17694c21f3c2SMarc Zyngier { 
177016acae72SVladimir Murzin if (!list_empty(&its_nodes)) { 17714c21f3c2SMarc Zyngier if (!gic_rdists_supports_plpis()) { 17724c21f3c2SMarc Zyngier pr_info("CPU%d: LPIs not supported\n", smp_processor_id()); 17734c21f3c2SMarc Zyngier return -ENXIO; 17744c21f3c2SMarc Zyngier } 17754c21f3c2SMarc Zyngier its_cpu_init_lpis(); 17764c21f3c2SMarc Zyngier its_cpu_init_collection(); 17774c21f3c2SMarc Zyngier } 17784c21f3c2SMarc Zyngier 17794c21f3c2SMarc Zyngier return 0; 17804c21f3c2SMarc Zyngier } 17814c21f3c2SMarc Zyngier 17824c21f3c2SMarc Zyngier static struct of_device_id its_device_id[] = { 17834c21f3c2SMarc Zyngier { .compatible = "arm,gic-v3-its", }, 17844c21f3c2SMarc Zyngier {}, 17854c21f3c2SMarc Zyngier }; 17864c21f3c2SMarc Zyngier 1787db40f0a7STomasz Nowicki static int __init its_of_probe(struct device_node *node) 17884c21f3c2SMarc Zyngier { 17894c21f3c2SMarc Zyngier struct device_node *np; 1790db40f0a7STomasz Nowicki struct resource res; 17914c21f3c2SMarc Zyngier 17924c21f3c2SMarc Zyngier for (np = of_find_matching_node(node, its_device_id); np; 17934c21f3c2SMarc Zyngier np = of_find_matching_node(np, its_device_id)) { 1794d14ae5e6STomasz Nowicki if (!of_property_read_bool(np, "msi-controller")) { 1795d14ae5e6STomasz Nowicki pr_warn("%s: no msi-controller property, ITS ignored\n", 1796d14ae5e6STomasz Nowicki np->full_name); 1797d14ae5e6STomasz Nowicki continue; 1798d14ae5e6STomasz Nowicki } 1799d14ae5e6STomasz Nowicki 1800db40f0a7STomasz Nowicki if (of_address_to_resource(np, 0, &res)) { 1801db40f0a7STomasz Nowicki pr_warn("%s: no regs?\n", np->full_name); 1802db40f0a7STomasz Nowicki continue; 18034c21f3c2SMarc Zyngier } 18044c21f3c2SMarc Zyngier 1805db40f0a7STomasz Nowicki its_probe_one(&res, &np->fwnode, of_node_to_nid(np)); 1806db40f0a7STomasz Nowicki } 1807db40f0a7STomasz Nowicki return 0; 1808db40f0a7STomasz Nowicki } 1809db40f0a7STomasz Nowicki 18103f010cf1STomasz Nowicki #ifdef CONFIG_ACPI 18113f010cf1STomasz Nowicki 18123f010cf1STomasz Nowicki #define 
ACPI_GICV3_ITS_MEM_SIZE (SZ_128K) 18133f010cf1STomasz Nowicki 18143f010cf1STomasz Nowicki static int __init gic_acpi_parse_madt_its(struct acpi_subtable_header *header, 18153f010cf1STomasz Nowicki const unsigned long end) 18163f010cf1STomasz Nowicki { 18173f010cf1STomasz Nowicki struct acpi_madt_generic_translator *its_entry; 18183f010cf1STomasz Nowicki struct fwnode_handle *dom_handle; 18193f010cf1STomasz Nowicki struct resource res; 18203f010cf1STomasz Nowicki int err; 18213f010cf1STomasz Nowicki 18223f010cf1STomasz Nowicki its_entry = (struct acpi_madt_generic_translator *)header; 18233f010cf1STomasz Nowicki memset(&res, 0, sizeof(res)); 18243f010cf1STomasz Nowicki res.start = its_entry->base_address; 18253f010cf1STomasz Nowicki res.end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1; 18263f010cf1STomasz Nowicki res.flags = IORESOURCE_MEM; 18273f010cf1STomasz Nowicki 18283f010cf1STomasz Nowicki dom_handle = irq_domain_alloc_fwnode((void *)its_entry->base_address); 18293f010cf1STomasz Nowicki if (!dom_handle) { 18303f010cf1STomasz Nowicki pr_err("ITS@%pa: Unable to allocate GICv3 ITS domain token\n", 18313f010cf1STomasz Nowicki &res.start); 18323f010cf1STomasz Nowicki return -ENOMEM; 18333f010cf1STomasz Nowicki } 18343f010cf1STomasz Nowicki 18353f010cf1STomasz Nowicki err = iort_register_domain_token(its_entry->translation_id, dom_handle); 18363f010cf1STomasz Nowicki if (err) { 18373f010cf1STomasz Nowicki pr_err("ITS@%pa: Unable to register GICv3 ITS domain token (ITS ID %d) to IORT\n", 18383f010cf1STomasz Nowicki &res.start, its_entry->translation_id); 18393f010cf1STomasz Nowicki goto dom_err; 18403f010cf1STomasz Nowicki } 18413f010cf1STomasz Nowicki 18423f010cf1STomasz Nowicki err = its_probe_one(&res, dom_handle, NUMA_NO_NODE); 18433f010cf1STomasz Nowicki if (!err) 18443f010cf1STomasz Nowicki return 0; 18453f010cf1STomasz Nowicki 18463f010cf1STomasz Nowicki iort_deregister_domain_token(its_entry->translation_id); 18473f010cf1STomasz Nowicki dom_err: 
18483f010cf1STomasz Nowicki irq_domain_free_fwnode(dom_handle); 18493f010cf1STomasz Nowicki return err; 18503f010cf1STomasz Nowicki } 18513f010cf1STomasz Nowicki 18523f010cf1STomasz Nowicki static void __init its_acpi_probe(void) 18533f010cf1STomasz Nowicki { 18543f010cf1STomasz Nowicki acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR, 18553f010cf1STomasz Nowicki gic_acpi_parse_madt_its, 0); 18563f010cf1STomasz Nowicki } 18573f010cf1STomasz Nowicki #else 18583f010cf1STomasz Nowicki static void __init its_acpi_probe(void) { } 18593f010cf1STomasz Nowicki #endif 18603f010cf1STomasz Nowicki 1861db40f0a7STomasz Nowicki int __init its_init(struct fwnode_handle *handle, struct rdists *rdists, 1862db40f0a7STomasz Nowicki struct irq_domain *parent_domain) 1863db40f0a7STomasz Nowicki { 1864db40f0a7STomasz Nowicki struct device_node *of_node; 1865db40f0a7STomasz Nowicki 1866db40f0a7STomasz Nowicki its_parent = parent_domain; 1867db40f0a7STomasz Nowicki of_node = to_of_node(handle); 1868db40f0a7STomasz Nowicki if (of_node) 1869db40f0a7STomasz Nowicki its_of_probe(of_node); 1870db40f0a7STomasz Nowicki else 18713f010cf1STomasz Nowicki its_acpi_probe(); 1872db40f0a7STomasz Nowicki 18734c21f3c2SMarc Zyngier if (list_empty(&its_nodes)) { 18744c21f3c2SMarc Zyngier pr_warn("ITS: No ITS available, not enabling LPIs\n"); 18754c21f3c2SMarc Zyngier return -ENXIO; 18764c21f3c2SMarc Zyngier } 18774c21f3c2SMarc Zyngier 18784c21f3c2SMarc Zyngier gic_rdists = rdists; 18794c21f3c2SMarc Zyngier its_alloc_lpi_tables(); 18804c21f3c2SMarc Zyngier its_lpi_init(rdists->id_bits); 18814c21f3c2SMarc Zyngier 18824c21f3c2SMarc Zyngier return 0; 18834c21f3c2SMarc Zyngier } 1884