1cc2d3216SMarc Zyngier /* 2cc2d3216SMarc Zyngier * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved. 3cc2d3216SMarc Zyngier * Author: Marc Zyngier <marc.zyngier@arm.com> 4cc2d3216SMarc Zyngier * 5cc2d3216SMarc Zyngier * This program is free software; you can redistribute it and/or modify 6cc2d3216SMarc Zyngier * it under the terms of the GNU General Public License version 2 as 7cc2d3216SMarc Zyngier * published by the Free Software Foundation. 8cc2d3216SMarc Zyngier * 9cc2d3216SMarc Zyngier * This program is distributed in the hope that it will be useful, 10cc2d3216SMarc Zyngier * but WITHOUT ANY WARRANTY; without even the implied warranty of 11cc2d3216SMarc Zyngier * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12cc2d3216SMarc Zyngier * GNU General Public License for more details. 13cc2d3216SMarc Zyngier * 14cc2d3216SMarc Zyngier * You should have received a copy of the GNU General Public License 15cc2d3216SMarc Zyngier * along with this program. If not, see <http://www.gnu.org/licenses/>. 
16cc2d3216SMarc Zyngier */ 17cc2d3216SMarc Zyngier 18cc2d3216SMarc Zyngier #include <linux/bitmap.h> 19cc2d3216SMarc Zyngier #include <linux/cpu.h> 20cc2d3216SMarc Zyngier #include <linux/delay.h> 21cc2d3216SMarc Zyngier #include <linux/interrupt.h> 22cc2d3216SMarc Zyngier #include <linux/log2.h> 23cc2d3216SMarc Zyngier #include <linux/mm.h> 24cc2d3216SMarc Zyngier #include <linux/msi.h> 25cc2d3216SMarc Zyngier #include <linux/of.h> 26cc2d3216SMarc Zyngier #include <linux/of_address.h> 27cc2d3216SMarc Zyngier #include <linux/of_irq.h> 28cc2d3216SMarc Zyngier #include <linux/of_pci.h> 29cc2d3216SMarc Zyngier #include <linux/of_platform.h> 30cc2d3216SMarc Zyngier #include <linux/percpu.h> 31cc2d3216SMarc Zyngier #include <linux/slab.h> 32cc2d3216SMarc Zyngier 3341a83e06SJoel Porquet #include <linux/irqchip.h> 34cc2d3216SMarc Zyngier #include <linux/irqchip/arm-gic-v3.h> 35cc2d3216SMarc Zyngier 36cc2d3216SMarc Zyngier #include <asm/cacheflush.h> 37cc2d3216SMarc Zyngier #include <asm/cputype.h> 38cc2d3216SMarc Zyngier #include <asm/exception.h> 39cc2d3216SMarc Zyngier 4067510ccaSRobert Richter #include "irq-gic-common.h" 4167510ccaSRobert Richter 4294100970SRobert Richter #define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0) 4394100970SRobert Richter #define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1) 44fbf8f40eSGanapatrao Kulkarni #define ITS_FLAGS_WORKAROUND_CAVIUM_23144 (1ULL << 2) 45cc2d3216SMarc Zyngier 46c48ed51cSMarc Zyngier #define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0) 47c48ed51cSMarc Zyngier 48cc2d3216SMarc Zyngier /* 49cc2d3216SMarc Zyngier * Collection structure - just an ID, and a redistributor address to 50cc2d3216SMarc Zyngier * ping. We use one per CPU as a bag of interrupts assigned to this 51cc2d3216SMarc Zyngier * CPU. 
 */
struct its_collection {
	u64			target_address;	/* RDbase of the CPU this collection targets */
	u16			col_id;		/* collection ID used in ITS commands */
};

/*
 * The ITS_BASER structure - contains memory information, cached
 * value of BASER register configuration and ITS page size.
 */
struct its_baser {
	void		*base;	/* kernel VA of the table backing memory */
	u64		val;	/* cached GITS_BASERn register value */
	u32		order;	/* allocation order of the backing pages */
	u32		psz;	/* ITS page size this BASER is programmed with */
};

/*
 * The ITS structure - contains most of the infrastructure, with the
 * top-level MSI domain, the command queue, the collections, and the
 * list of devices writing to it.
 */
struct its_node {
	raw_spinlock_t		lock;		/* protects the command queue write side */
	struct list_head	entry;		/* link in the global its_nodes list */
	void __iomem		*base;		/* mapped ITS register frame */
	unsigned long		phys_base;	/* physical base, used for GITS_TRANSLATER doorbell */
	struct its_cmd_block	*cmd_base;	/* start of the command queue ring */
	struct its_cmd_block	*cmd_write;	/* next free slot in the ring */
	struct its_baser	tables[GITS_BASER_NR_REGS];
	struct its_collection	*collections;	/* one collection per possible CPU */
	struct list_head	its_device_list;
	u64			flags;		/* ITS_FLAGS_* quirks */
	u32			ite_size;	/* bytes per Interrupt Translation Entry */
	u32			device_ids;	/* number of DeviceID bits supported */
	int			numa_node;	/* NUMA node, used by the Cavium 23144 workaround */
};

#define ITS_ITT_ALIGN		SZ_256

/* Convert page order to size in bytes */
#define PAGE_ORDER_TO_SIZE(o)	(PAGE_SIZE << (o))

/*
 * Per-device event bookkeeping: the LPI allocation bitmap, the
 * event->collection mapping, and the LPI range owned by the device.
 */
struct event_lpi_map {
	unsigned long		*lpi_map;	/* bitmap of in-use events */
	u16			*col_map;	/* event index -> collection ID */
	irq_hw_number_t		lpi_base;	/* first LPI of the device's range */
	int			nr_lpis;	/* size of the range */
};

/*
 * The ITS view of a device - belongs to an ITS, a collection, owns an
 * interrupt translation table, and a list of interrupts.
 */
struct its_device {
	struct list_head	entry;		/* link in its->its_device_list */
	struct its_node		*its;		/* owning ITS */
	struct event_lpi_map	event_map;
	void			*itt;		/* Interrupt Translation Table memory */
	u32			nr_ites;	/* number of ITT entries */
	u32			device_id;	/* DeviceID presented to the ITS */
};

static LIST_HEAD(its_nodes);
static DEFINE_SPINLOCK(its_lock);
static struct rdists *gic_rdists;

#define gic_data_rdist()		(raw_cpu_ptr(gic_rdists->rdist))
#define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)

/* Resolve the collection a given device event is currently routed to. */
static struct its_collection *dev_event_to_col(struct its_device *its_dev,
					       u32 event)
{
	struct its_node *its = its_dev->its;

	return its->collections + its_dev->event_map.col_map[event];
}

/*
 * ITS command descriptors - parameters to be encoded in a command
 * block.
 */
struct its_cmd_desc {
	union {
		/* INV: re-read the configuration of one event */
		struct {
			struct its_device *dev;
			u32 event_id;
		} its_inv_cmd;

		/* INT: generate an interrupt for one event */
		struct {
			struct its_device *dev;
			u32 event_id;
		} its_int_cmd;

		/* MAPD: map (or unmap) a device and its ITT */
		struct {
			struct its_device *dev;
			int valid;
		} its_mapd_cmd;

		/* MAPC: map (or unmap) a collection to a redistributor */
		struct {
			struct its_collection *col;
			int valid;
		} its_mapc_cmd;

		/* MAPVI: map a (device, event) pair to a physical LPI */
		struct {
			struct its_device *dev;
			u32 phys_id;
			u32 event_id;
		} its_mapvi_cmd;

		/* MOVI: retarget an event to another collection */
		struct {
			struct its_device *dev;
			struct its_collection *col;
			u32 event_id;
		} its_movi_cmd;

		/* DISCARD: remove the mapping for one event */
		struct {
			struct its_device *dev;
			u32 event_id;
		} its_discard_cmd;

		/* INVALL: re-read the configuration of a whole collection */
		struct {
			struct its_collection *col;
		} its_invall_cmd;
	};
};

/*
 * The ITS command block, which is what the ITS actually parses.
181cc2d3216SMarc Zyngier */ 182cc2d3216SMarc Zyngier struct its_cmd_block { 183cc2d3216SMarc Zyngier u64 raw_cmd[4]; 184cc2d3216SMarc Zyngier }; 185cc2d3216SMarc Zyngier 186cc2d3216SMarc Zyngier #define ITS_CMD_QUEUE_SZ SZ_64K 187cc2d3216SMarc Zyngier #define ITS_CMD_QUEUE_NR_ENTRIES (ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block)) 188cc2d3216SMarc Zyngier 189cc2d3216SMarc Zyngier typedef struct its_collection *(*its_cmd_builder_t)(struct its_cmd_block *, 190cc2d3216SMarc Zyngier struct its_cmd_desc *); 191cc2d3216SMarc Zyngier 192cc2d3216SMarc Zyngier static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr) 193cc2d3216SMarc Zyngier { 194cc2d3216SMarc Zyngier cmd->raw_cmd[0] &= ~0xffUL; 195cc2d3216SMarc Zyngier cmd->raw_cmd[0] |= cmd_nr; 196cc2d3216SMarc Zyngier } 197cc2d3216SMarc Zyngier 198cc2d3216SMarc Zyngier static void its_encode_devid(struct its_cmd_block *cmd, u32 devid) 199cc2d3216SMarc Zyngier { 2007e195ba0SAndre Przywara cmd->raw_cmd[0] &= BIT_ULL(32) - 1; 201cc2d3216SMarc Zyngier cmd->raw_cmd[0] |= ((u64)devid) << 32; 202cc2d3216SMarc Zyngier } 203cc2d3216SMarc Zyngier 204cc2d3216SMarc Zyngier static void its_encode_event_id(struct its_cmd_block *cmd, u32 id) 205cc2d3216SMarc Zyngier { 206cc2d3216SMarc Zyngier cmd->raw_cmd[1] &= ~0xffffffffUL; 207cc2d3216SMarc Zyngier cmd->raw_cmd[1] |= id; 208cc2d3216SMarc Zyngier } 209cc2d3216SMarc Zyngier 210cc2d3216SMarc Zyngier static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id) 211cc2d3216SMarc Zyngier { 212cc2d3216SMarc Zyngier cmd->raw_cmd[1] &= 0xffffffffUL; 213cc2d3216SMarc Zyngier cmd->raw_cmd[1] |= ((u64)phys_id) << 32; 214cc2d3216SMarc Zyngier } 215cc2d3216SMarc Zyngier 216cc2d3216SMarc Zyngier static void its_encode_size(struct its_cmd_block *cmd, u8 size) 217cc2d3216SMarc Zyngier { 218cc2d3216SMarc Zyngier cmd->raw_cmd[1] &= ~0x1fUL; 219cc2d3216SMarc Zyngier cmd->raw_cmd[1] |= size & 0x1f; 220cc2d3216SMarc Zyngier } 221cc2d3216SMarc Zyngier 222cc2d3216SMarc Zyngier static void 
its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr) 223cc2d3216SMarc Zyngier { 224cc2d3216SMarc Zyngier cmd->raw_cmd[2] &= ~0xffffffffffffUL; 225cc2d3216SMarc Zyngier cmd->raw_cmd[2] |= itt_addr & 0xffffffffff00UL; 226cc2d3216SMarc Zyngier } 227cc2d3216SMarc Zyngier 228cc2d3216SMarc Zyngier static void its_encode_valid(struct its_cmd_block *cmd, int valid) 229cc2d3216SMarc Zyngier { 230cc2d3216SMarc Zyngier cmd->raw_cmd[2] &= ~(1UL << 63); 231cc2d3216SMarc Zyngier cmd->raw_cmd[2] |= ((u64)!!valid) << 63; 232cc2d3216SMarc Zyngier } 233cc2d3216SMarc Zyngier 234cc2d3216SMarc Zyngier static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr) 235cc2d3216SMarc Zyngier { 236cc2d3216SMarc Zyngier cmd->raw_cmd[2] &= ~(0xffffffffUL << 16); 237cc2d3216SMarc Zyngier cmd->raw_cmd[2] |= (target_addr & (0xffffffffUL << 16)); 238cc2d3216SMarc Zyngier } 239cc2d3216SMarc Zyngier 240cc2d3216SMarc Zyngier static void its_encode_collection(struct its_cmd_block *cmd, u16 col) 241cc2d3216SMarc Zyngier { 242cc2d3216SMarc Zyngier cmd->raw_cmd[2] &= ~0xffffUL; 243cc2d3216SMarc Zyngier cmd->raw_cmd[2] |= col; 244cc2d3216SMarc Zyngier } 245cc2d3216SMarc Zyngier 246cc2d3216SMarc Zyngier static inline void its_fixup_cmd(struct its_cmd_block *cmd) 247cc2d3216SMarc Zyngier { 248cc2d3216SMarc Zyngier /* Let's fixup BE commands */ 249cc2d3216SMarc Zyngier cmd->raw_cmd[0] = cpu_to_le64(cmd->raw_cmd[0]); 250cc2d3216SMarc Zyngier cmd->raw_cmd[1] = cpu_to_le64(cmd->raw_cmd[1]); 251cc2d3216SMarc Zyngier cmd->raw_cmd[2] = cpu_to_le64(cmd->raw_cmd[2]); 252cc2d3216SMarc Zyngier cmd->raw_cmd[3] = cpu_to_le64(cmd->raw_cmd[3]); 253cc2d3216SMarc Zyngier } 254cc2d3216SMarc Zyngier 255cc2d3216SMarc Zyngier static struct its_collection *its_build_mapd_cmd(struct its_cmd_block *cmd, 256cc2d3216SMarc Zyngier struct its_cmd_desc *desc) 257cc2d3216SMarc Zyngier { 258cc2d3216SMarc Zyngier unsigned long itt_addr; 259c8481267SMarc Zyngier u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites); 
260cc2d3216SMarc Zyngier 261cc2d3216SMarc Zyngier itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt); 262cc2d3216SMarc Zyngier itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN); 263cc2d3216SMarc Zyngier 264cc2d3216SMarc Zyngier its_encode_cmd(cmd, GITS_CMD_MAPD); 265cc2d3216SMarc Zyngier its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id); 266cc2d3216SMarc Zyngier its_encode_size(cmd, size - 1); 267cc2d3216SMarc Zyngier its_encode_itt(cmd, itt_addr); 268cc2d3216SMarc Zyngier its_encode_valid(cmd, desc->its_mapd_cmd.valid); 269cc2d3216SMarc Zyngier 270cc2d3216SMarc Zyngier its_fixup_cmd(cmd); 271cc2d3216SMarc Zyngier 272591e5becSMarc Zyngier return NULL; 273cc2d3216SMarc Zyngier } 274cc2d3216SMarc Zyngier 275cc2d3216SMarc Zyngier static struct its_collection *its_build_mapc_cmd(struct its_cmd_block *cmd, 276cc2d3216SMarc Zyngier struct its_cmd_desc *desc) 277cc2d3216SMarc Zyngier { 278cc2d3216SMarc Zyngier its_encode_cmd(cmd, GITS_CMD_MAPC); 279cc2d3216SMarc Zyngier its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id); 280cc2d3216SMarc Zyngier its_encode_target(cmd, desc->its_mapc_cmd.col->target_address); 281cc2d3216SMarc Zyngier its_encode_valid(cmd, desc->its_mapc_cmd.valid); 282cc2d3216SMarc Zyngier 283cc2d3216SMarc Zyngier its_fixup_cmd(cmd); 284cc2d3216SMarc Zyngier 285cc2d3216SMarc Zyngier return desc->its_mapc_cmd.col; 286cc2d3216SMarc Zyngier } 287cc2d3216SMarc Zyngier 288cc2d3216SMarc Zyngier static struct its_collection *its_build_mapvi_cmd(struct its_cmd_block *cmd, 289cc2d3216SMarc Zyngier struct its_cmd_desc *desc) 290cc2d3216SMarc Zyngier { 291591e5becSMarc Zyngier struct its_collection *col; 292591e5becSMarc Zyngier 293591e5becSMarc Zyngier col = dev_event_to_col(desc->its_mapvi_cmd.dev, 294591e5becSMarc Zyngier desc->its_mapvi_cmd.event_id); 295591e5becSMarc Zyngier 296cc2d3216SMarc Zyngier its_encode_cmd(cmd, GITS_CMD_MAPVI); 297cc2d3216SMarc Zyngier its_encode_devid(cmd, desc->its_mapvi_cmd.dev->device_id); 298cc2d3216SMarc Zyngier 
its_encode_event_id(cmd, desc->its_mapvi_cmd.event_id); 299cc2d3216SMarc Zyngier its_encode_phys_id(cmd, desc->its_mapvi_cmd.phys_id); 300591e5becSMarc Zyngier its_encode_collection(cmd, col->col_id); 301cc2d3216SMarc Zyngier 302cc2d3216SMarc Zyngier its_fixup_cmd(cmd); 303cc2d3216SMarc Zyngier 304591e5becSMarc Zyngier return col; 305cc2d3216SMarc Zyngier } 306cc2d3216SMarc Zyngier 307cc2d3216SMarc Zyngier static struct its_collection *its_build_movi_cmd(struct its_cmd_block *cmd, 308cc2d3216SMarc Zyngier struct its_cmd_desc *desc) 309cc2d3216SMarc Zyngier { 310591e5becSMarc Zyngier struct its_collection *col; 311591e5becSMarc Zyngier 312591e5becSMarc Zyngier col = dev_event_to_col(desc->its_movi_cmd.dev, 313591e5becSMarc Zyngier desc->its_movi_cmd.event_id); 314591e5becSMarc Zyngier 315cc2d3216SMarc Zyngier its_encode_cmd(cmd, GITS_CMD_MOVI); 316cc2d3216SMarc Zyngier its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id); 317591e5becSMarc Zyngier its_encode_event_id(cmd, desc->its_movi_cmd.event_id); 318cc2d3216SMarc Zyngier its_encode_collection(cmd, desc->its_movi_cmd.col->col_id); 319cc2d3216SMarc Zyngier 320cc2d3216SMarc Zyngier its_fixup_cmd(cmd); 321cc2d3216SMarc Zyngier 322591e5becSMarc Zyngier return col; 323cc2d3216SMarc Zyngier } 324cc2d3216SMarc Zyngier 325cc2d3216SMarc Zyngier static struct its_collection *its_build_discard_cmd(struct its_cmd_block *cmd, 326cc2d3216SMarc Zyngier struct its_cmd_desc *desc) 327cc2d3216SMarc Zyngier { 328591e5becSMarc Zyngier struct its_collection *col; 329591e5becSMarc Zyngier 330591e5becSMarc Zyngier col = dev_event_to_col(desc->its_discard_cmd.dev, 331591e5becSMarc Zyngier desc->its_discard_cmd.event_id); 332591e5becSMarc Zyngier 333cc2d3216SMarc Zyngier its_encode_cmd(cmd, GITS_CMD_DISCARD); 334cc2d3216SMarc Zyngier its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id); 335cc2d3216SMarc Zyngier its_encode_event_id(cmd, desc->its_discard_cmd.event_id); 336cc2d3216SMarc Zyngier 337cc2d3216SMarc Zyngier 
its_fixup_cmd(cmd); 338cc2d3216SMarc Zyngier 339591e5becSMarc Zyngier return col; 340cc2d3216SMarc Zyngier } 341cc2d3216SMarc Zyngier 342cc2d3216SMarc Zyngier static struct its_collection *its_build_inv_cmd(struct its_cmd_block *cmd, 343cc2d3216SMarc Zyngier struct its_cmd_desc *desc) 344cc2d3216SMarc Zyngier { 345591e5becSMarc Zyngier struct its_collection *col; 346591e5becSMarc Zyngier 347591e5becSMarc Zyngier col = dev_event_to_col(desc->its_inv_cmd.dev, 348591e5becSMarc Zyngier desc->its_inv_cmd.event_id); 349591e5becSMarc Zyngier 350cc2d3216SMarc Zyngier its_encode_cmd(cmd, GITS_CMD_INV); 351cc2d3216SMarc Zyngier its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id); 352cc2d3216SMarc Zyngier its_encode_event_id(cmd, desc->its_inv_cmd.event_id); 353cc2d3216SMarc Zyngier 354cc2d3216SMarc Zyngier its_fixup_cmd(cmd); 355cc2d3216SMarc Zyngier 356591e5becSMarc Zyngier return col; 357cc2d3216SMarc Zyngier } 358cc2d3216SMarc Zyngier 359cc2d3216SMarc Zyngier static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd, 360cc2d3216SMarc Zyngier struct its_cmd_desc *desc) 361cc2d3216SMarc Zyngier { 362cc2d3216SMarc Zyngier its_encode_cmd(cmd, GITS_CMD_INVALL); 363cc2d3216SMarc Zyngier its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id); 364cc2d3216SMarc Zyngier 365cc2d3216SMarc Zyngier its_fixup_cmd(cmd); 366cc2d3216SMarc Zyngier 367cc2d3216SMarc Zyngier return NULL; 368cc2d3216SMarc Zyngier } 369cc2d3216SMarc Zyngier 370cc2d3216SMarc Zyngier static u64 its_cmd_ptr_to_offset(struct its_node *its, 371cc2d3216SMarc Zyngier struct its_cmd_block *ptr) 372cc2d3216SMarc Zyngier { 373cc2d3216SMarc Zyngier return (ptr - its->cmd_base) * sizeof(*ptr); 374cc2d3216SMarc Zyngier } 375cc2d3216SMarc Zyngier 376cc2d3216SMarc Zyngier static int its_queue_full(struct its_node *its) 377cc2d3216SMarc Zyngier { 378cc2d3216SMarc Zyngier int widx; 379cc2d3216SMarc Zyngier int ridx; 380cc2d3216SMarc Zyngier 381cc2d3216SMarc Zyngier widx = its->cmd_write - 
its->cmd_base; 382cc2d3216SMarc Zyngier ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block); 383cc2d3216SMarc Zyngier 384cc2d3216SMarc Zyngier /* This is incredibly unlikely to happen, unless the ITS locks up. */ 385cc2d3216SMarc Zyngier if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx) 386cc2d3216SMarc Zyngier return 1; 387cc2d3216SMarc Zyngier 388cc2d3216SMarc Zyngier return 0; 389cc2d3216SMarc Zyngier } 390cc2d3216SMarc Zyngier 391cc2d3216SMarc Zyngier static struct its_cmd_block *its_allocate_entry(struct its_node *its) 392cc2d3216SMarc Zyngier { 393cc2d3216SMarc Zyngier struct its_cmd_block *cmd; 394cc2d3216SMarc Zyngier u32 count = 1000000; /* 1s! */ 395cc2d3216SMarc Zyngier 396cc2d3216SMarc Zyngier while (its_queue_full(its)) { 397cc2d3216SMarc Zyngier count--; 398cc2d3216SMarc Zyngier if (!count) { 399cc2d3216SMarc Zyngier pr_err_ratelimited("ITS queue not draining\n"); 400cc2d3216SMarc Zyngier return NULL; 401cc2d3216SMarc Zyngier } 402cc2d3216SMarc Zyngier cpu_relax(); 403cc2d3216SMarc Zyngier udelay(1); 404cc2d3216SMarc Zyngier } 405cc2d3216SMarc Zyngier 406cc2d3216SMarc Zyngier cmd = its->cmd_write++; 407cc2d3216SMarc Zyngier 408cc2d3216SMarc Zyngier /* Handle queue wrapping */ 409cc2d3216SMarc Zyngier if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES)) 410cc2d3216SMarc Zyngier its->cmd_write = its->cmd_base; 411cc2d3216SMarc Zyngier 412cc2d3216SMarc Zyngier return cmd; 413cc2d3216SMarc Zyngier } 414cc2d3216SMarc Zyngier 415cc2d3216SMarc Zyngier static struct its_cmd_block *its_post_commands(struct its_node *its) 416cc2d3216SMarc Zyngier { 417cc2d3216SMarc Zyngier u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write); 418cc2d3216SMarc Zyngier 419cc2d3216SMarc Zyngier writel_relaxed(wr, its->base + GITS_CWRITER); 420cc2d3216SMarc Zyngier 421cc2d3216SMarc Zyngier return its->cmd_write; 422cc2d3216SMarc Zyngier } 423cc2d3216SMarc Zyngier 424cc2d3216SMarc Zyngier static void its_flush_cmd(struct its_node *its, struct 
its_cmd_block *cmd) 425cc2d3216SMarc Zyngier { 426cc2d3216SMarc Zyngier /* 427cc2d3216SMarc Zyngier * Make sure the commands written to memory are observable by 428cc2d3216SMarc Zyngier * the ITS. 429cc2d3216SMarc Zyngier */ 430cc2d3216SMarc Zyngier if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING) 431cc2d3216SMarc Zyngier __flush_dcache_area(cmd, sizeof(*cmd)); 432cc2d3216SMarc Zyngier else 433cc2d3216SMarc Zyngier dsb(ishst); 434cc2d3216SMarc Zyngier } 435cc2d3216SMarc Zyngier 436cc2d3216SMarc Zyngier static void its_wait_for_range_completion(struct its_node *its, 437cc2d3216SMarc Zyngier struct its_cmd_block *from, 438cc2d3216SMarc Zyngier struct its_cmd_block *to) 439cc2d3216SMarc Zyngier { 440cc2d3216SMarc Zyngier u64 rd_idx, from_idx, to_idx; 441cc2d3216SMarc Zyngier u32 count = 1000000; /* 1s! */ 442cc2d3216SMarc Zyngier 443cc2d3216SMarc Zyngier from_idx = its_cmd_ptr_to_offset(its, from); 444cc2d3216SMarc Zyngier to_idx = its_cmd_ptr_to_offset(its, to); 445cc2d3216SMarc Zyngier 446cc2d3216SMarc Zyngier while (1) { 447cc2d3216SMarc Zyngier rd_idx = readl_relaxed(its->base + GITS_CREADR); 448cc2d3216SMarc Zyngier if (rd_idx >= to_idx || rd_idx < from_idx) 449cc2d3216SMarc Zyngier break; 450cc2d3216SMarc Zyngier 451cc2d3216SMarc Zyngier count--; 452cc2d3216SMarc Zyngier if (!count) { 453cc2d3216SMarc Zyngier pr_err_ratelimited("ITS queue timeout\n"); 454cc2d3216SMarc Zyngier return; 455cc2d3216SMarc Zyngier } 456cc2d3216SMarc Zyngier cpu_relax(); 457cc2d3216SMarc Zyngier udelay(1); 458cc2d3216SMarc Zyngier } 459cc2d3216SMarc Zyngier } 460cc2d3216SMarc Zyngier 461cc2d3216SMarc Zyngier static void its_send_single_command(struct its_node *its, 462cc2d3216SMarc Zyngier its_cmd_builder_t builder, 463cc2d3216SMarc Zyngier struct its_cmd_desc *desc) 464cc2d3216SMarc Zyngier { 465cc2d3216SMarc Zyngier struct its_cmd_block *cmd, *sync_cmd, *next_cmd; 466cc2d3216SMarc Zyngier struct its_collection *sync_col; 4673e39e8f5SMarc Zyngier unsigned long flags; 468cc2d3216SMarc 
Zyngier 4693e39e8f5SMarc Zyngier raw_spin_lock_irqsave(&its->lock, flags); 470cc2d3216SMarc Zyngier 471cc2d3216SMarc Zyngier cmd = its_allocate_entry(its); 472cc2d3216SMarc Zyngier if (!cmd) { /* We're soooooo screewed... */ 473cc2d3216SMarc Zyngier pr_err_ratelimited("ITS can't allocate, dropping command\n"); 4743e39e8f5SMarc Zyngier raw_spin_unlock_irqrestore(&its->lock, flags); 475cc2d3216SMarc Zyngier return; 476cc2d3216SMarc Zyngier } 477cc2d3216SMarc Zyngier sync_col = builder(cmd, desc); 478cc2d3216SMarc Zyngier its_flush_cmd(its, cmd); 479cc2d3216SMarc Zyngier 480cc2d3216SMarc Zyngier if (sync_col) { 481cc2d3216SMarc Zyngier sync_cmd = its_allocate_entry(its); 482cc2d3216SMarc Zyngier if (!sync_cmd) { 483cc2d3216SMarc Zyngier pr_err_ratelimited("ITS can't SYNC, skipping\n"); 484cc2d3216SMarc Zyngier goto post; 485cc2d3216SMarc Zyngier } 486cc2d3216SMarc Zyngier its_encode_cmd(sync_cmd, GITS_CMD_SYNC); 487cc2d3216SMarc Zyngier its_encode_target(sync_cmd, sync_col->target_address); 488cc2d3216SMarc Zyngier its_fixup_cmd(sync_cmd); 489cc2d3216SMarc Zyngier its_flush_cmd(its, sync_cmd); 490cc2d3216SMarc Zyngier } 491cc2d3216SMarc Zyngier 492cc2d3216SMarc Zyngier post: 493cc2d3216SMarc Zyngier next_cmd = its_post_commands(its); 4943e39e8f5SMarc Zyngier raw_spin_unlock_irqrestore(&its->lock, flags); 495cc2d3216SMarc Zyngier 496cc2d3216SMarc Zyngier its_wait_for_range_completion(its, cmd, next_cmd); 497cc2d3216SMarc Zyngier } 498cc2d3216SMarc Zyngier 499cc2d3216SMarc Zyngier static void its_send_inv(struct its_device *dev, u32 event_id) 500cc2d3216SMarc Zyngier { 501cc2d3216SMarc Zyngier struct its_cmd_desc desc; 502cc2d3216SMarc Zyngier 503cc2d3216SMarc Zyngier desc.its_inv_cmd.dev = dev; 504cc2d3216SMarc Zyngier desc.its_inv_cmd.event_id = event_id; 505cc2d3216SMarc Zyngier 506cc2d3216SMarc Zyngier its_send_single_command(dev->its, its_build_inv_cmd, &desc); 507cc2d3216SMarc Zyngier } 508cc2d3216SMarc Zyngier 509cc2d3216SMarc Zyngier static void 
its_send_mapd(struct its_device *dev, int valid) 510cc2d3216SMarc Zyngier { 511cc2d3216SMarc Zyngier struct its_cmd_desc desc; 512cc2d3216SMarc Zyngier 513cc2d3216SMarc Zyngier desc.its_mapd_cmd.dev = dev; 514cc2d3216SMarc Zyngier desc.its_mapd_cmd.valid = !!valid; 515cc2d3216SMarc Zyngier 516cc2d3216SMarc Zyngier its_send_single_command(dev->its, its_build_mapd_cmd, &desc); 517cc2d3216SMarc Zyngier } 518cc2d3216SMarc Zyngier 519cc2d3216SMarc Zyngier static void its_send_mapc(struct its_node *its, struct its_collection *col, 520cc2d3216SMarc Zyngier int valid) 521cc2d3216SMarc Zyngier { 522cc2d3216SMarc Zyngier struct its_cmd_desc desc; 523cc2d3216SMarc Zyngier 524cc2d3216SMarc Zyngier desc.its_mapc_cmd.col = col; 525cc2d3216SMarc Zyngier desc.its_mapc_cmd.valid = !!valid; 526cc2d3216SMarc Zyngier 527cc2d3216SMarc Zyngier its_send_single_command(its, its_build_mapc_cmd, &desc); 528cc2d3216SMarc Zyngier } 529cc2d3216SMarc Zyngier 530cc2d3216SMarc Zyngier static void its_send_mapvi(struct its_device *dev, u32 irq_id, u32 id) 531cc2d3216SMarc Zyngier { 532cc2d3216SMarc Zyngier struct its_cmd_desc desc; 533cc2d3216SMarc Zyngier 534cc2d3216SMarc Zyngier desc.its_mapvi_cmd.dev = dev; 535cc2d3216SMarc Zyngier desc.its_mapvi_cmd.phys_id = irq_id; 536cc2d3216SMarc Zyngier desc.its_mapvi_cmd.event_id = id; 537cc2d3216SMarc Zyngier 538cc2d3216SMarc Zyngier its_send_single_command(dev->its, its_build_mapvi_cmd, &desc); 539cc2d3216SMarc Zyngier } 540cc2d3216SMarc Zyngier 541cc2d3216SMarc Zyngier static void its_send_movi(struct its_device *dev, 542cc2d3216SMarc Zyngier struct its_collection *col, u32 id) 543cc2d3216SMarc Zyngier { 544cc2d3216SMarc Zyngier struct its_cmd_desc desc; 545cc2d3216SMarc Zyngier 546cc2d3216SMarc Zyngier desc.its_movi_cmd.dev = dev; 547cc2d3216SMarc Zyngier desc.its_movi_cmd.col = col; 548591e5becSMarc Zyngier desc.its_movi_cmd.event_id = id; 549cc2d3216SMarc Zyngier 550cc2d3216SMarc Zyngier its_send_single_command(dev->its, its_build_movi_cmd, &desc); 
551cc2d3216SMarc Zyngier } 552cc2d3216SMarc Zyngier 553cc2d3216SMarc Zyngier static void its_send_discard(struct its_device *dev, u32 id) 554cc2d3216SMarc Zyngier { 555cc2d3216SMarc Zyngier struct its_cmd_desc desc; 556cc2d3216SMarc Zyngier 557cc2d3216SMarc Zyngier desc.its_discard_cmd.dev = dev; 558cc2d3216SMarc Zyngier desc.its_discard_cmd.event_id = id; 559cc2d3216SMarc Zyngier 560cc2d3216SMarc Zyngier its_send_single_command(dev->its, its_build_discard_cmd, &desc); 561cc2d3216SMarc Zyngier } 562cc2d3216SMarc Zyngier 563cc2d3216SMarc Zyngier static void its_send_invall(struct its_node *its, struct its_collection *col) 564cc2d3216SMarc Zyngier { 565cc2d3216SMarc Zyngier struct its_cmd_desc desc; 566cc2d3216SMarc Zyngier 567cc2d3216SMarc Zyngier desc.its_invall_cmd.col = col; 568cc2d3216SMarc Zyngier 569cc2d3216SMarc Zyngier its_send_single_command(its, its_build_invall_cmd, &desc); 570cc2d3216SMarc Zyngier } 571c48ed51cSMarc Zyngier 572c48ed51cSMarc Zyngier /* 573c48ed51cSMarc Zyngier * irqchip functions - assumes MSI, mostly. 
 */

/* Event ID = LPI hwirq number, relative to the device's LPI base. */
static inline u32 its_get_event_id(struct irq_data *d)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	return d->hwirq - its_dev->event_map.lpi_base;
}

/*
 * Set or clear the enable bit of one LPI in the property table,
 * make the change visible to the redistributors, then send an INV
 * so the ITS re-reads the configuration.
 */
static void lpi_set_config(struct irq_data *d, bool enable)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	irq_hw_number_t hwirq = d->hwirq;
	u32 id = its_get_event_id(d);
	/* LPI config bytes are indexed from LPI 8192 (first LPI number) */
	u8 *cfg = page_address(gic_rdists->prop_page) + hwirq - 8192;

	if (enable)
		*cfg |= LPI_PROP_ENABLED;
	else
		*cfg &= ~LPI_PROP_ENABLED;

	/*
	 * Make the above write visible to the redistributors.
	 * And yes, we're flushing exactly: One. Single. Byte.
	 * Humpf...
	 */
	if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
		__flush_dcache_area(cfg, sizeof(*cfg));
	else
		dsb(ishst);
	its_send_inv(its_dev, id);
}

static void its_mask_irq(struct irq_data *d)
{
	lpi_set_config(d, false);
}

static void its_unmask_irq(struct irq_data *d)
{
	lpi_set_config(d, true);
}

/*
 * Retarget an LPI to a CPU from @mask_val: send a MOVI and record the
 * new collection in the device's event map.
 */
static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	unsigned int cpu;
	const struct cpumask *cpu_mask = cpu_online_mask;
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	struct its_collection *target_col;
	u32 id = its_get_event_id(d);

	/* lpi cannot be routed to a redistributor that is on a foreign node */
	if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
		if (its_dev->its->numa_node >= 0) {
			cpu_mask = cpumask_of_node(its_dev->its->numa_node);
			if (!cpumask_intersects(mask_val, cpu_mask))
				return -EINVAL;
		}
	}

	cpu = cpumask_any_and(mask_val, cpu_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	target_col = &its_dev->its->collections[cpu];
	its_send_movi(its_dev, target_col, id);
	its_dev->event_map.col_map[id] = cpu;

	return IRQ_SET_MASK_OK_DONE;
}

/* MSI doorbell: write the event ID to this ITS's GITS_TRANSLATER. */
static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	struct its_node *its;
	u64 addr;

	its = its_dev->its;
	addr = its->phys_base + GITS_TRANSLATER;

	msg->address_lo		= addr & ((1UL << 32) - 1);
	msg->address_hi		= addr >> 32;
	msg->data		= its_get_event_id(d);
}

static struct irq_chip its_irq_chip = {
	.name			= "ITS",
	.irq_mask		= its_mask_irq,
	.irq_unmask		= its_unmask_irq,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_affinity	= its_set_affinity,
	.irq_compose_msi_msg	= its_irq_compose_msi_msg,
};

/*
 * How we allocate LPIs:
 *
 * The GIC has id_bits bits for interrupt identifiers. From there, we
 * must subtract 8192 which are reserved for SGIs/PPIs/SPIs. Then, as
 * we allocate LPIs by chunks of 32, we can shift the whole thing by 5
 * bits to the right.
 *
 * This gives us (((1UL << id_bits) - 8192) >> 5) possible allocations.
 */
#define IRQS_PER_CHUNK_SHIFT	5
#define IRQS_PER_CHUNK		(1 << IRQS_PER_CHUNK_SHIFT)

/* Global chunk allocator state, protected by lpi_lock */
static unsigned long *lpi_bitmap;
static u32 lpi_chunks;
static DEFINE_SPINLOCK(lpi_lock);

/* Convert an LPI number (>= 8192) to its chunk index */
static int its_lpi_to_chunk(int lpi)
{
	return (lpi - 8192) >> IRQS_PER_CHUNK_SHIFT;
}

/* Convert a chunk index back to the first LPI number in that chunk */
static int its_chunk_to_lpi(int chunk)
{
	return (chunk << IRQS_PER_CHUNK_SHIFT) + 8192;
}

/*
 * Size the global chunk bitmap from the number of ID bits the GIC
 * implements. Returns 0 on success, -ENOMEM on allocation failure
 * (in which case lpi_chunks is reset to 0 so nothing can allocate).
 */
static int __init its_lpi_init(u32 id_bits)
{
	lpi_chunks = its_lpi_to_chunk(1UL << id_bits);

	lpi_bitmap = kzalloc(BITS_TO_LONGS(lpi_chunks) * sizeof(long),
			     GFP_KERNEL);
	if (!lpi_bitmap) {
		lpi_chunks = 0;
		return -ENOMEM;
	}

	pr_info("ITS: Allocated %d chunks for LPIs\n", (int)lpi_chunks);
	return 0;
}

/*
 * Allocate a contiguous run of chunks covering at least nr_irqs LPIs.
 * On success returns a freshly zeroed per-device bitmap sized to the
 * allocation and fills *base / *nr_ids; on failure returns NULL and
 * sets *base = *nr_ids = 0. If no contiguous run of the requested
 * length exists, the request is progressively shrunk one chunk at a
 * time, so the caller may receive fewer LPIs than asked for.
 */
static unsigned long *its_lpi_alloc_chunks(int nr_irqs, int *base, int *nr_ids)
{
	unsigned long *bitmap = NULL;
	int chunk_id;
	int nr_chunks;
	int i;

	nr_chunks = DIV_ROUND_UP(nr_irqs, IRQS_PER_CHUNK);

	spin_lock(&lpi_lock);

	do {
		chunk_id = bitmap_find_next_zero_area(lpi_bitmap, lpi_chunks,
						      0, nr_chunks, 0);
		if (chunk_id < lpi_chunks)
			break;

		/* No room at this size: retry with one chunk less */
		nr_chunks--;
	} while (nr_chunks > 0);

	if (!nr_chunks)
		goto out;

	/* GFP_ATOMIC: we are holding lpi_lock (a spinlock) here */
	bitmap = kzalloc(BITS_TO_LONGS(nr_chunks * IRQS_PER_CHUNK) * sizeof (long),
			 GFP_ATOMIC);
	if (!bitmap)
		goto out;

	for (i = 0; i < nr_chunks; i++)
		set_bit(chunk_id + i, lpi_bitmap);

	*base = its_chunk_to_lpi(chunk_id);
	*nr_ids = nr_chunks * IRQS_PER_CHUNK;

out:
	spin_unlock(&lpi_lock);

	if (!bitmap)
		*base = *nr_ids = 0;

	return bitmap;
}

/*
 * Return a device's chunks to the global allocator and free its
 * per-event maps. Double-frees are reported (not crashed on) via the
 * "Bad LPI chunk" path.
 */
static void its_lpi_free(struct event_lpi_map *map)
{
	int base = map->lpi_base;
	int nr_ids = map->nr_lpis;
	int lpi;

	spin_lock(&lpi_lock);

	for (lpi = base; lpi < (base + nr_ids); lpi += IRQS_PER_CHUNK) {
		int chunk = its_lpi_to_chunk(lpi);
		/*
		 * NOTE(review): valid chunk indices are 0..lpi_chunks-1,
		 * so '>=' would be the stricter bound here — confirm
		 * whether chunk == lpi_chunks should really be allowed
		 * past this check.
		 */
		BUG_ON(chunk > lpi_chunks);
		if (test_bit(chunk, lpi_bitmap)) {
			clear_bit(chunk, lpi_bitmap);
		} else {
			pr_err("Bad LPI chunk %d\n", chunk);
		}
	}

	spin_unlock(&lpi_lock);

	kfree(map->lpi_map);
	kfree(map->col_map);
}

/*
 * We allocate 64kB for PROPBASE. That gives us at most 64K LPIs to
 * deal with (one configuration byte per interrupt). PENDBASE has to
 * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
 */
#define LPI_PROPBASE_SZ		SZ_64K
#define LPI_PENDBASE_SZ		(LPI_PROPBASE_SZ / 8 + SZ_1K)

/*
 * This is how many bits of ID we need, including the useless ones.
 */
#define LPI_NRBITS		ilog2(LPI_PROPBASE_SZ + SZ_8K)

#define LPI_PROP_DEFAULT_PRIO	0xa0

/*
 * Allocate the global LPI property table (shared by all
 * redistributors) and initialize every entry to priority 0xa0,
 * Group-1, disabled. Returns 0 or -ENOMEM.
 */
static int __init its_alloc_lpi_tables(void)
{
	phys_addr_t paddr;

	gic_rdists->prop_page = alloc_pages(GFP_NOWAIT,
					    get_order(LPI_PROPBASE_SZ));
	if (!gic_rdists->prop_page) {
		pr_err("Failed to allocate PROPBASE\n");
		return -ENOMEM;
	}

	paddr = page_to_phys(gic_rdists->prop_page);
	pr_info("GIC: using LPI property table @%pa\n", &paddr);

	/* Priority 0xa0, Group-1, disabled */
	memset(page_address(gic_rdists->prop_page),
	       LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1,
	       LPI_PROPBASE_SZ);

	/* Make sure the GIC will observe the written configuration */
	__flush_dcache_area(page_address(gic_rdists->prop_page), LPI_PROPBASE_SZ);

	return 0;
}

/* Human-readable names for the GITS_BASERn table types, for logging */
static const char *its_base_type_string[] = {
	[GITS_BASER_TYPE_DEVICE]	= "Devices",
	[GITS_BASER_TYPE_VCPU]		= "Virtual CPUs",
	[GITS_BASER_TYPE_CPU]		= "Physical CPUs",
	[GITS_BASER_TYPE_COLLECTION]	= "Interrupt Collections",
	[GITS_BASER_TYPE_RESERVED5]	= "Reserved (5)",
	[GITS_BASER_TYPE_RESERVED6]	= "Reserved (6)",
	[GITS_BASER_TYPE_RESERVED7]	= "Reserved (7)",
};

/* Read the GITS_BASERn register corresponding to this baser slot */
static u64 its_read_baser(struct its_node *its, struct its_baser *baser)
{
	u32 idx = baser - its->tables;

	return readq_relaxed(its->base + GITS_BASER + (idx << 3));
}

/*
 * Write a GITS_BASERn register and cache what the hardware actually
 * accepted in baser->val (the write may not stick bit-for-bit).
 */
static void its_write_baser(struct its_node *its, struct its_baser *baser,
			    u64 val)
{
	u32 idx = baser - its->tables;

	writeq_relaxed(val, its->base + GITS_BASER + (idx << 3));
	baser->val = its_read_baser(its, baser);
}

/*
 * Allocate backing memory for one ITS table and program its
 * GITS_BASERn register, negotiating with the hardware: shareability
 * and cacheability fall back to what the ITS reports, and the page
 * size is stepped down (64K -> 16K -> 4K) if the requested one is
 * refused. Returns 0, -ENOMEM, or -ENXIO if the register simply
 * won't take the value.
 */
static int its_setup_baser(struct its_node *its, struct its_baser *baser,
			   u64 cache, u64 shr, u32 psz, u32 order,
			   bool indirect)
{
	u64 val = its_read_baser(its, baser);
	u64 esz = GITS_BASER_ENTRY_SIZE(val);
	u64 type = GITS_BASER_TYPE(val);
	u32 alloc_pages;
	void *base;
	u64 tmp;

retry_alloc_baser:
	/* Clamp to the architectural maximum number of ITS pages */
	alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
	if (alloc_pages > GITS_BASER_PAGES_MAX) {
		pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n",
			&its->phys_base, its_base_type_string[type],
			alloc_pages, GITS_BASER_PAGES_MAX);
		alloc_pages = GITS_BASER_PAGES_MAX;
		order = get_order(GITS_BASER_PAGES_MAX * psz);
	}

	base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!base)
		return -ENOMEM;

retry_baser:
	val = (virt_to_phys(base)				 |
		(type << GITS_BASER_TYPE_SHIFT)			 |
		((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT)	 |
		((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT)	 |
		cache						 |
		shr						 |
		GITS_BASER_VALID);

	val |=	indirect ? GITS_BASER_INDIRECT : 0x0;

	switch (psz) {
	case SZ_4K:
		val |= GITS_BASER_PAGE_SIZE_4K;
		break;
	case SZ_16K:
		val |= GITS_BASER_PAGE_SIZE_16K;
		break;
	case SZ_64K:
		val |= GITS_BASER_PAGE_SIZE_64K;
		break;
	}

	its_write_baser(its, baser, val);
	tmp = baser->val;

	if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
		/*
		 * Shareability didn't stick. Just use
		 * whatever the read reported, which is likely
		 * to be the only thing this redistributor
		 * supports. If that's zero, make it
		 * non-cacheable as well.
		 */
		shr = tmp & GITS_BASER_SHAREABILITY_MASK;
		if (!shr) {
			cache = GITS_BASER_nC;
			/* Non-coherent: push the zeroed table to PoC */
			__flush_dcache_area(base, PAGE_ORDER_TO_SIZE(order));
		}
		goto retry_baser;
	}

	if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) {
		/*
		 * Page size didn't stick. Let's try a smaller
		 * size and retry. If we reach 4K, then
		 * something is horribly wrong...
		 */
		free_pages((unsigned long)base, order);
		baser->base = NULL;

		switch (psz) {
		case SZ_16K:
			psz = SZ_4K;
			goto retry_alloc_baser;
		case SZ_64K:
			psz = SZ_16K;
			goto retry_alloc_baser;
		}
	}

	if (val != tmp) {
		pr_err("ITS@%pa: %s doesn't stick: %lx %lx\n",
		       &its->phys_base, its_base_type_string[type],
		       (unsigned long) val, (unsigned long) tmp);
		free_pages((unsigned long)base, order);
		return -ENXIO;
	}

	/* Record the negotiated settings for later BASERs / teardown */
	baser->order = order;
	baser->base = base;
	baser->psz = psz;
	tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz;

	pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n",
		&its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / tmp),
		its_base_type_string[type],
		(unsigned long)virt_to_phys(base),
		indirect ? "indirect" : "flat", (int)esz,
		psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);

	return 0;
}

/*
 * Decide whether the device table should be two-level (indirect) and
 * compute the allocation order for its first level. Returns true if
 * indirection is to be used; *order is updated in place.
 */
static bool its_parse_baser_device(struct its_node *its, struct its_baser *baser,
				   u32 psz, u32 *order)
{
	u64 esz = GITS_BASER_ENTRY_SIZE(its_read_baser(its, baser));
	u64 val = GITS_BASER_InnerShareable | GITS_BASER_WaWb;
	u32 ids = its->device_ids;
	u32 new_order = *order;
	bool indirect = false;

	/* No need to enable Indirection if memory requirement < (psz*2)bytes */
	if ((esz << ids) > (psz * 2)) {
		/*
		 * Find out whether hw supports a single or two-level
		 * table by reading bit at offset '62' after writing '1' to it.
		 */
		its_write_baser(its, baser, val | GITS_BASER_INDIRECT);
		indirect = !!(baser->val & GITS_BASER_INDIRECT);

		if (indirect) {
			/*
			 * The size of the lvl2 table is equal to ITS page size
			 * which is 'psz'. For computing lvl1 table size,
			 * subtract ID bits that sparse lvl2 table from 'ids'
			 * which is reported by ITS hardware times lvl1 table
			 * entry size.
			 */
			ids -= ilog2(psz / esz);
			esz = GITS_LVL1_ENTRY_SIZE;
		}
	}

	/*
	 * Allocate as many entries as required to fit the
	 * range of device IDs that the ITS can grok... The ID
	 * space being incredibly sparse, this results in a
	 * massive waste of memory if two-level device table
	 * feature is not supported by hardware.
	 */
	new_order = max_t(u32, get_order(esz << ids), new_order);
	if (new_order >= MAX_ORDER) {
		/* Can't cover the full ID space: clamp and warn */
		new_order = MAX_ORDER - 1;
		ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / esz);
		pr_warn("ITS@%pa: Device Table too large, reduce ids %u->%u\n",
			&its->phys_base, its->device_ids, ids);
	}

	*order = new_order;

	return indirect;
}

/* Free every table allocated by its_alloc_tables(), in any state */
static void its_free_tables(struct its_node *its)
{
	int i;

	for (i = 0; i < GITS_BASER_NR_REGS; i++) {
		if (its->tables[i].base) {
			free_pages((unsigned long)its->tables[i].base,
				   its->tables[i].order);
			its->tables[i].base = NULL;
		}
	}
}

/*
 * Walk the GITS_BASERn registers and allocate/program each table the
 * ITS advertises. The settings negotiated for one BASER (page size,
 * cacheability, shareability) seed the attempt for the next one.
 * Returns 0 on success; on error all tables allocated so far are
 * freed and the error from its_setup_baser() is returned.
 */
static int its_alloc_tables(struct its_node *its)
{
	u64 typer = readq_relaxed(its->base + GITS_TYPER);
	u32 ids = GITS_TYPER_DEVBITS(typer);
	u64 shr = GITS_BASER_InnerShareable;
	u64 cache = GITS_BASER_WaWb;
	u32 psz = SZ_64K;
	int err, i;

	if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375) {
		/*
		 * erratum 22375: only alloc 8MB table size
		 * erratum 24313: ignore memory access type
		 */
		cache = GITS_BASER_nCnB;
		ids = 0x14;			/* 20 bits, 8MB */
	}

	its->device_ids = ids;

	for (i = 0; i < GITS_BASER_NR_REGS; i++) {
		struct its_baser *baser = its->tables + i;
		u64 val = its_read_baser(its, baser);
		u64 type = GITS_BASER_TYPE(val);
		u32 order = get_order(psz);
		bool indirect = false;

		if (type == GITS_BASER_TYPE_NONE)
			continue;

		if (type == GITS_BASER_TYPE_DEVICE)
			indirect = its_parse_baser_device(its, baser, psz, &order);

		err = its_setup_baser(its, baser, cache, shr, psz, order, indirect);
		if (err < 0) {
			its_free_tables(its);
			return err;
		}

		/* Update settings which will be used for next BASERn */
		psz = baser->psz;
		cache = baser->val & GITS_BASER_CACHEABILITY_MASK;
		shr = baser->val & GITS_BASER_SHAREABILITY_MASK;
	}

	return 0;
}

/* Allocate one collection slot per possible CPU for this ITS */
static int its_alloc_collections(struct its_node *its)
{
	its->collections = kzalloc(nr_cpu_ids * sizeof(*its->collections),
				   GFP_KERNEL);
	if (!its->collections)
		return -ENOMEM;

	return 0;
}

/*
 * Per-CPU LPI bring-up: allocate this CPU's pending table (once),
 * then program PROPBASER/PENDBASER on the local redistributor and
 * enable LPIs. Runs on the CPU being initialized.
 */
static void its_cpu_init_lpis(void)
{
	void __iomem *rbase = gic_data_rdist_rd_base();
	struct page *pend_page;
	u64 val, tmp;

	/* If we didn't allocate the pending table yet, do it now */
	pend_page = gic_data_rdist()->pend_page;
	if (!pend_page) {
		phys_addr_t paddr;
		/*
		 * The pending pages have to be at least 64kB aligned,
		 * hence the 'max(LPI_PENDBASE_SZ, SZ_64K)' below.
		 */
		pend_page = alloc_pages(GFP_NOWAIT | __GFP_ZERO,
					get_order(max(LPI_PENDBASE_SZ, SZ_64K)));
		if (!pend_page) {
			pr_err("Failed to allocate PENDBASE for CPU%d\n",
			       smp_processor_id());
			return;
		}

		/* Make sure the GIC will observe the zero-ed page */
		__flush_dcache_area(page_address(pend_page), LPI_PENDBASE_SZ);

		paddr = page_to_phys(pend_page);
		pr_info("CPU%d: using LPI pending table @%pa\n",
			smp_processor_id(), &paddr);
		gic_data_rdist()->pend_page = pend_page;
	}

	/* Disable LPIs */
	val = readl_relaxed(rbase + GICR_CTLR);
	val &= ~GICR_CTLR_ENABLE_LPIS;
	writel_relaxed(val, rbase + GICR_CTLR);

	/*
	 * Make sure any change to the table is observable by the GIC.
	 */
	dsb(sy);

	/* set PROPBASE */
	val = (page_to_phys(gic_rdists->prop_page) |
	       GICR_PROPBASER_InnerShareable |
	       GICR_PROPBASER_WaWb |
	       ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));

	/* Read back: the redistributor may downgrade our attributes */
	writeq_relaxed(val, rbase + GICR_PROPBASER);
	tmp = readq_relaxed(rbase + GICR_PROPBASER);

	if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
		if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
			/*
			 * The HW reports non-shareable, we must
			 * remove the cacheability attributes as
			 * well.
			 */
			val &= ~(GICR_PROPBASER_SHAREABILITY_MASK |
				 GICR_PROPBASER_CACHEABILITY_MASK);
			val |= GICR_PROPBASER_nC;
			writeq_relaxed(val, rbase + GICR_PROPBASER);
		}
		/* Property table updates will need explicit cache flushes */
		pr_info_once("GIC: using cache flushing for LPI property table\n");
		gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
	}

	/* set PENDBASE */
	val = (page_to_phys(pend_page) |
	       GICR_PENDBASER_InnerShareable |
	       GICR_PENDBASER_WaWb);

	writeq_relaxed(val, rbase + GICR_PENDBASER);
	tmp = readq_relaxed(rbase + GICR_PENDBASER);

	if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
		/*
		 * The HW reports non-shareable, we must remove the
		 * cacheability attributes as well.
		 */
		val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
			 GICR_PENDBASER_CACHEABILITY_MASK);
		val |= GICR_PENDBASER_nC;
		writeq_relaxed(val, rbase + GICR_PENDBASER);
	}

	/* Enable LPIs */
	val = readl_relaxed(rbase + GICR_CTLR);
	val |= GICR_CTLR_ENABLE_LPIS;
	writel_relaxed(val, rbase + GICR_CTLR);

	/* Make sure the GIC has seen the above */
	dsb(sy);
}

/*
 * Map this CPU's collection on every ITS in the system (MAPC +
 * INVALL). Runs on the CPU being initialized, under its_lock.
 */
static void its_cpu_init_collection(void)
{
	struct its_node *its;
	int cpu;

	spin_lock(&its_lock);
	cpu = smp_processor_id();

	list_for_each_entry(its, &its_nodes, entry) {
		u64 target;

		/* avoid cross node collections and its mapping */
		if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
			struct device_node *cpu_node;

			cpu_node = of_get_cpu_node(cpu, NULL);
			if (its->numa_node != NUMA_NO_NODE &&
				its->numa_node != of_node_to_nid(cpu_node))
				continue;
		}

		/*
		 * We now have to bind each collection to its target
		 * redistributor.
		 */
		if (readq_relaxed(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
			/*
			 * This ITS wants the physical address of the
			 * redistributor.
			 */
			target = gic_data_rdist()->phys_base;
		} else {
			/*
			 * This ITS wants a linear CPU number.
			 */
			target = readq_relaxed(gic_data_rdist_rd_base() + GICR_TYPER);
			target = GICR_TYPER_CPU_NUMBER(target) << 16;
		}

		/* Perform collection mapping */
		its->collections[cpu].target_address = target;
		its->collections[cpu].col_id = cpu;

		its_send_mapc(its, &its->collections[cpu], 1);
		its_send_invall(its, &its->collections[cpu]);
	}

	spin_unlock(&its_lock);
}

/*
 * Look up an already-created its_device by its device ID. Returns
 * NULL if none exists. Takes its->lock with IRQs disabled since this
 * can race with device creation/removal.
 */
static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
{
	struct its_device *its_dev = NULL, *tmp;
	unsigned long flags;

	raw_spin_lock_irqsave(&its->lock, flags);

	list_for_each_entry(tmp, &its->its_device_list, entry) {
		if (tmp->device_id == dev_id) {
			its_dev = tmp;
			break;
		}
	}

	raw_spin_unlock_irqrestore(&its->lock, flags);

	return its_dev;
}

/* Find the BASER slot of a given table type, or NULL if absent */
static struct its_baser *its_get_baser(struct its_node *its, u32 type)
{
	int i;

	for (i = 0; i < GITS_BASER_NR_REGS; i++) {
		if (GITS_BASER_TYPE(its->tables[i].val) == type)
			return &its->tables[i];
	}

	return NULL;
}

/*
 * Check that dev_id fits in the device table, allocating a level-2
 * page on demand when the table is indirect. Returns true if the ITS
 * can translate this device ID, false otherwise.
 */
static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
{
	struct its_baser *baser;
	struct page *page;
	u32 esz, idx;
	__le64 *table;

	baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);

	/* Don't allow device id that exceeds ITS hardware limit */
	if (!baser)
		return (ilog2(dev_id) < its->device_ids);

	/* Don't allow device id that exceeds single, flat table limit */
	esz = GITS_BASER_ENTRY_SIZE(baser->val);
	if (!(baser->val & GITS_BASER_INDIRECT))
		return (dev_id < (PAGE_ORDER_TO_SIZE(baser->order) / esz));

	/* Compute 1st level table index & check if that exceeds table limit */
	idx = dev_id >> ilog2(baser->psz / esz);
	if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE))
		return false;

	table = baser->base;

	/* Allocate memory for 2nd level table */
	if (!table[idx]) {
		page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(baser->psz));
		if (!page)
			return false;

		/* Flush Lvl2 table to PoC if hw doesn't support coherency */
		if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
			__flush_dcache_area(page_address(page), baser->psz);

		table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);

		/* Flush Lvl1 entry to PoC if hw doesn't support coherency */
		if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
			__flush_dcache_area(table + idx, GITS_LVL1_ENTRY_SIZE);

		/* Ensure updated table contents are visible to ITS hardware */
		dsb(sy);
	}

	return true;
}

/*
 * Create the software and hardware state for a device: device table
 * entry, ITT (interrupt translation table), LPI chunk allocation and
 * the per-event LPI/collection maps. Returns the new its_device, or
 * NULL if any allocation fails (everything allocated so far is freed).
 */
static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
					    int nvecs)
{
	struct its_device *dev;
	unsigned long *lpi_map;
	unsigned long flags;
	u16 *col_map = NULL;
	void *itt;
	int lpi_base;
	int nr_lpis;
	int nr_ites;
	int sz;

	if (!its_alloc_device_table(its, dev_id))
		return NULL;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	/*
	 * At least one bit of EventID is being used, hence a minimum
	 * of two entries. No, the architecture doesn't let you
	 * express an ITT with a single entry.
	 */
	nr_ites = max(2UL, roundup_pow_of_two(nvecs));
	sz = nr_ites * its->ite_size;
	/* Over-allocate so the ITT can be aligned to ITS_ITT_ALIGN */
	sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
	itt = kzalloc(sz, GFP_KERNEL);
	lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis);
	if (lpi_map)
		col_map = kzalloc(sizeof(*col_map) * nr_lpis, GFP_KERNEL);

	/* All-or-nothing: kfree(NULL) is a no-op, so this is safe */
	if (!dev || !itt || !lpi_map || !col_map) {
		kfree(dev);
		kfree(itt);
		kfree(lpi_map);
		kfree(col_map);
		return NULL;
	}

	/* Make sure the ITS sees the zeroed ITT before it is mapped */
	__flush_dcache_area(itt, sz);

	dev->its = its;
	dev->itt = itt;
	dev->nr_ites = nr_ites;
	dev->event_map.lpi_map = lpi_map;
	dev->event_map.col_map = col_map;
Zyngier dev->event_map.lpi_base = lpi_base; 1344591e5becSMarc Zyngier dev->event_map.nr_lpis = nr_lpis; 134584a6a2e7SMarc Zyngier dev->device_id = dev_id; 134684a6a2e7SMarc Zyngier INIT_LIST_HEAD(&dev->entry); 134784a6a2e7SMarc Zyngier 13483e39e8f5SMarc Zyngier raw_spin_lock_irqsave(&its->lock, flags); 134984a6a2e7SMarc Zyngier list_add(&dev->entry, &its->its_device_list); 13503e39e8f5SMarc Zyngier raw_spin_unlock_irqrestore(&its->lock, flags); 135184a6a2e7SMarc Zyngier 135284a6a2e7SMarc Zyngier /* Map device to its ITT */ 135384a6a2e7SMarc Zyngier its_send_mapd(dev, 1); 135484a6a2e7SMarc Zyngier 135584a6a2e7SMarc Zyngier return dev; 135684a6a2e7SMarc Zyngier } 135784a6a2e7SMarc Zyngier 135884a6a2e7SMarc Zyngier static void its_free_device(struct its_device *its_dev) 135984a6a2e7SMarc Zyngier { 13603e39e8f5SMarc Zyngier unsigned long flags; 13613e39e8f5SMarc Zyngier 13623e39e8f5SMarc Zyngier raw_spin_lock_irqsave(&its_dev->its->lock, flags); 136384a6a2e7SMarc Zyngier list_del(&its_dev->entry); 13643e39e8f5SMarc Zyngier raw_spin_unlock_irqrestore(&its_dev->its->lock, flags); 136584a6a2e7SMarc Zyngier kfree(its_dev->itt); 136684a6a2e7SMarc Zyngier kfree(its_dev); 136784a6a2e7SMarc Zyngier } 1368b48ac83dSMarc Zyngier 1369b48ac83dSMarc Zyngier static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq) 1370b48ac83dSMarc Zyngier { 1371b48ac83dSMarc Zyngier int idx; 1372b48ac83dSMarc Zyngier 1373591e5becSMarc Zyngier idx = find_first_zero_bit(dev->event_map.lpi_map, 1374591e5becSMarc Zyngier dev->event_map.nr_lpis); 1375591e5becSMarc Zyngier if (idx == dev->event_map.nr_lpis) 1376b48ac83dSMarc Zyngier return -ENOSPC; 1377b48ac83dSMarc Zyngier 1378591e5becSMarc Zyngier *hwirq = dev->event_map.lpi_base + idx; 1379591e5becSMarc Zyngier set_bit(idx, dev->event_map.lpi_map); 1380b48ac83dSMarc Zyngier 1381b48ac83dSMarc Zyngier return 0; 1382b48ac83dSMarc Zyngier } 1383b48ac83dSMarc Zyngier 138454456db9SMarc Zyngier static int its_msi_prepare(struct 
		irq_domain *domain, struct device *dev,
			   int nvec, msi_alloc_info_t *info)
{
	struct its_node *its;
	struct its_device *its_dev;
	struct msi_domain_info *msi_info;
	u32 dev_id;

	/*
	 * We ignore "dev" entirely, and rely on the dev_id that has
	 * been passed via the scratchpad. This limits this domain's
	 * usefulness to upper layers that definitely know that they
	 * are built on top of the ITS.
	 */
	dev_id = info->scratchpad[0].ul;

	msi_info = msi_get_domain_info(domain);
	its = msi_info->data;

	its_dev = its_find_device(its, dev_id);
	if (its_dev) {
		/*
		 * We already have seen this ID, probably through
		 * another alias (PCI bridge of some sort). No need to
		 * create the device.
		 */
		pr_debug("Reusing ITT for devID %x\n", dev_id);
		goto out;
	}

	its_dev = its_create_device(its, dev_id, nvec);
	if (!its_dev)
		return -ENOMEM;

	pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec));
out:
	/* Hand the device back to the allocation path via the scratchpad */
	info->scratchpad[0].ptr = its_dev;
	return 0;
}

static struct msi_domain_ops its_msi_domain_ops = {
	.msi_prepare	= its_msi_prepare,
};

/*
 * Allocate one interrupt (@hwirq) in the parent (GICv3) domain,
 * describing it as an edge-triggered LPI. Only works when the parent
 * domain was created from a device-tree node.
 */
static int its_irq_gic_domain_alloc(struct irq_domain *domain,
				    unsigned int virq,
				    irq_hw_number_t hwirq)
{
	struct irq_fwspec fwspec;

	if (irq_domain_get_of_node(domain->parent)) {
		fwspec.fwnode = domain->parent->fwnode;
		fwspec.param_count = 3;
		fwspec.param[0] = GIC_IRQ_TYPE_LPI;
		fwspec.param[1] = hwirq;
		fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
	} else {
		return -EINVAL;
	}

	return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
}

/*
 * irq_domain .alloc callback: allocate @nr_irqs events on the
 * its_device stashed in the msi_alloc_info scratchpad by
 * its_msi_prepare(), and wire each one up in the parent domain.
 */
static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs, void *args)
{
	msi_alloc_info_t
	*info = args;
	struct its_device *its_dev = info->scratchpad[0].ptr;
	irq_hw_number_t hwirq;
	int err;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		/* Grab the next free event/LPI on this device */
		err = its_alloc_device_irq(its_dev, &hwirq);
		if (err)
			return err;

		err = its_irq_gic_domain_alloc(domain, virq + i, hwirq);
		if (err)
			return err;

		irq_domain_set_hwirq_and_chip(domain, virq + i,
					      hwirq, &its_irq_chip, its_dev);
		pr_debug("ID:%d pID:%d vID:%d\n",
			 (int)(hwirq - its_dev->event_map.lpi_base),
			 (int) hwirq, virq + i);
	}

	return 0;
}

/*
 * irq_domain .activate callback: pick a target CPU for the LPI and
 * issue the MAPVI command so the ITS routes the event to it.
 */
static void its_irq_domain_activate(struct irq_domain *domain,
				    struct irq_data *d)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);
	const struct cpumask *cpu_mask = cpu_online_mask;

	/* get the cpu_mask of local node */
	if (its_dev->its->numa_node >= 0)
		cpu_mask = cpumask_of_node(its_dev->its->numa_node);

	/* Bind the LPI to the first possible CPU */
	its_dev->event_map.col_map[event] = cpumask_first(cpu_mask);

	/* Map the GIC IRQ and event to the device */
	its_send_mapvi(its_dev, d->hwirq, event);
}

/*
 * irq_domain .deactivate callback: discard the event mapping so the
 * ITS stops delivering this LPI.
 */
static void its_irq_domain_deactivate(struct irq_domain *domain,
				      struct irq_data *d)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);

	/* Stop the delivery of interrupts */
	its_send_discard(its_dev, event);
}

/*
 * irq_domain .free callback: release @nr_irqs events and, once the
 * device has no events left, tear down the device itself.
 */
static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	int i;

	for (i = 0; i < nr_irqs; i++) {
		struct irq_data *data = irq_domain_get_irq_data(domain,
								virq + i);
		u32 event = its_get_event_id(data);

		/* Mark interrupt index as unused */
		clear_bit(event, its_dev->event_map.lpi_map);

		/* Nuke the entry in the domain */
		irq_domain_reset_irq_data(data);
	}

	/* If all interrupts have been freed, start mopping the floor */
	if (bitmap_empty(its_dev->event_map.lpi_map,
			 its_dev->event_map.nr_lpis)) {
		its_lpi_free(&its_dev->event_map);
		/* Unmap device/itt */
		its_send_mapd(its_dev, 0);
		its_free_device(its_dev);
	}

	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}

static const struct irq_domain_ops its_domain_ops = {
	.alloc			= its_irq_domain_alloc,
	.free			= its_irq_domain_free,
	.activate		= its_irq_domain_activate,
	.deactivate		= its_irq_domain_deactivate,
};

/*
 * Disable the ITS and poll until it reports quiescent, so that the
 * GITS_BASER<n>/GITS_CBASER registers can safely be (re)programmed.
 * Returns 0 on success, -EBUSY if the ITS never quiesces within ~1s.
 */
static int its_force_quiescent(void __iomem *base)
{
	u32 count = 1000000;	/* 1s */
	u32 val;

	val = readl_relaxed(base + GITS_CTLR);
	/*
	 * GIC architecture specification requires the ITS to be both
	 * disabled and quiescent for writes to GITS_BASER<n> or
	 * GITS_CBASER to not have UNPREDICTABLE results.
	 */
	if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE))
		return 0;

	/* Disable the generation of all interrupts to this ITS */
	val &= ~GITS_CTLR_ENABLE;
	writel_relaxed(val, base + GITS_CTLR);

	/* Poll GITS_CTLR and wait until ITS becomes quiescent */
	while (1) {
		val = readl_relaxed(base + GITS_CTLR);
		if (val & GITS_CTLR_QUIESCENT)
			return 0;

		count--;
		if (!count)
			return -EBUSY;

		cpu_relax();
		udelay(1);
	}
}

/* Quirk hook: flag this ITS as affected by Cavium erratum 22375/24313 */
static void __maybe_unused its_enable_quirk_cavium_22375(void *data)
{
	struct its_node *its = data;

	its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
}

/* Quirk hook: flag this ITS as affected by Cavium erratum 23144 */
static void __maybe_unused its_enable_quirk_cavium_23144(void *data)
{
	struct its_node *its = data;

	its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
}

/* Known broken implementations, matched against GITS_IIDR */
static const struct gic_quirk its_quirks[] = {
#ifdef CONFIG_CAVIUM_ERRATUM_22375
	{
		.desc	= "ITS: Cavium errata 22375, 24313",
		.iidr	= 0xa100034c,	/* ThunderX pass 1.x */
		.mask	= 0xffff0fff,
		.init	= its_enable_quirk_cavium_22375,
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23144
	{
		.desc	= "ITS: Cavium erratum 23144",
		.iidr	= 0xa100034c,	/* ThunderX pass 1.x */
		.mask	= 0xffff0fff,
		.init	= its_enable_quirk_cavium_23144,
	},
#endif
	{
	}
};

/* Match GITS_IIDR against the quirk table and apply what sticks */
static void its_enable_quirks(struct its_node *its)
{
	u32 iidr = readl_relaxed(its->base + GITS_IIDR);

	gic_enable_quirks(iidr, its_quirks, its);
}

/*
 * Probe one ITS described by @node: map its registers, quiesce it,
 * allocate its tables and command queue, program GITS_CBASER (falling
 * back to non-cacheable + SW cache flushing if the HW downgrades the
 * shareability), enable it, and optionally expose it as an MSI domain
 * under @parent.
 */
static int __init its_probe(struct device_node *node,
			    struct irq_domain *parent)
{
	struct resource res;
	struct its_node *its;
	void __iomem *its_base;
	struct irq_domain *inner_domain;
	u32 val;
	u64 baser, tmp;
	int err;

	err = of_address_to_resource(node, 0, &res);
	if (err) {
		pr_warn("%s: no regs?\n", node->full_name);
		return -ENXIO;
	}

	its_base = ioremap(res.start, resource_size(&res));
	if (!its_base) {
		pr_warn("%s: unable to map registers\n", node->full_name);
		return -ENOMEM;
	}

	/* Sanity-check the architecture revision (GICv3 or GICv4) */
	val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK;
	if (val != 0x30 && val != 0x40) {
		pr_warn("%s: no ITS detected, giving up\n", node->full_name);
		err = -ENODEV;
		goto out_unmap;
	}

	/* The ITS must be disabled+quiescent before touching its BASERs */
	err = its_force_quiescent(its_base);
	if (err) {
		pr_warn("%s: failed to quiesce, giving up\n",
			node->full_name);
		goto out_unmap;
	}

	pr_info("ITS: %s\n", node->full_name);

	its = kzalloc(sizeof(*its), GFP_KERNEL);
	if (!its) {
		err = -ENOMEM;
		goto out_unmap;
	}

	raw_spin_lock_init(&its->lock);
	INIT_LIST_HEAD(&its->entry);
	INIT_LIST_HEAD(&its->its_device_list);
	its->base = its_base;
	its->phys_base = res.start;
	/* GITS_TYPER.ITT_entry_size is encoded minus one */
	its->ite_size = ((readl_relaxed(its_base + GITS_TYPER) >> 4) & 0xf) + 1;
	its->numa_node = of_node_to_nid(node);

	its->cmd_base = kzalloc(ITS_CMD_QUEUE_SZ, GFP_KERNEL);
	if (!its->cmd_base) {
		err = -ENOMEM;
		goto out_free_its;
	}
	its->cmd_write = its->cmd_base;

	its_enable_quirks(its);

	err = its_alloc_tables(its);
	if (err)
		goto out_free_cmd;

	err = its_alloc_collections(its);
	if (err)
		goto out_free_tables;

	baser = (virt_to_phys(its->cmd_base)	|
		 GITS_CBASER_WaWb		|
		 GITS_CBASER_InnerShareable	|
		 (ITS_CMD_QUEUE_SZ / SZ_4K - 1)	|
		 GITS_CBASER_VALID);

	writeq_relaxed(baser, its->base + GITS_CBASER);
	/* Read back: the HW may silently downgrade the shareability */
	tmp = readq_relaxed(its->base + GITS_CBASER);

	if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) {
		if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) {
			/*
			 * The HW reports non-shareable, we must
			 * remove the cacheability attributes as
			 * well.
			 */
			baser &= ~(GITS_CBASER_SHAREABILITY_MASK |
				   GITS_CBASER_CACHEABILITY_MASK);
			baser |= GITS_CBASER_nC;
			writeq_relaxed(baser, its->base + GITS_CBASER);
		}
		pr_info("ITS: using cache flushing for cmd queue\n");
		its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
	}

	/* Reset the command queue write pointer and turn the ITS on */
	writeq_relaxed(0, its->base + GITS_CWRITER);
	writel_relaxed(GITS_CTLR_ENABLE, its->base + GITS_CTLR);

	if (of_property_read_bool(node, "msi-controller")) {
		struct msi_domain_info *info;

		info = kzalloc(sizeof(*info), GFP_KERNEL);
		if (!info) {
			err = -ENOMEM;
			goto out_free_tables;
		}

		inner_domain = irq_domain_add_tree(node, &its_domain_ops, its);
		if (!inner_domain) {
			err = -ENOMEM;
			kfree(info);
			goto out_free_tables;
		}

		inner_domain->parent = parent;
		inner_domain->bus_token = DOMAIN_BUS_NEXUS;
		info->ops = &its_msi_domain_ops;
		info->data = its;
		inner_domain->host_data = info;
	}

	spin_lock(&its_lock);
	list_add(&its->entry, &its_nodes);
	spin_unlock(&its_lock);

	return 0;
17434c21f3c2SMarc Zyngier 17444c21f3c2SMarc Zyngier out_free_tables: 17454c21f3c2SMarc Zyngier its_free_tables(its); 17464c21f3c2SMarc Zyngier out_free_cmd: 17474c21f3c2SMarc Zyngier kfree(its->cmd_base); 17484c21f3c2SMarc Zyngier out_free_its: 17494c21f3c2SMarc Zyngier kfree(its); 17504c21f3c2SMarc Zyngier out_unmap: 17514c21f3c2SMarc Zyngier iounmap(its_base); 17524c21f3c2SMarc Zyngier pr_err("ITS: failed probing %s (%d)\n", node->full_name, err); 17534c21f3c2SMarc Zyngier return err; 17544c21f3c2SMarc Zyngier } 17554c21f3c2SMarc Zyngier 17564c21f3c2SMarc Zyngier static bool gic_rdists_supports_plpis(void) 17574c21f3c2SMarc Zyngier { 17584c21f3c2SMarc Zyngier return !!(readl_relaxed(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS); 17594c21f3c2SMarc Zyngier } 17604c21f3c2SMarc Zyngier 17614c21f3c2SMarc Zyngier int its_cpu_init(void) 17624c21f3c2SMarc Zyngier { 176316acae72SVladimir Murzin if (!list_empty(&its_nodes)) { 17644c21f3c2SMarc Zyngier if (!gic_rdists_supports_plpis()) { 17654c21f3c2SMarc Zyngier pr_info("CPU%d: LPIs not supported\n", smp_processor_id()); 17664c21f3c2SMarc Zyngier return -ENXIO; 17674c21f3c2SMarc Zyngier } 17684c21f3c2SMarc Zyngier its_cpu_init_lpis(); 17694c21f3c2SMarc Zyngier its_cpu_init_collection(); 17704c21f3c2SMarc Zyngier } 17714c21f3c2SMarc Zyngier 17724c21f3c2SMarc Zyngier return 0; 17734c21f3c2SMarc Zyngier } 17744c21f3c2SMarc Zyngier 17754c21f3c2SMarc Zyngier static struct of_device_id its_device_id[] = { 17764c21f3c2SMarc Zyngier { .compatible = "arm,gic-v3-its", }, 17774c21f3c2SMarc Zyngier {}, 17784c21f3c2SMarc Zyngier }; 17794c21f3c2SMarc Zyngier 178004a0e4deSTomasz Nowicki int __init its_init(struct device_node *node, struct rdists *rdists, 17814c21f3c2SMarc Zyngier struct irq_domain *parent_domain) 17824c21f3c2SMarc Zyngier { 17834c21f3c2SMarc Zyngier struct device_node *np; 17844c21f3c2SMarc Zyngier 17854c21f3c2SMarc Zyngier for (np = of_find_matching_node(node, its_device_id); np; 17864c21f3c2SMarc Zyngier 
np = of_find_matching_node(np, its_device_id)) { 17874c21f3c2SMarc Zyngier its_probe(np, parent_domain); 17884c21f3c2SMarc Zyngier } 17894c21f3c2SMarc Zyngier 17904c21f3c2SMarc Zyngier if (list_empty(&its_nodes)) { 17914c21f3c2SMarc Zyngier pr_warn("ITS: No ITS available, not enabling LPIs\n"); 17924c21f3c2SMarc Zyngier return -ENXIO; 17934c21f3c2SMarc Zyngier } 17944c21f3c2SMarc Zyngier 17954c21f3c2SMarc Zyngier gic_rdists = rdists; 17964c21f3c2SMarc Zyngier its_alloc_lpi_tables(); 17974c21f3c2SMarc Zyngier its_lpi_init(rdists->id_bits); 17984c21f3c2SMarc Zyngier 17994c21f3c2SMarc Zyngier return 0; 18004c21f3c2SMarc Zyngier } 1801