// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/cpu.h>
#include <linux/crash_dump.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/efi.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>

#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/irqchip/arm-gic-v4.h>

#include <asm/cputype.h>
#include <asm/exception.h>

#include "irq-gic-common.h"

#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING		(1ULL << 0)
#define ITS_FLAGS_WORKAROUND_CAVIUM_22375	(1ULL << 1)
#define ITS_FLAGS_WORKAROUND_CAVIUM_23144	(1ULL << 2)
#define ITS_FLAGS_SAVE_SUSPEND_STATE		(1ULL << 3)

#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING	(1 << 0)
#define RDIST_FLAGS_RD_TABLES_PREALLOCATED	(1 << 1)

static u32 lpi_id_bits;

/*
 * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to
 * deal with (one configuration byte per interrupt). PENDBASE has to
 * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
 */
#define LPI_NRBITS		lpi_id_bits
#define LPI_PROPBASE_SZ		ALIGN(BIT(LPI_NRBITS), SZ_64K)
#define LPI_PENDBASE_SZ		ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)

#define LPI_PROP_DEFAULT_PRIO	GICD_INT_DEF_PRI

/*
 * Collection structure - just an ID, and a redistributor address to
 * ping. We use one per CPU as a bag of interrupts assigned to this
 * CPU.
 */
struct its_collection {
	u64			target_address;
	u16			col_id;
};

/*
 * The ITS_BASER structure - contains memory information, cached
 * value of BASER register configuration and ITS page size.
 */
struct its_baser {
	void		*base;
	u64		val;
	u32		order;
	u32		psz;
};

struct its_device;

/*
 * The ITS structure - contains most of the infrastructure, with the
 * top-level MSI domain, the command queue, the collections, and the
 * list of devices writing to it.
 *
 * dev_alloc_lock has to be taken for device allocations, while the
 * spinlock must be taken to parse data structures such as the device
 * list.
 */
struct its_node {
	raw_spinlock_t		lock;
	struct mutex		dev_alloc_lock;
	struct list_head	entry;
	void __iomem		*base;
	phys_addr_t		phys_base;
	struct its_cmd_block	*cmd_base;
	struct its_cmd_block	*cmd_write;
	struct its_baser	tables[GITS_BASER_NR_REGS];
	struct its_collection	*collections;
	struct fwnode_handle	*fwnode_handle;
	u64			(*get_msi_base)(struct its_device *its_dev);
	u64			typer;
	u64			cbaser_save;
	u32			ctlr_save;
	struct list_head	its_device_list;
	u64			flags;
	unsigned long		list_nr;
	int			numa_node;
	unsigned int		msi_domain_flags;
	u32			pre_its_base; /* for Socionext Synquacer */
	int			vlpi_redist_offset;
};

#define is_v4(its)		(!!((its)->typer & GITS_TYPER_VLPIS))
#define device_ids(its)		(FIELD_GET(GITS_TYPER_DEVBITS, (its)->typer) + 1)

#define ITS_ITT_ALIGN		SZ_256

/* The maximum number of VPEID bits supported by VLPI commands */
#define ITS_MAX_VPEID_BITS	(16)
#define ITS_MAX_VPEID		(1 << (ITS_MAX_VPEID_BITS))

/* Convert page order to size in bytes */
#define PAGE_ORDER_TO_SIZE(o)	(PAGE_SIZE << (o))

struct event_lpi_map {
	unsigned long		*lpi_map;
	u16			*col_map;
	irq_hw_number_t		lpi_base;
	int			nr_lpis;
	struct mutex		vlpi_lock;
	struct its_vm		*vm;
	struct its_vlpi_map	*vlpi_maps;
	int			nr_vlpis;
};

/*
 * The ITS view of a device - belongs to an ITS, owns an interrupt
 * translation table, and a list of interrupts.  If some of its
 * LPIs are injected into a guest (GICv4), the event_map.vm field
 * indicates which one.
 */
struct its_device {
	struct list_head	entry;
	struct its_node		*its;
	struct event_lpi_map	event_map;
	void			*itt;
	u32			nr_ites;
	u32			device_id;
	bool			shared;
};

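/*
 * Proxy ITS device used to issue commands on behalf of VPE doorbell
 * LPIs when the redistributors do not provide DirectLPI registers.
 */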
static struct {
	raw_spinlock_t		lock;
	struct its_device	*dev;
	struct its_vpe		**vpes;
	int			next_victim;
} vpe_proxy;

static LIST_HEAD(its_nodes);
static DEFINE_RAW_SPINLOCK(its_lock);
static struct rdists *gic_rdists;
static struct irq_domain *its_parent;

static unsigned long its_list_map;
static u16 vmovp_seq_num;
static DEFINE_RAW_SPINLOCK(vmovp_lock);

static DEFINE_IDA(its_vpeid_ida);

#define gic_data_rdist()		(raw_cpu_ptr(gic_rdists->rdist))
#define gic_data_rdist_cpu(cpu)		(per_cpu_ptr(gic_rdists->rdist, cpu))
#define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
#define gic_data_rdist_vlpi_base()	(gic_data_rdist_rd_base() + SZ_128K)

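/*
 * Build the ITSList bitmap (one bit per ITS list_nr) of the ITSes
 * that currently have VLPIs mapped for this VM.
 */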
static u16 get_its_list(struct its_vm *vm)
{
	struct its_node *its;
	unsigned long its_list = 0;

	list_for_each_entry(its, &its_nodes, entry) {
		if (!is_v4(its))
			continue;

		if (vm->vlpi_count[its->list_nr])
			__set_bit(its->list_nr, &its_list);
	}

	return (u16)its_list;
}

static inline u32 its_get_event_id(struct irq_data *d)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	return d->hwirq - its_dev->event_map.lpi_base;
}

static struct its_collection *dev_event_to_col(struct its_device *its_dev,
					       u32 event)
{
	struct its_node *its = its_dev->its;

	return its->collections + its_dev->event_map.col_map[event];
}

static struct its_vlpi_map *dev_event_to_vlpi_map(struct its_device *its_dev,
					       u32 event)
{
	if (WARN_ON_ONCE(event >= its_dev->event_map.nr_lpis))
		return NULL;

	return &its_dev->event_map.vlpi_maps[event];
}

static struct its_collection *irq_to_col(struct irq_data *d)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);

	return dev_event_to_col(its_dev, its_get_event_id(d));
}

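/* Collection targets (RDbase) must have their bottom 16 bits clear */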
static struct its_collection *valid_col(struct its_collection *col)
{
	if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0)))
		return NULL;

	return col;
}

static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe)
{
	if (valid_col(its->collections + vpe->col_idx))
		return vpe;

	return NULL;
}

/*
 * ITS command descriptors - parameters to be encoded in a command
 * block.
 */
struct its_cmd_desc {
	union {
		struct {
			struct its_device *dev;
			u32 event_id;
		} its_inv_cmd;

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_clear_cmd;

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_int_cmd;

		struct {
			struct its_device *dev;
			int valid;
		} its_mapd_cmd;

		struct {
			struct its_collection *col;
			int valid;
		} its_mapc_cmd;

		struct {
			struct its_device *dev;
			u32 phys_id;
			u32 event_id;
		} its_mapti_cmd;

		struct {
			struct its_device *dev;
			struct its_collection *col;
			u32 event_id;
		} its_movi_cmd;

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_discard_cmd;

		struct {
			struct its_collection *col;
		} its_invall_cmd;

		struct {
			struct its_vpe *vpe;
		} its_vinvall_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_collection *col;
			bool valid;
		} its_vmapp_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_device *dev;
			u32 virt_id;
			u32 event_id;
			bool db_enabled;
		} its_vmapti_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_device *dev;
			u32 event_id;
			bool db_enabled;
		} its_vmovi_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_collection *col;
			u16 seq_num;
			u16 its_list;
		} its_vmovp_cmd;
	};
};

/*
 * The ITS command block, which is what the ITS actually parses.
 */
struct its_cmd_block {
	union {
		u64	raw_cmd[4];
		__le64	raw_cmd_le[4];
	};
};

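/* Each command is 32 bytes, so a 64kB queue holds 2048 entries */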
#define ITS_CMD_QUEUE_SZ		SZ_64K
#define ITS_CMD_QUEUE_NR_ENTRIES	(ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block))

typedef struct its_collection *(*its_cmd_builder_t)(struct its_node *,
						    struct its_cmd_block *,
						    struct its_cmd_desc *);

typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_node *,
					      struct its_cmd_block *,
					      struct its_cmd_desc *);

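/* Encode @val into bits [h:l] of a command word, clearing the field first */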
static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l)
{
	u64 mask = GENMASK_ULL(h, l);
	*raw_cmd &= ~mask;
	*raw_cmd |= (val << l) & mask;
}

static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
{
	its_mask_encode(&cmd->raw_cmd[0], cmd_nr, 7, 0);
}

static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
{
	its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32);
}

static void its_encode_event_id(struct its_cmd_block *cmd, u32 id)
{
	its_mask_encode(&cmd->raw_cmd[1], id, 31, 0);
}

static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id)
{
	its_mask_encode(&cmd->raw_cmd[1], phys_id, 63, 32);
}

static void its_encode_size(struct its_cmd_block *cmd, u8 size)
{
	its_mask_encode(&cmd->raw_cmd[1], size, 4, 0);
}

static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
{
	its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 51, 8);
}

static void its_encode_valid(struct its_cmd_block *cmd, int valid)
{
	its_mask_encode(&cmd->raw_cmd[2], !!valid, 63, 63);
}

static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
{
	its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 51, 16);
}

static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
{
	its_mask_encode(&cmd->raw_cmd[2], col, 15, 0);
}

static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid)
{
	its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32);
}

static void its_encode_virt_id(struct its_cmd_block *cmd, u32 virt_id)
{
	its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0);
}

static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id)
{
	its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32);
}

static void its_encode_db_valid(struct its_cmd_block *cmd, bool db_valid)
{
	its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0);
}

static void its_encode_seq_num(struct its_cmd_block *cmd, u16 seq_num)
{
	its_mask_encode(&cmd->raw_cmd[0], seq_num, 47, 32);
}

static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list)
{
	its_mask_encode(&cmd->raw_cmd[1], its_list, 15, 0);
}

static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa)
{
	its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 51, 16);
}

static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
{
	its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0);
}

static inline void its_fixup_cmd(struct its_cmd_block *cmd)
{
	/* Let's fixup BE commands */
	cmd->raw_cmd_le[0] = cpu_to_le64(cmd->raw_cmd[0]);
	cmd->raw_cmd_le[1] = cpu_to_le64(cmd->raw_cmd[1]);
	cmd->raw_cmd_le[2] = cpu_to_le64(cmd->raw_cmd[2]);
	cmd->raw_cmd_le[3] = cpu_to_le64(cmd->raw_cmd[3]);
}

static struct its_collection *its_build_mapd_cmd(struct its_node *its,
						 struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	unsigned long itt_addr;
	u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites);

	itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt);
	itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN);

	its_encode_cmd(cmd, GITS_CMD_MAPD);
	its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id);
	its_encode_size(cmd, size - 1);
	its_encode_itt(cmd, itt_addr);
	its_encode_valid(cmd, desc->its_mapd_cmd.valid);

	its_fixup_cmd(cmd);

	return NULL;
}

static struct its_collection *its_build_mapc_cmd(struct its_node *its,
						 struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_MAPC);
	its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
	its_encode_target(cmd, desc->its_mapc_cmd.col->target_address);
	its_encode_valid(cmd, desc->its_mapc_cmd.valid);

	its_fixup_cmd(cmd);

	return desc->its_mapc_cmd.col;
}

static struct its_collection *its_build_mapti_cmd(struct its_node *its,
						  struct its_cmd_block *cmd,
						  struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_mapti_cmd.dev,
			       desc->its_mapti_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_MAPTI);
	its_encode_devid(cmd, desc->its_mapti_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_mapti_cmd.event_id);
	its_encode_phys_id(cmd, desc->its_mapti_cmd.phys_id);
	its_encode_collection(cmd, col->col_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_movi_cmd(struct its_node *its,
						 struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_movi_cmd.dev,
			       desc->its_movi_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_MOVI);
	its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_movi_cmd.event_id);
	its_encode_collection(cmd, desc->its_movi_cmd.col->col_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_discard_cmd(struct its_node *its,
						    struct its_cmd_block *cmd,
						    struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_discard_cmd.dev,
			       desc->its_discard_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_DISCARD);
	its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_discard_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_inv_cmd(struct its_node *its,
						struct its_cmd_block *cmd,
						struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_inv_cmd.dev,
			       desc->its_inv_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_INV);
	its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_inv_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_int_cmd(struct its_node *its,
						struct its_cmd_block *cmd,
						struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_int_cmd.dev,
			       desc->its_int_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_INT);
	its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_int_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_clear_cmd(struct its_node *its,
						  struct its_cmd_block *cmd,
						  struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_clear_cmd.dev,
			       desc->its_clear_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_CLEAR);
	its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_clear_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_invall_cmd(struct its_node *its,
						   struct its_cmd_block *cmd,
						   struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_INVALL);
	its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);

	its_fixup_cmd(cmd);

	return NULL;
}

static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,
					     struct its_cmd_block *cmd,
					     struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_VINVALL);
	its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vinvall_cmd.vpe);
}

static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
					   struct its_cmd_block *cmd,
					   struct its_cmd_desc *desc)
{
	unsigned long vpt_addr;
	u64 target;

	vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
	target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset;

	its_encode_cmd(cmd, GITS_CMD_VMAPP);
	its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id);
	its_encode_valid(cmd, desc->its_vmapp_cmd.valid);
	its_encode_target(cmd, target);
	its_encode_vpt_addr(cmd, vpt_addr);
	its_encode_vpt_size(cmd, LPI_NRBITS - 1);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vmapp_cmd.vpe);
}

static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
					    struct its_cmd_block *cmd,
					    struct its_cmd_desc *desc)
{
	u32 db;

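	/* A doorbell of 1023 (the spurious INTID) means no doorbell */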
	if (desc->its_vmapti_cmd.db_enabled)
		db = desc->its_vmapti_cmd.vpe->vpe_db_lpi;
	else
		db = 1023;

	its_encode_cmd(cmd, GITS_CMD_VMAPTI);
	its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id);
	its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id);
	its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id);
	its_encode_db_phys_id(cmd, db);
	its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vmapti_cmd.vpe);
}

static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
					   struct its_cmd_block *cmd,
					   struct its_cmd_desc *desc)
{
	u32 db;

	if (desc->its_vmovi_cmd.db_enabled)
		db = desc->its_vmovi_cmd.vpe->vpe_db_lpi;
	else
		db = 1023;

	its_encode_cmd(cmd, GITS_CMD_VMOVI);
	its_encode_devid(cmd, desc->its_vmovi_cmd.dev->device_id);
	its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id);
	its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id);
	its_encode_db_phys_id(cmd, db);
	its_encode_db_valid(cmd, true);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vmovi_cmd.vpe);
}

static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
					   struct its_cmd_block *cmd,
					   struct its_cmd_desc *desc)
{
	u64 target;

	target = desc->its_vmovp_cmd.col->target_address + its->vlpi_redist_offset;
	its_encode_cmd(cmd, GITS_CMD_VMOVP);
	its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num);
	its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list);
	its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id);
	its_encode_target(cmd, target);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vmovp_cmd.vpe);
}

static struct its_vpe *its_build_vinv_cmd(struct its_node *its,
					  struct its_cmd_block *cmd,
					  struct its_cmd_desc *desc)
{
	struct its_vlpi_map *map;

	map = dev_event_to_vlpi_map(desc->its_inv_cmd.dev,
				    desc->its_inv_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_INV);
	its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_inv_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_vpe(its, map->vpe);
}

static struct its_vpe *its_build_vint_cmd(struct its_node *its,
					  struct its_cmd_block *cmd,
					  struct its_cmd_desc *desc)
{
	struct its_vlpi_map *map;

	map = dev_event_to_vlpi_map(desc->its_int_cmd.dev,
				    desc->its_int_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_INT);
	its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_int_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_vpe(its, map->vpe);
}

static struct its_vpe *its_build_vclear_cmd(struct its_node *its,
					    struct its_cmd_block *cmd,
					    struct its_cmd_desc *desc)
{
	struct its_vlpi_map *map;

	map = dev_event_to_vlpi_map(desc->its_clear_cmd.dev,
				    desc->its_clear_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_CLEAR);
	its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_clear_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_vpe(its, map->vpe);
}

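/*
 * Command queue handling: GITS_CWRITER and GITS_CREADR hold byte
 * offsets into the (physically contiguous) command queue.
 */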
static u64 its_cmd_ptr_to_offset(struct its_node *its,
				 struct its_cmd_block *ptr)
{
	return (ptr - its->cmd_base) * sizeof(*ptr);
}

static int its_queue_full(struct its_node *its)
{
	int widx;
	int ridx;

	widx = its->cmd_write - its->cmd_base;
	ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block);

	/* This is incredibly unlikely to happen, unless the ITS locks up. */
	if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx)
		return 1;

	return 0;
}

static struct its_cmd_block *its_allocate_entry(struct its_node *its)
{
	struct its_cmd_block *cmd;
	u32 count = 1000000;	/* 1s! */

	while (its_queue_full(its)) {
		count--;
		if (!count) {
			pr_err_ratelimited("ITS queue not draining\n");
			return NULL;
		}
		cpu_relax();
		udelay(1);
	}

	cmd = its->cmd_write++;

	/* Handle queue wrapping */
	if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES))
		its->cmd_write = its->cmd_base;

	/* Clear command */
	cmd->raw_cmd[0] = 0;
	cmd->raw_cmd[1] = 0;
	cmd->raw_cmd[2] = 0;
	cmd->raw_cmd[3] = 0;

	return cmd;
}

static struct its_cmd_block *its_post_commands(struct its_node *its)
{
	u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write);

	writel_relaxed(wr, its->base + GITS_CWRITER);

	return its->cmd_write;
}

static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
{
	/*
	 * Make sure the commands written to memory are observable by
	 * the ITS.
	 */
	if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
		gic_flush_dcache_to_poc(cmd, sizeof(*cmd));
	else
		dsb(ishst);
}

static int its_wait_for_range_completion(struct its_node *its,
					 u64	prev_idx,
					 struct its_cmd_block *to)
{
	u64 rd_idx, to_idx, linear_idx;
	u32 count = 1000000;	/* 1s! */

	/* Linearize to_idx if the command set has wrapped around */
	to_idx = its_cmd_ptr_to_offset(its, to);
	if (to_idx < prev_idx)
		to_idx += ITS_CMD_QUEUE_SZ;

	linear_idx = prev_idx;

	while (1) {
		s64 delta;

		rd_idx = readl_relaxed(its->base + GITS_CREADR);

		/*
		 * Compute the read pointer progress, taking the
		 * potential wrap-around into account.
		 */
		delta = rd_idx - prev_idx;
		if (rd_idx < prev_idx)
			delta += ITS_CMD_QUEUE_SZ;

		linear_idx += delta;
		if (linear_idx >= to_idx)
			break;

		count--;
		if (!count) {
			pr_err_ratelimited("ITS queue timeout (%llu %llu)\n",
					   to_idx, linear_idx);
			return -1;
		}
		prev_idx = rd_idx;
		cpu_relax();
		udelay(1);
	}

	return 0;
}

/* Warning, macro hell follows */
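/*
 * Generate a function that queues one command (plus a SYNC/VSYNC on the
 * object returned by the builder) and then waits for the read pointer
 * to move past it.
 */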
#define BUILD_SINGLE_CMD_FUNC(name, buildtype, synctype, buildfn)	\
void name(struct its_node *its,						\
	  buildtype builder,						\
	  struct its_cmd_desc *desc)					\
{									\
	struct its_cmd_block *cmd, *sync_cmd, *next_cmd;		\
	synctype *sync_obj;						\
	unsigned long flags;						\
	u64 rd_idx;							\
									\
	raw_spin_lock_irqsave(&its->lock, flags);			\
									\
	cmd = its_allocate_entry(its);					\
	if (!cmd) {		/* We're soooooo screwed... */		\
892e4f9094bSMarc Zyngier 		raw_spin_unlock_irqrestore(&its->lock, flags);		\
893e4f9094bSMarc Zyngier 		return;							\
894e4f9094bSMarc Zyngier 	}								\
89567047f90SMarc Zyngier 	sync_obj = builder(its, cmd, desc);				\
896e4f9094bSMarc Zyngier 	its_flush_cmd(its, cmd);					\
897e4f9094bSMarc Zyngier 									\
898e4f9094bSMarc Zyngier 	if (sync_obj) {							\
899e4f9094bSMarc Zyngier 		sync_cmd = its_allocate_entry(its);			\
900e4f9094bSMarc Zyngier 		if (!sync_cmd)						\
901e4f9094bSMarc Zyngier 			goto post;					\
902e4f9094bSMarc Zyngier 									\
90367047f90SMarc Zyngier 		buildfn(its, sync_cmd, sync_obj);			\
904e4f9094bSMarc Zyngier 		its_flush_cmd(its, sync_cmd);				\
905e4f9094bSMarc Zyngier 	}								\
906e4f9094bSMarc Zyngier 									\
907e4f9094bSMarc Zyngier post:									\
908a050fa54SHeyi Guo 	rd_idx = readl_relaxed(its->base + GITS_CREADR);		\
909e4f9094bSMarc Zyngier 	next_cmd = its_post_commands(its);				\
910e4f9094bSMarc Zyngier 	raw_spin_unlock_irqrestore(&its->lock, flags);			\
911e4f9094bSMarc Zyngier 									\
912a050fa54SHeyi Guo 	if (its_wait_for_range_completion(its, rd_idx, next_cmd))	\
913a19b462fSMarc Zyngier 		pr_err_ratelimited("ITS cmd %ps failed\n", builder);	\
914e4f9094bSMarc Zyngier }
915e4f9094bSMarc Zyngier 
91667047f90SMarc Zyngier static void its_build_sync_cmd(struct its_node *its,
91767047f90SMarc Zyngier 			       struct its_cmd_block *sync_cmd,
918e4f9094bSMarc Zyngier 			       struct its_collection *sync_col)
919cc2d3216SMarc Zyngier {
920cc2d3216SMarc Zyngier 	its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
921cc2d3216SMarc Zyngier 	its_encode_target(sync_cmd, sync_col->target_address);
922e4f9094bSMarc Zyngier 
923cc2d3216SMarc Zyngier 	its_fixup_cmd(sync_cmd);
924cc2d3216SMarc Zyngier }
925cc2d3216SMarc Zyngier 
926e4f9094bSMarc Zyngier static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t,
927e4f9094bSMarc Zyngier 			     struct its_collection, its_build_sync_cmd)
928cc2d3216SMarc Zyngier 
92967047f90SMarc Zyngier static void its_build_vsync_cmd(struct its_node *its,
93067047f90SMarc Zyngier 				struct its_cmd_block *sync_cmd,
931d011e4e6SMarc Zyngier 				struct its_vpe *sync_vpe)
932d011e4e6SMarc Zyngier {
933d011e4e6SMarc Zyngier 	its_encode_cmd(sync_cmd, GITS_CMD_VSYNC);
934d011e4e6SMarc Zyngier 	its_encode_vpeid(sync_cmd, sync_vpe->vpe_id);
935d011e4e6SMarc Zyngier 
936d011e4e6SMarc Zyngier 	its_fixup_cmd(sync_cmd);
937d011e4e6SMarc Zyngier }
938d011e4e6SMarc Zyngier 
939d011e4e6SMarc Zyngier static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t,
940d011e4e6SMarc Zyngier 			     struct its_vpe, its_build_vsync_cmd)
941d011e4e6SMarc Zyngier 
9428d85dcedSMarc Zyngier static void its_send_int(struct its_device *dev, u32 event_id)
9438d85dcedSMarc Zyngier {
9448d85dcedSMarc Zyngier 	struct its_cmd_desc desc;
9458d85dcedSMarc Zyngier 
9468d85dcedSMarc Zyngier 	desc.its_int_cmd.dev = dev;
9478d85dcedSMarc Zyngier 	desc.its_int_cmd.event_id = event_id;
9488d85dcedSMarc Zyngier 
9498d85dcedSMarc Zyngier 	its_send_single_command(dev->its, its_build_int_cmd, &desc);
9508d85dcedSMarc Zyngier }
9518d85dcedSMarc Zyngier 
9528d85dcedSMarc Zyngier static void its_send_clear(struct its_device *dev, u32 event_id)
9538d85dcedSMarc Zyngier {
9548d85dcedSMarc Zyngier 	struct its_cmd_desc desc;
9558d85dcedSMarc Zyngier 
9568d85dcedSMarc Zyngier 	desc.its_clear_cmd.dev = dev;
9578d85dcedSMarc Zyngier 	desc.its_clear_cmd.event_id = event_id;
9588d85dcedSMarc Zyngier 
9598d85dcedSMarc Zyngier 	its_send_single_command(dev->its, its_build_clear_cmd, &desc);
960cc2d3216SMarc Zyngier }
961cc2d3216SMarc Zyngier 
962cc2d3216SMarc Zyngier static void its_send_inv(struct its_device *dev, u32 event_id)
963cc2d3216SMarc Zyngier {
964cc2d3216SMarc Zyngier 	struct its_cmd_desc desc;
965cc2d3216SMarc Zyngier 
966cc2d3216SMarc Zyngier 	desc.its_inv_cmd.dev = dev;
967cc2d3216SMarc Zyngier 	desc.its_inv_cmd.event_id = event_id;
968cc2d3216SMarc Zyngier 
969cc2d3216SMarc Zyngier 	its_send_single_command(dev->its, its_build_inv_cmd, &desc);
970cc2d3216SMarc Zyngier }
971cc2d3216SMarc Zyngier 
972cc2d3216SMarc Zyngier static void its_send_mapd(struct its_device *dev, int valid)
973cc2d3216SMarc Zyngier {
974cc2d3216SMarc Zyngier 	struct its_cmd_desc desc;
975cc2d3216SMarc Zyngier 
976cc2d3216SMarc Zyngier 	desc.its_mapd_cmd.dev = dev;
977cc2d3216SMarc Zyngier 	desc.its_mapd_cmd.valid = !!valid;
978cc2d3216SMarc Zyngier 
979cc2d3216SMarc Zyngier 	its_send_single_command(dev->its, its_build_mapd_cmd, &desc);
980cc2d3216SMarc Zyngier }
981cc2d3216SMarc Zyngier 
982cc2d3216SMarc Zyngier static void its_send_mapc(struct its_node *its, struct its_collection *col,
983cc2d3216SMarc Zyngier 			  int valid)
984cc2d3216SMarc Zyngier {
985cc2d3216SMarc Zyngier 	struct its_cmd_desc desc;
986cc2d3216SMarc Zyngier 
987cc2d3216SMarc Zyngier 	desc.its_mapc_cmd.col = col;
988cc2d3216SMarc Zyngier 	desc.its_mapc_cmd.valid = !!valid;
989cc2d3216SMarc Zyngier 
990cc2d3216SMarc Zyngier 	its_send_single_command(its, its_build_mapc_cmd, &desc);
991cc2d3216SMarc Zyngier }
992cc2d3216SMarc Zyngier 
9936a25ad3aSMarc Zyngier static void its_send_mapti(struct its_device *dev, u32 irq_id, u32 id)
994cc2d3216SMarc Zyngier {
995cc2d3216SMarc Zyngier 	struct its_cmd_desc desc;
996cc2d3216SMarc Zyngier 
9976a25ad3aSMarc Zyngier 	desc.its_mapti_cmd.dev = dev;
9986a25ad3aSMarc Zyngier 	desc.its_mapti_cmd.phys_id = irq_id;
9996a25ad3aSMarc Zyngier 	desc.its_mapti_cmd.event_id = id;
1000cc2d3216SMarc Zyngier 
10016a25ad3aSMarc Zyngier 	its_send_single_command(dev->its, its_build_mapti_cmd, &desc);
1002cc2d3216SMarc Zyngier }
1003cc2d3216SMarc Zyngier 
1004cc2d3216SMarc Zyngier static void its_send_movi(struct its_device *dev,
1005cc2d3216SMarc Zyngier 			  struct its_collection *col, u32 id)
1006cc2d3216SMarc Zyngier {
1007cc2d3216SMarc Zyngier 	struct its_cmd_desc desc;
1008cc2d3216SMarc Zyngier 
1009cc2d3216SMarc Zyngier 	desc.its_movi_cmd.dev = dev;
1010cc2d3216SMarc Zyngier 	desc.its_movi_cmd.col = col;
1011591e5becSMarc Zyngier 	desc.its_movi_cmd.event_id = id;
1012cc2d3216SMarc Zyngier 
1013cc2d3216SMarc Zyngier 	its_send_single_command(dev->its, its_build_movi_cmd, &desc);
1014cc2d3216SMarc Zyngier }
1015cc2d3216SMarc Zyngier 
1016cc2d3216SMarc Zyngier static void its_send_discard(struct its_device *dev, u32 id)
1017cc2d3216SMarc Zyngier {
1018cc2d3216SMarc Zyngier 	struct its_cmd_desc desc;
1019cc2d3216SMarc Zyngier 
1020cc2d3216SMarc Zyngier 	desc.its_discard_cmd.dev = dev;
1021cc2d3216SMarc Zyngier 	desc.its_discard_cmd.event_id = id;
1022cc2d3216SMarc Zyngier 
1023cc2d3216SMarc Zyngier 	its_send_single_command(dev->its, its_build_discard_cmd, &desc);
1024cc2d3216SMarc Zyngier }
1025cc2d3216SMarc Zyngier 
1026cc2d3216SMarc Zyngier static void its_send_invall(struct its_node *its, struct its_collection *col)
1027cc2d3216SMarc Zyngier {
1028cc2d3216SMarc Zyngier 	struct its_cmd_desc desc;
1029cc2d3216SMarc Zyngier 
1030cc2d3216SMarc Zyngier 	desc.its_invall_cmd.col = col;
1031cc2d3216SMarc Zyngier 
1032cc2d3216SMarc Zyngier 	its_send_single_command(its, its_build_invall_cmd, &desc);
1033cc2d3216SMarc Zyngier }
1034c48ed51cSMarc Zyngier 
1035d011e4e6SMarc Zyngier static void its_send_vmapti(struct its_device *dev, u32 id)
1036d011e4e6SMarc Zyngier {
1037c1d4d5cdSMarc Zyngier 	struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id);
1038d011e4e6SMarc Zyngier 	struct its_cmd_desc desc;
1039d011e4e6SMarc Zyngier 
1040d011e4e6SMarc Zyngier 	desc.its_vmapti_cmd.vpe = map->vpe;
1041d011e4e6SMarc Zyngier 	desc.its_vmapti_cmd.dev = dev;
1042d011e4e6SMarc Zyngier 	desc.its_vmapti_cmd.virt_id = map->vintid;
1043d011e4e6SMarc Zyngier 	desc.its_vmapti_cmd.event_id = id;
1044d011e4e6SMarc Zyngier 	desc.its_vmapti_cmd.db_enabled = map->db_enabled;
1045d011e4e6SMarc Zyngier 
1046d011e4e6SMarc Zyngier 	its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc);
1047d011e4e6SMarc Zyngier }
1048d011e4e6SMarc Zyngier 
1049d011e4e6SMarc Zyngier static void its_send_vmovi(struct its_device *dev, u32 id)
1050d011e4e6SMarc Zyngier {
1051c1d4d5cdSMarc Zyngier 	struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id);
1052d011e4e6SMarc Zyngier 	struct its_cmd_desc desc;
1053d011e4e6SMarc Zyngier 
1054d011e4e6SMarc Zyngier 	desc.its_vmovi_cmd.vpe = map->vpe;
1055d011e4e6SMarc Zyngier 	desc.its_vmovi_cmd.dev = dev;
1056d011e4e6SMarc Zyngier 	desc.its_vmovi_cmd.event_id = id;
1057d011e4e6SMarc Zyngier 	desc.its_vmovi_cmd.db_enabled = map->db_enabled;
1058d011e4e6SMarc Zyngier 
1059d011e4e6SMarc Zyngier 	its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc);
1060d011e4e6SMarc Zyngier }
1061d011e4e6SMarc Zyngier 
106275fd951bSMarc Zyngier static void its_send_vmapp(struct its_node *its,
106375fd951bSMarc Zyngier 			   struct its_vpe *vpe, bool valid)
1064eb78192bSMarc Zyngier {
1065eb78192bSMarc Zyngier 	struct its_cmd_desc desc;
1066eb78192bSMarc Zyngier 
1067eb78192bSMarc Zyngier 	desc.its_vmapp_cmd.vpe = vpe;
1068eb78192bSMarc Zyngier 	desc.its_vmapp_cmd.valid = valid;
1069eb78192bSMarc Zyngier 	desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];
107075fd951bSMarc Zyngier 
1071eb78192bSMarc Zyngier 	its_send_single_vcommand(its, its_build_vmapp_cmd, &desc);
1072eb78192bSMarc Zyngier }
1073eb78192bSMarc Zyngier 
10743171a47aSMarc Zyngier static void its_send_vmovp(struct its_vpe *vpe)
10753171a47aSMarc Zyngier {
107684243125SZenghui Yu 	struct its_cmd_desc desc = {};
10773171a47aSMarc Zyngier 	struct its_node *its;
10783171a47aSMarc Zyngier 	unsigned long flags;
10793171a47aSMarc Zyngier 	int col_id = vpe->col_idx;
10803171a47aSMarc Zyngier 
10813171a47aSMarc Zyngier 	desc.its_vmovp_cmd.vpe = vpe;
10823171a47aSMarc Zyngier 
10833171a47aSMarc Zyngier 	if (!its_list_map) {
10843171a47aSMarc Zyngier 		its = list_first_entry(&its_nodes, struct its_node, entry);
10853171a47aSMarc Zyngier 		desc.its_vmovp_cmd.col = &its->collections[col_id];
10863171a47aSMarc Zyngier 		its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
10873171a47aSMarc Zyngier 		return;
10883171a47aSMarc Zyngier 	}
10893171a47aSMarc Zyngier 
10903171a47aSMarc Zyngier 	/*
10913171a47aSMarc Zyngier 	 * Yet another marvel of the architecture. If using the
10923171a47aSMarc Zyngier 	 * its_list "feature", we need to make sure that all ITSs
10933171a47aSMarc Zyngier 	 * receive all VMOVP commands in the same order. The only way
10943171a47aSMarc Zyngier 	 * to guarantee this is to make vmovp a serialization point.
10953171a47aSMarc Zyngier 	 *
10963171a47aSMarc Zyngier 	 * Wall <-- Head.
10973171a47aSMarc Zyngier 	 */
10983171a47aSMarc Zyngier 	raw_spin_lock_irqsave(&vmovp_lock, flags);
10993171a47aSMarc Zyngier 
11003171a47aSMarc Zyngier 	desc.its_vmovp_cmd.seq_num = vmovp_seq_num++;
110184243125SZenghui Yu 	desc.its_vmovp_cmd.its_list = get_its_list(vpe->its_vm);
11023171a47aSMarc Zyngier 
11033171a47aSMarc Zyngier 	/* Emit VMOVPs */
11043171a47aSMarc Zyngier 	list_for_each_entry(its, &its_nodes, entry) {
11050dd57fedSMarc Zyngier 		if (!is_v4(its))
11063171a47aSMarc Zyngier 			continue;
11073171a47aSMarc Zyngier 
11082247e1bfSMarc Zyngier 		if (!vpe->its_vm->vlpi_count[its->list_nr])
11092247e1bfSMarc Zyngier 			continue;
11102247e1bfSMarc Zyngier 
11113171a47aSMarc Zyngier 		desc.its_vmovp_cmd.col = &its->collections[col_id];
11123171a47aSMarc Zyngier 		its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
11133171a47aSMarc Zyngier 	}
11143171a47aSMarc Zyngier 
11153171a47aSMarc Zyngier 	raw_spin_unlock_irqrestore(&vmovp_lock, flags);
11163171a47aSMarc Zyngier }
11173171a47aSMarc Zyngier 
111840619a2eSMarc Zyngier static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe)
1119eb78192bSMarc Zyngier {
1120eb78192bSMarc Zyngier 	struct its_cmd_desc desc;
1121eb78192bSMarc Zyngier 
1122eb78192bSMarc Zyngier 	desc.its_vinvall_cmd.vpe = vpe;
1123eb78192bSMarc Zyngier 	its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
1124eb78192bSMarc Zyngier }
1125eb78192bSMarc Zyngier 
112628614696SMarc Zyngier static void its_send_vinv(struct its_device *dev, u32 event_id)
112728614696SMarc Zyngier {
112828614696SMarc Zyngier 	struct its_cmd_desc desc;
112928614696SMarc Zyngier 
113028614696SMarc Zyngier 	/*
113128614696SMarc Zyngier 	 * There is no real VINV command. This is just a normal INV,
113228614696SMarc Zyngier 	 * with a VSYNC instead of a SYNC.
113328614696SMarc Zyngier 	 */
113428614696SMarc Zyngier 	desc.its_inv_cmd.dev = dev;
113528614696SMarc Zyngier 	desc.its_inv_cmd.event_id = event_id;
113628614696SMarc Zyngier 
113728614696SMarc Zyngier 	its_send_single_vcommand(dev->its, its_build_vinv_cmd, &desc);
113828614696SMarc Zyngier }
113928614696SMarc Zyngier 
1140ed0e4aa9SMarc Zyngier static void its_send_vint(struct its_device *dev, u32 event_id)
1141ed0e4aa9SMarc Zyngier {
1142ed0e4aa9SMarc Zyngier 	struct its_cmd_desc desc;
1143ed0e4aa9SMarc Zyngier 
1144ed0e4aa9SMarc Zyngier 	/*
1145ed0e4aa9SMarc Zyngier 	 * There is no real VINT command. This is just a normal INT,
1146ed0e4aa9SMarc Zyngier 	 * with a VSYNC instead of a SYNC.
1147ed0e4aa9SMarc Zyngier 	 */
1148ed0e4aa9SMarc Zyngier 	desc.its_int_cmd.dev = dev;
1149ed0e4aa9SMarc Zyngier 	desc.its_int_cmd.event_id = event_id;
1150ed0e4aa9SMarc Zyngier 
1151ed0e4aa9SMarc Zyngier 	its_send_single_vcommand(dev->its, its_build_vint_cmd, &desc);
1152ed0e4aa9SMarc Zyngier }
1153ed0e4aa9SMarc Zyngier 
1154ed0e4aa9SMarc Zyngier static void its_send_vclear(struct its_device *dev, u32 event_id)
1155ed0e4aa9SMarc Zyngier {
1156ed0e4aa9SMarc Zyngier 	struct its_cmd_desc desc;
1157ed0e4aa9SMarc Zyngier 
1158ed0e4aa9SMarc Zyngier 	/*
1159ed0e4aa9SMarc Zyngier 	 * There is no real VCLEAR command. This is just a normal CLEAR,
1160ed0e4aa9SMarc Zyngier 	 * with a VSYNC instead of a SYNC.
1161ed0e4aa9SMarc Zyngier 	 */
1162ed0e4aa9SMarc Zyngier 	desc.its_clear_cmd.dev = dev;
1163ed0e4aa9SMarc Zyngier 	desc.its_clear_cmd.event_id = event_id;
1164ed0e4aa9SMarc Zyngier 
1165ed0e4aa9SMarc Zyngier 	its_send_single_vcommand(dev->its, its_build_vclear_cmd, &desc);
1166ed0e4aa9SMarc Zyngier }
1167ed0e4aa9SMarc Zyngier 
1168c48ed51cSMarc Zyngier /*
1169c48ed51cSMarc Zyngier  * irqchip functions - assumes MSI, mostly.
1170c48ed51cSMarc Zyngier  */
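/*
 * Added note (not in the original source): return the vLPI mapping for
 * this interrupt if it has been forwarded to a vCPU, or NULL if it is
 * still a purely physical LPI.
 */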
1171c1d4d5cdSMarc Zyngier static struct its_vlpi_map *get_vlpi_map(struct irq_data *d)
1172c1d4d5cdSMarc Zyngier {
1173c1d4d5cdSMarc Zyngier 	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1174c1d4d5cdSMarc Zyngier 	u32 event = its_get_event_id(d);
1175c1d4d5cdSMarc Zyngier 
1176c1d4d5cdSMarc Zyngier 	if (!irqd_is_forwarded_to_vcpu(d))
1177c1d4d5cdSMarc Zyngier 		return NULL;
1178c1d4d5cdSMarc Zyngier 
1179c1d4d5cdSMarc Zyngier 	return dev_event_to_vlpi_map(its_dev, event);
1180c1d4d5cdSMarc Zyngier }
1181c48ed51cSMarc Zyngier 
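/*
 * Added note (not in the original source): update the configuration
 * byte for an LPI, either in the VM's virtual property table (if the
 * interrupt is forwarded to a vCPU) or in the physical property table,
 * then make the write visible to the redistributors.
 */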
1182015ec038SMarc Zyngier static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
1183c48ed51cSMarc Zyngier {
1184c1d4d5cdSMarc Zyngier 	struct its_vlpi_map *map = get_vlpi_map(d);
1185015ec038SMarc Zyngier 	irq_hw_number_t hwirq;
1186e1a2e201SMarc Zyngier 	void *va;
1187adcdb94eSMarc Zyngier 	u8 *cfg;
1188c48ed51cSMarc Zyngier 
1189c1d4d5cdSMarc Zyngier 	if (map) {
1190c1d4d5cdSMarc Zyngier 		va = page_address(map->vm->vprop_page);
1191d4d7b4adSMarc Zyngier 		hwirq = map->vintid;
1192d4d7b4adSMarc Zyngier 
1193d4d7b4adSMarc Zyngier 		/* Remember the updated property */
1194d4d7b4adSMarc Zyngier 		map->properties &= ~clr;
1195d4d7b4adSMarc Zyngier 		map->properties |= set | LPI_PROP_GROUP1;
1196015ec038SMarc Zyngier 	} else {
1197e1a2e201SMarc Zyngier 		va = gic_rdists->prop_table_va;
1198015ec038SMarc Zyngier 		hwirq = d->hwirq;
1199015ec038SMarc Zyngier 	}
1200adcdb94eSMarc Zyngier 
1201e1a2e201SMarc Zyngier 	cfg = va + hwirq - 8192;
1202adcdb94eSMarc Zyngier 	*cfg &= ~clr;
1203015ec038SMarc Zyngier 	*cfg |= set | LPI_PROP_GROUP1;
1204c48ed51cSMarc Zyngier 
1205c48ed51cSMarc Zyngier 	/*
1206c48ed51cSMarc Zyngier 	 * Make the above write visible to the redistributors.
1207c48ed51cSMarc Zyngier 	 * And yes, we're flushing exactly: One. Single. Byte.
1208c48ed51cSMarc Zyngier 	 * Humpf...
1209c48ed51cSMarc Zyngier 	 */
1210c48ed51cSMarc Zyngier 	if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
1211328191c0SVladimir Murzin 		gic_flush_dcache_to_poc(cfg, sizeof(*cfg));
1212c48ed51cSMarc Zyngier 	else
1213c48ed51cSMarc Zyngier 		dsb(ishst);
1214015ec038SMarc Zyngier }
1215015ec038SMarc Zyngier 
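/* Added note: poll GICR_SYNCR until the redistributor has completed the previous operation */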
12162f4f064bSMarc Zyngier static void wait_for_syncr(void __iomem *rdbase)
12172f4f064bSMarc Zyngier {
12182f4f064bSMarc Zyngier 	while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
12192f4f064bSMarc Zyngier 		cpu_relax();
12202f4f064bSMarc Zyngier }
12212f4f064bSMarc Zyngier 
1222425c09beSMarc Zyngier static void direct_lpi_inv(struct irq_data *d)
1223425c09beSMarc Zyngier {
1224425c09beSMarc Zyngier 	struct its_collection *col;
1225425c09beSMarc Zyngier 	void __iomem *rdbase;
1226425c09beSMarc Zyngier 
1227425c09beSMarc Zyngier 	/* Target the redistributor this LPI is currently routed to */
1228425c09beSMarc Zyngier 	col = irq_to_col(d);
1229425c09beSMarc Zyngier 	rdbase = per_cpu_ptr(gic_rdists->rdist, col->col_id)->rd_base;
1230425c09beSMarc Zyngier 	gic_write_lpir(d->hwirq, rdbase + GICR_INVLPIR);
1231425c09beSMarc Zyngier 
1232425c09beSMarc Zyngier 	wait_for_syncr(rdbase);
1233425c09beSMarc Zyngier }
1234425c09beSMarc Zyngier 
1235015ec038SMarc Zyngier static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
1236015ec038SMarc Zyngier {
1237015ec038SMarc Zyngier 	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1238015ec038SMarc Zyngier 
1239015ec038SMarc Zyngier 	lpi_write_config(d, clr, set);
1240425c09beSMarc Zyngier 	if (gic_rdists->has_direct_lpi && !irqd_is_forwarded_to_vcpu(d))
1241425c09beSMarc Zyngier 		direct_lpi_inv(d);
124228614696SMarc Zyngier 	else if (!irqd_is_forwarded_to_vcpu(d))
1243adcdb94eSMarc Zyngier 		its_send_inv(its_dev, its_get_event_id(d));
124428614696SMarc Zyngier 	else
124528614696SMarc Zyngier 		its_send_vinv(its_dev, its_get_event_id(d));
1246c48ed51cSMarc Zyngier }
1247c48ed51cSMarc Zyngier 
1248015ec038SMarc Zyngier static void its_vlpi_set_doorbell(struct irq_data *d, bool enable)
1249015ec038SMarc Zyngier {
1250015ec038SMarc Zyngier 	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1251015ec038SMarc Zyngier 	u32 event = its_get_event_id(d);
1252c1d4d5cdSMarc Zyngier 	struct its_vlpi_map *map;
1253015ec038SMarc Zyngier 
1254c1d4d5cdSMarc Zyngier 	map = dev_event_to_vlpi_map(its_dev, event);
1255c1d4d5cdSMarc Zyngier 
1256c1d4d5cdSMarc Zyngier 	if (map->db_enabled == enable)
1257015ec038SMarc Zyngier 		return;
1258015ec038SMarc Zyngier 
1259c1d4d5cdSMarc Zyngier 	map->db_enabled = enable;
1260015ec038SMarc Zyngier 
1261015ec038SMarc Zyngier 	/*
1262015ec038SMarc Zyngier 	 * More fun with the architecture:
1263015ec038SMarc Zyngier 	 *
1264015ec038SMarc Zyngier 	 * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI
1265015ec038SMarc Zyngier 	 * value or to 1023, depending on the enable bit. But that
1266015ec038SMarc Zyngier 	 * would be issuing a mapping for an /existing/ DevID+EventID
1267015ec038SMarc Zyngier 	 * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI
1268015ec038SMarc Zyngier 	 * to the /same/ vPE, using this opportunity to adjust the
1269015ec038SMarc Zyngier 	 * doorbell. Mouahahahaha. We loves it, Precious.
1270015ec038SMarc Zyngier 	 */
1271015ec038SMarc Zyngier 	its_send_vmovi(its_dev, event);
1272c48ed51cSMarc Zyngier }
1273c48ed51cSMarc Zyngier 
1274c48ed51cSMarc Zyngier static void its_mask_irq(struct irq_data *d)
1275c48ed51cSMarc Zyngier {
1276015ec038SMarc Zyngier 	if (irqd_is_forwarded_to_vcpu(d))
1277015ec038SMarc Zyngier 		its_vlpi_set_doorbell(d, false);
1278015ec038SMarc Zyngier 
1279adcdb94eSMarc Zyngier 	lpi_update_config(d, LPI_PROP_ENABLED, 0);
1280c48ed51cSMarc Zyngier }
1281c48ed51cSMarc Zyngier 
1282c48ed51cSMarc Zyngier static void its_unmask_irq(struct irq_data *d)
1283c48ed51cSMarc Zyngier {
1284015ec038SMarc Zyngier 	if (irqd_is_forwarded_to_vcpu(d))
1285015ec038SMarc Zyngier 		its_vlpi_set_doorbell(d, true);
1286015ec038SMarc Zyngier 
1287adcdb94eSMarc Zyngier 	lpi_update_config(d, 0, LPI_PROP_ENABLED);
1288c48ed51cSMarc Zyngier }
1289c48ed51cSMarc Zyngier 
1290c48ed51cSMarc Zyngier static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
1291c48ed51cSMarc Zyngier 			    bool force)
1292c48ed51cSMarc Zyngier {
1293fbf8f40eSGanapatrao Kulkarni 	unsigned int cpu;
1294fbf8f40eSGanapatrao Kulkarni 	const struct cpumask *cpu_mask = cpu_online_mask;
1295c48ed51cSMarc Zyngier 	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1296c48ed51cSMarc Zyngier 	struct its_collection *target_col;
1297c48ed51cSMarc Zyngier 	u32 id = its_get_event_id(d);
1298c48ed51cSMarc Zyngier 
1299015ec038SMarc Zyngier 	/* A forwarded interrupt should use irq_set_vcpu_affinity */
1300015ec038SMarc Zyngier 	if (irqd_is_forwarded_to_vcpu(d))
1301015ec038SMarc Zyngier 		return -EINVAL;
1302015ec038SMarc Zyngier 
1303fbf8f40eSGanapatrao Kulkarni 	/* LPI cannot be routed to a redistributor that is on a foreign node */
1304fbf8f40eSGanapatrao Kulkarni 	if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
1305fbf8f40eSGanapatrao Kulkarni 		if (its_dev->its->numa_node >= 0) {
1306fbf8f40eSGanapatrao Kulkarni 			cpu_mask = cpumask_of_node(its_dev->its->numa_node);
1307fbf8f40eSGanapatrao Kulkarni 			if (!cpumask_intersects(mask_val, cpu_mask))
1308fbf8f40eSGanapatrao Kulkarni 				return -EINVAL;
1309fbf8f40eSGanapatrao Kulkarni 		}
1310fbf8f40eSGanapatrao Kulkarni 	}
1311fbf8f40eSGanapatrao Kulkarni 
1312fbf8f40eSGanapatrao Kulkarni 	cpu = cpumask_any_and(mask_val, cpu_mask);
1313fbf8f40eSGanapatrao Kulkarni 
1314c48ed51cSMarc Zyngier 	if (cpu >= nr_cpu_ids)
1315c48ed51cSMarc Zyngier 		return -EINVAL;
1316c48ed51cSMarc Zyngier 
13178b8d94a7SMaJun 	/* don't set the affinity when the target CPU is the same as the current one */
13188b8d94a7SMaJun 	if (cpu != its_dev->event_map.col_map[id]) {
1319c48ed51cSMarc Zyngier 		target_col = &its_dev->its->collections[cpu];
1320c48ed51cSMarc Zyngier 		its_send_movi(its_dev, target_col, id);
1321591e5becSMarc Zyngier 		its_dev->event_map.col_map[id] = cpu;
13220d224d35SMarc Zyngier 		irq_data_update_effective_affinity(d, cpumask_of(cpu));
13238b8d94a7SMaJun 	}
1324c48ed51cSMarc Zyngier 
1325c48ed51cSMarc Zyngier 	return IRQ_SET_MASK_OK_DONE;
1326c48ed51cSMarc Zyngier }
1327c48ed51cSMarc Zyngier 
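/* Added note: the MSI doorbell is the physical address of the ITS translation register */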
1328558b0165SArd Biesheuvel static u64 its_irq_get_msi_base(struct its_device *its_dev)
1329558b0165SArd Biesheuvel {
1330558b0165SArd Biesheuvel 	struct its_node *its = its_dev->its;
1331558b0165SArd Biesheuvel 
1332558b0165SArd Biesheuvel 	return its->phys_base + GITS_TRANSLATER;
1333558b0165SArd Biesheuvel }
1334558b0165SArd Biesheuvel 
1335b48ac83dSMarc Zyngier static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
1336b48ac83dSMarc Zyngier {
1337b48ac83dSMarc Zyngier 	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1338b48ac83dSMarc Zyngier 	struct its_node *its;
1339b48ac83dSMarc Zyngier 	u64 addr;
1340b48ac83dSMarc Zyngier 
1341b48ac83dSMarc Zyngier 	its = its_dev->its;
1342558b0165SArd Biesheuvel 	addr = its->get_msi_base(its_dev);
1343b48ac83dSMarc Zyngier 
1344b11283ebSVladimir Murzin 	msg->address_lo		= lower_32_bits(addr);
1345b11283ebSVladimir Murzin 	msg->address_hi		= upper_32_bits(addr);
1346b48ac83dSMarc Zyngier 	msg->data		= its_get_event_id(d);
134744bb7e24SRobin Murphy 
134835ae7df2SJulien Grall 	iommu_dma_compose_msi_msg(irq_data_get_msi_desc(d), msg);
1349b48ac83dSMarc Zyngier }
1350b48ac83dSMarc Zyngier 
13518d85dcedSMarc Zyngier static int its_irq_set_irqchip_state(struct irq_data *d,
13528d85dcedSMarc Zyngier 				     enum irqchip_irq_state which,
13538d85dcedSMarc Zyngier 				     bool state)
13548d85dcedSMarc Zyngier {
13558d85dcedSMarc Zyngier 	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
13568d85dcedSMarc Zyngier 	u32 event = its_get_event_id(d);
13578d85dcedSMarc Zyngier 
13588d85dcedSMarc Zyngier 	if (which != IRQCHIP_STATE_PENDING)
13598d85dcedSMarc Zyngier 		return -EINVAL;
13608d85dcedSMarc Zyngier 
1361ed0e4aa9SMarc Zyngier 	if (irqd_is_forwarded_to_vcpu(d)) {
1362ed0e4aa9SMarc Zyngier 		if (state)
1363ed0e4aa9SMarc Zyngier 			its_send_vint(its_dev, event);
1364ed0e4aa9SMarc Zyngier 		else
1365ed0e4aa9SMarc Zyngier 			its_send_vclear(its_dev, event);
1366ed0e4aa9SMarc Zyngier 	} else {
13678d85dcedSMarc Zyngier 		if (state)
13688d85dcedSMarc Zyngier 			its_send_int(its_dev, event);
13698d85dcedSMarc Zyngier 		else
13708d85dcedSMarc Zyngier 			its_send_clear(its_dev, event);
1371ed0e4aa9SMarc Zyngier 	}
13728d85dcedSMarc Zyngier 
13738d85dcedSMarc Zyngier 	return 0;
13748d85dcedSMarc Zyngier }
13758d85dcedSMarc Zyngier 
13762247e1bfSMarc Zyngier static void its_map_vm(struct its_node *its, struct its_vm *vm)
13772247e1bfSMarc Zyngier {
13782247e1bfSMarc Zyngier 	unsigned long flags;
13792247e1bfSMarc Zyngier 
13802247e1bfSMarc Zyngier 	/* Not using the ITS list? Everything is always mapped. */
13812247e1bfSMarc Zyngier 	if (!its_list_map)
13822247e1bfSMarc Zyngier 		return;
13832247e1bfSMarc Zyngier 
13842247e1bfSMarc Zyngier 	raw_spin_lock_irqsave(&vmovp_lock, flags);
13852247e1bfSMarc Zyngier 
13862247e1bfSMarc Zyngier 	/*
13872247e1bfSMarc Zyngier 	 * If the VM wasn't mapped yet, iterate over the vpes and get
13882247e1bfSMarc Zyngier 	 * them mapped now.
13892247e1bfSMarc Zyngier 	 */
13902247e1bfSMarc Zyngier 	vm->vlpi_count[its->list_nr]++;
13912247e1bfSMarc Zyngier 
13922247e1bfSMarc Zyngier 	if (vm->vlpi_count[its->list_nr] == 1) {
13932247e1bfSMarc Zyngier 		int i;
13942247e1bfSMarc Zyngier 
13952247e1bfSMarc Zyngier 		for (i = 0; i < vm->nr_vpes; i++) {
13962247e1bfSMarc Zyngier 			struct its_vpe *vpe = vm->vpes[i];
139744c4c25eSMarc Zyngier 			struct irq_data *d = irq_get_irq_data(vpe->irq);
13982247e1bfSMarc Zyngier 
13992247e1bfSMarc Zyngier 			/* Map the VPE to the first possible CPU */
14002247e1bfSMarc Zyngier 			vpe->col_idx = cpumask_first(cpu_online_mask);
14012247e1bfSMarc Zyngier 			its_send_vmapp(its, vpe, true);
14022247e1bfSMarc Zyngier 			its_send_vinvall(its, vpe);
140344c4c25eSMarc Zyngier 			irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
14042247e1bfSMarc Zyngier 		}
14052247e1bfSMarc Zyngier 	}
14062247e1bfSMarc Zyngier 
14072247e1bfSMarc Zyngier 	raw_spin_unlock_irqrestore(&vmovp_lock, flags);
14082247e1bfSMarc Zyngier }
14092247e1bfSMarc Zyngier 
14102247e1bfSMarc Zyngier static void its_unmap_vm(struct its_node *its, struct its_vm *vm)
14112247e1bfSMarc Zyngier {
14122247e1bfSMarc Zyngier 	unsigned long flags;
14132247e1bfSMarc Zyngier 
14142247e1bfSMarc Zyngier 	/* Not using the ITS list? Everything is always mapped. */
14152247e1bfSMarc Zyngier 	if (!its_list_map)
14162247e1bfSMarc Zyngier 		return;
14172247e1bfSMarc Zyngier 
14182247e1bfSMarc Zyngier 	raw_spin_lock_irqsave(&vmovp_lock, flags);
14192247e1bfSMarc Zyngier 
14202247e1bfSMarc Zyngier 	if (!--vm->vlpi_count[its->list_nr]) {
14212247e1bfSMarc Zyngier 		int i;
14222247e1bfSMarc Zyngier 
14232247e1bfSMarc Zyngier 		for (i = 0; i < vm->nr_vpes; i++)
14242247e1bfSMarc Zyngier 			its_send_vmapp(its, vm->vpes[i], false);
14252247e1bfSMarc Zyngier 	}
14262247e1bfSMarc Zyngier 
14272247e1bfSMarc Zyngier 	raw_spin_unlock_irqrestore(&vmovp_lock, flags);
14282247e1bfSMarc Zyngier }
14292247e1bfSMarc Zyngier 
1430d011e4e6SMarc Zyngier static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
1431d011e4e6SMarc Zyngier {
1432d011e4e6SMarc Zyngier 	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1433d011e4e6SMarc Zyngier 	u32 event = its_get_event_id(d);
1434d011e4e6SMarc Zyngier 	int ret = 0;
1435d011e4e6SMarc Zyngier 
1436d011e4e6SMarc Zyngier 	if (!info->map)
1437d011e4e6SMarc Zyngier 		return -EINVAL;
1438d011e4e6SMarc Zyngier 
1439d011e4e6SMarc Zyngier 	mutex_lock(&its_dev->event_map.vlpi_lock);
1440d011e4e6SMarc Zyngier 
1441d011e4e6SMarc Zyngier 	if (!its_dev->event_map.vm) {
1442d011e4e6SMarc Zyngier 		struct its_vlpi_map *maps;
1443d011e4e6SMarc Zyngier 
14446396bb22SKees Cook 		maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps),
1445d011e4e6SMarc Zyngier 			       GFP_KERNEL);
1446d011e4e6SMarc Zyngier 		if (!maps) {
1447d011e4e6SMarc Zyngier 			ret = -ENOMEM;
1448d011e4e6SMarc Zyngier 			goto out;
1449d011e4e6SMarc Zyngier 		}
1450d011e4e6SMarc Zyngier 
1451d011e4e6SMarc Zyngier 		its_dev->event_map.vm = info->map->vm;
1452d011e4e6SMarc Zyngier 		its_dev->event_map.vlpi_maps = maps;
1453d011e4e6SMarc Zyngier 	} else if (its_dev->event_map.vm != info->map->vm) {
1454d011e4e6SMarc Zyngier 		ret = -EINVAL;
1455d011e4e6SMarc Zyngier 		goto out;
1456d011e4e6SMarc Zyngier 	}
1457d011e4e6SMarc Zyngier 
1458d011e4e6SMarc Zyngier 	/* Get our private copy of the mapping information */
1459d011e4e6SMarc Zyngier 	its_dev->event_map.vlpi_maps[event] = *info->map;
1460d011e4e6SMarc Zyngier 
1461d011e4e6SMarc Zyngier 	if (irqd_is_forwarded_to_vcpu(d)) {
1462d011e4e6SMarc Zyngier 		/* Already mapped, move it around */
1463d011e4e6SMarc Zyngier 		its_send_vmovi(its_dev, event);
1464d011e4e6SMarc Zyngier 	} else {
14652247e1bfSMarc Zyngier 		/* Ensure all the VPEs are mapped on this ITS */
14662247e1bfSMarc Zyngier 		its_map_vm(its_dev->its, info->map->vm);
14672247e1bfSMarc Zyngier 
1468d4d7b4adSMarc Zyngier 		/*
1469d4d7b4adSMarc Zyngier 		 * Flag the interrupt as forwarded so that we can
1470d4d7b4adSMarc Zyngier 		 * start poking the virtual property table.
1471d4d7b4adSMarc Zyngier 		 */
1472d4d7b4adSMarc Zyngier 		irqd_set_forwarded_to_vcpu(d);
1473d4d7b4adSMarc Zyngier 
1474d4d7b4adSMarc Zyngier 		/* Write out the property to the prop table */
1475d4d7b4adSMarc Zyngier 		lpi_write_config(d, 0xff, info->map->properties);
1476d4d7b4adSMarc Zyngier 
1477d011e4e6SMarc Zyngier 		/* Drop the physical mapping */
1478d011e4e6SMarc Zyngier 		its_send_discard(its_dev, event);
1479d011e4e6SMarc Zyngier 
1480d011e4e6SMarc Zyngier 		/* and install the virtual one */
1481d011e4e6SMarc Zyngier 		its_send_vmapti(its_dev, event);
1482d011e4e6SMarc Zyngier 
1483d011e4e6SMarc Zyngier 		/* Increment the number of VLPIs */
1484d011e4e6SMarc Zyngier 		its_dev->event_map.nr_vlpis++;
1485d011e4e6SMarc Zyngier 	}
1486d011e4e6SMarc Zyngier 
1487d011e4e6SMarc Zyngier out:
1488d011e4e6SMarc Zyngier 	mutex_unlock(&its_dev->event_map.vlpi_lock);
1489d011e4e6SMarc Zyngier 	return ret;
1490d011e4e6SMarc Zyngier }
1491d011e4e6SMarc Zyngier 
1492d011e4e6SMarc Zyngier static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info)
1493d011e4e6SMarc Zyngier {
1494d011e4e6SMarc Zyngier 	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1495046b5054SMarc Zyngier 	struct its_vlpi_map *map;
1496d011e4e6SMarc Zyngier 	int ret = 0;
1497d011e4e6SMarc Zyngier 
1498d011e4e6SMarc Zyngier 	mutex_lock(&its_dev->event_map.vlpi_lock);
1499d011e4e6SMarc Zyngier 
1500046b5054SMarc Zyngier 	map = get_vlpi_map(d);
1501046b5054SMarc Zyngier 
1502046b5054SMarc Zyngier 	if (!its_dev->event_map.vm || !map) {
1503d011e4e6SMarc Zyngier 		ret = -EINVAL;
1504d011e4e6SMarc Zyngier 		goto out;
1505d011e4e6SMarc Zyngier 	}
1506d011e4e6SMarc Zyngier 
1507d011e4e6SMarc Zyngier 	/* Copy our mapping information to the incoming request */
1508c1d4d5cdSMarc Zyngier 	*info->map = *map;
1509d011e4e6SMarc Zyngier 
1510d011e4e6SMarc Zyngier out:
1511d011e4e6SMarc Zyngier 	mutex_unlock(&its_dev->event_map.vlpi_lock);
1512d011e4e6SMarc Zyngier 	return ret;
1513d011e4e6SMarc Zyngier }
1514d011e4e6SMarc Zyngier 
1515d011e4e6SMarc Zyngier static int its_vlpi_unmap(struct irq_data *d)
1516d011e4e6SMarc Zyngier {
1517d011e4e6SMarc Zyngier 	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1518d011e4e6SMarc Zyngier 	u32 event = its_get_event_id(d);
1519d011e4e6SMarc Zyngier 	int ret = 0;
1520d011e4e6SMarc Zyngier 
1521d011e4e6SMarc Zyngier 	mutex_lock(&its_dev->event_map.vlpi_lock);
1522d011e4e6SMarc Zyngier 
1523d011e4e6SMarc Zyngier 	if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) {
1524d011e4e6SMarc Zyngier 		ret = -EINVAL;
1525d011e4e6SMarc Zyngier 		goto out;
1526d011e4e6SMarc Zyngier 	}
1527d011e4e6SMarc Zyngier 
1528d011e4e6SMarc Zyngier 	/* Drop the virtual mapping */
1529d011e4e6SMarc Zyngier 	its_send_discard(its_dev, event);
1530d011e4e6SMarc Zyngier 
1531d011e4e6SMarc Zyngier 	/* and restore the physical one */
1532d011e4e6SMarc Zyngier 	irqd_clr_forwarded_to_vcpu(d);
1533d011e4e6SMarc Zyngier 	its_send_mapti(its_dev, d->hwirq, event);
1534d011e4e6SMarc Zyngier 	lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO |
1535d011e4e6SMarc Zyngier 				    LPI_PROP_ENABLED |
1536d011e4e6SMarc Zyngier 				    LPI_PROP_GROUP1));
1537d011e4e6SMarc Zyngier 
15382247e1bfSMarc Zyngier 	/* Potentially unmap the VM from this ITS */
15392247e1bfSMarc Zyngier 	its_unmap_vm(its_dev->its, its_dev->event_map.vm);
15402247e1bfSMarc Zyngier 
1541d011e4e6SMarc Zyngier 	/*
1542d011e4e6SMarc Zyngier 	 * Drop the refcount and make the device available again if
1543d011e4e6SMarc Zyngier 	 * this was the last VLPI.
1544d011e4e6SMarc Zyngier 	 */
1545d011e4e6SMarc Zyngier 	if (!--its_dev->event_map.nr_vlpis) {
1546d011e4e6SMarc Zyngier 		its_dev->event_map.vm = NULL;
1547d011e4e6SMarc Zyngier 		kfree(its_dev->event_map.vlpi_maps);
1548d011e4e6SMarc Zyngier 	}
1549d011e4e6SMarc Zyngier 
1550d011e4e6SMarc Zyngier out:
1551d011e4e6SMarc Zyngier 	mutex_unlock(&its_dev->event_map.vlpi_lock);
1552d011e4e6SMarc Zyngier 	return ret;
1553d011e4e6SMarc Zyngier }
1554d011e4e6SMarc Zyngier 
1555015ec038SMarc Zyngier static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info)
1556015ec038SMarc Zyngier {
1557015ec038SMarc Zyngier 	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1558015ec038SMarc Zyngier 
1559015ec038SMarc Zyngier 	if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
1560015ec038SMarc Zyngier 		return -EINVAL;
1561015ec038SMarc Zyngier 
1562015ec038SMarc Zyngier 	if (info->cmd_type == PROP_UPDATE_AND_INV_VLPI)
1563015ec038SMarc Zyngier 		lpi_update_config(d, 0xff, info->config);
1564015ec038SMarc Zyngier 	else
1565015ec038SMarc Zyngier 		lpi_write_config(d, 0xff, info->config);
1566015ec038SMarc Zyngier 	its_vlpi_set_doorbell(d, !!(info->config & LPI_PROP_ENABLED));
1567015ec038SMarc Zyngier 
1568015ec038SMarc Zyngier 	return 0;
1569015ec038SMarc Zyngier }
1570015ec038SMarc Zyngier 
1571c808eea8SMarc Zyngier static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
1572c808eea8SMarc Zyngier {
1573c808eea8SMarc Zyngier 	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1574c808eea8SMarc Zyngier 	struct its_cmd_info *info = vcpu_info;
1575c808eea8SMarc Zyngier 
1576c808eea8SMarc Zyngier 	/* Need a v4 ITS */
15770dd57fedSMarc Zyngier 	if (!is_v4(its_dev->its))
1578c808eea8SMarc Zyngier 		return -EINVAL;
1579c808eea8SMarc Zyngier 
1580d011e4e6SMarc Zyngier 	/* Unmap request? */
1581d011e4e6SMarc Zyngier 	if (!info)
1582d011e4e6SMarc Zyngier 		return its_vlpi_unmap(d);
1583d011e4e6SMarc Zyngier 
1584c808eea8SMarc Zyngier 	switch (info->cmd_type) {
1585c808eea8SMarc Zyngier 	case MAP_VLPI:
1586d011e4e6SMarc Zyngier 		return its_vlpi_map(d, info);
1587c808eea8SMarc Zyngier 
1588c808eea8SMarc Zyngier 	case GET_VLPI:
1589d011e4e6SMarc Zyngier 		return its_vlpi_get(d, info);
1590c808eea8SMarc Zyngier 
1591c808eea8SMarc Zyngier 	case PROP_UPDATE_VLPI:
1592c808eea8SMarc Zyngier 	case PROP_UPDATE_AND_INV_VLPI:
1593015ec038SMarc Zyngier 		return its_vlpi_prop_update(d, info);
1594c808eea8SMarc Zyngier 
1595c808eea8SMarc Zyngier 	default:
1596c808eea8SMarc Zyngier 		return -EINVAL;
1597c808eea8SMarc Zyngier 	}
1598c808eea8SMarc Zyngier }
1599c808eea8SMarc Zyngier 
1600c48ed51cSMarc Zyngier static struct irq_chip its_irq_chip = {
1601c48ed51cSMarc Zyngier 	.name			= "ITS",
1602c48ed51cSMarc Zyngier 	.irq_mask		= its_mask_irq,
1603c48ed51cSMarc Zyngier 	.irq_unmask		= its_unmask_irq,
1604004fa08dSAshok Kumar 	.irq_eoi		= irq_chip_eoi_parent,
1605c48ed51cSMarc Zyngier 	.irq_set_affinity	= its_set_affinity,
1606b48ac83dSMarc Zyngier 	.irq_compose_msi_msg	= its_irq_compose_msi_msg,
16078d85dcedSMarc Zyngier 	.irq_set_irqchip_state	= its_irq_set_irqchip_state,
1608c808eea8SMarc Zyngier 	.irq_set_vcpu_affinity	= its_irq_set_vcpu_affinity,
1609b48ac83dSMarc Zyngier };
1610b48ac83dSMarc Zyngier 
1611880cb3cdSMarc Zyngier 
1612bf9529f8SMarc Zyngier /*
1613bf9529f8SMarc Zyngier  * How we allocate LPIs:
1614bf9529f8SMarc Zyngier  *
1615880cb3cdSMarc Zyngier  * lpi_range_list contains ranges of LPIs that are available to
1616880cb3cdSMarc Zyngier  * allocate from. To allocate LPIs, just pick the first range that
1617880cb3cdSMarc Zyngier  * fits the required allocation, and reduce it by the required
1618880cb3cdSMarc Zyngier  * amount. Once empty, remove the range from the list.
1619bf9529f8SMarc Zyngier  *
1620880cb3cdSMarc Zyngier  * To free a range of LPIs, add a free range to the list, sort it and
1621880cb3cdSMarc Zyngier  * merge the result if the new range happens to be adjacent to an
1622880cb3cdSMarc Zyngier  * already free block.
1623880cb3cdSMarc Zyngier  *
1624880cb3cdSMarc Zyngier  * The consequence of the above is that allocation cost is low, but
1625880cb3cdSMarc Zyngier  * freeing is expensive. We assume that freeing rarely occurs.
1626880cb3cdSMarc Zyngier  */
16274cb205c0SJia He #define ITS_MAX_LPI_NRBITS	16 /* 64K LPIs */
1628880cb3cdSMarc Zyngier 
1629880cb3cdSMarc Zyngier static DEFINE_MUTEX(lpi_range_lock);
1630880cb3cdSMarc Zyngier static LIST_HEAD(lpi_range_list);
1631bf9529f8SMarc Zyngier 
1632880cb3cdSMarc Zyngier struct lpi_range {
1633880cb3cdSMarc Zyngier 	struct list_head	entry;
1634880cb3cdSMarc Zyngier 	u32			base_id;
1635880cb3cdSMarc Zyngier 	u32			span;
1636880cb3cdSMarc Zyngier };
1637880cb3cdSMarc Zyngier 
1638880cb3cdSMarc Zyngier static struct lpi_range *mk_lpi_range(u32 base, u32 span)
1639bf9529f8SMarc Zyngier {
1640880cb3cdSMarc Zyngier 	struct lpi_range *range;
1641880cb3cdSMarc Zyngier 
16421c73fac5SRasmus Villemoes 	range = kmalloc(sizeof(*range), GFP_KERNEL);
1643880cb3cdSMarc Zyngier 	if (range) {
1644880cb3cdSMarc Zyngier 		range->base_id = base;
1645880cb3cdSMarc Zyngier 		range->span = span;
1646bf9529f8SMarc Zyngier 	}
1647bf9529f8SMarc Zyngier 
1648880cb3cdSMarc Zyngier 	return range;
1649880cb3cdSMarc Zyngier }
1650880cb3cdSMarc Zyngier 
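/*
 * Added note (not in the original source): carve nr_lpis out of the
 * first free range that is large enough, removing the range from the
 * list once it has been fully consumed.
 */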
1651880cb3cdSMarc Zyngier static int alloc_lpi_range(u32 nr_lpis, u32 *base)
1652880cb3cdSMarc Zyngier {
1653880cb3cdSMarc Zyngier 	struct lpi_range *range, *tmp;
1654880cb3cdSMarc Zyngier 	int err = -ENOSPC;
1655880cb3cdSMarc Zyngier 
1656880cb3cdSMarc Zyngier 	mutex_lock(&lpi_range_lock);
1657880cb3cdSMarc Zyngier 
1658880cb3cdSMarc Zyngier 	list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) {
1659880cb3cdSMarc Zyngier 		if (range->span >= nr_lpis) {
1660880cb3cdSMarc Zyngier 			*base = range->base_id;
1661880cb3cdSMarc Zyngier 			range->base_id += nr_lpis;
1662880cb3cdSMarc Zyngier 			range->span -= nr_lpis;
1663880cb3cdSMarc Zyngier 
1664880cb3cdSMarc Zyngier 			if (range->span == 0) {
1665880cb3cdSMarc Zyngier 				list_del(&range->entry);
1666880cb3cdSMarc Zyngier 				kfree(range);
1667880cb3cdSMarc Zyngier 			}
1668880cb3cdSMarc Zyngier 
1669880cb3cdSMarc Zyngier 			err = 0;
1670880cb3cdSMarc Zyngier 			break;
1671880cb3cdSMarc Zyngier 		}
1672880cb3cdSMarc Zyngier 	}
1673880cb3cdSMarc Zyngier 
1674880cb3cdSMarc Zyngier 	mutex_unlock(&lpi_range_lock);
1675880cb3cdSMarc Zyngier 
1676880cb3cdSMarc Zyngier 	pr_debug("ITS: alloc %u:%u\n", *base, nr_lpis);
1677880cb3cdSMarc Zyngier 	return err;
1678880cb3cdSMarc Zyngier }
1679880cb3cdSMarc Zyngier 
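/* Added note: fold range 'a' into 'b' if 'a' immediately precedes 'b' in the LPI space */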
168012eade12SRasmus Villemoes static void merge_lpi_ranges(struct lpi_range *a, struct lpi_range *b)
168112eade12SRasmus Villemoes {
168212eade12SRasmus Villemoes 	if (&a->entry == &lpi_range_list || &b->entry == &lpi_range_list)
168312eade12SRasmus Villemoes 		return;
168412eade12SRasmus Villemoes 	if (a->base_id + a->span != b->base_id)
168512eade12SRasmus Villemoes 		return;
168612eade12SRasmus Villemoes 	b->base_id = a->base_id;
168712eade12SRasmus Villemoes 	b->span += a->span;
168812eade12SRasmus Villemoes 	list_del(&a->entry);
168912eade12SRasmus Villemoes 	kfree(a);
169012eade12SRasmus Villemoes }
169112eade12SRasmus Villemoes 
1692880cb3cdSMarc Zyngier static int free_lpi_range(u32 base, u32 nr_lpis)
1693880cb3cdSMarc Zyngier {
169412eade12SRasmus Villemoes 	struct lpi_range *new, *old;
1695880cb3cdSMarc Zyngier 
1696880cb3cdSMarc Zyngier 	new = mk_lpi_range(base, nr_lpis);
1697b31a3838SRasmus Villemoes 	if (!new)
1698b31a3838SRasmus Villemoes 		return -ENOMEM;
1699880cb3cdSMarc Zyngier 
1700880cb3cdSMarc Zyngier 	mutex_lock(&lpi_range_lock);
1701880cb3cdSMarc Zyngier 
170212eade12SRasmus Villemoes 	list_for_each_entry_reverse(old, &lpi_range_list, entry) {
170312eade12SRasmus Villemoes 		if (old->base_id < base)
170412eade12SRasmus Villemoes 			break;
1705880cb3cdSMarc Zyngier 	}
170612eade12SRasmus Villemoes 	/*
170712eade12SRasmus Villemoes 	 * old is the last element with ->base_id smaller than base,
170812eade12SRasmus Villemoes 	 * so new goes right after it. If there are no elements with
170912eade12SRasmus Villemoes 	 * ->base_id smaller than base, &old->entry ends up pointing
171012eade12SRasmus Villemoes 	 * at the head of the list, and inserting new at the start of
171112eade12SRasmus Villemoes 	 * the list is the right thing to do in that case as well.
171212eade12SRasmus Villemoes 	 */
171312eade12SRasmus Villemoes 	list_add(&new->entry, &old->entry);
171412eade12SRasmus Villemoes 	/*
171512eade12SRasmus Villemoes 	 * Now check if we can merge with the preceding and/or
171612eade12SRasmus Villemoes 	 * following ranges.
171712eade12SRasmus Villemoes 	 */
171812eade12SRasmus Villemoes 	merge_lpi_ranges(old, new);
171912eade12SRasmus Villemoes 	merge_lpi_ranges(new, list_next_entry(new, entry));
1720880cb3cdSMarc Zyngier 
1721880cb3cdSMarc Zyngier 	mutex_unlock(&lpi_range_lock);
1722b31a3838SRasmus Villemoes 	return 0;
1723bf9529f8SMarc Zyngier }
1724bf9529f8SMarc Zyngier 
172504a0e4deSTomasz Nowicki static int __init its_lpi_init(u32 id_bits)
1726bf9529f8SMarc Zyngier {
1727880cb3cdSMarc Zyngier 	u32 lpis = (1UL << id_bits) - 8192;
172812b2905aSMarc Zyngier 	u32 numlpis;
1729880cb3cdSMarc Zyngier 	int err;
1730bf9529f8SMarc Zyngier 
173112b2905aSMarc Zyngier 	numlpis = 1UL << GICD_TYPER_NUM_LPIS(gic_rdists->gicd_typer);
173212b2905aSMarc Zyngier 
173312b2905aSMarc Zyngier 	if (numlpis > 2 && !WARN_ON(numlpis > lpis)) {
173412b2905aSMarc Zyngier 		lpis = numlpis;
173512b2905aSMarc Zyngier 		pr_info("ITS: Using hypervisor restricted LPI range [%u]\n",
173612b2905aSMarc Zyngier 			lpis);
173712b2905aSMarc Zyngier 	}
173812b2905aSMarc Zyngier 
1739880cb3cdSMarc Zyngier 	/*
1740880cb3cdSMarc Zyngier 	 * Initializing the allocator is just the same as freeing the
1741880cb3cdSMarc Zyngier 	 * full range of LPIs.
1742880cb3cdSMarc Zyngier 	 */
1743880cb3cdSMarc Zyngier 	err = free_lpi_range(8192, lpis);
1744880cb3cdSMarc Zyngier 	pr_debug("ITS: Allocator initialized for %u LPIs\n", lpis);
1745880cb3cdSMarc Zyngier 	return err;
1746bf9529f8SMarc Zyngier }
1747bf9529f8SMarc Zyngier 
174838dd7c49SMarc Zyngier static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids)
1749bf9529f8SMarc Zyngier {
1750bf9529f8SMarc Zyngier 	unsigned long *bitmap = NULL;
1751880cb3cdSMarc Zyngier 	int err = 0;
1752bf9529f8SMarc Zyngier 
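	/* Added note: halve the request until a large enough free range is found */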
1753bf9529f8SMarc Zyngier 	do {
175438dd7c49SMarc Zyngier 		err = alloc_lpi_range(nr_irqs, base);
1755880cb3cdSMarc Zyngier 		if (!err)
1756bf9529f8SMarc Zyngier 			break;
1757bf9529f8SMarc Zyngier 
175838dd7c49SMarc Zyngier 		nr_irqs /= 2;
175938dd7c49SMarc Zyngier 	} while (nr_irqs > 0);
1760bf9529f8SMarc Zyngier 
176145725e0fSMarc Zyngier 	if (!nr_irqs)
176245725e0fSMarc Zyngier 		err = -ENOSPC;
176345725e0fSMarc Zyngier 
1764880cb3cdSMarc Zyngier 	if (err)
1765bf9529f8SMarc Zyngier 		goto out;
1766bf9529f8SMarc Zyngier 
176738dd7c49SMarc Zyngier 	bitmap = kcalloc(BITS_TO_LONGS(nr_irqs), sizeof (long), GFP_ATOMIC);
1768bf9529f8SMarc Zyngier 	if (!bitmap)
1769bf9529f8SMarc Zyngier 		goto out;
1770bf9529f8SMarc Zyngier 
177138dd7c49SMarc Zyngier 	*nr_ids = nr_irqs;
1772bf9529f8SMarc Zyngier 
1773bf9529f8SMarc Zyngier out:
1774c8415b94SMarc Zyngier 	if (!bitmap)
1775c8415b94SMarc Zyngier 		*base = *nr_ids = 0;
1776c8415b94SMarc Zyngier 
1777bf9529f8SMarc Zyngier 	return bitmap;
1778bf9529f8SMarc Zyngier }
1779bf9529f8SMarc Zyngier 
178038dd7c49SMarc Zyngier static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids)
1781bf9529f8SMarc Zyngier {
1782880cb3cdSMarc Zyngier 	WARN_ON(free_lpi_range(base, nr_ids));
1783cf2be8baSMarc Zyngier 	kfree(bitmap);
1784bf9529f8SMarc Zyngier }
17851ac19ca6SMarc Zyngier 
1786053be485SMarc Zyngier static void gic_reset_prop_table(void *va)
1787053be485SMarc Zyngier {
1788053be485SMarc Zyngier 	/* Priority 0xa0, Group-1, disabled */
1789053be485SMarc Zyngier 	memset(va, LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1, LPI_PROPBASE_SZ);
1790053be485SMarc Zyngier 
1791053be485SMarc Zyngier 	/* Make sure the GIC will observe the written configuration */
1792053be485SMarc Zyngier 	gic_flush_dcache_to_poc(va, LPI_PROPBASE_SZ);
1793053be485SMarc Zyngier }
1794053be485SMarc Zyngier 
17950e5ccf91SMarc Zyngier static struct page *its_allocate_prop_table(gfp_t gfp_flags)
17960e5ccf91SMarc Zyngier {
17970e5ccf91SMarc Zyngier 	struct page *prop_page;
17981ac19ca6SMarc Zyngier 
17990e5ccf91SMarc Zyngier 	prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ));
18000e5ccf91SMarc Zyngier 	if (!prop_page)
18010e5ccf91SMarc Zyngier 		return NULL;
18020e5ccf91SMarc Zyngier 
1803053be485SMarc Zyngier 	gic_reset_prop_table(page_address(prop_page));
18040e5ccf91SMarc Zyngier 
18050e5ccf91SMarc Zyngier 	return prop_page;
18060e5ccf91SMarc Zyngier }
18070e5ccf91SMarc Zyngier 
18087d75bbb4SMarc Zyngier static void its_free_prop_table(struct page *prop_page)
18097d75bbb4SMarc Zyngier {
18107d75bbb4SMarc Zyngier 	free_pages((unsigned long)page_address(prop_page),
18117d75bbb4SMarc Zyngier 		   get_order(LPI_PROPBASE_SZ));
18127d75bbb4SMarc Zyngier }
18131ac19ca6SMarc Zyngier 
18145e2c9f9aSMarc Zyngier static bool gic_check_reserved_range(phys_addr_t addr, unsigned long size)
18155e2c9f9aSMarc Zyngier {
18165e2c9f9aSMarc Zyngier 	phys_addr_t start, end, addr_end;
18175e2c9f9aSMarc Zyngier 	u64 i;
18185e2c9f9aSMarc Zyngier 
18195e2c9f9aSMarc Zyngier 	/*
18205e2c9f9aSMarc Zyngier 	 * We don't bother checking for a kdump kernel as by
18215e2c9f9aSMarc Zyngier 	 * construction, the LPI tables are out of this kernel's
18225e2c9f9aSMarc Zyngier 	 * memory map.
18235e2c9f9aSMarc Zyngier 	 */
18245e2c9f9aSMarc Zyngier 	if (is_kdump_kernel())
18255e2c9f9aSMarc Zyngier 		return true;
18265e2c9f9aSMarc Zyngier 
18275e2c9f9aSMarc Zyngier 	addr_end = addr + size - 1;
18285e2c9f9aSMarc Zyngier 
18295e2c9f9aSMarc Zyngier 	for_each_reserved_mem_region(i, &start, &end) {
18305e2c9f9aSMarc Zyngier 		if (addr >= start && addr_end <= end)
18315e2c9f9aSMarc Zyngier 			return true;
18325e2c9f9aSMarc Zyngier 	}
18335e2c9f9aSMarc Zyngier 
18345e2c9f9aSMarc Zyngier 	/* Not found, not a good sign... */
18355e2c9f9aSMarc Zyngier 	pr_warn("GICv3: Expected reserved range [%pa:%pa], not found\n",
18365e2c9f9aSMarc Zyngier 		&addr, &addr_end);
18375e2c9f9aSMarc Zyngier 	add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
18385e2c9f9aSMarc Zyngier 	return false;
18395e2c9f9aSMarc Zyngier }
18405e2c9f9aSMarc Zyngier 
18413fb68faeSMarc Zyngier static int gic_reserve_range(phys_addr_t addr, unsigned long size)
18423fb68faeSMarc Zyngier {
18433fb68faeSMarc Zyngier 	if (efi_enabled(EFI_CONFIG_TABLES))
18443fb68faeSMarc Zyngier 		return efi_mem_reserve_persistent(addr, size);
18453fb68faeSMarc Zyngier 
18463fb68faeSMarc Zyngier 	return 0;
18473fb68faeSMarc Zyngier }
18483fb68faeSMarc Zyngier 
184911e37d35SMarc Zyngier static int __init its_setup_lpi_prop_table(void)
18501ac19ca6SMarc Zyngier {
1851c440a9d9SMarc Zyngier 	if (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) {
1852c440a9d9SMarc Zyngier 		u64 val;
1853c440a9d9SMarc Zyngier 
1854c440a9d9SMarc Zyngier 		val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER);
1855c440a9d9SMarc Zyngier 		lpi_id_bits = (val & GICR_PROPBASER_IDBITS_MASK) + 1;
1856c440a9d9SMarc Zyngier 
1857c440a9d9SMarc Zyngier 		gic_rdists->prop_table_pa = val & GENMASK_ULL(51, 12);
1858c440a9d9SMarc Zyngier 		gic_rdists->prop_table_va = memremap(gic_rdists->prop_table_pa,
1859c440a9d9SMarc Zyngier 						     LPI_PROPBASE_SZ,
1860c440a9d9SMarc Zyngier 						     MEMREMAP_WB);
1861c440a9d9SMarc Zyngier 		gic_reset_prop_table(gic_rdists->prop_table_va);
1862c440a9d9SMarc Zyngier 	} else {
1863e1a2e201SMarc Zyngier 		struct page *page;
18641ac19ca6SMarc Zyngier 
1865c440a9d9SMarc Zyngier 		lpi_id_bits = min_t(u32,
1866c440a9d9SMarc Zyngier 				    GICD_TYPER_ID_BITS(gic_rdists->gicd_typer),
18674cb205c0SJia He 				    ITS_MAX_LPI_NRBITS);
1868e1a2e201SMarc Zyngier 		page = its_allocate_prop_table(GFP_NOWAIT);
1869e1a2e201SMarc Zyngier 		if (!page) {
18701ac19ca6SMarc Zyngier 			pr_err("Failed to allocate PROPBASE\n");
18711ac19ca6SMarc Zyngier 			return -ENOMEM;
18721ac19ca6SMarc Zyngier 		}
18731ac19ca6SMarc Zyngier 
1874e1a2e201SMarc Zyngier 		gic_rdists->prop_table_pa = page_to_phys(page);
1875e1a2e201SMarc Zyngier 		gic_rdists->prop_table_va = page_address(page);
18763fb68faeSMarc Zyngier 		WARN_ON(gic_reserve_range(gic_rdists->prop_table_pa,
18773fb68faeSMarc Zyngier 					  LPI_PROPBASE_SZ));
1878c440a9d9SMarc Zyngier 	}
1879e1a2e201SMarc Zyngier 
1880e1a2e201SMarc Zyngier 	pr_info("GICv3: using LPI property table @%pa\n",
1881e1a2e201SMarc Zyngier 		&gic_rdists->prop_table_pa);
18821ac19ca6SMarc Zyngier 
18836c31e123SShanker Donthineni 	return its_lpi_init(lpi_id_bits);
18841ac19ca6SMarc Zyngier }
18851ac19ca6SMarc Zyngier 
18861ac19ca6SMarc Zyngier static const char *its_base_type_string[] = {
18871ac19ca6SMarc Zyngier 	[GITS_BASER_TYPE_DEVICE]	= "Devices",
18881ac19ca6SMarc Zyngier 	[GITS_BASER_TYPE_VCPU]		= "Virtual CPUs",
18894f46de9dSMarc Zyngier 	[GITS_BASER_TYPE_RESERVED3]	= "Reserved (3)",
18901ac19ca6SMarc Zyngier 	[GITS_BASER_TYPE_COLLECTION]	= "Interrupt Collections",
18911ac19ca6SMarc Zyngier 	[GITS_BASER_TYPE_RESERVED5] 	= "Reserved (5)",
18921ac19ca6SMarc Zyngier 	[GITS_BASER_TYPE_RESERVED6] 	= "Reserved (6)",
18931ac19ca6SMarc Zyngier 	[GITS_BASER_TYPE_RESERVED7] 	= "Reserved (7)",
18941ac19ca6SMarc Zyngier };
18951ac19ca6SMarc Zyngier 
18962d81d425SShanker Donthineni static u64 its_read_baser(struct its_node *its, struct its_baser *baser)
18972d81d425SShanker Donthineni {
18982d81d425SShanker Donthineni 	u32 idx = baser - its->tables;
18992d81d425SShanker Donthineni 
19000968a619SVladimir Murzin 	return gits_read_baser(its->base + GITS_BASER + (idx << 3));
19012d81d425SShanker Donthineni }
19022d81d425SShanker Donthineni 
19032d81d425SShanker Donthineni static void its_write_baser(struct its_node *its, struct its_baser *baser,
19042d81d425SShanker Donthineni 			    u64 val)
19052d81d425SShanker Donthineni {
19062d81d425SShanker Donthineni 	u32 idx = baser - its->tables;
19072d81d425SShanker Donthineni 
19080968a619SVladimir Murzin 	gits_write_baser(val, its->base + GITS_BASER + (idx << 3));
19092d81d425SShanker Donthineni 	baser->val = its_read_baser(its, baser);
19102d81d425SShanker Donthineni }
19112d81d425SShanker Donthineni 
19129347359aSShanker Donthineni static int its_setup_baser(struct its_node *its, struct its_baser *baser,
19133faf24eaSShanker Donthineni 			   u64 cache, u64 shr, u32 psz, u32 order,
19143faf24eaSShanker Donthineni 			   bool indirect)
19159347359aSShanker Donthineni {
19169347359aSShanker Donthineni 	u64 val = its_read_baser(its, baser);
19179347359aSShanker Donthineni 	u64 esz = GITS_BASER_ENTRY_SIZE(val);
19189347359aSShanker Donthineni 	u64 type = GITS_BASER_TYPE(val);
191930ae9610SShanker Donthineni 	u64 baser_phys, tmp;
19209347359aSShanker Donthineni 	u32 alloc_pages;
1921539d3782SShanker Donthineni 	struct page *page;
19229347359aSShanker Donthineni 	void *base;
19239347359aSShanker Donthineni 
19249347359aSShanker Donthineni retry_alloc_baser:
19259347359aSShanker Donthineni 	alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
19269347359aSShanker Donthineni 	if (alloc_pages > GITS_BASER_PAGES_MAX) {
19279347359aSShanker Donthineni 		pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n",
19289347359aSShanker Donthineni 			&its->phys_base, its_base_type_string[type],
19299347359aSShanker Donthineni 			alloc_pages, GITS_BASER_PAGES_MAX);
19309347359aSShanker Donthineni 		alloc_pages = GITS_BASER_PAGES_MAX;
19319347359aSShanker Donthineni 		order = get_order(GITS_BASER_PAGES_MAX * psz);
19329347359aSShanker Donthineni 	}
19339347359aSShanker Donthineni 
1934539d3782SShanker Donthineni 	page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order);
1935539d3782SShanker Donthineni 	if (!page)
19369347359aSShanker Donthineni 		return -ENOMEM;
19379347359aSShanker Donthineni 
1938539d3782SShanker Donthineni 	base = (void *)page_address(page);
193930ae9610SShanker Donthineni 	baser_phys = virt_to_phys(base);
194030ae9610SShanker Donthineni 
194130ae9610SShanker Donthineni 	/* Check if the physical address of the memory is above 48bits */
194230ae9610SShanker Donthineni 	if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && (baser_phys >> 48)) {
194330ae9610SShanker Donthineni 
194430ae9610SShanker Donthineni 		/* 52bit PA is supported only when PageSize=64K */
194530ae9610SShanker Donthineni 		if (psz != SZ_64K) {
194630ae9610SShanker Donthineni 			pr_err("ITS: no 52bit PA support when psz=%d\n", psz);
194730ae9610SShanker Donthineni 			free_pages((unsigned long)base, order);
194830ae9610SShanker Donthineni 			return -ENXIO;
194930ae9610SShanker Donthineni 		}
195030ae9610SShanker Donthineni 
195130ae9610SShanker Donthineni 		/* Convert 52bit PA to 48bit field */
195230ae9610SShanker Donthineni 		baser_phys = GITS_BASER_PHYS_52_to_48(baser_phys);
195330ae9610SShanker Donthineni 	}
195430ae9610SShanker Donthineni 
19559347359aSShanker Donthineni retry_baser:
195630ae9610SShanker Donthineni 	val = (baser_phys					 |
19579347359aSShanker Donthineni 		(type << GITS_BASER_TYPE_SHIFT)			 |
19589347359aSShanker Donthineni 		((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT)	 |
19599347359aSShanker Donthineni 		((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT)	 |
19609347359aSShanker Donthineni 		cache						 |
19619347359aSShanker Donthineni 		shr						 |
19629347359aSShanker Donthineni 		GITS_BASER_VALID);
19639347359aSShanker Donthineni 
19643faf24eaSShanker Donthineni 	val |=	indirect ? GITS_BASER_INDIRECT : 0x0;
19653faf24eaSShanker Donthineni 
19669347359aSShanker Donthineni 	switch (psz) {
19679347359aSShanker Donthineni 	case SZ_4K:
19689347359aSShanker Donthineni 		val |= GITS_BASER_PAGE_SIZE_4K;
19699347359aSShanker Donthineni 		break;
19709347359aSShanker Donthineni 	case SZ_16K:
19719347359aSShanker Donthineni 		val |= GITS_BASER_PAGE_SIZE_16K;
19729347359aSShanker Donthineni 		break;
19739347359aSShanker Donthineni 	case SZ_64K:
19749347359aSShanker Donthineni 		val |= GITS_BASER_PAGE_SIZE_64K;
19759347359aSShanker Donthineni 		break;
19769347359aSShanker Donthineni 	}
19779347359aSShanker Donthineni 
19789347359aSShanker Donthineni 	its_write_baser(its, baser, val);
19799347359aSShanker Donthineni 	tmp = baser->val;
19809347359aSShanker Donthineni 
19819347359aSShanker Donthineni 	if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
19829347359aSShanker Donthineni 		/*
19839347359aSShanker Donthineni 		 * Shareability didn't stick. Just use
19849347359aSShanker Donthineni 		 * whatever the read reported, which is likely
19859347359aSShanker Donthineni 		 * to be the only thing this ITS
19869347359aSShanker Donthineni 		 * supports. If that's zero, make it
19879347359aSShanker Donthineni 		 * non-cacheable as well.
19889347359aSShanker Donthineni 		 */
19899347359aSShanker Donthineni 		shr = tmp & GITS_BASER_SHAREABILITY_MASK;
19909347359aSShanker Donthineni 		if (!shr) {
19919347359aSShanker Donthineni 			cache = GITS_BASER_nC;
1992328191c0SVladimir Murzin 			gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));
19939347359aSShanker Donthineni 		}
19949347359aSShanker Donthineni 		goto retry_baser;
19959347359aSShanker Donthineni 	}
19969347359aSShanker Donthineni 
19979347359aSShanker Donthineni 	if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) {
19989347359aSShanker Donthineni 		/*
19999347359aSShanker Donthineni 		 * Page size didn't stick. Let's try a smaller
20009347359aSShanker Donthineni 		 * size and retry. If we reach 4K, then
20019347359aSShanker Donthineni 		 * something is horribly wrong...
20029347359aSShanker Donthineni 		 */
20039347359aSShanker Donthineni 		free_pages((unsigned long)base, order);
20049347359aSShanker Donthineni 		baser->base = NULL;
20059347359aSShanker Donthineni 
20069347359aSShanker Donthineni 		switch (psz) {
20079347359aSShanker Donthineni 		case SZ_16K:
20089347359aSShanker Donthineni 			psz = SZ_4K;
20099347359aSShanker Donthineni 			goto retry_alloc_baser;
20109347359aSShanker Donthineni 		case SZ_64K:
20119347359aSShanker Donthineni 			psz = SZ_16K;
20129347359aSShanker Donthineni 			goto retry_alloc_baser;
20139347359aSShanker Donthineni 		}
20149347359aSShanker Donthineni 	}
20159347359aSShanker Donthineni 
20169347359aSShanker Donthineni 	if (val != tmp) {
2017b11283ebSVladimir Murzin 		pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n",
20189347359aSShanker Donthineni 		       &its->phys_base, its_base_type_string[type],
2019b11283ebSVladimir Murzin 		       val, tmp);
20209347359aSShanker Donthineni 		free_pages((unsigned long)base, order);
20219347359aSShanker Donthineni 		return -ENXIO;
20229347359aSShanker Donthineni 	}
20239347359aSShanker Donthineni 
20249347359aSShanker Donthineni 	baser->order = order;
20259347359aSShanker Donthineni 	baser->base = base;
20269347359aSShanker Donthineni 	baser->psz = psz;
20273faf24eaSShanker Donthineni 	tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz;
20289347359aSShanker Donthineni 
20293faf24eaSShanker Donthineni 	pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n",
2030d524eaa2SVladimir Murzin 		&its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp),
20319347359aSShanker Donthineni 		its_base_type_string[type],
20329347359aSShanker Donthineni 		(unsigned long)virt_to_phys(base),
20333faf24eaSShanker Donthineni 		indirect ? "indirect" : "flat", (int)esz,
20349347359aSShanker Donthineni 		psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
20359347359aSShanker Donthineni 
20369347359aSShanker Donthineni 	return 0;
20379347359aSShanker Donthineni }
20389347359aSShanker Donthineni 
20394cacac57SMarc Zyngier static bool its_parse_indirect_baser(struct its_node *its,
20404cacac57SMarc Zyngier 				     struct its_baser *baser,
204132bd44dcSShanker Donthineni 				     u32 psz, u32 *order, u32 ids)
20424b75c459SShanker Donthineni {
20434cacac57SMarc Zyngier 	u64 tmp = its_read_baser(its, baser);
20444cacac57SMarc Zyngier 	u64 type = GITS_BASER_TYPE(tmp);
20454cacac57SMarc Zyngier 	u64 esz = GITS_BASER_ENTRY_SIZE(tmp);
20462fd632a0SShanker Donthineni 	u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb;
20474b75c459SShanker Donthineni 	u32 new_order = *order;
20483faf24eaSShanker Donthineni 	bool indirect = false;
20493faf24eaSShanker Donthineni 
20503faf24eaSShanker Donthineni 	/* No need to enable Indirection if memory requirement < (psz*2)bytes */
20513faf24eaSShanker Donthineni 	if ((esz << ids) > (psz * 2)) {
20523faf24eaSShanker Donthineni 		/*
20533faf24eaSShanker Donthineni 		 * Find out whether hw supports a single or two-level table by
20543faf24eaSShanker Donthineni 		 * reading the bit at offset '62' after writing '1' to it.
20553faf24eaSShanker Donthineni 		 */
20563faf24eaSShanker Donthineni 		its_write_baser(its, baser, val | GITS_BASER_INDIRECT);
20573faf24eaSShanker Donthineni 		indirect = !!(baser->val & GITS_BASER_INDIRECT);
20583faf24eaSShanker Donthineni 
20593faf24eaSShanker Donthineni 		if (indirect) {
20603faf24eaSShanker Donthineni 			/*
20613faf24eaSShanker Donthineni 			 * Each lvl2 table is the size of an ITS page, which is 'psz'.
20623faf24eaSShanker Donthineni 			 * To compute the lvl1 table size, subtract the ID bits covered
20633faf24eaSShanker Donthineni 			 * by a single lvl2 table from the 'ids' reported by the ITS
20643faf24eaSShanker Donthineni 			 * hardware; the lvl1 table then needs 2^ids entries of
20653faf24eaSShanker Donthineni 			 * GITS_LVL1_ENTRY_SIZE bytes, one per lvl2 table.
20663faf24eaSShanker Donthineni 			 */
2067d524eaa2SVladimir Murzin 			ids -= ilog2(psz / (int)esz);
20683faf24eaSShanker Donthineni 			esz = GITS_LVL1_ENTRY_SIZE;
20693faf24eaSShanker Donthineni 		}
20703faf24eaSShanker Donthineni 	}
20714b75c459SShanker Donthineni 
20724b75c459SShanker Donthineni 	/*
20734b75c459SShanker Donthineni 	 * Allocate as many entries as required to fit the
20744b75c459SShanker Donthineni 	 * range of device IDs that the ITS can grok... The ID
20754b75c459SShanker Donthineni 	 * space being incredibly sparse, this results in a
20763faf24eaSShanker Donthineni 	 * massive waste of memory if the two-level device table
20773faf24eaSShanker Donthineni 	 * feature is not supported by the hardware.
20784b75c459SShanker Donthineni 	 */
20794b75c459SShanker Donthineni 	new_order = max_t(u32, get_order(esz << ids), new_order);
20804b75c459SShanker Donthineni 	if (new_order >= MAX_ORDER) {
20814b75c459SShanker Donthineni 		new_order = MAX_ORDER - 1;
2082d524eaa2SVladimir Murzin 		ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz);
2083576a8342SMarc Zyngier 		pr_warn("ITS@%pa: %s Table too large, reduce ids %llu->%u\n",
20844cacac57SMarc Zyngier 			&its->phys_base, its_base_type_string[type],
2085576a8342SMarc Zyngier 			device_ids(its), ids);
20864b75c459SShanker Donthineni 	}
20874b75c459SShanker Donthineni 
20884b75c459SShanker Donthineni 	*order = new_order;
20893faf24eaSShanker Donthineni 
20903faf24eaSShanker Donthineni 	return indirect;
20914b75c459SShanker Donthineni }
20924b75c459SShanker Donthineni 
20931ac19ca6SMarc Zyngier static void its_free_tables(struct its_node *its)
20941ac19ca6SMarc Zyngier {
20951ac19ca6SMarc Zyngier 	int i;
20961ac19ca6SMarc Zyngier 
20971ac19ca6SMarc Zyngier 	for (i = 0; i < GITS_BASER_NR_REGS; i++) {
20981a485f4dSShanker Donthineni 		if (its->tables[i].base) {
20991a485f4dSShanker Donthineni 			free_pages((unsigned long)its->tables[i].base,
21001a485f4dSShanker Donthineni 				   its->tables[i].order);
21011a485f4dSShanker Donthineni 			its->tables[i].base = NULL;
21021ac19ca6SMarc Zyngier 		}
21031ac19ca6SMarc Zyngier 	}
21041ac19ca6SMarc Zyngier }
21051ac19ca6SMarc Zyngier 
21060e0b0f69SShanker Donthineni static int its_alloc_tables(struct its_node *its)
21071ac19ca6SMarc Zyngier {
21081ac19ca6SMarc Zyngier 	u64 shr = GITS_BASER_InnerShareable;
21092fd632a0SShanker Donthineni 	u64 cache = GITS_BASER_RaWaWb;
21109347359aSShanker Donthineni 	u32 psz = SZ_64K;
21119347359aSShanker Donthineni 	int err, i;
211294100970SRobert Richter 
2113fa150019SArd Biesheuvel 	if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375)
2114fa150019SArd Biesheuvel 		/* erratum 24313: ignore memory access type */
21159347359aSShanker Donthineni 		cache = GITS_BASER_nCnB;
2116466b7d16SShanker Donthineni 
21171ac19ca6SMarc Zyngier 	for (i = 0; i < GITS_BASER_NR_REGS; i++) {
21182d81d425SShanker Donthineni 		struct its_baser *baser = its->tables + i;
21192d81d425SShanker Donthineni 		u64 val = its_read_baser(its, baser);
21201ac19ca6SMarc Zyngier 		u64 type = GITS_BASER_TYPE(val);
21219347359aSShanker Donthineni 		u32 order = get_order(psz);
21223faf24eaSShanker Donthineni 		bool indirect = false;
21231ac19ca6SMarc Zyngier 
21244cacac57SMarc Zyngier 		switch (type) {
21254cacac57SMarc Zyngier 		case GITS_BASER_TYPE_NONE:
21261ac19ca6SMarc Zyngier 			continue;
21271ac19ca6SMarc Zyngier 
21284cacac57SMarc Zyngier 		case GITS_BASER_TYPE_DEVICE:
212932bd44dcSShanker Donthineni 			indirect = its_parse_indirect_baser(its, baser,
213032bd44dcSShanker Donthineni 							    psz, &order,
2131576a8342SMarc Zyngier 							    device_ids(its));
21328d565748SZenghui Yu 			break;
21338d565748SZenghui Yu 
21344cacac57SMarc Zyngier 		case GITS_BASER_TYPE_VCPU:
21354cacac57SMarc Zyngier 			indirect = its_parse_indirect_baser(its, baser,
213632bd44dcSShanker Donthineni 							    psz, &order,
213732bd44dcSShanker Donthineni 							    ITS_MAX_VPEID_BITS);
21384cacac57SMarc Zyngier 			break;
21394cacac57SMarc Zyngier 		}
2140f54b97edSMarc Zyngier 
21413faf24eaSShanker Donthineni 		err = its_setup_baser(its, baser, cache, shr, psz, order, indirect);
21429347359aSShanker Donthineni 		if (err < 0) {
21439347359aSShanker Donthineni 			its_free_tables(its);
21449347359aSShanker Donthineni 			return err;
214530f21363SRobert Richter 		}
214630f21363SRobert Richter 
21479347359aSShanker Donthineni 		/* Update settings which will be used for next BASERn */
21489347359aSShanker Donthineni 		psz = baser->psz;
21499347359aSShanker Donthineni 		cache = baser->val & GITS_BASER_CACHEABILITY_MASK;
21509347359aSShanker Donthineni 		shr = baser->val & GITS_BASER_SHAREABILITY_MASK;
21511ac19ca6SMarc Zyngier 	}
21521ac19ca6SMarc Zyngier 
21531ac19ca6SMarc Zyngier 	return 0;
21541ac19ca6SMarc Zyngier }
21551ac19ca6SMarc Zyngier 
21561ac19ca6SMarc Zyngier static int its_alloc_collections(struct its_node *its)
21571ac19ca6SMarc Zyngier {
215883559b47SMarc Zyngier 	int i;
215983559b47SMarc Zyngier 
21606396bb22SKees Cook 	its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections),
21611ac19ca6SMarc Zyngier 				   GFP_KERNEL);
21621ac19ca6SMarc Zyngier 	if (!its->collections)
21631ac19ca6SMarc Zyngier 		return -ENOMEM;
21641ac19ca6SMarc Zyngier 
216583559b47SMarc Zyngier 	for (i = 0; i < nr_cpu_ids; i++)
216683559b47SMarc Zyngier 		its->collections[i].target_address = ~0ULL;
216783559b47SMarc Zyngier 
21681ac19ca6SMarc Zyngier 	return 0;
21691ac19ca6SMarc Zyngier }
21701ac19ca6SMarc Zyngier 
21717c297a2dSMarc Zyngier static struct page *its_allocate_pending_table(gfp_t gfp_flags)
21727c297a2dSMarc Zyngier {
21737c297a2dSMarc Zyngier 	struct page *pend_page;
2174adaab500SMarc Zyngier 
21757c297a2dSMarc Zyngier 	pend_page = alloc_pages(gfp_flags | __GFP_ZERO,
2176adaab500SMarc Zyngier 				get_order(LPI_PENDBASE_SZ));
21777c297a2dSMarc Zyngier 	if (!pend_page)
21787c297a2dSMarc Zyngier 		return NULL;
21797c297a2dSMarc Zyngier 
21807c297a2dSMarc Zyngier 	/* Make sure the GIC will observe the zero-ed page */
21817c297a2dSMarc Zyngier 	gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ);
21827c297a2dSMarc Zyngier 
21837c297a2dSMarc Zyngier 	return pend_page;
21847c297a2dSMarc Zyngier }
21857c297a2dSMarc Zyngier 
21867d75bbb4SMarc Zyngier static void its_free_pending_table(struct page *pt)
21877d75bbb4SMarc Zyngier {
2188adaab500SMarc Zyngier 	free_pages((unsigned long)page_address(pt), get_order(LPI_PENDBASE_SZ));
21897d75bbb4SMarc Zyngier }
21907d75bbb4SMarc Zyngier 
2191c6e2ccb6SMarc Zyngier /*
21925e2c9f9aSMarc Zyngier  * Booting with kdump and LPIs enabled is generally fine. Any other
21935e2c9f9aSMarc Zyngier  * case is wrong in the absence of firmware/EFI support.
2194c6e2ccb6SMarc Zyngier  */
2195c440a9d9SMarc Zyngier static bool enabled_lpis_allowed(void)
2196c440a9d9SMarc Zyngier {
21975e2c9f9aSMarc Zyngier 	phys_addr_t addr;
21985e2c9f9aSMarc Zyngier 	u64 val;
2199c6e2ccb6SMarc Zyngier 
22005e2c9f9aSMarc Zyngier 	/* Check whether the property table is in a reserved region */
22015e2c9f9aSMarc Zyngier 	val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER);
22025e2c9f9aSMarc Zyngier 	addr = val & GENMASK_ULL(51, 12);
22035e2c9f9aSMarc Zyngier 
22045e2c9f9aSMarc Zyngier 	return gic_check_reserved_range(addr, LPI_PROPBASE_SZ);
2205c440a9d9SMarc Zyngier }
2206c440a9d9SMarc Zyngier 
220711e37d35SMarc Zyngier static int __init allocate_lpi_tables(void)
220811e37d35SMarc Zyngier {
2209c440a9d9SMarc Zyngier 	u64 val;
221011e37d35SMarc Zyngier 	int err, cpu;
221111e37d35SMarc Zyngier 
2212c440a9d9SMarc Zyngier 	/*
2213c440a9d9SMarc Zyngier 	 * If LPIs are enabled while we run this from the boot CPU,
2214c440a9d9SMarc Zyngier 	 * flag the RD tables as pre-allocated if the stars do align.
2215c440a9d9SMarc Zyngier 	 */
2216c440a9d9SMarc Zyngier 	val = readl_relaxed(gic_data_rdist_rd_base() + GICR_CTLR);
2217c440a9d9SMarc Zyngier 	if ((val & GICR_CTLR_ENABLE_LPIS) && enabled_lpis_allowed()) {
2218c440a9d9SMarc Zyngier 		gic_rdists->flags |= (RDIST_FLAGS_RD_TABLES_PREALLOCATED |
2219c440a9d9SMarc Zyngier 				      RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING);
2220c440a9d9SMarc Zyngier 		pr_info("GICv3: Using preallocated redistributor tables\n");
2221c440a9d9SMarc Zyngier 	}
2222c440a9d9SMarc Zyngier 
222311e37d35SMarc Zyngier 	err = its_setup_lpi_prop_table();
222411e37d35SMarc Zyngier 	if (err)
222511e37d35SMarc Zyngier 		return err;
222611e37d35SMarc Zyngier 
222711e37d35SMarc Zyngier 	/*
222811e37d35SMarc Zyngier 	 * We allocate all the pending tables anyway, as we may have a
222911e37d35SMarc Zyngier 	 * mix of RDs that have had LPIs enabled, and some that
223011e37d35SMarc Zyngier 	 * don't. We'll free the unused ones as each CPU comes online.
223111e37d35SMarc Zyngier 	 */
223211e37d35SMarc Zyngier 	for_each_possible_cpu(cpu) {
223311e37d35SMarc Zyngier 		struct page *pend_page;
223411e37d35SMarc Zyngier 
223511e37d35SMarc Zyngier 		pend_page = its_allocate_pending_table(GFP_NOWAIT);
223611e37d35SMarc Zyngier 		if (!pend_page) {
223711e37d35SMarc Zyngier 			pr_err("Failed to allocate PENDBASE for CPU%d\n", cpu);
223811e37d35SMarc Zyngier 			return -ENOMEM;
223911e37d35SMarc Zyngier 		}
224011e37d35SMarc Zyngier 
224111e37d35SMarc Zyngier 		gic_data_rdist_cpu(cpu)->pend_page = pend_page;
224211e37d35SMarc Zyngier 	}
224311e37d35SMarc Zyngier 
224411e37d35SMarc Zyngier 	return 0;
224511e37d35SMarc Zyngier }
224611e37d35SMarc Zyngier 
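/*
 * Clear the Valid bit in GICR_VPENDBASER and spin (for up to ~1s) until
 * the Dirty bit drops, i.e. until the redistributor has finished writing
 * the vPE's pending state back to memory. The final register value is
 * returned so callers can inspect Dirty/PendingLast/IDAI.
 */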
22476479450fSHeyi Guo static u64 its_clear_vpend_valid(void __iomem *vlpi_base)
22486479450fSHeyi Guo {
22496479450fSHeyi Guo 	u32 count = 1000000;	/* 1s! */
22506479450fSHeyi Guo 	bool clean;
22516479450fSHeyi Guo 	u64 val;
22526479450fSHeyi Guo 
22536479450fSHeyi Guo 	val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
22546479450fSHeyi Guo 	val &= ~GICR_VPENDBASER_Valid;
22556479450fSHeyi Guo 	gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
22566479450fSHeyi Guo 
22576479450fSHeyi Guo 	do {
22586479450fSHeyi Guo 		val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
22596479450fSHeyi Guo 		clean = !(val & GICR_VPENDBASER_Dirty);
22606479450fSHeyi Guo 		if (!clean) {
22616479450fSHeyi Guo 			count--;
22626479450fSHeyi Guo 			cpu_relax();
22636479450fSHeyi Guo 			udelay(1);
22646479450fSHeyi Guo 		}
22656479450fSHeyi Guo 	} while (!clean && count);
22666479450fSHeyi Guo 
22676479450fSHeyi Guo 	return val;
22686479450fSHeyi Guo }
22696479450fSHeyi Guo 
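/*
 * Per-CPU LPI setup: if the redistributor tables were flagged as
 * pre-allocated (LPIs left enabled by a previous kernel or firmware),
 * sanity check and reuse them. Otherwise program GICR_PROPBASER/PENDBASER
 * (falling back to non-cacheable mappings plus explicit cache flushes when
 * the RD reports the tables as non-shareable), enable LPIs, and sanitize
 * the GICv4 virtual LPI registers.
 */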
22701ac19ca6SMarc Zyngier static void its_cpu_init_lpis(void)
22711ac19ca6SMarc Zyngier {
22721ac19ca6SMarc Zyngier 	void __iomem *rbase = gic_data_rdist_rd_base();
22731ac19ca6SMarc Zyngier 	struct page *pend_page;
227411e37d35SMarc Zyngier 	phys_addr_t paddr;
22751ac19ca6SMarc Zyngier 	u64 val, tmp;
22761ac19ca6SMarc Zyngier 
227711e37d35SMarc Zyngier 	if (gic_data_rdist()->lpi_enabled)
22781ac19ca6SMarc Zyngier 		return;
22791ac19ca6SMarc Zyngier 
2280c440a9d9SMarc Zyngier 	val = readl_relaxed(rbase + GICR_CTLR);
2281c440a9d9SMarc Zyngier 	if ((gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) &&
2282c440a9d9SMarc Zyngier 	    (val & GICR_CTLR_ENABLE_LPIS)) {
2283f842ca8eSMarc Zyngier 		/*
2284f842ca8eSMarc Zyngier 		 * Check that we get the same property table on all
2285f842ca8eSMarc Zyngier 		 * RDs. If we don't, this is hopeless.
2286f842ca8eSMarc Zyngier 		 */
2287f842ca8eSMarc Zyngier 		paddr = gicr_read_propbaser(rbase + GICR_PROPBASER);
2288f842ca8eSMarc Zyngier 		paddr &= GENMASK_ULL(51, 12);
2289f842ca8eSMarc Zyngier 		if (WARN_ON(gic_rdists->prop_table_pa != paddr))
2290f842ca8eSMarc Zyngier 			add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
2291f842ca8eSMarc Zyngier 
2292c440a9d9SMarc Zyngier 		paddr = gicr_read_pendbaser(rbase + GICR_PENDBASER);
2293c440a9d9SMarc Zyngier 		paddr &= GENMASK_ULL(51, 16);
2294c440a9d9SMarc Zyngier 
22955e2c9f9aSMarc Zyngier 		WARN_ON(!gic_check_reserved_range(paddr, LPI_PENDBASE_SZ));
2296c440a9d9SMarc Zyngier 		its_free_pending_table(gic_data_rdist()->pend_page);
2297c440a9d9SMarc Zyngier 		gic_data_rdist()->pend_page = NULL;
2298c440a9d9SMarc Zyngier 
2299c440a9d9SMarc Zyngier 		goto out;
2300c440a9d9SMarc Zyngier 	}
2301c440a9d9SMarc Zyngier 
230211e37d35SMarc Zyngier 	pend_page = gic_data_rdist()->pend_page;
23031ac19ca6SMarc Zyngier 	paddr = page_to_phys(pend_page);
23043fb68faeSMarc Zyngier 	WARN_ON(gic_reserve_range(paddr, LPI_PENDBASE_SZ));
23051ac19ca6SMarc Zyngier 
23061ac19ca6SMarc Zyngier 	/* set PROPBASE */
2307e1a2e201SMarc Zyngier 	val = (gic_rdists->prop_table_pa |
23081ac19ca6SMarc Zyngier 	       GICR_PROPBASER_InnerShareable |
23092fd632a0SShanker Donthineni 	       GICR_PROPBASER_RaWaWb |
23101ac19ca6SMarc Zyngier 	       ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));
23111ac19ca6SMarc Zyngier 
23120968a619SVladimir Murzin 	gicr_write_propbaser(val, rbase + GICR_PROPBASER);
23130968a619SVladimir Murzin 	tmp = gicr_read_propbaser(rbase + GICR_PROPBASER);
23141ac19ca6SMarc Zyngier 
23151ac19ca6SMarc Zyngier 	if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
2316241a386cSMarc Zyngier 		if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
2317241a386cSMarc Zyngier 			/*
2318241a386cSMarc Zyngier 			 * The HW reports non-shareable, so we must
2319241a386cSMarc Zyngier 			 * remove the cacheability attributes as
2320241a386cSMarc Zyngier 			 * well.
2321241a386cSMarc Zyngier 			 */
2322241a386cSMarc Zyngier 			val &= ~(GICR_PROPBASER_SHAREABILITY_MASK |
2323241a386cSMarc Zyngier 				 GICR_PROPBASER_CACHEABILITY_MASK);
2324241a386cSMarc Zyngier 			val |= GICR_PROPBASER_nC;
23250968a619SVladimir Murzin 			gicr_write_propbaser(val, rbase + GICR_PROPBASER);
2326241a386cSMarc Zyngier 		}
23271ac19ca6SMarc Zyngier 		pr_info_once("GIC: using cache flushing for LPI property table\n");
23281ac19ca6SMarc Zyngier 		gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
23291ac19ca6SMarc Zyngier 	}
23301ac19ca6SMarc Zyngier 
23311ac19ca6SMarc Zyngier 	/* set PENDBASE */
23321ac19ca6SMarc Zyngier 	val = (page_to_phys(pend_page) |
23334ad3e363SMarc Zyngier 	       GICR_PENDBASER_InnerShareable |
23342fd632a0SShanker Donthineni 	       GICR_PENDBASER_RaWaWb);
23351ac19ca6SMarc Zyngier 
23360968a619SVladimir Murzin 	gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
23370968a619SVladimir Murzin 	tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER);
2338241a386cSMarc Zyngier 
2339241a386cSMarc Zyngier 	if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
2340241a386cSMarc Zyngier 		/*
2341241a386cSMarc Zyngier 		 * The HW reports non-shareable, so we must remove the
2342241a386cSMarc Zyngier 		 * cacheability attributes as well.
2343241a386cSMarc Zyngier 		 */
2344241a386cSMarc Zyngier 		val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
2345241a386cSMarc Zyngier 			 GICR_PENDBASER_CACHEABILITY_MASK);
2346241a386cSMarc Zyngier 		val |= GICR_PENDBASER_nC;
23470968a619SVladimir Murzin 		gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
2348241a386cSMarc Zyngier 	}
23491ac19ca6SMarc Zyngier 
23501ac19ca6SMarc Zyngier 	/* Enable LPIs */
23511ac19ca6SMarc Zyngier 	val = readl_relaxed(rbase + GICR_CTLR);
23521ac19ca6SMarc Zyngier 	val |= GICR_CTLR_ENABLE_LPIS;
23531ac19ca6SMarc Zyngier 	writel_relaxed(val, rbase + GICR_CTLR);
23541ac19ca6SMarc Zyngier 
23556479450fSHeyi Guo 	if (gic_rdists->has_vlpis) {
23566479450fSHeyi Guo 		void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
23576479450fSHeyi Guo 
23586479450fSHeyi Guo 		/*
23596479450fSHeyi Guo 		 * It's possible for the CPU to receive VLPIs before it is
23606479450fSHeyi Guo 		 * scheduled as a vPE, especially for the first CPU, and a
23616479450fSHeyi Guo 		 * VLPI with an INTID larger than 2^(IDbits+1) will be
23626479450fSHeyi Guo 		 * considered out of range and dropped by the GIC.
23636479450fSHeyi Guo 		 * So we initialize IDbits to a known value to avoid VLPI drops.
23646479450fSHeyi Guo 		 */
23656479450fSHeyi Guo 		val = (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
23666479450fSHeyi Guo 		pr_debug("GICv4: CPU%d: Init IDbits to 0x%llx for GICR_VPROPBASER\n",
23676479450fSHeyi Guo 			smp_processor_id(), val);
23686479450fSHeyi Guo 		gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
23696479450fSHeyi Guo 
23706479450fSHeyi Guo 		/*
23716479450fSHeyi Guo 		 * Also clear the Valid bit of GICR_VPENDBASER, in case some
23726479450fSHeyi Guo 		 * stale programming was left behind and has the potential of
23736479450fSHeyi Guo 		 * corrupting memory.
23746479450fSHeyi Guo 		 */
23756479450fSHeyi Guo 		val = its_clear_vpend_valid(vlpi_base);
23766479450fSHeyi Guo 		WARN_ON(val & GICR_VPENDBASER_Dirty);
23776479450fSHeyi Guo 	}
23786479450fSHeyi Guo 
23791ac19ca6SMarc Zyngier 	/* Make sure the GIC has seen the above */
23801ac19ca6SMarc Zyngier 	dsb(sy);
2381c440a9d9SMarc Zyngier out:
238211e37d35SMarc Zyngier 	gic_data_rdist()->lpi_enabled = true;
2383c440a9d9SMarc Zyngier 	pr_info("GICv3: CPU%d: using %s LPI pending table @%pa\n",
238411e37d35SMarc Zyngier 		smp_processor_id(),
2385c440a9d9SMarc Zyngier 		gic_data_rdist()->pend_page ? "allocated" : "reserved",
238611e37d35SMarc Zyngier 		&paddr);
23871ac19ca6SMarc Zyngier }
23881ac19ca6SMarc Zyngier 
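/*
 * Map this CPU's collection on one ITS. Depending on GITS_TYPER.PTA the
 * target is either the physical address of our redistributor or its
 * linear CPU number, and the mapping is pushed out with MAPC + INVALL.
 */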
2389920181ceSDerek Basehore static void its_cpu_init_collection(struct its_node *its)
23901ac19ca6SMarc Zyngier {
2391920181ceSDerek Basehore 	int cpu = smp_processor_id();
23921ac19ca6SMarc Zyngier 	u64 target;
23931ac19ca6SMarc Zyngier 
2394fbf8f40eSGanapatrao Kulkarni 	/* avoid cross node collections and its mapping */
2395fbf8f40eSGanapatrao Kulkarni 	if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
2396fbf8f40eSGanapatrao Kulkarni 		struct device_node *cpu_node;
2397fbf8f40eSGanapatrao Kulkarni 
2398fbf8f40eSGanapatrao Kulkarni 		cpu_node = of_get_cpu_node(cpu, NULL);
2399fbf8f40eSGanapatrao Kulkarni 		if (its->numa_node != NUMA_NO_NODE &&
2400fbf8f40eSGanapatrao Kulkarni 			its->numa_node != of_node_to_nid(cpu_node))
2401920181ceSDerek Basehore 			return;
2402fbf8f40eSGanapatrao Kulkarni 	}
2403fbf8f40eSGanapatrao Kulkarni 
24041ac19ca6SMarc Zyngier 	/*
24051ac19ca6SMarc Zyngier 	 * We now have to bind each collection to its target
24061ac19ca6SMarc Zyngier 	 * redistributor.
24071ac19ca6SMarc Zyngier 	 */
2408589ce5f4SMarc Zyngier 	if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
24091ac19ca6SMarc Zyngier 		/*
24101ac19ca6SMarc Zyngier 		 * This ITS wants the physical address of the
24111ac19ca6SMarc Zyngier 		 * redistributor.
24121ac19ca6SMarc Zyngier 		 */
24131ac19ca6SMarc Zyngier 		target = gic_data_rdist()->phys_base;
24141ac19ca6SMarc Zyngier 	} else {
2415920181ceSDerek Basehore 		/* This ITS wants a linear CPU number. */
2416589ce5f4SMarc Zyngier 		target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
2417263fcd31SMarc Zyngier 		target = GICR_TYPER_CPU_NUMBER(target) << 16;
24181ac19ca6SMarc Zyngier 	}
24191ac19ca6SMarc Zyngier 
24201ac19ca6SMarc Zyngier 	/* Perform collection mapping */
24211ac19ca6SMarc Zyngier 	its->collections[cpu].target_address = target;
24221ac19ca6SMarc Zyngier 	its->collections[cpu].col_id = cpu;
24231ac19ca6SMarc Zyngier 
24241ac19ca6SMarc Zyngier 	its_send_mapc(its, &its->collections[cpu], 1);
24251ac19ca6SMarc Zyngier 	its_send_invall(its, &its->collections[cpu]);
24261ac19ca6SMarc Zyngier }
24271ac19ca6SMarc Zyngier 
2428920181ceSDerek Basehore static void its_cpu_init_collections(void)
2429920181ceSDerek Basehore {
2430920181ceSDerek Basehore 	struct its_node *its;
2431920181ceSDerek Basehore 
2432a8db7456SSebastian Andrzej Siewior 	raw_spin_lock(&its_lock);
2433920181ceSDerek Basehore 
2434920181ceSDerek Basehore 	list_for_each_entry(its, &its_nodes, entry)
2435920181ceSDerek Basehore 		its_cpu_init_collection(its);
2436920181ceSDerek Basehore 
2437a8db7456SSebastian Andrzej Siewior 	raw_spin_unlock(&its_lock);
24381ac19ca6SMarc Zyngier }
243984a6a2e7SMarc Zyngier 
244084a6a2e7SMarc Zyngier static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
244184a6a2e7SMarc Zyngier {
244284a6a2e7SMarc Zyngier 	struct its_device *its_dev = NULL, *tmp;
24433e39e8f5SMarc Zyngier 	unsigned long flags;
244484a6a2e7SMarc Zyngier 
24453e39e8f5SMarc Zyngier 	raw_spin_lock_irqsave(&its->lock, flags);
244684a6a2e7SMarc Zyngier 
244784a6a2e7SMarc Zyngier 	list_for_each_entry(tmp, &its->its_device_list, entry) {
244884a6a2e7SMarc Zyngier 		if (tmp->device_id == dev_id) {
244984a6a2e7SMarc Zyngier 			its_dev = tmp;
245084a6a2e7SMarc Zyngier 			break;
245184a6a2e7SMarc Zyngier 		}
245284a6a2e7SMarc Zyngier 	}
245384a6a2e7SMarc Zyngier 
24543e39e8f5SMarc Zyngier 	raw_spin_unlock_irqrestore(&its->lock, flags);
245584a6a2e7SMarc Zyngier 
245684a6a2e7SMarc Zyngier 	return its_dev;
245784a6a2e7SMarc Zyngier }
245884a6a2e7SMarc Zyngier 
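/* Return the GITS_BASERn entry backing a table of the given type, if any */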
2459466b7d16SShanker Donthineni static struct its_baser *its_get_baser(struct its_node *its, u32 type)
2460466b7d16SShanker Donthineni {
2461466b7d16SShanker Donthineni 	int i;
2462466b7d16SShanker Donthineni 
2463466b7d16SShanker Donthineni 	for (i = 0; i < GITS_BASER_NR_REGS; i++) {
2464466b7d16SShanker Donthineni 		if (GITS_BASER_TYPE(its->tables[i].val) == type)
2465466b7d16SShanker Donthineni 			return &its->tables[i];
2466466b7d16SShanker Donthineni 	}
2467466b7d16SShanker Donthineni 
2468466b7d16SShanker Donthineni 	return NULL;
2469466b7d16SShanker Donthineni }
2470466b7d16SShanker Donthineni 
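/*
 * For an indirect (two-level) table, the ID is split into a level-1 index
 * pointing at a level-2 page of baser->psz bytes, each page holding
 * psz/esz entries. For instance, with 64kB pages and 8-byte entries a
 * level-2 page covers 8192 IDs, so ID 20000 lands in level-1 slot 2.
 * Level-2 pages are only allocated on demand, here.
 */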
2471539d3782SShanker Donthineni static bool its_alloc_table_entry(struct its_node *its,
2472539d3782SShanker Donthineni 				  struct its_baser *baser, u32 id)
24733faf24eaSShanker Donthineni {
24743faf24eaSShanker Donthineni 	struct page *page;
24753faf24eaSShanker Donthineni 	u32 esz, idx;
24763faf24eaSShanker Donthineni 	__le64 *table;
24773faf24eaSShanker Donthineni 
24783faf24eaSShanker Donthineni 	/* Don't allow a device ID that exceeds the single, flat table limit */
24793faf24eaSShanker Donthineni 	esz = GITS_BASER_ENTRY_SIZE(baser->val);
24803faf24eaSShanker Donthineni 	if (!(baser->val & GITS_BASER_INDIRECT))
248170cc81edSMarc Zyngier 		return (id < (PAGE_ORDER_TO_SIZE(baser->order) / esz));
24823faf24eaSShanker Donthineni 
24833faf24eaSShanker Donthineni 	/* Compute 1st level table index & check if that exceeds table limit */
248470cc81edSMarc Zyngier 	idx = id >> ilog2(baser->psz / esz);
24853faf24eaSShanker Donthineni 	if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE))
24863faf24eaSShanker Donthineni 		return false;
24873faf24eaSShanker Donthineni 
24883faf24eaSShanker Donthineni 	table = baser->base;
24893faf24eaSShanker Donthineni 
24903faf24eaSShanker Donthineni 	/* Allocate memory for 2nd level table */
24913faf24eaSShanker Donthineni 	if (!table[idx]) {
2492539d3782SShanker Donthineni 		page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
2493539d3782SShanker Donthineni 					get_order(baser->psz));
24943faf24eaSShanker Donthineni 		if (!page)
24953faf24eaSShanker Donthineni 			return false;
24963faf24eaSShanker Donthineni 
24973faf24eaSShanker Donthineni 		/* Flush Lvl2 table to PoC if hw doesn't support coherency */
24983faf24eaSShanker Donthineni 		if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
2499328191c0SVladimir Murzin 			gic_flush_dcache_to_poc(page_address(page), baser->psz);
25003faf24eaSShanker Donthineni 
25013faf24eaSShanker Donthineni 		table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
25023faf24eaSShanker Donthineni 
25033faf24eaSShanker Donthineni 		/* Flush Lvl1 entry to PoC if hw doesn't support coherency */
25043faf24eaSShanker Donthineni 		if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
2505328191c0SVladimir Murzin 			gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);
25063faf24eaSShanker Donthineni 
25073faf24eaSShanker Donthineni 		/* Ensure updated table contents are visible to ITS hardware */
25083faf24eaSShanker Donthineni 		dsb(sy);
25093faf24eaSShanker Donthineni 	}
25103faf24eaSShanker Donthineni 
25113faf24eaSShanker Donthineni 	return true;
25123faf24eaSShanker Donthineni }
25133faf24eaSShanker Donthineni 
251470cc81edSMarc Zyngier static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
251570cc81edSMarc Zyngier {
251670cc81edSMarc Zyngier 	struct its_baser *baser;
251770cc81edSMarc Zyngier 
251870cc81edSMarc Zyngier 	baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);
251970cc81edSMarc Zyngier 
252070cc81edSMarc Zyngier 	/* Don't allow a device ID that exceeds the ITS hardware limit */
252170cc81edSMarc Zyngier 	if (!baser)
2522576a8342SMarc Zyngier 		return (ilog2(dev_id) < device_ids(its));
252370cc81edSMarc Zyngier 
2524539d3782SShanker Donthineni 	return its_alloc_table_entry(its, baser, dev_id);
252570cc81edSMarc Zyngier }
252670cc81edSMarc Zyngier 
25277d75bbb4SMarc Zyngier static bool its_alloc_vpe_table(u32 vpe_id)
25287d75bbb4SMarc Zyngier {
25297d75bbb4SMarc Zyngier 	struct its_node *its;
25307d75bbb4SMarc Zyngier 
25317d75bbb4SMarc Zyngier 	/*
25327d75bbb4SMarc Zyngier 	 * Make sure the L2 tables are allocated on *all* v4 ITSs. We
25337d75bbb4SMarc Zyngier 	 * could try and only do it on ITSs corresponding to devices
25347d75bbb4SMarc Zyngier 	 * that have interrupts targeted at this VPE, but the
25357d75bbb4SMarc Zyngier 	 * complexity becomes crazy (and you have tons of memory
25367d75bbb4SMarc Zyngier 	 * anyway, right?).
25377d75bbb4SMarc Zyngier 	 */
25387d75bbb4SMarc Zyngier 	list_for_each_entry(its, &its_nodes, entry) {
25397d75bbb4SMarc Zyngier 		struct its_baser *baser;
25407d75bbb4SMarc Zyngier 
25410dd57fedSMarc Zyngier 		if (!is_v4(its))
25427d75bbb4SMarc Zyngier 			continue;
25437d75bbb4SMarc Zyngier 
25447d75bbb4SMarc Zyngier 		baser = its_get_baser(its, GITS_BASER_TYPE_VCPU);
25457d75bbb4SMarc Zyngier 		if (!baser)
25467d75bbb4SMarc Zyngier 			return false;
25477d75bbb4SMarc Zyngier 
2548539d3782SShanker Donthineni 		if (!its_alloc_table_entry(its, baser, vpe_id))
25497d75bbb4SMarc Zyngier 			return false;
25507d75bbb4SMarc Zyngier 	}
25517d75bbb4SMarc Zyngier 
25527d75bbb4SMarc Zyngier 	return true;
25537d75bbb4SMarc Zyngier }
25547d75bbb4SMarc Zyngier 
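/*
 * Create the per-device ITS state: an ITT sized for a power-of-two number
 * of events (at least 2, aligned to ITS_ITT_ALIGN), optionally an LPI
 * bitmap, and the per-event collection map. The device is then advertised
 * to the ITS with a MAPD command.
 */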
255584a6a2e7SMarc Zyngier static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
255693f94ea0SMarc Zyngier 					    int nvecs, bool alloc_lpis)
255784a6a2e7SMarc Zyngier {
255884a6a2e7SMarc Zyngier 	struct its_device *dev;
255993f94ea0SMarc Zyngier 	unsigned long *lpi_map = NULL;
25603e39e8f5SMarc Zyngier 	unsigned long flags;
2561591e5becSMarc Zyngier 	u16 *col_map = NULL;
256284a6a2e7SMarc Zyngier 	void *itt;
256384a6a2e7SMarc Zyngier 	int lpi_base;
256484a6a2e7SMarc Zyngier 	int nr_lpis;
2565c8481267SMarc Zyngier 	int nr_ites;
256684a6a2e7SMarc Zyngier 	int sz;
256784a6a2e7SMarc Zyngier 
25683faf24eaSShanker Donthineni 	if (!its_alloc_device_table(its, dev_id))
2569466b7d16SShanker Donthineni 		return NULL;
2570466b7d16SShanker Donthineni 
2571147c8f37SMarc Zyngier 	if (WARN_ON(!is_power_of_2(nvecs)))
2572147c8f37SMarc Zyngier 		nvecs = roundup_pow_of_two(nvecs);
2573147c8f37SMarc Zyngier 
257484a6a2e7SMarc Zyngier 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
2575c8481267SMarc Zyngier 	/*
2576147c8f37SMarc Zyngier 	 * Even if the device wants a single LPI, the ITT must be
2577147c8f37SMarc Zyngier 	 * sized as a power of two (and you need at least one bit...).
2578c8481267SMarc Zyngier 	 */
2579147c8f37SMarc Zyngier 	nr_ites = max(2, nvecs);
2580ffedbf0cSMarc Zyngier 	sz = nr_ites * (FIELD_GET(GITS_TYPER_ITT_ENTRY_SIZE, its->typer) + 1);
258184a6a2e7SMarc Zyngier 	sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
2582539d3782SShanker Donthineni 	itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node);
258393f94ea0SMarc Zyngier 	if (alloc_lpis) {
258438dd7c49SMarc Zyngier 		lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis);
2585591e5becSMarc Zyngier 		if (lpi_map)
25866396bb22SKees Cook 			col_map = kcalloc(nr_lpis, sizeof(*col_map),
258793f94ea0SMarc Zyngier 					  GFP_KERNEL);
258893f94ea0SMarc Zyngier 	} else {
25896396bb22SKees Cook 		col_map = kcalloc(nr_ites, sizeof(*col_map), GFP_KERNEL);
259093f94ea0SMarc Zyngier 		nr_lpis = 0;
259193f94ea0SMarc Zyngier 		lpi_base = 0;
259293f94ea0SMarc Zyngier 	}
259384a6a2e7SMarc Zyngier 
259493f94ea0SMarc Zyngier 	if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) {
259584a6a2e7SMarc Zyngier 		kfree(dev);
259684a6a2e7SMarc Zyngier 		kfree(itt);
259784a6a2e7SMarc Zyngier 		kfree(lpi_map);
2598591e5becSMarc Zyngier 		kfree(col_map);
259984a6a2e7SMarc Zyngier 		return NULL;
260084a6a2e7SMarc Zyngier 	}
260184a6a2e7SMarc Zyngier 
2602328191c0SVladimir Murzin 	gic_flush_dcache_to_poc(itt, sz);
26035a9a8915SMarc Zyngier 
260484a6a2e7SMarc Zyngier 	dev->its = its;
260584a6a2e7SMarc Zyngier 	dev->itt = itt;
2606c8481267SMarc Zyngier 	dev->nr_ites = nr_ites;
2607591e5becSMarc Zyngier 	dev->event_map.lpi_map = lpi_map;
2608591e5becSMarc Zyngier 	dev->event_map.col_map = col_map;
2609591e5becSMarc Zyngier 	dev->event_map.lpi_base = lpi_base;
2610591e5becSMarc Zyngier 	dev->event_map.nr_lpis = nr_lpis;
2611d011e4e6SMarc Zyngier 	mutex_init(&dev->event_map.vlpi_lock);
261284a6a2e7SMarc Zyngier 	dev->device_id = dev_id;
261384a6a2e7SMarc Zyngier 	INIT_LIST_HEAD(&dev->entry);
261484a6a2e7SMarc Zyngier 
26153e39e8f5SMarc Zyngier 	raw_spin_lock_irqsave(&its->lock, flags);
261684a6a2e7SMarc Zyngier 	list_add(&dev->entry, &its->its_device_list);
26173e39e8f5SMarc Zyngier 	raw_spin_unlock_irqrestore(&its->lock, flags);
261884a6a2e7SMarc Zyngier 
261984a6a2e7SMarc Zyngier 	/* Map device to its ITT */
262084a6a2e7SMarc Zyngier 	its_send_mapd(dev, 1);
262184a6a2e7SMarc Zyngier 
262284a6a2e7SMarc Zyngier 	return dev;
262384a6a2e7SMarc Zyngier }
262484a6a2e7SMarc Zyngier 
262584a6a2e7SMarc Zyngier static void its_free_device(struct its_device *its_dev)
262684a6a2e7SMarc Zyngier {
26273e39e8f5SMarc Zyngier 	unsigned long flags;
26283e39e8f5SMarc Zyngier 
26293e39e8f5SMarc Zyngier 	raw_spin_lock_irqsave(&its_dev->its->lock, flags);
263084a6a2e7SMarc Zyngier 	list_del(&its_dev->entry);
26313e39e8f5SMarc Zyngier 	raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
2632898aa5ceSMarc Zyngier 	kfree(its_dev->event_map.col_map);
263384a6a2e7SMarc Zyngier 	kfree(its_dev->itt);
263484a6a2e7SMarc Zyngier 	kfree(its_dev);
263584a6a2e7SMarc Zyngier }
2636b48ac83dSMarc Zyngier 
26378208d170SMarc Zyngier static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number_t *hwirq)
2638b48ac83dSMarc Zyngier {
2639b48ac83dSMarc Zyngier 	int idx;
2640b48ac83dSMarc Zyngier 
2641342be106SZenghui Yu 	/* Find a free LPI region in lpi_map and allocate it. */
26428208d170SMarc Zyngier 	idx = bitmap_find_free_region(dev->event_map.lpi_map,
26438208d170SMarc Zyngier 				      dev->event_map.nr_lpis,
26448208d170SMarc Zyngier 				      get_count_order(nvecs));
26458208d170SMarc Zyngier 	if (idx < 0)
2646b48ac83dSMarc Zyngier 		return -ENOSPC;
2647b48ac83dSMarc Zyngier 
2648591e5becSMarc Zyngier 	*hwirq = dev->event_map.lpi_base + idx;
2649b48ac83dSMarc Zyngier 
2650b48ac83dSMarc Zyngier 	return 0;
2651b48ac83dSMarc Zyngier }
2652b48ac83dSMarc Zyngier 
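/*
 * The caller is expected to have stashed the device ID in
 * info->scratchpad[0].ul before handing the allocation to this domain.
 * As an illustration, the PCI glue (irq-gic-v3-its-pci-msi.c) does
 * something along the lines of (simplified):
 *
 *	info->scratchpad[0].ul = pci_msi_domain_get_msi_rid(domain, pdev);
 *	return msi_info->ops->msi_prepare(domain->parent, dev, nvec, info);
 */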
265354456db9SMarc Zyngier static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
2654b48ac83dSMarc Zyngier 			   int nvec, msi_alloc_info_t *info)
2655b48ac83dSMarc Zyngier {
2656b48ac83dSMarc Zyngier 	struct its_node *its;
2657b48ac83dSMarc Zyngier 	struct its_device *its_dev;
265854456db9SMarc Zyngier 	struct msi_domain_info *msi_info;
265954456db9SMarc Zyngier 	u32 dev_id;
26609791ec7dSMarc Zyngier 	int err = 0;
2661b48ac83dSMarc Zyngier 
266254456db9SMarc Zyngier 	/*
2663a7c90f51SJulien Grall 	 * We ignore "dev" entirely, and rely on the dev_id that has
266454456db9SMarc Zyngier 	 * been passed via the scratchpad. This limits this domain's
266554456db9SMarc Zyngier 	 * usefulness to upper layers that definitely know that they
266654456db9SMarc Zyngier 	 * are built on top of the ITS.
266754456db9SMarc Zyngier 	 */
266854456db9SMarc Zyngier 	dev_id = info->scratchpad[0].ul;
266954456db9SMarc Zyngier 
267054456db9SMarc Zyngier 	msi_info = msi_get_domain_info(domain);
267154456db9SMarc Zyngier 	its = msi_info->data;
267254456db9SMarc Zyngier 
267320b3d54eSMarc Zyngier 	if (!gic_rdists->has_direct_lpi &&
267420b3d54eSMarc Zyngier 	    vpe_proxy.dev &&
267520b3d54eSMarc Zyngier 	    vpe_proxy.dev->its == its &&
267620b3d54eSMarc Zyngier 	    dev_id == vpe_proxy.dev->device_id) {
267720b3d54eSMarc Zyngier 		/* Bad luck. Get yourself a better implementation */
267820b3d54eSMarc Zyngier 		WARN_ONCE(1, "DevId %x clashes with GICv4 VPE proxy device\n",
267920b3d54eSMarc Zyngier 			  dev_id);
268020b3d54eSMarc Zyngier 		return -EINVAL;
268120b3d54eSMarc Zyngier 	}
268220b3d54eSMarc Zyngier 
26839791ec7dSMarc Zyngier 	mutex_lock(&its->dev_alloc_lock);
2684f130420eSMarc Zyngier 	its_dev = its_find_device(its, dev_id);
2685e8137f4fSMarc Zyngier 	if (its_dev) {
2686e8137f4fSMarc Zyngier 		/*
2687e8137f4fSMarc Zyngier 		 * We already have seen this ID, probably through
2688e8137f4fSMarc Zyngier 		 * another alias (PCI bridge of some sort). No need to
2689e8137f4fSMarc Zyngier 		 * create the device.
2690e8137f4fSMarc Zyngier 		 */
26919791ec7dSMarc Zyngier 		its_dev->shared = true;
2692f130420eSMarc Zyngier 		pr_debug("Reusing ITT for devID %x\n", dev_id);
2693e8137f4fSMarc Zyngier 		goto out;
2694e8137f4fSMarc Zyngier 	}
2695b48ac83dSMarc Zyngier 
269693f94ea0SMarc Zyngier 	its_dev = its_create_device(its, dev_id, nvec, true);
26979791ec7dSMarc Zyngier 	if (!its_dev) {
26989791ec7dSMarc Zyngier 		err = -ENOMEM;
26999791ec7dSMarc Zyngier 		goto out;
27009791ec7dSMarc Zyngier 	}
2701b48ac83dSMarc Zyngier 
2702f130420eSMarc Zyngier 	pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec));
2703e8137f4fSMarc Zyngier out:
27049791ec7dSMarc Zyngier 	mutex_unlock(&its->dev_alloc_lock);
2705b48ac83dSMarc Zyngier 	info->scratchpad[0].ptr = its_dev;
27069791ec7dSMarc Zyngier 	return err;
2707b48ac83dSMarc Zyngier }
2708b48ac83dSMarc Zyngier 
270954456db9SMarc Zyngier static struct msi_domain_ops its_msi_domain_ops = {
271054456db9SMarc Zyngier 	.msi_prepare	= its_msi_prepare,
271154456db9SMarc Zyngier };
271254456db9SMarc Zyngier 
2713b48ac83dSMarc Zyngier static int its_irq_gic_domain_alloc(struct irq_domain *domain,
2714b48ac83dSMarc Zyngier 				    unsigned int virq,
2715b48ac83dSMarc Zyngier 				    irq_hw_number_t hwirq)
2716b48ac83dSMarc Zyngier {
2717f833f57fSMarc Zyngier 	struct irq_fwspec fwspec;
2718b48ac83dSMarc Zyngier 
2719f833f57fSMarc Zyngier 	if (irq_domain_get_of_node(domain->parent)) {
2720f833f57fSMarc Zyngier 		fwspec.fwnode = domain->parent->fwnode;
2721f833f57fSMarc Zyngier 		fwspec.param_count = 3;
2722f833f57fSMarc Zyngier 		fwspec.param[0] = GIC_IRQ_TYPE_LPI;
2723f833f57fSMarc Zyngier 		fwspec.param[1] = hwirq;
2724f833f57fSMarc Zyngier 		fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
27253f010cf1STomasz Nowicki 	} else if (is_fwnode_irqchip(domain->parent->fwnode)) {
27263f010cf1STomasz Nowicki 		fwspec.fwnode = domain->parent->fwnode;
27273f010cf1STomasz Nowicki 		fwspec.param_count = 2;
27283f010cf1STomasz Nowicki 		fwspec.param[0] = hwirq;
27293f010cf1STomasz Nowicki 		fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
2730f833f57fSMarc Zyngier 	} else {
2731f833f57fSMarc Zyngier 		return -EINVAL;
2732f833f57fSMarc Zyngier 	}
2733b48ac83dSMarc Zyngier 
2734f833f57fSMarc Zyngier 	return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
2735b48ac83dSMarc Zyngier }
2736b48ac83dSMarc Zyngier 
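/*
 * Allocate nr_irqs consecutive events on the ITS device set up by
 * its_msi_prepare(), let the IOMMU layer prepare the MSI doorbell, and
 * wire each Linux irq to the GIC parent domain.
 */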
2737b48ac83dSMarc Zyngier static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
2738b48ac83dSMarc Zyngier 				unsigned int nr_irqs, void *args)
2739b48ac83dSMarc Zyngier {
2740b48ac83dSMarc Zyngier 	msi_alloc_info_t *info = args;
2741b48ac83dSMarc Zyngier 	struct its_device *its_dev = info->scratchpad[0].ptr;
274235ae7df2SJulien Grall 	struct its_node *its = its_dev->its;
2743b48ac83dSMarc Zyngier 	irq_hw_number_t hwirq;
2744b48ac83dSMarc Zyngier 	int err;
2745b48ac83dSMarc Zyngier 	int i;
2746b48ac83dSMarc Zyngier 
27478208d170SMarc Zyngier 	err = its_alloc_device_irq(its_dev, nr_irqs, &hwirq);
2748b48ac83dSMarc Zyngier 	if (err)
2749b48ac83dSMarc Zyngier 		return err;
2750b48ac83dSMarc Zyngier 
275135ae7df2SJulien Grall 	err = iommu_dma_prepare_msi(info->desc, its->get_msi_base(its_dev));
275235ae7df2SJulien Grall 	if (err)
275335ae7df2SJulien Grall 		return err;
275435ae7df2SJulien Grall 
27558208d170SMarc Zyngier 	for (i = 0; i < nr_irqs; i++) {
27568208d170SMarc Zyngier 		err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
2757b48ac83dSMarc Zyngier 		if (err)
2758b48ac83dSMarc Zyngier 			return err;
2759b48ac83dSMarc Zyngier 
2760b48ac83dSMarc Zyngier 		irq_domain_set_hwirq_and_chip(domain, virq + i,
27618208d170SMarc Zyngier 					      hwirq + i, &its_irq_chip, its_dev);
27620d224d35SMarc Zyngier 		irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i)));
2763f130420eSMarc Zyngier 		pr_debug("ID:%d pID:%d vID:%d\n",
27648208d170SMarc Zyngier 			 (int)(hwirq + i - its_dev->event_map.lpi_base),
27658208d170SMarc Zyngier 			 (int)(hwirq + i), virq + i);
2766b48ac83dSMarc Zyngier 	}
2767b48ac83dSMarc Zyngier 
2768b48ac83dSMarc Zyngier 	return 0;
2769b48ac83dSMarc Zyngier }
2770b48ac83dSMarc Zyngier 
277172491643SThomas Gleixner static int its_irq_domain_activate(struct irq_domain *domain,
2772702cb0a0SThomas Gleixner 				   struct irq_data *d, bool reserve)
2773aca268dfSMarc Zyngier {
2774aca268dfSMarc Zyngier 	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
2775aca268dfSMarc Zyngier 	u32 event = its_get_event_id(d);
2776fbf8f40eSGanapatrao Kulkarni 	const struct cpumask *cpu_mask = cpu_online_mask;
27770d224d35SMarc Zyngier 	int cpu;
2778fbf8f40eSGanapatrao Kulkarni 
2779fbf8f40eSGanapatrao Kulkarni 	/* get the cpu_mask of local node */
2780fbf8f40eSGanapatrao Kulkarni 	if (its_dev->its->numa_node >= 0)
2781fbf8f40eSGanapatrao Kulkarni 		cpu_mask = cpumask_of_node(its_dev->its->numa_node);
2782aca268dfSMarc Zyngier 
2783591e5becSMarc Zyngier 	/* Bind the LPI to the first possible CPU */
2784c1797b11SYang Yingliang 	cpu = cpumask_first_and(cpu_mask, cpu_online_mask);
2785c1797b11SYang Yingliang 	if (cpu >= nr_cpu_ids) {
2786c1797b11SYang Yingliang 		if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144)
2787c1797b11SYang Yingliang 			return -EINVAL;
2788c1797b11SYang Yingliang 
2789c1797b11SYang Yingliang 		cpu = cpumask_first(cpu_online_mask);
2790c1797b11SYang Yingliang 	}
2791c1797b11SYang Yingliang 
27920d224d35SMarc Zyngier 	its_dev->event_map.col_map[event] = cpu;
27930d224d35SMarc Zyngier 	irq_data_update_effective_affinity(d, cpumask_of(cpu));
2794591e5becSMarc Zyngier 
2795aca268dfSMarc Zyngier 	/* Map the GIC IRQ and event to the device */
27966a25ad3aSMarc Zyngier 	its_send_mapti(its_dev, d->hwirq, event);
279772491643SThomas Gleixner 	return 0;
2798aca268dfSMarc Zyngier }
2799aca268dfSMarc Zyngier 
2800aca268dfSMarc Zyngier static void its_irq_domain_deactivate(struct irq_domain *domain,
2801aca268dfSMarc Zyngier 				      struct irq_data *d)
2802aca268dfSMarc Zyngier {
2803aca268dfSMarc Zyngier 	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
2804aca268dfSMarc Zyngier 	u32 event = its_get_event_id(d);
2805aca268dfSMarc Zyngier 
2806aca268dfSMarc Zyngier 	/* Stop the delivery of interrupts */
2807aca268dfSMarc Zyngier 	its_send_discard(its_dev, event);
2808aca268dfSMarc Zyngier }
2809aca268dfSMarc Zyngier 
2810b48ac83dSMarc Zyngier static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
2811b48ac83dSMarc Zyngier 				unsigned int nr_irqs)
2812b48ac83dSMarc Zyngier {
2813b48ac83dSMarc Zyngier 	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
2814b48ac83dSMarc Zyngier 	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
28159791ec7dSMarc Zyngier 	struct its_node *its = its_dev->its;
2816b48ac83dSMarc Zyngier 	int i;
2817b48ac83dSMarc Zyngier 
2818c9c96e30SMarc Zyngier 	bitmap_release_region(its_dev->event_map.lpi_map,
2819c9c96e30SMarc Zyngier 			      its_get_event_id(irq_domain_get_irq_data(domain, virq)),
2820c9c96e30SMarc Zyngier 			      get_count_order(nr_irqs));
2821c9c96e30SMarc Zyngier 
2822b48ac83dSMarc Zyngier 	for (i = 0; i < nr_irqs; i++) {
2823b48ac83dSMarc Zyngier 		struct irq_data *data = irq_domain_get_irq_data(domain,
2824b48ac83dSMarc Zyngier 								virq + i);
2825b48ac83dSMarc Zyngier 		/* Nuke the entry in the domain */
28262da39949SMarc Zyngier 		irq_domain_reset_irq_data(data);
2827b48ac83dSMarc Zyngier 	}
2828b48ac83dSMarc Zyngier 
28299791ec7dSMarc Zyngier 	mutex_lock(&its->dev_alloc_lock);
28309791ec7dSMarc Zyngier 
28319791ec7dSMarc Zyngier 	/*
28329791ec7dSMarc Zyngier 	 * If all interrupts have been freed, start mopping the
28339791ec7dSMarc Zyngier 	 * floor. This is conditioned on the device not being shared.
28349791ec7dSMarc Zyngier 	 */
28359791ec7dSMarc Zyngier 	if (!its_dev->shared &&
28369791ec7dSMarc Zyngier 	    bitmap_empty(its_dev->event_map.lpi_map,
2837591e5becSMarc Zyngier 			 its_dev->event_map.nr_lpis)) {
283838dd7c49SMarc Zyngier 		its_lpi_free(its_dev->event_map.lpi_map,
2839cf2be8baSMarc Zyngier 			     its_dev->event_map.lpi_base,
2840cf2be8baSMarc Zyngier 			     its_dev->event_map.nr_lpis);
2841b48ac83dSMarc Zyngier 
2842b48ac83dSMarc Zyngier 		/* Unmap device/itt */
2843b48ac83dSMarc Zyngier 		its_send_mapd(its_dev, 0);
2844b48ac83dSMarc Zyngier 		its_free_device(its_dev);
2845b48ac83dSMarc Zyngier 	}
2846b48ac83dSMarc Zyngier 
28479791ec7dSMarc Zyngier 	mutex_unlock(&its->dev_alloc_lock);
28489791ec7dSMarc Zyngier 
2849b48ac83dSMarc Zyngier 	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
2850b48ac83dSMarc Zyngier }
2851b48ac83dSMarc Zyngier 
2852b48ac83dSMarc Zyngier static const struct irq_domain_ops its_domain_ops = {
2853b48ac83dSMarc Zyngier 	.alloc			= its_irq_domain_alloc,
2854b48ac83dSMarc Zyngier 	.free			= its_irq_domain_free,
2855aca268dfSMarc Zyngier 	.activate		= its_irq_domain_activate,
2856aca268dfSMarc Zyngier 	.deactivate		= its_irq_domain_deactivate,
2857b48ac83dSMarc Zyngier };
28584c21f3c2SMarc Zyngier 
285920b3d54eSMarc Zyngier /*
286020b3d54eSMarc Zyngier  * This is insane.
286120b3d54eSMarc Zyngier  *
286220b3d54eSMarc Zyngier  * If a GICv4 doesn't implement Direct LPIs (which is extremely
286320b3d54eSMarc Zyngier  * likely), the only way to perform an invalidate is to use a fake
286420b3d54eSMarc Zyngier  * device to issue an INV command, implying that the LPI has first
286520b3d54eSMarc Zyngier  * been mapped to some event on that device. Since this is not exactly
286620b3d54eSMarc Zyngier  * cheap, we try to keep that mapping around as long as possible, and
286720b3d54eSMarc Zyngier  * only issue an UNMAP if we're short on available slots.
286820b3d54eSMarc Zyngier  *
286920b3d54eSMarc Zyngier  * Broken by design(tm).
287020b3d54eSMarc Zyngier  */
287120b3d54eSMarc Zyngier static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe)
287220b3d54eSMarc Zyngier {
287320b3d54eSMarc Zyngier 	/* Already unmapped? */
287420b3d54eSMarc Zyngier 	if (vpe->vpe_proxy_event == -1)
287520b3d54eSMarc Zyngier 		return;
287620b3d54eSMarc Zyngier 
287720b3d54eSMarc Zyngier 	its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event);
287820b3d54eSMarc Zyngier 	vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL;
287920b3d54eSMarc Zyngier 
288020b3d54eSMarc Zyngier 	/*
288120b3d54eSMarc Zyngier 	 * We don't track empty slots at all, so let's move the
288220b3d54eSMarc Zyngier 	 * next_victim pointer if we can quickly reuse that slot
288320b3d54eSMarc Zyngier 	 * instead of nuking an existing entry. Not clear that this is
288420b3d54eSMarc Zyngier 	 * always a win though, and this might just generate a ripple
288520b3d54eSMarc Zyngier 	 * effect... Let's just hope VPEs don't migrate too often.
288620b3d54eSMarc Zyngier 	 */
288720b3d54eSMarc Zyngier 	if (vpe_proxy.vpes[vpe_proxy.next_victim])
288820b3d54eSMarc Zyngier 		vpe_proxy.next_victim = vpe->vpe_proxy_event;
288920b3d54eSMarc Zyngier 
289020b3d54eSMarc Zyngier 	vpe->vpe_proxy_event = -1;
289120b3d54eSMarc Zyngier }
289220b3d54eSMarc Zyngier 
289320b3d54eSMarc Zyngier static void its_vpe_db_proxy_unmap(struct its_vpe *vpe)
289420b3d54eSMarc Zyngier {
289520b3d54eSMarc Zyngier 	if (!gic_rdists->has_direct_lpi) {
289620b3d54eSMarc Zyngier 		unsigned long flags;
289720b3d54eSMarc Zyngier 
289820b3d54eSMarc Zyngier 		raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
289920b3d54eSMarc Zyngier 		its_vpe_db_proxy_unmap_locked(vpe);
290020b3d54eSMarc Zyngier 		raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
290120b3d54eSMarc Zyngier 	}
290220b3d54eSMarc Zyngier }
290320b3d54eSMarc Zyngier 
290420b3d54eSMarc Zyngier static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe)
290520b3d54eSMarc Zyngier {
290620b3d54eSMarc Zyngier 	/* Already mapped? */
290720b3d54eSMarc Zyngier 	if (vpe->vpe_proxy_event != -1)
290820b3d54eSMarc Zyngier 		return;
290920b3d54eSMarc Zyngier 
291020b3d54eSMarc Zyngier 	/* This slot was already allocated. Kick the other VPE out. */
291120b3d54eSMarc Zyngier 	if (vpe_proxy.vpes[vpe_proxy.next_victim])
291220b3d54eSMarc Zyngier 		its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]);
291320b3d54eSMarc Zyngier 
291420b3d54eSMarc Zyngier 	/* Map the new VPE instead */
291520b3d54eSMarc Zyngier 	vpe_proxy.vpes[vpe_proxy.next_victim] = vpe;
291620b3d54eSMarc Zyngier 	vpe->vpe_proxy_event = vpe_proxy.next_victim;
291720b3d54eSMarc Zyngier 	vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % vpe_proxy.dev->nr_ites;
291820b3d54eSMarc Zyngier 
291920b3d54eSMarc Zyngier 	vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx;
292020b3d54eSMarc Zyngier 	its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event);
292120b3d54eSMarc Zyngier }
292220b3d54eSMarc Zyngier 
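/*
 * Move a vPE's doorbell from one redistributor to another. With DirectLPI
 * we only need to clear any doorbell still pending on the source RD;
 * otherwise the doorbell is (re)mapped onto the proxy device and moved
 * with a MOVI to the destination collection.
 */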
2923958b90d1SMarc Zyngier static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to)
2924958b90d1SMarc Zyngier {
2925958b90d1SMarc Zyngier 	unsigned long flags;
2926958b90d1SMarc Zyngier 	struct its_collection *target_col;
2927958b90d1SMarc Zyngier 
2928958b90d1SMarc Zyngier 	if (gic_rdists->has_direct_lpi) {
2929958b90d1SMarc Zyngier 		void __iomem *rdbase;
2930958b90d1SMarc Zyngier 
2931958b90d1SMarc Zyngier 		rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base;
2932958b90d1SMarc Zyngier 		gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
29332f4f064bSMarc Zyngier 		wait_for_syncr(rdbase);
2934958b90d1SMarc Zyngier 
2935958b90d1SMarc Zyngier 		return;
2936958b90d1SMarc Zyngier 	}
2937958b90d1SMarc Zyngier 
2938958b90d1SMarc Zyngier 	raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
2939958b90d1SMarc Zyngier 
2940958b90d1SMarc Zyngier 	its_vpe_db_proxy_map_locked(vpe);
2941958b90d1SMarc Zyngier 
2942958b90d1SMarc Zyngier 	target_col = &vpe_proxy.dev->its->collections[to];
2943958b90d1SMarc Zyngier 	its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event);
2944958b90d1SMarc Zyngier 	vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to;
2945958b90d1SMarc Zyngier 
2946958b90d1SMarc Zyngier 	raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
2947958b90d1SMarc Zyngier }
2948958b90d1SMarc Zyngier 
29493171a47aSMarc Zyngier static int its_vpe_set_affinity(struct irq_data *d,
29503171a47aSMarc Zyngier 				const struct cpumask *mask_val,
29513171a47aSMarc Zyngier 				bool force)
29523171a47aSMarc Zyngier {
29533171a47aSMarc Zyngier 	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
29543171a47aSMarc Zyngier 	int cpu = cpumask_first(mask_val);
29553171a47aSMarc Zyngier 
29563171a47aSMarc Zyngier 	/*
29573171a47aSMarc Zyngier 	 * Changing affinity is mega expensive, so let's be as lazy as
295820b3d54eSMarc Zyngier 	 * we can and only do it if we really have to. Also, if mapped
2959958b90d1SMarc Zyngier 	 * into the proxy device, we need to move the doorbell
2960958b90d1SMarc Zyngier 	 * interrupt to its new location.
29613171a47aSMarc Zyngier 	 */
29623171a47aSMarc Zyngier 	if (vpe->col_idx != cpu) {
2963958b90d1SMarc Zyngier 		int from = vpe->col_idx;
2964958b90d1SMarc Zyngier 
29653171a47aSMarc Zyngier 		vpe->col_idx = cpu;
29663171a47aSMarc Zyngier 		its_send_vmovp(vpe);
2967958b90d1SMarc Zyngier 		its_vpe_db_proxy_move(vpe, from, cpu);
29683171a47aSMarc Zyngier 	}
29693171a47aSMarc Zyngier 
297044c4c25eSMarc Zyngier 	irq_data_update_effective_affinity(d, cpumask_of(cpu));
297144c4c25eSMarc Zyngier 
29723171a47aSMarc Zyngier 	return IRQ_SET_MASK_OK_DONE;
29733171a47aSMarc Zyngier }
29743171a47aSMarc Zyngier 
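/*
 * Make the vPE resident on this CPU: point GICR_VPROPBASER at the VM's
 * property table and GICR_VPENDBASER at this vPE's pending table, then
 * set the Valid bit so the RD starts delivering VLPIs.
 */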
2975e643d803SMarc Zyngier static void its_vpe_schedule(struct its_vpe *vpe)
2976e643d803SMarc Zyngier {
297750c33097SRobin Murphy 	void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
2978e643d803SMarc Zyngier 	u64 val;
2979e643d803SMarc Zyngier 
2980e643d803SMarc Zyngier 	/* Schedule the VPE */
2981e643d803SMarc Zyngier 	val  = virt_to_phys(page_address(vpe->its_vm->vprop_page)) &
2982e643d803SMarc Zyngier 		GENMASK_ULL(51, 12);
2983e643d803SMarc Zyngier 	val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
2984e643d803SMarc Zyngier 	val |= GICR_VPROPBASER_RaWb;
2985e643d803SMarc Zyngier 	val |= GICR_VPROPBASER_InnerShareable;
2986e643d803SMarc Zyngier 	gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
2987e643d803SMarc Zyngier 
2988e643d803SMarc Zyngier 	val  = virt_to_phys(page_address(vpe->vpt_page)) &
2989e643d803SMarc Zyngier 		GENMASK_ULL(51, 16);
2990e643d803SMarc Zyngier 	val |= GICR_VPENDBASER_RaWaWb;
2991e643d803SMarc Zyngier 	val |= GICR_VPENDBASER_NonShareable;
2992e643d803SMarc Zyngier 	/*
2993e643d803SMarc Zyngier 	 * There is no good way of finding out if the pending table is
2994e643d803SMarc Zyngier 	 * empty as we can race against the doorbell interrupt very
2995e643d803SMarc Zyngier 	 * easily. So in the end, vpe->pending_last is only an
2996e643d803SMarc Zyngier 	 * indication that the vcpu has something pending, not one
2997e643d803SMarc Zyngier 	 * that the pending table is empty. A good implementation
2998e643d803SMarc Zyngier 	 * would be able to read its coarse map pretty quickly anyway,
2999e643d803SMarc Zyngier 	 * making this a tolerable issue.
3000e643d803SMarc Zyngier 	 */
3001e643d803SMarc Zyngier 	val |= GICR_VPENDBASER_PendingLast;
3002e643d803SMarc Zyngier 	val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0;
3003e643d803SMarc Zyngier 	val |= GICR_VPENDBASER_Valid;
3004e643d803SMarc Zyngier 	gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
3005e643d803SMarc Zyngier }
3006e643d803SMarc Zyngier 
3007e643d803SMarc Zyngier static void its_vpe_deschedule(struct its_vpe *vpe)
3008e643d803SMarc Zyngier {
300950c33097SRobin Murphy 	void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
3010e643d803SMarc Zyngier 	u64 val;
3011e643d803SMarc Zyngier 
30126479450fSHeyi Guo 	val = its_clear_vpend_valid(vlpi_base);
3013e643d803SMarc Zyngier 
30146479450fSHeyi Guo 	if (unlikely(val & GICR_VPENDBASER_Dirty)) {
3015e643d803SMarc Zyngier 		pr_err_ratelimited("ITS virtual pending table not cleaning\n");
3016e643d803SMarc Zyngier 		vpe->idai = false;
3017e643d803SMarc Zyngier 		vpe->pending_last = true;
3018e643d803SMarc Zyngier 	} else {
3019e643d803SMarc Zyngier 		vpe->idai = !!(val & GICR_VPENDBASER_IDAI);
3020e643d803SMarc Zyngier 		vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
3021e643d803SMarc Zyngier 	}
3022e643d803SMarc Zyngier }
3023e643d803SMarc Zyngier 
302440619a2eSMarc Zyngier static void its_vpe_invall(struct its_vpe *vpe)
302540619a2eSMarc Zyngier {
302640619a2eSMarc Zyngier 	struct its_node *its;
302740619a2eSMarc Zyngier 
302840619a2eSMarc Zyngier 	list_for_each_entry(its, &its_nodes, entry) {
30290dd57fedSMarc Zyngier 		if (!is_v4(its))
303040619a2eSMarc Zyngier 			continue;
303140619a2eSMarc Zyngier 
30322247e1bfSMarc Zyngier 		if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr])
30332247e1bfSMarc Zyngier 			continue;
30342247e1bfSMarc Zyngier 
30353c1cceebSMarc Zyngier 		/*
30363c1cceebSMarc Zyngier 		 * Sending a VINVALL to a single ITS is enough, as all
30373c1cceebSMarc Zyngier 		 * we need is to reach the redistributors.
30383c1cceebSMarc Zyngier 		 */
303940619a2eSMarc Zyngier 		its_send_vinvall(its, vpe);
30403c1cceebSMarc Zyngier 		return;
304140619a2eSMarc Zyngier 	}
304240619a2eSMarc Zyngier }
304340619a2eSMarc Zyngier 
3044e643d803SMarc Zyngier static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
3045e643d803SMarc Zyngier {
3046e643d803SMarc Zyngier 	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3047e643d803SMarc Zyngier 	struct its_cmd_info *info = vcpu_info;
3048e643d803SMarc Zyngier 
3049e643d803SMarc Zyngier 	switch (info->cmd_type) {
3050e643d803SMarc Zyngier 	case SCHEDULE_VPE:
3051e643d803SMarc Zyngier 		its_vpe_schedule(vpe);
3052e643d803SMarc Zyngier 		return 0;
3053e643d803SMarc Zyngier 
3054e643d803SMarc Zyngier 	case DESCHEDULE_VPE:
3055e643d803SMarc Zyngier 		its_vpe_deschedule(vpe);
3056e643d803SMarc Zyngier 		return 0;
3057e643d803SMarc Zyngier 
30585e2f7642SMarc Zyngier 	case INVALL_VPE:
305940619a2eSMarc Zyngier 		its_vpe_invall(vpe);
30605e2f7642SMarc Zyngier 		return 0;
30615e2f7642SMarc Zyngier 
3062e643d803SMarc Zyngier 	default:
3063e643d803SMarc Zyngier 		return -EINVAL;
3064e643d803SMarc Zyngier 	}
3065e643d803SMarc Zyngier }
3066e643d803SMarc Zyngier 
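/*
 * Issue an ITS command (INT/CLR/INV) on behalf of a vPE doorbell by
 * temporarily mapping the doorbell onto the proxy device.
 */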
306720b3d54eSMarc Zyngier static void its_vpe_send_cmd(struct its_vpe *vpe,
306820b3d54eSMarc Zyngier 			     void (*cmd)(struct its_device *, u32))
306920b3d54eSMarc Zyngier {
307020b3d54eSMarc Zyngier 	unsigned long flags;
307120b3d54eSMarc Zyngier 
307220b3d54eSMarc Zyngier 	raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
307320b3d54eSMarc Zyngier 
307420b3d54eSMarc Zyngier 	its_vpe_db_proxy_map_locked(vpe);
307520b3d54eSMarc Zyngier 	cmd(vpe_proxy.dev, vpe->vpe_proxy_event);
307620b3d54eSMarc Zyngier 
307720b3d54eSMarc Zyngier 	raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
307820b3d54eSMarc Zyngier }
307920b3d54eSMarc Zyngier 
3080f6a91da7SMarc Zyngier static void its_vpe_send_inv(struct irq_data *d)
3081f6a91da7SMarc Zyngier {
3082f6a91da7SMarc Zyngier 	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
308320b3d54eSMarc Zyngier 
308420b3d54eSMarc Zyngier 	if (gic_rdists->has_direct_lpi) {
3085f6a91da7SMarc Zyngier 		void __iomem *rdbase;
3086f6a91da7SMarc Zyngier 
3087425c09beSMarc Zyngier 		/* Target the redistributor this VPE is currently known on */
3088f6a91da7SMarc Zyngier 		rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
3089425c09beSMarc Zyngier 		gic_write_lpir(d->parent_data->hwirq, rdbase + GICR_INVLPIR);
30902f4f064bSMarc Zyngier 		wait_for_syncr(rdbase);
309120b3d54eSMarc Zyngier 	} else {
309220b3d54eSMarc Zyngier 		its_vpe_send_cmd(vpe, its_send_inv);
309320b3d54eSMarc Zyngier 	}
3094f6a91da7SMarc Zyngier }
3095f6a91da7SMarc Zyngier 
3096f6a91da7SMarc Zyngier static void its_vpe_mask_irq(struct irq_data *d)
3097f6a91da7SMarc Zyngier {
3098f6a91da7SMarc Zyngier 	/*
3099f6a91da7SMarc Zyngier 	 * We need to mask the LPI, which is described by the parent
3100f6a91da7SMarc Zyngier 	 * irq_data. Instead of calling into the parent (which won't
3101f6a91da7SMarc Zyngier 	 * exactly do the right thing), let's simply use the
3102f6a91da7SMarc Zyngier 	 * parent_data pointer. Yes, I'm naughty.
3103f6a91da7SMarc Zyngier 	 */
3104f6a91da7SMarc Zyngier 	lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0);
3105f6a91da7SMarc Zyngier 	its_vpe_send_inv(d);
3106f6a91da7SMarc Zyngier }
3107f6a91da7SMarc Zyngier 
3108f6a91da7SMarc Zyngier static void its_vpe_unmask_irq(struct irq_data *d)
3109f6a91da7SMarc Zyngier {
3110f6a91da7SMarc Zyngier 	/* Same hack as above... */
3111f6a91da7SMarc Zyngier 	lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED);
3112f6a91da7SMarc Zyngier 	its_vpe_send_inv(d);
3113f6a91da7SMarc Zyngier }
3114f6a91da7SMarc Zyngier 
3115e57a3e28SMarc Zyngier static int its_vpe_set_irqchip_state(struct irq_data *d,
3116e57a3e28SMarc Zyngier 				     enum irqchip_irq_state which,
3117e57a3e28SMarc Zyngier 				     bool state)
3118e57a3e28SMarc Zyngier {
3119e57a3e28SMarc Zyngier 	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3120e57a3e28SMarc Zyngier 
3121e57a3e28SMarc Zyngier 	if (which != IRQCHIP_STATE_PENDING)
3122e57a3e28SMarc Zyngier 		return -EINVAL;
3123e57a3e28SMarc Zyngier 
3124e57a3e28SMarc Zyngier 	if (gic_rdists->has_direct_lpi) {
3125e57a3e28SMarc Zyngier 		void __iomem *rdbase;
3126e57a3e28SMarc Zyngier 
3127e57a3e28SMarc Zyngier 		rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
3128e57a3e28SMarc Zyngier 		if (state) {
3129e57a3e28SMarc Zyngier 			gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR);
3130e57a3e28SMarc Zyngier 		} else {
3131e57a3e28SMarc Zyngier 			gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
31322f4f064bSMarc Zyngier 			wait_for_syncr(rdbase);
3133e57a3e28SMarc Zyngier 		}
3134e57a3e28SMarc Zyngier 	} else {
3135e57a3e28SMarc Zyngier 		if (state)
3136e57a3e28SMarc Zyngier 			its_vpe_send_cmd(vpe, its_send_int);
3137e57a3e28SMarc Zyngier 		else
3138e57a3e28SMarc Zyngier 			its_vpe_send_cmd(vpe, its_send_clear);
3139e57a3e28SMarc Zyngier 	}
3140e57a3e28SMarc Zyngier 
3141e57a3e28SMarc Zyngier 	return 0;
3142e57a3e28SMarc Zyngier }
3143e57a3e28SMarc Zyngier 
31448fff27aeSMarc Zyngier static struct irq_chip its_vpe_irq_chip = {
31458fff27aeSMarc Zyngier 	.name			= "GICv4-vpe",
3146f6a91da7SMarc Zyngier 	.irq_mask		= its_vpe_mask_irq,
3147f6a91da7SMarc Zyngier 	.irq_unmask		= its_vpe_unmask_irq,
3148f6a91da7SMarc Zyngier 	.irq_eoi		= irq_chip_eoi_parent,
31493171a47aSMarc Zyngier 	.irq_set_affinity	= its_vpe_set_affinity,
3150e57a3e28SMarc Zyngier 	.irq_set_irqchip_state	= its_vpe_set_irqchip_state,
3151e643d803SMarc Zyngier 	.irq_set_vcpu_affinity	= its_vpe_set_vcpu_affinity,
31528fff27aeSMarc Zyngier };
31538fff27aeSMarc Zyngier 
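/* vPE IDs index the ITS vPE table; hand them out from a bounded IDA */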
31547d75bbb4SMarc Zyngier static int its_vpe_id_alloc(void)
31557d75bbb4SMarc Zyngier {
315632bd44dcSShanker Donthineni 	return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL);
31577d75bbb4SMarc Zyngier }
31587d75bbb4SMarc Zyngier 
31597d75bbb4SMarc Zyngier static void its_vpe_id_free(u16 id)
31607d75bbb4SMarc Zyngier {
31617d75bbb4SMarc Zyngier 	ida_simple_remove(&its_vpeid_ida, id);
31627d75bbb4SMarc Zyngier }
31637d75bbb4SMarc Zyngier 
31647d75bbb4SMarc Zyngier static int its_vpe_init(struct its_vpe *vpe)
31657d75bbb4SMarc Zyngier {
31667d75bbb4SMarc Zyngier 	struct page *vpt_page;
31677d75bbb4SMarc Zyngier 	int vpe_id;
31687d75bbb4SMarc Zyngier 
31697d75bbb4SMarc Zyngier 	/* Allocate vpe_id */
31707d75bbb4SMarc Zyngier 	vpe_id = its_vpe_id_alloc();
31717d75bbb4SMarc Zyngier 	if (vpe_id < 0)
31727d75bbb4SMarc Zyngier 		return vpe_id;
31737d75bbb4SMarc Zyngier 
31747d75bbb4SMarc Zyngier 	/* Allocate VPT */
31757d75bbb4SMarc Zyngier 	vpt_page = its_allocate_pending_table(GFP_KERNEL);
31767d75bbb4SMarc Zyngier 	if (!vpt_page) {
31777d75bbb4SMarc Zyngier 		its_vpe_id_free(vpe_id);
31787d75bbb4SMarc Zyngier 		return -ENOMEM;
31797d75bbb4SMarc Zyngier 	}
31807d75bbb4SMarc Zyngier 
31817d75bbb4SMarc Zyngier 	if (!its_alloc_vpe_table(vpe_id)) {
31827d75bbb4SMarc Zyngier 		its_vpe_id_free(vpe_id);
318334f8eb92SNianyao Tang 		its_free_pending_table(vpt_page);
31847d75bbb4SMarc Zyngier 		return -ENOMEM;
31857d75bbb4SMarc Zyngier 	}
31867d75bbb4SMarc Zyngier 
31877d75bbb4SMarc Zyngier 	vpe->vpe_id = vpe_id;
31887d75bbb4SMarc Zyngier 	vpe->vpt_page = vpt_page;
318920b3d54eSMarc Zyngier 	vpe->vpe_proxy_event = -1;
31907d75bbb4SMarc Zyngier 
31917d75bbb4SMarc Zyngier 	return 0;
31927d75bbb4SMarc Zyngier }
31937d75bbb4SMarc Zyngier 
31947d75bbb4SMarc Zyngier static void its_vpe_teardown(struct its_vpe *vpe)
31957d75bbb4SMarc Zyngier {
319620b3d54eSMarc Zyngier 	its_vpe_db_proxy_unmap(vpe);
31977d75bbb4SMarc Zyngier 	its_vpe_id_free(vpe->vpe_id);
31987d75bbb4SMarc Zyngier 	its_free_pending_table(vpe->vpt_page);
31997d75bbb4SMarc Zyngier }
32007d75bbb4SMarc Zyngier 
32017d75bbb4SMarc Zyngier static void its_vpe_irq_domain_free(struct irq_domain *domain,
32027d75bbb4SMarc Zyngier 				    unsigned int virq,
32037d75bbb4SMarc Zyngier 				    unsigned int nr_irqs)
32047d75bbb4SMarc Zyngier {
32057d75bbb4SMarc Zyngier 	struct its_vm *vm = domain->host_data;
32067d75bbb4SMarc Zyngier 	int i;
32077d75bbb4SMarc Zyngier 
32087d75bbb4SMarc Zyngier 	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
32097d75bbb4SMarc Zyngier 
32107d75bbb4SMarc Zyngier 	for (i = 0; i < nr_irqs; i++) {
32117d75bbb4SMarc Zyngier 		struct irq_data *data = irq_domain_get_irq_data(domain,
32127d75bbb4SMarc Zyngier 								virq + i);
32137d75bbb4SMarc Zyngier 		struct its_vpe *vpe = irq_data_get_irq_chip_data(data);
32147d75bbb4SMarc Zyngier 
32157d75bbb4SMarc Zyngier 		BUG_ON(vm != vpe->its_vm);
32167d75bbb4SMarc Zyngier 
32177d75bbb4SMarc Zyngier 		clear_bit(data->hwirq, vm->db_bitmap);
32187d75bbb4SMarc Zyngier 		its_vpe_teardown(vpe);
32197d75bbb4SMarc Zyngier 		irq_domain_reset_irq_data(data);
32207d75bbb4SMarc Zyngier 	}
32217d75bbb4SMarc Zyngier 
32227d75bbb4SMarc Zyngier 	if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) {
322338dd7c49SMarc Zyngier 		its_lpi_free(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis);
32247d75bbb4SMarc Zyngier 		its_free_prop_table(vm->vprop_page);
32257d75bbb4SMarc Zyngier 	}
32267d75bbb4SMarc Zyngier }
32277d75bbb4SMarc Zyngier 
32287d75bbb4SMarc Zyngier static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
32297d75bbb4SMarc Zyngier 				    unsigned int nr_irqs, void *args)
32307d75bbb4SMarc Zyngier {
32317d75bbb4SMarc Zyngier 	struct its_vm *vm = args;
32327d75bbb4SMarc Zyngier 	unsigned long *bitmap;
32337d75bbb4SMarc Zyngier 	struct page *vprop_page;
32347d75bbb4SMarc Zyngier 	int base, nr_ids, i, err = 0;
32357d75bbb4SMarc Zyngier 
32367d75bbb4SMarc Zyngier 	BUG_ON(!vm);
32377d75bbb4SMarc Zyngier 
323838dd7c49SMarc Zyngier 	bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids);
32397d75bbb4SMarc Zyngier 	if (!bitmap)
32407d75bbb4SMarc Zyngier 		return -ENOMEM;
32417d75bbb4SMarc Zyngier 
32427d75bbb4SMarc Zyngier 	if (nr_ids < nr_irqs) {
324338dd7c49SMarc Zyngier 		its_lpi_free(bitmap, base, nr_ids);
32447d75bbb4SMarc Zyngier 		return -ENOMEM;
32457d75bbb4SMarc Zyngier 	}
32467d75bbb4SMarc Zyngier 
32477d75bbb4SMarc Zyngier 	vprop_page = its_allocate_prop_table(GFP_KERNEL);
32487d75bbb4SMarc Zyngier 	if (!vprop_page) {
324938dd7c49SMarc Zyngier 		its_lpi_free(bitmap, base, nr_ids);
32507d75bbb4SMarc Zyngier 		return -ENOMEM;
32517d75bbb4SMarc Zyngier 	}
32527d75bbb4SMarc Zyngier 
32537d75bbb4SMarc Zyngier 	vm->db_bitmap = bitmap;
32547d75bbb4SMarc Zyngier 	vm->db_lpi_base = base;
32557d75bbb4SMarc Zyngier 	vm->nr_db_lpis = nr_ids;
32567d75bbb4SMarc Zyngier 	vm->vprop_page = vprop_page;
32577d75bbb4SMarc Zyngier 
32587d75bbb4SMarc Zyngier 	for (i = 0; i < nr_irqs; i++) {
32597d75bbb4SMarc Zyngier 		vm->vpes[i]->vpe_db_lpi = base + i;
32607d75bbb4SMarc Zyngier 		err = its_vpe_init(vm->vpes[i]);
32617d75bbb4SMarc Zyngier 		if (err)
32627d75bbb4SMarc Zyngier 			break;
32637d75bbb4SMarc Zyngier 		err = its_irq_gic_domain_alloc(domain, virq + i,
32647d75bbb4SMarc Zyngier 					       vm->vpes[i]->vpe_db_lpi);
32657d75bbb4SMarc Zyngier 		if (err)
32667d75bbb4SMarc Zyngier 			break;
32677d75bbb4SMarc Zyngier 		irq_domain_set_hwirq_and_chip(domain, virq + i, i,
32687d75bbb4SMarc Zyngier 					      &its_vpe_irq_chip, vm->vpes[i]);
32697d75bbb4SMarc Zyngier 		set_bit(i, bitmap);
32707d75bbb4SMarc Zyngier 	}
32717d75bbb4SMarc Zyngier 
32727d75bbb4SMarc Zyngier 	if (err) {
32737d75bbb4SMarc Zyngier 		if (i > 0)
32747d75bbb4SMarc Zyngier 			its_vpe_irq_domain_free(domain, virq, i - 1);
32757d75bbb4SMarc Zyngier 
327638dd7c49SMarc Zyngier 		its_lpi_free(bitmap, base, nr_ids);
32777d75bbb4SMarc Zyngier 		its_free_prop_table(vprop_page);
32787d75bbb4SMarc Zyngier 	}
32797d75bbb4SMarc Zyngier 
32807d75bbb4SMarc Zyngier 	return err;
32817d75bbb4SMarc Zyngier }
32827d75bbb4SMarc Zyngier 
328372491643SThomas Gleixner static int its_vpe_irq_domain_activate(struct irq_domain *domain,
3284702cb0a0SThomas Gleixner 				       struct irq_data *d, bool reserve)
3285eb78192bSMarc Zyngier {
3286eb78192bSMarc Zyngier 	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
328740619a2eSMarc Zyngier 	struct its_node *its;
3288eb78192bSMarc Zyngier 
32892247e1bfSMarc Zyngier 	/* If we use the list map, we issue VMAPP on demand... */
32902247e1bfSMarc Zyngier 	if (its_list_map)
32916ef930f2SMarc Zyngier 		return 0;
3292eb78192bSMarc Zyngier 
3293eb78192bSMarc Zyngier 	/* Map the VPE to the first possible CPU */
3294eb78192bSMarc Zyngier 	vpe->col_idx = cpumask_first(cpu_online_mask);
329540619a2eSMarc Zyngier 
329640619a2eSMarc Zyngier 	list_for_each_entry(its, &its_nodes, entry) {
32970dd57fedSMarc Zyngier 		if (!is_v4(its))
329840619a2eSMarc Zyngier 			continue;
329940619a2eSMarc Zyngier 
330075fd951bSMarc Zyngier 		its_send_vmapp(its, vpe, true);
330140619a2eSMarc Zyngier 		its_send_vinvall(its, vpe);
330240619a2eSMarc Zyngier 	}
330340619a2eSMarc Zyngier 
330444c4c25eSMarc Zyngier 	irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
330544c4c25eSMarc Zyngier 
330672491643SThomas Gleixner 	return 0;
3307eb78192bSMarc Zyngier }
3308eb78192bSMarc Zyngier 
3309eb78192bSMarc Zyngier static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,
3310eb78192bSMarc Zyngier 					  struct irq_data *d)
3311eb78192bSMarc Zyngier {
3312eb78192bSMarc Zyngier 	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
331375fd951bSMarc Zyngier 	struct its_node *its;
3314eb78192bSMarc Zyngier 
33152247e1bfSMarc Zyngier 	/*
33162247e1bfSMarc Zyngier 	 * If we use the list map, we unmap the VPE once no VLPIs are
33172247e1bfSMarc Zyngier 	 * associated with the VM.
33182247e1bfSMarc Zyngier 	 */
33192247e1bfSMarc Zyngier 	if (its_list_map)
33202247e1bfSMarc Zyngier 		return;
33212247e1bfSMarc Zyngier 
332275fd951bSMarc Zyngier 	list_for_each_entry(its, &its_nodes, entry) {
33230dd57fedSMarc Zyngier 		if (!is_v4(its))
332475fd951bSMarc Zyngier 			continue;
332575fd951bSMarc Zyngier 
332675fd951bSMarc Zyngier 		its_send_vmapp(its, vpe, false);
332775fd951bSMarc Zyngier 	}
3328eb78192bSMarc Zyngier }
3329eb78192bSMarc Zyngier 
33308fff27aeSMarc Zyngier static const struct irq_domain_ops its_vpe_domain_ops = {
33317d75bbb4SMarc Zyngier 	.alloc			= its_vpe_irq_domain_alloc,
33327d75bbb4SMarc Zyngier 	.free			= its_vpe_irq_domain_free,
3333eb78192bSMarc Zyngier 	.activate		= its_vpe_irq_domain_activate,
3334eb78192bSMarc Zyngier 	.deactivate		= its_vpe_irq_domain_deactivate,
33358fff27aeSMarc Zyngier };
33368fff27aeSMarc Zyngier 
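/*
 * Force the ITS into a disabled, quiescent state so that GITS_CBASER and
 * GITS_BASER<n> can safely be (re)programmed: clear Enabled/ImDe in
 * GITS_CTLR, then poll the Quiescent bit for up to ~1s (1000000 x
 * udelay(1)), returning -EBUSY on timeout.
 */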
33374559fbb3SYun Wu static int its_force_quiescent(void __iomem *base)
33384559fbb3SYun Wu {
33394559fbb3SYun Wu 	u32 count = 1000000;	/* 1s */
33404559fbb3SYun Wu 	u32 val;
33414559fbb3SYun Wu 
33424559fbb3SYun Wu 	val = readl_relaxed(base + GITS_CTLR);
33437611da86SDavid Daney 	/*
33447611da86SDavid Daney 	 * The GIC architecture specification requires the ITS to be both
33457611da86SDavid Daney 	 * disabled and quiescent before GITS_BASER<n> or GITS_CBASER are
33467611da86SDavid Daney 	 * written, otherwise the results are UNPREDICTABLE.
33477611da86SDavid Daney 	 */
33487611da86SDavid Daney 	if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE))
33494559fbb3SYun Wu 		return 0;
33504559fbb3SYun Wu 
33514559fbb3SYun Wu 	/* Disable the generation of all interrupts to this ITS */
3352d51c4b4dSMarc Zyngier 	val &= ~(GITS_CTLR_ENABLE | GITS_CTLR_ImDe);
33534559fbb3SYun Wu 	writel_relaxed(val, base + GITS_CTLR);
33544559fbb3SYun Wu 
33554559fbb3SYun Wu 	/* Poll GITS_CTLR and wait until ITS becomes quiescent */
33564559fbb3SYun Wu 	while (1) {
33574559fbb3SYun Wu 		val = readl_relaxed(base + GITS_CTLR);
33584559fbb3SYun Wu 		if (val & GITS_CTLR_QUIESCENT)
33594559fbb3SYun Wu 			return 0;
33604559fbb3SYun Wu 
33614559fbb3SYun Wu 		count--;
33624559fbb3SYun Wu 		if (!count)
33634559fbb3SYun Wu 			return -EBUSY;
33644559fbb3SYun Wu 
33654559fbb3SYun Wu 		cpu_relax();
33664559fbb3SYun Wu 		udelay(1);
33674559fbb3SYun Wu 	}
33684559fbb3SYun Wu }
33694559fbb3SYun Wu 
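/*
 * Cavium ThunderX errata 22375/24313 workaround: pretend the ITS only
 * supports 20 DeviceID bits, capping the device table at 8MB. Note that
 * GITS_TYPER.Devbits encodes "number of bits minus one", hence 20 - 1.
 */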
33709d111d49SArd Biesheuvel static bool __maybe_unused its_enable_quirk_cavium_22375(void *data)
337194100970SRobert Richter {
337294100970SRobert Richter 	struct its_node *its = data;
337394100970SRobert Richter 
3374576a8342SMarc Zyngier 	/* erratum 22375: only alloc 8MB table size (20 bits) */
3375576a8342SMarc Zyngier 	its->typer &= ~GITS_TYPER_DEVBITS;
3376576a8342SMarc Zyngier 	its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, 20 - 1);
337794100970SRobert Richter 	its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
33789d111d49SArd Biesheuvel 
33799d111d49SArd Biesheuvel 	return true;
338094100970SRobert Richter }
338194100970SRobert Richter 
33829d111d49SArd Biesheuvel static bool __maybe_unused its_enable_quirk_cavium_23144(void *data)
3383fbf8f40eSGanapatrao Kulkarni {
3384fbf8f40eSGanapatrao Kulkarni 	struct its_node *its = data;
3385fbf8f40eSGanapatrao Kulkarni 
3386fbf8f40eSGanapatrao Kulkarni 	its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
33879d111d49SArd Biesheuvel 
33889d111d49SArd Biesheuvel 	return true;
3389fbf8f40eSGanapatrao Kulkarni }
3390fbf8f40eSGanapatrao Kulkarni 
33919d111d49SArd Biesheuvel static bool __maybe_unused its_enable_quirk_qdf2400_e0065(void *data)
339290922a2dSShanker Donthineni {
339390922a2dSShanker Donthineni 	struct its_node *its = data;
339490922a2dSShanker Donthineni 
339590922a2dSShanker Donthineni 	/* On QDF2400, the size of the ITE is 16 bytes */
3396ffedbf0cSMarc Zyngier 	its->typer &= ~GITS_TYPER_ITT_ENTRY_SIZE;
3397ffedbf0cSMarc Zyngier 	its->typer |= FIELD_PREP(GITS_TYPER_ITT_ENTRY_SIZE, 16 - 1);
33989d111d49SArd Biesheuvel 
33999d111d49SArd Biesheuvel 	return true;
340090922a2dSShanker Donthineni }
340190922a2dSShanker Donthineni 
3402558b0165SArd Biesheuvel static u64 its_irq_get_msi_base_pre_its(struct its_device *its_dev)
3403558b0165SArd Biesheuvel {
3404558b0165SArd Biesheuvel 	struct its_node *its = its_dev->its;
3405558b0165SArd Biesheuvel 
3406558b0165SArd Biesheuvel 	/*
3407558b0165SArd Biesheuvel 	 * The Socionext Synquacer SoC has a so-called 'pre-ITS',
3408558b0165SArd Biesheuvel 	 * which maps 32-bit writes targeted at a separate window of
3409558b0165SArd Biesheuvel 	 * size '4 << device_id_bits' onto writes to GITS_TRANSLATER
3410558b0165SArd Biesheuvel 	 * with device ID taken from bits [device_id_bits + 1:2] of
3411558b0165SArd Biesheuvel 	 * the window offset.
3412558b0165SArd Biesheuvel 	 */
3413558b0165SArd Biesheuvel 	return its->pre_its_base + (its_dev->device_id << 2);
3414558b0165SArd Biesheuvel }
3415558b0165SArd Biesheuvel 
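/*
 * Socionext Synquacer pre-ITS quirk: the "socionext,synquacer-pre-its"
 * property carries the { base, size } of the pre-ITS window. Each device
 * owns a 4-byte slot in that window (see its_irq_get_msi_base_pre_its),
 * so the usable DeviceID width is ilog2(size) - 2 and GITS_TYPER_DEVBITS
 * is narrowed accordingly. As the pre-ITS defeats device isolation, MSI
 * remapping is advertised as unsupported.
 */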
3416558b0165SArd Biesheuvel static bool __maybe_unused its_enable_quirk_socionext_synquacer(void *data)
3417558b0165SArd Biesheuvel {
3418558b0165SArd Biesheuvel 	struct its_node *its = data;
3419558b0165SArd Biesheuvel 	u32 pre_its_window[2];
3420558b0165SArd Biesheuvel 	u32 ids;
3421558b0165SArd Biesheuvel 
3422558b0165SArd Biesheuvel 	if (!fwnode_property_read_u32_array(its->fwnode_handle,
3423558b0165SArd Biesheuvel 					   "socionext,synquacer-pre-its",
3424558b0165SArd Biesheuvel 					   pre_its_window,
3425558b0165SArd Biesheuvel 					   ARRAY_SIZE(pre_its_window))) {
3426558b0165SArd Biesheuvel 
3427558b0165SArd Biesheuvel 		its->pre_its_base = pre_its_window[0];
3428558b0165SArd Biesheuvel 		its->get_msi_base = its_irq_get_msi_base_pre_its;
3429558b0165SArd Biesheuvel 
3430558b0165SArd Biesheuvel 		ids = ilog2(pre_its_window[1]) - 2;
3431576a8342SMarc Zyngier 		if (device_ids(its) > ids) {
3432576a8342SMarc Zyngier 			its->typer &= ~GITS_TYPER_DEVBITS;
3433576a8342SMarc Zyngier 			its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, ids - 1);
3434576a8342SMarc Zyngier 		}
3435558b0165SArd Biesheuvel 
3436558b0165SArd Biesheuvel 		/* the pre-ITS breaks isolation, so disable MSI remapping */
3437558b0165SArd Biesheuvel 		its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_MSI_REMAP;
3438558b0165SArd Biesheuvel 		return true;
3439558b0165SArd Biesheuvel 	}
3440558b0165SArd Biesheuvel 	return false;
3441558b0165SArd Biesheuvel }
3442558b0165SArd Biesheuvel 
34435c9a882eSMarc Zyngier static bool __maybe_unused its_enable_quirk_hip07_161600802(void *data)
34445c9a882eSMarc Zyngier {
34455c9a882eSMarc Zyngier 	struct its_node *its = data;
34465c9a882eSMarc Zyngier 
34475c9a882eSMarc Zyngier 	/*
34485c9a882eSMarc Zyngier 	 * Hip07 insists on using the wrong address for the VLPI
34495c9a882eSMarc Zyngier 	 * page. Trick it into doing the right thing...
34505c9a882eSMarc Zyngier 	 */
34515c9a882eSMarc Zyngier 	its->vlpi_redist_offset = SZ_128K;
34525c9a882eSMarc Zyngier 	return true;
3453cc2d3216SMarc Zyngier }
34544c21f3c2SMarc Zyngier 
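/*
 * Quirk table matched against GITS_IIDR: an entry applies when
 * (GITS_IIDR & mask) == iidr, and its init() callback gets to patch the
 * its_node before the ITS tables are allocated.
 */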
345567510ccaSRobert Richter static const struct gic_quirk its_quirks[] = {
345694100970SRobert Richter #ifdef CONFIG_CAVIUM_ERRATUM_22375
345794100970SRobert Richter 	{
345894100970SRobert Richter 		.desc	= "ITS: Cavium errata 22375, 24313",
345994100970SRobert Richter 		.iidr	= 0xa100034c,	/* ThunderX pass 1.x */
346094100970SRobert Richter 		.mask	= 0xffff0fff,
346194100970SRobert Richter 		.init	= its_enable_quirk_cavium_22375,
346294100970SRobert Richter 	},
346394100970SRobert Richter #endif
3464fbf8f40eSGanapatrao Kulkarni #ifdef CONFIG_CAVIUM_ERRATUM_23144
3465fbf8f40eSGanapatrao Kulkarni 	{
3466fbf8f40eSGanapatrao Kulkarni 		.desc	= "ITS: Cavium erratum 23144",
3467fbf8f40eSGanapatrao Kulkarni 		.iidr	= 0xa100034c,	/* ThunderX pass 1.x */
3468fbf8f40eSGanapatrao Kulkarni 		.mask	= 0xffff0fff,
3469fbf8f40eSGanapatrao Kulkarni 		.init	= its_enable_quirk_cavium_23144,
3470fbf8f40eSGanapatrao Kulkarni 	},
3471fbf8f40eSGanapatrao Kulkarni #endif
347290922a2dSShanker Donthineni #ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065
347390922a2dSShanker Donthineni 	{
347490922a2dSShanker Donthineni 		.desc	= "ITS: QDF2400 erratum 0065",
347590922a2dSShanker Donthineni 		.iidr	= 0x00001070, /* QDF2400 ITS rev 1.x */
347690922a2dSShanker Donthineni 		.mask	= 0xffffffff,
347790922a2dSShanker Donthineni 		.init	= its_enable_quirk_qdf2400_e0065,
347890922a2dSShanker Donthineni 	},
347990922a2dSShanker Donthineni #endif
3480558b0165SArd Biesheuvel #ifdef CONFIG_SOCIONEXT_SYNQUACER_PREITS
3481558b0165SArd Biesheuvel 	{
3482558b0165SArd Biesheuvel 		/*
3483558b0165SArd Biesheuvel 		 * The Socionext Synquacer SoC incorporates ARM's own GIC-500
3484558b0165SArd Biesheuvel 		 * implementation, but with a 'pre-ITS' added that requires
3485558b0165SArd Biesheuvel 		 * special handling in software.
3486558b0165SArd Biesheuvel 		 */
3487558b0165SArd Biesheuvel 		.desc	= "ITS: Socionext Synquacer pre-ITS",
3488558b0165SArd Biesheuvel 		.iidr	= 0x0001143b,
3489558b0165SArd Biesheuvel 		.mask	= 0xffffffff,
3490558b0165SArd Biesheuvel 		.init	= its_enable_quirk_socionext_synquacer,
3491558b0165SArd Biesheuvel 	},
3492558b0165SArd Biesheuvel #endif
34935c9a882eSMarc Zyngier #ifdef CONFIG_HISILICON_ERRATUM_161600802
34945c9a882eSMarc Zyngier 	{
34955c9a882eSMarc Zyngier 		.desc	= "ITS: Hip07 erratum 161600802",
34965c9a882eSMarc Zyngier 		.iidr	= 0x00000004,
34975c9a882eSMarc Zyngier 		.mask	= 0xffffffff,
34985c9a882eSMarc Zyngier 		.init	= its_enable_quirk_hip07_161600802,
34995c9a882eSMarc Zyngier 	},
35005c9a882eSMarc Zyngier #endif
350167510ccaSRobert Richter 	{
350267510ccaSRobert Richter 	}
350367510ccaSRobert Richter };
350467510ccaSRobert Richter 
350567510ccaSRobert Richter static void its_enable_quirks(struct its_node *its)
350667510ccaSRobert Richter {
350767510ccaSRobert Richter 	u32 iidr = readl_relaxed(its->base + GITS_IIDR);
350867510ccaSRobert Richter 
350967510ccaSRobert Richter 	gic_enable_quirks(iidr, its_quirks, its);
351067510ccaSRobert Richter }
351167510ccaSRobert Richter 
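/*
 * Syscore suspend hook: for each ITS flagged with
 * ITS_FLAGS_SAVE_SUSPEND_STATE, save GITS_CTLR and GITS_CBASER and force
 * the ITS quiescent. If any ITS fails to quiesce, re-enable the ones
 * already disabled and abort the suspend by returning the error.
 */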
3512dba0bc7bSDerek Basehore static int its_save_disable(void)
3513dba0bc7bSDerek Basehore {
3514dba0bc7bSDerek Basehore 	struct its_node *its;
3515dba0bc7bSDerek Basehore 	int err = 0;
3516dba0bc7bSDerek Basehore 
3517a8db7456SSebastian Andrzej Siewior 	raw_spin_lock(&its_lock);
3518dba0bc7bSDerek Basehore 	list_for_each_entry(its, &its_nodes, entry) {
3519dba0bc7bSDerek Basehore 		void __iomem *base;
3520dba0bc7bSDerek Basehore 
3521dba0bc7bSDerek Basehore 		if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
3522dba0bc7bSDerek Basehore 			continue;
3523dba0bc7bSDerek Basehore 
3524dba0bc7bSDerek Basehore 		base = its->base;
3525dba0bc7bSDerek Basehore 		its->ctlr_save = readl_relaxed(base + GITS_CTLR);
3526dba0bc7bSDerek Basehore 		err = its_force_quiescent(base);
3527dba0bc7bSDerek Basehore 		if (err) {
3528dba0bc7bSDerek Basehore 			pr_err("ITS@%pa: failed to quiesce: %d\n",
3529dba0bc7bSDerek Basehore 			       &its->phys_base, err);
3530dba0bc7bSDerek Basehore 			writel_relaxed(its->ctlr_save, base + GITS_CTLR);
3531dba0bc7bSDerek Basehore 			goto err;
3532dba0bc7bSDerek Basehore 		}
3533dba0bc7bSDerek Basehore 
3534dba0bc7bSDerek Basehore 		its->cbaser_save = gits_read_cbaser(base + GITS_CBASER);
3535dba0bc7bSDerek Basehore 	}
3536dba0bc7bSDerek Basehore 
3537dba0bc7bSDerek Basehore err:
3538dba0bc7bSDerek Basehore 	if (err) {
3539dba0bc7bSDerek Basehore 		list_for_each_entry_continue_reverse(its, &its_nodes, entry) {
3540dba0bc7bSDerek Basehore 			void __iomem *base;
3541dba0bc7bSDerek Basehore 
3542dba0bc7bSDerek Basehore 			if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
3543dba0bc7bSDerek Basehore 				continue;
3544dba0bc7bSDerek Basehore 
3545dba0bc7bSDerek Basehore 			base = its->base;
3546dba0bc7bSDerek Basehore 			writel_relaxed(its->ctlr_save, base + GITS_CTLR);
3547dba0bc7bSDerek Basehore 		}
3548dba0bc7bSDerek Basehore 	}
3549a8db7456SSebastian Andrzej Siewior 	raw_spin_unlock(&its_lock);
3550dba0bc7bSDerek Basehore 
3551dba0bc7bSDerek Basehore 	return err;
3552dba0bc7bSDerek Basehore }
3553dba0bc7bSDerek Basehore 
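/*
 * Syscore resume hook: quiesce the ITS, restore GITS_CBASER, rewind
 * CWRITER/cmd_write to the start of the command queue (writing CBASER
 * resets CREADR to 0), rewrite the cached GITS_BASER<n> values, restore
 * GITS_CTLR and finally re-register any collection held in the ITS
 * itself.
 */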
3554dba0bc7bSDerek Basehore static void its_restore_enable(void)
3555dba0bc7bSDerek Basehore {
3556dba0bc7bSDerek Basehore 	struct its_node *its;
3557dba0bc7bSDerek Basehore 	int ret;
3558dba0bc7bSDerek Basehore 
3559a8db7456SSebastian Andrzej Siewior 	raw_spin_lock(&its_lock);
3560dba0bc7bSDerek Basehore 	list_for_each_entry(its, &its_nodes, entry) {
3561dba0bc7bSDerek Basehore 		void __iomem *base;
3562dba0bc7bSDerek Basehore 		int i;
3563dba0bc7bSDerek Basehore 
3564dba0bc7bSDerek Basehore 		if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
3565dba0bc7bSDerek Basehore 			continue;
3566dba0bc7bSDerek Basehore 
3567dba0bc7bSDerek Basehore 		base = its->base;
3568dba0bc7bSDerek Basehore 
3569dba0bc7bSDerek Basehore 		/*
3570dba0bc7bSDerek Basehore 		 * Make sure that the ITS is disabled. If it fails to quiesce,
3571dba0bc7bSDerek Basehore 		 * don't restore it since writing to CBASER or BASER<n>
3572dba0bc7bSDerek Basehore 		 * registers is undefined according to the GIC v3 ITS
3573dba0bc7bSDerek Basehore 		 * Specification.
3574dba0bc7bSDerek Basehore 		 */
3575dba0bc7bSDerek Basehore 		ret = its_force_quiescent(base);
3576dba0bc7bSDerek Basehore 		if (ret) {
3577dba0bc7bSDerek Basehore 			pr_err("ITS@%pa: failed to quiesce on resume: %d\n",
3578dba0bc7bSDerek Basehore 			       &its->phys_base, ret);
3579dba0bc7bSDerek Basehore 			continue;
3580dba0bc7bSDerek Basehore 		}
3581dba0bc7bSDerek Basehore 
3582dba0bc7bSDerek Basehore 		gits_write_cbaser(its->cbaser_save, base + GITS_CBASER);
3583dba0bc7bSDerek Basehore 
3584dba0bc7bSDerek Basehore 		/*
3585dba0bc7bSDerek Basehore 		 * Writing CBASER resets CREADR to 0, so make CWRITER and
3586dba0bc7bSDerek Basehore 		 * cmd_write line up with it.
3587dba0bc7bSDerek Basehore 		 */
3588dba0bc7bSDerek Basehore 		its->cmd_write = its->cmd_base;
3589dba0bc7bSDerek Basehore 		gits_write_cwriter(0, base + GITS_CWRITER);
3590dba0bc7bSDerek Basehore 
3591dba0bc7bSDerek Basehore 		/* Restore GITS_BASER from the value cache. */
3592dba0bc7bSDerek Basehore 		for (i = 0; i < GITS_BASER_NR_REGS; i++) {
3593dba0bc7bSDerek Basehore 			struct its_baser *baser = &its->tables[i];
3594dba0bc7bSDerek Basehore 
3595dba0bc7bSDerek Basehore 			if (!(baser->val & GITS_BASER_VALID))
3596dba0bc7bSDerek Basehore 				continue;
3597dba0bc7bSDerek Basehore 
3598dba0bc7bSDerek Basehore 			its_write_baser(its, baser, baser->val);
3599dba0bc7bSDerek Basehore 		}
3600dba0bc7bSDerek Basehore 		writel_relaxed(its->ctlr_save, base + GITS_CTLR);
3601920181ceSDerek Basehore 
3602920181ceSDerek Basehore 		/*
3603920181ceSDerek Basehore 		 * Re-register the collection if it is held in the ITS itself
3604920181ceSDerek Basehore 		 * rather than in memory: per the GICv3 architecture, this is
3605920181ceSDerek Basehore 		 * the case when col_id is below GITS_TYPER.HCC.
3606920181ceSDerek Basehore 		 */
3607920181ceSDerek Basehore 		if (its->collections[smp_processor_id()].col_id <
3608920181ceSDerek Basehore 		    GITS_TYPER_HCC(gic_read_typer(base + GITS_TYPER)))
3609920181ceSDerek Basehore 			its_cpu_init_collection(its);
3610dba0bc7bSDerek Basehore 	}
3611a8db7456SSebastian Andrzej Siewior 	raw_spin_unlock(&its_lock);
3612dba0bc7bSDerek Basehore }
3613dba0bc7bSDerek Basehore 
3614dba0bc7bSDerek Basehore static struct syscore_ops its_syscore_ops = {
3615dba0bc7bSDerek Basehore 	.suspend = its_save_disable,
3616dba0bc7bSDerek Basehore 	.resume = its_restore_enable,
3617dba0bc7bSDerek Basehore };
3618dba0bc7bSDerek Basehore 
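/*
 * Create the per-ITS interrupt domain (bus token DOMAIN_BUS_NEXUS),
 * parented to the GICv3 domain and carrying its_msi_domain_ops plus the
 * its_node as host data, so that PCI/platform MSI domains can be stacked
 * on top of it.
 */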
3619db40f0a7STomasz Nowicki static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
3620d14ae5e6STomasz Nowicki {
3621d14ae5e6STomasz Nowicki 	struct irq_domain *inner_domain;
3622d14ae5e6STomasz Nowicki 	struct msi_domain_info *info;
3623d14ae5e6STomasz Nowicki 
3624d14ae5e6STomasz Nowicki 	info = kzalloc(sizeof(*info), GFP_KERNEL);
3625d14ae5e6STomasz Nowicki 	if (!info)
3626d14ae5e6STomasz Nowicki 		return -ENOMEM;
3627d14ae5e6STomasz Nowicki 
3628db40f0a7STomasz Nowicki 	inner_domain = irq_domain_create_tree(handle, &its_domain_ops, its);
3629d14ae5e6STomasz Nowicki 	if (!inner_domain) {
3630d14ae5e6STomasz Nowicki 		kfree(info);
3631d14ae5e6STomasz Nowicki 		return -ENOMEM;
3632d14ae5e6STomasz Nowicki 	}
3633d14ae5e6STomasz Nowicki 
3634db40f0a7STomasz Nowicki 	inner_domain->parent = its_parent;
363596f0d93aSMarc Zyngier 	irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
3636558b0165SArd Biesheuvel 	inner_domain->flags |= its->msi_domain_flags;
3637d14ae5e6STomasz Nowicki 	info->ops = &its_msi_domain_ops;
3638d14ae5e6STomasz Nowicki 	info->data = its;
3639d14ae5e6STomasz Nowicki 	inner_domain->host_data = info;
3640d14ae5e6STomasz Nowicki 
3641d14ae5e6STomasz Nowicki 	return 0;
3642d14ae5e6STomasz Nowicki }
3643d14ae5e6STomasz Nowicki 
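/*
 * GICv4 doorbell plumbing: if the redistributors support DirectLPI, the
 * doorbells can be handled directly and nothing needs to be set up here.
 * Otherwise, carve a "proxy" ITS device out of any available ITS, with at
 * least one event per possible CPU (nr_cpu_ids rounded up to a power of
 * two), used to perform doorbell operations on behalf of the VPEs.
 */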
36448fff27aeSMarc Zyngier static int its_init_vpe_domain(void)
36458fff27aeSMarc Zyngier {
364620b3d54eSMarc Zyngier 	struct its_node *its;
364720b3d54eSMarc Zyngier 	u32 devid;
364820b3d54eSMarc Zyngier 	int entries;
364920b3d54eSMarc Zyngier 
365020b3d54eSMarc Zyngier 	if (gic_rdists->has_direct_lpi) {
365120b3d54eSMarc Zyngier 		pr_info("ITS: Using DirectLPI for VPE invalidation\n");
365220b3d54eSMarc Zyngier 		return 0;
365320b3d54eSMarc Zyngier 	}
365420b3d54eSMarc Zyngier 
365520b3d54eSMarc Zyngier 	/* Any ITS will do, even if not v4 */
365620b3d54eSMarc Zyngier 	its = list_first_entry(&its_nodes, struct its_node, entry);
365720b3d54eSMarc Zyngier 
365820b3d54eSMarc Zyngier 	entries = roundup_pow_of_two(nr_cpu_ids);
36596396bb22SKees Cook 	vpe_proxy.vpes = kcalloc(entries, sizeof(*vpe_proxy.vpes),
366020b3d54eSMarc Zyngier 				 GFP_KERNEL);
366120b3d54eSMarc Zyngier 	if (!vpe_proxy.vpes) {
366220b3d54eSMarc Zyngier 		pr_err("ITS: Can't allocate GICv4 proxy device array\n");
366320b3d54eSMarc Zyngier 		return -ENOMEM;
366420b3d54eSMarc Zyngier 	}
366520b3d54eSMarc Zyngier 
366620b3d54eSMarc Zyngier 	/* Use the last possible DevID */
3667576a8342SMarc Zyngier 	devid = GENMASK(device_ids(its) - 1, 0);
366820b3d54eSMarc Zyngier 	vpe_proxy.dev = its_create_device(its, devid, entries, false);
366920b3d54eSMarc Zyngier 	if (!vpe_proxy.dev) {
367020b3d54eSMarc Zyngier 		kfree(vpe_proxy.vpes);
367120b3d54eSMarc Zyngier 		pr_err("ITS: Can't allocate GICv4 proxy device\n");
367220b3d54eSMarc Zyngier 		return -ENOMEM;
367320b3d54eSMarc Zyngier 	}
367420b3d54eSMarc Zyngier 
3675c427a475SShanker Donthineni 	BUG_ON(entries > vpe_proxy.dev->nr_ites);
367620b3d54eSMarc Zyngier 
367720b3d54eSMarc Zyngier 	raw_spin_lock_init(&vpe_proxy.lock);
367820b3d54eSMarc Zyngier 	vpe_proxy.next_victim = 0;
367920b3d54eSMarc Zyngier 	pr_info("ITS: Allocated DevID %x as GICv4 proxy device (%d slots)\n",
368020b3d54eSMarc Zyngier 		devid, vpe_proxy.dev->nr_ites);
368120b3d54eSMarc Zyngier 
36828fff27aeSMarc Zyngier 	return 0;
36838fff27aeSMarc Zyngier }
36848fff27aeSMarc Zyngier 
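/*
 * An ITS lacking GITS_TYPER.VMOVP needs a system-wide ITSList number so
 * that VMOVP commands can be targeted at a subset of ITSes. Claim the
 * first free slot (out of GICv4_ITS_LIST_MAX), program it into
 * GITS_CTLR.ITS_Number and read it back in case the implementation
 * exposes a fixed value, then fail if that number is already in use.
 */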
36853dfa576bSMarc Zyngier static int __init its_compute_its_list_map(struct resource *res,
36863dfa576bSMarc Zyngier 					   void __iomem *its_base)
36873dfa576bSMarc Zyngier {
36883dfa576bSMarc Zyngier 	int its_number;
36893dfa576bSMarc Zyngier 	u32 ctlr;
36903dfa576bSMarc Zyngier 
36913dfa576bSMarc Zyngier 	/*
36923dfa576bSMarc Zyngier 	 * This is assumed to be done early enough that we're
36933dfa576bSMarc Zyngier 	 * guaranteed to be single-threaded, hence no
36943dfa576bSMarc Zyngier 	 * locking. Should this change, we should address
36953dfa576bSMarc Zyngier 	 * this.
36963dfa576bSMarc Zyngier 	 */
3697ab60491eSMarc Zyngier 	its_number = find_first_zero_bit(&its_list_map, GICv4_ITS_LIST_MAX);
3698ab60491eSMarc Zyngier 	if (its_number >= GICv4_ITS_LIST_MAX) {
36993dfa576bSMarc Zyngier 		pr_err("ITS@%pa: No ITSList entry available!\n",
37003dfa576bSMarc Zyngier 		       &res->start);
37013dfa576bSMarc Zyngier 		return -EINVAL;
37023dfa576bSMarc Zyngier 	}
37033dfa576bSMarc Zyngier 
37043dfa576bSMarc Zyngier 	ctlr = readl_relaxed(its_base + GITS_CTLR);
37053dfa576bSMarc Zyngier 	ctlr &= ~GITS_CTLR_ITS_NUMBER;
37063dfa576bSMarc Zyngier 	ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT;
37073dfa576bSMarc Zyngier 	writel_relaxed(ctlr, its_base + GITS_CTLR);
37083dfa576bSMarc Zyngier 	ctlr = readl_relaxed(its_base + GITS_CTLR);
37093dfa576bSMarc Zyngier 	if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) {
37103dfa576bSMarc Zyngier 		its_number = ctlr & GITS_CTLR_ITS_NUMBER;
37113dfa576bSMarc Zyngier 		its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT;
37123dfa576bSMarc Zyngier 	}
37133dfa576bSMarc Zyngier 
37143dfa576bSMarc Zyngier 	if (test_and_set_bit(its_number, &its_list_map)) {
37153dfa576bSMarc Zyngier 		pr_err("ITS@%pa: Duplicate ITSList entry %d\n",
37163dfa576bSMarc Zyngier 		       &res->start, its_number);
37173dfa576bSMarc Zyngier 		return -EINVAL;
37183dfa576bSMarc Zyngier 	}
37193dfa576bSMarc Zyngier 
37203dfa576bSMarc Zyngier 	return its_number;
37213dfa576bSMarc Zyngier }
37223dfa576bSMarc Zyngier 
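/*
 * Probe a single ITS instance: map the registers, check GITS_PIDR2 for a
 * GICv3/v4 architecture revision, force the ITS quiescent and allocate
 * the command queue, device/collection tables and MSI domain before
 * enabling it. GITS_CBASER receives the queue base, the cacheability/
 * shareability attributes and a Size field encoding the number of 4kB
 * pages minus one (ITS_CMD_QUEUE_SZ / SZ_4K - 1); if the ITS downgrades
 * the shareability, the attributes fall back to non-cacheable and command
 * writes are flushed explicitly (ITS_FLAGS_CMDQ_NEEDS_FLUSHING).
 */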
3723db40f0a7STomasz Nowicki static int __init its_probe_one(struct resource *res,
3724db40f0a7STomasz Nowicki 				struct fwnode_handle *handle, int numa_node)
37254c21f3c2SMarc Zyngier {
37264c21f3c2SMarc Zyngier 	struct its_node *its;
37274c21f3c2SMarc Zyngier 	void __iomem *its_base;
37283dfa576bSMarc Zyngier 	u32 val, ctlr;
37293dfa576bSMarc Zyngier 	u64 baser, tmp, typer;
3730539d3782SShanker Donthineni 	struct page *page;
37314c21f3c2SMarc Zyngier 	int err;
37324c21f3c2SMarc Zyngier 
3733db40f0a7STomasz Nowicki 	its_base = ioremap(res->start, resource_size(res));
37344c21f3c2SMarc Zyngier 	if (!its_base) {
3735db40f0a7STomasz Nowicki 		pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start);
37364c21f3c2SMarc Zyngier 		return -ENOMEM;
37374c21f3c2SMarc Zyngier 	}
37384c21f3c2SMarc Zyngier 
37394c21f3c2SMarc Zyngier 	val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK;
37404c21f3c2SMarc Zyngier 	if (val != 0x30 && val != 0x40) {
3741db40f0a7STomasz Nowicki 		pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start);
37424c21f3c2SMarc Zyngier 		err = -ENODEV;
37434c21f3c2SMarc Zyngier 		goto out_unmap;
37444c21f3c2SMarc Zyngier 	}
37454c21f3c2SMarc Zyngier 
37464559fbb3SYun Wu 	err = its_force_quiescent(its_base);
37474559fbb3SYun Wu 	if (err) {
3748db40f0a7STomasz Nowicki 		pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start);
37494559fbb3SYun Wu 		goto out_unmap;
37504559fbb3SYun Wu 	}
37514559fbb3SYun Wu 
3752db40f0a7STomasz Nowicki 	pr_info("ITS %pR\n", res);
37534c21f3c2SMarc Zyngier 
37544c21f3c2SMarc Zyngier 	its = kzalloc(sizeof(*its), GFP_KERNEL);
37554c21f3c2SMarc Zyngier 	if (!its) {
37564c21f3c2SMarc Zyngier 		err = -ENOMEM;
37574c21f3c2SMarc Zyngier 		goto out_unmap;
37584c21f3c2SMarc Zyngier 	}
37594c21f3c2SMarc Zyngier 
37604c21f3c2SMarc Zyngier 	raw_spin_lock_init(&its->lock);
37619791ec7dSMarc Zyngier 	mutex_init(&its->dev_alloc_lock);
37624c21f3c2SMarc Zyngier 	INIT_LIST_HEAD(&its->entry);
37634c21f3c2SMarc Zyngier 	INIT_LIST_HEAD(&its->its_device_list);
37643dfa576bSMarc Zyngier 	typer = gic_read_typer(its_base + GITS_TYPER);
37650dd57fedSMarc Zyngier 	its->typer = typer;
37664c21f3c2SMarc Zyngier 	its->base = its_base;
3767db40f0a7STomasz Nowicki 	its->phys_base = res->start;
37680dd57fedSMarc Zyngier 	if (is_v4(its)) {
37693dfa576bSMarc Zyngier 		if (!(typer & GITS_TYPER_VMOVP)) {
37703dfa576bSMarc Zyngier 			err = its_compute_its_list_map(res, its_base);
37713dfa576bSMarc Zyngier 			if (err < 0)
37723dfa576bSMarc Zyngier 				goto out_free_its;
37733dfa576bSMarc Zyngier 
3774debf6d02SMarc Zyngier 			its->list_nr = err;
3775debf6d02SMarc Zyngier 
37763dfa576bSMarc Zyngier 			pr_info("ITS@%pa: Using ITS number %d\n",
37773dfa576bSMarc Zyngier 				&res->start, err);
37783dfa576bSMarc Zyngier 		} else {
37793dfa576bSMarc Zyngier 			pr_info("ITS@%pa: Single VMOVP capable\n", &res->start);
37803dfa576bSMarc Zyngier 		}
37813dfa576bSMarc Zyngier 	}
37823dfa576bSMarc Zyngier 
3783db40f0a7STomasz Nowicki 	its->numa_node = numa_node;
37844c21f3c2SMarc Zyngier 
3785539d3782SShanker Donthineni 	page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
37865bc13c2cSRobert Richter 				get_order(ITS_CMD_QUEUE_SZ));
3787539d3782SShanker Donthineni 	if (!page) {
37884c21f3c2SMarc Zyngier 		err = -ENOMEM;
37894c21f3c2SMarc Zyngier 		goto out_free_its;
37904c21f3c2SMarc Zyngier 	}
3791539d3782SShanker Donthineni 	its->cmd_base = (void *)page_address(page);
37924c21f3c2SMarc Zyngier 	its->cmd_write = its->cmd_base;
3793558b0165SArd Biesheuvel 	its->fwnode_handle = handle;
3794558b0165SArd Biesheuvel 	its->get_msi_base = its_irq_get_msi_base;
3795558b0165SArd Biesheuvel 	its->msi_domain_flags = IRQ_DOMAIN_FLAG_MSI_REMAP;
37964c21f3c2SMarc Zyngier 
379767510ccaSRobert Richter 	its_enable_quirks(its);
379867510ccaSRobert Richter 
37990e0b0f69SShanker Donthineni 	err = its_alloc_tables(its);
38004c21f3c2SMarc Zyngier 	if (err)
38014c21f3c2SMarc Zyngier 		goto out_free_cmd;
38024c21f3c2SMarc Zyngier 
38034c21f3c2SMarc Zyngier 	err = its_alloc_collections(its);
38044c21f3c2SMarc Zyngier 	if (err)
38054c21f3c2SMarc Zyngier 		goto out_free_tables;
38064c21f3c2SMarc Zyngier 
38074c21f3c2SMarc Zyngier 	baser = (virt_to_phys(its->cmd_base)	|
38082fd632a0SShanker Donthineni 		 GITS_CBASER_RaWaWb		|
38094c21f3c2SMarc Zyngier 		 GITS_CBASER_InnerShareable	|
38104c21f3c2SMarc Zyngier 		 (ITS_CMD_QUEUE_SZ / SZ_4K - 1)	|
38114c21f3c2SMarc Zyngier 		 GITS_CBASER_VALID);
38124c21f3c2SMarc Zyngier 
38130968a619SVladimir Murzin 	gits_write_cbaser(baser, its->base + GITS_CBASER);
38140968a619SVladimir Murzin 	tmp = gits_read_cbaser(its->base + GITS_CBASER);
38154c21f3c2SMarc Zyngier 
38164ad3e363SMarc Zyngier 	if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) {
3817241a386cSMarc Zyngier 		if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) {
3818241a386cSMarc Zyngier 			/*
3819241a386cSMarc Zyngier 			 * The HW reports non-shareable, we must
3820241a386cSMarc Zyngier 			 * remove the cacheability attributes as
3821241a386cSMarc Zyngier 			 * well.
3822241a386cSMarc Zyngier 			 */
3823241a386cSMarc Zyngier 			baser &= ~(GITS_CBASER_SHAREABILITY_MASK |
3824241a386cSMarc Zyngier 				   GITS_CBASER_CACHEABILITY_MASK);
3825241a386cSMarc Zyngier 			baser |= GITS_CBASER_nC;
38260968a619SVladimir Murzin 			gits_write_cbaser(baser, its->base + GITS_CBASER);
3827241a386cSMarc Zyngier 		}
38284c21f3c2SMarc Zyngier 		pr_info("ITS: using cache flushing for cmd queue\n");
38294c21f3c2SMarc Zyngier 		its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
38304c21f3c2SMarc Zyngier 	}
38314c21f3c2SMarc Zyngier 
38320968a619SVladimir Murzin 	gits_write_cwriter(0, its->base + GITS_CWRITER);
38333dfa576bSMarc Zyngier 	ctlr = readl_relaxed(its->base + GITS_CTLR);
3834d51c4b4dSMarc Zyngier 	ctlr |= GITS_CTLR_ENABLE;
38350dd57fedSMarc Zyngier 	if (is_v4(its))
3836d51c4b4dSMarc Zyngier 		ctlr |= GITS_CTLR_ImDe;
3837d51c4b4dSMarc Zyngier 	writel_relaxed(ctlr, its->base + GITS_CTLR);
3838241a386cSMarc Zyngier 
3839dba0bc7bSDerek Basehore 	if (GITS_TYPER_HCC(typer))
3840dba0bc7bSDerek Basehore 		its->flags |= ITS_FLAGS_SAVE_SUSPEND_STATE;
3841dba0bc7bSDerek Basehore 
3842db40f0a7STomasz Nowicki 	err = its_init_domain(handle, its);
3843d14ae5e6STomasz Nowicki 	if (err)
384454456db9SMarc Zyngier 		goto out_free_tables;
38454c21f3c2SMarc Zyngier 
3846a8db7456SSebastian Andrzej Siewior 	raw_spin_lock(&its_lock);
38474c21f3c2SMarc Zyngier 	list_add(&its->entry, &its_nodes);
3848a8db7456SSebastian Andrzej Siewior 	raw_spin_unlock(&its_lock);
38494c21f3c2SMarc Zyngier 
38504c21f3c2SMarc Zyngier 	return 0;
38514c21f3c2SMarc Zyngier 
38524c21f3c2SMarc Zyngier out_free_tables:
38534c21f3c2SMarc Zyngier 	its_free_tables(its);
38544c21f3c2SMarc Zyngier out_free_cmd:
38555bc13c2cSRobert Richter 	free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
38564c21f3c2SMarc Zyngier out_free_its:
38574c21f3c2SMarc Zyngier 	kfree(its);
38584c21f3c2SMarc Zyngier out_unmap:
38594c21f3c2SMarc Zyngier 	iounmap(its_base);
3860db40f0a7STomasz Nowicki 	pr_err("ITS@%pa: failed probing (%d)\n", &res->start, err);
38614c21f3c2SMarc Zyngier 	return err;
38624c21f3c2SMarc Zyngier }
38634c21f3c2SMarc Zyngier 
38644c21f3c2SMarc Zyngier static bool gic_rdists_supports_plpis(void)
38654c21f3c2SMarc Zyngier {
3866589ce5f4SMarc Zyngier 	return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS);
38674c21f3c2SMarc Zyngier }
38684c21f3c2SMarc Zyngier 
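/*
 * LPIs must be disabled in GICR_CTLR before the redistributor tables can
 * be programmed. Finding them already enabled is only legitimate when
 * coming back from CPU hotplug or when the tables were pre-allocated
 * (RDIST_FLAGS_RD_TABLES_PREALLOCATED, e.g. inherited from a previous
 * kernel); otherwise taint the kernel and attempt a best-effort disable,
 * waiting for RWP and checking that EnableLPIs actually cleared.
 */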
38696eb486b6SShanker Donthineni static int redist_disable_lpis(void)
38704c21f3c2SMarc Zyngier {
38716eb486b6SShanker Donthineni 	void __iomem *rbase = gic_data_rdist_rd_base();
38726eb486b6SShanker Donthineni 	u64 timeout = USEC_PER_SEC;
38736eb486b6SShanker Donthineni 	u64 val;
38746eb486b6SShanker Donthineni 
38754c21f3c2SMarc Zyngier 	if (!gic_rdists_supports_plpis()) {
38764c21f3c2SMarc Zyngier 		pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
38774c21f3c2SMarc Zyngier 		return -ENXIO;
38784c21f3c2SMarc Zyngier 	}
38796eb486b6SShanker Donthineni 
38806eb486b6SShanker Donthineni 	val = readl_relaxed(rbase + GICR_CTLR);
38816eb486b6SShanker Donthineni 	if (!(val & GICR_CTLR_ENABLE_LPIS))
38826eb486b6SShanker Donthineni 		return 0;
38836eb486b6SShanker Donthineni 
388411e37d35SMarc Zyngier 	/*
388511e37d35SMarc Zyngier 	 * If coming via a CPU hotplug event, we don't need to disable
388611e37d35SMarc Zyngier 	 * LPIs before trying to re-enable them. They are already
388711e37d35SMarc Zyngier 	 * configured and all is well in the world.
3888c440a9d9SMarc Zyngier 	 *
3889c440a9d9SMarc Zyngier 	 * If running with preallocated tables, there is nothing to do.
389011e37d35SMarc Zyngier 	 */
3891c440a9d9SMarc Zyngier 	if (gic_data_rdist()->lpi_enabled ||
3892c440a9d9SMarc Zyngier 	    (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED))
389311e37d35SMarc Zyngier 		return 0;
389411e37d35SMarc Zyngier 
389511e37d35SMarc Zyngier 	/*
389611e37d35SMarc Zyngier 	 * From that point on, we only try to do some damage control.
389711e37d35SMarc Zyngier 	 */
389811e37d35SMarc Zyngier 	pr_warn("GICv3: CPU%d: Booted with LPIs enabled, memory probably corrupted\n",
38996eb486b6SShanker Donthineni 		smp_processor_id());
39006eb486b6SShanker Donthineni 	add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
39016eb486b6SShanker Donthineni 
39026eb486b6SShanker Donthineni 	/* Disable LPIs */
39036eb486b6SShanker Donthineni 	val &= ~GICR_CTLR_ENABLE_LPIS;
39046eb486b6SShanker Donthineni 	writel_relaxed(val, rbase + GICR_CTLR);
39056eb486b6SShanker Donthineni 
39066eb486b6SShanker Donthineni 	/* Make sure any change to GICR_CTLR is observable by the GIC */
39076eb486b6SShanker Donthineni 	dsb(sy);
39086eb486b6SShanker Donthineni 
39096eb486b6SShanker Donthineni 	/*
39106eb486b6SShanker Donthineni 	 * Software must observe RWP==0 after clearing GICR_CTLR.EnableLPIs
39116eb486b6SShanker Donthineni 	 * from 1 to 0 before programming GICR_PEND{PROP}BASER registers.
39126eb486b6SShanker Donthineni 	 * Error out if we time out waiting for RWP to clear.
39136eb486b6SShanker Donthineni 	 */
39146eb486b6SShanker Donthineni 	while (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_RWP) {
39156eb486b6SShanker Donthineni 		if (!timeout) {
39166eb486b6SShanker Donthineni 			pr_err("CPU%d: Timeout while disabling LPIs\n",
39176eb486b6SShanker Donthineni 			       smp_processor_id());
39186eb486b6SShanker Donthineni 			return -ETIMEDOUT;
39196eb486b6SShanker Donthineni 		}
39206eb486b6SShanker Donthineni 		udelay(1);
39216eb486b6SShanker Donthineni 		timeout--;
39226eb486b6SShanker Donthineni 	}
39236eb486b6SShanker Donthineni 
39246eb486b6SShanker Donthineni 	/*
39256eb486b6SShanker Donthineni 	 * After it has been written to 1, it is IMPLEMENTATION
39266eb486b6SShanker Donthineni 	 * DEFINED whether GICR_CTLR.EnableLPI becomes RES1 or can be
39276eb486b6SShanker Donthineni 	 * cleared to 0. Error out if clearing the bit failed.
39286eb486b6SShanker Donthineni 	 */
39296eb486b6SShanker Donthineni 	if (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_ENABLE_LPIS) {
39306eb486b6SShanker Donthineni 		pr_err("CPU%d: Failed to disable LPIs\n", smp_processor_id());
39316eb486b6SShanker Donthineni 		return -EBUSY;
39326eb486b6SShanker Donthineni 	}
39336eb486b6SShanker Donthineni 
39346eb486b6SShanker Donthineni 	return 0;
39356eb486b6SShanker Donthineni }
39366eb486b6SShanker Donthineni 
39376eb486b6SShanker Donthineni int its_cpu_init(void)
39386eb486b6SShanker Donthineni {
39396eb486b6SShanker Donthineni 	if (!list_empty(&its_nodes)) {
39406eb486b6SShanker Donthineni 		int ret;
39416eb486b6SShanker Donthineni 
39426eb486b6SShanker Donthineni 		ret = redist_disable_lpis();
39436eb486b6SShanker Donthineni 		if (ret)
39446eb486b6SShanker Donthineni 			return ret;
39456eb486b6SShanker Donthineni 
39464c21f3c2SMarc Zyngier 		its_cpu_init_lpis();
3947920181ceSDerek Basehore 		its_cpu_init_collections();
39484c21f3c2SMarc Zyngier 	}
39494c21f3c2SMarc Zyngier 
39504c21f3c2SMarc Zyngier 	return 0;
39514c21f3c2SMarc Zyngier }
39524c21f3c2SMarc Zyngier 
3953935bba7cSArvind Yadav static const struct of_device_id its_device_id[] = {
39544c21f3c2SMarc Zyngier 	{	.compatible	= "arm,gic-v3-its",	},
39554c21f3c2SMarc Zyngier 	{},
39564c21f3c2SMarc Zyngier };
39574c21f3c2SMarc Zyngier 
3958db40f0a7STomasz Nowicki static int __init its_of_probe(struct device_node *node)
39594c21f3c2SMarc Zyngier {
39604c21f3c2SMarc Zyngier 	struct device_node *np;
3961db40f0a7STomasz Nowicki 	struct resource res;
39624c21f3c2SMarc Zyngier 
39634c21f3c2SMarc Zyngier 	for (np = of_find_matching_node(node, its_device_id); np;
39644c21f3c2SMarc Zyngier 	     np = of_find_matching_node(np, its_device_id)) {
396595a25625SStephen Boyd 		if (!of_device_is_available(np))
396695a25625SStephen Boyd 			continue;
3967d14ae5e6STomasz Nowicki 		if (!of_property_read_bool(np, "msi-controller")) {
3968e81f54c6SRob Herring 			pr_warn("%pOF: no msi-controller property, ITS ignored\n",
3969e81f54c6SRob Herring 				np);
3970d14ae5e6STomasz Nowicki 			continue;
3971d14ae5e6STomasz Nowicki 		}
3972d14ae5e6STomasz Nowicki 
3973db40f0a7STomasz Nowicki 		if (of_address_to_resource(np, 0, &res)) {
3974e81f54c6SRob Herring 			pr_warn("%pOF: no regs?\n", np);
3975db40f0a7STomasz Nowicki 			continue;
39764c21f3c2SMarc Zyngier 		}
39774c21f3c2SMarc Zyngier 
3978db40f0a7STomasz Nowicki 		its_probe_one(&res, &np->fwnode, of_node_to_nid(np));
3979db40f0a7STomasz Nowicki 	}
3980db40f0a7STomasz Nowicki 	return 0;
3981db40f0a7STomasz Nowicki }
3982db40f0a7STomasz Nowicki 
39833f010cf1STomasz Nowicki #ifdef CONFIG_ACPI
39843f010cf1STomasz Nowicki 
39853f010cf1STomasz Nowicki #define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K)
39863f010cf1STomasz Nowicki 
3987d1ce263fSRobert Richter #ifdef CONFIG_ACPI_NUMA
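/*
 * ITS to NUMA node affinity comes from the SRAT GIC ITS Affinity
 * structures. The SRAT is parsed twice: once with a match-only callback
 * to count the entries and size its_srat_maps, then again to fill in the
 * its_id -> numa_node mapping consumed by acpi_get_its_numa_node().
 */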
3988dbd2b826SGanapatrao Kulkarni struct its_srat_map {
3989dbd2b826SGanapatrao Kulkarni 	/* numa node id */
3990dbd2b826SGanapatrao Kulkarni 	u32	numa_node;
3991dbd2b826SGanapatrao Kulkarni 	/* GIC ITS ID */
3992dbd2b826SGanapatrao Kulkarni 	u32	its_id;
3993dbd2b826SGanapatrao Kulkarni };
3994dbd2b826SGanapatrao Kulkarni 
3995fdf6e7a8SHanjun Guo static struct its_srat_map *its_srat_maps __initdata;
3996dbd2b826SGanapatrao Kulkarni static int its_in_srat __initdata;
3997dbd2b826SGanapatrao Kulkarni 
3998dbd2b826SGanapatrao Kulkarni static int __init acpi_get_its_numa_node(u32 its_id)
3999dbd2b826SGanapatrao Kulkarni {
4000dbd2b826SGanapatrao Kulkarni 	int i;
4001dbd2b826SGanapatrao Kulkarni 
4002dbd2b826SGanapatrao Kulkarni 	for (i = 0; i < its_in_srat; i++) {
4003dbd2b826SGanapatrao Kulkarni 		if (its_id == its_srat_maps[i].its_id)
4004dbd2b826SGanapatrao Kulkarni 			return its_srat_maps[i].numa_node;
4005dbd2b826SGanapatrao Kulkarni 	}
4006dbd2b826SGanapatrao Kulkarni 	return NUMA_NO_NODE;
4007dbd2b826SGanapatrao Kulkarni }
4008dbd2b826SGanapatrao Kulkarni 
400960574d1eSKeith Busch static int __init gic_acpi_match_srat_its(union acpi_subtable_headers *header,
4010fdf6e7a8SHanjun Guo 					  const unsigned long end)
4011fdf6e7a8SHanjun Guo {
4012fdf6e7a8SHanjun Guo 	return 0;
4013fdf6e7a8SHanjun Guo }
4014fdf6e7a8SHanjun Guo 
401560574d1eSKeith Busch static int __init gic_acpi_parse_srat_its(union acpi_subtable_headers *header,
4016dbd2b826SGanapatrao Kulkarni 			 const unsigned long end)
4017dbd2b826SGanapatrao Kulkarni {
4018dbd2b826SGanapatrao Kulkarni 	int node;
4019dbd2b826SGanapatrao Kulkarni 	struct acpi_srat_gic_its_affinity *its_affinity;
4020dbd2b826SGanapatrao Kulkarni 
4021dbd2b826SGanapatrao Kulkarni 	its_affinity = (struct acpi_srat_gic_its_affinity *)header;
4022dbd2b826SGanapatrao Kulkarni 	if (!its_affinity)
4023dbd2b826SGanapatrao Kulkarni 		return -EINVAL;
4024dbd2b826SGanapatrao Kulkarni 
4025dbd2b826SGanapatrao Kulkarni 	if (its_affinity->header.length < sizeof(*its_affinity)) {
4026dbd2b826SGanapatrao Kulkarni 		pr_err("SRAT: Invalid header length %d in ITS affinity\n",
4027dbd2b826SGanapatrao Kulkarni 			its_affinity->header.length);
4028dbd2b826SGanapatrao Kulkarni 		return -EINVAL;
4029dbd2b826SGanapatrao Kulkarni 	}
4030dbd2b826SGanapatrao Kulkarni 
4031dbd2b826SGanapatrao Kulkarni 	node = acpi_map_pxm_to_node(its_affinity->proximity_domain);
4032dbd2b826SGanapatrao Kulkarni 
4033dbd2b826SGanapatrao Kulkarni 	if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) {
4034dbd2b826SGanapatrao Kulkarni 		pr_err("SRAT: Invalid NUMA node %d in ITS affinity\n", node);
4035dbd2b826SGanapatrao Kulkarni 		return 0;
4036dbd2b826SGanapatrao Kulkarni 	}
4037dbd2b826SGanapatrao Kulkarni 
4038dbd2b826SGanapatrao Kulkarni 	its_srat_maps[its_in_srat].numa_node = node;
4039dbd2b826SGanapatrao Kulkarni 	its_srat_maps[its_in_srat].its_id = its_affinity->its_id;
4040dbd2b826SGanapatrao Kulkarni 	its_in_srat++;
4041dbd2b826SGanapatrao Kulkarni 	pr_info("SRAT: PXM %d -> ITS %d -> Node %d\n",
4042dbd2b826SGanapatrao Kulkarni 		its_affinity->proximity_domain, its_affinity->its_id, node);
4043dbd2b826SGanapatrao Kulkarni 
4044dbd2b826SGanapatrao Kulkarni 	return 0;
4045dbd2b826SGanapatrao Kulkarni }
4046dbd2b826SGanapatrao Kulkarni 
4047dbd2b826SGanapatrao Kulkarni static void __init acpi_table_parse_srat_its(void)
4048dbd2b826SGanapatrao Kulkarni {
4049fdf6e7a8SHanjun Guo 	int count;
4050fdf6e7a8SHanjun Guo 
4051fdf6e7a8SHanjun Guo 	count = acpi_table_parse_entries(ACPI_SIG_SRAT,
4052fdf6e7a8SHanjun Guo 			sizeof(struct acpi_table_srat),
4053fdf6e7a8SHanjun Guo 			ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
4054fdf6e7a8SHanjun Guo 			gic_acpi_match_srat_its, 0);
4055fdf6e7a8SHanjun Guo 	if (count <= 0)
4056fdf6e7a8SHanjun Guo 		return;
4057fdf6e7a8SHanjun Guo 
40586da2ec56SKees Cook 	its_srat_maps = kmalloc_array(count, sizeof(struct its_srat_map),
4059fdf6e7a8SHanjun Guo 				      GFP_KERNEL);
4060fdf6e7a8SHanjun Guo 	if (!its_srat_maps) {
4061fdf6e7a8SHanjun Guo 		pr_warn("SRAT: Failed to allocate memory for its_srat_maps!\n");
4062fdf6e7a8SHanjun Guo 		return;
4063fdf6e7a8SHanjun Guo 	}
4064fdf6e7a8SHanjun Guo 
4065dbd2b826SGanapatrao Kulkarni 	acpi_table_parse_entries(ACPI_SIG_SRAT,
4066dbd2b826SGanapatrao Kulkarni 			sizeof(struct acpi_table_srat),
4067dbd2b826SGanapatrao Kulkarni 			ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
4068dbd2b826SGanapatrao Kulkarni 			gic_acpi_parse_srat_its, 0);
4069dbd2b826SGanapatrao Kulkarni }
4070fdf6e7a8SHanjun Guo 
4071fdf6e7a8SHanjun Guo /* free the its_srat_maps after ITS probing */
4072fdf6e7a8SHanjun Guo static void __init acpi_its_srat_maps_free(void)
4073fdf6e7a8SHanjun Guo {
4074fdf6e7a8SHanjun Guo 	kfree(its_srat_maps);
4075fdf6e7a8SHanjun Guo }
4076dbd2b826SGanapatrao Kulkarni #else
4077dbd2b826SGanapatrao Kulkarni static void __init acpi_table_parse_srat_its(void)	{ }
4078dbd2b826SGanapatrao Kulkarni static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; }
4079fdf6e7a8SHanjun Guo static void __init acpi_its_srat_maps_free(void) { }
4080dbd2b826SGanapatrao Kulkarni #endif
4081dbd2b826SGanapatrao Kulkarni 
408260574d1eSKeith Busch static int __init gic_acpi_parse_madt_its(union acpi_subtable_headers *header,
40833f010cf1STomasz Nowicki 					  const unsigned long end)
40843f010cf1STomasz Nowicki {
40853f010cf1STomasz Nowicki 	struct acpi_madt_generic_translator *its_entry;
40863f010cf1STomasz Nowicki 	struct fwnode_handle *dom_handle;
40873f010cf1STomasz Nowicki 	struct resource res;
40883f010cf1STomasz Nowicki 	int err;
40893f010cf1STomasz Nowicki 
40903f010cf1STomasz Nowicki 	its_entry = (struct acpi_madt_generic_translator *)header;
40913f010cf1STomasz Nowicki 	memset(&res, 0, sizeof(res));
40923f010cf1STomasz Nowicki 	res.start = its_entry->base_address;
40933f010cf1STomasz Nowicki 	res.end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1;
40943f010cf1STomasz Nowicki 	res.flags = IORESOURCE_MEM;
40953f010cf1STomasz Nowicki 
40965778cc77SMarc Zyngier 	dom_handle = irq_domain_alloc_fwnode(&res.start);
40973f010cf1STomasz Nowicki 	if (!dom_handle) {
40983f010cf1STomasz Nowicki 		pr_err("ITS@%pa: Unable to allocate GICv3 ITS domain token\n",
40993f010cf1STomasz Nowicki 		       &res.start);
41003f010cf1STomasz Nowicki 		return -ENOMEM;
41013f010cf1STomasz Nowicki 	}
41023f010cf1STomasz Nowicki 
41038b4282e6SShameer Kolothum 	err = iort_register_domain_token(its_entry->translation_id, res.start,
41048b4282e6SShameer Kolothum 					 dom_handle);
41053f010cf1STomasz Nowicki 	if (err) {
41063f010cf1STomasz Nowicki 		pr_err("ITS@%pa: Unable to register GICv3 ITS domain token (ITS ID %d) to IORT\n",
41073f010cf1STomasz Nowicki 		       &res.start, its_entry->translation_id);
41083f010cf1STomasz Nowicki 		goto dom_err;
41093f010cf1STomasz Nowicki 	}
41103f010cf1STomasz Nowicki 
4111dbd2b826SGanapatrao Kulkarni 	err = its_probe_one(&res, dom_handle,
4112dbd2b826SGanapatrao Kulkarni 			acpi_get_its_numa_node(its_entry->translation_id));
41133f010cf1STomasz Nowicki 	if (!err)
41143f010cf1STomasz Nowicki 		return 0;
41153f010cf1STomasz Nowicki 
41163f010cf1STomasz Nowicki 	iort_deregister_domain_token(its_entry->translation_id);
41173f010cf1STomasz Nowicki dom_err:
41183f010cf1STomasz Nowicki 	irq_domain_free_fwnode(dom_handle);
41193f010cf1STomasz Nowicki 	return err;
41203f010cf1STomasz Nowicki }
41213f010cf1STomasz Nowicki 
41223f010cf1STomasz Nowicki static void __init its_acpi_probe(void)
41233f010cf1STomasz Nowicki {
4124dbd2b826SGanapatrao Kulkarni 	acpi_table_parse_srat_its();
41253f010cf1STomasz Nowicki 	acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
41263f010cf1STomasz Nowicki 			      gic_acpi_parse_madt_its, 0);
4127fdf6e7a8SHanjun Guo 	acpi_its_srat_maps_free();
41283f010cf1STomasz Nowicki }
41293f010cf1STomasz Nowicki #else
41303f010cf1STomasz Nowicki static void __init its_acpi_probe(void) { }
41313f010cf1STomasz Nowicki #endif
41323f010cf1STomasz Nowicki 
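/*
 * Entry point called by the GICv3 driver: discover ITSes from DT or ACPI
 * (MADT, plus SRAT for NUMA affinity), allocate the LPI tables, and set
 * up GICv4 VLPI support when both the ITSes and the redistributors
 * advertise it. A GICv4 setup failure only disables VLPIs, it does not
 * fail the whole initialisation.
 */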
4133db40f0a7STomasz Nowicki int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
4134db40f0a7STomasz Nowicki 		    struct irq_domain *parent_domain)
4135db40f0a7STomasz Nowicki {
4136db40f0a7STomasz Nowicki 	struct device_node *of_node;
41378fff27aeSMarc Zyngier 	struct its_node *its;
41388fff27aeSMarc Zyngier 	bool has_v4 = false;
41398fff27aeSMarc Zyngier 	int err;
4140db40f0a7STomasz Nowicki 
4141db40f0a7STomasz Nowicki 	its_parent = parent_domain;
4142db40f0a7STomasz Nowicki 	of_node = to_of_node(handle);
4143db40f0a7STomasz Nowicki 	if (of_node)
4144db40f0a7STomasz Nowicki 		its_of_probe(of_node);
4145db40f0a7STomasz Nowicki 	else
41463f010cf1STomasz Nowicki 		its_acpi_probe();
4147db40f0a7STomasz Nowicki 
41484c21f3c2SMarc Zyngier 	if (list_empty(&its_nodes)) {
41494c21f3c2SMarc Zyngier 		pr_warn("ITS: No ITS available, not enabling LPIs\n");
41504c21f3c2SMarc Zyngier 		return -ENXIO;
41514c21f3c2SMarc Zyngier 	}
41524c21f3c2SMarc Zyngier 
41534c21f3c2SMarc Zyngier 	gic_rdists = rdists;
415411e37d35SMarc Zyngier 
415511e37d35SMarc Zyngier 	err = allocate_lpi_tables();
41568fff27aeSMarc Zyngier 	if (err)
41578fff27aeSMarc Zyngier 		return err;
41588fff27aeSMarc Zyngier 
41598fff27aeSMarc Zyngier 	list_for_each_entry(its, &its_nodes, entry)
41600dd57fedSMarc Zyngier 		has_v4 |= is_v4(its);
41618fff27aeSMarc Zyngier 
41628fff27aeSMarc Zyngier 	if (has_v4 & rdists->has_vlpis) {
41633d63cb53SMarc Zyngier 		if (its_init_vpe_domain() ||
41643d63cb53SMarc Zyngier 		    its_init_v4(parent_domain, &its_vpe_domain_ops)) {
41658fff27aeSMarc Zyngier 			rdists->has_vlpis = false;
41668fff27aeSMarc Zyngier 			pr_err("ITS: Disabling GICv4 support\n");
41678fff27aeSMarc Zyngier 		}
41688fff27aeSMarc Zyngier 	}
41698fff27aeSMarc Zyngier 
4170dba0bc7bSDerek Basehore 	register_syscore_ops(&its_syscore_ops);
4171dba0bc7bSDerek Basehore 
41728fff27aeSMarc Zyngier 	return 0;
41734c21f3c2SMarc Zyngier }
4174