1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
4 * Author: Marc Zyngier <marc.zyngier@arm.com>
5 */
6
7 #include <linux/acpi.h>
8 #include <linux/acpi_iort.h>
9 #include <linux/bitfield.h>
10 #include <linux/bitmap.h>
11 #include <linux/cpu.h>
12 #include <linux/crash_dump.h>
13 #include <linux/delay.h>
14 #include <linux/efi.h>
15 #include <linux/interrupt.h>
16 #include <linux/iommu.h>
17 #include <linux/iopoll.h>
18 #include <linux/irqdomain.h>
19 #include <linux/list.h>
20 #include <linux/log2.h>
21 #include <linux/memblock.h>
22 #include <linux/mm.h>
23 #include <linux/msi.h>
24 #include <linux/of.h>
25 #include <linux/of_address.h>
26 #include <linux/of_irq.h>
27 #include <linux/of_pci.h>
28 #include <linux/of_platform.h>
29 #include <linux/percpu.h>
30 #include <linux/slab.h>
31 #include <linux/syscore_ops.h>
32
33 #include <linux/irqchip.h>
34 #include <linux/irqchip/arm-gic-v3.h>
35 #include <linux/irqchip/arm-gic-v4.h>
36
37 #include <asm/cputype.h>
38 #include <asm/exception.h>
39
40 #include "irq-gic-common.h"
41
42 #define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0)
43 #define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1)
44 #define ITS_FLAGS_WORKAROUND_CAVIUM_23144 (1ULL << 2)
45 #define ITS_FLAGS_FORCE_NON_SHAREABLE (1ULL << 3)
46
47 #define RD_LOCAL_LPI_ENABLED BIT(0)
48 #define RD_LOCAL_PENDTABLE_PREALLOCATED BIT(1)
49 #define RD_LOCAL_MEMRESERVE_DONE BIT(2)
50
51 static u32 lpi_id_bits;
52
53 /*
54 * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to
55 * deal with (one configuration byte per interrupt). PENDBASE has to
56 * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
57 */
58 #define LPI_NRBITS lpi_id_bits
59 #define LPI_PROPBASE_SZ ALIGN(BIT(LPI_NRBITS), SZ_64K)
60 #define LPI_PENDBASE_SZ ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)
61
62 #define LPI_PROP_DEFAULT_PRIO GICD_INT_DEF_PRI
63
64 /*
65 * Collection structure - just an ID, and a redistributor address to
66 * ping. We use one per CPU as a bag of interrupts assigned to this
67 * CPU.
68 */
69 struct its_collection {
70 u64 target_address;
71 u16 col_id;
72 };
73
74 /*
75 * The ITS_BASER structure - contains memory information, cached
76 * value of BASER register configuration and ITS page size.
77 */
78 struct its_baser {
79 void *base;
80 u64 val;
81 u32 order;
82 u32 psz;
83 };
84
85 struct its_device;
86
87 /*
88 * The ITS structure - contains most of the infrastructure, with the
89 * top-level MSI domain, the command queue, the collections, and the
90 * list of devices writing to it.
91 *
92 * dev_alloc_lock has to be taken for device allocations, while the
93 * spinlock must be taken to parse data structures such as the device
94 * list.
95 */
96 struct its_node {
97 raw_spinlock_t lock;
98 struct mutex dev_alloc_lock;
99 struct list_head entry;
100 void __iomem *base;
101 void __iomem *sgir_base;
102 phys_addr_t phys_base;
103 struct its_cmd_block *cmd_base;
104 struct its_cmd_block *cmd_write;
105 struct its_baser tables[GITS_BASER_NR_REGS];
106 struct its_collection *collections;
107 struct fwnode_handle *fwnode_handle;
108 u64 (*get_msi_base)(struct its_device *its_dev);
109 u64 typer;
110 u64 cbaser_save;
111 u32 ctlr_save;
112 u32 mpidr;
113 struct list_head its_device_list;
114 u64 flags;
115 unsigned long list_nr;
116 int numa_node;
117 unsigned int msi_domain_flags;
118 u32 pre_its_base; /* for Socionext Synquacer */
119 int vlpi_redist_offset;
120 };
121
122 #define is_v4(its) (!!((its)->typer & GITS_TYPER_VLPIS))
123 #define is_v4_1(its) (!!((its)->typer & GITS_TYPER_VMAPP))
124 #define device_ids(its) (FIELD_GET(GITS_TYPER_DEVBITS, (its)->typer) + 1)
125
126 #define ITS_ITT_ALIGN SZ_256
127
128 /* The maximum number of VPEID bits supported by VLPI commands */
129 #define ITS_MAX_VPEID_BITS \
130 ({ \
131 int nvpeid = 16; \
132 if (gic_rdists->has_rvpeid && \
133 gic_rdists->gicd_typer2 & GICD_TYPER2_VIL) \
134 nvpeid = 1 + (gic_rdists->gicd_typer2 & \
135 GICD_TYPER2_VID); \
136 \
137 nvpeid; \
138 })
139 #define ITS_MAX_VPEID (1 << (ITS_MAX_VPEID_BITS))
140
141 /* Convert page order to size in bytes */
142 #define PAGE_ORDER_TO_SIZE(o) (PAGE_SIZE << (o))
143
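/*
 * Per-device LPI bookkeeping: the bitmap of allocated LPIs, the
 * per-event collection mapping, and (for GICv4) the vLPI mappings
 * installed when events are forwarded to a vCPU.
 */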
144 struct event_lpi_map {
145 unsigned long *lpi_map;
146 u16 *col_map;
147 irq_hw_number_t lpi_base;
148 int nr_lpis;
149 raw_spinlock_t vlpi_lock;
150 struct its_vm *vm;
151 struct its_vlpi_map *vlpi_maps;
152 int nr_vlpis;
153 };
154
155 /*
156 * The ITS view of a device - belongs to an ITS, owns an interrupt
157 * translation table, and a list of interrupts. If some of its
158 * LPIs are injected into a guest (GICv4), the event_map.vm field
159 * indicates which one.
160 */
161 struct its_device {
162 struct list_head entry;
163 struct its_node *its;
164 struct event_lpi_map event_map;
165 void *itt;
166 u32 nr_ites;
167 u32 device_id;
168 bool shared;
169 };
170
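/*
 * Proxy device used to generate vPE doorbell interrupts on GICv4.0
 * systems without direct LPI injection.
 */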
171 static struct {
172 raw_spinlock_t lock;
173 struct its_device *dev;
174 struct its_vpe **vpes;
175 int next_victim;
176 } vpe_proxy;
177
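/*
 * Per-CPU count of LPIs targeting each CPU, split between managed and
 * unmanaged interrupts; used to pick the least loaded CPU on affinity
 * changes.
 */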
178 struct cpu_lpi_count {
179 atomic_t managed;
180 atomic_t unmanaged;
181 };
182
183 static DEFINE_PER_CPU(struct cpu_lpi_count, cpu_lpi_count);
184
185 static LIST_HEAD(its_nodes);
186 static DEFINE_RAW_SPINLOCK(its_lock);
187 static struct rdists *gic_rdists;
188 static struct irq_domain *its_parent;
189
190 static unsigned long its_list_map;
191 static u16 vmovp_seq_num;
192 static DEFINE_RAW_SPINLOCK(vmovp_lock);
193
194 static DEFINE_IDA(its_vpeid_ida);
195
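/* Shortcuts to the current (or a given) CPU's redistributor data and MMIO bases */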
196 #define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist))
197 #define gic_data_rdist_cpu(cpu) (per_cpu_ptr(gic_rdists->rdist, cpu))
198 #define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
199 #define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K)
200
201 /*
202 * Skip ITSs that have no vLPIs mapped, unless we're on GICv4.1, as we
203 * always have vSGIs mapped.
204 */
205 static bool require_its_list_vmovp(struct its_vm *vm, struct its_node *its)
206 {
207 return (gic_rdists->has_rvpeid || vm->vlpi_count[its->list_nr]);
208 }
209
210 static bool rdists_support_shareable(void)
211 {
212 return !(gic_rdists->flags & RDIST_FLAGS_FORCE_NON_SHAREABLE);
213 }
214
215 static u16 get_its_list(struct its_vm *vm)
216 {
217 struct its_node *its;
218 unsigned long its_list = 0;
219
220 list_for_each_entry(its, &its_nodes, entry) {
221 if (!is_v4(its))
222 continue;
223
224 if (require_its_list_vmovp(vm, its))
225 __set_bit(its->list_nr, &its_list);
226 }
227
228 return (u16)its_list;
229 }
230
231 static inline u32 its_get_event_id(struct irq_data *d)
232 {
233 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
234 return d->hwirq - its_dev->event_map.lpi_base;
235 }
236
237 static struct its_collection *dev_event_to_col(struct its_device *its_dev,
238 u32 event)
239 {
240 struct its_node *its = its_dev->its;
241
242 return its->collections + its_dev->event_map.col_map[event];
243 }
244
245 static struct its_vlpi_map *dev_event_to_vlpi_map(struct its_device *its_dev,
246 u32 event)
247 {
248 if (WARN_ON_ONCE(event >= its_dev->event_map.nr_lpis))
249 return NULL;
250
251 return &its_dev->event_map.vlpi_maps[event];
252 }
253
254 static struct its_vlpi_map *get_vlpi_map(struct irq_data *d)
255 {
256 if (irqd_is_forwarded_to_vcpu(d)) {
257 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
258 u32 event = its_get_event_id(d);
259
260 return dev_event_to_vlpi_map(its_dev, event);
261 }
262
263 return NULL;
264 }
265
266 static int vpe_to_cpuid_lock(struct its_vpe *vpe, unsigned long *flags)
267 {
268 raw_spin_lock_irqsave(&vpe->vpe_lock, *flags);
269 return vpe->col_idx;
270 }
271
272 static void vpe_to_cpuid_unlock(struct its_vpe *vpe, unsigned long flags)
273 {
274 raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
275 }
276
277 static struct irq_chip its_vpe_irq_chip;
278
279 static int irq_to_cpuid_lock(struct irq_data *d, unsigned long *flags)
280 {
281 struct its_vpe *vpe = NULL;
282 int cpu;
283
284 if (d->chip == &its_vpe_irq_chip) {
285 vpe = irq_data_get_irq_chip_data(d);
286 } else {
287 struct its_vlpi_map *map = get_vlpi_map(d);
288 if (map)
289 vpe = map->vpe;
290 }
291
292 if (vpe) {
293 cpu = vpe_to_cpuid_lock(vpe, flags);
294 } else {
295 /* Physical LPIs are already locked via the irq_desc lock */
296 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
297 cpu = its_dev->event_map.col_map[its_get_event_id(d)];
298 /* Keep GCC quiet... */
299 *flags = 0;
300 }
301
302 return cpu;
303 }
304
305 static void irq_to_cpuid_unlock(struct irq_data *d, unsigned long flags)
306 {
307 struct its_vpe *vpe = NULL;
308
309 if (d->chip == &its_vpe_irq_chip) {
310 vpe = irq_data_get_irq_chip_data(d);
311 } else {
312 struct its_vlpi_map *map = get_vlpi_map(d);
313 if (map)
314 vpe = map->vpe;
315 }
316
317 if (vpe)
318 vpe_to_cpuid_unlock(vpe, flags);
319 }
320
321 static struct its_collection *valid_col(struct its_collection *col)
322 {
323 if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0)))
324 return NULL;
325
326 return col;
327 }
328
329 static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe)
330 {
331 if (valid_col(its->collections + vpe->col_idx))
332 return vpe;
333
334 return NULL;
335 }
336
337 /*
338 * ITS command descriptors - parameters to be encoded in a command
339 * block.
340 */
341 struct its_cmd_desc {
342 union {
343 struct {
344 struct its_device *dev;
345 u32 event_id;
346 } its_inv_cmd;
347
348 struct {
349 struct its_device *dev;
350 u32 event_id;
351 } its_clear_cmd;
352
353 struct {
354 struct its_device *dev;
355 u32 event_id;
356 } its_int_cmd;
357
358 struct {
359 struct its_device *dev;
360 int valid;
361 } its_mapd_cmd;
362
363 struct {
364 struct its_collection *col;
365 int valid;
366 } its_mapc_cmd;
367
368 struct {
369 struct its_device *dev;
370 u32 phys_id;
371 u32 event_id;
372 } its_mapti_cmd;
373
374 struct {
375 struct its_device *dev;
376 struct its_collection *col;
377 u32 event_id;
378 } its_movi_cmd;
379
380 struct {
381 struct its_device *dev;
382 u32 event_id;
383 } its_discard_cmd;
384
385 struct {
386 struct its_collection *col;
387 } its_invall_cmd;
388
389 struct {
390 struct its_vpe *vpe;
391 } its_vinvall_cmd;
392
393 struct {
394 struct its_vpe *vpe;
395 struct its_collection *col;
396 bool valid;
397 } its_vmapp_cmd;
398
399 struct {
400 struct its_vpe *vpe;
401 struct its_device *dev;
402 u32 virt_id;
403 u32 event_id;
404 bool db_enabled;
405 } its_vmapti_cmd;
406
407 struct {
408 struct its_vpe *vpe;
409 struct its_device *dev;
410 u32 event_id;
411 bool db_enabled;
412 } its_vmovi_cmd;
413
414 struct {
415 struct its_vpe *vpe;
416 struct its_collection *col;
417 u16 seq_num;
418 u16 its_list;
419 } its_vmovp_cmd;
420
421 struct {
422 struct its_vpe *vpe;
423 } its_invdb_cmd;
424
425 struct {
426 struct its_vpe *vpe;
427 u8 sgi;
428 u8 priority;
429 bool enable;
430 bool group;
431 bool clear;
432 } its_vsgi_cmd;
433 };
434 };
435
436 /*
437 * The ITS command block, which is what the ITS actually parses.
438 */
439 struct its_cmd_block {
440 union {
441 u64 raw_cmd[4];
442 __le64 raw_cmd_le[4];
443 };
444 };
445
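/*
 * The command queue is a 64kB circular buffer of 32-byte commands,
 * filled by the driver via GITS_CWRITER and drained by the ITS, which
 * reports progress through GITS_CREADR.
 */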
446 #define ITS_CMD_QUEUE_SZ SZ_64K
447 #define ITS_CMD_QUEUE_NR_ENTRIES (ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block))
448
449 typedef struct its_collection *(*its_cmd_builder_t)(struct its_node *,
450 struct its_cmd_block *,
451 struct its_cmd_desc *);
452
453 typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_node *,
454 struct its_cmd_block *,
455 struct its_cmd_desc *);
456
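/* Insert @val into bits [h:l] of the command doubleword pointed to by @raw_cmd */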
457 static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l)
458 {
459 u64 mask = GENMASK_ULL(h, l);
460 *raw_cmd &= ~mask;
461 *raw_cmd |= (val << l) & mask;
462 }
463
464 static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
465 {
466 its_mask_encode(&cmd->raw_cmd[0], cmd_nr, 7, 0);
467 }
468
469 static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
470 {
471 its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32);
472 }
473
474 static void its_encode_event_id(struct its_cmd_block *cmd, u32 id)
475 {
476 its_mask_encode(&cmd->raw_cmd[1], id, 31, 0);
477 }
478
479 static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id)
480 {
481 its_mask_encode(&cmd->raw_cmd[1], phys_id, 63, 32);
482 }
483
484 static void its_encode_size(struct its_cmd_block *cmd, u8 size)
485 {
486 its_mask_encode(&cmd->raw_cmd[1], size, 4, 0);
487 }
488
489 static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
490 {
491 its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 51, 8);
492 }
493
494 static void its_encode_valid(struct its_cmd_block *cmd, int valid)
495 {
496 its_mask_encode(&cmd->raw_cmd[2], !!valid, 63, 63);
497 }
498
499 static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
500 {
501 its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 51, 16);
502 }
503
504 static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
505 {
506 its_mask_encode(&cmd->raw_cmd[2], col, 15, 0);
507 }
508
509 static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid)
510 {
511 its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32);
512 }
513
514 static void its_encode_virt_id(struct its_cmd_block *cmd, u32 virt_id)
515 {
516 its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0);
517 }
518
519 static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id)
520 {
521 its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32);
522 }
523
524 static void its_encode_db_valid(struct its_cmd_block *cmd, bool db_valid)
525 {
526 its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0);
527 }
528
529 static void its_encode_seq_num(struct its_cmd_block *cmd, u16 seq_num)
530 {
531 its_mask_encode(&cmd->raw_cmd[0], seq_num, 47, 32);
532 }
533
534 static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list)
535 {
536 its_mask_encode(&cmd->raw_cmd[1], its_list, 15, 0);
537 }
538
539 static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa)
540 {
541 its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 51, 16);
542 }
543
544 static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
545 {
546 its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0);
547 }
548
549 static void its_encode_vconf_addr(struct its_cmd_block *cmd, u64 vconf_pa)
550 {
551 its_mask_encode(&cmd->raw_cmd[0], vconf_pa >> 16, 51, 16);
552 }
553
554 static void its_encode_alloc(struct its_cmd_block *cmd, bool alloc)
555 {
556 its_mask_encode(&cmd->raw_cmd[0], alloc, 8, 8);
557 }
558
559 static void its_encode_ptz(struct its_cmd_block *cmd, bool ptz)
560 {
561 its_mask_encode(&cmd->raw_cmd[0], ptz, 9, 9);
562 }
563
564 static void its_encode_vmapp_default_db(struct its_cmd_block *cmd,
565 u32 vpe_db_lpi)
566 {
567 its_mask_encode(&cmd->raw_cmd[1], vpe_db_lpi, 31, 0);
568 }
569
570 static void its_encode_vmovp_default_db(struct its_cmd_block *cmd,
571 u32 vpe_db_lpi)
572 {
573 its_mask_encode(&cmd->raw_cmd[3], vpe_db_lpi, 31, 0);
574 }
575
576 static void its_encode_db(struct its_cmd_block *cmd, bool db)
577 {
578 its_mask_encode(&cmd->raw_cmd[2], db, 63, 63);
579 }
580
581 static void its_encode_sgi_intid(struct its_cmd_block *cmd, u8 sgi)
582 {
583 its_mask_encode(&cmd->raw_cmd[0], sgi, 35, 32);
584 }
585
586 static void its_encode_sgi_priority(struct its_cmd_block *cmd, u8 prio)
587 {
588 its_mask_encode(&cmd->raw_cmd[0], prio >> 4, 23, 20);
589 }
590
591 static void its_encode_sgi_group(struct its_cmd_block *cmd, bool grp)
592 {
593 its_mask_encode(&cmd->raw_cmd[0], grp, 10, 10);
594 }
595
596 static void its_encode_sgi_clear(struct its_cmd_block *cmd, bool clr)
597 {
598 its_mask_encode(&cmd->raw_cmd[0], clr, 9, 9);
599 }
600
601 static void its_encode_sgi_enable(struct its_cmd_block *cmd, bool en)
602 {
603 its_mask_encode(&cmd->raw_cmd[0], en, 8, 8);
604 }
605
606 static inline void its_fixup_cmd(struct its_cmd_block *cmd)
607 {
608 /* Let's fixup BE commands */
609 cmd->raw_cmd_le[0] = cpu_to_le64(cmd->raw_cmd[0]);
610 cmd->raw_cmd_le[1] = cpu_to_le64(cmd->raw_cmd[1]);
611 cmd->raw_cmd_le[2] = cpu_to_le64(cmd->raw_cmd[2]);
612 cmd->raw_cmd_le[3] = cpu_to_le64(cmd->raw_cmd[3]);
613 }
614
615 static struct its_collection *its_build_mapd_cmd(struct its_node *its,
616 struct its_cmd_block *cmd,
617 struct its_cmd_desc *desc)
618 {
619 unsigned long itt_addr;
620 u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites);
621
622 itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt);
623 itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN);
624
625 its_encode_cmd(cmd, GITS_CMD_MAPD);
626 its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id);
627 its_encode_size(cmd, size - 1);
628 its_encode_itt(cmd, itt_addr);
629 its_encode_valid(cmd, desc->its_mapd_cmd.valid);
630
631 its_fixup_cmd(cmd);
632
633 return NULL;
634 }
635
636 static struct its_collection *its_build_mapc_cmd(struct its_node *its,
637 struct its_cmd_block *cmd,
638 struct its_cmd_desc *desc)
639 {
640 its_encode_cmd(cmd, GITS_CMD_MAPC);
641 its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
642 its_encode_target(cmd, desc->its_mapc_cmd.col->target_address);
643 its_encode_valid(cmd, desc->its_mapc_cmd.valid);
644
645 its_fixup_cmd(cmd);
646
647 return desc->its_mapc_cmd.col;
648 }
649
650 static struct its_collection *its_build_mapti_cmd(struct its_node *its,
651 struct its_cmd_block *cmd,
652 struct its_cmd_desc *desc)
653 {
654 struct its_collection *col;
655
656 col = dev_event_to_col(desc->its_mapti_cmd.dev,
657 desc->its_mapti_cmd.event_id);
658
659 its_encode_cmd(cmd, GITS_CMD_MAPTI);
660 its_encode_devid(cmd, desc->its_mapti_cmd.dev->device_id);
661 its_encode_event_id(cmd, desc->its_mapti_cmd.event_id);
662 its_encode_phys_id(cmd, desc->its_mapti_cmd.phys_id);
663 its_encode_collection(cmd, col->col_id);
664
665 its_fixup_cmd(cmd);
666
667 return valid_col(col);
668 }
669
670 static struct its_collection *its_build_movi_cmd(struct its_node *its,
671 struct its_cmd_block *cmd,
672 struct its_cmd_desc *desc)
673 {
674 struct its_collection *col;
675
676 col = dev_event_to_col(desc->its_movi_cmd.dev,
677 desc->its_movi_cmd.event_id);
678
679 its_encode_cmd(cmd, GITS_CMD_MOVI);
680 its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id);
681 its_encode_event_id(cmd, desc->its_movi_cmd.event_id);
682 its_encode_collection(cmd, desc->its_movi_cmd.col->col_id);
683
684 its_fixup_cmd(cmd);
685
686 return valid_col(col);
687 }
688
689 static struct its_collection *its_build_discard_cmd(struct its_node *its,
690 struct its_cmd_block *cmd,
691 struct its_cmd_desc *desc)
692 {
693 struct its_collection *col;
694
695 col = dev_event_to_col(desc->its_discard_cmd.dev,
696 desc->its_discard_cmd.event_id);
697
698 its_encode_cmd(cmd, GITS_CMD_DISCARD);
699 its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id);
700 its_encode_event_id(cmd, desc->its_discard_cmd.event_id);
701
702 its_fixup_cmd(cmd);
703
704 return valid_col(col);
705 }
706
707 static struct its_collection *its_build_inv_cmd(struct its_node *its,
708 struct its_cmd_block *cmd,
709 struct its_cmd_desc *desc)
710 {
711 struct its_collection *col;
712
713 col = dev_event_to_col(desc->its_inv_cmd.dev,
714 desc->its_inv_cmd.event_id);
715
716 its_encode_cmd(cmd, GITS_CMD_INV);
717 its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
718 its_encode_event_id(cmd, desc->its_inv_cmd.event_id);
719
720 its_fixup_cmd(cmd);
721
722 return valid_col(col);
723 }
724
725 static struct its_collection *its_build_int_cmd(struct its_node *its,
726 struct its_cmd_block *cmd,
727 struct its_cmd_desc *desc)
728 {
729 struct its_collection *col;
730
731 col = dev_event_to_col(desc->its_int_cmd.dev,
732 desc->its_int_cmd.event_id);
733
734 its_encode_cmd(cmd, GITS_CMD_INT);
735 its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
736 its_encode_event_id(cmd, desc->its_int_cmd.event_id);
737
738 its_fixup_cmd(cmd);
739
740 return valid_col(col);
741 }
742
743 static struct its_collection *its_build_clear_cmd(struct its_node *its,
744 struct its_cmd_block *cmd,
745 struct its_cmd_desc *desc)
746 {
747 struct its_collection *col;
748
749 col = dev_event_to_col(desc->its_clear_cmd.dev,
750 desc->its_clear_cmd.event_id);
751
752 its_encode_cmd(cmd, GITS_CMD_CLEAR);
753 its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
754 its_encode_event_id(cmd, desc->its_clear_cmd.event_id);
755
756 its_fixup_cmd(cmd);
757
758 return valid_col(col);
759 }
760
761 static struct its_collection *its_build_invall_cmd(struct its_node *its,
762 struct its_cmd_block *cmd,
763 struct its_cmd_desc *desc)
764 {
765 its_encode_cmd(cmd, GITS_CMD_INVALL);
766 its_encode_collection(cmd, desc->its_invall_cmd.col->col_id);
767
768 its_fixup_cmd(cmd);
769
770 return desc->its_invall_cmd.col;
771 }
772
773 static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,
774 struct its_cmd_block *cmd,
775 struct its_cmd_desc *desc)
776 {
777 its_encode_cmd(cmd, GITS_CMD_VINVALL);
778 its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id);
779
780 its_fixup_cmd(cmd);
781
782 return valid_vpe(its, desc->its_vinvall_cmd.vpe);
783 }
784
785 static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
786 struct its_cmd_block *cmd,
787 struct its_cmd_desc *desc)
788 {
789 unsigned long vpt_addr, vconf_addr;
790 u64 target;
791 bool alloc;
792
793 its_encode_cmd(cmd, GITS_CMD_VMAPP);
794 its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id);
795 its_encode_valid(cmd, desc->its_vmapp_cmd.valid);
796
797 if (!desc->its_vmapp_cmd.valid) {
798 if (is_v4_1(its)) {
799 alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count);
800 its_encode_alloc(cmd, alloc);
801 }
802
803 goto out;
804 }
805
806 vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
807 target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset;
808
809 its_encode_target(cmd, target);
810 its_encode_vpt_addr(cmd, vpt_addr);
811 its_encode_vpt_size(cmd, LPI_NRBITS - 1);
812
813 if (!is_v4_1(its))
814 goto out;
815
816 vconf_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->its_vm->vprop_page));
817
818 alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count);
819
820 its_encode_alloc(cmd, alloc);
821
822 /*
823 * GICv4.1 provides a way to get the VLPI state, which needs the vPE
824 * to be unmapped first, and in this case, we may remap the vPE
825 * back while the VPT is not empty. So we can't assume that the
826 * VPT is empty on map. This is why we never advertise PTZ.
827 */
828 its_encode_ptz(cmd, false);
829 its_encode_vconf_addr(cmd, vconf_addr);
830 its_encode_vmapp_default_db(cmd, desc->its_vmapp_cmd.vpe->vpe_db_lpi);
831
832 out:
833 its_fixup_cmd(cmd);
834
835 return valid_vpe(its, desc->its_vmapp_cmd.vpe);
836 }
837
838 static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
839 struct its_cmd_block *cmd,
840 struct its_cmd_desc *desc)
841 {
842 u32 db;
843
844 if (!is_v4_1(its) && desc->its_vmapti_cmd.db_enabled)
845 db = desc->its_vmapti_cmd.vpe->vpe_db_lpi;
846 else
847 db = 1023;
848
849 its_encode_cmd(cmd, GITS_CMD_VMAPTI);
850 its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id);
851 its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id);
852 its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id);
853 its_encode_db_phys_id(cmd, db);
854 its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id);
855
856 its_fixup_cmd(cmd);
857
858 return valid_vpe(its, desc->its_vmapti_cmd.vpe);
859 }
860
861 static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
862 struct its_cmd_block *cmd,
863 struct its_cmd_desc *desc)
864 {
865 u32 db;
866
867 if (!is_v4_1(its) && desc->its_vmovi_cmd.db_enabled)
868 db = desc->its_vmovi_cmd.vpe->vpe_db_lpi;
869 else
870 db = 1023;
871
872 its_encode_cmd(cmd, GITS_CMD_VMOVI);
873 its_encode_devid(cmd, desc->its_vmovi_cmd.dev->device_id);
874 its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id);
875 its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id);
876 its_encode_db_phys_id(cmd, db);
877 its_encode_db_valid(cmd, true);
878
879 its_fixup_cmd(cmd);
880
881 return valid_vpe(its, desc->its_vmovi_cmd.vpe);
882 }
883
884 static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
885 struct its_cmd_block *cmd,
886 struct its_cmd_desc *desc)
887 {
888 u64 target;
889
890 target = desc->its_vmovp_cmd.col->target_address + its->vlpi_redist_offset;
891 its_encode_cmd(cmd, GITS_CMD_VMOVP);
892 its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num);
893 its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list);
894 its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id);
895 its_encode_target(cmd, target);
896
897 if (is_v4_1(its)) {
898 its_encode_db(cmd, true);
899 its_encode_vmovp_default_db(cmd, desc->its_vmovp_cmd.vpe->vpe_db_lpi);
900 }
901
902 its_fixup_cmd(cmd);
903
904 return valid_vpe(its, desc->its_vmovp_cmd.vpe);
905 }
906
907 static struct its_vpe *its_build_vinv_cmd(struct its_node *its,
908 struct its_cmd_block *cmd,
909 struct its_cmd_desc *desc)
910 {
911 struct its_vlpi_map *map;
912
913 map = dev_event_to_vlpi_map(desc->its_inv_cmd.dev,
914 desc->its_inv_cmd.event_id);
915
916 its_encode_cmd(cmd, GITS_CMD_INV);
917 its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
918 its_encode_event_id(cmd, desc->its_inv_cmd.event_id);
919
920 its_fixup_cmd(cmd);
921
922 return valid_vpe(its, map->vpe);
923 }
924
925 static struct its_vpe *its_build_vint_cmd(struct its_node *its,
926 struct its_cmd_block *cmd,
927 struct its_cmd_desc *desc)
928 {
929 struct its_vlpi_map *map;
930
931 map = dev_event_to_vlpi_map(desc->its_int_cmd.dev,
932 desc->its_int_cmd.event_id);
933
934 its_encode_cmd(cmd, GITS_CMD_INT);
935 its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
936 its_encode_event_id(cmd, desc->its_int_cmd.event_id);
937
938 its_fixup_cmd(cmd);
939
940 return valid_vpe(its, map->vpe);
941 }
942
943 static struct its_vpe *its_build_vclear_cmd(struct its_node *its,
944 struct its_cmd_block *cmd,
945 struct its_cmd_desc *desc)
946 {
947 struct its_vlpi_map *map;
948
949 map = dev_event_to_vlpi_map(desc->its_clear_cmd.dev,
950 desc->its_clear_cmd.event_id);
951
952 its_encode_cmd(cmd, GITS_CMD_CLEAR);
953 its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
954 its_encode_event_id(cmd, desc->its_clear_cmd.event_id);
955
956 its_fixup_cmd(cmd);
957
958 return valid_vpe(its, map->vpe);
959 }
960
961 static struct its_vpe *its_build_invdb_cmd(struct its_node *its,
962 struct its_cmd_block *cmd,
963 struct its_cmd_desc *desc)
964 {
965 if (WARN_ON(!is_v4_1(its)))
966 return NULL;
967
968 its_encode_cmd(cmd, GITS_CMD_INVDB);
969 its_encode_vpeid(cmd, desc->its_invdb_cmd.vpe->vpe_id);
970
971 its_fixup_cmd(cmd);
972
973 return valid_vpe(its, desc->its_invdb_cmd.vpe);
974 }
975
976 static struct its_vpe *its_build_vsgi_cmd(struct its_node *its,
977 struct its_cmd_block *cmd,
978 struct its_cmd_desc *desc)
979 {
980 if (WARN_ON(!is_v4_1(its)))
981 return NULL;
982
983 its_encode_cmd(cmd, GITS_CMD_VSGI);
984 its_encode_vpeid(cmd, desc->its_vsgi_cmd.vpe->vpe_id);
985 its_encode_sgi_intid(cmd, desc->its_vsgi_cmd.sgi);
986 its_encode_sgi_priority(cmd, desc->its_vsgi_cmd.priority);
987 its_encode_sgi_group(cmd, desc->its_vsgi_cmd.group);
988 its_encode_sgi_clear(cmd, desc->its_vsgi_cmd.clear);
989 its_encode_sgi_enable(cmd, desc->its_vsgi_cmd.enable);
990
991 its_fixup_cmd(cmd);
992
993 return valid_vpe(its, desc->its_vsgi_cmd.vpe);
994 }
995
996 static u64 its_cmd_ptr_to_offset(struct its_node *its,
997 struct its_cmd_block *ptr)
998 {
999 return (ptr - its->cmd_base) * sizeof(*ptr);
1000 }
1001
1002 static int its_queue_full(struct its_node *its)
1003 {
1004 int widx;
1005 int ridx;
1006
1007 widx = its->cmd_write - its->cmd_base;
1008 ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block);
1009
1010 /* This is incredibly unlikely to happen, unless the ITS locks up. */
1011 if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx)
1012 return 1;
1013
1014 return 0;
1015 }
1016
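/*
 * Grab the next free slot in the command queue, busy-waiting for up to
 * about a second if the queue is momentarily full.
 */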
1017 static struct its_cmd_block *its_allocate_entry(struct its_node *its)
1018 {
1019 struct its_cmd_block *cmd;
1020 u32 count = 1000000; /* 1s! */
1021
1022 while (its_queue_full(its)) {
1023 count--;
1024 if (!count) {
1025 pr_err_ratelimited("ITS queue not draining\n");
1026 return NULL;
1027 }
1028 cpu_relax();
1029 udelay(1);
1030 }
1031
1032 cmd = its->cmd_write++;
1033
1034 /* Handle queue wrapping */
1035 if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES))
1036 its->cmd_write = its->cmd_base;
1037
1038 /* Clear command */
1039 cmd->raw_cmd[0] = 0;
1040 cmd->raw_cmd[1] = 0;
1041 cmd->raw_cmd[2] = 0;
1042 cmd->raw_cmd[3] = 0;
1043
1044 return cmd;
1045 }
1046
1047 static struct its_cmd_block *its_post_commands(struct its_node *its)
1048 {
1049 u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write);
1050
1051 writel_relaxed(wr, its->base + GITS_CWRITER);
1052
1053 return its->cmd_write;
1054 }
1055
1056 static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
1057 {
1058 /*
1059 * Make sure the commands written to memory are observable by
1060 * the ITS.
1061 */
1062 if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
1063 gic_flush_dcache_to_poc(cmd, sizeof(*cmd));
1064 else
1065 dsb(ishst);
1066 }
1067
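/*
 * Poll GITS_CREADR until the ITS has consumed every command up to @to,
 * taking queue wrap-around into account.
 */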
1068 static int its_wait_for_range_completion(struct its_node *its,
1069 u64 prev_idx,
1070 struct its_cmd_block *to)
1071 {
1072 u64 rd_idx, to_idx, linear_idx;
1073 u32 count = 1000000; /* 1s! */
1074
1075 /* Linearize to_idx if the command set has wrapped around */
1076 to_idx = its_cmd_ptr_to_offset(its, to);
1077 if (to_idx < prev_idx)
1078 to_idx += ITS_CMD_QUEUE_SZ;
1079
1080 linear_idx = prev_idx;
1081
1082 while (1) {
1083 s64 delta;
1084
1085 rd_idx = readl_relaxed(its->base + GITS_CREADR);
1086
1087 /*
1088 * Compute the read pointer progress, taking the
1089 * potential wrap-around into account.
1090 */
1091 delta = rd_idx - prev_idx;
1092 if (rd_idx < prev_idx)
1093 delta += ITS_CMD_QUEUE_SZ;
1094
1095 linear_idx += delta;
1096 if (linear_idx >= to_idx)
1097 break;
1098
1099 count--;
1100 if (!count) {
1101 pr_err_ratelimited("ITS queue timeout (%llu %llu)\n",
1102 to_idx, linear_idx);
1103 return -1;
1104 }
1105 prev_idx = rd_idx;
1106 cpu_relax();
1107 udelay(1);
1108 }
1109
1110 return 0;
1111 }
1112
1113 /* Warning, macro hell follows */
1114 #define BUILD_SINGLE_CMD_FUNC(name, buildtype, synctype, buildfn) \
1115 void name(struct its_node *its, \
1116 buildtype builder, \
1117 struct its_cmd_desc *desc) \
1118 { \
1119 struct its_cmd_block *cmd, *sync_cmd, *next_cmd; \
1120 synctype *sync_obj; \
1121 unsigned long flags; \
1122 u64 rd_idx; \
1123 \
1124 raw_spin_lock_irqsave(&its->lock, flags); \
1125 \
1126 cmd = its_allocate_entry(its); \
1127 if (!cmd) { /* We're soooooo screwed... */ \
1128 raw_spin_unlock_irqrestore(&its->lock, flags); \
1129 return; \
1130 } \
1131 sync_obj = builder(its, cmd, desc); \
1132 its_flush_cmd(its, cmd); \
1133 \
1134 if (sync_obj) { \
1135 sync_cmd = its_allocate_entry(its); \
1136 if (!sync_cmd) \
1137 goto post; \
1138 \
1139 buildfn(its, sync_cmd, sync_obj); \
1140 its_flush_cmd(its, sync_cmd); \
1141 } \
1142 \
1143 post: \
1144 rd_idx = readl_relaxed(its->base + GITS_CREADR); \
1145 next_cmd = its_post_commands(its); \
1146 raw_spin_unlock_irqrestore(&its->lock, flags); \
1147 \
1148 if (its_wait_for_range_completion(its, rd_idx, next_cmd)) \
1149 pr_err_ratelimited("ITS cmd %ps failed\n", builder); \
1150 }
1151
1152 static void its_build_sync_cmd(struct its_node *its,
1153 struct its_cmd_block *sync_cmd,
1154 struct its_collection *sync_col)
1155 {
1156 its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
1157 its_encode_target(sync_cmd, sync_col->target_address);
1158
1159 its_fixup_cmd(sync_cmd);
1160 }
1161
1162 static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t,
1163 struct its_collection, its_build_sync_cmd)
1164
1165 static void its_build_vsync_cmd(struct its_node *its,
1166 struct its_cmd_block *sync_cmd,
1167 struct its_vpe *sync_vpe)
1168 {
1169 its_encode_cmd(sync_cmd, GITS_CMD_VSYNC);
1170 its_encode_vpeid(sync_cmd, sync_vpe->vpe_id);
1171
1172 its_fixup_cmd(sync_cmd);
1173 }
1174
1175 static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t,
1176 struct its_vpe, its_build_vsync_cmd)
1177
1178 static void its_send_int(struct its_device *dev, u32 event_id)
1179 {
1180 struct its_cmd_desc desc;
1181
1182 desc.its_int_cmd.dev = dev;
1183 desc.its_int_cmd.event_id = event_id;
1184
1185 its_send_single_command(dev->its, its_build_int_cmd, &desc);
1186 }
1187
1188 static void its_send_clear(struct its_device *dev, u32 event_id)
1189 {
1190 struct its_cmd_desc desc;
1191
1192 desc.its_clear_cmd.dev = dev;
1193 desc.its_clear_cmd.event_id = event_id;
1194
1195 its_send_single_command(dev->its, its_build_clear_cmd, &desc);
1196 }
1197
1198 static void its_send_inv(struct its_device *dev, u32 event_id)
1199 {
1200 struct its_cmd_desc desc;
1201
1202 desc.its_inv_cmd.dev = dev;
1203 desc.its_inv_cmd.event_id = event_id;
1204
1205 its_send_single_command(dev->its, its_build_inv_cmd, &desc);
1206 }
1207
1208 static void its_send_mapd(struct its_device *dev, int valid)
1209 {
1210 struct its_cmd_desc desc;
1211
1212 desc.its_mapd_cmd.dev = dev;
1213 desc.its_mapd_cmd.valid = !!valid;
1214
1215 its_send_single_command(dev->its, its_build_mapd_cmd, &desc);
1216 }
1217
1218 static void its_send_mapc(struct its_node *its, struct its_collection *col,
1219 int valid)
1220 {
1221 struct its_cmd_desc desc;
1222
1223 desc.its_mapc_cmd.col = col;
1224 desc.its_mapc_cmd.valid = !!valid;
1225
1226 its_send_single_command(its, its_build_mapc_cmd, &desc);
1227 }
1228
1229 static void its_send_mapti(struct its_device *dev, u32 irq_id, u32 id)
1230 {
1231 struct its_cmd_desc desc;
1232
1233 desc.its_mapti_cmd.dev = dev;
1234 desc.its_mapti_cmd.phys_id = irq_id;
1235 desc.its_mapti_cmd.event_id = id;
1236
1237 its_send_single_command(dev->its, its_build_mapti_cmd, &desc);
1238 }
1239
1240 static void its_send_movi(struct its_device *dev,
1241 struct its_collection *col, u32 id)
1242 {
1243 struct its_cmd_desc desc;
1244
1245 desc.its_movi_cmd.dev = dev;
1246 desc.its_movi_cmd.col = col;
1247 desc.its_movi_cmd.event_id = id;
1248
1249 its_send_single_command(dev->its, its_build_movi_cmd, &desc);
1250 }
1251
1252 static void its_send_discard(struct its_device *dev, u32 id)
1253 {
1254 struct its_cmd_desc desc;
1255
1256 desc.its_discard_cmd.dev = dev;
1257 desc.its_discard_cmd.event_id = id;
1258
1259 its_send_single_command(dev->its, its_build_discard_cmd, &desc);
1260 }
1261
1262 static void its_send_invall(struct its_node *its, struct its_collection *col)
1263 {
1264 struct its_cmd_desc desc;
1265
1266 desc.its_invall_cmd.col = col;
1267
1268 its_send_single_command(its, its_build_invall_cmd, &desc);
1269 }
1270
1271 static void its_send_vmapti(struct its_device *dev, u32 id)
1272 {
1273 struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id);
1274 struct its_cmd_desc desc;
1275
1276 desc.its_vmapti_cmd.vpe = map->vpe;
1277 desc.its_vmapti_cmd.dev = dev;
1278 desc.its_vmapti_cmd.virt_id = map->vintid;
1279 desc.its_vmapti_cmd.event_id = id;
1280 desc.its_vmapti_cmd.db_enabled = map->db_enabled;
1281
1282 its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc);
1283 }
1284
1285 static void its_send_vmovi(struct its_device *dev, u32 id)
1286 {
1287 struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id);
1288 struct its_cmd_desc desc;
1289
1290 desc.its_vmovi_cmd.vpe = map->vpe;
1291 desc.its_vmovi_cmd.dev = dev;
1292 desc.its_vmovi_cmd.event_id = id;
1293 desc.its_vmovi_cmd.db_enabled = map->db_enabled;
1294
1295 its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc);
1296 }
1297
1298 static void its_send_vmapp(struct its_node *its,
1299 struct its_vpe *vpe, bool valid)
1300 {
1301 struct its_cmd_desc desc;
1302
1303 desc.its_vmapp_cmd.vpe = vpe;
1304 desc.its_vmapp_cmd.valid = valid;
1305 desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];
1306
1307 its_send_single_vcommand(its, its_build_vmapp_cmd, &desc);
1308 }
1309
1310 static void its_send_vmovp(struct its_vpe *vpe)
1311 {
1312 struct its_cmd_desc desc = {};
1313 struct its_node *its;
1314 unsigned long flags;
1315 int col_id = vpe->col_idx;
1316
1317 desc.its_vmovp_cmd.vpe = vpe;
1318
1319 if (!its_list_map) {
1320 its = list_first_entry(&its_nodes, struct its_node, entry);
1321 desc.its_vmovp_cmd.col = &its->collections[col_id];
1322 its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
1323 return;
1324 }
1325
1326 /*
1327 * Yet another marvel of the architecture. If using the
1328 * its_list "feature", we need to make sure that all ITSs
1329 * receive all VMOVP commands in the same order. The only way
1330 * to guarantee this is to make vmovp a serialization point.
1331 *
1332 * Wall <-- Head.
1333 */
1334 raw_spin_lock_irqsave(&vmovp_lock, flags);
1335
1336 desc.its_vmovp_cmd.seq_num = vmovp_seq_num++;
1337 desc.its_vmovp_cmd.its_list = get_its_list(vpe->its_vm);
1338
1339 /* Emit VMOVPs */
1340 list_for_each_entry(its, &its_nodes, entry) {
1341 if (!is_v4(its))
1342 continue;
1343
1344 if (!require_its_list_vmovp(vpe->its_vm, its))
1345 continue;
1346
1347 desc.its_vmovp_cmd.col = &its->collections[col_id];
1348 its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
1349 }
1350
1351 raw_spin_unlock_irqrestore(&vmovp_lock, flags);
1352 }
1353
1354 static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe)
1355 {
1356 struct its_cmd_desc desc;
1357
1358 desc.its_vinvall_cmd.vpe = vpe;
1359 its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
1360 }
1361
1362 static void its_send_vinv(struct its_device *dev, u32 event_id)
1363 {
1364 struct its_cmd_desc desc;
1365
1366 /*
1367 * There is no real VINV command. This is just a normal INV,
1368 * with a VSYNC instead of a SYNC.
1369 */
1370 desc.its_inv_cmd.dev = dev;
1371 desc.its_inv_cmd.event_id = event_id;
1372
1373 its_send_single_vcommand(dev->its, its_build_vinv_cmd, &desc);
1374 }
1375
1376 static void its_send_vint(struct its_device *dev, u32 event_id)
1377 {
1378 struct its_cmd_desc desc;
1379
1380 /*
1381 * There is no real VINT command. This is just a normal INT,
1382 * with a VSYNC instead of a SYNC.
1383 */
1384 desc.its_int_cmd.dev = dev;
1385 desc.its_int_cmd.event_id = event_id;
1386
1387 its_send_single_vcommand(dev->its, its_build_vint_cmd, &desc);
1388 }
1389
1390 static void its_send_vclear(struct its_device *dev, u32 event_id)
1391 {
1392 struct its_cmd_desc desc;
1393
1394 /*
1395 * There is no real VCLEAR command. This is just a normal CLEAR,
1396 * with a VSYNC instead of a SYNC.
1397 */
1398 desc.its_clear_cmd.dev = dev;
1399 desc.its_clear_cmd.event_id = event_id;
1400
1401 its_send_single_vcommand(dev->its, its_build_vclear_cmd, &desc);
1402 }
1403
1404 static void its_send_invdb(struct its_node *its, struct its_vpe *vpe)
1405 {
1406 struct its_cmd_desc desc;
1407
1408 desc.its_invdb_cmd.vpe = vpe;
1409 its_send_single_vcommand(its, its_build_invdb_cmd, &desc);
1410 }
1411
1412 /*
1413 * irqchip functions - assumes MSI, mostly.
1414 */
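/*
 * Update the configuration byte of the LPI (or vLPI) behind @d: clear
 * the bits in @clr, set the bits in @set, and make the change visible
 * to the redistributors.
 */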
1415 static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
1416 {
1417 struct its_vlpi_map *map = get_vlpi_map(d);
1418 irq_hw_number_t hwirq;
1419 void *va;
1420 u8 *cfg;
1421
1422 if (map) {
1423 va = page_address(map->vm->vprop_page);
1424 hwirq = map->vintid;
1425
1426 /* Remember the updated property */
1427 map->properties &= ~clr;
1428 map->properties |= set | LPI_PROP_GROUP1;
1429 } else {
1430 va = gic_rdists->prop_table_va;
1431 hwirq = d->hwirq;
1432 }
1433
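/* LPI INTIDs start at 8192, so the config byte lives at offset (hwirq - 8192) */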
1434 cfg = va + hwirq - 8192;
1435 *cfg &= ~clr;
1436 *cfg |= set | LPI_PROP_GROUP1;
1437
1438 /*
1439 * Make the above write visible to the redistributors.
1440 * And yes, we're flushing exactly: One. Single. Byte.
1441 * Humpf...
1442 */
1443 if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
1444 gic_flush_dcache_to_poc(cfg, sizeof(*cfg));
1445 else
1446 dsb(ishst);
1447 }
1448
1449 static void wait_for_syncr(void __iomem *rdbase)
1450 {
1451 while (readl_relaxed(rdbase + GICR_SYNCR) & 1)
1452 cpu_relax();
1453 }
1454
1455 static void __direct_lpi_inv(struct irq_data *d, u64 val)
1456 {
1457 void __iomem *rdbase;
1458 unsigned long flags;
1459 int cpu;
1460
1461 /* Target the redistributor this LPI is currently routed to */
1462 cpu = irq_to_cpuid_lock(d, &flags);
1463 raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
1464
1465 rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
1466 gic_write_lpir(val, rdbase + GICR_INVLPIR);
1467 wait_for_syncr(rdbase);
1468
1469 raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
1470 irq_to_cpuid_unlock(d, flags);
1471 }
1472
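/* Invalidate the LPI directly via GICR_INVLPIR, bypassing the ITS command queue */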
1473 static void direct_lpi_inv(struct irq_data *d)
1474 {
1475 struct its_vlpi_map *map = get_vlpi_map(d);
1476 u64 val;
1477
1478 if (map) {
1479 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1480
1481 WARN_ON(!is_v4_1(its_dev->its));
1482
1483 val = GICR_INVLPIR_V;
1484 val |= FIELD_PREP(GICR_INVLPIR_VPEID, map->vpe->vpe_id);
1485 val |= FIELD_PREP(GICR_INVLPIR_INTID, map->vintid);
1486 } else {
1487 val = d->hwirq;
1488 }
1489
1490 __direct_lpi_inv(d, val);
1491 }
1492
1493 static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
1494 {
1495 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1496
1497 lpi_write_config(d, clr, set);
1498 if (gic_rdists->has_direct_lpi &&
1499 (is_v4_1(its_dev->its) || !irqd_is_forwarded_to_vcpu(d)))
1500 direct_lpi_inv(d);
1501 else if (!irqd_is_forwarded_to_vcpu(d))
1502 its_send_inv(its_dev, its_get_event_id(d));
1503 else
1504 its_send_vinv(its_dev, its_get_event_id(d));
1505 }
1506
1507 static void its_vlpi_set_doorbell(struct irq_data *d, bool enable)
1508 {
1509 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1510 u32 event = its_get_event_id(d);
1511 struct its_vlpi_map *map;
1512
1513 /*
1514 * GICv4.1 does away with the per-LPI nonsense, nothing to do
1515 * here.
1516 */
1517 if (is_v4_1(its_dev->its))
1518 return;
1519
1520 map = dev_event_to_vlpi_map(its_dev, event);
1521
1522 if (map->db_enabled == enable)
1523 return;
1524
1525 map->db_enabled = enable;
1526
1527 /*
1528 * More fun with the architecture:
1529 *
1530 * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI
1531 * value or to 1023, depending on the enable bit. But that
1532 * would be issuing a mapping for an /existing/ DevID+EventID
1533 * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI
1534 * to the /same/ vPE, using this opportunity to adjust the
1535 * doorbell. Mouahahahaha. We loves it, Precious.
1536 */
1537 its_send_vmovi(its_dev, event);
1538 }
1539
1540 static void its_mask_irq(struct irq_data *d)
1541 {
1542 if (irqd_is_forwarded_to_vcpu(d))
1543 its_vlpi_set_doorbell(d, false);
1544
1545 lpi_update_config(d, LPI_PROP_ENABLED, 0);
1546 }
1547
1548 static void its_unmask_irq(struct irq_data *d)
1549 {
1550 if (irqd_is_forwarded_to_vcpu(d))
1551 its_vlpi_set_doorbell(d, true);
1552
1553 lpi_update_config(d, 0, LPI_PROP_ENABLED);
1554 }
1555
1556 static __maybe_unused u32 its_read_lpi_count(struct irq_data *d, int cpu)
1557 {
1558 if (irqd_affinity_is_managed(d))
1559 return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
1560
1561 return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
1562 }
1563
1564 static void its_inc_lpi_count(struct irq_data *d, int cpu)
1565 {
1566 if (irqd_affinity_is_managed(d))
1567 atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
1568 else
1569 atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
1570 }
1571
1572 static void its_dec_lpi_count(struct irq_data *d, int cpu)
1573 {
1574 if (irqd_affinity_is_managed(d))
1575 atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
1576 else
1577 atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
1578 }
1579
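/*
 * Pick the CPU in @cpu_mask with the fewest LPIs of the matching
 * (managed/unmanaged) class currently targeting it.
 */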
1580 static unsigned int cpumask_pick_least_loaded(struct irq_data *d,
1581 const struct cpumask *cpu_mask)
1582 {
1583 unsigned int cpu = nr_cpu_ids, tmp;
1584 int count = S32_MAX;
1585
1586 for_each_cpu(tmp, cpu_mask) {
1587 int this_count = its_read_lpi_count(d, tmp);
1588 if (this_count < count) {
1589 cpu = tmp;
1590 count = this_count;
1591 }
1592 }
1593
1594 return cpu;
1595 }
1596
1597 /*
1598 * As suggested by Thomas Gleixner in:
1599 * https://lore.kernel.org/r/87h80q2aoc.fsf@nanos.tec.linutronix.de
1600 */
1601 static int its_select_cpu(struct irq_data *d,
1602 const struct cpumask *aff_mask)
1603 {
1604 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1605 static DEFINE_RAW_SPINLOCK(tmpmask_lock);
1606 static struct cpumask __tmpmask;
1607 struct cpumask *tmpmask;
1608 unsigned long flags;
1609 int cpu, node;
1610 node = its_dev->its->numa_node;
1611 tmpmask = &__tmpmask;
1612
1613 raw_spin_lock_irqsave(&tmpmask_lock, flags);
1614
1615 if (!irqd_affinity_is_managed(d)) {
1616 /* First try the NUMA node */
1617 if (node != NUMA_NO_NODE) {
1618 /*
1619 * Try the intersection of the affinity mask and the
1620 * node mask (and the online mask, just to be safe).
1621 */
1622 cpumask_and(tmpmask, cpumask_of_node(node), aff_mask);
1623 cpumask_and(tmpmask, tmpmask, cpu_online_mask);
1624
1625 /*
1626 * Ideally, we would check if the mask is empty, and
1627 * try again on the full node here.
1628 *
1629 * But it turns out that the way ACPI describes the
1630 * affinity for ITSs only deals with memory, and
1631 * not target CPUs, so it cannot describe a single
1632 * ITS placed next to two NUMA nodes.
1633 *
1634 * Instead, just fallback on the online mask. This
1635 * diverges from Thomas' suggestion above.
1636 */
1637 cpu = cpumask_pick_least_loaded(d, tmpmask);
1638 if (cpu < nr_cpu_ids)
1639 goto out;
1640
1641 /* If we can't cross sockets, give up */
1642 if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144))
1643 goto out;
1644
1645 /* If the above failed, expand the search */
1646 }
1647
1648 /* Try the intersection of the affinity and online masks */
1649 cpumask_and(tmpmask, aff_mask, cpu_online_mask);
1650
1651 /* If that doesn't fly, the online mask is the last resort */
1652 if (cpumask_empty(tmpmask))
1653 cpumask_copy(tmpmask, cpu_online_mask);
1654
1655 cpu = cpumask_pick_least_loaded(d, tmpmask);
1656 } else {
1657 cpumask_copy(tmpmask, aff_mask);
1658
1659 /* If we cannot cross sockets, limit the search to that node */
1660 if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) &&
1661 node != NUMA_NO_NODE)
1662 cpumask_and(tmpmask, tmpmask, cpumask_of_node(node));
1663
1664 cpu = cpumask_pick_least_loaded(d, tmpmask);
1665 }
1666 out:
1667 raw_spin_unlock_irqrestore(&tmpmask_lock, flags);
1668
1669 pr_debug("IRQ%d -> %*pbl CPU%d\n", d->irq, cpumask_pr_args(aff_mask), cpu);
1670 return cpu;
1671 }
1672
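/*
 * Move an LPI to a new CPU: pick a target with its_select_cpu() (or the
 * least loaded CPU of @mask_val when forced), issue a MOVI, and
 * rebalance the per-CPU LPI counters.
 */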
1673 static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
1674 bool force)
1675 {
1676 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1677 struct its_collection *target_col;
1678 u32 id = its_get_event_id(d);
1679 int cpu, prev_cpu;
1680
1681 /* A forwarded interrupt should use irq_set_vcpu_affinity */
1682 if (irqd_is_forwarded_to_vcpu(d))
1683 return -EINVAL;
1684
1685 prev_cpu = its_dev->event_map.col_map[id];
1686 its_dec_lpi_count(d, prev_cpu);
1687
1688 if (!force)
1689 cpu = its_select_cpu(d, mask_val);
1690 else
1691 cpu = cpumask_pick_least_loaded(d, mask_val);
1692
1693 if (cpu < 0 || cpu >= nr_cpu_ids)
1694 goto err;
1695
1696 /* don't set the affinity when the target cpu is the same as the current one */
1697 if (cpu != prev_cpu) {
1698 target_col = &its_dev->its->collections[cpu];
1699 its_send_movi(its_dev, target_col, id);
1700 its_dev->event_map.col_map[id] = cpu;
1701 irq_data_update_effective_affinity(d, cpumask_of(cpu));
1702 }
1703
1704 its_inc_lpi_count(d, cpu);
1705
1706 return IRQ_SET_MASK_OK_DONE;
1707
1708 err:
1709 its_inc_lpi_count(d, prev_cpu);
1710 return -EINVAL;
1711 }
1712
1713 static u64 its_irq_get_msi_base(struct its_device *its_dev)
1714 {
1715 struct its_node *its = its_dev->its;
1716
1717 return its->phys_base + GITS_TRANSLATER;
1718 }
1719
1720 static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
1721 {
1722 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1723 struct its_node *its;
1724 u64 addr;
1725
1726 its = its_dev->its;
1727 addr = its->get_msi_base(its_dev);
1728
1729 msg->address_lo = lower_32_bits(addr);
1730 msg->address_hi = upper_32_bits(addr);
1731 msg->data = its_get_event_id(d);
1732
1733 iommu_dma_compose_msi_msg(irq_data_get_msi_desc(d), msg);
1734 }
1735
1736 static int its_irq_set_irqchip_state(struct irq_data *d,
1737 enum irqchip_irq_state which,
1738 bool state)
1739 {
1740 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1741 u32 event = its_get_event_id(d);
1742
1743 if (which != IRQCHIP_STATE_PENDING)
1744 return -EINVAL;
1745
1746 if (irqd_is_forwarded_to_vcpu(d)) {
1747 if (state)
1748 its_send_vint(its_dev, event);
1749 else
1750 its_send_vclear(its_dev, event);
1751 } else {
1752 if (state)
1753 its_send_int(its_dev, event);
1754 else
1755 its_send_clear(its_dev, event);
1756 }
1757
1758 return 0;
1759 }
1760
1761 static int its_irq_retrigger(struct irq_data *d)
1762 {
1763 return !its_irq_set_irqchip_state(d, IRQCHIP_STATE_PENDING, true);
1764 }
1765
1766 /*
1767 * Two favourable cases:
1768 *
1769 * (a) Either we have a GICv4.1, and all vPEs have to be mapped at all times
1770 * for vSGI delivery
1771 *
1772 * (b) Or the ITSs do not use a list map, meaning that VMOVP is cheap enough
1773 * and we're better off mapping all VPEs always
1774 *
1775 * If neither (a) nor (b) is true, then we map vPEs on demand.
1776 *
1777 */
1778 static bool gic_requires_eager_mapping(void)
1779 {
1780 if (!its_list_map || gic_rdists->has_rvpeid)
1781 return true;
1782
1783 return false;
1784 }
1785
1786 static void its_map_vm(struct its_node *its, struct its_vm *vm)
1787 {
1788 unsigned long flags;
1789
1790 if (gic_requires_eager_mapping())
1791 return;
1792
1793 raw_spin_lock_irqsave(&vmovp_lock, flags);
1794
1795 /*
1796 * If the VM wasn't mapped yet, iterate over the vpes and get
1797 * them mapped now.
1798 */
1799 vm->vlpi_count[its->list_nr]++;
1800
1801 if (vm->vlpi_count[its->list_nr] == 1) {
1802 int i;
1803
1804 for (i = 0; i < vm->nr_vpes; i++) {
1805 struct its_vpe *vpe = vm->vpes[i];
1806 struct irq_data *d = irq_get_irq_data(vpe->irq);
1807
1808 /* Map the VPE to the first possible CPU */
1809 vpe->col_idx = cpumask_first(cpu_online_mask);
1810 its_send_vmapp(its, vpe, true);
1811 its_send_vinvall(its, vpe);
1812 irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
1813 }
1814 }
1815
1816 raw_spin_unlock_irqrestore(&vmovp_lock, flags);
1817 }
1818
1819 static void its_unmap_vm(struct its_node *its, struct its_vm *vm)
1820 {
1821 unsigned long flags;
1822
1823 /* Not using the ITS list? Everything is always mapped. */
1824 if (gic_requires_eager_mapping())
1825 return;
1826
1827 raw_spin_lock_irqsave(&vmovp_lock, flags);
1828
1829 if (!--vm->vlpi_count[its->list_nr]) {
1830 int i;
1831
1832 for (i = 0; i < vm->nr_vpes; i++)
1833 its_send_vmapp(its, vm->vpes[i], false);
1834 }
1835
1836 raw_spin_unlock_irqrestore(&vmovp_lock, flags);
1837 }
1838
1839 static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
1840 {
1841 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1842 u32 event = its_get_event_id(d);
1843
1844 if (!info->map)
1845 return -EINVAL;
1846
1847 if (!its_dev->event_map.vm) {
1848 struct its_vlpi_map *maps;
1849
1850 maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps),
1851 GFP_ATOMIC);
1852 if (!maps)
1853 return -ENOMEM;
1854
1855 its_dev->event_map.vm = info->map->vm;
1856 its_dev->event_map.vlpi_maps = maps;
1857 } else if (its_dev->event_map.vm != info->map->vm) {
1858 return -EINVAL;
1859 }
1860
1861 /* Get our private copy of the mapping information */
1862 its_dev->event_map.vlpi_maps[event] = *info->map;
1863
1864 if (irqd_is_forwarded_to_vcpu(d)) {
1865 /* Already mapped, move it around */
1866 its_send_vmovi(its_dev, event);
1867 } else {
1868 /* Ensure all the VPEs are mapped on this ITS */
1869 its_map_vm(its_dev->its, info->map->vm);
1870
1871 /*
1872 * Flag the interrupt as forwarded so that we can
1873 * start poking the virtual property table.
1874 */
1875 irqd_set_forwarded_to_vcpu(d);
1876
1877 /* Write out the property to the prop table */
1878 lpi_write_config(d, 0xff, info->map->properties);
1879
1880 /* Drop the physical mapping */
1881 its_send_discard(its_dev, event);
1882
1883 /* and install the virtual one */
1884 its_send_vmapti(its_dev, event);
1885
1886 /* Increment the number of VLPIs */
1887 its_dev->event_map.nr_vlpis++;
1888 }
1889
1890 return 0;
1891 }
1892
1893 static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info)
1894 {
1895 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1896 struct its_vlpi_map *map;
1897
1898 map = get_vlpi_map(d);
1899
1900 if (!its_dev->event_map.vm || !map)
1901 return -EINVAL;
1902
1903 /* Copy our mapping information to the incoming request */
1904 *info->map = *map;
1905
1906 return 0;
1907 }
1908
1909 static int its_vlpi_unmap(struct irq_data *d)
1910 {
1911 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1912 u32 event = its_get_event_id(d);
1913
1914 if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
1915 return -EINVAL;
1916
1917 /* Drop the virtual mapping */
1918 its_send_discard(its_dev, event);
1919
1920 /* and restore the physical one */
1921 irqd_clr_forwarded_to_vcpu(d);
1922 its_send_mapti(its_dev, d->hwirq, event);
1923 lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO |
1924 LPI_PROP_ENABLED |
1925 LPI_PROP_GROUP1));
1926
1927 /* Potentially unmap the VM from this ITS */
1928 its_unmap_vm(its_dev->its, its_dev->event_map.vm);
1929
1930 /*
1931 * Drop the refcount and make the device available again if
1932 * this was the last VLPI.
1933 */
1934 if (!--its_dev->event_map.nr_vlpis) {
1935 its_dev->event_map.vm = NULL;
1936 kfree(its_dev->event_map.vlpi_maps);
1937 }
1938
1939 return 0;
1940 }
1941
1942 static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info)
1943 {
1944 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1945
1946 if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
1947 return -EINVAL;
1948
1949 if (info->cmd_type == PROP_UPDATE_AND_INV_VLPI)
1950 lpi_update_config(d, 0xff, info->config);
1951 else
1952 lpi_write_config(d, 0xff, info->config);
1953 its_vlpi_set_doorbell(d, !!(info->config & LPI_PROP_ENABLED));
1954
1955 return 0;
1956 }
1957
1958 static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
1959 {
1960 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1961 struct its_cmd_info *info = vcpu_info;
1962
1963 /* Need a v4 ITS */
1964 if (!is_v4(its_dev->its))
1965 return -EINVAL;
1966
1967 guard(raw_spinlock_irq)(&its_dev->event_map.vlpi_lock);
1968
1969 /* Unmap request? */
1970 if (!info)
1971 return its_vlpi_unmap(d);
1972
1973 switch (info->cmd_type) {
1974 case MAP_VLPI:
1975 return its_vlpi_map(d, info);
1976
1977 case GET_VLPI:
1978 return its_vlpi_get(d, info);
1979
1980 case PROP_UPDATE_VLPI:
1981 case PROP_UPDATE_AND_INV_VLPI:
1982 return its_vlpi_prop_update(d, info);
1983
1984 default:
1985 return -EINVAL;
1986 }
1987 }
1988
1989 static struct irq_chip its_irq_chip = {
1990 .name = "ITS",
1991 .irq_mask = its_mask_irq,
1992 .irq_unmask = its_unmask_irq,
1993 .irq_eoi = irq_chip_eoi_parent,
1994 .irq_set_affinity = its_set_affinity,
1995 .irq_compose_msi_msg = its_irq_compose_msi_msg,
1996 .irq_set_irqchip_state = its_irq_set_irqchip_state,
1997 .irq_retrigger = its_irq_retrigger,
1998 .irq_set_vcpu_affinity = its_irq_set_vcpu_affinity,
1999 };
2000
2001
2002 /*
2003 * How we allocate LPIs:
2004 *
2005 * lpi_range_list contains ranges of LPIs that are available to
2006 * allocate from. To allocate LPIs, just pick the first range that
2007 * fits the required allocation, and reduce it by the required
2008 * amount. Once empty, remove the range from the list.
2009 *
2010 * To free a range of LPIs, add a free range to the list, sort it and
2011 * merge the result if the new range happens to be adjacent to an
2012 * already free block.
2013 *
2014 * The consequence of the above is that allocation cost is low, but
2015 * freeing is expensive. We assume that freeing rarely occurs.
2016 */
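/*
 * For example: with a single free range starting at 8192 and spanning
 * 65536 LPIs, allocating 32 LPIs returns base 8192 and shrinks the
 * range to base 8224, span 65504. Freeing those 32 LPIs later inserts
 * a new range ahead of it and merges the two back into the original
 * free block.
 */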
2017 #define ITS_MAX_LPI_NRBITS 16 /* 64K LPIs */
2018
2019 static DEFINE_MUTEX(lpi_range_lock);
2020 static LIST_HEAD(lpi_range_list);
2021
2022 struct lpi_range {
2023 struct list_head entry;
2024 u32 base_id;
2025 u32 span;
2026 };
2027
2028 static struct lpi_range *mk_lpi_range(u32 base, u32 span)
2029 {
2030 struct lpi_range *range;
2031
2032 range = kmalloc(sizeof(*range), GFP_KERNEL);
2033 if (range) {
2034 range->base_id = base;
2035 range->span = span;
2036 }
2037
2038 return range;
2039 }
2040
2041 static int alloc_lpi_range(u32 nr_lpis, u32 *base)
2042 {
2043 struct lpi_range *range, *tmp;
2044 int err = -ENOSPC;
2045
2046 mutex_lock(&lpi_range_lock);
2047
2048 list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) {
2049 if (range->span >= nr_lpis) {
2050 *base = range->base_id;
2051 range->base_id += nr_lpis;
2052 range->span -= nr_lpis;
2053
2054 if (range->span == 0) {
2055 list_del(&range->entry);
2056 kfree(range);
2057 }
2058
2059 err = 0;
2060 break;
2061 }
2062 }
2063
2064 mutex_unlock(&lpi_range_lock);
2065
2066 pr_debug("ITS: alloc %u:%u\n", *base, nr_lpis);
2067 return err;
2068 }
2069
2070 static void merge_lpi_ranges(struct lpi_range *a, struct lpi_range *b)
2071 {
2072 if (&a->entry == &lpi_range_list || &b->entry == &lpi_range_list)
2073 return;
2074 if (a->base_id + a->span != b->base_id)
2075 return;
2076 b->base_id = a->base_id;
2077 b->span += a->span;
2078 list_del(&a->entry);
2079 kfree(a);
2080 }
2081
2082 static int free_lpi_range(u32 base, u32 nr_lpis)
2083 {
2084 struct lpi_range *new, *old;
2085
2086 new = mk_lpi_range(base, nr_lpis);
2087 if (!new)
2088 return -ENOMEM;
2089
2090 mutex_lock(&lpi_range_lock);
2091
2092 list_for_each_entry_reverse(old, &lpi_range_list, entry) {
2093 if (old->base_id < base)
2094 break;
2095 }
2096 /*
2097 * old is the last element with ->base_id smaller than base,
2098 * so new goes right after it. If there are no elements with
2099 * ->base_id smaller than base, &old->entry ends up pointing
2100 * at the head of the list, and inserting new at the start of
2101 * the list is the right thing to do in that case as well.
2102 */
2103 list_add(&new->entry, &old->entry);
2104 /*
2105 * Now check if we can merge with the preceding and/or
2106 * following ranges.
2107 */
2108 merge_lpi_ranges(old, new);
2109 merge_lpi_ranges(new, list_next_entry(new, entry));
2110
2111 mutex_unlock(&lpi_range_lock);
2112 return 0;
2113 }
2114
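/*
 * Seed the allocator with the full usable LPI range (from INTID 8192
 * upwards), possibly clamped to the number of LPIs advertised in
 * GICD_TYPER when the GIC (e.g. a hypervisor's virtual GIC) restricts it.
 */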
2115 static int __init its_lpi_init(u32 id_bits)
2116 {
2117 u32 lpis = (1UL << id_bits) - 8192;
2118 u32 numlpis;
2119 int err;
2120
2121 numlpis = 1UL << GICD_TYPER_NUM_LPIS(gic_rdists->gicd_typer);
2122
2123 if (numlpis > 2 && !WARN_ON(numlpis > lpis)) {
2124 lpis = numlpis;
2125 pr_info("ITS: Using hypervisor restricted LPI range [%u]\n",
2126 lpis);
2127 }
2128
2129 /*
2130 * Initializing the allocator is just the same as freeing the
2131 * full range of LPIs.
2132 */
2133 err = free_lpi_range(8192, lpis);
2134 pr_debug("ITS: Allocator initialized for %u LPIs\n", lpis);
2135 return err;
2136 }
2137
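/*
 * Allocate a contiguous block of LPIs plus a bitmap tracking which of
 * them are in use. If the requested size cannot be satisfied, the
 * request is halved until an allocation succeeds or nothing is left.
 */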
2138 static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids)
2139 {
2140 unsigned long *bitmap = NULL;
2141 int err = 0;
2142
2143 do {
2144 err = alloc_lpi_range(nr_irqs, base);
2145 if (!err)
2146 break;
2147
2148 nr_irqs /= 2;
2149 } while (nr_irqs > 0);
2150
2151 if (!nr_irqs)
2152 err = -ENOSPC;
2153
2154 if (err)
2155 goto out;
2156
2157 bitmap = bitmap_zalloc(nr_irqs, GFP_ATOMIC);
2158 if (!bitmap)
2159 goto out;
2160
2161 *nr_ids = nr_irqs;
2162
2163 out:
2164 if (!bitmap)
2165 *base = *nr_ids = 0;
2166
2167 return bitmap;
2168 }
2169
2170 static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids)
2171 {
2172 WARN_ON(free_lpi_range(base, nr_ids));
2173 bitmap_free(bitmap);
2174 }
2175
2176 static void gic_reset_prop_table(void *va)
2177 {
2178 /* Priority 0xa0, Group-1, disabled */
2179 memset(va, LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1, LPI_PROPBASE_SZ);
2180
2181 /* Make sure the GIC will observe the written configuration */
2182 gic_flush_dcache_to_poc(va, LPI_PROPBASE_SZ);
2183 }
2184
2185 static struct page *its_allocate_prop_table(gfp_t gfp_flags)
2186 {
2187 struct page *prop_page;
2188
2189 prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ));
2190 if (!prop_page)
2191 return NULL;
2192
2193 gic_reset_prop_table(page_address(prop_page));
2194
2195 return prop_page;
2196 }
2197
2198 static void its_free_prop_table(struct page *prop_page)
2199 {
2200 free_pages((unsigned long)page_address(prop_page),
2201 get_order(LPI_PROPBASE_SZ));
2202 }
2203
2204 static bool gic_check_reserved_range(phys_addr_t addr, unsigned long size)
2205 {
2206 phys_addr_t start, end, addr_end;
2207 u64 i;
2208
2209 /*
2210 * We don't bother checking for a kdump kernel as by
2211 * construction, the LPI tables are out of this kernel's
2212 * memory map.
2213 */
2214 if (is_kdump_kernel())
2215 return true;
2216
2217 addr_end = addr + size - 1;
2218
2219 for_each_reserved_mem_range(i, &start, &end) {
2220 if (addr >= start && addr_end <= end)
2221 return true;
2222 }
2223
2224 /* Not found, not a good sign... */
2225 pr_warn("GICv3: Expected reserved range [%pa:%pa], not found\n",
2226 &addr, &addr_end);
2227 add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
2228 return false;
2229 }
2230
2231 static int gic_reserve_range(phys_addr_t addr, unsigned long size)
2232 {
2233 if (efi_enabled(EFI_CONFIG_TABLES))
2234 return efi_mem_reserve_persistent(addr, size);
2235
2236 return 0;
2237 }
2238
2239 static int __init its_setup_lpi_prop_table(void)
2240 {
2241 if (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) {
2242 u64 val;
2243
2244 val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER);
2245 lpi_id_bits = (val & GICR_PROPBASER_IDBITS_MASK) + 1;
2246
2247 gic_rdists->prop_table_pa = val & GENMASK_ULL(51, 12);
2248 gic_rdists->prop_table_va = memremap(gic_rdists->prop_table_pa,
2249 LPI_PROPBASE_SZ,
2250 MEMREMAP_WB);
2251 gic_reset_prop_table(gic_rdists->prop_table_va);
2252 } else {
2253 struct page *page;
2254
2255 lpi_id_bits = min_t(u32,
2256 GICD_TYPER_ID_BITS(gic_rdists->gicd_typer),
2257 ITS_MAX_LPI_NRBITS);
2258 page = its_allocate_prop_table(GFP_NOWAIT);
2259 if (!page) {
2260 pr_err("Failed to allocate PROPBASE\n");
2261 return -ENOMEM;
2262 }
2263
2264 gic_rdists->prop_table_pa = page_to_phys(page);
2265 gic_rdists->prop_table_va = page_address(page);
2266 WARN_ON(gic_reserve_range(gic_rdists->prop_table_pa,
2267 LPI_PROPBASE_SZ));
2268 }
2269
2270 pr_info("GICv3: using LPI property table @%pa\n",
2271 &gic_rdists->prop_table_pa);
2272
2273 return its_lpi_init(lpi_id_bits);
2274 }
2275
2276 static const char *its_base_type_string[] = {
2277 [GITS_BASER_TYPE_DEVICE] = "Devices",
2278 [GITS_BASER_TYPE_VCPU] = "Virtual CPUs",
2279 [GITS_BASER_TYPE_RESERVED3] = "Reserved (3)",
2280 [GITS_BASER_TYPE_COLLECTION] = "Interrupt Collections",
2281 [GITS_BASER_TYPE_RESERVED5] = "Reserved (5)",
2282 [GITS_BASER_TYPE_RESERVED6] = "Reserved (6)",
2283 [GITS_BASER_TYPE_RESERVED7] = "Reserved (7)",
2284 };
2285
2286 static u64 its_read_baser(struct its_node *its, struct its_baser *baser)
2287 {
2288 u32 idx = baser - its->tables;
2289
2290 return gits_read_baser(its->base + GITS_BASER + (idx << 3));
2291 }
2292
2293 static void its_write_baser(struct its_node *its, struct its_baser *baser,
2294 u64 val)
2295 {
2296 u32 idx = baser - its->tables;
2297
2298 gits_write_baser(val, its->base + GITS_BASER + (idx << 3));
2299 baser->val = its_read_baser(its, baser);
2300 }
2301
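/*
 * Allocate the memory backing one ITS table and program the matching
 * GITS_BASERn register, retrying with whatever shareability (and, if
 * need be, cacheability) the ITS actually accepts.
 */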
2302 static int its_setup_baser(struct its_node *its, struct its_baser *baser,
2303 u64 cache, u64 shr, u32 order, bool indirect)
2304 {
2305 u64 val = its_read_baser(its, baser);
2306 u64 esz = GITS_BASER_ENTRY_SIZE(val);
2307 u64 type = GITS_BASER_TYPE(val);
2308 u64 baser_phys, tmp;
2309 u32 alloc_pages, psz;
2310 struct page *page;
2311 void *base;
2312
2313 psz = baser->psz;
2314 alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
2315 if (alloc_pages > GITS_BASER_PAGES_MAX) {
2316 pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n",
2317 &its->phys_base, its_base_type_string[type],
2318 alloc_pages, GITS_BASER_PAGES_MAX);
2319 alloc_pages = GITS_BASER_PAGES_MAX;
2320 order = get_order(GITS_BASER_PAGES_MAX * psz);
2321 }
2322
2323 page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order);
2324 if (!page)
2325 return -ENOMEM;
2326
2327 base = (void *)page_address(page);
2328 baser_phys = virt_to_phys(base);
2329
2330 /* Check if the physical address of the memory is above 48bits */
2331 if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && (baser_phys >> 48)) {
2332
2333 /* 52bit PA is supported only when PageSize=64K */
2334 if (psz != SZ_64K) {
2335 pr_err("ITS: no 52bit PA support when psz=%d\n", psz);
2336 free_pages((unsigned long)base, order);
2337 return -ENXIO;
2338 }
2339
2340 /* Convert 52bit PA to 48bit field */
2341 baser_phys = GITS_BASER_PHYS_52_to_48(baser_phys);
2342 }
2343
2344 retry_baser:
2345 val = (baser_phys |
2346 (type << GITS_BASER_TYPE_SHIFT) |
2347 ((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) |
2348 ((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT) |
2349 cache |
2350 shr |
2351 GITS_BASER_VALID);
2352
2353 val |= indirect ? GITS_BASER_INDIRECT : 0x0;
2354
2355 switch (psz) {
2356 case SZ_4K:
2357 val |= GITS_BASER_PAGE_SIZE_4K;
2358 break;
2359 case SZ_16K:
2360 val |= GITS_BASER_PAGE_SIZE_16K;
2361 break;
2362 case SZ_64K:
2363 val |= GITS_BASER_PAGE_SIZE_64K;
2364 break;
2365 }
2366
2367 if (!shr)
2368 gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));
2369
2370 its_write_baser(its, baser, val);
2371 tmp = baser->val;
2372
2373 if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
2374 /*
2375 * Shareability didn't stick. Just use
2376 * whatever the read reported, which is likely
2377 * to be the only thing this ITS
2378 * supports. If that's zero, make it
2379 * non-cacheable as well.
2380 */
2381 shr = tmp & GITS_BASER_SHAREABILITY_MASK;
2382 if (!shr)
2383 cache = GITS_BASER_nC;
2384
2385 goto retry_baser;
2386 }
2387
2388 if (val != tmp) {
2389 pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n",
2390 &its->phys_base, its_base_type_string[type],
2391 val, tmp);
2392 free_pages((unsigned long)base, order);
2393 return -ENXIO;
2394 }
2395
2396 baser->order = order;
2397 baser->base = base;
2398 baser->psz = psz;
2399 tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz;
2400
2401 pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n",
2402 &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp),
2403 its_base_type_string[type],
2404 (unsigned long)virt_to_phys(base),
2405 indirect ? "indirect" : "flat", (int)esz,
2406 psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
2407
2408 return 0;
2409 }
2410
2411 static bool its_parse_indirect_baser(struct its_node *its,
2412 struct its_baser *baser,
2413 u32 *order, u32 ids)
2414 {
2415 u64 tmp = its_read_baser(its, baser);
2416 u64 type = GITS_BASER_TYPE(tmp);
2417 u64 esz = GITS_BASER_ENTRY_SIZE(tmp);
2418 u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb;
2419 u32 new_order = *order;
2420 u32 psz = baser->psz;
2421 bool indirect = false;
2422
2423 /* No need to enable Indirection if memory requirement < (psz*2)bytes */
2424 if ((esz << ids) > (psz * 2)) {
2425 /*
2426 * Find out whether hw supports a single or two-level table
2427 * by reading bit at offset '62' after writing '1' to it.
2428 */
2429 its_write_baser(its, baser, val | GITS_BASER_INDIRECT);
2430 indirect = !!(baser->val & GITS_BASER_INDIRECT);
2431
2432 if (indirect) {
2433 /*
2434 * The size of the lvl2 table is equal to the ITS page size,
2435 * which is 'psz'. To size the lvl1 table, subtract from 'ids'
2436 * (as reported by the ITS hardware) the ID bits covered by a
2437 * single lvl2 page; the lvl1 table then needs one entry of
2438 * GITS_LVL1_ENTRY_SIZE per remaining ID value.
2439 */
2440 ids -= ilog2(psz / (int)esz);
2441 esz = GITS_LVL1_ENTRY_SIZE;
2442 }
2443 }
2444
2445 /*
2446 * Allocate as many entries as required to fit the
2447 * range of device IDs that the ITS can grok... The ID
2448 * space being incredibly sparse, this results in a
2449 * massive waste of memory if the two-level device
2450 * table feature is not supported by the hardware.
2451 */
2452 new_order = max_t(u32, get_order(esz << ids), new_order);
2453 if (new_order > MAX_ORDER) {
2454 new_order = MAX_ORDER;
2455 ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz);
2456 pr_warn("ITS@%pa: %s Table too large, reduce ids %llu->%u\n",
2457 &its->phys_base, its_base_type_string[type],
2458 device_ids(its), ids);
2459 }
2460
2461 *order = new_order;
2462
2463 return indirect;
2464 }
2465
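/*
 * Reduce a GICR_TYPER-format value to its affinity, masked down to the
 * CommonLPIAff granularity, so that redistributors and ITSs that can
 * share vPE tables compare as equal.
 */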
2466 static u32 compute_common_aff(u64 val)
2467 {
2468 u32 aff, clpiaff;
2469
2470 aff = FIELD_GET(GICR_TYPER_AFFINITY, val);
2471 clpiaff = FIELD_GET(GICR_TYPER_COMMON_LPI_AFF, val);
2472
2473 return aff & ~(GENMASK(31, 0) >> (clpiaff * 8));
2474 }
2475
2476 static u32 compute_its_aff(struct its_node *its)
2477 {
2478 u64 val;
2479 u32 svpet;
2480
2481 /*
2482 * Reencode the ITS SVPET and MPIDR as a GICR_TYPER, and compute
2483 * the resulting affinity. We then use that to see if this matches
2484 * our own affinity.
2485 */
2486 svpet = FIELD_GET(GITS_TYPER_SVPET, its->typer);
2487 val = FIELD_PREP(GICR_TYPER_COMMON_LPI_AFF, svpet);
2488 val |= FIELD_PREP(GICR_TYPER_AFFINITY, its->mpidr);
2489 return compute_common_aff(val);
2490 }
2491
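/*
 * Find another v4.1 ITS with the same SVPET affinity whose vPE table
 * (GITS_BASER2) is already valid, so its table can be shared instead
 * of allocating a new one.
 */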
2492 static struct its_node *find_sibling_its(struct its_node *cur_its)
2493 {
2494 struct its_node *its;
2495 u32 aff;
2496
2497 if (!FIELD_GET(GITS_TYPER_SVPET, cur_its->typer))
2498 return NULL;
2499
2500 aff = compute_its_aff(cur_its);
2501
2502 list_for_each_entry(its, &its_nodes, entry) {
2503 u64 baser;
2504
2505 if (!is_v4_1(its) || its == cur_its)
2506 continue;
2507
2508 if (!FIELD_GET(GITS_TYPER_SVPET, its->typer))
2509 continue;
2510
2511 if (aff != compute_its_aff(its))
2512 continue;
2513
2514 /* GICv4.1 guarantees that the vPE table is GITS_BASER2 */
2515 baser = its->tables[2].val;
2516 if (!(baser & GITS_BASER_VALID))
2517 continue;
2518
2519 return its;
2520 }
2521
2522 return NULL;
2523 }
2524
2525 static void its_free_tables(struct its_node *its)
2526 {
2527 int i;
2528
2529 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
2530 if (its->tables[i].base) {
2531 free_pages((unsigned long)its->tables[i].base,
2532 its->tables[i].order);
2533 its->tables[i].base = NULL;
2534 }
2535 }
2536 }
2537
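/*
 * Probe the largest ITS page size this BASER supports by programming
 * 64K, then 16K, then 4K, and checking which value sticks on readback.
 */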
2538 static int its_probe_baser_psz(struct its_node *its, struct its_baser *baser)
2539 {
2540 u64 psz = SZ_64K;
2541
2542 while (psz) {
2543 u64 val, gpsz;
2544
2545 val = its_read_baser(its, baser);
2546 val &= ~GITS_BASER_PAGE_SIZE_MASK;
2547
2548 switch (psz) {
2549 case SZ_64K:
2550 gpsz = GITS_BASER_PAGE_SIZE_64K;
2551 break;
2552 case SZ_16K:
2553 gpsz = GITS_BASER_PAGE_SIZE_16K;
2554 break;
2555 case SZ_4K:
2556 default:
2557 gpsz = GITS_BASER_PAGE_SIZE_4K;
2558 break;
2559 }
2560
2561 gpsz >>= GITS_BASER_PAGE_SIZE_SHIFT;
2562
2563 val |= FIELD_PREP(GITS_BASER_PAGE_SIZE_MASK, gpsz);
2564 its_write_baser(its, baser, val);
2565
2566 if (FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser->val) == gpsz)
2567 break;
2568
2569 switch (psz) {
2570 case SZ_64K:
2571 psz = SZ_16K;
2572 break;
2573 case SZ_16K:
2574 psz = SZ_4K;
2575 break;
2576 case SZ_4K:
2577 default:
2578 return -1;
2579 }
2580 }
2581
2582 baser->psz = psz;
2583 return 0;
2584 }
2585
2586 static int its_alloc_tables(struct its_node *its)
2587 {
2588 u64 shr = GITS_BASER_InnerShareable;
2589 u64 cache = GITS_BASER_RaWaWb;
2590 int err, i;
2591
2592 if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375)
2593 /* erratum 24313: ignore memory access type */
2594 cache = GITS_BASER_nCnB;
2595
2596 if (its->flags & ITS_FLAGS_FORCE_NON_SHAREABLE) {
2597 cache = GITS_BASER_nC;
2598 shr = 0;
2599 }
2600
2601 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
2602 struct its_baser *baser = its->tables + i;
2603 u64 val = its_read_baser(its, baser);
2604 u64 type = GITS_BASER_TYPE(val);
2605 bool indirect = false;
2606 u32 order;
2607
2608 if (type == GITS_BASER_TYPE_NONE)
2609 continue;
2610
2611 if (its_probe_baser_psz(its, baser)) {
2612 its_free_tables(its);
2613 return -ENXIO;
2614 }
2615
2616 order = get_order(baser->psz);
2617
2618 switch (type) {
2619 case GITS_BASER_TYPE_DEVICE:
2620 indirect = its_parse_indirect_baser(its, baser, &order,
2621 device_ids(its));
2622 break;
2623
2624 case GITS_BASER_TYPE_VCPU:
2625 if (is_v4_1(its)) {
2626 struct its_node *sibling;
2627
2628 WARN_ON(i != 2);
2629 if ((sibling = find_sibling_its(its))) {
2630 *baser = sibling->tables[2];
2631 its_write_baser(its, baser, baser->val);
2632 continue;
2633 }
2634 }
2635
2636 indirect = its_parse_indirect_baser(its, baser, &order,
2637 ITS_MAX_VPEID_BITS);
2638 break;
2639 }
2640
2641 err = its_setup_baser(its, baser, cache, shr, order, indirect);
2642 if (err < 0) {
2643 its_free_tables(its);
2644 return err;
2645 }
2646
2647 /* Update settings which will be used for next BASERn */
2648 cache = baser->val & GITS_BASER_CACHEABILITY_MASK;
2649 shr = baser->val & GITS_BASER_SHAREABILITY_MASK;
2650 }
2651
2652 return 0;
2653 }
2654
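/*
 * Try to derive this redistributor's GICR_VPROPBASER from a v4.1 ITS
 * that shares our CommonLPIAff affinity and already has a valid vPE
 * table. Returns 0 if no such ITS exists.
 */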
2655 static u64 inherit_vpe_l1_table_from_its(void)
2656 {
2657 struct its_node *its;
2658 u64 val;
2659 u32 aff;
2660
2661 val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
2662 aff = compute_common_aff(val);
2663
2664 list_for_each_entry(its, &its_nodes, entry) {
2665 u64 baser, addr;
2666
2667 if (!is_v4_1(its))
2668 continue;
2669
2670 if (!FIELD_GET(GITS_TYPER_SVPET, its->typer))
2671 continue;
2672
2673 if (aff != compute_its_aff(its))
2674 continue;
2675
2676 /* GICv4.1 guarantees that the vPE table is GITS_BASER2 */
2677 baser = its->tables[2].val;
2678 if (!(baser & GITS_BASER_VALID))
2679 continue;
2680
2681 /* We have a winner! */
2682 gic_data_rdist()->vpe_l1_base = its->tables[2].base;
2683
2684 val = GICR_VPROPBASER_4_1_VALID;
2685 if (baser & GITS_BASER_INDIRECT)
2686 val |= GICR_VPROPBASER_4_1_INDIRECT;
2687 val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE,
2688 FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser));
2689 switch (FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser)) {
2690 case GIC_PAGE_SIZE_64K:
2691 addr = GITS_BASER_ADDR_48_to_52(baser);
2692 break;
2693 default:
2694 addr = baser & GENMASK_ULL(47, 12);
2695 break;
2696 }
2697 val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, addr >> 12);
2698 if (rdists_support_shareable()) {
2699 val |= FIELD_PREP(GICR_VPROPBASER_SHAREABILITY_MASK,
2700 FIELD_GET(GITS_BASER_SHAREABILITY_MASK, baser));
2701 val |= FIELD_PREP(GICR_VPROPBASER_INNER_CACHEABILITY_MASK,
2702 FIELD_GET(GITS_BASER_INNER_CACHEABILITY_MASK, baser));
2703 }
2704 val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, GITS_BASER_NR_PAGES(baser) - 1);
2705
2706 return val;
2707 }
2708
2709 return 0;
2710 }
2711
2712 static u64 inherit_vpe_l1_table_from_rd(cpumask_t **mask)
2713 {
2714 u32 aff;
2715 u64 val;
2716 int cpu;
2717
2718 val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
2719 aff = compute_common_aff(val);
2720
2721 for_each_possible_cpu(cpu) {
2722 void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base;
2723
2724 if (!base || cpu == smp_processor_id())
2725 continue;
2726
2727 val = gic_read_typer(base + GICR_TYPER);
2728 if (aff != compute_common_aff(val))
2729 continue;
2730
2731 /*
2732 * At this point, we have a victim. This particular CPU
2733 * has already booted, and has an affinity that matches
2734 * ours wrt CommonLPIAff. Let's use its own VPROPBASER.
2735 * Make sure we don't write the Z bit in that case.
2736 */
2737 val = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER);
2738 val &= ~GICR_VPROPBASER_4_1_Z;
2739
2740 gic_data_rdist()->vpe_l1_base = gic_data_rdist_cpu(cpu)->vpe_l1_base;
2741 *mask = gic_data_rdist_cpu(cpu)->vpe_table_mask;
2742
2743 return val;
2744 }
2745
2746 return 0;
2747 }
2748
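/*
 * On a GICv4.1 redistributor, make sure the L2 vPE table page backing
 * 'id' exists, allocating and flushing it if necessary. Returns false
 * if the vPE ID is out of range or the allocation fails.
 */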
2749 static bool allocate_vpe_l2_table(int cpu, u32 id)
2750 {
2751 void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base;
2752 unsigned int psz, esz, idx, npg, gpsz;
2753 u64 val;
2754 struct page *page;
2755 __le64 *table;
2756
2757 if (!gic_rdists->has_rvpeid)
2758 return true;
2759
2760 /* Skip non-present CPUs */
2761 if (!base)
2762 return true;
2763
2764 val = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER);
2765
2766 esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val) + 1;
2767 gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val);
2768 npg = FIELD_GET(GICR_VPROPBASER_4_1_SIZE, val) + 1;
2769
2770 switch (gpsz) {
2771 default:
2772 WARN_ON(1);
2773 fallthrough;
2774 case GIC_PAGE_SIZE_4K:
2775 psz = SZ_4K;
2776 break;
2777 case GIC_PAGE_SIZE_16K:
2778 psz = SZ_16K;
2779 break;
2780 case GIC_PAGE_SIZE_64K:
2781 psz = SZ_64K;
2782 break;
2783 }
2784
2785 /* Don't allow vpe_id that exceeds single, flat table limit */
2786 if (!(val & GICR_VPROPBASER_4_1_INDIRECT))
2787 return (id < (npg * psz / (esz * SZ_8)));
2788
2789 /* Compute 1st level table index & check if that exceeds table limit */
2790 idx = id >> ilog2(psz / (esz * SZ_8));
2791 if (idx >= (npg * psz / GITS_LVL1_ENTRY_SIZE))
2792 return false;
2793
2794 table = gic_data_rdist_cpu(cpu)->vpe_l1_base;
2795
2796 /* Allocate memory for 2nd level table */
2797 if (!table[idx]) {
2798 page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(psz));
2799 if (!page)
2800 return false;
2801
2802 /* Flush Lvl2 table to PoC if hw doesn't support coherency */
2803 if (!(val & GICR_VPROPBASER_SHAREABILITY_MASK))
2804 gic_flush_dcache_to_poc(page_address(page), psz);
2805
2806 table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
2807
2808 /* Flush Lvl1 entry to PoC if hw doesn't support coherency */
2809 if (!(val & GICR_VPROPBASER_SHAREABILITY_MASK))
2810 gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);
2811
2812 /* Ensure updated table contents are visible to RD hardware */
2813 dsb(sy);
2814 }
2815
2816 return true;
2817 }
2818
2819 static int allocate_vpe_l1_table(void)
2820 {
2821 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
2822 u64 val, gpsz, npg, pa;
2823 unsigned int psz = SZ_64K;
2824 unsigned int np, epp, esz;
2825 struct page *page;
2826
2827 if (!gic_rdists->has_rvpeid)
2828 return 0;
2829
2830 /*
2831 * if VPENDBASER.Valid is set, disable any previously programmed
2832 * VPE by setting PendingLast while clearing Valid. This has the
2833 * effect of making sure no doorbell will be generated and we can
2834 * then safely clear VPROPBASER.Valid.
2835 */
2836 if (gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER) & GICR_VPENDBASER_Valid)
2837 gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast,
2838 vlpi_base + GICR_VPENDBASER);
2839
2840 /*
2841 * If we can inherit the configuration from another RD, let's do
2842 * so. Otherwise, we have to go through the allocation process. We
2843 * assume that all RDs have the exact same requirements, as
2844 * nothing will work otherwise.
2845 */
2846 val = inherit_vpe_l1_table_from_rd(&gic_data_rdist()->vpe_table_mask);
2847 if (val & GICR_VPROPBASER_4_1_VALID)
2848 goto out;
2849
2850 gic_data_rdist()->vpe_table_mask = kzalloc(sizeof(cpumask_t), GFP_ATOMIC);
2851 if (!gic_data_rdist()->vpe_table_mask)
2852 return -ENOMEM;
2853
2854 val = inherit_vpe_l1_table_from_its();
2855 if (val & GICR_VPROPBASER_4_1_VALID)
2856 goto out;
2857
2858 /* First probe the page size */
2859 val = FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, GIC_PAGE_SIZE_64K);
2860 gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
2861 val = gicr_read_vpropbaser(vlpi_base + GICR_VPROPBASER);
2862 gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val);
2863 esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val);
2864
2865 switch (gpsz) {
2866 default:
2867 gpsz = GIC_PAGE_SIZE_4K;
2868 fallthrough;
2869 case GIC_PAGE_SIZE_4K:
2870 psz = SZ_4K;
2871 break;
2872 case GIC_PAGE_SIZE_16K:
2873 psz = SZ_16K;
2874 break;
2875 case GIC_PAGE_SIZE_64K:
2876 psz = SZ_64K;
2877 break;
2878 }
2879
2880 /*
2881 * Start populating the register from scratch, including RO fields
2882 * (which we want to print in debug cases...)
2883 */
2884 val = 0;
2885 val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, gpsz);
2886 val |= FIELD_PREP(GICR_VPROPBASER_4_1_ENTRY_SIZE, esz);
2887
2888 /* How many entries per GIC page? */
2889 esz++;
2890 epp = psz / (esz * SZ_8);
2891
2892 /*
2893 * If we need more than just a single L1 page, flag the table
2894 * as indirect and compute the number of required L1 pages.
2895 */
2896 if (epp < ITS_MAX_VPEID) {
2897 int nl2;
2898
2899 val |= GICR_VPROPBASER_4_1_INDIRECT;
2900
2901 /* Number of L2 pages required to cover the VPEID space */
2902 nl2 = DIV_ROUND_UP(ITS_MAX_VPEID, epp);
2903
2904 /* Number of L1 pages to point to the L2 pages */
2905 npg = DIV_ROUND_UP(nl2 * SZ_8, psz);
2906 } else {
2907 npg = 1;
2908 }
2909
2910 val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, npg - 1);
2911
2912 /* Right, that's the number of CPU pages we need for L1 */
2913 np = DIV_ROUND_UP(npg * psz, PAGE_SIZE);
2914
2915 pr_debug("np = %d, npg = %lld, psz = %d, epp = %d, esz = %d\n",
2916 np, npg, psz, epp, esz);
2917 page = alloc_pages(GFP_ATOMIC | __GFP_ZERO, get_order(np * PAGE_SIZE));
2918 if (!page)
2919 return -ENOMEM;
2920
2921 gic_data_rdist()->vpe_l1_base = page_address(page);
2922 pa = virt_to_phys(page_address(page));
2923 WARN_ON(!IS_ALIGNED(pa, psz));
2924
2925 val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, pa >> 12);
2926 if (rdists_support_shareable()) {
2927 val |= GICR_VPROPBASER_RaWb;
2928 val |= GICR_VPROPBASER_InnerShareable;
2929 }
2930 val |= GICR_VPROPBASER_4_1_Z;
2931 val |= GICR_VPROPBASER_4_1_VALID;
2932
2933 out:
2934 gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
2935 cpumask_set_cpu(smp_processor_id(), gic_data_rdist()->vpe_table_mask);
2936
2937 pr_debug("CPU%d: VPROPBASER = %llx %*pbl\n",
2938 smp_processor_id(), val,
2939 cpumask_pr_args(gic_data_rdist()->vpe_table_mask));
2940
2941 return 0;
2942 }
2943
2944 static int its_alloc_collections(struct its_node *its)
2945 {
2946 int i;
2947
2948 its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections),
2949 GFP_KERNEL);
2950 if (!its->collections)
2951 return -ENOMEM;
2952
2953 for (i = 0; i < nr_cpu_ids; i++)
2954 its->collections[i].target_address = ~0ULL;
2955
2956 return 0;
2957 }
2958
2959 static struct page *its_allocate_pending_table(gfp_t gfp_flags)
2960 {
2961 struct page *pend_page;
2962
2963 pend_page = alloc_pages(gfp_flags | __GFP_ZERO,
2964 get_order(LPI_PENDBASE_SZ));
2965 if (!pend_page)
2966 return NULL;
2967
2968 /* Make sure the GIC will observe the zero-ed page */
2969 gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ);
2970
2971 return pend_page;
2972 }
2973
2974 static void its_free_pending_table(struct page *pt)
2975 {
2976 free_pages((unsigned long)page_address(pt), get_order(LPI_PENDBASE_SZ));
2977 }
2978
2979 /*
2980 * Booting with kdump and LPIs enabled is generally fine. Any other
2981 * case is wrong in the absence of firmware/EFI support.
2982 */
2983 static bool enabled_lpis_allowed(void)
2984 {
2985 phys_addr_t addr;
2986 u64 val;
2987
2988 /* Check whether the property table is in a reserved region */
2989 val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER);
2990 addr = val & GENMASK_ULL(51, 12);
2991
2992 return gic_check_reserved_range(addr, LPI_PROPBASE_SZ);
2993 }
2994
2995 static int __init allocate_lpi_tables(void)
2996 {
2997 u64 val;
2998 int err, cpu;
2999
3000 /*
3001 * If LPIs are enabled while we run this from the boot CPU,
3002 * flag the RD tables as pre-allocated if the stars do align.
3003 */
3004 val = readl_relaxed(gic_data_rdist_rd_base() + GICR_CTLR);
3005 if ((val & GICR_CTLR_ENABLE_LPIS) && enabled_lpis_allowed()) {
3006 gic_rdists->flags |= (RDIST_FLAGS_RD_TABLES_PREALLOCATED |
3007 RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING);
3008 pr_info("GICv3: Using preallocated redistributor tables\n");
3009 }
3010
3011 err = its_setup_lpi_prop_table();
3012 if (err)
3013 return err;
3014
3015 /*
3016 * We allocate all the pending tables anyway, as we may have a
3017 * mix of RDs that have had LPIs enabled, and some that
3018 * don't. We'll free the unused ones as each CPU comes online.
3019 */
3020 for_each_possible_cpu(cpu) {
3021 struct page *pend_page;
3022
3023 pend_page = its_allocate_pending_table(GFP_NOWAIT);
3024 if (!pend_page) {
3025 pr_err("Failed to allocate PENDBASE for CPU%d\n", cpu);
3026 return -ENOMEM;
3027 }
3028
3029 gic_data_rdist_cpu(cpu)->pend_page = pend_page;
3030 }
3031
3032 return 0;
3033 }
3034
3035 static u64 read_vpend_dirty_clear(void __iomem *vlpi_base)
3036 {
3037 u32 count = 1000000; /* 1s! */
3038 bool clean;
3039 u64 val;
3040
3041 do {
3042 val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
3043 clean = !(val & GICR_VPENDBASER_Dirty);
3044 if (!clean) {
3045 count--;
3046 cpu_relax();
3047 udelay(1);
3048 }
3049 } while (!clean && count);
3050
3051 if (unlikely(!clean))
3052 pr_err_ratelimited("ITS virtual pending table not cleaning\n");
3053
3054 return val;
3055 }
3056
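/*
 * Clear GICR_VPENDBASER.Valid (applying the extra clr/set bits on top)
 * once any pending-table scan has completed, then wait for the
 * redistributor to finish parsing the table.
 */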
3057 static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set)
3058 {
3059 u64 val;
3060
3061 /* Make sure we wait until the RD is done with the initial scan */
3062 val = read_vpend_dirty_clear(vlpi_base);
3063 val &= ~GICR_VPENDBASER_Valid;
3064 val &= ~clr;
3065 val |= set;
3066 gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
3067
3068 val = read_vpend_dirty_clear(vlpi_base);
3069 if (unlikely(val & GICR_VPENDBASER_Dirty))
3070 val |= GICR_VPENDBASER_PendingLast;
3071
3072 return val;
3073 }
3074
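/*
 * Per-CPU LPI bring-up: program GICR_PROPBASER/GICR_PENDBASER (or
 * reuse firmware pre-allocated tables), enable LPIs in GICR_CTLR and,
 * on GICv4.x, give VPROPBASER/VPENDBASER a sane initial state.
 */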
3075 static void its_cpu_init_lpis(void)
3076 {
3077 void __iomem *rbase = gic_data_rdist_rd_base();
3078 struct page *pend_page;
3079 phys_addr_t paddr;
3080 u64 val, tmp;
3081
3082 if (gic_data_rdist()->flags & RD_LOCAL_LPI_ENABLED)
3083 return;
3084
3085 val = readl_relaxed(rbase + GICR_CTLR);
3086 if ((gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) &&
3087 (val & GICR_CTLR_ENABLE_LPIS)) {
3088 /*
3089 * Check that we get the same property table on all
3090 * RDs. If we don't, this is hopeless.
3091 */
3092 paddr = gicr_read_propbaser(rbase + GICR_PROPBASER);
3093 paddr &= GENMASK_ULL(51, 12);
3094 if (WARN_ON(gic_rdists->prop_table_pa != paddr))
3095 add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
3096
3097 paddr = gicr_read_pendbaser(rbase + GICR_PENDBASER);
3098 paddr &= GENMASK_ULL(51, 16);
3099
3100 WARN_ON(!gic_check_reserved_range(paddr, LPI_PENDBASE_SZ));
3101 gic_data_rdist()->flags |= RD_LOCAL_PENDTABLE_PREALLOCATED;
3102
3103 goto out;
3104 }
3105
3106 pend_page = gic_data_rdist()->pend_page;
3107 paddr = page_to_phys(pend_page);
3108
3109 /* set PROPBASE */
3110 val = (gic_rdists->prop_table_pa |
3111 GICR_PROPBASER_InnerShareable |
3112 GICR_PROPBASER_RaWaWb |
3113 ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));
3114
3115 gicr_write_propbaser(val, rbase + GICR_PROPBASER);
3116 tmp = gicr_read_propbaser(rbase + GICR_PROPBASER);
3117
3118 if (!rdists_support_shareable())
3119 tmp &= ~GICR_PROPBASER_SHAREABILITY_MASK;
3120
3121 if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
3122 if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
3123 /*
3124 * The HW reports non-shareable, we must
3125 * remove the cacheability attributes as
3126 * well.
3127 */
3128 val &= ~(GICR_PROPBASER_SHAREABILITY_MASK |
3129 GICR_PROPBASER_CACHEABILITY_MASK);
3130 val |= GICR_PROPBASER_nC;
3131 gicr_write_propbaser(val, rbase + GICR_PROPBASER);
3132 }
3133 pr_info_once("GIC: using cache flushing for LPI property table\n");
3134 gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
3135 }
3136
3137 /* set PENDBASE */
3138 val = (page_to_phys(pend_page) |
3139 GICR_PENDBASER_InnerShareable |
3140 GICR_PENDBASER_RaWaWb);
3141
3142 gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
3143 tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER);
3144
3145 if (!rdists_support_shareable())
3146 tmp &= ~GICR_PENDBASER_SHAREABILITY_MASK;
3147
3148 if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
3149 /*
3150 * The HW reports non-shareable, we must remove the
3151 * cacheability attributes as well.
3152 */
3153 val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
3154 GICR_PENDBASER_CACHEABILITY_MASK);
3155 val |= GICR_PENDBASER_nC;
3156 gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
3157 }
3158
3159 /* Enable LPIs */
3160 val = readl_relaxed(rbase + GICR_CTLR);
3161 val |= GICR_CTLR_ENABLE_LPIS;
3162 writel_relaxed(val, rbase + GICR_CTLR);
3163
3164 out:
3165 if (gic_rdists->has_vlpis && !gic_rdists->has_rvpeid) {
3166 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
3167
3168 /*
3169 * It's possible for a CPU to receive VLPIs before it is
3170 * scheduled as a vPE, especially for the first CPU, and a
3171 * VLPI with an INTID larger than 2^(IDbits+1) will be considered
3172 * out of range and dropped by the GIC.
3173 * So we initialize IDbits to a known value to avoid VLPI drops.
3174 */
3175 val = (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
3176 pr_debug("GICv4: CPU%d: Init IDbits to 0x%llx for GICR_VPROPBASER\n",
3177 smp_processor_id(), val);
3178 gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
3179
3180 /*
3181 * Also clear Valid bit of GICR_VPENDBASER, in case some
3182 * ancient programming gets left in and has possibility of
3183 * corrupting memory.
3184 */
3185 val = its_clear_vpend_valid(vlpi_base, 0, 0);
3186 }
3187
3188 if (allocate_vpe_l1_table()) {
3189 /*
3190 * If the allocation has failed, we're in massive trouble.
3191 * Disable direct injection, and pray that no VM was
3192 * already running...
3193 */
3194 gic_rdists->has_rvpeid = false;
3195 gic_rdists->has_vlpis = false;
3196 }
3197
3198 /* Make sure the GIC has seen the above */
3199 dsb(sy);
3200 gic_data_rdist()->flags |= RD_LOCAL_LPI_ENABLED;
3201 pr_info("GICv3: CPU%d: using %s LPI pending table @%pa\n",
3202 smp_processor_id(),
3203 gic_data_rdist()->flags & RD_LOCAL_PENDTABLE_PREALLOCATED ?
3204 "reserved" : "allocated",
3205 &paddr);
3206 }
3207
3208 static void its_cpu_init_collection(struct its_node *its)
3209 {
3210 int cpu = smp_processor_id();
3211 u64 target;
3212
3213 /* avoid cross node collections and its mapping */
3214 if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
3215 struct device_node *cpu_node;
3216
3217 cpu_node = of_get_cpu_node(cpu, NULL);
3218 if (its->numa_node != NUMA_NO_NODE &&
3219 its->numa_node != of_node_to_nid(cpu_node))
3220 return;
3221 }
3222
3223 /*
3224 * We now have to bind each collection to its target
3225 * redistributor.
3226 */
3227 if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
3228 /*
3229 * This ITS wants the physical address of the
3230 * redistributor.
3231 */
3232 target = gic_data_rdist()->phys_base;
3233 } else {
3234 /* This ITS wants a linear CPU number. */
3235 target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
3236 target = GICR_TYPER_CPU_NUMBER(target) << 16;
3237 }
3238
3239 /* Perform collection mapping */
3240 its->collections[cpu].target_address = target;
3241 its->collections[cpu].col_id = cpu;
3242
3243 its_send_mapc(its, &its->collections[cpu], 1);
3244 its_send_invall(its, &its->collections[cpu]);
3245 }
3246
3247 static void its_cpu_init_collections(void)
3248 {
3249 struct its_node *its;
3250
3251 raw_spin_lock(&its_lock);
3252
3253 list_for_each_entry(its, &its_nodes, entry)
3254 its_cpu_init_collection(its);
3255
3256 raw_spin_unlock(&its_lock);
3257 }
3258
3259 static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
3260 {
3261 struct its_device *its_dev = NULL, *tmp;
3262 unsigned long flags;
3263
3264 raw_spin_lock_irqsave(&its->lock, flags);
3265
3266 list_for_each_entry(tmp, &its->its_device_list, entry) {
3267 if (tmp->device_id == dev_id) {
3268 its_dev = tmp;
3269 break;
3270 }
3271 }
3272
3273 raw_spin_unlock_irqrestore(&its->lock, flags);
3274
3275 return its_dev;
3276 }
3277
3278 static struct its_baser *its_get_baser(struct its_node *its, u32 type)
3279 {
3280 int i;
3281
3282 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
3283 if (GITS_BASER_TYPE(its->tables[i].val) == type)
3284 return &its->tables[i];
3285 }
3286
3287 return NULL;
3288 }
3289
3290 static bool its_alloc_table_entry(struct its_node *its,
3291 struct its_baser *baser, u32 id)
3292 {
3293 struct page *page;
3294 u32 esz, idx;
3295 __le64 *table;
3296
3297 /* Don't allow device id that exceeds single, flat table limit */
3298 esz = GITS_BASER_ENTRY_SIZE(baser->val);
3299 if (!(baser->val & GITS_BASER_INDIRECT))
3300 return (id < (PAGE_ORDER_TO_SIZE(baser->order) / esz));
3301
3302 /* Compute 1st level table index & check if that exceeds table limit */
3303 idx = id >> ilog2(baser->psz / esz);
3304 if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE))
3305 return false;
3306
3307 table = baser->base;
3308
3309 /* Allocate memory for 2nd level table */
3310 if (!table[idx]) {
3311 page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
3312 get_order(baser->psz));
3313 if (!page)
3314 return false;
3315
3316 /* Flush Lvl2 table to PoC if hw doesn't support coherency */
3317 if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
3318 gic_flush_dcache_to_poc(page_address(page), baser->psz);
3319
3320 table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
3321
3322 /* Flush Lvl1 entry to PoC if hw doesn't support coherency */
3323 if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
3324 gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);
3325
3326 /* Ensure updated table contents are visible to ITS hardware */
3327 dsb(sy);
3328 }
3329
3330 return true;
3331 }
3332
3333 static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
3334 {
3335 struct its_baser *baser;
3336
3337 baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);
3338
3339 /* Don't allow device id that exceeds ITS hardware limit */
3340 if (!baser)
3341 return (ilog2(dev_id) < device_ids(its));
3342
3343 return its_alloc_table_entry(its, baser, dev_id);
3344 }
3345
3346 static bool its_alloc_vpe_table(u32 vpe_id)
3347 {
3348 struct its_node *its;
3349 int cpu;
3350
3351 /*
3352 * Make sure the L2 tables are allocated on *all* v4 ITSs. We
3353 * could try and only do it on ITSs corresponding to devices
3354 * that have interrupts targeted at this VPE, but the
3355 * complexity becomes crazy (and you have tons of memory
3356 * anyway, right?).
3357 */
3358 list_for_each_entry(its, &its_nodes, entry) {
3359 struct its_baser *baser;
3360
3361 if (!is_v4(its))
3362 continue;
3363
3364 baser = its_get_baser(its, GITS_BASER_TYPE_VCPU);
3365 if (!baser)
3366 return false;
3367
3368 if (!its_alloc_table_entry(its, baser, vpe_id))
3369 return false;
3370 }
3371
3372 /* Non v4.1? No need to iterate over the RDs, bail out early. */
3373 if (!gic_rdists->has_rvpeid)
3374 return true;
3375
3376 /*
3377 * Make sure the L2 tables are allocated for all copies of
3378 * the L1 table on *all* v4.1 RDs.
3379 */
3380 for_each_possible_cpu(cpu) {
3381 if (!allocate_vpe_l2_table(cpu, vpe_id))
3382 return false;
3383 }
3384
3385 return true;
3386 }
3387
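/*
 * Allocate the per-device state: the ITT backing memory, optionally a
 * block of LPIs with its event-to-collection map, and issue the MAPD
 * command binding the DeviceID to the ITT.
 */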
3388 static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
3389 int nvecs, bool alloc_lpis)
3390 {
3391 struct its_device *dev;
3392 unsigned long *lpi_map = NULL;
3393 unsigned long flags;
3394 u16 *col_map = NULL;
3395 void *itt;
3396 int lpi_base;
3397 int nr_lpis;
3398 int nr_ites;
3399 int sz;
3400
3401 if (!its_alloc_device_table(its, dev_id))
3402 return NULL;
3403
3404 if (WARN_ON(!is_power_of_2(nvecs)))
3405 nvecs = roundup_pow_of_two(nvecs);
3406
3407 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
3408 /*
3409 * Even if the device wants a single LPI, the ITT must be
3410 * sized as a power of two (and you need at least one bit...).
3411 */
3412 nr_ites = max(2, nvecs);
3413 sz = nr_ites * (FIELD_GET(GITS_TYPER_ITT_ENTRY_SIZE, its->typer) + 1);
3414 sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
3415 itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node);
3416 if (alloc_lpis) {
3417 lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis);
3418 if (lpi_map)
3419 col_map = kcalloc(nr_lpis, sizeof(*col_map),
3420 GFP_KERNEL);
3421 } else {
3422 col_map = kcalloc(nr_ites, sizeof(*col_map), GFP_KERNEL);
3423 nr_lpis = 0;
3424 lpi_base = 0;
3425 }
3426
3427 if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) {
3428 kfree(dev);
3429 kfree(itt);
3430 bitmap_free(lpi_map);
3431 kfree(col_map);
3432 return NULL;
3433 }
3434
3435 gic_flush_dcache_to_poc(itt, sz);
3436
3437 dev->its = its;
3438 dev->itt = itt;
3439 dev->nr_ites = nr_ites;
3440 dev->event_map.lpi_map = lpi_map;
3441 dev->event_map.col_map = col_map;
3442 dev->event_map.lpi_base = lpi_base;
3443 dev->event_map.nr_lpis = nr_lpis;
3444 raw_spin_lock_init(&dev->event_map.vlpi_lock);
3445 dev->device_id = dev_id;
3446 INIT_LIST_HEAD(&dev->entry);
3447
3448 raw_spin_lock_irqsave(&its->lock, flags);
3449 list_add(&dev->entry, &its->its_device_list);
3450 raw_spin_unlock_irqrestore(&its->lock, flags);
3451
3452 /* Map device to its ITT */
3453 its_send_mapd(dev, 1);
3454
3455 return dev;
3456 }
3457
3458 static void its_free_device(struct its_device *its_dev)
3459 {
3460 unsigned long flags;
3461
3462 raw_spin_lock_irqsave(&its_dev->its->lock, flags);
3463 list_del(&its_dev->entry);
3464 raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
3465 kfree(its_dev->event_map.col_map);
3466 kfree(its_dev->itt);
3467 kfree(its_dev);
3468 }
3469
3470 static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number_t *hwirq)
3471 {
3472 int idx;
3473
3474 /* Find a free LPI region in lpi_map and allocate them. */
3475 idx = bitmap_find_free_region(dev->event_map.lpi_map,
3476 dev->event_map.nr_lpis,
3477 get_count_order(nvecs));
3478 if (idx < 0)
3479 return -ENOSPC;
3480
3481 *hwirq = dev->event_map.lpi_base + idx;
3482
3483 return 0;
3484 }
3485
3486 static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
3487 int nvec, msi_alloc_info_t *info)
3488 {
3489 struct its_node *its;
3490 struct its_device *its_dev;
3491 struct msi_domain_info *msi_info;
3492 u32 dev_id;
3493 int err = 0;
3494
3495 /*
3496 * We ignore "dev" entirely, and rely on the dev_id that has
3497 * been passed via the scratchpad. This limits this domain's
3498 * usefulness to upper layers that definitely know that they
3499 * are built on top of the ITS.
3500 */
3501 dev_id = info->scratchpad[0].ul;
3502
3503 msi_info = msi_get_domain_info(domain);
3504 its = msi_info->data;
3505
3506 if (!gic_rdists->has_direct_lpi &&
3507 vpe_proxy.dev &&
3508 vpe_proxy.dev->its == its &&
3509 dev_id == vpe_proxy.dev->device_id) {
3510 /* Bad luck. Get yourself a better implementation */
3511 WARN_ONCE(1, "DevId %x clashes with GICv4 VPE proxy device\n",
3512 dev_id);
3513 return -EINVAL;
3514 }
3515
3516 mutex_lock(&its->dev_alloc_lock);
3517 its_dev = its_find_device(its, dev_id);
3518 if (its_dev) {
3519 /*
3520 * We already have seen this ID, probably through
3521 * another alias (PCI bridge of some sort). No need to
3522 * create the device.
3523 */
3524 its_dev->shared = true;
3525 pr_debug("Reusing ITT for devID %x\n", dev_id);
3526 goto out;
3527 }
3528
3529 its_dev = its_create_device(its, dev_id, nvec, true);
3530 if (!its_dev) {
3531 err = -ENOMEM;
3532 goto out;
3533 }
3534
3535 if (info->flags & MSI_ALLOC_FLAGS_PROXY_DEVICE)
3536 its_dev->shared = true;
3537
3538 pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec));
3539 out:
3540 mutex_unlock(&its->dev_alloc_lock);
3541 info->scratchpad[0].ptr = its_dev;
3542 return err;
3543 }
3544
3545 static struct msi_domain_ops its_msi_domain_ops = {
3546 .msi_prepare = its_msi_prepare,
3547 };
3548
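/*
 * Ask the parent GIC domain to allocate the LPI, using the 3-cell DT
 * encoding when the parent has an OF node and the 2-cell fwnode
 * encoding otherwise.
 */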
3549 static int its_irq_gic_domain_alloc(struct irq_domain *domain,
3550 unsigned int virq,
3551 irq_hw_number_t hwirq)
3552 {
3553 struct irq_fwspec fwspec;
3554
3555 if (irq_domain_get_of_node(domain->parent)) {
3556 fwspec.fwnode = domain->parent->fwnode;
3557 fwspec.param_count = 3;
3558 fwspec.param[0] = GIC_IRQ_TYPE_LPI;
3559 fwspec.param[1] = hwirq;
3560 fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
3561 } else if (is_fwnode_irqchip(domain->parent->fwnode)) {
3562 fwspec.fwnode = domain->parent->fwnode;
3563 fwspec.param_count = 2;
3564 fwspec.param[0] = hwirq;
3565 fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
3566 } else {
3567 return -EINVAL;
3568 }
3569
3570 return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
3571 }
3572
3573 static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
3574 unsigned int nr_irqs, void *args)
3575 {
3576 msi_alloc_info_t *info = args;
3577 struct its_device *its_dev = info->scratchpad[0].ptr;
3578 struct its_node *its = its_dev->its;
3579 struct irq_data *irqd;
3580 irq_hw_number_t hwirq;
3581 int err;
3582 int i;
3583
3584 err = its_alloc_device_irq(its_dev, nr_irqs, &hwirq);
3585 if (err)
3586 return err;
3587
3588 err = iommu_dma_prepare_msi(info->desc, its->get_msi_base(its_dev));
3589 if (err)
3590 return err;
3591
3592 for (i = 0; i < nr_irqs; i++) {
3593 err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
3594 if (err)
3595 return err;
3596
3597 irq_domain_set_hwirq_and_chip(domain, virq + i,
3598 hwirq + i, &its_irq_chip, its_dev);
3599 irqd = irq_get_irq_data(virq + i);
3600 irqd_set_single_target(irqd);
3601 irqd_set_affinity_on_activate(irqd);
3602 irqd_set_resend_when_in_progress(irqd);
3603 pr_debug("ID:%d pID:%d vID:%d\n",
3604 (int)(hwirq + i - its_dev->event_map.lpi_base),
3605 (int)(hwirq + i), virq + i);
3606 }
3607
3608 return 0;
3609 }
3610
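/*
 * Activation: pick a target CPU for the LPI, record it in the
 * event-to-collection map and issue the MAPTI command binding the
 * event to that collection.
 */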
3611 static int its_irq_domain_activate(struct irq_domain *domain,
3612 struct irq_data *d, bool reserve)
3613 {
3614 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
3615 u32 event = its_get_event_id(d);
3616 int cpu;
3617
3618 cpu = its_select_cpu(d, cpu_online_mask);
3619 if (cpu < 0 || cpu >= nr_cpu_ids)
3620 return -EINVAL;
3621
3622 its_inc_lpi_count(d, cpu);
3623 its_dev->event_map.col_map[event] = cpu;
3624 irq_data_update_effective_affinity(d, cpumask_of(cpu));
3625
3626 /* Map the GIC IRQ and event to the device */
3627 its_send_mapti(its_dev, d->hwirq, event);
3628 return 0;
3629 }
3630
3631 static void its_irq_domain_deactivate(struct irq_domain *domain,
3632 struct irq_data *d)
3633 {
3634 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
3635 u32 event = its_get_event_id(d);
3636
3637 its_dec_lpi_count(d, its_dev->event_map.col_map[event]);
3638 /* Stop the delivery of interrupts */
3639 its_send_discard(its_dev, event);
3640 }
3641
3642 static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
3643 unsigned int nr_irqs)
3644 {
3645 struct irq_data *d = irq_domain_get_irq_data(domain, virq);
3646 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
3647 struct its_node *its = its_dev->its;
3648 int i;
3649
3650 bitmap_release_region(its_dev->event_map.lpi_map,
3651 its_get_event_id(irq_domain_get_irq_data(domain, virq)),
3652 get_count_order(nr_irqs));
3653
3654 for (i = 0; i < nr_irqs; i++) {
3655 struct irq_data *data = irq_domain_get_irq_data(domain,
3656 virq + i);
3657 /* Nuke the entry in the domain */
3658 irq_domain_reset_irq_data(data);
3659 }
3660
3661 mutex_lock(&its->dev_alloc_lock);
3662
3663 /*
3664 * If all interrupts have been freed, start mopping the
3665 * floor. This is conditioned on the device not being shared.
3666 */
3667 if (!its_dev->shared &&
3668 bitmap_empty(its_dev->event_map.lpi_map,
3669 its_dev->event_map.nr_lpis)) {
3670 its_lpi_free(its_dev->event_map.lpi_map,
3671 its_dev->event_map.lpi_base,
3672 its_dev->event_map.nr_lpis);
3673
3674 /* Unmap device/itt */
3675 its_send_mapd(its_dev, 0);
3676 its_free_device(its_dev);
3677 }
3678
3679 mutex_unlock(&its->dev_alloc_lock);
3680
3681 irq_domain_free_irqs_parent(domain, virq, nr_irqs);
3682 }
3683
3684 static const struct irq_domain_ops its_domain_ops = {
3685 .alloc = its_irq_domain_alloc,
3686 .free = its_irq_domain_free,
3687 .activate = its_irq_domain_activate,
3688 .deactivate = its_irq_domain_deactivate,
3689 };
3690
3691 /*
3692 * This is insane.
3693 *
3694 * If a GICv4.0 doesn't implement Direct LPIs (which is extremely
3695 * likely), the only way to perform an invalidate is to use a fake
3696 * device to issue an INV command, implying that the LPI has first
3697 * been mapped to some event on that device. Since this is not exactly
3698 * cheap, we try to keep that mapping around as long as possible, and
3699 * only issue an UNMAP if we're short on available slots.
3700 *
3701 * Broken by design(tm).
3702 *
3703 * GICv4.1, on the other hand, mandates that we're able to invalidate
3704 * by writing to a MMIO register. It doesn't implement the whole of
3705 * DirectLPI, but that's good enough. And most of the time, we don't
3706 * even have to invalidate anything, as the redistributor can be told
3707 * whether to generate a doorbell or not (we thus leave it enabled,
3708 * always).
3709 */
3710 static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe)
3711 {
3712 /* GICv4.1 doesn't use a proxy, so nothing to do here */
3713 if (gic_rdists->has_rvpeid)
3714 return;
3715
3716 /* Already unmapped? */
3717 if (vpe->vpe_proxy_event == -1)
3718 return;
3719
3720 its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event);
3721 vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL;
3722
3723 /*
3724 * We don't track empty slots at all, so let's move the
3725 * next_victim pointer if we can quickly reuse that slot
3726 * instead of nuking an existing entry. Not clear that this is
3727 * always a win though, and this might just generate a ripple
3728 * effect... Let's just hope VPEs don't migrate too often.
3729 */
3730 if (vpe_proxy.vpes[vpe_proxy.next_victim])
3731 vpe_proxy.next_victim = vpe->vpe_proxy_event;
3732
3733 vpe->vpe_proxy_event = -1;
3734 }
3735
3736 static void its_vpe_db_proxy_unmap(struct its_vpe *vpe)
3737 {
3738 /* GICv4.1 doesn't use a proxy, so nothing to do here */
3739 if (gic_rdists->has_rvpeid)
3740 return;
3741
3742 if (!gic_rdists->has_direct_lpi) {
3743 unsigned long flags;
3744
3745 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
3746 its_vpe_db_proxy_unmap_locked(vpe);
3747 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
3748 }
3749 }
3750
3751 static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe)
3752 {
3753 /* GICv4.1 doesn't use a proxy, so nothing to do here */
3754 if (gic_rdists->has_rvpeid)
3755 return;
3756
3757 /* Already mapped? */
3758 if (vpe->vpe_proxy_event != -1)
3759 return;
3760
3761 /* This slot was already allocated. Kick the other VPE out. */
3762 if (vpe_proxy.vpes[vpe_proxy.next_victim])
3763 its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]);
3764
3765 /* Map the new VPE instead */
3766 vpe_proxy.vpes[vpe_proxy.next_victim] = vpe;
3767 vpe->vpe_proxy_event = vpe_proxy.next_victim;
3768 vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % vpe_proxy.dev->nr_ites;
3769
3770 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx;
3771 its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event);
3772 }
3773
3774 static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to)
3775 {
3776 unsigned long flags;
3777 struct its_collection *target_col;
3778
3779 /* GICv4.1 doesn't use a proxy, so nothing to do here */
3780 if (gic_rdists->has_rvpeid)
3781 return;
3782
3783 if (gic_rdists->has_direct_lpi) {
3784 void __iomem *rdbase;
3785
3786 rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base;
3787 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
3788 wait_for_syncr(rdbase);
3789
3790 return;
3791 }
3792
3793 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
3794
3795 its_vpe_db_proxy_map_locked(vpe);
3796
3797 target_col = &vpe_proxy.dev->its->collections[to];
3798 its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event);
3799 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to;
3800
3801 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
3802 }
3803
3804 static int its_vpe_set_affinity(struct irq_data *d,
3805 const struct cpumask *mask_val,
3806 bool force)
3807 {
3808 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3809 struct cpumask common, *table_mask;
3810 unsigned long flags;
3811 int from, cpu;
3812
3813 /*
3814 * Changing affinity is mega expensive, so let's be as lazy as
3815 * we can and only do it if we really have to. Also, if mapped
3816 * into the proxy device, we need to move the doorbell
3817 * interrupt to its new location.
3818 *
3819 * Another thing is that changing the affinity of a vPE affects
3820 * *other interrupts* such as all the vLPIs that are routed to
3821 * this vPE. This means that the irq_desc lock is not enough to
3822 * protect us, and that we must ensure nobody samples vpe->col_idx
3823 * during the update, hence the lock below which must also be
3824 * taken on any vLPI handling path that evaluates vpe->col_idx.
3825 */
3826 from = vpe_to_cpuid_lock(vpe, &flags);
3827 table_mask = gic_data_rdist_cpu(from)->vpe_table_mask;
3828
3829 /*
3830 * If we are offered another CPU in the same GICv4.1 ITS
3831 * affinity, pick this one. Otherwise, any CPU will do.
3832 */
3833 if (table_mask && cpumask_and(&common, mask_val, table_mask))
3834 cpu = cpumask_test_cpu(from, &common) ? from : cpumask_first(&common);
3835 else
3836 cpu = cpumask_first(mask_val);
3837
3838 if (from == cpu)
3839 goto out;
3840
3841 vpe->col_idx = cpu;
3842
3843 its_send_vmovp(vpe);
3844 its_vpe_db_proxy_move(vpe, from, cpu);
3845
3846 out:
3847 irq_data_update_effective_affinity(d, cpumask_of(cpu));
3848 vpe_to_cpuid_unlock(vpe, flags);
3849
3850 return IRQ_SET_MASK_OK_DONE;
3851 }
3852
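/*
 * Wait for the redistributor to finish parsing the vPE's pending table
 * (GICR_VPENDBASER.Dirty clearing) after the vPE has been made resident.
 * Only implementations advertising Valid/Dirty support report this.
 */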
3853 static void its_wait_vpt_parse_complete(void)
3854 {
3855 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
3856 u64 val;
3857
3858 if (!gic_rdists->has_vpend_valid_dirty)
3859 return;
3860
3861 WARN_ON_ONCE(readq_relaxed_poll_timeout_atomic(vlpi_base + GICR_VPENDBASER,
3862 val,
3863 !(val & GICR_VPENDBASER_Dirty),
3864 1, 500));
3865 }
3866
3867 static void its_vpe_schedule(struct its_vpe *vpe)
3868 {
3869 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
3870 u64 val;
3871
3872 /* Schedule the VPE */
3873 val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) &
3874 GENMASK_ULL(51, 12);
3875 val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
3876 if (rdists_support_shareable()) {
3877 val |= GICR_VPROPBASER_RaWb;
3878 val |= GICR_VPROPBASER_InnerShareable;
3879 }
3880 gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
3881
3882 val = virt_to_phys(page_address(vpe->vpt_page)) &
3883 GENMASK_ULL(51, 16);
3884 if (rdists_support_shareable()) {
3885 val |= GICR_VPENDBASER_RaWaWb;
3886 val |= GICR_VPENDBASER_InnerShareable;
3887 }
3888 /*
3889 * There is no good way of finding out if the pending table is
3890 * empty as we can race against the doorbell interrupt very
3891 * easily. So in the end, vpe->pending_last is only an
3892 * indication that the vcpu has something pending, not one
3893 * that the pending table is empty. A good implementation
3894 * would be able to read its coarse map pretty quickly anyway,
3895 * making this a tolerable issue.
3896 */
3897 val |= GICR_VPENDBASER_PendingLast;
3898 val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0;
3899 val |= GICR_VPENDBASER_Valid;
3900 gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
3901 }
3902
3903 static void its_vpe_deschedule(struct its_vpe *vpe)
3904 {
3905 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
3906 u64 val;
3907
3908 val = its_clear_vpend_valid(vlpi_base, 0, 0);
3909
3910 vpe->idai = !!(val & GICR_VPENDBASER_IDAI);
3911 vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
3912 }
3913
3914 static void its_vpe_invall(struct its_vpe *vpe)
3915 {
3916 struct its_node *its;
3917
3918 list_for_each_entry(its, &its_nodes, entry) {
3919 if (!is_v4(its))
3920 continue;
3921
3922 if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr])
3923 continue;
3924
3925 /*
3926 * Sending a VINVALL to a single ITS is enough, as all
3927 * we need is to reach the redistributors.
3928 */
3929 its_send_vinvall(its, vpe);
3930 return;
3931 }
3932 }
3933
3934 static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
3935 {
3936 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3937 struct its_cmd_info *info = vcpu_info;
3938
3939 switch (info->cmd_type) {
3940 case SCHEDULE_VPE:
3941 its_vpe_schedule(vpe);
3942 return 0;
3943
3944 case DESCHEDULE_VPE:
3945 its_vpe_deschedule(vpe);
3946 return 0;
3947
3948 case COMMIT_VPE:
3949 its_wait_vpt_parse_complete();
3950 return 0;
3951
3952 case INVALL_VPE:
3953 its_vpe_invall(vpe);
3954 return 0;
3955
3956 default:
3957 return -EINVAL;
3958 }
3959 }
3960
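/*
 * Issue an INT/CLEAR/INV on behalf of a vPE doorbell by (lazily) mapping
 * the doorbell onto the GICv4.0 proxy device and targeting the
 * corresponding proxy event.
 */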
3961 static void its_vpe_send_cmd(struct its_vpe *vpe,
3962 void (*cmd)(struct its_device *, u32))
3963 {
3964 unsigned long flags;
3965
3966 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
3967
3968 its_vpe_db_proxy_map_locked(vpe);
3969 cmd(vpe_proxy.dev, vpe->vpe_proxy_event);
3970
3971 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
3972 }
3973
3974 static void its_vpe_send_inv(struct irq_data *d)
3975 {
3976 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3977
3978 if (gic_rdists->has_direct_lpi)
3979 __direct_lpi_inv(d, d->parent_data->hwirq);
3980 else
3981 its_vpe_send_cmd(vpe, its_send_inv);
3982 }
3983
3984 static void its_vpe_mask_irq(struct irq_data *d)
3985 {
3986 /*
3987 	 * We need to mask the LPI, which is described by the parent
3988 	 * irq_data. Instead of calling into the parent (which won't
3989 	 * exactly do the right thing), let's simply use the
3990 	 * parent_data pointer. Yes, I'm naughty.
3991 */
3992 lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0);
3993 its_vpe_send_inv(d);
3994 }
3995
3996 static void its_vpe_unmask_irq(struct irq_data *d)
3997 {
3998 /* Same hack as above... */
3999 lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED);
4000 its_vpe_send_inv(d);
4001 }
4002
4003 static int its_vpe_set_irqchip_state(struct irq_data *d,
4004 enum irqchip_irq_state which,
4005 bool state)
4006 {
4007 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4008
4009 if (which != IRQCHIP_STATE_PENDING)
4010 return -EINVAL;
4011
4012 if (gic_rdists->has_direct_lpi) {
4013 void __iomem *rdbase;
4014
4015 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
4016 if (state) {
4017 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR);
4018 } else {
4019 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
4020 wait_for_syncr(rdbase);
4021 }
4022 } else {
4023 if (state)
4024 its_vpe_send_cmd(vpe, its_send_int);
4025 else
4026 its_vpe_send_cmd(vpe, its_send_clear);
4027 }
4028
4029 return 0;
4030 }
4031
4032 static int its_vpe_retrigger(struct irq_data *d)
4033 {
4034 return !its_vpe_set_irqchip_state(d, IRQCHIP_STATE_PENDING, true);
4035 }
4036
4037 static struct irq_chip its_vpe_irq_chip = {
4038 .name = "GICv4-vpe",
4039 .irq_mask = its_vpe_mask_irq,
4040 .irq_unmask = its_vpe_unmask_irq,
4041 .irq_eoi = irq_chip_eoi_parent,
4042 .irq_set_affinity = its_vpe_set_affinity,
4043 .irq_retrigger = its_vpe_retrigger,
4044 .irq_set_irqchip_state = its_vpe_set_irqchip_state,
4045 .irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity,
4046 };
4047
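/*
 * Find (and cache) the first GICv4.1-capable ITS. Commands that can be
 * issued on any v4.1 ITS (INVDB, VSGI) are funnelled through it.
 */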
4048 static struct its_node *find_4_1_its(void)
4049 {
4050 static struct its_node *its = NULL;
4051
4052 if (!its) {
4053 list_for_each_entry(its, &its_nodes, entry) {
4054 if (is_v4_1(its))
4055 return its;
4056 }
4057
4058 /* Oops? */
4059 its = NULL;
4060 }
4061
4062 return its;
4063 }
4064
4065 static void its_vpe_4_1_send_inv(struct irq_data *d)
4066 {
4067 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4068 struct its_node *its;
4069
4070 /*
4071 * GICv4.1 wants doorbells to be invalidated using the
4072 * INVDB command in order to be broadcast to all RDs. Send
4073 * it to the first valid ITS, and let the HW do its magic.
4074 */
4075 its = find_4_1_its();
4076 if (its)
4077 its_send_invdb(its, vpe);
4078 }
4079
4080 static void its_vpe_4_1_mask_irq(struct irq_data *d)
4081 {
4082 lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0);
4083 its_vpe_4_1_send_inv(d);
4084 }
4085
4086 static void its_vpe_4_1_unmask_irq(struct irq_data *d)
4087 {
4088 lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED);
4089 its_vpe_4_1_send_inv(d);
4090 }
4091
4092 static void its_vpe_4_1_schedule(struct its_vpe *vpe,
4093 struct its_cmd_info *info)
4094 {
4095 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
4096 u64 val = 0;
4097
4098 /* Schedule the VPE */
4099 val |= GICR_VPENDBASER_Valid;
4100 val |= info->g0en ? GICR_VPENDBASER_4_1_VGRP0EN : 0;
4101 val |= info->g1en ? GICR_VPENDBASER_4_1_VGRP1EN : 0;
4102 val |= FIELD_PREP(GICR_VPENDBASER_4_1_VPEID, vpe->vpe_id);
4103
4104 gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
4105 }
4106
4107 static void its_vpe_4_1_deschedule(struct its_vpe *vpe,
4108 struct its_cmd_info *info)
4109 {
4110 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
4111 u64 val;
4112
4113 if (info->req_db) {
4114 unsigned long flags;
4115
4116 /*
4117 * vPE is going to block: make the vPE non-resident with
4118 * PendingLast clear and DB set. The GIC guarantees that if
4119 * we read-back PendingLast clear, then a doorbell will be
4120 * delivered when an interrupt comes.
4121 *
4122 	 * Note the locking: it protects against the doorbell interrupt
4123 	 * handler, which can run concurrently and also updates
4124 	 * pending_last.
4125 */
4126 raw_spin_lock_irqsave(&vpe->vpe_lock, flags);
4127 val = its_clear_vpend_valid(vlpi_base,
4128 GICR_VPENDBASER_PendingLast,
4129 GICR_VPENDBASER_4_1_DB);
4130 vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
4131 raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
4132 } else {
4133 /*
4134 * We're not blocking, so just make the vPE non-resident
4135 * with PendingLast set, indicating that we'll be back.
4136 */
4137 val = its_clear_vpend_valid(vlpi_base,
4138 0,
4139 GICR_VPENDBASER_PendingLast);
4140 vpe->pending_last = true;
4141 }
4142 }
4143
4144 static void its_vpe_4_1_invall(struct its_vpe *vpe)
4145 {
4146 void __iomem *rdbase;
4147 unsigned long flags;
4148 u64 val;
4149 int cpu;
4150
4151 val = GICR_INVALLR_V;
4152 val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id);
4153
4154 /* Target the redistributor this vPE is currently known on */
4155 cpu = vpe_to_cpuid_lock(vpe, &flags);
4156 raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
4157 rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
4158 gic_write_lpir(val, rdbase + GICR_INVALLR);
4159
4160 wait_for_syncr(rdbase);
4161 raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
4162 vpe_to_cpuid_unlock(vpe, flags);
4163 }
4164
4165 static int its_vpe_4_1_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
4166 {
4167 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4168 struct its_cmd_info *info = vcpu_info;
4169
4170 switch (info->cmd_type) {
4171 case SCHEDULE_VPE:
4172 its_vpe_4_1_schedule(vpe, info);
4173 return 0;
4174
4175 case DESCHEDULE_VPE:
4176 its_vpe_4_1_deschedule(vpe, info);
4177 return 0;
4178
4179 case COMMIT_VPE:
4180 its_wait_vpt_parse_complete();
4181 return 0;
4182
4183 case INVALL_VPE:
4184 its_vpe_4_1_invall(vpe);
4185 return 0;
4186
4187 default:
4188 return -EINVAL;
4189 }
4190 }
4191
4192 static struct irq_chip its_vpe_4_1_irq_chip = {
4193 .name = "GICv4.1-vpe",
4194 .irq_mask = its_vpe_4_1_mask_irq,
4195 .irq_unmask = its_vpe_4_1_unmask_irq,
4196 .irq_eoi = irq_chip_eoi_parent,
4197 .irq_set_affinity = its_vpe_set_affinity,
4198 .irq_set_vcpu_affinity = its_vpe_4_1_set_vcpu_affinity,
4199 };
4200
4201 static void its_configure_sgi(struct irq_data *d, bool clear)
4202 {
4203 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4204 struct its_cmd_desc desc;
4205
4206 desc.its_vsgi_cmd.vpe = vpe;
4207 desc.its_vsgi_cmd.sgi = d->hwirq;
4208 desc.its_vsgi_cmd.priority = vpe->sgi_config[d->hwirq].priority;
4209 desc.its_vsgi_cmd.enable = vpe->sgi_config[d->hwirq].enabled;
4210 desc.its_vsgi_cmd.group = vpe->sgi_config[d->hwirq].group;
4211 desc.its_vsgi_cmd.clear = clear;
4212
4213 /*
4214 * GICv4.1 allows us to send VSGI commands to any ITS as long as the
4215 * destination VPE is mapped there. Since we map them eagerly at
4216 * activation time, we're pretty sure the first GICv4.1 ITS will do.
4217 */
4218 its_send_single_vcommand(find_4_1_its(), its_build_vsgi_cmd, &desc);
4219 }
4220
4221 static void its_sgi_mask_irq(struct irq_data *d)
4222 {
4223 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4224
4225 vpe->sgi_config[d->hwirq].enabled = false;
4226 its_configure_sgi(d, false);
4227 }
4228
4229 static void its_sgi_unmask_irq(struct irq_data *d)
4230 {
4231 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4232
4233 vpe->sgi_config[d->hwirq].enabled = true;
4234 its_configure_sgi(d, false);
4235 }
4236
4237 static int its_sgi_set_affinity(struct irq_data *d,
4238 const struct cpumask *mask_val,
4239 bool force)
4240 {
4241 /*
4242 * There is no notion of affinity for virtual SGIs, at least
4243 * not on the host (since they can only be targeting a vPE).
4244 * Tell the kernel we've done whatever it asked for.
4245 */
4246 irq_data_update_effective_affinity(d, mask_val);
4247 return IRQ_SET_MASK_OK;
4248 }
4249
4250 static int its_sgi_set_irqchip_state(struct irq_data *d,
4251 enum irqchip_irq_state which,
4252 bool state)
4253 {
4254 if (which != IRQCHIP_STATE_PENDING)
4255 return -EINVAL;
4256
4257 if (state) {
4258 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4259 struct its_node *its = find_4_1_its();
4260 u64 val;
4261
4262 val = FIELD_PREP(GITS_SGIR_VPEID, vpe->vpe_id);
4263 val |= FIELD_PREP(GITS_SGIR_VINTID, d->hwirq);
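		/*
		 * sgir_base maps the 64kB frame at phys_base + SZ_128K (see
		 * its_probe_one()), hence the adjustment of the architectural
		 * GITS_SGIR offset below.
		 */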
4264 writeq_relaxed(val, its->sgir_base + GITS_SGIR - SZ_128K);
4265 } else {
4266 its_configure_sgi(d, true);
4267 }
4268
4269 return 0;
4270 }
4271
4272 static int its_sgi_get_irqchip_state(struct irq_data *d,
4273 enum irqchip_irq_state which, bool *val)
4274 {
4275 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4276 void __iomem *base;
4277 unsigned long flags;
4278 u32 count = 1000000; /* 1s! */
4279 u32 status;
4280 int cpu;
4281
4282 if (which != IRQCHIP_STATE_PENDING)
4283 return -EINVAL;
4284
4285 /*
4286 * Locking galore! We can race against two different events:
4287 *
4288 * - Concurrent vPE affinity change: we must make sure it cannot
4289 * happen, or we'll talk to the wrong redistributor. This is
4290 * identical to what happens with vLPIs.
4291 *
4292 * - Concurrent VSGIPENDR access: As it involves accessing two
4293 * MMIO registers, this must be made atomic one way or another.
4294 */
4295 cpu = vpe_to_cpuid_lock(vpe, &flags);
4296 raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
4297 base = gic_data_rdist_cpu(cpu)->rd_base + SZ_128K;
4298 writel_relaxed(vpe->vpe_id, base + GICR_VSGIR);
4299 do {
4300 status = readl_relaxed(base + GICR_VSGIPENDR);
4301 if (!(status & GICR_VSGIPENDR_BUSY))
4302 goto out;
4303
4304 count--;
4305 if (!count) {
4306 pr_err_ratelimited("Unable to get SGI status\n");
4307 goto out;
4308 }
4309 cpu_relax();
4310 udelay(1);
4311 } while (count);
4312
4313 out:
4314 raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
4315 vpe_to_cpuid_unlock(vpe, flags);
4316
4317 if (!count)
4318 return -ENXIO;
4319
4320 *val = !!(status & (1 << d->hwirq));
4321
4322 return 0;
4323 }
4324
4325 static int its_sgi_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
4326 {
4327 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4328 struct its_cmd_info *info = vcpu_info;
4329
4330 switch (info->cmd_type) {
4331 case PROP_UPDATE_VSGI:
4332 vpe->sgi_config[d->hwirq].priority = info->priority;
4333 vpe->sgi_config[d->hwirq].group = info->group;
4334 its_configure_sgi(d, false);
4335 return 0;
4336
4337 default:
4338 return -EINVAL;
4339 }
4340 }
4341
4342 static struct irq_chip its_sgi_irq_chip = {
4343 .name = "GICv4.1-sgi",
4344 .irq_mask = its_sgi_mask_irq,
4345 .irq_unmask = its_sgi_unmask_irq,
4346 .irq_set_affinity = its_sgi_set_affinity,
4347 .irq_set_irqchip_state = its_sgi_set_irqchip_state,
4348 .irq_get_irqchip_state = its_sgi_get_irqchip_state,
4349 .irq_set_vcpu_affinity = its_sgi_set_vcpu_affinity,
4350 };
4351
4352 static int its_sgi_irq_domain_alloc(struct irq_domain *domain,
4353 unsigned int virq, unsigned int nr_irqs,
4354 void *args)
4355 {
4356 struct its_vpe *vpe = args;
4357 int i;
4358
4359 /* Yes, we do want 16 SGIs */
4360 WARN_ON(nr_irqs != 16);
4361
4362 for (i = 0; i < 16; i++) {
4363 vpe->sgi_config[i].priority = 0;
4364 vpe->sgi_config[i].enabled = false;
4365 vpe->sgi_config[i].group = false;
4366
4367 irq_domain_set_hwirq_and_chip(domain, virq + i, i,
4368 &its_sgi_irq_chip, vpe);
4369 irq_set_status_flags(virq + i, IRQ_DISABLE_UNLAZY);
4370 }
4371
4372 return 0;
4373 }
4374
4375 static void its_sgi_irq_domain_free(struct irq_domain *domain,
4376 unsigned int virq,
4377 unsigned int nr_irqs)
4378 {
4379 /* Nothing to do */
4380 }
4381
4382 static int its_sgi_irq_domain_activate(struct irq_domain *domain,
4383 struct irq_data *d, bool reserve)
4384 {
4385 /* Write out the initial SGI configuration */
4386 its_configure_sgi(d, false);
4387 return 0;
4388 }
4389
4390 static void its_sgi_irq_domain_deactivate(struct irq_domain *domain,
4391 struct irq_data *d)
4392 {
4393 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4394
4395 /*
4396 * The VSGI command is awkward:
4397 *
4398 * - To change the configuration, CLEAR must be set to false,
4399 * leaving the pending bit unchanged.
4400 * - To clear the pending bit, CLEAR must be set to true, leaving
4401 * the configuration unchanged.
4402 *
4403 * You just can't do both at once, hence the two commands below.
4404 */
4405 vpe->sgi_config[d->hwirq].enabled = false;
4406 its_configure_sgi(d, false);
4407 its_configure_sgi(d, true);
4408 }
4409
4410 static const struct irq_domain_ops its_sgi_domain_ops = {
4411 .alloc = its_sgi_irq_domain_alloc,
4412 .free = its_sgi_irq_domain_free,
4413 .activate = its_sgi_irq_domain_activate,
4414 .deactivate = its_sgi_irq_domain_deactivate,
4415 };
4416
4417 static int its_vpe_id_alloc(void)
4418 {
4419 return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL);
4420 }
4421
4422 static void its_vpe_id_free(u16 id)
4423 {
4424 ida_simple_remove(&its_vpeid_ida, id);
4425 }
4426
4427 static int its_vpe_init(struct its_vpe *vpe)
4428 {
4429 struct page *vpt_page;
4430 int vpe_id;
4431
4432 /* Allocate vpe_id */
4433 vpe_id = its_vpe_id_alloc();
4434 if (vpe_id < 0)
4435 return vpe_id;
4436
4437 /* Allocate VPT */
4438 vpt_page = its_allocate_pending_table(GFP_KERNEL);
4439 if (!vpt_page) {
4440 its_vpe_id_free(vpe_id);
4441 return -ENOMEM;
4442 }
4443
4444 if (!its_alloc_vpe_table(vpe_id)) {
4445 its_vpe_id_free(vpe_id);
4446 its_free_pending_table(vpt_page);
4447 return -ENOMEM;
4448 }
4449
4450 raw_spin_lock_init(&vpe->vpe_lock);
4451 vpe->vpe_id = vpe_id;
4452 vpe->vpt_page = vpt_page;
4453 if (gic_rdists->has_rvpeid)
4454 atomic_set(&vpe->vmapp_count, 0);
4455 else
4456 vpe->vpe_proxy_event = -1;
4457
4458 return 0;
4459 }
4460
4461 static void its_vpe_teardown(struct its_vpe *vpe)
4462 {
4463 its_vpe_db_proxy_unmap(vpe);
4464 its_vpe_id_free(vpe->vpe_id);
4465 its_free_pending_table(vpe->vpt_page);
4466 }
4467
4468 static void its_vpe_irq_domain_free(struct irq_domain *domain,
4469 unsigned int virq,
4470 unsigned int nr_irqs)
4471 {
4472 struct its_vm *vm = domain->host_data;
4473 int i;
4474
4475 irq_domain_free_irqs_parent(domain, virq, nr_irqs);
4476
4477 for (i = 0; i < nr_irqs; i++) {
4478 struct irq_data *data = irq_domain_get_irq_data(domain,
4479 virq + i);
4480 struct its_vpe *vpe = irq_data_get_irq_chip_data(data);
4481
4482 BUG_ON(vm != vpe->its_vm);
4483
4484 clear_bit(data->hwirq, vm->db_bitmap);
4485 its_vpe_teardown(vpe);
4486 irq_domain_reset_irq_data(data);
4487 }
4488
4489 if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) {
4490 its_lpi_free(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis);
4491 its_free_prop_table(vm->vprop_page);
4492 }
4493 }
4494
4495 static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
4496 unsigned int nr_irqs, void *args)
4497 {
4498 struct irq_chip *irqchip = &its_vpe_irq_chip;
4499 struct its_vm *vm = args;
4500 unsigned long *bitmap;
4501 struct page *vprop_page;
4502 int base, nr_ids, i, err = 0;
4503
4504 bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids);
4505 if (!bitmap)
4506 return -ENOMEM;
4507
4508 if (nr_ids < nr_irqs) {
4509 its_lpi_free(bitmap, base, nr_ids);
4510 return -ENOMEM;
4511 }
4512
4513 vprop_page = its_allocate_prop_table(GFP_KERNEL);
4514 if (!vprop_page) {
4515 its_lpi_free(bitmap, base, nr_ids);
4516 return -ENOMEM;
4517 }
4518
4519 vm->db_bitmap = bitmap;
4520 vm->db_lpi_base = base;
4521 vm->nr_db_lpis = nr_ids;
4522 vm->vprop_page = vprop_page;
4523
4524 if (gic_rdists->has_rvpeid)
4525 irqchip = &its_vpe_4_1_irq_chip;
4526
4527 for (i = 0; i < nr_irqs; i++) {
4528 vm->vpes[i]->vpe_db_lpi = base + i;
4529 err = its_vpe_init(vm->vpes[i]);
4530 if (err)
4531 break;
4532 err = its_irq_gic_domain_alloc(domain, virq + i,
4533 vm->vpes[i]->vpe_db_lpi);
4534 if (err)
4535 break;
4536 irq_domain_set_hwirq_and_chip(domain, virq + i, i,
4537 irqchip, vm->vpes[i]);
4538 set_bit(i, bitmap);
4539 irqd_set_resend_when_in_progress(irq_get_irq_data(virq + i));
4540 }
4541
4542 if (err)
4543 its_vpe_irq_domain_free(domain, virq, i);
4544
4545 return err;
4546 }
4547
4548 static int its_vpe_irq_domain_activate(struct irq_domain *domain,
4549 struct irq_data *d, bool reserve)
4550 {
4551 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4552 struct its_node *its;
4553
4554 /*
4555 * If we use the list map, we issue VMAPP on demand... Unless
4556 * we're on a GICv4.1 and we eagerly map the VPE on all ITSs
4557 * so that VSGIs can work.
4558 */
4559 if (!gic_requires_eager_mapping())
4560 return 0;
4561
4562 /* Map the VPE to the first possible CPU */
4563 vpe->col_idx = cpumask_first(cpu_online_mask);
4564
4565 list_for_each_entry(its, &its_nodes, entry) {
4566 if (!is_v4(its))
4567 continue;
4568
4569 its_send_vmapp(its, vpe, true);
4570 its_send_vinvall(its, vpe);
4571 }
4572
4573 irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
4574
4575 return 0;
4576 }
4577
4578 static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,
4579 struct irq_data *d)
4580 {
4581 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4582 struct its_node *its;
4583
4584 /*
4585 * If we use the list map on GICv4.0, we unmap the VPE once no
4586 * VLPIs are associated with the VM.
4587 */
4588 if (!gic_requires_eager_mapping())
4589 return;
4590
4591 list_for_each_entry(its, &its_nodes, entry) {
4592 if (!is_v4(its))
4593 continue;
4594
4595 its_send_vmapp(its, vpe, false);
4596 }
4597
4598 /*
4599 	 * There may be a direct read to the VPT after unmapping the
4600 	 * vPE; to guarantee the validity of that read, make the VPT
4601 	 * memory coherent with the CPU caches here.
4602 */
4603 if (find_4_1_its() && !atomic_read(&vpe->vmapp_count))
4604 gic_flush_dcache_to_poc(page_address(vpe->vpt_page),
4605 LPI_PENDBASE_SZ);
4606 }
4607
4608 static const struct irq_domain_ops its_vpe_domain_ops = {
4609 .alloc = its_vpe_irq_domain_alloc,
4610 .free = its_vpe_irq_domain_free,
4611 .activate = its_vpe_irq_domain_activate,
4612 .deactivate = its_vpe_irq_domain_deactivate,
4613 };
4614
4615 static int its_force_quiescent(void __iomem *base)
4616 {
4617 u32 count = 1000000; /* 1s */
4618 u32 val;
4619
4620 val = readl_relaxed(base + GITS_CTLR);
4621 /*
4622 * GIC architecture specification requires the ITS to be both
4623 * disabled and quiescent for writes to GITS_BASER<n> or
4624 * GITS_CBASER to not have UNPREDICTABLE results.
4625 */
4626 if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE))
4627 return 0;
4628
4629 /* Disable the generation of all interrupts to this ITS */
4630 val &= ~(GITS_CTLR_ENABLE | GITS_CTLR_ImDe);
4631 writel_relaxed(val, base + GITS_CTLR);
4632
4633 /* Poll GITS_CTLR and wait until ITS becomes quiescent */
4634 while (1) {
4635 val = readl_relaxed(base + GITS_CTLR);
4636 if (val & GITS_CTLR_QUIESCENT)
4637 return 0;
4638
4639 count--;
4640 if (!count)
4641 return -EBUSY;
4642
4643 cpu_relax();
4644 udelay(1);
4645 }
4646 }
4647
4648 static bool __maybe_unused its_enable_quirk_cavium_22375(void *data)
4649 {
4650 struct its_node *its = data;
4651
4652 /* erratum 22375: only alloc 8MB table size (20 bits) */
4653 its->typer &= ~GITS_TYPER_DEVBITS;
4654 its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, 20 - 1);
4655 its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
4656
4657 return true;
4658 }
4659
4660 static bool __maybe_unused its_enable_quirk_cavium_23144(void *data)
4661 {
4662 struct its_node *its = data;
4663
4664 its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
4665
4666 return true;
4667 }
4668
4669 static bool __maybe_unused its_enable_quirk_qdf2400_e0065(void *data)
4670 {
4671 struct its_node *its = data;
4672
4673 /* On QDF2400, the size of the ITE is 16Bytes */
4674 its->typer &= ~GITS_TYPER_ITT_ENTRY_SIZE;
4675 its->typer |= FIELD_PREP(GITS_TYPER_ITT_ENTRY_SIZE, 16 - 1);
4676
4677 return true;
4678 }
4679
4680 static u64 its_irq_get_msi_base_pre_its(struct its_device *its_dev)
4681 {
4682 struct its_node *its = its_dev->its;
4683
4684 /*
4685 * The Socionext Synquacer SoC has a so-called 'pre-ITS',
4686 * which maps 32-bit writes targeted at a separate window of
4687 * size '4 << device_id_bits' onto writes to GITS_TRANSLATER
4688 * with device ID taken from bits [device_id_bits + 1:2] of
4689 * the window offset.
4690 */
4691 return its->pre_its_base + (its_dev->device_id << 2);
4692 }
4693
4694 static bool __maybe_unused its_enable_quirk_socionext_synquacer(void *data)
4695 {
4696 struct its_node *its = data;
4697 u32 pre_its_window[2];
4698 u32 ids;
4699
4700 if (!fwnode_property_read_u32_array(its->fwnode_handle,
4701 "socionext,synquacer-pre-its",
4702 pre_its_window,
4703 ARRAY_SIZE(pre_its_window))) {
4704
4705 its->pre_its_base = pre_its_window[0];
4706 its->get_msi_base = its_irq_get_msi_base_pre_its;
4707
4708 ids = ilog2(pre_its_window[1]) - 2;
4709 if (device_ids(its) > ids) {
4710 its->typer &= ~GITS_TYPER_DEVBITS;
4711 its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, ids - 1);
4712 }
4713
4714 /* the pre-ITS breaks isolation, so disable MSI remapping */
4715 its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_ISOLATED_MSI;
4716 return true;
4717 }
4718 return false;
4719 }
4720
4721 static bool __maybe_unused its_enable_quirk_hip07_161600802(void *data)
4722 {
4723 struct its_node *its = data;
4724
4725 /*
4726 * Hip07 insists on using the wrong address for the VLPI
4727 * page. Trick it into doing the right thing...
4728 */
4729 its->vlpi_redist_offset = SZ_128K;
4730 return true;
4731 }
4732
4733 static bool __maybe_unused its_enable_rk3588001(void *data)
4734 {
4735 struct its_node *its = data;
4736
4737 if (!of_machine_is_compatible("rockchip,rk3588") &&
4738 !of_machine_is_compatible("rockchip,rk3588s"))
4739 return false;
4740
4741 its->flags |= ITS_FLAGS_FORCE_NON_SHAREABLE;
4742 gic_rdists->flags |= RDIST_FLAGS_FORCE_NON_SHAREABLE;
4743
4744 return true;
4745 }
4746
4747 static bool its_set_non_coherent(void *data)
4748 {
4749 struct its_node *its = data;
4750
4751 its->flags |= ITS_FLAGS_FORCE_NON_SHAREABLE;
4752 return true;
4753 }
4754
4755 static const struct gic_quirk its_quirks[] = {
4756 #ifdef CONFIG_CAVIUM_ERRATUM_22375
4757 {
4758 .desc = "ITS: Cavium errata 22375, 24313",
4759 .iidr = 0xa100034c, /* ThunderX pass 1.x */
4760 .mask = 0xffff0fff,
4761 .init = its_enable_quirk_cavium_22375,
4762 },
4763 #endif
4764 #ifdef CONFIG_CAVIUM_ERRATUM_23144
4765 {
4766 .desc = "ITS: Cavium erratum 23144",
4767 .iidr = 0xa100034c, /* ThunderX pass 1.x */
4768 .mask = 0xffff0fff,
4769 .init = its_enable_quirk_cavium_23144,
4770 },
4771 #endif
4772 #ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065
4773 {
4774 .desc = "ITS: QDF2400 erratum 0065",
4775 .iidr = 0x00001070, /* QDF2400 ITS rev 1.x */
4776 .mask = 0xffffffff,
4777 .init = its_enable_quirk_qdf2400_e0065,
4778 },
4779 #endif
4780 #ifdef CONFIG_SOCIONEXT_SYNQUACER_PREITS
4781 {
4782 /*
4783 * The Socionext Synquacer SoC incorporates ARM's own GIC-500
4784 * implementation, but with a 'pre-ITS' added that requires
4785 * special handling in software.
4786 */
4787 .desc = "ITS: Socionext Synquacer pre-ITS",
4788 .iidr = 0x0001143b,
4789 .mask = 0xffffffff,
4790 .init = its_enable_quirk_socionext_synquacer,
4791 },
4792 #endif
4793 #ifdef CONFIG_HISILICON_ERRATUM_161600802
4794 {
4795 .desc = "ITS: Hip07 erratum 161600802",
4796 .iidr = 0x00000004,
4797 .mask = 0xffffffff,
4798 .init = its_enable_quirk_hip07_161600802,
4799 },
4800 #endif
4801 #ifdef CONFIG_ROCKCHIP_ERRATUM_3588001
4802 {
4803 .desc = "ITS: Rockchip erratum RK3588001",
4804 .iidr = 0x0201743b,
4805 .mask = 0xffffffff,
4806 .init = its_enable_rk3588001,
4807 },
4808 #endif
4809 {
4810 .desc = "ITS: non-coherent attribute",
4811 .property = "dma-noncoherent",
4812 .init = its_set_non_coherent,
4813 },
4814 {
4815 }
4816 };
4817
4818 static void its_enable_quirks(struct its_node *its)
4819 {
4820 u32 iidr = readl_relaxed(its->base + GITS_IIDR);
4821
4822 gic_enable_quirks(iidr, its_quirks, its);
4823
4824 if (is_of_node(its->fwnode_handle))
4825 gic_enable_of_quirks(to_of_node(its->fwnode_handle),
4826 its_quirks, its);
4827 }
4828
4829 static int its_save_disable(void)
4830 {
4831 struct its_node *its;
4832 int err = 0;
4833
4834 raw_spin_lock(&its_lock);
4835 list_for_each_entry(its, &its_nodes, entry) {
4836 void __iomem *base;
4837
4838 base = its->base;
4839 its->ctlr_save = readl_relaxed(base + GITS_CTLR);
4840 err = its_force_quiescent(base);
4841 if (err) {
4842 pr_err("ITS@%pa: failed to quiesce: %d\n",
4843 &its->phys_base, err);
4844 writel_relaxed(its->ctlr_save, base + GITS_CTLR);
4845 goto err;
4846 }
4847
4848 its->cbaser_save = gits_read_cbaser(base + GITS_CBASER);
4849 }
4850
4851 err:
4852 if (err) {
4853 list_for_each_entry_continue_reverse(its, &its_nodes, entry) {
4854 void __iomem *base;
4855
4856 base = its->base;
4857 writel_relaxed(its->ctlr_save, base + GITS_CTLR);
4858 }
4859 }
4860 raw_spin_unlock(&its_lock);
4861
4862 return err;
4863 }
4864
4865 static void its_restore_enable(void)
4866 {
4867 struct its_node *its;
4868 int ret;
4869
4870 raw_spin_lock(&its_lock);
4871 list_for_each_entry(its, &its_nodes, entry) {
4872 void __iomem *base;
4873 int i;
4874
4875 base = its->base;
4876
4877 /*
4878 * Make sure that the ITS is disabled. If it fails to quiesce,
4879 * don't restore it since writing to CBASER or BASER<n>
4880 * registers is undefined according to the GIC v3 ITS
4881 * Specification.
4882 *
4883 * Firmware resuming with the ITS enabled is terminally broken.
4884 */
4885 WARN_ON(readl_relaxed(base + GITS_CTLR) & GITS_CTLR_ENABLE);
4886 ret = its_force_quiescent(base);
4887 if (ret) {
4888 pr_err("ITS@%pa: failed to quiesce on resume: %d\n",
4889 &its->phys_base, ret);
4890 continue;
4891 }
4892
4893 gits_write_cbaser(its->cbaser_save, base + GITS_CBASER);
4894
4895 /*
4896 * Writing CBASER resets CREADR to 0, so make CWRITER and
4897 * cmd_write line up with it.
4898 */
4899 its->cmd_write = its->cmd_base;
4900 gits_write_cwriter(0, base + GITS_CWRITER);
4901
4902 /* Restore GITS_BASER from the value cache. */
4903 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
4904 struct its_baser *baser = &its->tables[i];
4905
4906 if (!(baser->val & GITS_BASER_VALID))
4907 continue;
4908
4909 its_write_baser(its, baser, baser->val);
4910 }
4911 writel_relaxed(its->ctlr_save, base + GITS_CTLR);
4912
4913 /*
4914 * Reinit the collection if it's stored in the ITS. This is
4915 * indicated by the col_id being less than the HCC field.
4916 * CID < HCC as specified in the GIC v3 Documentation.
4917 */
4918 if (its->collections[smp_processor_id()].col_id <
4919 GITS_TYPER_HCC(gic_read_typer(base + GITS_TYPER)))
4920 its_cpu_init_collection(its);
4921 }
4922 raw_spin_unlock(&its_lock);
4923 }
4924
4925 static struct syscore_ops its_syscore_ops = {
4926 .suspend = its_save_disable,
4927 .resume = its_restore_enable,
4928 };
4929
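/*
 * Map the first 64kB register frame of an ITS, check that GITS_PIDR2
 * reports a GICv3/v4 architecture revision, and force the ITS quiescent
 * before it is used any further.
 */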
4930 static void __init __iomem *its_map_one(struct resource *res, int *err)
4931 {
4932 void __iomem *its_base;
4933 u32 val;
4934
4935 its_base = ioremap(res->start, SZ_64K);
4936 if (!its_base) {
4937 pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start);
4938 *err = -ENOMEM;
4939 return NULL;
4940 }
4941
4942 val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK;
4943 if (val != 0x30 && val != 0x40) {
4944 pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start);
4945 *err = -ENODEV;
4946 goto out_unmap;
4947 }
4948
4949 *err = its_force_quiescent(its_base);
4950 if (*err) {
4951 pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start);
4952 goto out_unmap;
4953 }
4954
4955 return its_base;
4956
4957 out_unmap:
4958 iounmap(its_base);
4959 return NULL;
4960 }
4961
4962 static int its_init_domain(struct its_node *its)
4963 {
4964 struct irq_domain *inner_domain;
4965 struct msi_domain_info *info;
4966
4967 info = kzalloc(sizeof(*info), GFP_KERNEL);
4968 if (!info)
4969 return -ENOMEM;
4970
4971 info->ops = &its_msi_domain_ops;
4972 info->data = its;
4973
4974 inner_domain = irq_domain_create_hierarchy(its_parent,
4975 its->msi_domain_flags, 0,
4976 its->fwnode_handle, &its_domain_ops,
4977 info);
4978 if (!inner_domain) {
4979 kfree(info);
4980 return -ENOMEM;
4981 }
4982
4983 irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
4984
4985 return 0;
4986 }
4987
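/*
 * Without DirectLPI, set up the GICv4.0 doorbell proxy: a reserved ITS
 * device with (at least) one event slot per possible CPU, used to issue
 * INV/INT/CLEAR commands for vPE doorbells.
 */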
4988 static int its_init_vpe_domain(void)
4989 {
4990 struct its_node *its;
4991 u32 devid;
4992 int entries;
4993
4994 if (gic_rdists->has_direct_lpi) {
4995 pr_info("ITS: Using DirectLPI for VPE invalidation\n");
4996 return 0;
4997 }
4998
4999 /* Any ITS will do, even if not v4 */
5000 its = list_first_entry(&its_nodes, struct its_node, entry);
5001
5002 entries = roundup_pow_of_two(nr_cpu_ids);
5003 vpe_proxy.vpes = kcalloc(entries, sizeof(*vpe_proxy.vpes),
5004 GFP_KERNEL);
5005 if (!vpe_proxy.vpes)
5006 return -ENOMEM;
5007
5008 /* Use the last possible DevID */
5009 devid = GENMASK(device_ids(its) - 1, 0);
5010 vpe_proxy.dev = its_create_device(its, devid, entries, false);
5011 if (!vpe_proxy.dev) {
5012 kfree(vpe_proxy.vpes);
5013 pr_err("ITS: Can't allocate GICv4 proxy device\n");
5014 return -ENOMEM;
5015 }
5016
5017 BUG_ON(entries > vpe_proxy.dev->nr_ites);
5018
5019 raw_spin_lock_init(&vpe_proxy.lock);
5020 vpe_proxy.next_victim = 0;
5021 pr_info("ITS: Allocated DevID %x as GICv4 proxy device (%d slots)\n",
5022 devid, vpe_proxy.dev->nr_ites);
5023
5024 return 0;
5025 }
5026
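/*
 * Allocate an ITSList number for an ITS that lacks single-VMOVP support
 * and program it into GITS_CTLR. If the field turns out to be read-only,
 * fall back to whatever the HW already reports.
 */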
5027 static int __init its_compute_its_list_map(struct its_node *its)
5028 {
5029 int its_number;
5030 u32 ctlr;
5031
5032 /*
5033 * This is assumed to be done early enough that we're
5034 * guaranteed to be single-threaded, hence no
5035 * locking. Should this change, we should address
5036 * this.
5037 */
5038 its_number = find_first_zero_bit(&its_list_map, GICv4_ITS_LIST_MAX);
5039 if (its_number >= GICv4_ITS_LIST_MAX) {
5040 pr_err("ITS@%pa: No ITSList entry available!\n",
5041 &its->phys_base);
5042 return -EINVAL;
5043 }
5044
5045 ctlr = readl_relaxed(its->base + GITS_CTLR);
5046 ctlr &= ~GITS_CTLR_ITS_NUMBER;
5047 ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT;
5048 writel_relaxed(ctlr, its->base + GITS_CTLR);
5049 ctlr = readl_relaxed(its->base + GITS_CTLR);
5050 if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) {
5051 its_number = ctlr & GITS_CTLR_ITS_NUMBER;
5052 its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT;
5053 }
5054
5055 if (test_and_set_bit(its_number, &its_list_map)) {
5056 pr_err("ITS@%pa: Duplicate ITSList entry %d\n",
5057 &its->phys_base, its_number);
5058 return -EINVAL;
5059 }
5060
5061 return its_number;
5062 }
5063
5064 static int __init its_probe_one(struct its_node *its)
5065 {
5066 u64 baser, tmp;
5067 struct page *page;
5068 u32 ctlr;
5069 int err;
5070
5071 its_enable_quirks(its);
5072
5073 if (is_v4(its)) {
5074 if (!(its->typer & GITS_TYPER_VMOVP)) {
5075 err = its_compute_its_list_map(its);
5076 if (err < 0)
5077 goto out;
5078
5079 its->list_nr = err;
5080
5081 pr_info("ITS@%pa: Using ITS number %d\n",
5082 &its->phys_base, err);
5083 } else {
5084 pr_info("ITS@%pa: Single VMOVP capable\n", &its->phys_base);
5085 }
5086
5087 if (is_v4_1(its)) {
5088 u32 svpet = FIELD_GET(GITS_TYPER_SVPET, its->typer);
5089
5090 its->sgir_base = ioremap(its->phys_base + SZ_128K, SZ_64K);
5091 if (!its->sgir_base) {
5092 err = -ENOMEM;
5093 goto out;
5094 }
5095
5096 its->mpidr = readl_relaxed(its->base + GITS_MPIDR);
5097
5098 pr_info("ITS@%pa: Using GICv4.1 mode %08x %08x\n",
5099 &its->phys_base, its->mpidr, svpet);
5100 }
5101 }
5102
5103 page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
5104 get_order(ITS_CMD_QUEUE_SZ));
5105 if (!page) {
5106 err = -ENOMEM;
5107 goto out_unmap_sgir;
5108 }
5109 its->cmd_base = (void *)page_address(page);
5110 its->cmd_write = its->cmd_base;
5111
5112 err = its_alloc_tables(its);
5113 if (err)
5114 goto out_free_cmd;
5115
5116 err = its_alloc_collections(its);
5117 if (err)
5118 goto out_free_tables;
5119
5120 baser = (virt_to_phys(its->cmd_base) |
5121 GITS_CBASER_RaWaWb |
5122 GITS_CBASER_InnerShareable |
5123 (ITS_CMD_QUEUE_SZ / SZ_4K - 1) |
5124 GITS_CBASER_VALID);
5125
5126 gits_write_cbaser(baser, its->base + GITS_CBASER);
5127 tmp = gits_read_cbaser(its->base + GITS_CBASER);
5128
5129 if (its->flags & ITS_FLAGS_FORCE_NON_SHAREABLE)
5130 tmp &= ~GITS_CBASER_SHAREABILITY_MASK;
5131
5132 if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) {
5133 if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) {
5134 /*
5135 * The HW reports non-shareable, we must
5136 * remove the cacheability attributes as
5137 * well.
5138 */
5139 baser &= ~(GITS_CBASER_SHAREABILITY_MASK |
5140 GITS_CBASER_CACHEABILITY_MASK);
5141 baser |= GITS_CBASER_nC;
5142 gits_write_cbaser(baser, its->base + GITS_CBASER);
5143 }
5144 pr_info("ITS: using cache flushing for cmd queue\n");
5145 its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
5146 }
5147
5148 gits_write_cwriter(0, its->base + GITS_CWRITER);
5149 ctlr = readl_relaxed(its->base + GITS_CTLR);
5150 ctlr |= GITS_CTLR_ENABLE;
5151 if (is_v4(its))
5152 ctlr |= GITS_CTLR_ImDe;
5153 writel_relaxed(ctlr, its->base + GITS_CTLR);
5154
5155 err = its_init_domain(its);
5156 if (err)
5157 goto out_free_tables;
5158
5159 raw_spin_lock(&its_lock);
5160 list_add(&its->entry, &its_nodes);
5161 raw_spin_unlock(&its_lock);
5162
5163 return 0;
5164
5165 out_free_tables:
5166 its_free_tables(its);
5167 out_free_cmd:
5168 free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
5169 out_unmap_sgir:
5170 if (its->sgir_base)
5171 iounmap(its->sgir_base);
5172 out:
5173 pr_err("ITS@%pa: failed probing (%d)\n", &its->phys_base, err);
5174 return err;
5175 }
5176
5177 static bool gic_rdists_supports_plpis(void)
5178 {
5179 return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS);
5180 }
5181
5182 static int redist_disable_lpis(void)
5183 {
5184 void __iomem *rbase = gic_data_rdist_rd_base();
5185 u64 timeout = USEC_PER_SEC;
5186 u64 val;
5187
5188 if (!gic_rdists_supports_plpis()) {
5189 pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
5190 return -ENXIO;
5191 }
5192
5193 val = readl_relaxed(rbase + GICR_CTLR);
5194 if (!(val & GICR_CTLR_ENABLE_LPIS))
5195 return 0;
5196
5197 /*
5198 * If coming via a CPU hotplug event, we don't need to disable
5199 * LPIs before trying to re-enable them. They are already
5200 * configured and all is well in the world.
5201 *
5202 * If running with preallocated tables, there is nothing to do.
5203 */
5204 if ((gic_data_rdist()->flags & RD_LOCAL_LPI_ENABLED) ||
5205 (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED))
5206 return 0;
5207
5208 /*
5209 * From that point on, we only try to do some damage control.
5210 */
5211 pr_warn("GICv3: CPU%d: Booted with LPIs enabled, memory probably corrupted\n",
5212 smp_processor_id());
5213 add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
5214
5215 /* Disable LPIs */
5216 val &= ~GICR_CTLR_ENABLE_LPIS;
5217 writel_relaxed(val, rbase + GICR_CTLR);
5218
5219 /* Make sure any change to GICR_CTLR is observable by the GIC */
5220 dsb(sy);
5221
5222 /*
5223 * Software must observe RWP==0 after clearing GICR_CTLR.EnableLPIs
5224 * from 1 to 0 before programming GICR_PEND{PROP}BASER registers.
5225 * Error out if we time out waiting for RWP to clear.
5226 */
5227 while (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_RWP) {
5228 if (!timeout) {
5229 pr_err("CPU%d: Timeout while disabling LPIs\n",
5230 smp_processor_id());
5231 return -ETIMEDOUT;
5232 }
5233 udelay(1);
5234 timeout--;
5235 }
5236
5237 /*
5238 * After it has been written to 1, it is IMPLEMENTATION
5239 * DEFINED whether GICR_CTLR.EnableLPI becomes RES1 or can be
5240 * cleared to 0. Error out if clearing the bit failed.
5241 */
5242 if (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_ENABLE_LPIS) {
5243 pr_err("CPU%d: Failed to disable LPIs\n", smp_processor_id());
5244 return -EBUSY;
5245 }
5246
5247 return 0;
5248 }
5249
5250 int its_cpu_init(void)
5251 {
5252 if (!list_empty(&its_nodes)) {
5253 int ret;
5254
5255 ret = redist_disable_lpis();
5256 if (ret)
5257 return ret;
5258
5259 its_cpu_init_lpis();
5260 its_cpu_init_collections();
5261 }
5262
5263 return 0;
5264 }
5265
5266 static void rdist_memreserve_cpuhp_cleanup_workfn(struct work_struct *work)
5267 {
5268 cpuhp_remove_state_nocalls(gic_rdists->cpuhp_memreserve_state);
5269 gic_rdists->cpuhp_memreserve_state = CPUHP_INVALID;
5270 }
5271
5272 static DECLARE_WORK(rdist_memreserve_cpuhp_cleanup_work,
5273 rdist_memreserve_cpuhp_cleanup_workfn);
5274
5275 static int its_cpu_memreserve_lpi(unsigned int cpu)
5276 {
5277 struct page *pend_page;
5278 int ret = 0;
5279
5280 /* This gets to run exactly once per CPU */
5281 if (gic_data_rdist()->flags & RD_LOCAL_MEMRESERVE_DONE)
5282 return 0;
5283
5284 pend_page = gic_data_rdist()->pend_page;
5285 if (WARN_ON(!pend_page)) {
5286 ret = -ENOMEM;
5287 goto out;
5288 }
5289 /*
5290 * If the pending table was pre-programmed, free the memory we
5291 * preemptively allocated. Otherwise, reserve that memory for
5292 * later kexecs.
5293 */
5294 if (gic_data_rdist()->flags & RD_LOCAL_PENDTABLE_PREALLOCATED) {
5295 its_free_pending_table(pend_page);
5296 gic_data_rdist()->pend_page = NULL;
5297 } else {
5298 phys_addr_t paddr = page_to_phys(pend_page);
5299 WARN_ON(gic_reserve_range(paddr, LPI_PENDBASE_SZ));
5300 }
5301
5302 out:
5303 /* Last CPU being brought up gets to issue the cleanup */
5304 if (!IS_ENABLED(CONFIG_SMP) ||
5305 cpumask_equal(&cpus_booted_once_mask, cpu_possible_mask))
5306 schedule_work(&rdist_memreserve_cpuhp_cleanup_work);
5307
5308 gic_data_rdist()->flags |= RD_LOCAL_MEMRESERVE_DONE;
5309 return ret;
5310 }
5311
5312 /* Mark all the BASER registers as invalid before they get reprogrammed */
5313 static int __init its_reset_one(struct resource *res)
5314 {
5315 void __iomem *its_base;
5316 int err, i;
5317
5318 its_base = its_map_one(res, &err);
5319 if (!its_base)
5320 return err;
5321
5322 for (i = 0; i < GITS_BASER_NR_REGS; i++)
5323 gits_write_baser(0, its_base + GITS_BASER + (i << 3));
5324
5325 iounmap(its_base);
5326 return 0;
5327 }
5328
5329 static const struct of_device_id its_device_id[] = {
5330 { .compatible = "arm,gic-v3-its", },
5331 {},
5332 };
5333
5334 static struct its_node __init *its_node_init(struct resource *res,
5335 struct fwnode_handle *handle, int numa_node)
5336 {
5337 void __iomem *its_base;
5338 struct its_node *its;
5339 int err;
5340
5341 its_base = its_map_one(res, &err);
5342 if (!its_base)
5343 return NULL;
5344
5345 pr_info("ITS %pR\n", res);
5346
5347 its = kzalloc(sizeof(*its), GFP_KERNEL);
5348 if (!its)
5349 goto out_unmap;
5350
5351 raw_spin_lock_init(&its->lock);
5352 mutex_init(&its->dev_alloc_lock);
5353 INIT_LIST_HEAD(&its->entry);
5354 INIT_LIST_HEAD(&its->its_device_list);
5355
5356 its->typer = gic_read_typer(its_base + GITS_TYPER);
5357 its->base = its_base;
5358 its->phys_base = res->start;
5359 its->get_msi_base = its_irq_get_msi_base;
5360 its->msi_domain_flags = IRQ_DOMAIN_FLAG_ISOLATED_MSI;
5361
5362 its->numa_node = numa_node;
5363 its->fwnode_handle = handle;
5364
5365 return its;
5366
5367 out_unmap:
5368 iounmap(its_base);
5369 return NULL;
5370 }
5371
5372 static void its_node_destroy(struct its_node *its)
5373 {
5374 iounmap(its->base);
5375 kfree(its);
5376 }
5377
5378 static int __init its_of_probe(struct device_node *node)
5379 {
5380 struct device_node *np;
5381 struct resource res;
5382 int err;
5383
5384 /*
5385 * Make sure *all* the ITS are reset before we probe any, as
5386 * they may be sharing memory. If any of the ITS fails to
5387 * reset, don't even try to go any further, as this could
5388 * result in something even worse.
5389 */
5390 for (np = of_find_matching_node(node, its_device_id); np;
5391 np = of_find_matching_node(np, its_device_id)) {
5392 if (!of_device_is_available(np) ||
5393 !of_property_read_bool(np, "msi-controller") ||
5394 of_address_to_resource(np, 0, &res))
5395 continue;
5396
5397 err = its_reset_one(&res);
5398 if (err)
5399 return err;
5400 }
5401
5402 for (np = of_find_matching_node(node, its_device_id); np;
5403 np = of_find_matching_node(np, its_device_id)) {
5404 struct its_node *its;
5405
5406 if (!of_device_is_available(np))
5407 continue;
5408 if (!of_property_read_bool(np, "msi-controller")) {
5409 pr_warn("%pOF: no msi-controller property, ITS ignored\n",
5410 np);
5411 continue;
5412 }
5413
5414 if (of_address_to_resource(np, 0, &res)) {
5415 pr_warn("%pOF: no regs?\n", np);
5416 continue;
5417 }
5418
5419
5420 its = its_node_init(&res, &np->fwnode, of_node_to_nid(np));
5421 if (!its)
5422 return -ENOMEM;
5423
5424 err = its_probe_one(its);
5425 if (err) {
5426 its_node_destroy(its);
5427 return err;
5428 }
5429 }
5430 return 0;
5431 }
5432
5433 #ifdef CONFIG_ACPI
5434
5435 #define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K)
5436
5437 #ifdef CONFIG_ACPI_NUMA
5438 struct its_srat_map {
5439 /* numa node id */
5440 u32 numa_node;
5441 /* GIC ITS ID */
5442 u32 its_id;
5443 };
5444
5445 static struct its_srat_map *its_srat_maps __initdata;
5446 static int its_in_srat __initdata;
5447
5448 static int __init acpi_get_its_numa_node(u32 its_id)
5449 {
5450 int i;
5451
5452 for (i = 0; i < its_in_srat; i++) {
5453 if (its_id == its_srat_maps[i].its_id)
5454 return its_srat_maps[i].numa_node;
5455 }
5456 return NUMA_NO_NODE;
5457 }
5458
5459 static int __init gic_acpi_match_srat_its(union acpi_subtable_headers *header,
5460 const unsigned long end)
5461 {
5462 return 0;
5463 }
5464
5465 static int __init gic_acpi_parse_srat_its(union acpi_subtable_headers *header,
5466 const unsigned long end)
5467 {
5468 int node;
5469 struct acpi_srat_gic_its_affinity *its_affinity;
5470
5471 its_affinity = (struct acpi_srat_gic_its_affinity *)header;
5472 if (!its_affinity)
5473 return -EINVAL;
5474
5475 if (its_affinity->header.length < sizeof(*its_affinity)) {
5476 pr_err("SRAT: Invalid header length %d in ITS affinity\n",
5477 its_affinity->header.length);
5478 return -EINVAL;
5479 }
5480
5481 /*
5482 * Note that in theory a new proximity node could be created by this
5483 * entry as it is an SRAT resource allocation structure.
5484 * We do not currently support doing so.
5485 */
5486 node = pxm_to_node(its_affinity->proximity_domain);
5487
5488 if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) {
5489 pr_err("SRAT: Invalid NUMA node %d in ITS affinity\n", node);
5490 return 0;
5491 }
5492
5493 its_srat_maps[its_in_srat].numa_node = node;
5494 its_srat_maps[its_in_srat].its_id = its_affinity->its_id;
5495 its_in_srat++;
5496 pr_info("SRAT: PXM %d -> ITS %d -> Node %d\n",
5497 its_affinity->proximity_domain, its_affinity->its_id, node);
5498
5499 return 0;
5500 }
5501
5502 static void __init acpi_table_parse_srat_its(void)
5503 {
5504 int count;
5505
5506 count = acpi_table_parse_entries(ACPI_SIG_SRAT,
5507 sizeof(struct acpi_table_srat),
5508 ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
5509 gic_acpi_match_srat_its, 0);
5510 if (count <= 0)
5511 return;
5512
5513 its_srat_maps = kmalloc_array(count, sizeof(struct its_srat_map),
5514 GFP_KERNEL);
5515 if (!its_srat_maps)
5516 return;
5517
5518 acpi_table_parse_entries(ACPI_SIG_SRAT,
5519 sizeof(struct acpi_table_srat),
5520 ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
5521 gic_acpi_parse_srat_its, 0);
5522 }
5523
5524 /* free the its_srat_maps after ITS probing */
5525 static void __init acpi_its_srat_maps_free(void)
5526 {
5527 kfree(its_srat_maps);
5528 }
5529 #else
5530 static void __init acpi_table_parse_srat_its(void) { }
5531 static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; }
5532 static void __init acpi_its_srat_maps_free(void) { }
5533 #endif
5534
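/*
 * Probe one ITS described by a MADT GIC translator entry: carve out its
 * 128K register window, allocate a fwnode to act as the domain token,
 * register that token with IORT, then initialise and probe the ITS node.
 */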
5535 static int __init gic_acpi_parse_madt_its(union acpi_subtable_headers *header,
5536 const unsigned long end)
5537 {
5538 struct acpi_madt_generic_translator *its_entry;
5539 struct fwnode_handle *dom_handle;
5540 struct its_node *its;
5541 struct resource res;
5542 int err;
5543
5544 its_entry = (struct acpi_madt_generic_translator *)header;
5545 memset(&res, 0, sizeof(res));
5546 res.start = its_entry->base_address;
5547 res.end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1;
5548 res.flags = IORESOURCE_MEM;
5549
5550 dom_handle = irq_domain_alloc_fwnode(&res.start);
5551 if (!dom_handle) {
5552 pr_err("ITS@%pa: Unable to allocate GICv3 ITS domain token\n",
5553 &res.start);
5554 return -ENOMEM;
5555 }
5556
5557 err = iort_register_domain_token(its_entry->translation_id, res.start,
5558 dom_handle);
5559 if (err) {
5560 pr_err("ITS@%pa: Unable to register GICv3 ITS domain token (ITS ID %d) to IORT\n",
5561 &res.start, its_entry->translation_id);
5562 goto dom_err;
5563 }
5564
5565 its = its_node_init(&res, dom_handle,
5566 acpi_get_its_numa_node(its_entry->translation_id));
5567 if (!its) {
5568 err = -ENOMEM;
5569 goto node_err;
5570 }
5571
5572 err = its_probe_one(its);
5573 if (!err)
5574 return 0;
5575
5576 node_err:
5577 iort_deregister_domain_token(its_entry->translation_id);
5578 dom_err:
5579 irq_domain_free_fwnode(dom_handle);
5580 return err;
5581 }
5582
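/* Build a resource from the MADT entry and reset that ITS before probing. */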
5583 static int __init its_acpi_reset(union acpi_subtable_headers *header,
5584 const unsigned long end)
5585 {
5586 struct acpi_madt_generic_translator *its_entry;
5587 struct resource res;
5588
5589 its_entry = (struct acpi_madt_generic_translator *)header;
5590 res = (struct resource) {
5591 .start = its_entry->base_address,
5592 .end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1,
5593 .flags = IORESOURCE_MEM,
5594 };
5595
5596 return its_reset_one(&res);
5597 }
5598
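/*
 * ACPI flavour of ITS discovery: parse the SRAT affinity data, reset
 * every ITS listed in the MADT, then probe them.
 */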
5599 static void __init its_acpi_probe(void)
5600 {
5601 acpi_table_parse_srat_its();
5602 /*
5603 * Make sure *all* the ITS are reset before we probe any, as
5604 * they may be sharing memory. If any of the ITS fails to
5605 * reset, don't even try to go any further, as this could
5606 * result in something even worse.
5607 */
5608 if (acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
5609 its_acpi_reset, 0) > 0)
5610 acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
5611 gic_acpi_parse_madt_its, 0);
5612 acpi_its_srat_maps_free();
5613 }
5614 #else
5615 static void __init its_acpi_probe(void) { }
5616 #endif
5617
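/*
 * Register a dynamic CPU hotplug callback so that its_cpu_memreserve_lpi
 * runs as each CPU comes online, reserving any LPI tables the
 * redistributor is already using (e.g. handed over across kexec). Nothing
 * to do without EFI config tables or without any ITS.
 */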
5618 int __init its_lpi_memreserve_init(void)
5619 {
5620 int state;
5621
5622 if (!efi_enabled(EFI_CONFIG_TABLES))
5623 return 0;
5624
5625 if (list_empty(&its_nodes))
5626 return 0;
5627
5628 gic_rdists->cpuhp_memreserve_state = CPUHP_INVALID;
5629 state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
5630 "irqchip/arm/gicv3/memreserve:online",
5631 its_cpu_memreserve_lpi,
5632 NULL);
5633 if (state < 0)
5634 return state;
5635
5636 gic_rdists->cpuhp_memreserve_state = state;
5637
5638 return 0;
5639 }
5640
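/*
 * Top-level ITS bring-up: discover the ITSs (DT or ACPI), allocate the
 * LPI tables, enable GICv4/v4.1 VLPI (and, on v4.1, vSGI) support when
 * every ITS and the redistributors advertise it, and finally register
 * the suspend/resume syscore ops.
 */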
5641 int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
5642 struct irq_domain *parent_domain)
5643 {
5644 struct device_node *of_node;
5645 struct its_node *its;
5646 bool has_v4 = false;
5647 bool has_v4_1 = false;
5648 int err;
5649
5650 gic_rdists = rdists;
5651
5652 its_parent = parent_domain;
5653 of_node = to_of_node(handle);
5654 if (of_node)
5655 its_of_probe(of_node);
5656 else
5657 its_acpi_probe();
5658
5659 if (list_empty(&its_nodes)) {
5660 pr_warn("ITS: No ITS available, not enabling LPIs\n");
5661 return -ENXIO;
5662 }
5663
5664 err = allocate_lpi_tables();
5665 if (err)
5666 return err;
5667
5668 list_for_each_entry(its, &its_nodes, entry) {
5669 has_v4 |= is_v4(its);
5670 has_v4_1 |= is_v4_1(its);
5671 }
5672
5673 /* Don't bother with inconsistent systems */
5674 if (WARN_ON(!has_v4_1 && rdists->has_rvpeid))
5675 rdists->has_rvpeid = false;
5676
5677 if (has_v4 & rdists->has_vlpis) {
5678 const struct irq_domain_ops *sgi_ops;
5679
5680 if (has_v4_1)
5681 sgi_ops = &its_sgi_domain_ops;
5682 else
5683 sgi_ops = NULL;
5684
5685 if (its_init_vpe_domain() ||
5686 its_init_v4(parent_domain, &its_vpe_domain_ops, sgi_ops)) {
5687 rdists->has_vlpis = false;
5688 pr_err("ITS: Disabling GICv4 support\n");
5689 }
5690 }
5691
5692 register_syscore_ops(&its_syscore_ops);
5693
5694 return 0;
5695 }
5696