// SPDX-License-Identifier: GPL-2.0-only
/*
 * ARM GIC v2m MSI(-X) support
 * Support for Message Signaled Interrupts for systems that
 * implement ARM Generic Interrupt Controller: GICv2m.
 *
 * Copyright (C) 2014 Advanced Micro Devices, Inc.
 * Authors: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
 *	    Harish Kasiviswanathan <harish.kasiviswanathan@amd.com>
 *	    Brandon Anderson <brandon.anderson@amd.com>
 */

#define pr_fmt(fmt) "GICv2m: " fmt

#include <linux/acpi.h>
#include <linux/iommu.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/irqchip/arm-gic-common.h>

/*
 * MSI_TYPER:
 *	[31:26]	Reserved
 *	[25:16]	lowest SPI assigned to MSI
 *	[15:10]	Reserved
 *	[9:0]	Number of SPIs assigned to MSI
 */
#define V2M_MSI_TYPER			0x008
#define V2M_MSI_TYPER_BASE_SHIFT	16
#define V2M_MSI_TYPER_BASE_MASK		0x3FF
#define V2M_MSI_TYPER_NUM_MASK		0x3FF
#define V2M_MSI_SETSPI_NS		0x040
#define V2M_MIN_SPI			32
#define V2M_MAX_SPI			1019
#define V2M_MSI_IIDR			0xFCC

#define V2M_MSI_TYPER_BASE_SPI(x)	\
	(((x) >> V2M_MSI_TYPER_BASE_SHIFT) & V2M_MSI_TYPER_BASE_MASK)

#define V2M_MSI_TYPER_NUM_SPI(x)	((x) & V2M_MSI_TYPER_NUM_MASK)

/* APM X-Gene with GICv2m MSI_IIDR register value */
#define XGENE_GICV2M_MSI_IIDR		0x06000170

/* Broadcom NS2 GICv2m MSI_IIDR register value */
#define BCM_NS2_GICV2M_MSI_IIDR		0x0000013f

/* List of flags for specific v2m implementation */
#define GICV2M_NEEDS_SPI_OFFSET		0x00000001
#define GICV2M_GRAVITON_ADDRESS_ONLY	0x00000002
static LIST_HEAD(v2m_nodes);
static DEFINE_SPINLOCK(v2m_lock);

struct v2m_data {
	struct list_head entry;
	struct fwnode_handle *fwnode;
	struct resource res;	/* GICv2m resource */
	void __iomem *base;	/* GICv2m virt address */
	u32 spi_start;		/* The SPI number at which MSIs start */
	u32 nr_spis;		/* The number of SPIs for MSIs */
	u32 spi_offset;		/* offset to be subtracted from SPI number */
	unsigned long *bm;	/* MSI vector bitmap */
	u32 flags;		/* v2m flags for specific implementation */
};

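/*
 * MSI(-X) irq_chip callbacks: mask/unmask both the PCI MSI vector on the
 * endpoint and the corresponding SPI via the parent GIC domain.
 */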
static void gicv2m_mask_msi_irq(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	irq_chip_mask_parent(d);
}

static void gicv2m_unmask_msi_irq(struct irq_data *d)
{
	pci_msi_unmask_irq(d);
	irq_chip_unmask_parent(d);
}

static struct irq_chip gicv2m_msi_irq_chip = {
	.name		= "MSI",
	.irq_mask	= gicv2m_mask_msi_irq,
	.irq_unmask	= gicv2m_unmask_msi_irq,
	.irq_eoi	= irq_chip_eoi_parent,
};

static struct msi_domain_info gicv2m_msi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		   MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
	.chip	= &gicv2m_msi_irq_chip,
};

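/*
 * Return the doorbell address an MSI write must target. Standard v2m frames
 * expose a single MSI_SETSPI_NS register; Graviton-style "address only"
 * frames encode the SPI number in the address instead (one distinct doorbell
 * per SPI), leaving the data payload unused.
 */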
static phys_addr_t gicv2m_get_msi_addr(struct v2m_data *v2m, int hwirq)
{
	if (v2m->flags & GICV2M_GRAVITON_ADDRESS_ONLY)
		return v2m->res.start | ((hwirq - 32) << 3);
	else
		return v2m->res.start + V2M_MSI_SETSPI_NS;
}

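/*
 * Build the MSI message: the address points at the frame's doorbell and the
 * data carries the SPI number, adjusted for implementations that expect an
 * offset (see the erratum handling in gicv2m_init_one()).
 */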
static void gicv2m_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct v2m_data *v2m = irq_data_get_irq_chip_data(data);
	phys_addr_t addr = gicv2m_get_msi_addr(v2m, data->hwirq);

	msg->address_hi = upper_32_bits(addr);
	msg->address_lo = lower_32_bits(addr);

	if (v2m->flags & GICV2M_GRAVITON_ADDRESS_ONLY)
		msg->data = 0;
	else
		msg->data = data->hwirq;
	if (v2m->flags & GICV2M_NEEDS_SPI_OFFSET)
		msg->data -= v2m->spi_offset;

	iommu_dma_compose_msi_msg(irq_data_get_msi_desc(data), msg);
}

static struct irq_chip gicv2m_irq_chip = {
	.name			= "GICv2m",
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
	.irq_compose_msi_msg	= gicv2m_compose_msi_msg,
};

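/*
 * Allocate the backing interrupt in the parent GIC domain. The fwspec layout
 * depends on how the parent was probed: the DT binding uses three cells
 * (type 0 = SPI, SPI number relative to 32, trigger type), while an
 * ACPI/fwnode parent takes the absolute hwirq plus the trigger type.
 */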
static int gicv2m_irq_gic_domain_alloc(struct irq_domain *domain,
				       unsigned int virq,
				       irq_hw_number_t hwirq)
{
	struct irq_fwspec fwspec;
	struct irq_data *d;
	int err;

	if (is_of_node(domain->parent->fwnode)) {
		fwspec.fwnode = domain->parent->fwnode;
		fwspec.param_count = 3;
		fwspec.param[0] = 0;
		fwspec.param[1] = hwirq - 32;
		fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
	} else if (is_fwnode_irqchip(domain->parent->fwnode)) {
		fwspec.fwnode = domain->parent->fwnode;
		fwspec.param_count = 2;
		fwspec.param[0] = hwirq;
		fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
	} else {
		return -EINVAL;
	}

	err = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
	if (err)
		return err;

	/* Configure the interrupt line to be edge */
	d = irq_domain_get_irq_data(domain->parent, virq);
	d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);
	return 0;
}

static void gicv2m_unalloc_msi(struct v2m_data *v2m, unsigned int hwirq,
			       int nr_irqs)
{
	spin_lock(&v2m_lock);
	bitmap_release_region(v2m->bm, hwirq - v2m->spi_start,
			      get_count_order(nr_irqs));
	spin_unlock(&v2m_lock);
}

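/*
 * Allocate nr_irqs MSIs: find a v2m frame with a free, power-of-two sized
 * block of SPIs, then wire each virq to its SPI in the parent GIC domain and
 * attach the v2m irq_chip.
 */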
static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				   unsigned int nr_irqs, void *args)
{
	msi_alloc_info_t *info = args;
	struct v2m_data *v2m = NULL, *tmp;
	int hwirq, offset, i, err = 0;

	spin_lock(&v2m_lock);
	list_for_each_entry(tmp, &v2m_nodes, entry) {
		offset = bitmap_find_free_region(tmp->bm, tmp->nr_spis,
						 get_count_order(nr_irqs));
		if (offset >= 0) {
			v2m = tmp;
			break;
		}
	}
	spin_unlock(&v2m_lock);

	if (!v2m)
		return -ENOSPC;

	hwirq = v2m->spi_start + offset;

	err = iommu_dma_prepare_msi(info->desc,
				    gicv2m_get_msi_addr(v2m, hwirq));
	if (err)
		return err;

	for (i = 0; i < nr_irqs; i++) {
		err = gicv2m_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
		if (err)
			goto fail;

		irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
					      &gicv2m_irq_chip, v2m);
	}

	return 0;

fail:
	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
	gicv2m_unalloc_msi(v2m, hwirq, nr_irqs);
	return err;
}

static void gicv2m_irq_domain_free(struct irq_domain *domain,
				   unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct v2m_data *v2m = irq_data_get_irq_chip_data(d);

	gicv2m_unalloc_msi(v2m, d->hwirq, nr_irqs);
	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}

static const struct irq_domain_ops gicv2m_domain_ops = {
	.alloc			= gicv2m_irq_domain_alloc,
	.free			= gicv2m_irq_domain_free,
};

static bool is_msi_spi_valid(u32 base, u32 num)
{
	if (base < V2M_MIN_SPI) {
		pr_err("Invalid MSI base SPI (base:%u)\n", base);
		return false;
	}

	if ((num == 0) || (base + num > V2M_MAX_SPI)) {
		pr_err("Number of SPIs (%u) exceeds maximum (%u)\n",
		       num, V2M_MAX_SPI - V2M_MIN_SPI + 1);
		return false;
	}

	return true;
}

static struct irq_chip gicv2m_pmsi_irq_chip = {
	.name			= "pMSI",
};

static struct msi_domain_ops gicv2m_pmsi_ops = {
};

static struct msi_domain_info gicv2m_pmsi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
	.ops	= &gicv2m_pmsi_ops,
	.chip	= &gicv2m_pmsi_irq_chip,
};

static void __init gicv2m_teardown(void)
{
	struct v2m_data *v2m, *tmp;

	list_for_each_entry_safe(v2m, tmp, &v2m_nodes, entry) {
		list_del(&v2m->entry);
		bitmap_free(v2m->bm);
		iounmap(v2m->base);
		of_node_put(to_of_node(v2m->fwnode));
		if (is_fwnode_irqchip(v2m->fwnode))
			irq_domain_free_fwnode(v2m->fwnode);
		kfree(v2m);
	}
}

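/*
 * Create the IRQ domain hierarchy on top of the parent GIC domain: a nexus
 * domain owned by the first registered frame, with PCI/MSI and platform-MSI
 * domains stacked above it.
 */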
static __init int gicv2m_allocate_domains(struct irq_domain *parent)
{
	struct irq_domain *inner_domain, *pci_domain, *plat_domain;
	struct v2m_data *v2m;

	v2m = list_first_entry_or_null(&v2m_nodes, struct v2m_data, entry);
	if (!v2m)
		return 0;

	inner_domain = irq_domain_create_hierarchy(parent, 0, 0, v2m->fwnode,
						   &gicv2m_domain_ops, v2m);
	if (!inner_domain) {
		pr_err("Failed to create GICv2m domain\n");
		return -ENOMEM;
	}

	irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
	pci_domain = pci_msi_create_irq_domain(v2m->fwnode,
					       &gicv2m_msi_domain_info,
					       inner_domain);
	plat_domain = platform_msi_create_irq_domain(v2m->fwnode,
						     &gicv2m_pmsi_domain_info,
						     inner_domain);
	if (!pci_domain || !plat_domain) {
		pr_err("Failed to create MSI domains\n");
		if (plat_domain)
			irq_domain_remove(plat_domain);
		if (pci_domain)
			irq_domain_remove(pci_domain);
		irq_domain_remove(inner_domain);
		return -ENOMEM;
	}

	return 0;
}

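/*
 * Set up a single v2m frame: map its registers, determine the SPI range
 * either from the firmware-provided override or from MSI_TYPER, apply
 * implementation quirks keyed off MSI_IIDR, and add the frame to the global
 * v2m_nodes list.
 */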
static int __init gicv2m_init_one(struct fwnode_handle *fwnode,
				  u32 spi_start, u32 nr_spis,
				  struct resource *res, u32 flags)
{
	int ret;
	struct v2m_data *v2m;

	v2m = kzalloc(sizeof(struct v2m_data), GFP_KERNEL);
	if (!v2m)
		return -ENOMEM;

	INIT_LIST_HEAD(&v2m->entry);
	v2m->fwnode = fwnode;
	v2m->flags = flags;

	memcpy(&v2m->res, res, sizeof(struct resource));

	v2m->base = ioremap(v2m->res.start, resource_size(&v2m->res));
	if (!v2m->base) {
		pr_err("Failed to map GICv2m resource\n");
		ret = -ENOMEM;
		goto err_free_v2m;
	}

	if (spi_start && nr_spis) {
		v2m->spi_start = spi_start;
		v2m->nr_spis = nr_spis;
	} else {
		u32 typer;

		/* Graviton should always have explicit spi_start/nr_spis */
		if (v2m->flags & GICV2M_GRAVITON_ADDRESS_ONLY) {
			ret = -EINVAL;
			goto err_iounmap;
		}
		typer = readl_relaxed(v2m->base + V2M_MSI_TYPER);

		v2m->spi_start = V2M_MSI_TYPER_BASE_SPI(typer);
		v2m->nr_spis = V2M_MSI_TYPER_NUM_SPI(typer);
	}

	if (!is_msi_spi_valid(v2m->spi_start, v2m->nr_spis)) {
		ret = -EINVAL;
		goto err_iounmap;
	}

	/*
	 * APM X-Gene GICv2m implementation has an erratum where
	 * the MSI data needs to be the offset from the spi_start
	 * in order to trigger the correct MSI interrupt. This is
	 * different from the standard GICv2m implementation where
	 * the MSI data is the absolute SPI number within the range
	 * from spi_start to (spi_start + num_spis).
	 *
	 * Broadcom NS2 GICv2m implementation has an erratum where the
	 * MSI data is 'spi_number - 32'.
	 *
	 * Reading the MSI_IIDR register fails on the Graviton
	 * implementation, so skip the quirk detection there.
	 */
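	/*
	 * For example, an MSI targeting SPI 64 on a frame whose spi_start
	 * is 64 is sent with data 0 on X-Gene (offset = spi_start) but with
	 * data 32 on NS2 (offset = 32); a standard frame would use the
	 * absolute value 64.
	 */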
	if (!(v2m->flags & GICV2M_GRAVITON_ADDRESS_ONLY)) {
		switch (readl_relaxed(v2m->base + V2M_MSI_IIDR)) {
		case XGENE_GICV2M_MSI_IIDR:
			v2m->flags |= GICV2M_NEEDS_SPI_OFFSET;
			v2m->spi_offset = v2m->spi_start;
			break;
		case BCM_NS2_GICV2M_MSI_IIDR:
			v2m->flags |= GICV2M_NEEDS_SPI_OFFSET;
			v2m->spi_offset = 32;
			break;
		}
	}
	v2m->bm = bitmap_zalloc(v2m->nr_spis, GFP_KERNEL);
	if (!v2m->bm) {
		ret = -ENOMEM;
		goto err_iounmap;
	}

	list_add_tail(&v2m->entry, &v2m_nodes);

	pr_info("range %pR, SPI[%d:%d]\n", res,
		v2m->spi_start, (v2m->spi_start + v2m->nr_spis - 1));
	return 0;

err_iounmap:
	iounmap(v2m->base);
err_free_v2m:
	kfree(v2m);
	return ret;
}

static __initconst struct of_device_id gicv2m_device_id[] = {
	{ .compatible = "arm,gic-v2m-frame", },
	{},
};

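/*
 * DT probing: walk the "arm,gic-v2m-frame" children of the GIC node that are
 * marked as msi-controller, optionally taking the SPI range from the
 * arm,msi-base-spi / arm,msi-num-spis properties instead of MSI_TYPER.
 */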
static int __init gicv2m_of_init(struct fwnode_handle *parent_handle,
				 struct irq_domain *parent)
{
	int ret = 0;
	struct device_node *node = to_of_node(parent_handle);
	struct device_node *child;

	for (child = of_find_matching_node(node, gicv2m_device_id); child;
	     child = of_find_matching_node(child, gicv2m_device_id)) {
		u32 spi_start = 0, nr_spis = 0;
		struct resource res;

		if (!of_property_read_bool(child, "msi-controller"))
			continue;

		ret = of_address_to_resource(child, 0, &res);
		if (ret) {
			pr_err("Failed to allocate v2m resource.\n");
			break;
		}

		if (!of_property_read_u32(child, "arm,msi-base-spi",
					  &spi_start) &&
		    !of_property_read_u32(child, "arm,msi-num-spis", &nr_spis))
			pr_info("DT overriding V2M MSI_TYPER (base:%u, num:%u)\n",
				spi_start, nr_spis);

		ret = gicv2m_init_one(&child->fwnode, spi_start, nr_spis,
				      &res, 0);
		if (ret)
			break;
	}

	if (ret && child)
		of_node_put(child);
	if (!ret)
		ret = gicv2m_allocate_domains(parent);
	if (ret)
		gicv2m_teardown();
	return ret;
}

#ifdef CONFIG_ACPI
static int acpi_num_msi;

static __init struct fwnode_handle *gicv2m_get_fwnode(struct device *dev)
{
	struct v2m_data *data;

	if (WARN_ON(acpi_num_msi <= 0))
		return NULL;

	/* We only return the fwnode of the first MSI frame. */
	data = list_first_entry_or_null(&v2m_nodes, struct v2m_data, entry);
	if (!data)
		return NULL;

	return data->fwnode;
}

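/*
 * Graviton-style frames are detected by the MADT OEM ID: they have no usable
 * MSI_TYPER/MSI_IIDR and use the "address only" doorbell scheme.
 */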
static __init bool acpi_check_amazon_graviton_quirks(void)
{
	static struct acpi_table_madt *madt;
	acpi_status status;
	bool rc = false;

#define ACPI_AMZN_OEM_ID	"AMAZON"

	status = acpi_get_table(ACPI_SIG_MADT, 0,
				(struct acpi_table_header **)&madt);

	if (ACPI_FAILURE(status) || !madt)
		return rc;
	rc = !memcmp(madt->header.oem_id, ACPI_AMZN_OEM_ID, ACPI_OEM_ID_SIZE);
	acpi_put_table((struct acpi_table_header *)madt);

	return rc;
}

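/*
 * Parse one MADT GENERIC_MSI_FRAME entry: each entry describes a 4K (8K on
 * Graviton) register frame and may override the MSI_TYPER SPI range via the
 * SPI_VALUES flag.
 */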
static int __init
acpi_parse_madt_msi(union acpi_subtable_headers *header,
		    const unsigned long end)
{
	int ret;
	struct resource res;
	u32 spi_start = 0, nr_spis = 0;
	struct acpi_madt_generic_msi_frame *m;
	struct fwnode_handle *fwnode;
	u32 flags = 0;

	m = (struct acpi_madt_generic_msi_frame *)header;
	if (BAD_MADT_ENTRY(m, end))
		return -EINVAL;

	res.start = m->base_address;
	res.end = m->base_address + SZ_4K - 1;
	res.flags = IORESOURCE_MEM;

	if (acpi_check_amazon_graviton_quirks()) {
		pr_info("applying Amazon Graviton quirk\n");
		res.end = res.start + SZ_8K - 1;
		flags |= GICV2M_GRAVITON_ADDRESS_ONLY;
		gicv2m_msi_domain_info.flags &= ~MSI_FLAG_MULTI_PCI_MSI;
	}

	if (m->flags & ACPI_MADT_OVERRIDE_SPI_VALUES) {
		spi_start = m->spi_base;
		nr_spis = m->spi_count;

		pr_info("ACPI overriding V2M MSI_TYPER (base:%u, num:%u)\n",
			spi_start, nr_spis);
	}

	fwnode = irq_domain_alloc_fwnode(&res.start);
	if (!fwnode) {
		pr_err("Unable to allocate GICv2m domain token\n");
		return -EINVAL;
	}

	ret = gicv2m_init_one(fwnode, spi_start, nr_spis, &res, flags);
	if (ret)
		irq_domain_free_fwnode(fwnode);

	return ret;
}

static int __init gicv2m_acpi_init(struct irq_domain *parent)
{
	int ret;

	if (acpi_num_msi > 0)
		return 0;

	acpi_num_msi = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_MSI_FRAME,
					     acpi_parse_madt_msi, 0);

	if (acpi_num_msi <= 0)
		goto err_out;

	ret = gicv2m_allocate_domains(parent);
	if (ret)
		goto err_out;

	pci_msi_register_fwnode_provider(&gicv2m_get_fwnode);

	return 0;

err_out:
	gicv2m_teardown();
	return -EINVAL;
}
#else /* CONFIG_ACPI */
static int __init gicv2m_acpi_init(struct irq_domain *parent)
{
	return -EINVAL;
}
#endif /* CONFIG_ACPI */

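/*
 * Common entry point for GICv2m setup: dispatch to DT or ACPI probing
 * depending on the type of the parent fwnode.
 */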
int __init gicv2m_init(struct fwnode_handle *parent_handle,
		       struct irq_domain *parent)
{
	if (is_of_node(parent_handle))
		return gicv2m_of_init(parent_handle, parent);

	return gicv2m_acpi_init(parent);
}