/*
 * Xtensa MX interrupt distributor
 *
 * Copyright (C) 2002 - 2013 Tensilica, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/xtensa-mx.h>
#include <linux/of.h>

#include <asm/mxregs.h>

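/*
 * The first HW_IRQ_IPI_COUNT hwirqs are used as IPIs.  HW_IRQ_MX_BASE is the
 * offset subtracted when indexing the MX distributor registers
 * (MIENG/MIENGSET/MIROUT) below, and HW_IRQ_EXTERN_BASE is the offset added
 * to the first DT cell when a specifier describes an external interrupt line.
 */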
#define HW_IRQ_IPI_COUNT 2
#define HW_IRQ_MX_BASE 2
#define HW_IRQ_EXTERN_BASE 3

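/*
 * Per-CPU shadow of the INTENABLE special register for interrupt lines that
 * are masked and unmasked locally rather than through the MX distributor.
 */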
static DEFINE_PER_CPU(unsigned int, cached_irq_mask);

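/*
 * Map a hardware interrupt to a Linux IRQ: the first HW_IRQ_IPI_COUNT hwirqs
 * are the IPIs and get the per-CPU handler, everything else is marked as
 * single-target and handed to the generic xtensa_irq_map().
 */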
static int xtensa_mx_irq_map(struct irq_domain *d, unsigned int irq,
		irq_hw_number_t hw)
{
	if (hw < HW_IRQ_IPI_COUNT) {
		struct irq_chip *irq_chip = d->host_data;
		irq_set_chip_and_handler_name(irq, irq_chip,
				handle_percpu_irq, "ipi");
		irq_set_status_flags(irq, IRQ_LEVEL);
		return 0;
	}
	irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
	return xtensa_irq_map(d, irq, hw);
}

/*
 * Device Tree IRQ specifier translation function which works with one or
 * two cell bindings.  The first cell value maps directly to the hwirq
 * number.  The second cell, if present, specifies whether the hwirq number
 * is external (1) or internal (0).
 */
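/*
 * For example, the one-cell specifier <2> (or the two-cell <2 0>) selects
 * internal hwirq 2 directly, while <2 1> marks the line as external, so the
 * ext_irq argument below (2 + HW_IRQ_EXTERN_BASE) is what
 * xtensa_irq_domain_xlate() uses to resolve the hwirq.
 */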
static int xtensa_mx_irq_domain_xlate(struct irq_domain *d,
		struct device_node *ctrlr,
		const u32 *intspec, unsigned int intsize,
		unsigned long *out_hwirq, unsigned int *out_type)
{
	return xtensa_irq_domain_xlate(intspec, intsize,
			intspec[0], intspec[0] + HW_IRQ_EXTERN_BASE,
			out_hwirq, out_type);
}

static const struct irq_domain_ops xtensa_mx_irq_domain_ops = {
	.xlate = xtensa_mx_irq_domain_xlate,
	.map = xtensa_mx_irq_map,
};

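/*
 * Enable all external edge- and level-triggered lines in this CPU's
 * INTENABLE register and seed the cached mask accordingly.  Intended for
 * secondary CPU bring-up; the init functions below also call it for the
 * boot CPU.
 */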
void secondary_init_irq(void)
{
	__this_cpu_write(cached_irq_mask,
			XCHAL_INTTYPE_MASK_EXTERN_EDGE |
			XCHAL_INTTYPE_MASK_EXTERN_LEVEL);
	set_sr(XCHAL_INTTYPE_MASK_EXTERN_EDGE |
			XCHAL_INTTYPE_MASK_EXTERN_LEVEL, intenable);
}

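/*
 * Mask an interrupt: external lines are disabled through the MX MIENG
 * register, everything else is cleared in the per-CPU INTENABLE shadow and
 * written back to INTENABLE.
 */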
static void xtensa_mx_irq_mask(struct irq_data *d)
{
	unsigned int mask = 1u << d->hwirq;

	if (mask & (XCHAL_INTTYPE_MASK_EXTERN_EDGE |
				XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) {
		set_er(1u << (xtensa_get_ext_irq_no(d->hwirq) -
					HW_IRQ_MX_BASE), MIENG);
	} else {
		mask = __this_cpu_read(cached_irq_mask) & ~mask;
		__this_cpu_write(cached_irq_mask, mask);
		set_sr(mask, intenable);
	}
}

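/*
 * Unmask an interrupt: external lines are re-enabled through MIENGSET,
 * everything else is set in the per-CPU INTENABLE shadow and written back.
 */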
static void xtensa_mx_irq_unmask(struct irq_data *d)
{
	unsigned int mask = 1u << d->hwirq;

	if (mask & (XCHAL_INTTYPE_MASK_EXTERN_EDGE |
				XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) {
		set_er(1u << (xtensa_get_ext_irq_no(d->hwirq) -
					HW_IRQ_MX_BASE), MIENGSET);
	} else {
		mask |= __this_cpu_read(cached_irq_mask);
		__this_cpu_write(cached_irq_mask, mask);
		set_sr(mask, intenable);
	}
}

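/*
 * The enable/disable callbacks additionally invoke the variant_irq_enable()
 * and variant_irq_disable() hooks, before unmasking and after masking
 * respectively.
 */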
static void xtensa_mx_irq_enable(struct irq_data *d)
{
	variant_irq_enable(d->hwirq);
	xtensa_mx_irq_unmask(d);
}

static void xtensa_mx_irq_disable(struct irq_data *d)
{
	xtensa_mx_irq_mask(d);
	variant_irq_disable(d->hwirq);
}

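/* Acknowledge by clearing the pending bit via the INTCLEAR special register. */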
static void xtensa_mx_irq_ack(struct irq_data *d)
{
	set_sr(1 << d->hwirq, intclear);
}

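/* Retrigger by setting the pending bit again via the INTSET special register. */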
static int xtensa_mx_irq_retrigger(struct irq_data *d)
{
	set_sr(1 << d->hwirq, intset);
	return 1;
}

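/*
 * Route an MX-distributed line to a single online CPU by writing that CPU's
 * bit into the line's MIROUT register, and record it as the effective
 * affinity.
 */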
static int xtensa_mx_irq_set_affinity(struct irq_data *d,
		const struct cpumask *dest, bool force)
{
	int cpu = cpumask_any_and(dest, cpu_online_mask);
	unsigned int mask = 1u << cpu;

	set_er(mask, MIROUT(d->hwirq - HW_IRQ_MX_BASE));
	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	return 0;
}

static struct irq_chip xtensa_mx_irq_chip = {
	.name		= "xtensa-mx",
	.irq_enable	= xtensa_mx_irq_enable,
	.irq_disable	= xtensa_mx_irq_disable,
	.irq_mask	= xtensa_mx_irq_mask,
	.irq_unmask	= xtensa_mx_irq_unmask,
	.irq_ack	= xtensa_mx_irq_ack,
	.irq_retrigger	= xtensa_mx_irq_retrigger,
	.irq_set_affinity = xtensa_mx_irq_set_affinity,
};

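/*
 * Legacy (non-DT) init: create a legacy domain mapping hwirqs 0..NR_IRQS-2
 * to Linux IRQs 1..NR_IRQS-1, make it the default host and initialize the
 * boot CPU's interrupt state.
 */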
int __init xtensa_mx_init_legacy(struct device_node *interrupt_parent)
{
	struct irq_domain *root_domain =
		irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0,
				&xtensa_mx_irq_domain_ops,
				&xtensa_mx_irq_chip);
	irq_set_default_host(root_domain);
	secondary_init_irq();
	return 0;
}

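/*
 * DT init: a linear domain covering NR_IRQS hwirqs, registered for the
 * "cdns,xtensa-mx" compatible via IRQCHIP_DECLARE() below.
 */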
static int __init xtensa_mx_init(struct device_node *np,
		struct device_node *interrupt_parent)
{
	struct irq_domain *root_domain =
		irq_domain_add_linear(np, NR_IRQS, &xtensa_mx_irq_domain_ops,
				&xtensa_mx_irq_chip);
	irq_set_default_host(root_domain);
	secondary_init_irq();
	return 0;
}
IRQCHIP_DECLARE(xtensa_mx_irq_chip, "cdns,xtensa-mx", xtensa_mx_init);