xref: /openbmc/linux/arch/arm/mach-tegra/irq.c (revision 1dd24dae)
/*
 * Copyright (C) 2011 Google, Inc.
 *
 * Author:
 *	Colin Cross <ccross@android.com>
 *
 * Copyright (C) 2010,2013, NVIDIA Corporation
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/syscore_ops.h>

#include "board.h"
#include "iomap.h"

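/*
 * Register offsets within each legacy interrupt controller (ICTLR) bank.
 * The CPU_* registers gate interrupts routed to the main CPU complex and
 * the COP_* registers gate those routed to the COP/AVP coprocessor.  IER
 * is the interrupt enable register (with write-only SET/CLR views),
 * IEP_CLASS appears to select IRQ vs. FIQ delivery, and the FIR (forced
 * interrupt request) registers latch or clear a pending request, which
 * is how retrigger and ack/eoi are implemented below.
 */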
#define ICTLR_CPU_IEP_VFIQ	0x08
#define ICTLR_CPU_IEP_FIR	0x14
#define ICTLR_CPU_IEP_FIR_SET	0x18
#define ICTLR_CPU_IEP_FIR_CLR	0x1c

#define ICTLR_CPU_IER		0x20
#define ICTLR_CPU_IER_SET	0x24
#define ICTLR_CPU_IER_CLR	0x28
#define ICTLR_CPU_IEP_CLASS	0x2C

#define ICTLR_COP_IER		0x30
#define ICTLR_COP_IER_SET	0x34
#define ICTLR_COP_IER_CLR	0x38
#define ICTLR_COP_IEP_CLASS	0x3c

#define FIRST_LEGACY_IRQ 32
#define TEGRA_MAX_NUM_ICTLRS	5

#define SGI_MASK 0xFFFF

static int num_ictlrs;

static void __iomem *ictlr_reg_base[] = {
	IO_ADDRESS(TEGRA_PRIMARY_ICTLR_BASE),
	IO_ADDRESS(TEGRA_SECONDARY_ICTLR_BASE),
	IO_ADDRESS(TEGRA_TERTIARY_ICTLR_BASE),
	IO_ADDRESS(TEGRA_QUATERNARY_ICTLR_BASE),
	IO_ADDRESS(TEGRA_QUINARY_ICTLR_BASE),
};

#ifdef CONFIG_PM_SLEEP
static u32 cop_ier[TEGRA_MAX_NUM_ICTLRS];
static u32 cop_iep[TEGRA_MAX_NUM_ICTLRS];
static u32 cpu_ier[TEGRA_MAX_NUM_ICTLRS];
static u32 cpu_iep[TEGRA_MAX_NUM_ICTLRS];

static u32 ictlr_wake_mask[TEGRA_MAX_NUM_ICTLRS];
#endif

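/*
 * Return true if any software-generated interrupt (SGI 0-15) is pending
 * in the GIC distributor, so callers can back out of entering a
 * low-power state while a cross-CPU IPI is still queued.
 */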
bool tegra_pending_sgi(void)
{
	u32 pending_set;
	void __iomem *distbase = IO_ADDRESS(TEGRA_ARM_INT_DIST_BASE);

	pending_set = readl_relaxed(distbase + GIC_DIST_PENDING_SET);

	if (pending_set & SGI_MASK)
		return true;

	return false;
}

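/*
 * Translate a legacy IRQ number into its controller bank and bit, then
 * write a one-hot mask to the given register offset.  Each ICTLR bank
 * covers 32 interrupts, starting at FIRST_LEGACY_IRQ (the first 32
 * interrupt IDs are the GIC-internal SGIs/PPIs and have no ICTLR bit).
 */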
static inline void tegra_irq_write_mask(unsigned int irq, unsigned long reg)
{
	void __iomem *base;
	u32 mask;

	BUG_ON(irq < FIRST_LEGACY_IRQ ||
		irq >= FIRST_LEGACY_IRQ + num_ictlrs * 32);

	base = ictlr_reg_base[(irq - FIRST_LEGACY_IRQ) / 32];
	mask = BIT((irq - FIRST_LEGACY_IRQ) % 32);

	__raw_writel(mask, base + reg);
}

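/*
 * The callbacks below mirror the GIC operations into the legacy
 * controller via gic_arch_extn: mask/unmask drive the CPU interrupt
 * enable bits, ack/eoi clear any forced request, and retrigger forces a
 * request so the interrupt is taken again.  Interrupts below
 * FIRST_LEGACY_IRQ are GIC-internal and are left alone.
 */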
static void tegra_mask(struct irq_data *d)
{
	if (d->irq < FIRST_LEGACY_IRQ)
		return;

	tegra_irq_write_mask(d->irq, ICTLR_CPU_IER_CLR);
}

static void tegra_unmask(struct irq_data *d)
{
	if (d->irq < FIRST_LEGACY_IRQ)
		return;

	tegra_irq_write_mask(d->irq, ICTLR_CPU_IER_SET);
}

static void tegra_ack(struct irq_data *d)
{
	if (d->irq < FIRST_LEGACY_IRQ)
		return;

	tegra_irq_write_mask(d->irq, ICTLR_CPU_IEP_FIR_CLR);
}

static void tegra_eoi(struct irq_data *d)
{
	if (d->irq < FIRST_LEGACY_IRQ)
		return;

	tegra_irq_write_mask(d->irq, ICTLR_CPU_IEP_FIR_CLR);
}

static int tegra_retrigger(struct irq_data *d)
{
	if (d->irq < FIRST_LEGACY_IRQ)
		return 0;

	tegra_irq_write_mask(d->irq, ICTLR_CPU_IEP_FIR_SET);

	return 1;
}

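/*
 * Suspend/resume support: tegra_set_wake() records which legacy
 * interrupts may wake the system, tegra_legacy_irq_suspend() saves the
 * controller state and leaves only the wake sources enabled, and
 * tegra_legacy_irq_resume() restores the saved state.
 */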
#ifdef CONFIG_PM_SLEEP
static int tegra_set_wake(struct irq_data *d, unsigned int enable)
{
	u32 irq = d->irq;
	u32 index, mask;

	if (irq < FIRST_LEGACY_IRQ ||
		irq >= FIRST_LEGACY_IRQ + num_ictlrs * 32)
		return -EINVAL;

	index = ((irq - FIRST_LEGACY_IRQ) / 32);
	mask = BIT((irq - FIRST_LEGACY_IRQ) % 32);
	if (enable)
		ictlr_wake_mask[index] |= mask;
	else
		ictlr_wake_mask[index] &= ~mask;

	return 0;
}

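/*
 * Save the per-bank enable and class state, disable everything on both
 * the CPU and COP paths, then re-enable just the interrupts recorded in
 * ictlr_wake_mask so they can wake the system.
 */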
static int tegra_legacy_irq_suspend(void)
{
	unsigned long flags;
	int i;

	local_irq_save(flags);
	for (i = 0; i < num_ictlrs; i++) {
		void __iomem *ictlr = ictlr_reg_base[i];
		/* Save interrupt state */
		cpu_ier[i] = readl_relaxed(ictlr + ICTLR_CPU_IER);
		cpu_iep[i] = readl_relaxed(ictlr + ICTLR_CPU_IEP_CLASS);
		cop_ier[i] = readl_relaxed(ictlr + ICTLR_COP_IER);
		cop_iep[i] = readl_relaxed(ictlr + ICTLR_COP_IEP_CLASS);

		/* Disable COP interrupts */
		writel_relaxed(~0ul, ictlr + ICTLR_COP_IER_CLR);

		/* Disable CPU interrupts */
		writel_relaxed(~0ul, ictlr + ICTLR_CPU_IER_CLR);

		/* Enable the wakeup sources of ictlr */
		writel_relaxed(ictlr_wake_mask[i], ictlr + ICTLR_CPU_IER_SET);
	}
	local_irq_restore(flags);

	return 0;
}

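/*
 * Restore the saved class and enable state for both the CPU and COP
 * paths, clearing each enable register first so only the previously
 * enabled interrupts come back on.
 */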
static void tegra_legacy_irq_resume(void)
{
	unsigned long flags;
	int i;

	local_irq_save(flags);
	for (i = 0; i < num_ictlrs; i++) {
		void __iomem *ictlr = ictlr_reg_base[i];
		writel_relaxed(cpu_iep[i], ictlr + ICTLR_CPU_IEP_CLASS);
		writel_relaxed(~0ul, ictlr + ICTLR_CPU_IER_CLR);
		writel_relaxed(cpu_ier[i], ictlr + ICTLR_CPU_IER_SET);
		writel_relaxed(cop_iep[i], ictlr + ICTLR_COP_IEP_CLASS);
		writel_relaxed(~0ul, ictlr + ICTLR_COP_IER_CLR);
		writel_relaxed(cop_ier[i], ictlr + ICTLR_COP_IER_SET);
	}
	local_irq_restore(flags);
}

static struct syscore_ops tegra_legacy_irq_syscore_ops = {
	.suspend = tegra_legacy_irq_suspend,
	.resume = tegra_legacy_irq_resume,
};

int tegra_legacy_irq_syscore_init(void)
{
	register_syscore_ops(&tegra_legacy_irq_syscore_ops);

	return 0;
}
#else
#define tegra_set_wake NULL
#endif

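/*
 * Probe the number of legacy controller banks from the GIC distributor
 * type register (GIC_DIST_CTR), mask and reset every bank, and hook the
 * handlers above into gic_arch_extn so the GIC code calls them
 * alongside its own operations.
 */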
void __init tegra_init_irq(void)
{
	int i;
	void __iomem *distbase;

	distbase = IO_ADDRESS(TEGRA_ARM_INT_DIST_BASE);
	num_ictlrs = readl_relaxed(distbase + GIC_DIST_CTR) & 0x1f;

	if (num_ictlrs > ARRAY_SIZE(ictlr_reg_base)) {
		WARN(1, "Too many (%d) interrupt controllers found. Maximum is %d.",
			num_ictlrs, ARRAY_SIZE(ictlr_reg_base));
		num_ictlrs = ARRAY_SIZE(ictlr_reg_base);
	}

	for (i = 0; i < num_ictlrs; i++) {
		void __iomem *ictlr = ictlr_reg_base[i];
		writel(~0, ictlr + ICTLR_CPU_IER_CLR);
		writel(0, ictlr + ICTLR_CPU_IEP_CLASS);
	}

	gic_arch_extn.irq_ack = tegra_ack;
	gic_arch_extn.irq_eoi = tegra_eoi;
	gic_arch_extn.irq_mask = tegra_mask;
	gic_arch_extn.irq_unmask = tegra_unmask;
	gic_arch_extn.irq_retrigger = tegra_retrigger;
	gic_arch_extn.irq_set_wake = tegra_set_wake;
	gic_arch_extn.flags = IRQCHIP_MASK_ON_SUSPEND;

	/*
	 * Check if there is a devicetree present, since the GIC will be
	 * initialized elsewhere under DT.
	 */
	if (!of_have_populated_dt())
		gic_init(0, 29, distbase,
			IO_ADDRESS(TEGRA_ARM_PERIF_BASE + 0x100));
}