/*
 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Combiner irqchip for EXYNOS
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/err.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/irqdomain.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/interrupt.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#define COMBINER_ENABLE_SET	0x0
#define COMBINER_ENABLE_CLEAR	0x4
#define COMBINER_INT_STATUS	0xC

#define IRQ_IN_COMBINER		8

static DEFINE_SPINLOCK(irq_controller_lock);

struct combiner_chip_data {
	unsigned int hwirq_offset;
	unsigned int irq_mask;
	void __iomem *base;
	unsigned int parent_irq;
#ifdef CONFIG_PM
	u32 pm_save;
#endif
};

static struct combiner_chip_data *combiner_data;
static struct irq_domain *combiner_irq_domain;
static unsigned int max_nr = 20;

static inline void __iomem *combiner_base(struct irq_data *data)
{
	struct combiner_chip_data *combiner_data =
		irq_data_get_irq_chip_data(data);

	return combiner_data->base;
}

static void combiner_mask_irq(struct irq_data *data)
{
	u32 mask = 1 << (data->hwirq % 32);

	__raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_CLEAR);
}

static void combiner_unmask_irq(struct irq_data *data)
{
	u32 mask = 1 << (data->hwirq % 32);

	__raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_SET);
}

static void combiner_handle_cascade_irq(struct irq_desc *desc)
{
	struct combiner_chip_data *chip_data = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned int cascade_irq, combiner_irq;
	unsigned long status;

	chained_irq_enter(chip, desc);

	spin_lock(&irq_controller_lock);
	status = __raw_readl(chip_data->base + COMBINER_INT_STATUS);
	spin_unlock(&irq_controller_lock);
	status &= chip_data->irq_mask;

	if (status == 0)
		goto out;

	combiner_irq = chip_data->hwirq_offset + __ffs(status);
	cascade_irq = irq_find_mapping(combiner_irq_domain, combiner_irq);

	if (unlikely(!cascade_irq))
		handle_bad_irq(desc);
	else
		generic_handle_irq(cascade_irq);

 out:
	chained_irq_exit(chip, desc);
}

#ifdef CONFIG_SMP
static int combiner_set_affinity(struct irq_data *d,
				 const struct cpumask *mask_val, bool force)
{
	struct combiner_chip_data *chip_data = irq_data_get_irq_chip_data(d);
	struct irq_chip *chip = irq_get_chip(chip_data->parent_irq);
	struct irq_data *data = irq_get_irq_data(chip_data->parent_irq);

	if (chip && chip->irq_set_affinity)
		return chip->irq_set_affinity(data, mask_val, force);
	else
		return -EINVAL;
}
#endif

static struct irq_chip combiner_chip = {
	.name			= "COMBINER",
	.irq_mask		= combiner_mask_irq,
	.irq_unmask		= combiner_unmask_irq,
#ifdef CONFIG_SMP
	.irq_set_affinity	= combiner_set_affinity,
#endif
};

static void __init combiner_cascade_irq(struct combiner_chip_data *combiner_data,
					unsigned int irq)
{
	irq_set_chained_handler_and_data(irq, combiner_handle_cascade_irq,
					 combiner_data);
}

static void __init combiner_init_one(struct combiner_chip_data *combiner_data,
				     unsigned int combiner_nr,
				     void __iomem *base, unsigned int irq)
{
	combiner_data->base = base;
	/*
	 * Each register block serves four combiner groups; every group
	 * owns one byte of the 32-bit enable/status registers.
	 */
	combiner_data->hwirq_offset = (combiner_nr & ~3) * IRQ_IN_COMBINER;
	combiner_data->irq_mask = 0xff << ((combiner_nr % 4) << 3);
	combiner_data->parent_irq = irq;

	/* Disable all interrupts */
	__raw_writel(combiner_data->irq_mask, base + COMBINER_ENABLE_CLEAR);
}

static int combiner_irq_domain_xlate(struct irq_domain *d,
				     struct device_node *controller,
				     const u32 *intspec, unsigned int intsize,
				     unsigned long *out_hwirq,
				     unsigned int *out_type)
{
	if (d->of_node != controller)
		return -EINVAL;

	if (intsize < 2)
		return -EINVAL;

	/* Two cells: combiner group number, interrupt within the group */
	*out_hwirq = intspec[0] * IRQ_IN_COMBINER + intspec[1];
	*out_type = 0;

	return 0;
}

static int combiner_irq_domain_map(struct irq_domain *d, unsigned int irq,
				   irq_hw_number_t hw)
{
	struct combiner_chip_data *combiner_data = d->host_data;

	irq_set_chip_and_handler(irq, &combiner_chip, handle_level_irq);
	/* Eight interrupts per group: hw >> 3 selects the group's chip data */
	irq_set_chip_data(irq, &combiner_data[hw >> 3]);
	irq_set_probe(irq);

	return 0;
}

static const struct irq_domain_ops combiner_irq_domain_ops = {
	.xlate	= combiner_irq_domain_xlate,
	.map	= combiner_irq_domain_map,
};

static void __init combiner_init(void __iomem *combiner_base,
				 struct device_node *np)
{
	int i, irq;
	unsigned int nr_irq;

	nr_irq = max_nr * IRQ_IN_COMBINER;

	combiner_data = kcalloc(max_nr, sizeof(*combiner_data), GFP_KERNEL);
	if (!combiner_data) {
		pr_warn("%s: could not allocate combiner data\n", __func__);
		return;
	}

	combiner_irq_domain = irq_domain_add_linear(np, nr_irq,
				&combiner_irq_domain_ops, combiner_data);
	if (WARN_ON(!combiner_irq_domain)) {
		pr_warn("%s: irq domain init failed\n", __func__);
		return;
	}

	for (i = 0; i < max_nr; i++) {
		irq = irq_of_parse_and_map(np, i);

		combiner_init_one(&combiner_data[i], i,
				  combiner_base + (i >> 2) * 0x10, irq);
		combiner_cascade_irq(&combiner_data[i], irq);
	}
}

#ifdef CONFIG_PM

/**
 * combiner_suspend - save interrupt combiner state before suspend
 *
 * Save the interrupt enable set register for all combiner groups since
 * the state is lost when the system enters into a sleep state.
 */
static int combiner_suspend(void)
{
	int i;

	for (i = 0; i < max_nr; i++)
		combiner_data[i].pm_save =
			__raw_readl(combiner_data[i].base + COMBINER_ENABLE_SET);

	return 0;
}

/**
 * combiner_resume - restore interrupt combiner state after resume
 *
 * Restore the interrupt enable set register for all combiner groups since
 * the state is lost when the system enters into a sleep state on suspend.
 */
static void combiner_resume(void)
{
	int i;

	for (i = 0; i < max_nr; i++) {
		__raw_writel(combiner_data[i].irq_mask,
			     combiner_data[i].base + COMBINER_ENABLE_CLEAR);
		__raw_writel(combiner_data[i].pm_save,
			     combiner_data[i].base + COMBINER_ENABLE_SET);
	}
}

#else
#define combiner_suspend	NULL
#define combiner_resume		NULL
#endif

static struct syscore_ops combiner_syscore_ops = {
	.suspend	= combiner_suspend,
	.resume		= combiner_resume,
};

static int __init combiner_of_init(struct device_node *np,
				   struct device_node *parent)
{
	void __iomem *combiner_base;

	combiner_base = of_iomap(np, 0);
	if (!combiner_base) {
		pr_err("%s: failed to map combiner registers\n", __func__);
		return -ENXIO;
	}

	if (of_property_read_u32(np, "samsung,combiner-nr", &max_nr)) {
		pr_info("%s: number of combiners not specified, "
			"setting default as %d.\n",
			__func__, max_nr);
	}

	combiner_init(combiner_base, np);

	register_syscore_ops(&combiner_syscore_ops);

	return 0;
}
IRQCHIP_DECLARE(exynos4210_combiner, "samsung,exynos4210-combiner",
		combiner_of_init);