/*
 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Combiner irqchip for EXYNOS
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/err.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/irqdomain.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/interrupt.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#define COMBINER_ENABLE_SET	0x0
#define COMBINER_ENABLE_CLEAR	0x4
#define COMBINER_INT_STATUS	0xC

#define IRQ_IN_COMBINER		8

static DEFINE_SPINLOCK(irq_controller_lock);

struct combiner_chip_data {
	unsigned int hwirq_offset;
	unsigned int irq_mask;
	void __iomem *base;
	unsigned int parent_irq;
#ifdef CONFIG_PM
	u32 pm_save;
#endif
};

static struct combiner_chip_data *combiner_data;
static struct irq_domain *combiner_irq_domain;
static unsigned int max_nr = 20;

static inline void __iomem *combiner_base(struct irq_data *data)
{
	struct combiner_chip_data *combiner_data =
		irq_data_get_irq_chip_data(data);

	return combiner_data->base;
}

static void combiner_mask_irq(struct irq_data *data)
{
	u32 mask = 1 << (data->hwirq % 32);

	__raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_CLEAR);
}

static void combiner_unmask_irq(struct irq_data *data)
{
	u32 mask = 1 << (data->hwirq % 32);

	__raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_SET);
}

static void combiner_handle_cascade_irq(unsigned int __irq,
					struct irq_desc *desc)
{
	struct combiner_chip_data *chip_data = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned int irq = irq_desc_get_irq(desc);
	unsigned int cascade_irq, combiner_irq;
	unsigned long status;

	chained_irq_enter(chip, desc);

	spin_lock(&irq_controller_lock);
	status = __raw_readl(chip_data->base + COMBINER_INT_STATUS);
	spin_unlock(&irq_controller_lock);
	status &= chip_data->irq_mask;

	if (status == 0)
		goto out;

	combiner_irq = chip_data->hwirq_offset + __ffs(status);
	cascade_irq = irq_find_mapping(combiner_irq_domain, combiner_irq);

	if (unlikely(!cascade_irq))
		handle_bad_irq(irq, desc);
	else
		generic_handle_irq(cascade_irq);

 out:
	chained_irq_exit(chip, desc);
}

#ifdef CONFIG_SMP
static int combiner_set_affinity(struct irq_data *d,
				 const struct cpumask *mask_val, bool force)
{
	struct combiner_chip_data *chip_data = irq_data_get_irq_chip_data(d);
	struct irq_chip *chip = irq_get_chip(chip_data->parent_irq);
	struct irq_data *data = irq_get_irq_data(chip_data->parent_irq);

	if (chip && chip->irq_set_affinity)
		return chip->irq_set_affinity(data, mask_val, force);
	else
		return -EINVAL;
}
#endif

static struct irq_chip combiner_chip = {
	.name			= "COMBINER",
	.irq_mask		= combiner_mask_irq,
	.irq_unmask		= combiner_unmask_irq,
#ifdef CONFIG_SMP
	.irq_set_affinity	= combiner_set_affinity,
#endif
};

static void __init combiner_cascade_irq(struct combiner_chip_data *combiner_data,
					unsigned int irq)
{
	irq_set_chained_handler_and_data(irq,
					 combiner_handle_cascade_irq,
					 combiner_data);
}

static void __init combiner_init_one(struct combiner_chip_data *combiner_data,
				     unsigned int combiner_nr,
				     void __iomem *base, unsigned int irq)
{
	combiner_data->base = base;
	combiner_data->hwirq_offset = (combiner_nr & ~3) * IRQ_IN_COMBINER;
	combiner_data->irq_mask = 0xff << ((combiner_nr % 4) << 3);
	combiner_data->parent_irq = irq;

	/* Disable all interrupts */
	__raw_writel(combiner_data->irq_mask, base + COMBINER_ENABLE_CLEAR);
}

static int combiner_irq_domain_xlate(struct irq_domain *d,
				     struct device_node *controller,
				     const u32 *intspec, unsigned int intsize,
				     unsigned long *out_hwirq,
				     unsigned int *out_type)
{
	if (d->of_node != controller)
		return -EINVAL;

	if (intsize < 2)
		return -EINVAL;

	*out_hwirq = intspec[0] * IRQ_IN_COMBINER + intspec[1];
	*out_type = 0;

	return 0;
}

static int combiner_irq_domain_map(struct irq_domain *d, unsigned int irq,
				   irq_hw_number_t hw)
{
	struct combiner_chip_data *combiner_data = d->host_data;

	irq_set_chip_and_handler(irq, &combiner_chip, handle_level_irq);
	irq_set_chip_data(irq, &combiner_data[hw >> 3]);
	set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);

	return 0;
}

static const struct irq_domain_ops combiner_irq_domain_ops = {
	.xlate	= combiner_irq_domain_xlate,
	.map	= combiner_irq_domain_map,
};

static void __init combiner_init(void __iomem *combiner_base,
				 struct device_node *np)
{
	int i, irq;
	unsigned int nr_irq;

	nr_irq = max_nr * IRQ_IN_COMBINER;

	combiner_data = kcalloc(max_nr, sizeof (*combiner_data), GFP_KERNEL);
	if (!combiner_data) {
		pr_warn("%s: could not allocate combiner data\n", __func__);
		return;
	}

	combiner_irq_domain = irq_domain_add_linear(np, nr_irq,
				&combiner_irq_domain_ops, combiner_data);
	if (WARN_ON(!combiner_irq_domain)) {
		pr_warn("%s: irq domain init failed\n", __func__);
		return;
	}

	for (i = 0; i < max_nr; i++) {
		irq = irq_of_parse_and_map(np, i);

		combiner_init_one(&combiner_data[i], i,
				  combiner_base + (i >> 2) * 0x10, irq);
		combiner_cascade_irq(&combiner_data[i], irq);
	}
}

#ifdef CONFIG_PM

/**
 * combiner_suspend - save interrupt combiner state before suspend
 *
 * Save the interrupt enable set register for all combiner groups since
 * the state is lost when the system enters into a sleep state.
 *
 */
static int combiner_suspend(void)
{
	int i;

	for (i = 0; i < max_nr; i++)
		combiner_data[i].pm_save =
			__raw_readl(combiner_data[i].base + COMBINER_ENABLE_SET);

	return 0;
}

/**
 * combiner_resume - restore interrupt combiner state after resume
 *
 * Restore the interrupt enable set register for all combiner groups since
 * the state is lost when the system enters into a sleep state on suspend.
 *
 */
static void combiner_resume(void)
{
	int i;

	for (i = 0; i < max_nr; i++) {
		__raw_writel(combiner_data[i].irq_mask,
			     combiner_data[i].base + COMBINER_ENABLE_CLEAR);
		__raw_writel(combiner_data[i].pm_save,
			     combiner_data[i].base + COMBINER_ENABLE_SET);
	}
}

#else
#define combiner_suspend NULL
#define combiner_resume NULL
#endif

static struct syscore_ops combiner_syscore_ops = {
	.suspend	= combiner_suspend,
	.resume		= combiner_resume,
};

static int __init combiner_of_init(struct device_node *np,
				   struct device_node *parent)
{
	void __iomem *combiner_base;

	combiner_base = of_iomap(np, 0);
	if (!combiner_base) {
		pr_err("%s: failed to map combiner registers\n", __func__);
		return -ENXIO;
	}

	if (of_property_read_u32(np, "samsung,combiner-nr", &max_nr)) {
		pr_info("%s: number of combiners not specified, "
			"setting default as %d.\n",
			__func__, max_nr);
	}

	combiner_init(combiner_base, np);

	register_syscore_ops(&combiner_syscore_ops);

	return 0;
}
IRQCHIP_DECLARE(exynos4210_combiner, "samsung,exynos4210-combiner",
		combiner_of_init);
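
/*
 * Illustrative sketch only, not part of the upstream file: roughly what a
 * device tree node bound by this driver looks like.  The compatible string,
 * the samsung,combiner-nr property, the two-cell interrupt specifier and the
 * per-group parent interrupts follow from the code above; the register
 * address, node name and parent interrupt numbers are board-specific
 * placeholders and are assumptions here.
 *
 *	interrupt-controller@10440000 {
 *		compatible = "samsung,exynos4210-combiner";
 *		interrupt-controller;
 *		#interrupt-cells = <2>;
 *		interrupt-parent = <&gic>;
 *		samsung,combiner-nr = <16>;
 *		reg = <0x10440000 0x1000>;
 *		interrupts = <0 0 0>, <0 1 0>, <0 2 0>, <0 3 0>;
 *	};
 *
 * A client device that references, say, combiner group 2, input 4 with
 * "interrupts = <2 4>" is translated by combiner_irq_domain_xlate() into
 * hwirq 2 * IRQ_IN_COMBINER + 4 = 20 within the linear domain.
 */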