/*
 * Allwinner A20/A31 SoCs NMI IRQ chip driver.
 *
 * Carlo Caione <carlo.caione@gmail.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/irqchip/chained_irq.h>

#include "irqchip.h"

#define SUNXI_NMI_SRC_TYPE_MASK	0x00000003

enum {
	SUNXI_SRC_TYPE_LEVEL_LOW = 0,
	SUNXI_SRC_TYPE_EDGE_FALLING,
	SUNXI_SRC_TYPE_LEVEL_HIGH,
	SUNXI_SRC_TYPE_EDGE_RISING,
};

/* Register layout differs between the A20 (sun7i) and A31 (sun6i) variants. */
struct sunxi_sc_nmi_reg_offs {
	u32 ctrl;
	u32 pend;
	u32 enable;
};

static struct sunxi_sc_nmi_reg_offs sun7i_reg_offs = {
	.ctrl	= 0x00,
	.pend	= 0x04,
	.enable	= 0x08,
};

static struct sunxi_sc_nmi_reg_offs sun6i_reg_offs = {
	.ctrl	= 0x00,
	.pend	= 0x04,
	.enable	= 0x34,
};

static inline void sunxi_sc_nmi_write(struct irq_chip_generic *gc, u32 off,
				      u32 val)
{
	irq_reg_writel(gc, val, off);
}

static inline u32 sunxi_sc_nmi_read(struct irq_chip_generic *gc, u32 off)
{
	return irq_reg_readl(gc, off);
}

/*
 * Chained handler: the controller exposes a single NMI line, so forward
 * it to the one virtual IRQ mapped at hwirq 0 in our domain.
 */
static void sunxi_sc_nmi_handle_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_domain *domain = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_get_chip(irq);
	unsigned int virq = irq_find_mapping(domain, 0);

	chained_irq_enter(chip, desc);
	generic_handle_irq(virq);
	chained_irq_exit(chip, desc);
}

static int sunxi_sc_nmi_set_type(struct irq_data *data, unsigned int flow_type)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
	struct irq_chip_type *ct = gc->chip_types;
	u32 src_type_reg;
	u32 ctrl_off = ct->regs.type;
	unsigned int src_type;
	unsigned int i;

	irq_gc_lock(gc);

	switch (flow_type & IRQF_TRIGGER_MASK) {
	case IRQ_TYPE_EDGE_FALLING:
		src_type = SUNXI_SRC_TYPE_EDGE_FALLING;
		break;
	case IRQ_TYPE_EDGE_RISING:
		src_type = SUNXI_SRC_TYPE_EDGE_RISING;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
		src_type = SUNXI_SRC_TYPE_LEVEL_HIGH;
		break;
	case IRQ_TYPE_NONE:
	case IRQ_TYPE_LEVEL_LOW:
		src_type = SUNXI_SRC_TYPE_LEVEL_LOW;
		break;
	default:
		irq_gc_unlock(gc);
		pr_err("%s: Cannot assign multiple trigger modes to IRQ %d.\n",
		       __func__, data->irq);
		return -EBADR;
	}

	irqd_set_trigger_type(data, flow_type);
	irq_setup_alt_chip(data, flow_type);

	/* Use the type register of the chip type that handles this trigger. */
	for (i = 0; i < gc->num_ct; i++, ct++)
		if (ct->type & flow_type)
			ctrl_off = ct->regs.type;

	src_type_reg = sunxi_sc_nmi_read(gc, ctrl_off);
	src_type_reg &= ~SUNXI_NMI_SRC_TYPE_MASK;
	src_type_reg |= src_type;
	sunxi_sc_nmi_write(gc, ctrl_off, src_type_reg);

	irq_gc_unlock(gc);

	return IRQ_SET_MASK_OK;
}

static int __init sunxi_sc_nmi_irq_init(struct device_node *node,
					struct sunxi_sc_nmi_reg_offs *reg_offs)
{
	struct irq_domain *domain;
	struct irq_chip_generic *gc;
	unsigned int irq;
	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
	int ret;

	domain = irq_domain_add_linear(node, 1, &irq_generic_chip_ops, NULL);
	if (!domain) {
		pr_err("%s: Could not register interrupt domain.\n", node->name);
		return -ENOMEM;
	}

	ret = irq_alloc_domain_generic_chips(domain, 1, 2, node->name,
					     handle_fasteoi_irq, clr, 0,
					     IRQ_GC_INIT_MASK_CACHE);
	if (ret) {
		pr_err("%s: Could not allocate generic interrupt chip.\n",
		       node->name);
		goto fail_irqd_remove;
	}

	irq = irq_of_parse_and_map(node, 0);
	if (irq <= 0) {
		pr_err("%s: unable to parse irq\n", node->name);
		ret = -EINVAL;
		goto fail_irqd_remove;
	}

	gc = irq_get_domain_generic_chip(domain, 0);
	gc->reg_base = of_iomap(node, 0);
	if (!gc->reg_base) {
		pr_err("%s: unable to map resource\n", node->name);
		ret = -ENOMEM;
		goto fail_irqd_remove;
	}

	/* Chip type 0 handles the level-triggered modes (fasteoi flow). */
	gc->chip_types[0].type			= IRQ_TYPE_LEVEL_MASK;
	gc->chip_types[0].chip.irq_mask		= irq_gc_mask_clr_bit;
	gc->chip_types[0].chip.irq_unmask	= irq_gc_mask_set_bit;
	gc->chip_types[0].chip.irq_eoi		= irq_gc_ack_set_bit;
	gc->chip_types[0].chip.irq_set_type	= sunxi_sc_nmi_set_type;
	gc->chip_types[0].chip.flags		= IRQCHIP_EOI_THREADED |
						  IRQCHIP_EOI_IF_HANDLED;
	gc->chip_types[0].regs.ack		= reg_offs->pend;
	gc->chip_types[0].regs.mask		= reg_offs->enable;
	gc->chip_types[0].regs.type		= reg_offs->ctrl;

	/* Chip type 1 handles the edge-triggered modes (edge flow). */
	gc->chip_types[1].type			= IRQ_TYPE_EDGE_BOTH;
	gc->chip_types[1].chip.name		= gc->chip_types[0].chip.name;
	gc->chip_types[1].chip.irq_ack		= irq_gc_ack_set_bit;
	gc->chip_types[1].chip.irq_mask		= irq_gc_mask_clr_bit;
	gc->chip_types[1].chip.irq_unmask	= irq_gc_mask_set_bit;
	gc->chip_types[1].chip.irq_set_type	= sunxi_sc_nmi_set_type;
	gc->chip_types[1].regs.ack		= reg_offs->pend;
	gc->chip_types[1].regs.mask		= reg_offs->enable;
	gc->chip_types[1].regs.type		= reg_offs->ctrl;
	gc->chip_types[1].handler		= handle_edge_irq;

	/* Mask the NMI and clear any pending status before chaining it. */
	sunxi_sc_nmi_write(gc, reg_offs->enable, 0);
	sunxi_sc_nmi_write(gc, reg_offs->pend, 0x1);

	irq_set_handler_data(irq, domain);
	irq_set_chained_handler(irq, sunxi_sc_nmi_handle_irq);

	return 0;

fail_irqd_remove:
	irq_domain_remove(domain);

	return ret;
}

static int __init sun6i_sc_nmi_irq_init(struct device_node *node,
					struct device_node *parent)
{
	return sunxi_sc_nmi_irq_init(node, &sun6i_reg_offs);
}
IRQCHIP_DECLARE(sun6i_sc_nmi, "allwinner,sun6i-a31-sc-nmi", sun6i_sc_nmi_irq_init);

static int __init sun7i_sc_nmi_irq_init(struct device_node *node,
					struct device_node *parent)
{
	return sunxi_sc_nmi_irq_init(node, &sun7i_reg_offs);
}
IRQCHIP_DECLARE(sun7i_sc_nmi, "allwinner,sun7i-a20-sc-nmi", sun7i_sc_nmi_irq_init);