// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/sh/kernel/irq.c
 *
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 *
 * SuperH version:  Copyright (C) 1999  Niibe Yutaka
 */
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/ftrace.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
#include <asm/processor.h>
#include <asm/machvec.h>
#include <linux/uaccess.h>
#include <asm/thread_info.h>
#include <cpu/mmu_context.h>

atomic_t irq_err_count;

/*
 * 'what should we do if we get a hw irq event on an illegal vector'.
 * each architecture has to answer this themselves, it doesn't deserve
 * a generic callback i think.
 */
void ack_bad_irq(unsigned int irq)
{
	atomic_inc(&irq_err_count);
	printk("unexpected IRQ trap at vector %02x\n", irq);
}

#if defined(CONFIG_PROC_FS)
/*
 * /proc/interrupts printing for arch specific interrupts
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

	seq_printf(p, "%*s: ", prec, "NMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat.__nmi_count, j));
	seq_printf(p, "  Non-maskable interrupts\n");

	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));

	return 0;
}
#endif

#ifdef CONFIG_IRQSTACKS
/*
 * per-CPU IRQ handling contexts (thread information and stack)
 */
union irq_ctx {
	struct thread_info	tinfo;
	u32			stack[THREAD_SIZE/sizeof(u32)];
};

static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;

static char softirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
static char hardirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;

static inline void handle_one_irq(unsigned int irq)
{
	union irq_ctx *curctx, *irqctx;

	curctx = (union irq_ctx *)current_thread_info();
	irqctx = hardirq_ctx[smp_processor_id()];

	/*
	 * this is where we switch to the IRQ stack. However, if we are
	 * already using the IRQ stack (because we interrupted a hardirq
	 * handler) we can't do that and just have to keep using the
	 * current stack (which is the irq stack already after all)
	 */
	if (curctx != irqctx) {
		u32 *isp;

		isp = (u32 *)((char *)irqctx + sizeof(*irqctx));
		irqctx->tinfo.task = curctx->tinfo.task;
		irqctx->tinfo.previous_sp = current_stack_pointer;

		/*
		 * Copy the softirq bits in preempt_count so that the
		 * softirq checks work in the hardirq context.
		 */
		irqctx->tinfo.preempt_count =
			(irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
			(curctx->tinfo.preempt_count & SOFTIRQ_MASK);

		__asm__ __volatile__ (
			"mov	%0, r4		\n"
			"mov	r15, r8		\n"
			"jsr	@%1		\n"
			/* switch to the irq stack */
			" mov	%2, r15		\n"
			/* restore the stack (ring zero) */
			"mov	r8, r15		\n"
			: /* no outputs */
			: "r" (irq), "r" (generic_handle_irq), "r" (isp)
			: "memory", "r0", "r1", "r2", "r3", "r4",
			  "r5", "r6", "r7", "r8", "t", "pr"
		);
	} else
		generic_handle_irq(irq);
}
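
/*
 * Note on the inline assembly above: on SuperH, r4 carries the first C
 * function argument and r15 is the stack pointer, so the "mov %2, r15"
 * sitting in the jsr delay slot takes effect just before control reaches
 * generic_handle_irq(), which therefore runs on the per-CPU hardirq
 * stack.  The original stack pointer is parked in the callee-saved r8
 * and restored once the handler returns.  The initial IRQ stack pointer
 * is the top of the THREAD_SIZE-sized irq_ctx; the stack grows downwards
 * towards the thread_info at the bottom, mirroring normal task stacks.
 */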

/*
 * allocate per-cpu stacks for hardirq and for softirq processing
 */
void irq_ctx_init(int cpu)
{
	union irq_ctx *irqctx;

	if (hardirq_ctx[cpu])
		return;

	irqctx = (union irq_ctx *)&hardirq_stack[cpu * THREAD_SIZE];
	irqctx->tinfo.task = NULL;
	irqctx->tinfo.cpu = cpu;
	irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
	irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);

	hardirq_ctx[cpu] = irqctx;

	irqctx = (union irq_ctx *)&softirq_stack[cpu * THREAD_SIZE];
	irqctx->tinfo.task = NULL;
	irqctx->tinfo.cpu = cpu;
	irqctx->tinfo.preempt_count = 0;
	irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);

	softirq_ctx[cpu] = irqctx;

	printk("CPU %u irqstacks, hard=%p soft=%p\n",
	       cpu, hardirq_ctx[cpu], softirq_ctx[cpu]);
}

void irq_ctx_exit(int cpu)
{
	hardirq_ctx[cpu] = NULL;
}

void do_softirq_own_stack(void)
{
	struct thread_info *curctx;
	union irq_ctx *irqctx;
	u32 *isp;

	curctx = current_thread_info();
	irqctx = softirq_ctx[smp_processor_id()];
	irqctx->tinfo.task = curctx->task;
	irqctx->tinfo.previous_sp = current_stack_pointer;

	/* build the stack frame on the softirq stack */
	isp = (u32 *)((char *)irqctx + sizeof(*irqctx));

	__asm__ __volatile__ (
		"mov	r15, r9		\n"
		"jsr	@%0		\n"
		/* switch to the softirq stack */
		" mov	%1, r15		\n"
		/* restore the thread stack */
		"mov	r9, r15		\n"
		: /* no outputs */
		: "r" (__do_softirq), "r" (isp)
		: "memory", "r0", "r1", "r2", "r3", "r4",
		  "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
	);
}
#else
static inline void handle_one_irq(unsigned int irq)
{
	generic_handle_irq(irq);
}
#endif
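
/*
 * do_IRQ() is the common C entry point for external interrupts: the
 * low-level exception handler passes in the raw vector, which
 * irq_lookup()/irq_demux() translate into a Linux IRQ number (or
 * NO_IRQ_IGNORE for vectors that should simply be dropped), and
 * handle_one_irq() then dispatches it, using the separate per-CPU IRQ
 * stack when CONFIG_IRQSTACKS is enabled.
 */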
asmlinkage __irq_entry int do_IRQ(unsigned int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	irq_enter();

	irq = irq_demux(irq_lookup(irq));

	if (irq != NO_IRQ_IGNORE) {
		handle_one_irq(irq);
		irq_finish(irq);
	}

	irq_exit();

	set_irq_regs(old_regs);

	return IRQ_HANDLED;
}

void __init init_IRQ(void)
{
	plat_irq_setup();

	/* Perform the machine specific initialisation */
	if (sh_mv.mv_init_irq)
		sh_mv.mv_init_irq();

	intc_finalize();

	irq_ctx_init(smp_processor_id());
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * The CPU has been marked offline.  Migrate IRQs off this CPU.  If
 * the affinity settings do not allow other CPUs, force them onto any
 * available CPU.
 */
void migrate_irqs(void)
{
	unsigned int irq, cpu = smp_processor_id();

	for_each_active_irq(irq) {
		struct irq_data *data = irq_get_irq_data(irq);

		if (irq_data_get_node(data) == cpu) {
			struct cpumask *mask = irq_data_get_affinity_mask(data);
			unsigned int newcpu = cpumask_any_and(mask,
							      cpu_online_mask);
			if (newcpu >= nr_cpu_ids) {
				pr_info_ratelimited("IRQ%u no longer affine to CPU%u\n",
						    irq, cpu);

				cpumask_setall(mask);
			}
			irq_set_affinity(irq, mask);
		}
	}
}
#endif