// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 * Copyright (C) 2018 Christoph Hellwig
 */

#include <linux/interrupt.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <asm/sbi.h>
#include <asm/smp.h>
#include <asm/softirq_stack.h>
#include <asm/stacktrace.h>

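/*
 * Callback that provides the fwnode of the per-hart local interrupt
 * controller (INTC); the INTC irqchip driver registers it via
 * riscv_set_intc_hwnode_fn().
 */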
static struct fwnode_handle *(*__get_intc_node)(void);

void riscv_set_intc_hwnode_fn(struct fwnode_handle *(*fn)(void))
{
	__get_intc_node = fn;
}

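/*
 * Return the fwnode of the local interrupt controller, or NULL if no
 * provider has been registered yet.
 */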
struct fwnode_handle *riscv_get_intc_hwnode(void)
{
	if (__get_intc_node)
		return __get_intc_node();

	return NULL;
}
EXPORT_SYMBOL_GPL(riscv_get_intc_hwnode);

#ifdef CONFIG_IRQ_STACKS
#include <asm/irq_stack.h>

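/* Per-CPU pointer to the base of the dedicated IRQ stack. */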
DEFINE_PER_CPU(ulong *, irq_stack_ptr);

#ifdef CONFIG_VMAP_STACK
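/*
 * With CONFIG_VMAP_STACK the IRQ stacks are allocated from vmalloc space,
 * so an overflow runs into a guard page instead of silently corrupting
 * adjacent memory.
 */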
static void init_irq_stacks(void)
{
	int cpu;
	ulong *p;

	for_each_possible_cpu(cpu) {
		p = arch_alloc_vmap_stack(IRQ_STACK_SIZE, cpu_to_node(cpu));
		per_cpu(irq_stack_ptr, cpu) = p;
	}
}
#else
/* irq stack only needs to be 16 byte aligned - not IRQ_STACK_SIZE aligned. */
DEFINE_PER_CPU_ALIGNED(ulong [IRQ_STACK_SIZE/sizeof(ulong)], irq_stack);

static void init_irq_stacks(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		per_cpu(irq_stack_ptr, cpu) = per_cpu(irq_stack, cpu);
}
#endif /* CONFIG_VMAP_STACK */

#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
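/*
 * Run pending softirqs on the per-CPU IRQ stack instead of the current
 * task stack, so deep softirq processing cannot overflow the latter.
 */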
void do_softirq_own_stack(void)
{
#ifdef CONFIG_IRQ_STACKS
	if (on_thread_stack()) {
		ulong *sp = per_cpu(irq_stack_ptr, smp_processor_id())
					+ IRQ_STACK_SIZE/sizeof(ulong);
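		/*
		 * Switch stacks by hand: save ra and s0 on the current
		 * stack, point s0 at the old frame, move sp to the top of
		 * this CPU's IRQ stack, call __do_softirq(), then restore
		 * the saved registers and the original sp.
		 */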
		__asm__ __volatile(
		"addi	sp, sp, -"RISCV_SZPTR  "\n"
		REG_S"  ra, (sp)		\n"
		"addi	sp, sp, -"RISCV_SZPTR  "\n"
		REG_S"  s0, (sp)		\n"
		"addi	s0, sp, 2*"RISCV_SZPTR "\n"
		"move	sp, %[sp]		\n"
		"call	__do_softirq		\n"
		"addi	sp, s0, -2*"RISCV_SZPTR"\n"
		REG_L"  s0, (sp)		\n"
		"addi	sp, sp, "RISCV_SZPTR   "\n"
		REG_L"  ra, (sp)		\n"
		"addi	sp, sp, "RISCV_SZPTR   "\n"
		:
		: [sp] "r" (sp)
		: "a0", "a1", "a2", "a3", "a4", "a5", "a6", "a7",
		  "t0", "t1", "t2", "t3", "t4", "t5", "t6",
#ifndef CONFIG_FRAME_POINTER
		  "s0",
#endif
		  "memory");
	} else
#endif
		__do_softirq();
}
#endif /* CONFIG_SOFTIRQ_ON_OWN_STACK */

#else
static void init_irq_stacks(void) {}
#endif /* CONFIG_IRQ_STACKS */

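/* Append the arch-specific (IPI) statistics to /proc/interrupts. */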
int arch_show_interrupts(struct seq_file *p, int prec)
{
	show_ipi_stats(p, prec);
	return 0;
}

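/*
 * Set up the per-CPU IRQ stacks, probe the interrupt controller and
 * initialize SBI-based IPIs.
 */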
void __init init_IRQ(void)
{
	init_irq_stacks();
	irqchip_init();
	if (!handle_arch_irq)
		panic("No interrupt controller found.");
	sbi_ipi_init();
}