/* arch/riscv/kernel/smp.c (OpenBMC Linux tree, revision 36de991e) */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * SMP initialisation and IPI support
 * Based on arch/arm64/kernel/smp.c
 *
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (C) 2015 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#include <linux/cpu.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/profile.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/irq_work.h>

#include <asm/sbi.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

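/* IPI message types; each occupies one bit in a CPU's pending-bit word. */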
enum ipi_message_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
	IPI_IRQ_WORK,
	IPI_TIMER,
	IPI_MAX
};

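/*
 * Map from logical CPU id to hardware hart id; entries stay INVALID_HARTID
 * until the corresponding hart is enumerated.
 */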
unsigned long __cpuid_to_hartid_map[NR_CPUS] __ro_after_init = {
	[0 ... NR_CPUS-1] = INVALID_HARTID
};

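/* Record the boot hart as logical CPU 0. */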
void __init smp_setup_processor_id(void)
{
	cpuid_to_hartid_map(0) = boot_cpu_hartid;
}

/* A collection of single bit ipi messages.  */
static struct {
	unsigned long stats[IPI_MAX] ____cacheline_aligned;
	unsigned long bits ____cacheline_aligned;
} ipi_data[NR_CPUS] __cacheline_aligned;

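/*
 * Translate a hardware hart id back to its logical CPU id, or return
 * -ENOENT if the hart is unknown.
 */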
int riscv_hartid_to_cpuid(int hartid)
{
	int i;

	for (i = 0; i < NR_CPUS; i++)
		if (cpuid_to_hartid_map(i) == hartid)
			return i;

	pr_err("Couldn't find cpu id for hartid [%d]\n", hartid);
	return -ENOENT;
}

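/* Convert a mask of logical CPU ids into the corresponding mask of hart ids. */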
void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out)
{
	int cpu;

	cpumask_clear(out);
	for_each_cpu(cpu, in)
		cpumask_set_cpu(cpuid_to_hartid_map(cpu), out);
}
EXPORT_SYMBOL_GPL(riscv_cpuid_to_hartid_mask);

bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
	return phys_id == cpuid_to_hartid_map(cpu);
}

/* Unsupported */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

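/* Mark this CPU offline and spin in WFI; run by the IPI_CPU_STOP handler. */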
static void ipi_stop(void)
{
	set_cpu_online(smp_processor_id(), false);
	while (1)
		wait_for_interrupt();
}

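/*
 * Backend used to inject and clear IPIs, registered by the platform's IPI
 * provider (e.g. the SBI or CLINT code) via riscv_set_ipi_ops().
 */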
static const struct riscv_ipi_ops *ipi_ops __ro_after_init;

void riscv_set_ipi_ops(const struct riscv_ipi_ops *ops)
{
	ipi_ops = ops;
}
EXPORT_SYMBOL_GPL(riscv_set_ipi_ops);

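/*
 * Acknowledge a pending IPI: clear it in the IPI driver (if it provides a
 * clear hook) and clear the software-interrupt pending bit in the local
 * interrupt-pending CSR.
 */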
void riscv_clear_ipi(void)
{
	if (ipi_ops && ipi_ops->ipi_clear)
		ipi_ops->ipi_clear();

	csr_clear(CSR_IP, IE_SIE);
}
EXPORT_SYMBOL_GPL(riscv_clear_ipi);

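/*
 * Post @op in each target CPU's pending-bit word, then ask the registered
 * IPI backend to inject the interrupt.
 */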
static void send_ipi_mask(const struct cpumask *mask, enum ipi_message_type op)
{
	int cpu;

	smp_mb__before_atomic();
	for_each_cpu(cpu, mask)
		set_bit(op, &ipi_data[cpu].bits);
	smp_mb__after_atomic();

	if (ipi_ops && ipi_ops->ipi_inject)
		ipi_ops->ipi_inject(mask);
	else
		pr_warn("SMP: IPI inject method not available\n");
}

static void send_ipi_single(int cpu, enum ipi_message_type op)
{
	smp_mb__before_atomic();
	set_bit(op, &ipi_data[cpu].bits);
	smp_mb__after_atomic();

	if (ipi_ops && ipi_ops->ipi_inject)
		ipi_ops->ipi_inject(cpumask_of(cpu));
	else
		pr_warn("SMP: IPI inject method not available\n");
}

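/* Raise irq_work on the local CPU by sending a self-IPI. */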
#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
	send_ipi_single(smp_processor_id(), IPI_IRQ_WORK);
}
#endif

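/*
 * IPI entry point: acknowledge the interrupt, then repeatedly drain this
 * CPU's pending-bit word and dispatch each requested message.
 */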
void handle_IPI(struct pt_regs *regs)
{
	unsigned long *pending_ipis = &ipi_data[smp_processor_id()].bits;
	unsigned long *stats = ipi_data[smp_processor_id()].stats;

	riscv_clear_ipi();

	while (true) {
		unsigned long ops;

		/* Order bit clearing and data access. */
		mb();

		ops = xchg(pending_ipis, 0);
		if (ops == 0)
			return;

		if (ops & (1 << IPI_RESCHEDULE)) {
			stats[IPI_RESCHEDULE]++;
			scheduler_ipi();
		}

		if (ops & (1 << IPI_CALL_FUNC)) {
			stats[IPI_CALL_FUNC]++;
			generic_smp_call_function_interrupt();
		}

		if (ops & (1 << IPI_CPU_STOP)) {
			stats[IPI_CPU_STOP]++;
			ipi_stop();
		}

		if (ops & (1 << IPI_IRQ_WORK)) {
			stats[IPI_IRQ_WORK]++;
			irq_work_run();
		}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
		if (ops & (1 << IPI_TIMER)) {
			stats[IPI_TIMER]++;
			tick_receive_broadcast();
		}
#endif
		BUG_ON((ops >> IPI_MAX) != 0);

		/* Order data access and bit testing. */
		mb();
	}
}

static const char * const ipi_names[] = {
	[IPI_RESCHEDULE]	= "Rescheduling interrupts",
	[IPI_CALL_FUNC]		= "Function call interrupts",
	[IPI_CPU_STOP]		= "CPU stop interrupts",
	[IPI_IRQ_WORK]		= "IRQ work interrupts",
	[IPI_TIMER]		= "Timer broadcast interrupts",
};

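/* Print the per-CPU IPI counters, one line per IPI type. */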
void show_ipi_stats(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < IPI_MAX; i++) {
		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
			   prec >= 4 ? " " : "");
		for_each_online_cpu(cpu)
			seq_printf(p, "%10lu ", ipi_data[cpu].stats[i]);
		seq_printf(p, " %s\n", ipi_names[i]);
	}
}

void arch_send_call_function_ipi_mask(struct cpumask *mask)
{
	send_ipi_mask(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi_single(cpu, IPI_CALL_FUNC);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	send_ipi_mask(mask, IPI_TIMER);
}
#endif

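/*
 * Stop all other online CPUs: send them IPI_CPU_STOP and wait up to one
 * second for them to go offline.
 */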
void smp_send_stop(void)
{
	unsigned long timeout;

	if (num_online_cpus() > 1) {
		cpumask_t mask;

		cpumask_copy(&mask, cpu_online_mask);
		cpumask_clear_cpu(smp_processor_id(), &mask);

		if (system_state <= SYSTEM_RUNNING)
			pr_crit("SMP: stopping secondary CPUs\n");
		send_ipi_mask(&mask, IPI_CPU_STOP);
	}

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);

	if (num_online_cpus() > 1)
		pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
			   cpumask_pr_args(cpu_online_mask));
}

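/* Send a rescheduling IPI to a single CPU. */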
void smp_send_reschedule(int cpu)
{
	send_ipi_single(cpu, IPI_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(smp_send_reschedule);
259