xref: /openbmc/linux/arch/riscv/kernel/smp.c (revision 9fb29c73)
/*
 * SMP initialisation and IPI support
 * Based on arch/arm64/kernel/smp.c
 *
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (C) 2015 Regents of the University of California
 * Copyright (C) 2017 SiFive
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/delay.h>

#include <asm/sbi.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

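/*
 * The IPI message types multiplexed over the single supervisor software
 * interrupt: reschedule requests, cross-CPU function calls, and CPU stop
 * requests.  Each type occupies one bit in ipi_data[].bits.
 */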
enum ipi_message_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
	IPI_MAX
};

/*
 * A collection of single-bit IPI messages.  The stats and bits fields are
 * each cacheline-aligned to limit false sharing between the sending and
 * receiving harts.
 */
static struct {
	unsigned long stats[IPI_MAX] ____cacheline_aligned;
	unsigned long bits ____cacheline_aligned;
} ipi_data[NR_CPUS] __cacheline_aligned;

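/*
 * Translate a hardware-thread (hart) ID into the kernel's logical CPU id.
 * Hart IDs are not guaranteed to be contiguous, so the kernel keeps its own
 * mapping in cpuid_to_hartid_map().
 */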
int riscv_hartid_to_cpuid(int hartid)
{
	int i = -1;

	for (i = 0; i < NR_CPUS; i++)
		if (cpuid_to_hartid_map(i) == hartid)
			return i;

	pr_err("Couldn't find cpu id for hartid [%d]\n", hartid);
	BUG();
	return i;
}

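/* Convert a mask of logical CPU ids into the corresponding mask of hart ids. */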
void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out)
{
	int cpu;

	for_each_cpu(cpu, in)
		cpumask_set_cpu(cpuid_to_hartid_map(cpu), out);
}

/* Unsupported */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

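/*
 * Handler for IPI_CPU_STOP: mark this CPU offline and park the hart in
 * wait_for_interrupt() until the machine goes down.
 */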
static void ipi_stop(void)
{
	set_cpu_online(smp_processor_id(), false);
	while (1)
		wait_for_interrupt();
}

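/*
 * Handle a supervisor software interrupt.  Pending IPI messages for this
 * hart are consumed in a loop, since new bits may be set while earlier
 * ones are being processed.
 */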
void riscv_software_interrupt(void)
{
	unsigned long *pending_ipis = &ipi_data[smp_processor_id()].bits;
	unsigned long *stats = ipi_data[smp_processor_id()].stats;

	/* Clear pending IPI */
	csr_clear(sip, SIE_SSIE);

	while (true) {
		unsigned long ops;

		/* Order bit clearing and data access. */
		mb();

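		/* Atomically fetch and clear all pending IPI bits for this hart. */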
		ops = xchg(pending_ipis, 0);
		if (ops == 0)
			return;

		if (ops & (1 << IPI_RESCHEDULE)) {
			stats[IPI_RESCHEDULE]++;
			scheduler_ipi();
		}

		if (ops & (1 << IPI_CALL_FUNC)) {
			stats[IPI_CALL_FUNC]++;
			generic_smp_call_function_interrupt();
		}

		if (ops & (1 << IPI_CPU_STOP)) {
			stats[IPI_CPU_STOP]++;
			ipi_stop();
		}

		BUG_ON((ops >> IPI_MAX) != 0);

		/* Order data access and bit testing. */
		mb();
	}
}

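/*
 * Post @operation in each target CPU's ipi_data, then ask the SBI firmware
 * to raise a software interrupt on the corresponding harts.  The memory
 * barriers order the message stores against the interrupt trigger.
 */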
static void
send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
{
	int cpuid, hartid;
	struct cpumask hartid_mask;

	cpumask_clear(&hartid_mask);
	mb();
	for_each_cpu(cpuid, to_whom) {
		set_bit(operation, &ipi_data[cpuid].bits);
		hartid = cpuid_to_hartid_map(cpuid);
		cpumask_set_cpu(hartid, &hartid_mask);
	}
	mb();
	sbi_send_ipi(cpumask_bits(&hartid_mask));
}

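/* Human-readable labels for each IPI type, used by show_ipi_stats() below. */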
static const char * const ipi_names[] = {
	[IPI_RESCHEDULE]	= "Rescheduling interrupts",
	[IPI_CALL_FUNC]		= "Function call interrupts",
	[IPI_CPU_STOP]		= "CPU stop interrupts",
};

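/* Print per-CPU IPI statistics, one row per IPI message type. */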
void show_ipi_stats(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < IPI_MAX; i++) {
		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
			   prec >= 4 ? " " : "");
		for_each_online_cpu(cpu)
			seq_printf(p, "%10lu ", ipi_data[cpu].stats[i]);
		seq_printf(p, " %s\n", ipi_names[i]);
	}
}

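/* Arch hooks used by the generic cross-CPU function-call code (kernel/smp.c). */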
void arch_send_call_function_ipi_mask(struct cpumask *mask)
{
	send_ipi_message(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
}

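/*
 * Stop all other CPUs, e.g. on shutdown or panic: send IPI_CPU_STOP to every
 * other online CPU, then poll for up to one second for them to go offline.
 */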
void smp_send_stop(void)
{
	unsigned long timeout;

	if (num_online_cpus() > 1) {
		cpumask_t mask;

		cpumask_copy(&mask, cpu_online_mask);
		cpumask_clear_cpu(smp_processor_id(), &mask);

		if (system_state <= SYSTEM_RUNNING)
			pr_crit("SMP: stopping secondary CPUs\n");
		send_ipi_message(&mask, IPI_CPU_STOP);
	}

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);

	if (num_online_cpus() > 1)
		pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
			   cpumask_pr_args(cpu_online_mask));
}

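/* Trigger the scheduler IPI on a remote CPU. */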
void smp_send_reschedule(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}

/*
 * Performs an icache flush for the given MM context.  RISC-V has no direct
 * mechanism for instruction cache shootdowns, so instead we send an IPI that
 * informs the remote harts that they need to flush their local instruction
 * caches.  To avoid pathologically slow behavior in a common case (a bunch
 * of single-hart processes on a many-hart machine, i.e. 'make -j') we avoid
 * the IPIs for harts that are not currently executing the MM context and
 * instead schedule a deferred local instruction cache flush to be performed
 * before execution resumes on each hart.
 */
void flush_icache_mm(struct mm_struct *mm, bool local)
{
	unsigned int cpu;
	cpumask_t others, hmask, *mask;

	preempt_disable();

	/* Mark every hart's icache as needing a flush for this MM. */
	mask = &mm->context.icache_stale_mask;
	cpumask_setall(mask);
	/* Flush this hart's I$ now, and mark it as flushed. */
	cpu = smp_processor_id();
	cpumask_clear_cpu(cpu, mask);
	local_flush_icache_all();

	/*
	 * Flush the I$ of other harts that are concurrently executing this
	 * MM context, and mark them as flushed.
	 */
	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
	local |= cpumask_empty(&others);
	if (mm != current->active_mm || !local) {
		cpumask_clear(&hmask);
		riscv_cpuid_to_hartid_mask(&others, &hmask);
		sbi_remote_fence_i(hmask.bits);
	} else {
		/*
		 * It's assumed that at least one strongly ordered operation is
		 * performed on this hart between setting a hart's cpumask bit
		 * and scheduling this MM context on that hart.  Sending an SBI
		 * remote message will do this, but in the case where no
		 * messages are sent we still need to order this hart's writes
		 * with flush_icache_deferred().
		 */
		smp_mb();
	}

	preempt_enable();
}