xref: /openbmc/linux/arch/xtensa/kernel/smp.c (revision f1f1007644ffc8051a4c11427d58b1967ae7b75a)
/*
 * Xtensa SMP support functions.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 - 2013 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 * Joe Taylor <joe@tensilica.com>
 * Pete Delaney <piet@tensilica.com>
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/thread_info.h>

#include <asm/cacheflush.h>
#include <asm/kdebug.h>
#include <asm/mmu_context.h>
#include <asm/mxregs.h>
#include <asm/platform.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>

#ifdef CONFIG_SMP
# if XCHAL_HAVE_S32C1I == 0
#  error "The S32C1I option is required for SMP."
# endif
#endif

static void system_invalidate_dcache_range(unsigned long start,
		unsigned long size);
static void system_flush_invalidate_dcache_range(unsigned long start,
		unsigned long size);

/* IPI (Inter-Processor Interrupt) */

#define IPI_IRQ	0

static irqreturn_t ipi_interrupt(int irq, void *dev_id);
static struct irqaction ipi_irqaction = {
	.handler =	ipi_interrupt,
	.flags =	IRQF_PERCPU,
	.name =		"ipi",
};

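/*
 * Map IPI_IRQ through the default irq domain (NULL) and install the
 * shared per-CPU IPI handler.
 */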
void ipi_init(void)
{
	unsigned irq = irq_create_mapping(NULL, IPI_IRQ);
	setup_irq(irq, &ipi_irqaction);
}

static inline unsigned int get_core_count(void)
{
	/* Bits 18..21 of SYSCFGID contain the core count minus 1. */
	unsigned int syscfgid = get_er(SYSCFGID);
	return ((syscfgid >> 18) & 0xf) + 1;
}

static inline int get_core_id(void)
{
	/* Bits 0..13 of SYSCFGID contain the core id (mask 0x3fff). */
	unsigned int core_id = get_er(SYSCFGID);
	return core_id & 0x3fff;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned i;

	for (i = 0; i < max_cpus; ++i)
		set_cpu_present(i, true);
}

void __init smp_init_cpus(void)
{
	unsigned i;
	unsigned int ncpus = get_core_count();
	unsigned int core_id = get_core_id();

	pr_info("%s: Core Count = %d\n", __func__, ncpus);
	pr_info("%s: Core Id = %d\n", __func__, core_id);

	for (i = 0; i < ncpus; ++i)
		set_cpu_possible(i, true);
}

void __init smp_prepare_boot_cpu(void)
{
	unsigned int cpu = smp_processor_id();
	BUG_ON(cpu != 0);
	cpu_asid_cache(cpu) = ASID_USER_FIRST;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

static int boot_secondary_processors = 1; /* Set with xt-gdb via .xt-gdb */
static DECLARE_COMPLETION(cpu_running);

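/*
 * C entry point for a secondary core: initialize the MMU and exception
 * handling (EXCSAVE1), attach to init_mm, calibrate the delay loop, set
 * up the per-CPU interrupts and local timer, mark the CPU online and
 * finally enter the idle loop via cpu_startup_entry().
 */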
void secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	init_mmu();

#ifdef CONFIG_DEBUG_KERNEL
	if (boot_secondary_processors == 0) {
		pr_debug("%s: boot_secondary_processors:%d; Hanging cpu:%d\n",
			__func__, boot_secondary_processors, cpu);
		for (;;)
			__asm__ __volatile__ ("waiti " __stringify(LOCKLEVEL));
	}

	pr_debug("%s: boot_secondary_processors:%d; Booting cpu:%d\n",
		__func__, boot_secondary_processors, cpu);
#endif
	/* Init EXCSAVE1 */

	secondary_trap_init();

	/* All kernel threads share the same mm context. */

	atomic_inc(&mm->mm_users);
	mmgrab(mm);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	enter_lazy_tlb(mm, current);

	preempt_disable();
	trace_hardirqs_off();

	calibrate_delay();

	notify_cpu_starting(cpu);

	secondary_init_irq();
	local_timer_setup(cpu);

	set_cpu_online(cpu, true);

	local_irq_enable();

	complete(&cpu_running);

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

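/*
 * mx_cpu_start()/mx_cpu_stop() are run on CPU 0 via smp_call_function_single()
 * and clear or set the target core's bit in the MPSCORE run-stall mask,
 * releasing the core to run or stalling it again.
 */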
static void mx_cpu_start(void *p)
{
	unsigned cpu = (unsigned)p;
	unsigned long run_stall_mask = get_er(MPSCORE);

	set_er(run_stall_mask & ~(1u << cpu), MPSCORE);
	pr_debug("%s: cpu: %d, run_stall_mask: %lx ---> %lx\n",
			__func__, cpu, run_stall_mask, get_er(MPSCORE));
}

static void mx_cpu_stop(void *p)
{
	unsigned cpu = (unsigned)p;
	unsigned long run_stall_mask = get_er(MPSCORE);

	set_er(run_stall_mask | (1u << cpu), MPSCORE);
	pr_debug("%s: cpu: %d, run_stall_mask: %lx ---> %lx\n",
			__func__, cpu, run_stall_mask, get_er(MPSCORE));
}

#ifdef CONFIG_HOTPLUG_CPU
unsigned long cpu_start_id __cacheline_aligned;
#endif
unsigned long cpu_start_ccount;

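/*
 * Boot handshake with the secondary core: publish its id in cpu_start_id
 * (flushed to memory for the hotplug restart path), un-stall the core via
 * mx_cpu_start(), then hand it the current ccount through cpu_start_ccount
 * and wait for the core to clear it.  Two rounds are attempted; if the
 * secondary does not respond before the one-second timeout, the core is
 * stalled again and -EIO is returned.
 */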
static int boot_secondary(unsigned int cpu, struct task_struct *ts)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(1000);
	unsigned long ccount;
	int i;

#ifdef CONFIG_HOTPLUG_CPU
	cpu_start_id = cpu;
	system_flush_invalidate_dcache_range(
			(unsigned long)&cpu_start_id, sizeof(cpu_start_id));
#endif
	smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1);

	for (i = 0; i < 2; ++i) {
		do
			ccount = get_ccount();
		while (!ccount);

		cpu_start_ccount = ccount;

		while (time_before(jiffies, timeout)) {
			mb();
			if (!cpu_start_ccount)
				break;
		}

		if (cpu_start_ccount) {
			smp_call_function_single(0, mx_cpu_stop,
					(void *)cpu, 1);
			cpu_start_ccount = 0;
			return -EIO;
		}
	}
	return 0;
}

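/*
 * Bring one secondary CPU online: give it a fresh ASID cache if needed,
 * publish the idle task's stack in start_info, boot the core and then
 * wait up to a second for it to mark itself online.
 */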
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret = 0;

	if (cpu_asid_cache(cpu) == 0)
		cpu_asid_cache(cpu) = ASID_USER_FIRST;

	start_info.stack = (unsigned long)task_pt_regs(idle);
	wmb();

	pr_debug("%s: Calling wakeup_secondary(cpu:%d, idle:%p, sp: %08lx)\n",
			__func__, cpu, idle, start_info.stack);

	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		wait_for_completion_timeout(&cpu_running,
				msecs_to_jiffies(1000));
		if (!cpu_online(cpu))
			ret = -EIO;
	}

	if (ret)
		pr_err("CPU %u failed to boot\n", cpu);

	return ret;
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * __cpu_disable runs on the processor to be shutdown.
 */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 */
	local_flush_cache_all();
	local_flush_tlb_all();
	invalidate_page_directory();

	clear_tasks_mm_cpumask(cpu);

	return 0;
}

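/*
 * Ask CPU 0 to re-assert run-stall for the dying core, stopping it for good.
 */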
static void platform_cpu_kill(unsigned int cpu)
{
	smp_call_function_single(0, mx_cpu_stop, (void *)cpu, true);
}

/*
 * called on the thread which is asking for a CPU to be shutdown -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpu_die(unsigned int cpu)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(1000);
	while (time_before(jiffies, timeout)) {
		system_invalidate_dcache_range((unsigned long)&cpu_start_id,
				sizeof(cpu_start_id));
		if (cpu_start_id == -cpu) {
			platform_cpu_kill(cpu);
			return;
		}
	}
	pr_err("CPU%u: unable to kill\n", cpu);
}

void arch_cpu_idle_dead(void)
{
	cpu_die();
}
/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __ref cpu_die(void)
{
	idle_task_exit();
	local_irq_disable();
	__asm__ __volatile__(
			"	movi	a2, cpu_restart\n"
			"	jx	a2\n");
}

#endif /* CONFIG_HOTPLUG_CPU */

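/*
 * IPI message types.  As used by send_ipi_message() and ipi_interrupt()
 * below, each type has its own MIPISET register (indexed by the message id)
 * used to raise the IPI on the selected cores, and shows up as the
 * corresponding bit in the receiving core's MIPICAUSE register.
 */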
enum ipi_msg_type {
	IPI_RESCHEDULE = 0,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
	IPI_MAX
};

static const struct {
	const char *short_text;
	const char *long_text;
} ipi_text[] = {
	{ .short_text = "RES", .long_text = "Rescheduling interrupts" },
	{ .short_text = "CAL", .long_text = "Function call interrupts" },
	{ .short_text = "DIE", .long_text = "CPU shutdown interrupts" },
};

struct ipi_data {
	unsigned long ipi_count[IPI_MAX];
};

static DEFINE_PER_CPU(struct ipi_data, ipi_data);

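/*
 * Build a bitmask of the target cores (excluding the sender) and write it
 * to the MIPISET register for the given message type to raise the IPI.
 */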
static void send_ipi_message(const struct cpumask *callmask,
		enum ipi_msg_type msg_id)
{
	int index;
	unsigned long mask = 0;

	for_each_cpu(index, callmask)
		if (index != smp_processor_id())
			mask |= 1 << index;

	set_er(mask, MIPISET(msg_id));
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	send_ipi_message(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
}

void smp_send_reschedule(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}

void smp_send_stop(void)
{
	struct cpumask targets;

	cpumask_copy(&targets, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &targets);
	send_ipi_message(&targets, IPI_CPU_STOP);
}

static void ipi_cpu_stop(unsigned int cpu)
{
	set_cpu_online(cpu, false);
	machine_halt();
}

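/*
 * IPI handler: read this core's MIPICAUSE register, acknowledge every
 * pending message by writing its bit back, update the per-CPU statistics
 * and dispatch each message type.
 */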
irqreturn_t ipi_interrupt(int irq, void *dev_id)
{
	unsigned int cpu = smp_processor_id();
	struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
	unsigned int msg;
	unsigned i;

	msg = get_er(MIPICAUSE(cpu));
	for (i = 0; i < IPI_MAX; i++)
		if (msg & (1 << i)) {
			set_er(1 << i, MIPICAUSE(cpu));
			++ipi->ipi_count[i];
		}

	if (msg & (1 << IPI_RESCHEDULE))
		scheduler_ipi();
	if (msg & (1 << IPI_CALL_FUNC))
		generic_smp_call_function_interrupt();
	if (msg & (1 << IPI_CPU_STOP))
		ipi_cpu_stop(cpu);

	return IRQ_HANDLED;
}

void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu;
	unsigned i;

	for (i = 0; i < IPI_MAX; ++i) {
		seq_printf(p, "%*s:", prec, ipi_text[i].short_text);
		for_each_online_cpu(cpu)
			seq_printf(p, " %10lu",
					per_cpu(ipi_data, cpu).ipi_count[i]);
		seq_printf(p, "   %s\n", ipi_text[i].long_text);
	}
}

int setup_profiling_timer(unsigned int multiplier)
{
	pr_debug("setup_profiling_timer %d\n", multiplier);
	return 0;
}

/* TLB flush functions */

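/*
 * on_each_cpu() passes a single void * to the IPI'd helpers, so the
 * cross-CPU flush routines below marshal their arguments through this
 * struct on the caller's stack.
 */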
struct flush_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void ipi_flush_tlb_all(void *arg)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
}

static void ipi_flush_tlb_mm(void *arg)
{
	local_flush_tlb_mm(arg);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	on_each_cpu(ipi_flush_tlb_mm, mm, 1);
}

static void ipi_flush_tlb_page(void *arg)
{
	struct flush_data *fd = arg;
	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct flush_data fd = {
		.vma = vma,
		.addr1 = addr,
	};
	on_each_cpu(ipi_flush_tlb_page, &fd, 1);
}

static void ipi_flush_tlb_range(void *arg)
{
	struct flush_data *fd = arg;
	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	struct flush_data fd = {
		.vma = vma,
		.addr1 = start,
		.addr2 = end,
	};
	on_each_cpu(ipi_flush_tlb_range, &fd, 1);
}

static void ipi_flush_tlb_kernel_range(void *arg)
{
	struct flush_data *fd = arg;
	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_data fd = {
		.addr1 = start,
		.addr2 = end,
	};
	on_each_cpu(ipi_flush_tlb_kernel_range, &fd, 1);
}

/* Cache flush functions */

static void ipi_flush_cache_all(void *arg)
{
	local_flush_cache_all();
}

void flush_cache_all(void)
{
	on_each_cpu(ipi_flush_cache_all, NULL, 1);
}

static void ipi_flush_cache_page(void *arg)
{
	struct flush_data *fd = arg;
	local_flush_cache_page(fd->vma, fd->addr1, fd->addr2);
}

void flush_cache_page(struct vm_area_struct *vma,
		     unsigned long address, unsigned long pfn)
{
	struct flush_data fd = {
		.vma = vma,
		.addr1 = address,
		.addr2 = pfn,
	};
	on_each_cpu(ipi_flush_cache_page, &fd, 1);
}

static void ipi_flush_cache_range(void *arg)
{
	struct flush_data *fd = arg;
	local_flush_cache_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_cache_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	struct flush_data fd = {
		.vma = vma,
		.addr1 = start,
		.addr2 = end,
	};
	on_each_cpu(ipi_flush_cache_range, &fd, 1);
}

static void ipi_flush_icache_range(void *arg)
{
	struct flush_data *fd = arg;
	local_flush_icache_range(fd->addr1, fd->addr2);
}

void flush_icache_range(unsigned long start, unsigned long end)
{
	struct flush_data fd = {
		.addr1 = start,
		.addr2 = end,
	};
	on_each_cpu(ipi_flush_icache_range, &fd, 1);
}
EXPORT_SYMBOL(flush_icache_range);

/* ------------------------------------------------------------------------- */

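/*
 * Cross-CPU dcache maintenance used by the hotplug code above to keep
 * cpu_start_id visible to all cores.  Note that, unlike the flush helpers
 * above, the callers pass a size (sizeof(cpu_start_id)) in addr2 rather
 * than an end address.
 */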
static void ipi_invalidate_dcache_range(void *arg)
{
	struct flush_data *fd = arg;
	__invalidate_dcache_range(fd->addr1, fd->addr2);
}

static void system_invalidate_dcache_range(unsigned long start,
		unsigned long size)
{
	struct flush_data fd = {
		.addr1 = start,
		.addr2 = size,
	};
	on_each_cpu(ipi_invalidate_dcache_range, &fd, 1);
}

static void ipi_flush_invalidate_dcache_range(void *arg)
{
	struct flush_data *fd = arg;
	__flush_invalidate_dcache_range(fd->addr1, fd->addr2);
}

static void system_flush_invalidate_dcache_range(unsigned long start,
		unsigned long size)
{
	struct flush_data fd = {
		.addr1 = start,
		.addr2 = size,
	};
	on_each_cpu(ipi_flush_invalidate_dcache_range, &fd, 1);
}