xref: /openbmc/linux/arch/mips/include/asm/smp.h (revision 96cb8ae2)
1384740dcSRalf Baechle /*
2384740dcSRalf Baechle  * This file is subject to the terms and conditions of the GNU General
3384740dcSRalf Baechle  * Public License.  See the file "COPYING" in the main directory of this
4384740dcSRalf Baechle  * archive for more details.
5384740dcSRalf Baechle  *
6384740dcSRalf Baechle  * Copyright (C) 2000 - 2001 by Kanoj Sarcar (kanoj@sgi.com)
7384740dcSRalf Baechle  * Copyright (C) 2000 - 2001 by Silicon Graphics, Inc.
8384740dcSRalf Baechle  * Copyright (C) 2000, 2001, 2002 Ralf Baechle
9384740dcSRalf Baechle  * Copyright (C) 2000, 2001 Broadcom Corporation
10384740dcSRalf Baechle  */
11384740dcSRalf Baechle #ifndef __ASM_SMP_H
12384740dcSRalf Baechle #define __ASM_SMP_H
13384740dcSRalf Baechle 
14384740dcSRalf Baechle #include <linux/bitops.h>
15384740dcSRalf Baechle #include <linux/linkage.h>
16631330f5SRalf Baechle #include <linux/smp.h>
17384740dcSRalf Baechle #include <linux/threads.h>
18384740dcSRalf Baechle #include <linux/cpumask.h>
19384740dcSRalf Baechle 
2060063497SArun Sharma #include <linux/atomic.h>
21384740dcSRalf Baechle #include <asm/smp-ops.h>
22384740dcSRalf Baechle 
23384740dcSRalf Baechle extern int smp_num_siblings;
24384740dcSRalf Baechle extern cpumask_t cpu_sibling_map[];
25bda4584cSHuacai Chen extern cpumask_t cpu_core_map[];
26640511aeSJames Hogan extern cpumask_t cpu_foreign_map[];
27384740dcSRalf Baechle 
/*
 * Fast raw_smp_processor_id(): return the CPU number cached in the
 * current task's thread_info.  The VDSO executes in userspace where no
 * thread_info exists, so any use of smp_processor_id() from VDSO code
 * is turned into a build-time failure instead of a silent bad read.
 */
static inline int raw_smp_processor_id(void)
{
#if defined(__VDSO__)
	/*
	 * Merely referencing this declaration trips __compiletime_error,
	 * so VDSO callers fail to build rather than misbehave at runtime.
	 */
	extern int vdso_smp_processor_id(void)
		__compiletime_error("VDSO should not call smp_processor_id()");
	return vdso_smp_processor_id();
#else
	return current_thread_info()->cpu;
#endif
}
/* Tell <linux/smp.h> that the architecture supplies its own version. */
#define raw_smp_processor_id raw_smp_processor_id
39384740dcSRalf Baechle 
40384740dcSRalf Baechle /* Map from cpu id to sequential logical cpu number.  This will only
41384740dcSRalf Baechle    not be idempotent when cpus failed to come on-line.	*/
427820b84bSDavid Daney extern int __cpu_number_map[CONFIG_MIPS_NR_CPU_NR_MAP];
43384740dcSRalf Baechle #define cpu_number_map(cpu)  __cpu_number_map[cpu]
44384740dcSRalf Baechle 
45384740dcSRalf Baechle /* The reverse map from sequential logical cpu number to cpu id.  */
46384740dcSRalf Baechle extern int __cpu_logical_map[NR_CPUS];
47384740dcSRalf Baechle #define cpu_logical_map(cpu)  __cpu_logical_map[cpu]
48384740dcSRalf Baechle 
49384740dcSRalf Baechle #define NO_PROC_ID	(-1)
50384740dcSRalf Baechle 
51384740dcSRalf Baechle #define SMP_RESCHEDULE_YOURSELF 0x1	/* XXX braindead */
52384740dcSRalf Baechle #define SMP_CALL_FUNCTION	0x2
53ddcdb1b4SDavid Daney /* Octeon - Tell another core to flush its icache */
54ddcdb1b4SDavid Daney #define SMP_ICACHE_FLUSH	0x4
55c83c2eedSMarcin Nowakowski #define SMP_ASK_C0COUNT		0x8
56ddcdb1b4SDavid Daney 
5776306f42SPaul Burton /* Mask of CPUs which are currently definitely operating coherently */
5876306f42SPaul Burton extern cpumask_t cpu_coherent_mask;
5976306f42SPaul Burton 
60*96cb8ae2SJiaxun Yang extern unsigned int smp_max_threads __initdata;
61*96cb8ae2SJiaxun Yang 
62b745fcb9SJoe Perches extern asmlinkage void smp_bootstrap(void);
63384740dcSRalf Baechle 
64826e99beSJames Hogan extern void calculate_cpu_foreign_map(void);
65826e99beSJames Hogan 
66384740dcSRalf Baechle /*
67384740dcSRalf Baechle  * this function sends a 'reschedule' IPI to another CPU.
68384740dcSRalf Baechle  * it goes straight through and wastes no time serializing
69384740dcSRalf Baechle  * anything. Worst case is that we lose a reschedule ...
70384740dcSRalf Baechle  */
arch_smp_send_reschedule(int cpu)714c8c3c7fSValentin Schneider static inline void arch_smp_send_reschedule(int cpu)
72384740dcSRalf Baechle {
73ff2c8252SMatt Redfearn 	extern const struct plat_smp_ops *mp_ops;	/* private */
74384740dcSRalf Baechle 
75384740dcSRalf Baechle 	mp_ops->send_ipi_single(cpu, SMP_RESCHEDULE_YOURSELF);
76384740dcSRalf Baechle }
77384740dcSRalf Baechle 
781b2bc75cSRalf Baechle #ifdef CONFIG_HOTPLUG_CPU
__cpu_disable(void)791b2bc75cSRalf Baechle static inline int __cpu_disable(void)
801b2bc75cSRalf Baechle {
81ff2c8252SMatt Redfearn 	extern const struct plat_smp_ops *mp_ops;	/* private */
821b2bc75cSRalf Baechle 
831b2bc75cSRalf Baechle 	return mp_ops->cpu_disable();
841b2bc75cSRalf Baechle }
851b2bc75cSRalf Baechle 
__cpu_die(unsigned int cpu)861b2bc75cSRalf Baechle static inline void __cpu_die(unsigned int cpu)
871b2bc75cSRalf Baechle {
88ff2c8252SMatt Redfearn 	extern const struct plat_smp_ops *mp_ops;	/* private */
891b2bc75cSRalf Baechle 
901b2bc75cSRalf Baechle 	mp_ops->cpu_die(cpu);
911b2bc75cSRalf Baechle }
921b2bc75cSRalf Baechle 
939e57f049SJosh Poimboeuf extern void __noreturn play_dead(void);
941b2bc75cSRalf Baechle #endif
951b2bc75cSRalf Baechle 
9662cac480SDengcheng Zhu #ifdef CONFIG_KEXEC
kexec_nonboot_cpu(void)9762cac480SDengcheng Zhu static inline void kexec_nonboot_cpu(void)
9862cac480SDengcheng Zhu {
9962cac480SDengcheng Zhu 	extern const struct plat_smp_ops *mp_ops;	/* private */
10062cac480SDengcheng Zhu 
10162cac480SDengcheng Zhu 	return mp_ops->kexec_nonboot_cpu();
10262cac480SDengcheng Zhu }
10362cac480SDengcheng Zhu 
/*
 * Return the platform's kexec handler for non-boot CPUs as an opaque
 * pointer (without invoking it).
 *
 * NOTE(review): converting a function pointer to void * is not strictly
 * conforming ISO C; presumably fine on every ABI Linux supports, but
 * worth confirming if this is ever built with a pedantic toolchain.
 */
static inline void *kexec_nonboot_cpu_func(void)
{
	extern const struct plat_smp_ops *mp_ops;	/* private */

	return mp_ops->kexec_nonboot_cpu;
}
11062cac480SDengcheng Zhu #endif
11162cac480SDengcheng Zhu 
1127688c539SMatt Redfearn /*
1137688c539SMatt Redfearn  * This function will set up the necessary IPIs for Linux to communicate
1147688c539SMatt Redfearn  * with the CPUs in mask.
1157688c539SMatt Redfearn  * Return 0 on success.
1167688c539SMatt Redfearn  */
1177688c539SMatt Redfearn int mips_smp_ipi_allocate(const struct cpumask *mask);
1187688c539SMatt Redfearn 
1197688c539SMatt Redfearn /*
1207688c539SMatt Redfearn  * This function will free up IPIs allocated with mips_smp_ipi_allocate to the
1217688c539SMatt Redfearn  * CPUs in mask, which must be a subset of the IPIs that have been configured.
1227688c539SMatt Redfearn  * Return 0 on success.
1237688c539SMatt Redfearn  */
1247688c539SMatt Redfearn int mips_smp_ipi_free(const struct cpumask *mask);
1257688c539SMatt Redfearn 
arch_send_call_function_single_ipi(int cpu)126eef34ec5SRalf Baechle static inline void arch_send_call_function_single_ipi(int cpu)
127eef34ec5SRalf Baechle {
128ff2c8252SMatt Redfearn 	extern const struct plat_smp_ops *mp_ops;	/* private */
129eef34ec5SRalf Baechle 
130d82d500fSLiangliang Huang 	mp_ops->send_ipi_single(cpu, SMP_CALL_FUNCTION);
131eef34ec5SRalf Baechle }
132eef34ec5SRalf Baechle 
arch_send_call_function_ipi_mask(const struct cpumask * mask)133eef34ec5SRalf Baechle static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
134eef34ec5SRalf Baechle {
135ff2c8252SMatt Redfearn 	extern const struct plat_smp_ops *mp_ops;	/* private */
136eef34ec5SRalf Baechle 
137eef34ec5SRalf Baechle 	mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION);
138eef34ec5SRalf Baechle }
139384740dcSRalf Baechle 
140384740dcSRalf Baechle #endif /* __ASM_SMP_H */
141