/*
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file "COPYING" in the main directory of this
 * archive for more details.
 *
 * Copyright (C) 2000 - 2001 by Kanoj Sarcar (kanoj@sgi.com)
 * Copyright (C) 2000 - 2001 by Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2002 Ralf Baechle
 * Copyright (C) 2000, 2001 Broadcom Corporation
 */
#ifndef __ASM_SMP_H
#define __ASM_SMP_H

#include <linux/bitops.h>
#include <linux/linkage.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/cpumask.h>

#include <linux/atomic.h>
#include <asm/smp-ops.h>

extern int smp_num_siblings;
extern cpumask_t cpu_sibling_map[];
extern cpumask_t cpu_core_map[];
extern cpumask_t cpu_foreign_map[];

static inline int raw_smp_processor_id(void)
{
#if defined(__VDSO__)
	extern int vdso_smp_processor_id(void)
		__compiletime_error("VDSO should not call smp_processor_id()");

	return vdso_smp_processor_id();
#else
	return current_thread_info()->cpu;
#endif
}
#define raw_smp_processor_id raw_smp_processor_id

/*
 * Map from cpu id to sequential logical cpu number.  This will only
 * fail to be idempotent when cpus failed to come on-line.
 */
extern int __cpu_number_map[CONFIG_MIPS_NR_CPU_NR_MAP];
#define cpu_number_map(cpu)  __cpu_number_map[cpu]

/* The reverse map from sequential logical cpu number to cpu id. */
extern int __cpu_logical_map[NR_CPUS];
#define cpu_logical_map(cpu)  __cpu_logical_map[cpu]
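/*
 * Illustrative sketch, not part of the original header: for any
 * physical cpu id that successfully came online, the two maps above
 * invert one another, e.g.
 *
 *	int logical = cpu_number_map(physid);
 *	int physid2 = cpu_logical_map(logical);	(physid2 == physid)
 *
 * CPUs which failed to come online are the exception noted above.
 */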
#define NO_PROC_ID	(-1)

#define SMP_RESCHEDULE_YOURSELF 0x1	/* XXX braindead */
#define SMP_CALL_FUNCTION	0x2
/* Octeon - Tell another core to flush its icache */
#define SMP_ICACHE_FLUSH	0x4
#define SMP_ASK_C0COUNT		0x8

/* Mask of CPUs which are currently definitely operating coherently */
extern cpumask_t cpu_coherent_mask;

extern unsigned int smp_max_threads __initdata;

extern asmlinkage void smp_bootstrap(void);

extern void calculate_cpu_foreign_map(void);

/*
 * This function sends a 'reschedule' IPI to another CPU.
 * It goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
static inline void arch_smp_send_reschedule(int cpu)
{
	extern const struct plat_smp_ops *mp_ops;	/* private */

	mp_ops->send_ipi_single(cpu, SMP_RESCHEDULE_YOURSELF);
}

#ifdef CONFIG_HOTPLUG_CPU
/* Runs on the CPU being offlined; the platform decides if it may go. */
static inline int __cpu_disable(void)
{
	extern const struct plat_smp_ops *mp_ops;	/* private */

	return mp_ops->cpu_disable();
}

/* Runs on another CPU to complete the offlining of @cpu. */
static inline void __cpu_die(unsigned int cpu)
{
	extern const struct plat_smp_ops *mp_ops;	/* private */

	mp_ops->cpu_die(cpu);
}

extern void __noreturn play_dead(void);
#endif

#ifdef CONFIG_KEXEC
/* Park this non-boot CPU via the platform hook ahead of a kexec reboot. */
static inline void kexec_nonboot_cpu(void)
{
	extern const struct plat_smp_ops *mp_ops;	/* private */

	mp_ops->kexec_nonboot_cpu();
}

/* Return the platform's kexec_nonboot_cpu hook itself. */
static inline void *kexec_nonboot_cpu_func(void)
{
	extern const struct plat_smp_ops *mp_ops;	/* private */

	return mp_ops->kexec_nonboot_cpu;
}
#endif

/*
 * This function will set up the necessary IPIs for Linux to communicate
 * with the CPUs in mask.
 * Return 0 on success.
 */
int mips_smp_ipi_allocate(const struct cpumask *mask);

/*
 * This function will free up IPIs allocated with mips_smp_ipi_allocate to the
 * CPUs in mask, which must be a subset of the IPIs that have been configured.
 * Return 0 on success.
 */
int mips_smp_ipi_free(const struct cpumask *mask);
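/*
 * Usage sketch (illustrative only; the label and error handling are
 * hypothetical): callers pair the two functions around the lifetime of
 * the IPIs, freeing a mask that is a subset of the one allocated:
 *
 *	if (mips_smp_ipi_allocate(mask))
 *		goto out_err;
 *	...
 *	mips_smp_ipi_free(mask);
 */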
static inline void arch_send_call_function_single_ipi(int cpu)
{
	extern const struct plat_smp_ops *mp_ops;	/* private */

	mp_ops->send_ipi_single(cpu, SMP_CALL_FUNCTION);
}

static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	extern const struct plat_smp_ops *mp_ops;	/* private */

	mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION);
}

#endif /* __ASM_SMP_H */