xref: /openbmc/linux/arch/parisc/include/asm/smp.h (revision 91887a362984324e254473e92820758c8e658f78)
1deae26bfSKyle McMartin #ifndef __ASM_SMP_H
2deae26bfSKyle McMartin #define __ASM_SMP_H
3deae26bfSKyle McMartin 
4deae26bfSKyle McMartin 
5deae26bfSKyle McMartin #if defined(CONFIG_SMP)
6deae26bfSKyle McMartin 
7deae26bfSKyle McMartin /* Page Zero locations where PDC will look for the address to branch to
8deae26bfSKyle McMartin ** when we poke slave CPUs that are still spinning in the "Icache loop".
9deae26bfSKyle McMartin */
10deae26bfSKyle McMartin #define PDC_OS_BOOT_RENDEZVOUS     0x10
11deae26bfSKyle McMartin #define PDC_OS_BOOT_RENDEZVOUS_HI  0x28
12deae26bfSKyle McMartin 
13deae26bfSKyle McMartin #ifndef ASSEMBLY
14deae26bfSKyle McMartin #include <linux/bitops.h>
15deae26bfSKyle McMartin #include <linux/threads.h>	/* for NR_CPUS */
16deae26bfSKyle McMartin #include <linux/cpumask.h>
17deae26bfSKyle McMartin typedef unsigned long address_t;
18deae26bfSKyle McMartin 
19deae26bfSKyle McMartin 
20deae26bfSKyle McMartin /*
21deae26bfSKyle McMartin  *	Private routines/data
22deae26bfSKyle McMartin  *
23deae26bfSKyle McMartin  *	physical and logical are equivalent until we support CPU hotplug.
24deae26bfSKyle McMartin  */
25deae26bfSKyle McMartin #define cpu_number_map(cpu)	(cpu)
26deae26bfSKyle McMartin #define cpu_logical_map(cpu)	(cpu)
27deae26bfSKyle McMartin 
28deae26bfSKyle McMartin extern void smp_send_reschedule(int cpu);
29deae26bfSKyle McMartin extern void smp_send_all_nop(void);
30deae26bfSKyle McMartin 
31deae26bfSKyle McMartin extern void arch_send_call_function_single_ipi(int cpu);
32*91887a36SRusty Russell extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
33*91887a36SRusty Russell #define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
34deae26bfSKyle McMartin 
35deae26bfSKyle McMartin #endif /* !ASSEMBLY */
36deae26bfSKyle McMartin 
37deae26bfSKyle McMartin /*
38deae26bfSKyle McMartin  *	This magic constant controls our willingness to transfer
39deae26bfSKyle McMartin  *      a process across CPUs. Such a transfer incurs cache and tlb
40deae26bfSKyle McMartin  *      misses. The current value is inherited from i386. Still needs
41deae26bfSKyle McMartin  *      to be tuned for parisc.
42deae26bfSKyle McMartin  */
43deae26bfSKyle McMartin 
44deae26bfSKyle McMartin #define PROC_CHANGE_PENALTY	15		/* Schedule penalty */
45deae26bfSKyle McMartin 
46deae26bfSKyle McMartin #define raw_smp_processor_id()	(current_thread_info()->cpu)
47deae26bfSKyle McMartin 
48deae26bfSKyle McMartin #else /* CONFIG_SMP */
49deae26bfSKyle McMartin 
50deae26bfSKyle McMartin static inline void smp_send_all_nop(void) { return; }
51deae26bfSKyle McMartin 
52deae26bfSKyle McMartin #endif
53deae26bfSKyle McMartin 
54deae26bfSKyle McMartin #define NO_PROC_ID		0xFF		/* No processor magic marker */
55deae26bfSKyle McMartin #define ANY_PROC_ID		0xFF		/* Any processor magic marker */
56deae26bfSKyle McMartin static inline int __cpu_disable (void) {	/* CPU hotplug is not supported on parisc here, */
57deae26bfSKyle McMartin   return 0;	/* so "disabling" a CPU trivially reports success */
58deae26bfSKyle McMartin }
59deae26bfSKyle McMartin static inline void __cpu_die (unsigned int cpu) {	/* park forever; there is no way to offline a CPU */
60deae26bfSKyle McMartin   while(1)	/* NOTE(review): effectively unreachable, since __cpu_disable() above always returns 0 */
61deae26bfSKyle McMartin     ;
62deae26bfSKyle McMartin }
63deae26bfSKyle McMartin extern int __cpu_up (unsigned int cpu);
64deae26bfSKyle McMartin 
65deae26bfSKyle McMartin #endif /*  __ASM_SMP_H */
66