xref: /openbmc/linux/arch/powerpc/include/asm/cputhreads.h (revision fcce810986b3f32a8322faf240f8cc5560a4c463)
1b8b572e1SStephen Rothwell #ifndef _ASM_POWERPC_CPUTHREADS_H
2b8b572e1SStephen Rothwell #define _ASM_POWERPC_CPUTHREADS_H
3b8b572e1SStephen Rothwell 
4b8b572e1SStephen Rothwell #include <linux/cpumask.h>
5b8b572e1SStephen Rothwell 
6b8b572e1SStephen Rothwell /*
7b8b572e1SStephen Rothwell  * Mapping of threads to cores
8*fcce8109SBenjamin Herrenschmidt  *
9*fcce8109SBenjamin Herrenschmidt  * Note: This implementation is limited to a power of 2 number of
10*fcce8109SBenjamin Herrenschmidt  * threads per core and the same number for each core in the system
 * (though it would work if some processors had fewer threads, as long
 * as the CPU numbers are still allocated, just never brought online).
13*fcce8109SBenjamin Herrenschmidt  *
14*fcce8109SBenjamin Herrenschmidt  * However, the API allows for a different implementation in the future
15*fcce8109SBenjamin Herrenschmidt  * if needed, as long as you only use the functions and not the variables
16*fcce8109SBenjamin Herrenschmidt  * directly.
17b8b572e1SStephen Rothwell  */
18b8b572e1SStephen Rothwell 
19b8b572e1SStephen Rothwell #ifdef CONFIG_SMP
20b8b572e1SStephen Rothwell extern int threads_per_core;
21b8b572e1SStephen Rothwell extern int threads_shift;
22b8b572e1SStephen Rothwell extern cpumask_t threads_core_mask;
23b8b572e1SStephen Rothwell #else
24b8b572e1SStephen Rothwell #define threads_per_core	1
25b8b572e1SStephen Rothwell #define threads_shift		0
26b8b572e1SStephen Rothwell #define threads_core_mask	(CPU_MASK_CPU0)
27b8b572e1SStephen Rothwell #endif
28b8b572e1SStephen Rothwell 
/* cpu_thread_mask_to_cores - Return a cpumask with one bit set per core
 *                            hit by the argument
31b8b572e1SStephen Rothwell  *
32b8b572e1SStephen Rothwell  * @threads:	a cpumask of threads
33b8b572e1SStephen Rothwell  *
34b8b572e1SStephen Rothwell  * This function returns a cpumask which will have one "cpu" (or thread)
35b8b572e1SStephen Rothwell  * bit set for each core that has at least one thread set in the argument.
36b8b572e1SStephen Rothwell  *
37b8b572e1SStephen Rothwell  * This can typically be used for things like IPI for tlb invalidations
38b8b572e1SStephen Rothwell  * since those need to be done only once per core/TLB
39b8b572e1SStephen Rothwell  */
40b8b572e1SStephen Rothwell static inline cpumask_t cpu_thread_mask_to_cores(cpumask_t threads)
41b8b572e1SStephen Rothwell {
42b8b572e1SStephen Rothwell 	cpumask_t	tmp, res;
43b8b572e1SStephen Rothwell 	int		i;
44b8b572e1SStephen Rothwell 
45b8b572e1SStephen Rothwell 	res = CPU_MASK_NONE;
46b8b572e1SStephen Rothwell 	for (i = 0; i < NR_CPUS; i += threads_per_core) {
47b8b572e1SStephen Rothwell 		cpus_shift_left(tmp, threads_core_mask, i);
48b8b572e1SStephen Rothwell 		if (cpus_intersects(threads, tmp))
49b8b572e1SStephen Rothwell 			cpu_set(i, res);
50b8b572e1SStephen Rothwell 	}
51b8b572e1SStephen Rothwell 	return res;
52b8b572e1SStephen Rothwell }
53b8b572e1SStephen Rothwell 
54b8b572e1SStephen Rothwell static inline int cpu_nr_cores(void)
55b8b572e1SStephen Rothwell {
56b8b572e1SStephen Rothwell 	return NR_CPUS >> threads_shift;
57b8b572e1SStephen Rothwell }
58b8b572e1SStephen Rothwell 
59b8b572e1SStephen Rothwell static inline cpumask_t cpu_online_cores_map(void)
60b8b572e1SStephen Rothwell {
61b8b572e1SStephen Rothwell 	return cpu_thread_mask_to_cores(cpu_online_map);
62b8b572e1SStephen Rothwell }
63b8b572e1SStephen Rothwell 
64b8b572e1SStephen Rothwell static inline int cpu_thread_to_core(int cpu)
65b8b572e1SStephen Rothwell {
66b8b572e1SStephen Rothwell 	return cpu >> threads_shift;
67b8b572e1SStephen Rothwell }
68b8b572e1SStephen Rothwell 
69b8b572e1SStephen Rothwell static inline int cpu_thread_in_core(int cpu)
70b8b572e1SStephen Rothwell {
71b8b572e1SStephen Rothwell 	return cpu & (threads_per_core - 1);
72b8b572e1SStephen Rothwell }
73b8b572e1SStephen Rothwell 
74b8b572e1SStephen Rothwell static inline int cpu_first_thread_in_core(int cpu)
75b8b572e1SStephen Rothwell {
76b8b572e1SStephen Rothwell 	return cpu & ~(threads_per_core - 1);
77b8b572e1SStephen Rothwell }
78b8b572e1SStephen Rothwell 
79*fcce8109SBenjamin Herrenschmidt static inline int cpu_last_thread_in_core(int cpu)
80*fcce8109SBenjamin Herrenschmidt {
81*fcce8109SBenjamin Herrenschmidt 	return cpu | (threads_per_core - 1);
82*fcce8109SBenjamin Herrenschmidt }
83*fcce8109SBenjamin Herrenschmidt 
84*fcce8109SBenjamin Herrenschmidt 
85*fcce8109SBenjamin Herrenschmidt 
86b8b572e1SStephen Rothwell #endif /* _ASM_POWERPC_CPUTHREADS_H */
87b8b572e1SStephen Rothwell 
88