xref: /openbmc/linux/arch/powerpc/include/asm/cputhreads.h (revision d17799f9c10e283cccd4d598d3416e6fac336ab9)
1b8b572e1SStephen Rothwell #ifndef _ASM_POWERPC_CPUTHREADS_H
2b8b572e1SStephen Rothwell #define _ASM_POWERPC_CPUTHREADS_H
3b8b572e1SStephen Rothwell 
4b8b572e1SStephen Rothwell #include <linux/cpumask.h>
5b8b572e1SStephen Rothwell 
/*
 * Mapping of threads to cores
 *
 * Note: This implementation is limited to a power of 2 number of
 * threads per core and the same number for each core in the system
 * (though it would work if some processors had fewer threads as long
 * as the CPU numbers are still allocated, just not brought online).
 *
 * However, the API allows for a different implementation in the future
 * if needed, as long as you only use the functions and not the variables
 * directly.
 */
18b8b572e1SStephen Rothwell 
19b8b572e1SStephen Rothwell #ifdef CONFIG_SMP
20b8b572e1SStephen Rothwell extern int threads_per_core;
215853aef1SMichael Ellerman extern int threads_per_subcore;
22b8b572e1SStephen Rothwell extern int threads_shift;
23b8b572e1SStephen Rothwell extern cpumask_t threads_core_mask;
24b8b572e1SStephen Rothwell #else
25b8b572e1SStephen Rothwell #define threads_per_core	1
265853aef1SMichael Ellerman #define threads_per_subcore	1
27b8b572e1SStephen Rothwell #define threads_shift		0
2887313df7SRusty Russell #define threads_core_mask	(*get_cpu_mask(0))
29b8b572e1SStephen Rothwell #endif
30b8b572e1SStephen Rothwell 
/* cpu_thread_mask_to_cores - Return a cpumask with one bit per core
 *                            hit by the argument
 *
 * @threads:	a cpumask of online threads
 *
 * This function returns a cpumask which will have one online cpu's
 * bit set for each core that has at least one thread set in the argument.
 *
 * This can typically be used for things like IPI for tlb invalidations
 * since those need to be done only once per core/TLB
 */
static inline cpumask_t cpu_thread_mask_to_cores(const struct cpumask *threads)
{
	cpumask_t	tmp, res;
	int		i, cpu;

	cpumask_clear(&res);
	/*
	 * Walk core by core: threads_core_mask covers the thread bits of
	 * the first core, so shifting it left by i slides it over core
	 * i / threads_per_core (see the power-of-2 note at the top of
	 * this file).
	 */
	for (i = 0; i < NR_CPUS; i += threads_per_core) {
		cpumask_shift_left(&tmp, &threads_core_mask, i);
		if (cpumask_intersects(threads, &tmp)) {
			/*
			 * Use the first *online* thread of this core as the
			 * core's representative bit; a core with no online
			 * thread contributes nothing (cpu >= nr_cpu_ids).
			 */
			cpu = cpumask_next_and(-1, &tmp, cpu_online_mask);
			if (cpu < nr_cpu_ids)
				cpumask_set_cpu(cpu, &res);
		}
	}
	return res;
}
58b8b572e1SStephen Rothwell 
59b8b572e1SStephen Rothwell static inline int cpu_nr_cores(void)
60b8b572e1SStephen Rothwell {
61d52356e7SJan Stancek 	return nr_cpu_ids >> threads_shift;
62b8b572e1SStephen Rothwell }
63b8b572e1SStephen Rothwell 
64b8b572e1SStephen Rothwell static inline cpumask_t cpu_online_cores_map(void)
65b8b572e1SStephen Rothwell {
66104699c0SKOSAKI Motohiro 	return cpu_thread_mask_to_cores(cpu_online_mask);
67b8b572e1SStephen Rothwell }
68b8b572e1SStephen Rothwell 
#ifdef CONFIG_SMP
/* Map a linear CPU number to its core index, and a core to its first thread. */
int cpu_core_index_of_thread(int cpu);
int cpu_first_thread_of_core(int core);
#else
/* UP: one thread per "core", so both mappings are the identity. */
static inline int cpu_core_index_of_thread(int cpu) { return cpu; }
static inline int cpu_first_thread_of_core(int core) { return core; }
#endif
76b8b572e1SStephen Rothwell 
77b8b572e1SStephen Rothwell static inline int cpu_thread_in_core(int cpu)
78b8b572e1SStephen Rothwell {
79b8b572e1SStephen Rothwell 	return cpu & (threads_per_core - 1);
80b8b572e1SStephen Rothwell }
81b8b572e1SStephen Rothwell 
825853aef1SMichael Ellerman static inline int cpu_thread_in_subcore(int cpu)
835853aef1SMichael Ellerman {
845853aef1SMichael Ellerman 	return cpu & (threads_per_subcore - 1);
855853aef1SMichael Ellerman }
865853aef1SMichael Ellerman 
8799d86705SVaidyanathan Srinivasan static inline int cpu_first_thread_sibling(int cpu)
88b8b572e1SStephen Rothwell {
89b8b572e1SStephen Rothwell 	return cpu & ~(threads_per_core - 1);
90b8b572e1SStephen Rothwell }
91b8b572e1SStephen Rothwell 
9299d86705SVaidyanathan Srinivasan static inline int cpu_last_thread_sibling(int cpu)
93fcce8109SBenjamin Herrenschmidt {
94fcce8109SBenjamin Herrenschmidt 	return cpu | (threads_per_core - 1);
95fcce8109SBenjamin Herrenschmidt }
96fcce8109SBenjamin Herrenschmidt 
/*
 * get_tensr - read the Thread Enable Status Register
 *
 * On Book E cores with SMT this returns SPRN_TENSR, a bitmask of the
 * hardware threads that are currently enabled.  Otherwise only thread 0
 * exists, so report a mask of 0x1.
 */
static inline u32 get_tensr(void)
{
#ifdef	CONFIG_BOOKE
	if (cpu_has_feature(CPU_FTR_SMT))
		return mfspr(SPRN_TENSR);
#endif
	return 1;
}
105fcce8109SBenjamin Herrenschmidt 
106*d17799f9Schenhui zhao void book3e_stop_thread(int thread);
107fcce8109SBenjamin Herrenschmidt 
108b8b572e1SStephen Rothwell #endif /* _ASM_POWERPC_CPUTHREADS_H */
109b8b572e1SStephen Rothwell 
110