1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _ASM_POWERPC_CPUTHREADS_H
3 #define _ASM_POWERPC_CPUTHREADS_H
4 
5 #ifndef __ASSEMBLY__
6 #include <linux/cpumask.h>
7 #include <asm/cpu_has_feature.h>
8 
9 /*
10  * Mapping of threads to cores
11  *
12  * Note: This implementation is limited to a power of 2 number of
13  * threads per core and the same number for each core in the system
 * (though it would work if some processors had fewer threads as long
15  * as the CPU numbers are still allocated, just not brought online).
16  *
17  * However, the API allows for a different implementation in the future
18  * if needed, as long as you only use the functions and not the variables
19  * directly.
20  */
21 
22 #ifdef CONFIG_SMP
23 extern int threads_per_core;
24 extern int threads_per_subcore;
25 extern int threads_shift;
26 extern cpumask_t threads_core_mask;
27 #else
28 #define threads_per_core	1
29 #define threads_per_subcore	1
30 #define threads_shift		0
31 #define threads_core_mask	(*get_cpu_mask(0))
32 #endif
33 
34 /* cpu_thread_mask_to_cores - Return a cpumask of one per cores
35  *                            hit by the argument
36  *
37  * @threads:	a cpumask of online threads
38  *
39  * This function returns a cpumask which will have one online cpu's
40  * bit set for each core that has at least one thread set in the argument.
41  *
42  * This can typically be used for things like IPI for tlb invalidations
43  * since those need to be done only once per core/TLB
44  */
45 static inline cpumask_t cpu_thread_mask_to_cores(const struct cpumask *threads)
46 {
47 	cpumask_t	tmp, res;
48 	int		i, cpu;
49 
50 	cpumask_clear(&res);
51 	for (i = 0; i < NR_CPUS; i += threads_per_core) {
52 		cpumask_shift_left(&tmp, &threads_core_mask, i);
53 		if (cpumask_intersects(threads, &tmp)) {
54 			cpu = cpumask_next_and(-1, &tmp, cpu_online_mask);
55 			if (cpu < nr_cpu_ids)
56 				cpumask_set_cpu(cpu, &res);
57 		}
58 	}
59 	return res;
60 }
61 
62 static inline int cpu_nr_cores(void)
63 {
64 	return nr_cpu_ids >> threads_shift;
65 }
66 
67 static inline cpumask_t cpu_online_cores_map(void)
68 {
69 	return cpu_thread_mask_to_cores(cpu_online_mask);
70 }
71 
#ifdef CONFIG_SMP
/* Out-of-line mappings between a CPU (thread) number and its core index. */
int cpu_core_index_of_thread(int cpu);
int cpu_first_thread_of_core(int core);
#else
/* UP: one thread per core, so CPU number and core index coincide. */
static inline int cpu_core_index_of_thread(int cpu) { return cpu; }
static inline int cpu_first_thread_of_core(int core) { return core; }
#endif
79 
80 static inline int cpu_thread_in_core(int cpu)
81 {
82 	return cpu & (threads_per_core - 1);
83 }
84 
85 static inline int cpu_thread_in_subcore(int cpu)
86 {
87 	return cpu & (threads_per_subcore - 1);
88 }
89 
90 static inline int cpu_first_thread_sibling(int cpu)
91 {
92 	return cpu & ~(threads_per_core - 1);
93 }
94 
95 static inline int cpu_last_thread_sibling(int cpu)
96 {
97 	return cpu | (threads_per_core - 1);
98 }
99 
/*
 * Read the SPRN_TENSR SPR (thread enable status) on SMT-capable BookE
 * parts.  Falls back to 1 — i.e. "only thread 0" — when the CPU has no
 * SMT feature or the kernel is not a BookE build.
 */
static inline u32 get_tensr(void)
{
#ifdef	CONFIG_BOOKE
	if (cpu_has_feature(CPU_FTR_SMT))
		return mfspr(SPRN_TENSR);
#endif
	return 1;
}
108 
109 void book3e_start_thread(int thread, unsigned long addr);
110 void book3e_stop_thread(int thread);
111 
112 #endif /* __ASSEMBLY__ */
113 
114 #define INVALID_THREAD_HWID	0x0fff
115 
116 #endif /* _ASM_POWERPC_CPUTHREADS_H */
117 
118