#ifndef _ASM_POWERPC_CPUTHREADS_H
#define _ASM_POWERPC_CPUTHREADS_H

#ifndef __ASSEMBLY__
#include <linux/cpumask.h>

/*
 * Mapping of threads to cores
 *
 * Note: This implementation is limited to a power-of-2 number of
 * threads per core and the same number for every core in the system
 * (though it would work if some processors had fewer threads, as long
 * as the CPU numbers are still allocated, just not brought online).
 *
 * However, the API allows for a different implementation in the future
 * if needed, as long as you only use the functions and not the variables
 * directly.
 */

#ifdef CONFIG_SMP
extern int threads_per_core;
extern int threads_per_subcore;
extern int threads_shift;
extern cpumask_t threads_core_mask;
#else
#define threads_per_core	1
#define threads_per_subcore	1
#define threads_shift		0
#define threads_core_mask	(*get_cpu_mask(0))
#endif

/* cpu_thread_mask_to_cores - Return a cpumask with one bit set per core
 *                            hit by the argument
 *
 * @threads:	a cpumask of online threads
 *
 * This function returns a cpumask which will have one online CPU's
 * bit set for each core that has at least one thread set in the argument.
 *
 * This can typically be used for things like IPIs for TLB invalidations,
 * since those need to be done only once per core/TLB.
 */
static inline cpumask_t cpu_thread_mask_to_cores(const struct cpumask *threads)
{
	cpumask_t	tmp, res;
	int		i, cpu;

	cpumask_clear(&res);
	for (i = 0; i < NR_CPUS; i += threads_per_core) {
		cpumask_shift_left(&tmp, &threads_core_mask, i);
		if (cpumask_intersects(threads, &tmp)) {
			cpu = cpumask_next_and(-1, &tmp, cpu_online_mask);
			if (cpu < nr_cpu_ids)
				cpumask_set_cpu(cpu, &res);
		}
	}
	return res;
}

static inline int cpu_nr_cores(void)
{
	return nr_cpu_ids >> threads_shift;
}

static inline cpumask_t cpu_online_cores_map(void)
{
	return cpu_thread_mask_to_cores(cpu_online_mask);
}

#ifdef CONFIG_SMP
int cpu_core_index_of_thread(int cpu);
int cpu_first_thread_of_core(int core);
#else
static inline int cpu_core_index_of_thread(int cpu) { return cpu; }
static inline int cpu_first_thread_of_core(int core) { return core; }
#endif

static inline int cpu_thread_in_core(int cpu)
{
	return cpu & (threads_per_core - 1);
}

static inline int cpu_thread_in_subcore(int cpu)
{
	return cpu & (threads_per_subcore - 1);
}

static inline int cpu_first_thread_sibling(int cpu)
{
	return cpu & ~(threads_per_core - 1);
}

static inline int cpu_last_thread_sibling(int cpu)
{
	return cpu | (threads_per_core - 1);
}

static inline u32 get_tensr(void)
{
#ifdef CONFIG_BOOKE
	if (cpu_has_feature(CPU_FTR_SMT))
		return mfspr(SPRN_TENSR);
#endif
	return 1;
}

void book3e_start_thread(int thread, unsigned long addr);
void book3e_stop_thread(int thread);

#endif /* __ASSEMBLY__ */

#define INVALID_THREAD_HWID	0x0fff

#endif /* _ASM_POWERPC_CPUTHREADS_H */
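
/*
 * Illustration only -- not part of the kernel header above.  The thread/core
 * helpers are pure bit arithmetic on the Linux CPU number, relying on
 * threads_per_core being a power of 2.  The sketch below reproduces that
 * arithmetic for an assumed SMT8 layout (threads_per_core = 8, i.e.
 * threads_shift = 3) so the mapping can be checked as a standalone userspace
 * program.  Everything prefixed with "demo_"/"DEMO_" is made up for this
 * example and is not a kernel API; the block is kept under "#if 0" so it
 * does not affect compilation of the header itself.
 */
#if 0
#include <stdio.h>

#define DEMO_THREADS_PER_CORE	8	/* assumption: SMT8 core layout */

/* Same masking as cpu_thread_in_core() above: low bits select the thread */
static int demo_thread_in_core(int cpu)
{
	return cpu & (DEMO_THREADS_PER_CORE - 1);
}

/* Same masking as cpu_first_thread_sibling(): clear the thread bits */
static int demo_first_thread_sibling(int cpu)
{
	return cpu & ~(DEMO_THREADS_PER_CORE - 1);
}

/* Same masking as cpu_last_thread_sibling(): set all thread bits */
static int demo_last_thread_sibling(int cpu)
{
	return cpu | (DEMO_THREADS_PER_CORE - 1);
}

int main(void)
{
	int cpu = 13;

	/* With 8 threads per core, CPU 13 is thread 5 of the core 8..15 */
	printf("cpu %d: thread %d in core, siblings %d..%d\n",
	       cpu,
	       demo_thread_in_core(cpu),
	       demo_first_thread_sibling(cpu),
	       demo_last_thread_sibling(cpu));
	return 0;
}
#endif /* illustration only */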