/*
 * Written by: Matthew Dobson, IBM Corporation
 *
 * Copyright (C) 2002, IBM Corp.
 *
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Send feedback to <colpatch@us.ibm.com>
 */
#ifndef _ASM_X86_TOPOLOGY_H
#define _ASM_X86_TOPOLOGY_H

#ifdef CONFIG_X86_32
# ifdef CONFIG_X86_HT
#  define ENABLE_TOPO_DEFINES
# endif
#else
# ifdef CONFIG_SMP
#  define ENABLE_TOPO_DEFINES
# endif
#endif

/* Node not present */
#define NUMA_NO_NODE	(-1)

#ifdef CONFIG_NUMA
#include <linux/cpumask.h>
#include <asm/mpspec.h>

#ifdef CONFIG_X86_32

/* Mappings between node number and cpus on that node. */
extern cpumask_t node_to_cpumask_map[];

/* Mappings between logical cpu number and node number */
extern int cpu_to_node_map[];

/* Returns the number of the node containing CPU 'cpu' */
static inline int cpu_to_node(int cpu)
{
	return cpu_to_node_map[cpu];
}
#define early_cpu_to_node(cpu)	cpu_to_node(cpu)

/* Returns a bitmask of CPUs on Node 'node'.
 *
 * Side note: this function creates the returned cpumask on the stack
 * so with a high NR_CPUS count, excessive stack space is used.  The
 * cpumask_of_node function should be used whenever possible.
 */
static inline cpumask_t node_to_cpumask(int node)
{
	return node_to_cpumask_map[node];
}

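/*
 * Illustrative sketch (not part of the original header): callers that
 * only need to walk the CPUs of a node can avoid the stack copy made
 * above by using the pointer-returning helper below, e.g.
 *
 *	int cpu;
 *	for_each_cpu(cpu, cpumask_of_node(node))
 *		do_something(cpu);
 *
 * for_each_cpu() comes from <linux/cpumask.h>; do_something() is a
 * hypothetical placeholder.
 */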

/* Returns a bitmask of CPUs on Node 'node'. */
static inline const struct cpumask *cpumask_of_node(int node)
{
	return &node_to_cpumask_map[node];
}

static inline void setup_node_to_cpumask_map(void) { }

#else /* CONFIG_X86_64 */

/* Mappings between node number and cpus on that node. */
extern cpumask_t *node_to_cpumask_map;

/* Mappings between logical cpu number and node number */
DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map);

/* Returns the number of the current Node. */
DECLARE_PER_CPU(int, node_number);
#define numa_node_id()	percpu_read(node_number)

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
extern int cpu_to_node(int cpu);
extern int early_cpu_to_node(int cpu);
extern const cpumask_t *cpumask_of_node(int node);
extern cpumask_t node_to_cpumask(int node);

#else	/* !CONFIG_DEBUG_PER_CPU_MAPS */

/* Returns the number of the node containing CPU 'cpu' */
static inline int cpu_to_node(int cpu)
{
	return per_cpu(x86_cpu_to_node_map, cpu);
}

/* Same function but used if called before per_cpu areas are setup */
static inline int early_cpu_to_node(int cpu)
{
	return early_per_cpu(x86_cpu_to_node_map, cpu);
}

/* Returns a pointer to the cpumask of CPUs on Node 'node'. */
static inline const cpumask_t *cpumask_of_node(int node)
{
	return &node_to_cpumask_map[node];
}

/* Returns a bitmask of CPUs on Node 'node'. */
static inline cpumask_t node_to_cpumask(int node)
{
	return node_to_cpumask_map[node];
}

#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */

extern void setup_node_to_cpumask_map(void);

/*
 * Replace default node_to_cpumask_ptr with optimized version
 * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
 */
#define node_to_cpumask_ptr(v, node)		\
		const cpumask_t *v = cpumask_of_node(node)

#define node_to_cpumask_ptr_next(v, node)	\
			 v = cpumask_of_node(node)

#endif /* CONFIG_X86_64 */

/*
 * Returns the number of the node containing Node 'node'.  This
 * architecture is flat, so it is a pretty simple function!
 */
#define parent_node(node) (node)

#define pcibus_to_node(bus) __pcibus_to_node(bus)
#define pcibus_to_cpumask(bus) __pcibus_to_cpumask(bus)

#ifdef CONFIG_X86_32
extern unsigned long node_start_pfn[];
extern unsigned long node_end_pfn[];
extern unsigned long node_remap_size[];
#define node_has_online_mem(nid) (node_start_pfn[nid] != node_end_pfn[nid])

# define SD_CACHE_NICE_TRIES	1
# define SD_IDLE_IDX		1
# define SD_NEWIDLE_IDX		2
# define SD_FORKEXEC_IDX	0

#else

# define SD_CACHE_NICE_TRIES	2
# define SD_IDLE_IDX		2
# define SD_NEWIDLE_IDX		2
# define SD_FORKEXEC_IDX	1

#endif

/* sched_domains SD_NODE_INIT for NUMA machines */
#define SD_NODE_INIT (struct sched_domain) {		\
	.min_interval		= 8,			\
	.max_interval		= 32,			\
	.busy_factor		= 32,			\
	.imbalance_pct		= 125,			\
	.cache_nice_tries	= SD_CACHE_NICE_TRIES,	\
	.busy_idx		= 3,			\
	.idle_idx		= SD_IDLE_IDX,		\
	.newidle_idx		= SD_NEWIDLE_IDX,	\
	.wake_idx		= 1,			\
	.forkexec_idx		= SD_FORKEXEC_IDX,	\
	.flags			= SD_LOAD_BALANCE	\
				| SD_BALANCE_EXEC	\
				| SD_BALANCE_FORK	\
				| SD_WAKE_AFFINE	\
				| SD_WAKE_BALANCE	\
				| SD_SERIALIZE,		\
	.last_balance		= jiffies,		\
	.balance_interval	= 1,			\
}

#ifdef CONFIG_X86_64_ACPI_NUMA
extern int __node_distance(int, int);
#define node_distance(a, b) __node_distance(a, b)
#endif

#else /* !CONFIG_NUMA */

static inline int numa_node_id(void)
{
	return 0;
}

static inline int cpu_to_node(int cpu)
{
	return 0;
}

static inline int early_cpu_to_node(int cpu)
{
	return 0;
}

static inline const cpumask_t *cpumask_of_node(int node)
{
	return &cpu_online_map;
}
static inline cpumask_t node_to_cpumask(int node)
{
	return cpu_online_map;
}

static inline void setup_node_to_cpumask_map(void) { }

/*
 * Replace default node_to_cpumask_ptr with optimized version
 * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
 */
#define node_to_cpumask_ptr(v, node)		\
		const cpumask_t *v = cpumask_of_node(node)

#define node_to_cpumask_ptr_next(v, node)	\
			 v = cpumask_of_node(node)
#endif

#include <asm-generic/topology.h>

extern cpumask_t cpu_coregroup_map(int cpu);
extern const struct cpumask *cpu_coregroup_mask(int cpu);

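/*
 * Usage sketch (illustrative, not part of the original header): when
 * ENABLE_TOPO_DEFINES is set, generic code can query the sibling maps
 * through the topology_*() macros defined below, e.g.
 *
 *	const struct cpumask *cores   = topology_core_cpumask(cpu);
 *	const struct cpumask *threads = topology_thread_cpumask(cpu);
 *
 * Both point into the per-cpu cpu_core_map/cpu_sibling_map, so no
 * cpumask copy is made.
 */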
#ifdef ENABLE_TOPO_DEFINES
#define topology_physical_package_id(cpu)	(cpu_data(cpu).phys_proc_id)
#define topology_core_id(cpu)			(cpu_data(cpu).cpu_core_id)
#define topology_core_siblings(cpu)		(per_cpu(cpu_core_map, cpu))
#define topology_thread_siblings(cpu)		(per_cpu(cpu_sibling_map, cpu))
#define topology_core_cpumask(cpu)		(&per_cpu(cpu_core_map, cpu))
#define topology_thread_cpumask(cpu)		(&per_cpu(cpu_sibling_map, cpu))

/* indicates that pointers to the topology cpumask_t maps are valid */
#define arch_provides_topology_pointers		yes
#endif

static inline void arch_fix_phys_package_id(int num, u32 slot)
{
}

struct pci_bus;
void set_pci_bus_resources_arch_default(struct pci_bus *b);

#ifdef CONFIG_SMP
#define mc_capable()	(cpus_weight(per_cpu(cpu_core_map, 0)) != nr_cpu_ids)
#define smt_capable()			(smp_num_siblings > 1)
#endif

#ifdef CONFIG_NUMA
extern int get_mp_bus_to_node(int busnum);
extern void set_mp_bus_to_node(int busnum, int node);
#else
static inline int get_mp_bus_to_node(int busnum)
{
	return 0;
}
static inline void set_mp_bus_to_node(int busnum, int node)
{
}
#endif

#endif /* _ASM_X86_TOPOLOGY_H */
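
/*
 * Consumer sketch (illustrative only, not part of this header): a common
 * pattern is to pair cpu_to_node() with a node-aware allocator, e.g.
 *
 *	void *buf = kmalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
 *
 * kmalloc_node() and GFP_KERNEL come from <linux/slab.h> and
 * <linux/gfp.h>, not from this file.
 */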