/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * This file contains NUMA specific variables and functions which can
 * be split away from DISCONTIGMEM and are used on NUMA machines with
 * contiguous memory.
 * 		2002/08/07 Erich Focht <efocht@ess.nec.de>
 * Populate cpu entries in sysfs for non-numa systems as well
 *  	Intel Corporation - Ashok Raj
 * 02/27/2006 Zhang, Yanmin
 *	Populate cpu cache entries in sysfs for cpu cache info
 */

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/node.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/nodemask.h>
#include <linux/notifier.h>
#include <asm/mmzone.h>
#include <asm/numa.h>
#include <asm/cpu.h>

static struct ia64_cpu *sysfs_cpus;

int arch_register_cpu(int num)
{
#if defined (CONFIG_ACPI) && defined (CONFIG_HOTPLUG_CPU)
        /*
         * If CPEI can be re-targeted or if this is not the
         * CPEI target, then the CPU is hotpluggable.
         */
        if (can_cpei_retarget() || !is_cpu_cpei_target(num))
                sysfs_cpus[num].cpu.hotpluggable = 1;
        map_cpu_to_node(num, node_cpuid[num].nid);
#endif

        return register_cpu(&sysfs_cpus[num].cpu, num);
}

#ifdef CONFIG_HOTPLUG_CPU

void arch_unregister_cpu(int num)
{
        unregister_cpu(&sysfs_cpus[num].cpu);
        unmap_cpu_from_node(num, cpu_to_node(num));
}
EXPORT_SYMBOL(arch_register_cpu);
EXPORT_SYMBOL(arch_unregister_cpu);
#endif /*CONFIG_HOTPLUG_CPU*/


static int __init topology_init(void)
{
        int i, err = 0;

#ifdef CONFIG_NUMA
        /*
         * MCD - Do we want to register all ONLINE nodes, or all POSSIBLE nodes?
         */
        for_each_online_node(i) {
                if ((err = register_one_node(i)))
                        goto out;
        }
#endif

        sysfs_cpus = kzalloc(sizeof(struct ia64_cpu) * NR_CPUS, GFP_KERNEL);
        if (!sysfs_cpus)
                panic("kzalloc in topology_init failed - NR_CPUS too big?");

        for_each_present_cpu(i) {
                if ((err = arch_register_cpu(i)))
                        goto out;
        }
out:
        return err;
}

subsys_initcall(topology_init);


/*
 * Export cpu cache information through sysfs
 */
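/*
 * For each online CPU this creates a "cache" kobject under the CPU's
 * sysfs device, with one "index<N>" directory per cache leaf reported
 * by PAL.  An illustrative layout (the set of index directories depends
 * on the cache hierarchy the firmware reports):
 *
 *	/sys/devices/system/cpu/cpu0/cache/index0/level
 *	/sys/devices/system/cpu/cpu0/cache/index0/type
 *	/sys/devices/system/cpu/cpu0/cache/index0/size
 *	/sys/devices/system/cpu/cpu0/cache/index0/shared_cpu_map
 *	...
 */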
/*
 * A bunch of string arrays for pretty printing.
 */
static const char *cache_types[] = {
        "",			/* not used */
        "Instruction",
        "Data",
        "Unified"	/* unified */
};

static const char *cache_mattrib[] = {
        "WriteThrough",
        "WriteBack",
        "",		/* reserved */
        ""		/* reserved */
};

struct cache_info {
        pal_cache_config_info_t cci;
        cpumask_t shared_cpu_map;
        int level;
        int type;
        struct kobject kobj;
};

struct cpu_cache_info {
        struct cache_info *cache_leaves;
        int num_cache_leaves;
        struct kobject kobj;
};

static struct cpu_cache_info all_cpu_cache_info[NR_CPUS] __cpuinitdata;
#define LEAF_KOBJECT_PTR(x,y)    (&all_cpu_cache_info[x].cache_leaves[y])
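/*
 * cache_shared_cpu_map_setup(): compute the mask of CPUs sharing a cache
 * leaf.  With CONFIG_SMP, the PAL shared-cache records are walked and the
 * reported core/thread IDs are matched against every possible CPU in the
 * same socket; otherwise (and on single-core, single-thread parts) the
 * leaf is simply marked as private to the given CPU.
 */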
#ifdef CONFIG_SMP
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu,
                struct cache_info *this_leaf)
{
        pal_cache_shared_info_t csi;
        int num_shared, i = 0;
        unsigned int j;

        if (cpu_data(cpu)->threads_per_core <= 1 &&
                cpu_data(cpu)->cores_per_socket <= 1) {
                cpu_set(cpu, this_leaf->shared_cpu_map);
                return;
        }

        if (ia64_pal_cache_shared_info(this_leaf->level,
                                        this_leaf->type,
                                        0,
                                        &csi) != PAL_STATUS_SUCCESS)
                return;

        num_shared = (int) csi.num_shared;
        do {
                for_each_possible_cpu(j)
                        if (cpu_data(cpu)->socket_id == cpu_data(j)->socket_id
                                && cpu_data(j)->core_id == csi.log1_cid
                                && cpu_data(j)->thread_id == csi.log1_tid)
                                cpu_set(j, this_leaf->shared_cpu_map);

                i++;
        } while (i < num_shared &&
                ia64_pal_cache_shared_info(this_leaf->level,
                                this_leaf->type,
                                i,
                                &csi) == PAL_STATUS_SUCCESS);
}
#else
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu,
                struct cache_info *this_leaf)
{
        cpu_set(cpu, this_leaf->shared_cpu_map);
        return;
}
#endif

static ssize_t show_coherency_line_size(struct cache_info *this_leaf,
                                        char *buf)
{
        return sprintf(buf, "%u\n", 1 << this_leaf->cci.pcci_line_size);
}

static ssize_t show_ways_of_associativity(struct cache_info *this_leaf,
                                        char *buf)
{
        return sprintf(buf, "%u\n", this_leaf->cci.pcci_assoc);
}

static ssize_t show_attributes(struct cache_info *this_leaf, char *buf)
{
        return sprintf(buf,
                        "%s\n",
                        cache_mattrib[this_leaf->cci.pcci_cache_attr]);
}

static ssize_t show_size(struct cache_info *this_leaf, char *buf)
{
        return sprintf(buf, "%uK\n", this_leaf->cci.pcci_cache_size / 1024);
}

static ssize_t show_number_of_sets(struct cache_info *this_leaf, char *buf)
{
        unsigned number_of_sets = this_leaf->cci.pcci_cache_size;
        number_of_sets /= this_leaf->cci.pcci_assoc;
        number_of_sets /= 1 << this_leaf->cci.pcci_line_size;

        return sprintf(buf, "%u\n", number_of_sets);
}

static ssize_t show_shared_cpu_map(struct cache_info *this_leaf, char *buf)
{
        ssize_t len;
        cpumask_t shared_cpu_map;

        cpus_and(shared_cpu_map,
                 this_leaf->shared_cpu_map, cpu_online_map);
        len = cpumask_scnprintf(buf, NR_CPUS+1, shared_cpu_map);
        len += sprintf(buf+len, "\n");
        return len;
}

static ssize_t show_type(struct cache_info *this_leaf, char *buf)
{
        int type = this_leaf->type + this_leaf->cci.pcci_unified;
        return sprintf(buf, "%s\n", cache_types[type]);
}

static ssize_t show_level(struct cache_info *this_leaf, char *buf)
{
        return sprintf(buf, "%u\n", this_leaf->level);
}

struct cache_attr {
        struct attribute attr;
        ssize_t (*show)(struct cache_info *, char *);
        ssize_t (*store)(struct cache_info *, const char *, size_t count);
};

#ifdef define_one_ro
#undef define_one_ro
#endif
#define define_one_ro(_name) \
        static struct cache_attr _name = \
                __ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(ways_of_associativity);
define_one_ro(size);
define_one_ro(number_of_sets);
define_one_ro(shared_cpu_map);
define_one_ro(attributes);

static struct attribute *cache_default_attrs[] = {
        &type.attr,
        &level.attr,
        &coherency_line_size.attr,
        &ways_of_associativity.attr,
        &attributes.attr,
        &size.attr,
        &number_of_sets.attr,
        &shared_cpu_map.attr,
        NULL
};

#define to_object(k) container_of(k, struct cache_info, kobj)
#define to_attr(a) container_of(a, struct cache_attr, attr)

static ssize_t cache_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
        struct cache_attr *fattr = to_attr(attr);
        struct cache_info *this_leaf = to_object(kobj);
        ssize_t ret;

        ret = fattr->show ? fattr->show(this_leaf, buf) : 0;
        return ret;
}

static struct sysfs_ops cache_sysfs_ops = {
        .show = cache_show
};

static struct kobj_type cache_ktype = {
        .sysfs_ops	= &cache_sysfs_ops,
        .default_attrs	= cache_default_attrs,
};

static struct kobj_type cache_ktype_percpu_entry = {
        .sysfs_ops	= &cache_sysfs_ops,
};

static void __cpuinit cpu_cache_sysfs_exit(unsigned int cpu)
{
        kfree(all_cpu_cache_info[cpu].cache_leaves);
        all_cpu_cache_info[cpu].cache_leaves = NULL;
        all_cpu_cache_info[cpu].num_cache_leaves = 0;
        memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject));
        return;
}
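/*
 * cpu_cache_sysfs_init(): query PAL for the CPU's cache hierarchy and
 * allocate one cache_info entry per leaf.  For every level returned by
 * ia64_pal_cache_summary(), both the data/unified (type 2) and the
 * instruction (type 1) configurations are requested; leaves that PAL
 * does not report are skipped.
 */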
static int __cpuinit cpu_cache_sysfs_init(unsigned int cpu)
{
        u64 i, levels, unique_caches;
        pal_cache_config_info_t cci;
        int j;
        s64 status;
        struct cache_info *this_cache;
        int num_cache_leaves = 0;

        if ((status = ia64_pal_cache_summary(&levels, &unique_caches)) != 0) {
                printk(KERN_ERR "ia64_pal_cache_summary=%ld\n", status);
                return -1;
        }

        this_cache = kzalloc(sizeof(struct cache_info) * unique_caches,
                        GFP_KERNEL);
        if (this_cache == NULL)
                return -ENOMEM;

        for (i = 0; i < levels; i++) {
                for (j = 2; j > 0; j--) {
                        if ((status = ia64_pal_cache_config_info(i, j, &cci)) !=
                                PAL_STATUS_SUCCESS)
                                continue;

                        this_cache[num_cache_leaves].cci = cci;
                        this_cache[num_cache_leaves].level = i + 1;
                        this_cache[num_cache_leaves].type = j;

                        cache_shared_cpu_map_setup(cpu,
                                        &this_cache[num_cache_leaves]);
                        num_cache_leaves++;
                }
        }

        all_cpu_cache_info[cpu].cache_leaves = this_cache;
        all_cpu_cache_info[cpu].num_cache_leaves = num_cache_leaves;

        memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject));

        return 0;
}

/* Add cache interface for CPU device */
static int __cpuinit cache_add_dev(struct sys_device *sys_dev)
{
        unsigned int cpu = sys_dev->id;
        unsigned long i, j;
        struct cache_info *this_object;
        int retval = 0;
        cpumask_t oldmask;

        if (all_cpu_cache_info[cpu].kobj.parent)
                return 0;

        /*
         * Run the PAL cache queries on the CPU being described, then
         * restore the caller's affinity mask.
         */
        oldmask = current->cpus_allowed;
        retval = set_cpus_allowed(current, cpumask_of_cpu(cpu));
        if (unlikely(retval))
                return retval;

        retval = cpu_cache_sysfs_init(cpu);
        set_cpus_allowed(current, oldmask);
        if (unlikely(retval < 0))
                return retval;

        retval = kobject_init_and_add(&all_cpu_cache_info[cpu].kobj,
                                      &cache_ktype_percpu_entry, &sys_dev->kobj,
                                      "%s", "cache");

        for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++) {
                this_object = LEAF_KOBJECT_PTR(cpu,i);
                retval = kobject_init_and_add(&(this_object->kobj),
                                              &cache_ktype,
                                              &all_cpu_cache_info[cpu].kobj,
                                              "index%1lu", i);
                if (unlikely(retval)) {
                        for (j = 0; j < i; j++) {
                                kobject_put(&(LEAF_KOBJECT_PTR(cpu,j)->kobj));
                        }
                        kobject_put(&all_cpu_cache_info[cpu].kobj);
                        cpu_cache_sysfs_exit(cpu);
                        break;
                }
                kobject_uevent(&(this_object->kobj), KOBJ_ADD);
        }
        kobject_uevent(&all_cpu_cache_info[cpu].kobj, KOBJ_ADD);
        return retval;
}

/* Remove cache interface for CPU device */
static int __cpuinit cache_remove_dev(struct sys_device *sys_dev)
{
        unsigned int cpu = sys_dev->id;
        unsigned long i;

        for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++)
                kobject_put(&(LEAF_KOBJECT_PTR(cpu,i)->kobj));

        if (all_cpu_cache_info[cpu].kobj.parent) {
                kobject_put(&all_cpu_cache_info[cpu].kobj);
                memset(&all_cpu_cache_info[cpu].kobj,
                        0,
                        sizeof(struct kobject));
        }

        cpu_cache_sysfs_exit(cpu);

        return 0;
}

/*
 * When a cpu is hot-plugged, do a check and initialize the
 * cache kobject if necessary.
 */
static int __cpuinit cache_cpu_callback(struct notifier_block *nfb,
                unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;
        struct sys_device *sys_dev;

        sys_dev = get_cpu_sysdev(cpu);
        switch (action) {
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                cache_add_dev(sys_dev);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                cache_remove_dev(sys_dev);
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cache_cpu_notifier =
{
        .notifier_call = cache_cpu_callback
};

static int __init cache_sysfs_init(void)
{
        int i;

        for_each_online_cpu(i) {
                struct sys_device *sys_dev = get_cpu_sysdev((unsigned int)i);
                cache_add_dev(sys_dev);
        }

        register_hotcpu_notifier(&cache_cpu_notifier);

        return 0;
}

device_initcall(cache_sysfs_init);