/*
 * CPU subsystem support
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/topology.h>
#include <linux/device.h>
#include <linux/node.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/percpu.h>

#include "base.h"

struct bus_type cpu_subsys = {
        .name = "cpu",
        .dev_name = "cpu",
};
EXPORT_SYMBOL_GPL(cpu_subsys);

static DEFINE_PER_CPU(struct device *, cpu_sys_devices);

#ifdef CONFIG_HOTPLUG_CPU
static ssize_t show_online(struct device *dev,
                           struct device_attribute *attr,
                           char *buf)
{
        struct cpu *cpu = container_of(dev, struct cpu, dev);

        return sprintf(buf, "%u\n", !!cpu_online(cpu->dev.id));
}

static ssize_t __ref store_online(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t count)
{
        struct cpu *cpu = container_of(dev, struct cpu, dev);
        ssize_t ret;

        cpu_hotplug_driver_lock();
        switch (buf[0]) {
        case '0':
                ret = cpu_down(cpu->dev.id);
                if (!ret)
                        kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
                break;
        case '1':
                ret = cpu_up(cpu->dev.id);
                if (!ret)
                        kobject_uevent(&dev->kobj, KOBJ_ONLINE);
                break;
        default:
                ret = -EINVAL;
        }
        cpu_hotplug_driver_unlock();

        if (ret >= 0)
                ret = count;
        return ret;
}
static DEVICE_ATTR(online, 0644, show_online, store_online);

static void __cpuinit register_cpu_control(struct cpu *cpu)
{
        device_create_file(&cpu->dev, &dev_attr_online);
}

void unregister_cpu(struct cpu *cpu)
{
        int logical_cpu = cpu->dev.id;

        unregister_cpu_under_node(logical_cpu, cpu_to_node(logical_cpu));

        device_remove_file(&cpu->dev, &dev_attr_online);

        device_unregister(&cpu->dev);
        per_cpu(cpu_sys_devices, logical_cpu) = NULL;
        return;
}

#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
static ssize_t cpu_probe_store(struct device *dev,
                               struct device_attribute *attr,
                               const char *buf,
                               size_t count)
{
        return arch_cpu_probe(buf, count);
}

static ssize_t cpu_release_store(struct device *dev,
                                 struct device_attribute *attr,
                                 const char *buf,
                                 size_t count)
{
        return arch_cpu_release(buf, count);
}

static DEVICE_ATTR(probe, S_IWUSR, NULL, cpu_probe_store);
static DEVICE_ATTR(release, S_IWUSR, NULL, cpu_release_store);
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */

#else /* ... !CONFIG_HOTPLUG_CPU */
static inline void register_cpu_control(struct cpu *cpu)
{
}
#endif /* CONFIG_HOTPLUG_CPU */

#ifdef CONFIG_KEXEC
#include <linux/kexec.h>

static ssize_t show_crash_notes(struct device *dev, struct device_attribute *attr,
                                char *buf)
{
        struct cpu *cpu = container_of(dev, struct cpu, dev);
        ssize_t rc;
        unsigned long long addr;
        int cpunum;

        cpunum = cpu->dev.id;

        /*
         * Might be reading other cpu's data based on which cpu read thread
         * has been scheduled. But cpu data (memory) is allocated once during
         * boot up and this data does not change thereafter. Hence this
         * operation should be safe. No locking required.
         */
        addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpunum));
        rc = sprintf(buf, "%Lx\n", addr);
        return rc;
}
static DEVICE_ATTR(crash_notes, 0400, show_crash_notes, NULL);
#endif

/*
 * Print cpu online, possible, present, and system maps
 */

struct cpu_attr {
        struct device_attribute attr;
        const struct cpumask *const * const map;
};

static ssize_t show_cpus_attr(struct device *dev,
                              struct device_attribute *attr,
                              char *buf)
{
        struct cpu_attr *ca = container_of(attr, struct cpu_attr, attr);
        int n = cpulist_scnprintf(buf, PAGE_SIZE-2, *(ca->map));

        buf[n++] = '\n';
        buf[n] = '\0';
        return n;
}

#define _CPU_ATTR(name, map)                                            \
        { __ATTR(name, 0444, show_cpus_attr, NULL), map }

/* Keep in sync with cpu_subsys_attrs */
static struct cpu_attr cpu_attrs[] = {
        _CPU_ATTR(online, &cpu_online_mask),
        _CPU_ATTR(possible, &cpu_possible_mask),
        _CPU_ATTR(present, &cpu_present_mask),
};

/*
 * Print values for NR_CPUS and offlined cpus
 */
static ssize_t print_cpus_kernel_max(struct device *dev,
                                     struct device_attribute *attr, char *buf)
{
        int n = snprintf(buf, PAGE_SIZE-2, "%d\n", NR_CPUS - 1);
        return n;
}
static DEVICE_ATTR(kernel_max, 0444, print_cpus_kernel_max, NULL);

/* arch-optional setting to enable display of offline cpus >= nr_cpu_ids */
unsigned int total_cpus;

static ssize_t print_cpus_offline(struct device *dev,
                                  struct device_attribute *attr, char *buf)
{
        int n = 0, len = PAGE_SIZE-2;
        cpumask_var_t offline;

        /* display offline cpus < nr_cpu_ids */
        if (!alloc_cpumask_var(&offline, GFP_KERNEL))
                return -ENOMEM;
        cpumask_andnot(offline, cpu_possible_mask, cpu_online_mask);
        n = cpulist_scnprintf(buf, len, offline);
        free_cpumask_var(offline);

        /* display offline cpus >= nr_cpu_ids */
        if (total_cpus && nr_cpu_ids < total_cpus) {
                if (n && n < len)
                        buf[n++] = ',';

                if (nr_cpu_ids == total_cpus-1)
                        n += snprintf(&buf[n], len - n, "%d", nr_cpu_ids);
                else
                        n += snprintf(&buf[n], len - n, "%d-%d",
                                      nr_cpu_ids, total_cpus-1);
        }

        n += snprintf(&buf[n], len - n, "\n");
        return n;
}
static DEVICE_ATTR(offline, 0444, print_cpus_offline, NULL);

static void cpu_device_release(struct device *dev)
{
        /*
         * This is an empty function to prevent the driver core from spitting a
         * warning at us. Yes, I know this is directly opposite of what the
         * documentation for the driver core and kobjects say, and the author
         * of this code has already been publicly ridiculed for doing
         * something as foolish as this. However, at this point in time, it is
         * the only way to handle the issue of statically allocated cpu
         * devices. The different architectures will have their cpu device
         * code reworked to properly handle this in the near future, so this
         * function will then be changed to correctly free up the memory held
         * by the cpu device.
         *
         * Never copy this way of doing things, or you too will be made fun of
         * on the linux-kernel list, you have been warned.
         */
}

/*
 * register_cpu - Setup a sysfs device for a CPU.
 * @cpu - cpu->hotpluggable field set to 1 will generate a control file in
 *        sysfs for this CPU.
 * @num - CPU number to use when creating the device.
 *
 * Initialize and register the CPU device.
 */
int __cpuinit register_cpu(struct cpu *cpu, int num)
{
        int error;

        cpu->node_id = cpu_to_node(num);
        memset(&cpu->dev, 0x00, sizeof(struct device));
        cpu->dev.id = num;
        cpu->dev.bus = &cpu_subsys;
        cpu->dev.release = cpu_device_release;
#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
        cpu->dev.bus->uevent = arch_cpu_uevent;
#endif
        error = device_register(&cpu->dev);
        if (!error && cpu->hotpluggable)
                register_cpu_control(cpu);
        if (!error)
                per_cpu(cpu_sys_devices, num) = &cpu->dev;
        if (!error)
                register_cpu_under_node(num, cpu_to_node(num));

#ifdef CONFIG_KEXEC
        if (!error)
                error = device_create_file(&cpu->dev, &dev_attr_crash_notes);
#endif
        return error;
}

struct device *get_cpu_device(unsigned cpu)
{
        if (cpu < nr_cpu_ids && cpu_possible(cpu))
                return per_cpu(cpu_sys_devices, cpu);
        else
                return NULL;
}
EXPORT_SYMBOL_GPL(get_cpu_device);

#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
static DEVICE_ATTR(modalias, 0444, arch_print_cpu_modalias, NULL);
#endif

static struct attribute *cpu_root_attrs[] = {
#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
        &dev_attr_probe.attr,
        &dev_attr_release.attr,
#endif
        &cpu_attrs[0].attr.attr,
        &cpu_attrs[1].attr.attr,
        &cpu_attrs[2].attr.attr,
        &dev_attr_kernel_max.attr,
        &dev_attr_offline.attr,
#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
        &dev_attr_modalias.attr,
#endif
        NULL
};

static struct attribute_group cpu_root_attr_group = {
        .attrs = cpu_root_attrs,
};

static const struct attribute_group *cpu_root_attr_groups[] = {
        &cpu_root_attr_group,
        NULL,
};

bool cpu_is_hotpluggable(unsigned cpu)
{
        struct device *dev = get_cpu_device(cpu);
        return dev && container_of(dev, struct cpu, dev)->hotpluggable;
}
EXPORT_SYMBOL_GPL(cpu_is_hotpluggable);

#ifdef CONFIG_GENERIC_CPU_DEVICES
static DEFINE_PER_CPU(struct cpu, cpu_devices);
#endif

static void __init cpu_dev_register_generic(void)
{
#ifdef CONFIG_GENERIC_CPU_DEVICES
        int i;

        for_each_possible_cpu(i) {
                if (register_cpu(&per_cpu(cpu_devices, i), i))
                        panic("Failed to register CPU device");
        }
#endif
}

void __init cpu_dev_init(void)
{
        if (subsys_system_register(&cpu_subsys, cpu_root_attr_groups))
                panic("Failed to register CPU subsystem");

        cpu_dev_register_generic();
}
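
/*
 * Illustrative sketch only (kept under #if 0, not compiled): an architecture
 * that does not select CONFIG_GENERIC_CPU_DEVICES is expected to keep its own
 * struct cpu instances and call register_cpu() for each present CPU from its
 * topology init code, optionally setting ->hotpluggable to expose the
 * "online" control file.  The names example_cpu_devices and
 * example_topology_init below are hypothetical, not part of this file.
 */
#if 0
static struct cpu example_cpu_devices[NR_CPUS];

static int __init example_topology_init(void)
{
        int i;

        for_each_present_cpu(i) {
                /* mark CPUs hot-unpluggable so sysfs grows an "online" file */
                example_cpu_devices[i].hotpluggable = 1;
                if (register_cpu(&example_cpu_devices[i], i))
                        pr_warn("cpu%d: failed to register device\n", i);
        }
        return 0;
}
subsys_initcall(example_topology_init);
#endif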