// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/ksysfs.c - sysfs attributes in /sys/kernel, which
 *                   are not related to any other subsystem
 *
 * Copyright (C) 2004 Kay Sievers <kay.sievers@vrfy.org>
 */

#include <asm/byteorder.h>
#include <linux/kobject.h>
#include <linux/string.h>
#include <linux/sysfs.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/profile.h>
#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/capability.h>
#include <linux/compiler.h>

#include <linux/rcupdate.h>	/* rcu_expedited and rcu_normal */

#if defined(__LITTLE_ENDIAN)
#define CPU_BYTEORDER_STRING	"little"
#elif defined(__BIG_ENDIAN)
#define CPU_BYTEORDER_STRING	"big"
#else
#error Unknown byteorder
#endif

#define KERNEL_ATTR_RO(_name) \
static struct kobj_attribute _name##_attr = __ATTR_RO(_name)

#define KERNEL_ATTR_RW(_name) \
static struct kobj_attribute _name##_attr = __ATTR_RW(_name)

/* current uevent sequence number */
static ssize_t uevent_seqnum_show(struct kobject *kobj,
                                  struct kobj_attribute *attr, char *buf)
{
        return sprintf(buf, "%llu\n", (unsigned long long)uevent_seqnum);
}
KERNEL_ATTR_RO(uevent_seqnum);

/* cpu byteorder */
static ssize_t cpu_byteorder_show(struct kobject *kobj,
                                  struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%s\n", CPU_BYTEORDER_STRING);
}
KERNEL_ATTR_RO(cpu_byteorder);

/* address bits */
static ssize_t address_bits_show(struct kobject *kobj,
                                 struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%zu\n", sizeof(void *) * 8 /* CHAR_BIT */);
}
KERNEL_ATTR_RO(address_bits);

#ifdef CONFIG_UEVENT_HELPER
/* uevent helper program, used during early boot */
static ssize_t uevent_helper_show(struct kobject *kobj,
                                  struct kobj_attribute *attr, char *buf)
{
        return sprintf(buf, "%s\n", uevent_helper);
}
static ssize_t uevent_helper_store(struct kobject *kobj,
                                   struct kobj_attribute *attr,
                                   const char *buf, size_t count)
{
        if (count+1 > UEVENT_HELPER_PATH_LEN)
                return -ENOENT;
        memcpy(uevent_helper, buf, count);
        uevent_helper[count] = '\0';
        if (count && uevent_helper[count-1] == '\n')
                uevent_helper[count-1] = '\0';
        return count;
}
KERNEL_ATTR_RW(uevent_helper);
#endif

#ifdef CONFIG_PROFILING
static ssize_t profiling_show(struct kobject *kobj,
                              struct kobj_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", prof_on);
}
static ssize_t profiling_store(struct kobject *kobj,
                               struct kobj_attribute *attr,
                               const char *buf, size_t count)
{
        int ret;

        if (prof_on)
                return -EEXIST;
        /*
         * This eventually calls into get_option() which
         * has a ton of callers and is not const.  It is
         * easiest to cast it away here.
         */
        profile_setup((char *)buf);
        ret = profile_init();
        if (ret)
                return ret;
        ret = create_proc_profile();
        if (ret)
                return ret;
        return count;
}
KERNEL_ATTR_RW(profiling);
#endif

#ifdef CONFIG_KEXEC_CORE
static ssize_t kexec_loaded_show(struct kobject *kobj,
                                 struct kobj_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", !!kexec_image);
}
KERNEL_ATTR_RO(kexec_loaded);

static ssize_t kexec_crash_loaded_show(struct kobject *kobj,
                                       struct kobj_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", kexec_crash_loaded());
}
KERNEL_ATTR_RO(kexec_crash_loaded);

static ssize_t kexec_crash_size_show(struct kobject *kobj,
                                     struct kobj_attribute *attr, char *buf)
{
        ssize_t size = crash_get_memory_size();

        if (size < 0)
                return size;

        return sprintf(buf, "%zd\n", size);
}
static ssize_t kexec_crash_size_store(struct kobject *kobj,
                                      struct kobj_attribute *attr,
                                      const char *buf, size_t count)
{
        unsigned long cnt;
        int ret;

        if (kstrtoul(buf, 0, &cnt))
                return -EINVAL;

        ret = crash_shrink_memory(cnt);
        return ret < 0 ? ret : count;
}
KERNEL_ATTR_RW(kexec_crash_size);

#endif /* CONFIG_KEXEC_CORE */

#ifdef CONFIG_CRASH_CORE

static ssize_t vmcoreinfo_show(struct kobject *kobj,
                               struct kobj_attribute *attr, char *buf)
{
        phys_addr_t vmcore_base = paddr_vmcoreinfo_note();
        return sprintf(buf, "%pa %x\n", &vmcore_base,
                       (unsigned int)VMCOREINFO_NOTE_SIZE);
}
KERNEL_ATTR_RO(vmcoreinfo);

#endif /* CONFIG_CRASH_CORE */

/* whether file capabilities are enabled */
static ssize_t fscaps_show(struct kobject *kobj,
                           struct kobj_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", file_caps_enabled);
}
KERNEL_ATTR_RO(fscaps);

#ifndef CONFIG_TINY_RCU
int rcu_expedited;
static ssize_t rcu_expedited_show(struct kobject *kobj,
                                  struct kobj_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", READ_ONCE(rcu_expedited));
}
static ssize_t rcu_expedited_store(struct kobject *kobj,
                                   struct kobj_attribute *attr,
                                   const char *buf, size_t count)
{
        if (kstrtoint(buf, 0, &rcu_expedited))
                return -EINVAL;

        return count;
}
KERNEL_ATTR_RW(rcu_expedited);

int rcu_normal;
static ssize_t rcu_normal_show(struct kobject *kobj,
                               struct kobj_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", READ_ONCE(rcu_normal));
}
static ssize_t rcu_normal_store(struct kobject *kobj,
                                struct kobj_attribute *attr,
                                const char *buf, size_t count)
{
        if (kstrtoint(buf, 0, &rcu_normal))
                return -EINVAL;

        return count;
}
KERNEL_ATTR_RW(rcu_normal);
#endif /* #ifndef CONFIG_TINY_RCU */

/*
 * Make /sys/kernel/notes give the raw contents of our kernel .notes section.
 */
extern const void __start_notes __weak;
extern const void __stop_notes __weak;
#define notes_size (&__stop_notes - &__start_notes)

static ssize_t notes_read(struct file *filp, struct kobject *kobj,
                          struct bin_attribute *bin_attr,
                          char *buf, loff_t off, size_t count)
{
        memcpy(buf, &__start_notes + off, count);
        return count;
}

static struct bin_attribute notes_attr __ro_after_init = {
        .attr = {
                .name = "notes",
                .mode = S_IRUGO,
        },
        .read = &notes_read,
};

struct kobject *kernel_kobj;
EXPORT_SYMBOL_GPL(kernel_kobj);

static struct attribute * kernel_attrs[] = {
        &fscaps_attr.attr,
        &uevent_seqnum_attr.attr,
        &cpu_byteorder_attr.attr,
        &address_bits_attr.attr,
#ifdef CONFIG_UEVENT_HELPER
        &uevent_helper_attr.attr,
#endif
#ifdef CONFIG_PROFILING
        &profiling_attr.attr,
#endif
#ifdef CONFIG_KEXEC_CORE
        &kexec_loaded_attr.attr,
        &kexec_crash_loaded_attr.attr,
        &kexec_crash_size_attr.attr,
#endif
#ifdef CONFIG_CRASH_CORE
        &vmcoreinfo_attr.attr,
#endif
#ifndef CONFIG_TINY_RCU
        &rcu_expedited_attr.attr,
        &rcu_normal_attr.attr,
#endif
        NULL
};

static const struct attribute_group kernel_attr_group = {
        .attrs = kernel_attrs,
};

static int __init ksysfs_init(void)
{
        int error;

        kernel_kobj = kobject_create_and_add("kernel", NULL);
        if (!kernel_kobj) {
                error = -ENOMEM;
                goto exit;
        }
        error = sysfs_create_group(kernel_kobj, &kernel_attr_group);
        if (error)
                goto kset_exit;

        if (notes_size > 0) {
                notes_attr.size = notes_size;
                error = sysfs_create_bin_file(kernel_kobj, &notes_attr);
                if (error)
                        goto group_exit;
        }

        return 0;

group_exit:
        sysfs_remove_group(kernel_kobj, &kernel_attr_group);
kset_exit:
        kobject_put(kernel_kobj);
exit:
        return error;
}

core_initcall(ksysfs_init);
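
/*
 * Illustrative sketch, not part of the file above: every attribute registered
 * by ksysfs_init() shows up as a small text file under /sys/kernel/, so a
 * userspace consumer needs nothing beyond plain open()/read().  For example,
 * reading the value produced by cpu_byteorder_show():
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		char buf[16] = "";
 *		int fd = open("/sys/kernel/cpu_byteorder", O_RDONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		if (read(fd, buf, sizeof(buf) - 1) < 0)
 *			buf[0] = '\0';
 *		close(fd);
 *		printf("%s", buf);	// prints "little\n" or "big\n"
 *		return 0;
 *	}
 */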