// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/sysfs.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Theodore Ts'o (tytso@mit.edu)
 *
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>

#include "ext4.h"
#include "ext4_jbd2.h"

typedef enum {
	attr_noop,
	attr_delayed_allocation_blocks,
	attr_session_write_kbytes,
	attr_lifetime_write_kbytes,
	attr_reserved_clusters,
	attr_inode_readahead,
	attr_trigger_test_error,
	attr_first_error_time,
	attr_last_error_time,
	attr_feature,
	attr_pointer_ui,
	attr_pointer_atomic,
	attr_journal_task,
} attr_id_t;

typedef enum {
	ptr_explicit,
	ptr_ext4_sb_info_offset,
	ptr_ext4_super_block_offset,
} attr_ptr_t;

static const char proc_dirname[] = "fs/ext4";
static struct proc_dir_entry *ext4_proc_root;

struct ext4_attr {
	struct attribute attr;
	short attr_id;
	short attr_ptr;
	union {
		int offset;
		void *explicit_ptr;
	} u;
};

static ssize_t session_write_kbytes_show(struct ext4_sb_info *sbi, char *buf)
{
	struct super_block *sb = sbi->s_buddy_cache->i_sb;

	if (!sb->s_bdev->bd_part)
		return snprintf(buf, PAGE_SIZE, "0\n");
	return snprintf(buf, PAGE_SIZE, "%lu\n",
			(part_stat_read(sb->s_bdev->bd_part,
					sectors[STAT_WRITE]) -
			 sbi->s_sectors_written_start) >> 1);
}

static ssize_t lifetime_write_kbytes_show(struct ext4_sb_info *sbi, char *buf)
{
	struct super_block *sb = sbi->s_buddy_cache->i_sb;

	if (!sb->s_bdev->bd_part)
		return snprintf(buf, PAGE_SIZE, "0\n");
	return snprintf(buf, PAGE_SIZE, "%llu\n",
			(unsigned long long)(sbi->s_kbytes_written +
			((part_stat_read(sb->s_bdev->bd_part,
					 sectors[STAT_WRITE]) -
			  EXT4_SB(sb)->s_sectors_written_start) >> 1)));
}

static ssize_t inode_readahead_blks_store(struct ext4_sb_info *sbi,
					  const char *buf, size_t count)
{
	unsigned long t;
	int ret;

	ret = kstrtoul(skip_spaces(buf), 0, &t);
	if (ret)
		return ret;

	if (t && (!is_power_of_2(t) || t > 0x40000000))
		return -EINVAL;

	sbi->s_inode_readahead_blks = t;
	return count;
}

static ssize_t reserved_clusters_store(struct ext4_sb_info *sbi,
				       const char *buf, size_t count)
{
	unsigned long long val;
	ext4_fsblk_t clusters = (ext4_blocks_count(sbi->s_es) >>
				 sbi->s_cluster_bits);
	int ret;

	ret = kstrtoull(skip_spaces(buf), 0, &val);
	if (ret || val >= clusters)
		return -EINVAL;

	atomic64_set(&sbi->s_resv_clusters, val);
	return count;
}

static ssize_t trigger_test_error(struct ext4_sb_info *sbi,
				  const char *buf, size_t count)
{
	int len = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (len && buf[len-1] == '\n')
		len--;

	if (len)
		ext4_error(sbi->s_sb, "%.*s", len, buf);
	return count;
}

static ssize_t journal_task_show(struct ext4_sb_info *sbi, char *buf)
{
	if (!sbi->s_journal)
		return snprintf(buf, PAGE_SIZE, "<none>\n");
	return snprintf(buf, PAGE_SIZE, "%d\n",
			task_pid_vnr(sbi->s_journal->j_task));
}
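/*
 * Attribute declaration helpers. Each macro below builds a struct ext4_attr
 * whose attr_id selects a case in ext4_attr_show()/ext4_attr_store() and
 * whose attr_ptr/u members say where the backing value lives: an explicit
 * pointer, an offset into struct ext4_sb_info, or an offset into the
 * superblock (sbi->s_es).
 */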
#define EXT4_ATTR(_name,_mode,_id)					\
static struct ext4_attr ext4_attr_##_name = {				\
	.attr = {.name = __stringify(_name), .mode = _mode },		\
	.attr_id = attr_##_id,						\
}

#define EXT4_ATTR_FUNC(_name,_mode)  EXT4_ATTR(_name,_mode,_name)

#define EXT4_ATTR_FEATURE(_name)   EXT4_ATTR(_name, 0444, feature)

#define EXT4_ATTR_OFFSET(_name,_mode,_id,_struct,_elname)		\
static struct ext4_attr ext4_attr_##_name = {				\
	.attr = {.name = __stringify(_name), .mode = _mode },		\
	.attr_id = attr_##_id,						\
	.attr_ptr = ptr_##_struct##_offset,				\
	.u = {								\
		.offset = offsetof(struct _struct, _elname),		\
	},								\
}

#define EXT4_RO_ATTR_ES_UI(_name,_elname)				\
	EXT4_ATTR_OFFSET(_name, 0444, pointer_ui, ext4_super_block, _elname)

#define EXT4_RW_ATTR_SBI_UI(_name,_elname)				\
	EXT4_ATTR_OFFSET(_name, 0644, pointer_ui, ext4_sb_info, _elname)

#define EXT4_ATTR_PTR(_name,_mode,_id,_ptr)				\
static struct ext4_attr ext4_attr_##_name = {				\
	.attr = {.name = __stringify(_name), .mode = _mode },		\
	.attr_id = attr_##_id,						\
	.attr_ptr = ptr_explicit,					\
	.u = {								\
		.explicit_ptr = _ptr,					\
	},								\
}

#define ATTR_LIST(name) &ext4_attr_##name.attr

EXT4_ATTR_FUNC(delayed_allocation_blocks, 0444);
EXT4_ATTR_FUNC(session_write_kbytes, 0444);
EXT4_ATTR_FUNC(lifetime_write_kbytes, 0444);
EXT4_ATTR_FUNC(reserved_clusters, 0644);

EXT4_ATTR_OFFSET(inode_readahead_blks, 0644, inode_readahead,
		 ext4_sb_info, s_inode_readahead_blks);
EXT4_RW_ATTR_SBI_UI(inode_goal, s_inode_goal);
EXT4_RW_ATTR_SBI_UI(mb_stats, s_mb_stats);
EXT4_RW_ATTR_SBI_UI(mb_max_to_scan, s_mb_max_to_scan);
EXT4_RW_ATTR_SBI_UI(mb_min_to_scan, s_mb_min_to_scan);
EXT4_RW_ATTR_SBI_UI(mb_order2_req, s_mb_order2_reqs);
EXT4_RW_ATTR_SBI_UI(mb_stream_req, s_mb_stream_request);
EXT4_RW_ATTR_SBI_UI(mb_group_prealloc, s_mb_group_prealloc);
EXT4_RW_ATTR_SBI_UI(extent_max_zeroout_kb, s_extent_max_zeroout_kb);
EXT4_ATTR(trigger_fs_error, 0200, trigger_test_error);
EXT4_RW_ATTR_SBI_UI(err_ratelimit_interval_ms, s_err_ratelimit_state.interval);
EXT4_RW_ATTR_SBI_UI(err_ratelimit_burst, s_err_ratelimit_state.burst);
EXT4_RW_ATTR_SBI_UI(warning_ratelimit_interval_ms, s_warning_ratelimit_state.interval);
EXT4_RW_ATTR_SBI_UI(warning_ratelimit_burst, s_warning_ratelimit_state.burst);
EXT4_RW_ATTR_SBI_UI(msg_ratelimit_interval_ms, s_msg_ratelimit_state.interval);
EXT4_RW_ATTR_SBI_UI(msg_ratelimit_burst, s_msg_ratelimit_state.burst);
EXT4_RO_ATTR_ES_UI(errors_count, s_error_count);
EXT4_ATTR(first_error_time, 0444, first_error_time);
EXT4_ATTR(last_error_time, 0444, last_error_time);
EXT4_ATTR(journal_task, 0444, journal_task);
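/*
 * max_writeback_mb_bump is not wired to any live filesystem state: it is
 * backed by a static dummy value, so the read-only sysfs node always
 * reports 128 (presumably kept so the node itself does not disappear).
 */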
static unsigned int old_bump_val = 128;
EXT4_ATTR_PTR(max_writeback_mb_bump, 0444, pointer_ui, &old_bump_val);

static struct attribute *ext4_attrs[] = {
	ATTR_LIST(delayed_allocation_blocks),
	ATTR_LIST(session_write_kbytes),
	ATTR_LIST(lifetime_write_kbytes),
	ATTR_LIST(reserved_clusters),
	ATTR_LIST(inode_readahead_blks),
	ATTR_LIST(inode_goal),
	ATTR_LIST(mb_stats),
	ATTR_LIST(mb_max_to_scan),
	ATTR_LIST(mb_min_to_scan),
	ATTR_LIST(mb_order2_req),
	ATTR_LIST(mb_stream_req),
	ATTR_LIST(mb_group_prealloc),
	ATTR_LIST(max_writeback_mb_bump),
	ATTR_LIST(extent_max_zeroout_kb),
	ATTR_LIST(trigger_fs_error),
	ATTR_LIST(err_ratelimit_interval_ms),
	ATTR_LIST(err_ratelimit_burst),
	ATTR_LIST(warning_ratelimit_interval_ms),
	ATTR_LIST(warning_ratelimit_burst),
	ATTR_LIST(msg_ratelimit_interval_ms),
	ATTR_LIST(msg_ratelimit_burst),
	ATTR_LIST(errors_count),
	ATTR_LIST(first_error_time),
	ATTR_LIST(last_error_time),
	ATTR_LIST(journal_task),
	NULL,
};
ATTRIBUTE_GROUPS(ext4);

/* Features this copy of ext4 supports */
EXT4_ATTR_FEATURE(lazy_itable_init);
EXT4_ATTR_FEATURE(batched_discard);
EXT4_ATTR_FEATURE(meta_bg_resize);
#ifdef CONFIG_FS_ENCRYPTION
EXT4_ATTR_FEATURE(encryption);
#endif
#ifdef CONFIG_UNICODE
EXT4_ATTR_FEATURE(casefold);
#endif
EXT4_ATTR_FEATURE(metadata_csum_seed);

static struct attribute *ext4_feat_attrs[] = {
	ATTR_LIST(lazy_itable_init),
	ATTR_LIST(batched_discard),
	ATTR_LIST(meta_bg_resize),
#ifdef CONFIG_FS_ENCRYPTION
	ATTR_LIST(encryption),
#endif
#ifdef CONFIG_UNICODE
	ATTR_LIST(casefold),
#endif
	ATTR_LIST(metadata_csum_seed),
	NULL,
};
ATTRIBUTE_GROUPS(ext4_feat);

static void *calc_ptr(struct ext4_attr *a, struct ext4_sb_info *sbi)
{
	switch (a->attr_ptr) {
	case ptr_explicit:
		return a->u.explicit_ptr;
	case ptr_ext4_sb_info_offset:
		return (void *) (((char *) sbi) + a->u.offset);
	case ptr_ext4_super_block_offset:
		return (void *) (((char *) sbi->s_es) + a->u.offset);
	}
	return NULL;
}

static ssize_t __print_tstamp(char *buf, __le32 lo, __u8 hi)
{
	return snprintf(buf, PAGE_SIZE, "%lld",
			((time64_t)hi << 32) + le32_to_cpu(lo));
}

#define print_tstamp(buf, es, tstamp) \
	__print_tstamp(buf, (es)->tstamp, (es)->tstamp ## _hi)
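/*
 * sysfs ->show()/->store() entry points. Both recover the ext4_sb_info from
 * the embedded kobject, resolve the backing pointer via calc_ptr(), and then
 * dispatch on attr_id; unhandled attribute ids simply return 0.
 */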
static ssize_t ext4_attr_show(struct kobject *kobj,
			      struct attribute *attr, char *buf)
{
	struct ext4_sb_info *sbi = container_of(kobj, struct ext4_sb_info,
						s_kobj);
	struct ext4_attr *a = container_of(attr, struct ext4_attr, attr);
	void *ptr = calc_ptr(a, sbi);

	switch (a->attr_id) {
	case attr_delayed_allocation_blocks:
		return snprintf(buf, PAGE_SIZE, "%llu\n",
				(s64) EXT4_C2B(sbi,
				percpu_counter_sum(&sbi->s_dirtyclusters_counter)));
	case attr_session_write_kbytes:
		return session_write_kbytes_show(sbi, buf);
	case attr_lifetime_write_kbytes:
		return lifetime_write_kbytes_show(sbi, buf);
	case attr_reserved_clusters:
		return snprintf(buf, PAGE_SIZE, "%llu\n",
				(unsigned long long)
				atomic64_read(&sbi->s_resv_clusters));
	case attr_inode_readahead:
	case attr_pointer_ui:
		if (!ptr)
			return 0;
		if (a->attr_ptr == ptr_ext4_super_block_offset)
			return snprintf(buf, PAGE_SIZE, "%u\n",
					le32_to_cpup(ptr));
		else
			return snprintf(buf, PAGE_SIZE, "%u\n",
					*((unsigned int *) ptr));
	case attr_pointer_atomic:
		if (!ptr)
			return 0;
		return snprintf(buf, PAGE_SIZE, "%d\n",
				atomic_read((atomic_t *) ptr));
	case attr_feature:
		return snprintf(buf, PAGE_SIZE, "supported\n");
	case attr_first_error_time:
		return print_tstamp(buf, sbi->s_es, s_first_error_time);
	case attr_last_error_time:
		return print_tstamp(buf, sbi->s_es, s_last_error_time);
	case attr_journal_task:
		return journal_task_show(sbi, buf);
	}

	return 0;
}

static ssize_t ext4_attr_store(struct kobject *kobj,
			       struct attribute *attr,
			       const char *buf, size_t len)
{
	struct ext4_sb_info *sbi = container_of(kobj, struct ext4_sb_info,
						s_kobj);
	struct ext4_attr *a = container_of(attr, struct ext4_attr, attr);
	void *ptr = calc_ptr(a, sbi);
	unsigned long t;
	int ret;

	switch (a->attr_id) {
	case attr_reserved_clusters:
		return reserved_clusters_store(sbi, buf, len);
	case attr_pointer_ui:
		if (!ptr)
			return 0;
		ret = kstrtoul(skip_spaces(buf), 0, &t);
		if (ret)
			return ret;
		if (a->attr_ptr == ptr_ext4_super_block_offset)
			*((__le32 *) ptr) = cpu_to_le32(t);
		else
			*((unsigned int *) ptr) = t;
		return len;
	case attr_inode_readahead:
		return inode_readahead_blks_store(sbi, buf, len);
	case attr_trigger_test_error:
		return trigger_test_error(sbi, buf, len);
	}
	return 0;
}

static void ext4_sb_release(struct kobject *kobj)
{
	struct ext4_sb_info *sbi = container_of(kobj, struct ext4_sb_info,
						s_kobj);
	complete(&sbi->s_kobj_unregister);
}

static const struct sysfs_ops ext4_attr_ops = {
	.show	= ext4_attr_show,
	.store	= ext4_attr_store,
};

static struct kobj_type ext4_sb_ktype = {
	.default_groups = ext4_groups,
	.sysfs_ops	= &ext4_attr_ops,
	.release	= ext4_sb_release,
};

static struct kobj_type ext4_feat_ktype = {
	.default_groups = ext4_feat_groups,
	.sysfs_ops	= &ext4_attr_ops,
	.release	= (void (*)(struct kobject *))kfree,
};

static struct kobject *ext4_root;

static struct kobject *ext4_feat;

int ext4_register_sysfs(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int err;

	init_completion(&sbi->s_kobj_unregister);
	err = kobject_init_and_add(&sbi->s_kobj, &ext4_sb_ktype, ext4_root,
				   "%s", sb->s_id);
	if (err) {
		kobject_put(&sbi->s_kobj);
		wait_for_completion(&sbi->s_kobj_unregister);
		return err;
	}

	if (ext4_proc_root)
		sbi->s_proc = proc_mkdir(sb->s_id, ext4_proc_root);
	if (sbi->s_proc) {
		proc_create_single_data("options", S_IRUGO, sbi->s_proc,
				ext4_seq_options_show, sb);
		proc_create_single_data("es_shrinker_info", S_IRUGO,
				sbi->s_proc, ext4_seq_es_shrinker_info_show,
				sb);
		proc_create_seq_data("mb_groups", S_IRUGO, sbi->s_proc,
				&ext4_mb_seq_groups_ops, sb);
	}
	return 0;
}

void ext4_unregister_sysfs(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (sbi->s_proc)
		remove_proc_subtree(sb->s_id, ext4_proc_root);
	kobject_del(&sbi->s_kobj);
}

int __init ext4_init_sysfs(void)
{
	int ret;

	ext4_root = kobject_create_and_add("ext4", fs_kobj);
	if (!ext4_root)
		return -ENOMEM;

	ext4_feat = kzalloc(sizeof(*ext4_feat), GFP_KERNEL);
	if (!ext4_feat) {
		ret = -ENOMEM;
		goto root_err;
	}

	ret = kobject_init_and_add(ext4_feat, &ext4_feat_ktype,
				   ext4_root, "features");
	if (ret)
		goto feat_err;

	ext4_proc_root = proc_mkdir(proc_dirname, NULL);
	return ret;

feat_err:
	kobject_put(ext4_feat);
	ext4_feat = NULL;
root_err:
	kobject_put(ext4_root);
	ext4_root = NULL;
	return ret;
}

void ext4_exit_sysfs(void)
{
	kobject_put(ext4_feat);
	ext4_feat = NULL;
	kobject_put(ext4_root);
	ext4_root = NULL;
	remove_proc_entry(proc_dirname, NULL);
	ext4_proc_root = NULL;
}
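/*
 * Userspace view (illustrative sketch; "sda1" stands in for whatever name
 * sb->s_id resolves to on a given system):
 *
 *	cat /sys/fs/ext4/sda1/lifetime_write_kbytes
 *	echo 32 > /sys/fs/ext4/sda1/inode_readahead_blks
 *	ls /sys/fs/ext4/features
 *	cat /proc/fs/ext4/sda1/options
 */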