// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2013 Fusion IO. All rights reserved.
 */

#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/magic.h>
#include "btrfs-tests.h"
#include "../ctree.h"
#include "../free-space-cache.h"
#include "../free-space-tree.h"
#include "../transaction.h"
#include "../volumes.h"
#include "../disk-io.h"
#include "../qgroup.h"
#include "../block-group.h"

static struct vfsmount *test_mnt = NULL;

const char *test_error[] = {
	[TEST_ALLOC_FS_INFO] = "cannot allocate fs_info",
	[TEST_ALLOC_ROOT] = "cannot allocate root",
	[TEST_ALLOC_EXTENT_BUFFER] = "cannot allocate extent buffer",
	[TEST_ALLOC_PATH] = "cannot allocate path",
	[TEST_ALLOC_INODE] = "cannot allocate inode",
	[TEST_ALLOC_BLOCK_GROUP] = "cannot allocate block group",
	[TEST_ALLOC_EXTENT_MAP] = "cannot allocate extent map",
};

static const struct super_operations btrfs_test_super_ops = {
	.alloc_inode = btrfs_alloc_inode,
	.destroy_inode = btrfs_test_destroy_inode,
};

static int btrfs_test_init_fs_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx = init_pseudo(fc, BTRFS_TEST_MAGIC);

	if (!ctx)
		return -ENOMEM;
	ctx->ops = &btrfs_test_super_ops;
	return 0;
}

static struct file_system_type test_type = {
	.name = "btrfs_test_fs",
	.init_fs_context = btrfs_test_init_fs_context,
	.kill_sb = kill_anon_super,
};

struct inode *btrfs_new_test_inode(void)
{
	return new_inode(test_mnt->mnt_sb);
}

static int btrfs_init_test_fs(void)
{
	int ret;

	ret = register_filesystem(&test_type);
	if (ret) {
		printk(KERN_ERR "btrfs: cannot register test file system\n");
		return ret;
	}

	test_mnt = kern_mount(&test_type);
	if (IS_ERR(test_mnt)) {
		printk(KERN_ERR "btrfs: cannot mount test file system\n");
		unregister_filesystem(&test_type);
		return PTR_ERR(test_mnt);
	}
	return 0;
}

static void btrfs_destroy_test_fs(void)
{
	kern_unmount(test_mnt);
	unregister_filesystem(&test_type);
}
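
/*
 * Allocate a dummy fs_info for the self tests.  Only the locks, lists, radix
 * trees and extent io trees that the individual tests rely on are set up; no
 * real devices or on-disk structures are involved.  The result is hung off
 * the test mount's superblock and must be released again with
 * btrfs_free_dummy_fs_info().
 */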
struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(u32 nodesize, u32 sectorsize)
{
	struct btrfs_fs_info *fs_info = kzalloc(sizeof(struct btrfs_fs_info),
						GFP_KERNEL);

	if (!fs_info)
		return fs_info;
	fs_info->fs_devices = kzalloc(sizeof(struct btrfs_fs_devices),
				      GFP_KERNEL);
	if (!fs_info->fs_devices) {
		kfree(fs_info);
		return NULL;
	}
	fs_info->super_copy = kzalloc(sizeof(struct btrfs_super_block),
				      GFP_KERNEL);
	if (!fs_info->super_copy) {
		kfree(fs_info->fs_devices);
		kfree(fs_info);
		return NULL;
	}

	fs_info->nodesize = nodesize;
	fs_info->sectorsize = sectorsize;

	if (init_srcu_struct(&fs_info->subvol_srcu)) {
		kfree(fs_info->fs_devices);
		kfree(fs_info->super_copy);
		kfree(fs_info);
		return NULL;
	}

	spin_lock_init(&fs_info->buffer_lock);
	spin_lock_init(&fs_info->qgroup_lock);
	spin_lock_init(&fs_info->super_lock);
	spin_lock_init(&fs_info->fs_roots_radix_lock);
	spin_lock_init(&fs_info->tree_mod_seq_lock);
	mutex_init(&fs_info->qgroup_ioctl_lock);
	mutex_init(&fs_info->qgroup_rescan_lock);
	rwlock_init(&fs_info->tree_mod_log_lock);
	fs_info->running_transaction = NULL;
	fs_info->qgroup_tree = RB_ROOT;
	fs_info->qgroup_ulist = NULL;
	atomic64_set(&fs_info->tree_mod_seq, 0);
	INIT_LIST_HEAD(&fs_info->dirty_qgroups);
	INIT_LIST_HEAD(&fs_info->dead_roots);
	INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
	INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC);
	INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
	extent_io_tree_init(fs_info, &fs_info->freed_extents[0],
			    IO_TREE_FS_INFO_FREED_EXTENTS0, NULL);
	extent_io_tree_init(fs_info, &fs_info->freed_extents[1],
			    IO_TREE_FS_INFO_FREED_EXTENTS1, NULL);
	fs_info->pinned_extents = &fs_info->freed_extents[0];
	set_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);

	test_mnt->mnt_sb->s_fs_info = fs_info;

	return fs_info;
}

void btrfs_free_dummy_fs_info(struct btrfs_fs_info *fs_info)
{
	struct radix_tree_iter iter;
	void **slot;

	if (!fs_info)
		return;

	if (WARN_ON(!test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO,
			      &fs_info->fs_state)))
		return;

	test_mnt->mnt_sb->s_fs_info = NULL;

	spin_lock(&fs_info->buffer_lock);
	radix_tree_for_each_slot(slot, &fs_info->buffer_radix, &iter, 0) {
		struct extent_buffer *eb;

		eb = radix_tree_deref_slot_protected(slot, &fs_info->buffer_lock);
		if (!eb)
			continue;
		/* Shouldn't happen but that kind of thinking creates CVE's */
		if (radix_tree_exception(eb)) {
			if (radix_tree_deref_retry(eb))
				slot = radix_tree_iter_retry(&iter);
			continue;
		}
		slot = radix_tree_iter_resume(slot, &iter);
		spin_unlock(&fs_info->buffer_lock);
		free_extent_buffer_stale(eb);
		spin_lock(&fs_info->buffer_lock);
	}
	spin_unlock(&fs_info->buffer_lock);

	btrfs_free_qgroup_config(fs_info);
	btrfs_free_fs_roots(fs_info);
	cleanup_srcu_struct(&fs_info->subvol_srcu);
	kfree(fs_info->super_copy);
	kfree(fs_info->fs_devices);
	kfree(fs_info);
}

void btrfs_free_dummy_root(struct btrfs_root *root)
{
	if (!root)
		return;
	/* Will be freed by btrfs_free_fs_roots */
	if (WARN_ON(test_bit(BTRFS_ROOT_IN_RADIX, &root->state)))
		return;
	if (root->node) {
		/* One for allocate_extent_buffer */
		free_extent_buffer(root->node);
	}
	kfree(root);
}

struct btrfs_block_group_cache *
btrfs_alloc_dummy_block_group(struct btrfs_fs_info *fs_info,
			      unsigned long length)
{
	struct btrfs_block_group_cache *cache;

	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
	if (!cache)
		return NULL;
	cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
					GFP_KERNEL);
	if (!cache->free_space_ctl) {
		kfree(cache);
		return NULL;
	}

	cache->key.objectid = 0;
	cache->key.offset = length;
	cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	cache->full_stripe_len = fs_info->sectorsize;
	cache->fs_info = fs_info;

	INIT_LIST_HEAD(&cache->list);
	INIT_LIST_HEAD(&cache->cluster_list);
	INIT_LIST_HEAD(&cache->bg_list);
	btrfs_init_free_space_ctl(cache);
	mutex_init(&cache->free_space_lock);

	return cache;
}

void btrfs_free_dummy_block_group(struct btrfs_block_group_cache *cache)
{
	if (!cache)
		return;
	__btrfs_remove_free_space_cache(cache->free_space_ctl);
	kfree(cache->free_space_ctl);
	kfree(cache);
}

void btrfs_init_dummy_trans(struct btrfs_trans_handle *trans,
			    struct btrfs_fs_info *fs_info)
{
	memset(trans, 0, sizeof(*trans));
	trans->transid = 1;
	trans->type = __TRANS_DUMMY;
	trans->fs_info = fs_info;
}
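
/*
 * Entry point for the btrfs self tests: bring up the dummy test filesystem,
 * run every test group for each supported sectorsize/nodesize combination
 * (sectorsize is currently just PAGE_SIZE; nodesize doubles from sectorsize
 * up to BTRFS_MAX_METADATA_BLOCKSIZE), then tear the test filesystem down
 * again.  A non-zero return from any test aborts the remaining run.
 */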
int btrfs_run_sanity_tests(void)
{
	int ret, i;
	u32 sectorsize, nodesize;
	u32 test_sectorsize[] = {
		PAGE_SIZE,
	};

	ret = btrfs_init_test_fs();
	if (ret)
		return ret;
	for (i = 0; i < ARRAY_SIZE(test_sectorsize); i++) {
		sectorsize = test_sectorsize[i];
		for (nodesize = sectorsize;
		     nodesize <= BTRFS_MAX_METADATA_BLOCKSIZE;
		     nodesize <<= 1) {
			pr_info("BTRFS: selftest: sectorsize: %u nodesize: %u\n",
				sectorsize, nodesize);
			ret = btrfs_test_free_space_cache(sectorsize, nodesize);
			if (ret)
				goto out;
			ret = btrfs_test_extent_buffer_operations(sectorsize,
								  nodesize);
			if (ret)
				goto out;
			ret = btrfs_test_extent_io(sectorsize, nodesize);
			if (ret)
				goto out;
			ret = btrfs_test_inodes(sectorsize, nodesize);
			if (ret)
				goto out;
			ret = btrfs_test_qgroups(sectorsize, nodesize);
			if (ret)
				goto out;
			ret = btrfs_test_free_space_tree(sectorsize, nodesize);
			if (ret)
				goto out;
		}
	}
	ret = btrfs_test_extent_map();

out:
	btrfs_destroy_test_fs();
	return ret;
}
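
/*
 * Caller-side sketch (not part of this file): with
 * CONFIG_BTRFS_FS_RUN_SANITY_TESTS enabled, the self tests are expected to
 * be run once during btrfs module initialization, roughly along these lines;
 * the actual call site lives in the module init path and may differ in
 * detail:
 *
 *	ret = btrfs_run_sanity_tests();
 *	if (ret)
 *		goto out_cleanup;
 */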