xref: /openbmc/linux/fs/btrfs/tests/btrfs-tests.c (revision 31af04cd)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2013 Fusion IO.  All rights reserved.
 */

#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/magic.h>
#include "btrfs-tests.h"
#include "../ctree.h"
#include "../free-space-cache.h"
#include "../free-space-tree.h"
#include "../transaction.h"
#include "../volumes.h"
#include "../disk-io.h"
#include "../qgroup.h"

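/*
 * The selftests run against a tiny in-memory pseudo filesystem so that
 * inodes and a superblock are available without touching a real device.
 */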
static struct vfsmount *test_mnt = NULL;

static const struct super_operations btrfs_test_super_ops = {
	.alloc_inode	= btrfs_alloc_inode,
	.destroy_inode	= btrfs_test_destroy_inode,
};

static struct dentry *btrfs_test_mount(struct file_system_type *fs_type,
				       int flags, const char *dev_name,
				       void *data)
{
	return mount_pseudo(fs_type, "btrfs_test:", &btrfs_test_super_ops,
			    NULL, BTRFS_TEST_MAGIC);
}

static struct file_system_type test_type = {
	.name		= "btrfs_test_fs",
	.mount		= btrfs_test_mount,
	.kill_sb	= kill_anon_super,
};

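/*
 * Allocate an inode on the test mount.  The dummy super_operations above
 * route allocation through btrfs_alloc_inode(), so callers get a full
 * struct btrfs_inode to test against.
 */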
struct inode *btrfs_new_test_inode(void)
{
	return new_inode(test_mnt->mnt_sb);
}

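/*
 * Register and kern_mount() the pseudo filesystem.  Called once from
 * btrfs_run_sanity_tests() before any individual test runs.
 */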
static int btrfs_init_test_fs(void)
{
	int ret;

	ret = register_filesystem(&test_type);
	if (ret) {
		printk(KERN_ERR "btrfs: cannot register test file system\n");
		return ret;
	}

	test_mnt = kern_mount(&test_type);
	if (IS_ERR(test_mnt)) {
		printk(KERN_ERR "btrfs: cannot mount test file system\n");
		unregister_filesystem(&test_type);
		return PTR_ERR(test_mnt);
	}
	return 0;
}

static void btrfs_destroy_test_fs(void)
{
	kern_unmount(test_mnt);
	unregister_filesystem(&test_type);
}

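/*
 * Build a minimal fs_info for a test: allocate the fs_devices and
 * super_copy structures, then initialize only the locks, lists and
 * radix/rb trees the selftests rely on.  No device is ever opened.
 */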
struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(u32 nodesize, u32 sectorsize)
{
	struct btrfs_fs_info *fs_info = kzalloc(sizeof(struct btrfs_fs_info),
						GFP_KERNEL);

	if (!fs_info)
		return fs_info;
	fs_info->fs_devices = kzalloc(sizeof(struct btrfs_fs_devices),
				      GFP_KERNEL);
	if (!fs_info->fs_devices) {
		kfree(fs_info);
		return NULL;
	}
	fs_info->super_copy = kzalloc(sizeof(struct btrfs_super_block),
				      GFP_KERNEL);
	if (!fs_info->super_copy) {
		kfree(fs_info->fs_devices);
		kfree(fs_info);
		return NULL;
	}

	fs_info->nodesize = nodesize;
	fs_info->sectorsize = sectorsize;

	if (init_srcu_struct(&fs_info->subvol_srcu)) {
		kfree(fs_info->fs_devices);
		kfree(fs_info->super_copy);
		kfree(fs_info);
		return NULL;
	}

	spin_lock_init(&fs_info->buffer_lock);
	spin_lock_init(&fs_info->qgroup_lock);
	spin_lock_init(&fs_info->qgroup_op_lock);
	spin_lock_init(&fs_info->super_lock);
	spin_lock_init(&fs_info->fs_roots_radix_lock);
	spin_lock_init(&fs_info->tree_mod_seq_lock);
	mutex_init(&fs_info->qgroup_ioctl_lock);
	mutex_init(&fs_info->qgroup_rescan_lock);
	rwlock_init(&fs_info->tree_mod_log_lock);
	fs_info->running_transaction = NULL;
	fs_info->qgroup_tree = RB_ROOT;
	fs_info->qgroup_ulist = NULL;
	atomic64_set(&fs_info->tree_mod_seq, 0);
	INIT_LIST_HEAD(&fs_info->dirty_qgroups);
	INIT_LIST_HEAD(&fs_info->dead_roots);
	INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
	INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC);
	INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
	extent_io_tree_init(&fs_info->freed_extents[0], NULL);
	extent_io_tree_init(&fs_info->freed_extents[1], NULL);
	fs_info->pinned_extents = &fs_info->freed_extents[0];
	set_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);

	test_mnt->mnt_sb->s_fs_info = fs_info;

	return fs_info;
}

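/*
 * Undo btrfs_alloc_dummy_fs_info().  Any extent buffers still cached in
 * buffer_radix are dropped here before the rest of the structure is freed.
 */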
void btrfs_free_dummy_fs_info(struct btrfs_fs_info *fs_info)
{
	struct radix_tree_iter iter;
	void **slot;

	if (!fs_info)
		return;

	if (WARN_ON(!test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO,
			      &fs_info->fs_state)))
		return;

	test_mnt->mnt_sb->s_fs_info = NULL;

	spin_lock(&fs_info->buffer_lock);
	radix_tree_for_each_slot(slot, &fs_info->buffer_radix, &iter, 0) {
		struct extent_buffer *eb;

		eb = radix_tree_deref_slot_protected(slot, &fs_info->buffer_lock);
		if (!eb)
			continue;
		/* Shouldn't happen but that kind of thinking creates CVEs */
		if (radix_tree_exception(eb)) {
			if (radix_tree_deref_retry(eb))
				slot = radix_tree_iter_retry(&iter);
			continue;
		}
		slot = radix_tree_iter_resume(slot, &iter);
		spin_unlock(&fs_info->buffer_lock);
		free_extent_buffer_stale(eb);
		spin_lock(&fs_info->buffer_lock);
	}
	spin_unlock(&fs_info->buffer_lock);

	btrfs_free_qgroup_config(fs_info);
	btrfs_free_fs_roots(fs_info);
	cleanup_srcu_struct(&fs_info->subvol_srcu);
	kfree(fs_info->super_copy);
	kfree(fs_info->fs_devices);
	kfree(fs_info);
}

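/*
 * Free a root set up by a test.  Roots that were inserted into
 * fs_roots_radix are skipped here; btrfs_free_fs_roots() releases those
 * when the dummy fs_info is torn down.
 */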
void btrfs_free_dummy_root(struct btrfs_root *root)
{
	if (!root)
		return;
	/* Will be freed by btrfs_free_fs_roots */
	if (WARN_ON(test_bit(BTRFS_ROOT_IN_RADIX, &root->state)))
		return;
	if (root->node) {
		/* One for allocate_extent_buffer */
		free_extent_buffer(root->node);
	}
	kfree(root);
}

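/*
 * Allocate a block group cache describing the range [0, length) with its
 * own free_space_ctl, so tests can exercise the free space code without
 * anything existing on disk.
 */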
struct btrfs_block_group_cache *
btrfs_alloc_dummy_block_group(struct btrfs_fs_info *fs_info,
			      unsigned long length)
{
	struct btrfs_block_group_cache *cache;

	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
	if (!cache)
		return NULL;
	cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
					GFP_KERNEL);
	if (!cache->free_space_ctl) {
		kfree(cache);
		return NULL;
	}

	cache->key.objectid = 0;
	cache->key.offset = length;
	cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	cache->full_stripe_len = fs_info->sectorsize;
	cache->fs_info = fs_info;

	INIT_LIST_HEAD(&cache->list);
	INIT_LIST_HEAD(&cache->cluster_list);
	INIT_LIST_HEAD(&cache->bg_list);
	btrfs_init_free_space_ctl(cache);
	mutex_init(&cache->free_space_lock);

	return cache;
}

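/* Counterpart of btrfs_alloc_dummy_block_group(). */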
void btrfs_free_dummy_block_group(struct btrfs_block_group_cache *cache)
{
	if (!cache)
		return;
	__btrfs_remove_free_space_cache(cache->free_space_ctl);
	kfree(cache->free_space_ctl);
	kfree(cache);
}

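/*
 * Set up a caller-provided transaction handle: transid 1, type
 * __TRANS_DUMMY, no transaction actually running.
 */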
void btrfs_init_dummy_trans(struct btrfs_trans_handle *trans,
			    struct btrfs_fs_info *fs_info)
{
	memset(trans, 0, sizeof(*trans));
	trans->transid = 1;
	trans->type = __TRANS_DUMMY;
	trans->fs_info = fs_info;
}

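/*
 * Entry point for the sanity tests: mount the test filesystem, then run
 * every test for each sectorsize/nodesize combination (currently only
 * PAGE_SIZE sectors, with node sizes from sectorsize up to
 * BTRFS_MAX_METADATA_BLOCKSIZE).
 */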
int btrfs_run_sanity_tests(void)
{
	int ret, i;
	u32 sectorsize, nodesize;
	u32 test_sectorsize[] = {
		PAGE_SIZE,
	};
	ret = btrfs_init_test_fs();
	if (ret)
		return ret;
	for (i = 0; i < ARRAY_SIZE(test_sectorsize); i++) {
		sectorsize = test_sectorsize[i];
		for (nodesize = sectorsize;
		     nodesize <= BTRFS_MAX_METADATA_BLOCKSIZE;
		     nodesize <<= 1) {
			pr_info("BTRFS: selftest: sectorsize: %u  nodesize: %u\n",
				sectorsize, nodesize);
			ret = btrfs_test_free_space_cache(sectorsize, nodesize);
			if (ret)
				goto out;
			ret = btrfs_test_extent_buffer_operations(sectorsize,
				nodesize);
			if (ret)
				goto out;
			ret = btrfs_test_extent_io(sectorsize, nodesize);
			if (ret)
				goto out;
			ret = btrfs_test_inodes(sectorsize, nodesize);
			if (ret)
				goto out;
			ret = btrfs_test_qgroups(sectorsize, nodesize);
			if (ret)
				goto out;
			ret = btrfs_test_free_space_tree(sectorsize, nodesize);
			if (ret)
				goto out;
		}
	}
	ret = btrfs_test_extent_map();

out:
	btrfs_destroy_test_fs();
	return ret;
}