// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Oracle.  All rights reserved.
 */

#include <linux/types.h>
#include "btrfs-tests.h"
#include "../ctree.h"

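/*
 * Drain all extent maps from @em_tree and free them.  With CONFIG_BTRFS_DEBUG
 * enabled, report any extent map that still has extra references (a leak) and
 * reset its refcount so that the final free_extent_map() really releases it.
 */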
static void free_extent_map_tree(struct extent_map_tree *em_tree)
{
	struct extent_map *em;
	struct rb_node *node;

	while (!RB_EMPTY_ROOT(&em_tree->map)) {
		node = rb_first(&em_tree->map);
		em = rb_entry(node, struct extent_map, rb_node);
		remove_extent_mapping(em_tree, em);

#ifdef CONFIG_BTRFS_DEBUG
		if (refcount_read(&em->refs) != 1) {
			test_msg(
"em leak: em (start 0x%llx len 0x%llx block_start 0x%llx block_len 0x%llx) refs %d\n",
				 em->start, em->len, em->block_start,
				 em->block_len, refcount_read(&em->refs));

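			/*
			 * Force the refcount back to 1 so the
			 * free_extent_map() below actually frees it.
			 */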
			refcount_set(&em->refs, 1);
		}
#endif
		free_extent_map(em);
	}
}

/*
 * Test scenario:
 *
 * Suppose that no extent map has been loaded into memory yet.  There is a
 * file extent [0, 16K), followed by another file extent [16K, 20K).  Two dio
 * reads enter btrfs_get_extent() concurrently: t1 is reading [8K, 16K) and
 * t2 is reading [0, 8K).
 *
 *     t1                            t2
 *  btrfs_get_extent()              btrfs_get_extent()
 *    -> lookup_extent_mapping()      -> lookup_extent_mapping()
 *    -> add_extent_mapping(0, 16K)
 *    -> return em
 *                                    -> add_extent_mapping(0, 16K)
 *                                       # handle -EEXIST
 */
static void test_case_1(struct extent_map_tree *em_tree)
{
	struct extent_map *em;
	u64 start = 0;
	u64 len = SZ_8K;
	int ret;

	em = alloc_extent_map();
	if (!em)
		/* Skip the test on error. */
		return;

	/* Add [0, 16K) */
	em->start = 0;
	em->len = SZ_16K;
	em->block_start = 0;
	em->block_len = SZ_16K;
	ret = add_extent_mapping(em_tree, em, 0);
	ASSERT(ret == 0);
	free_extent_map(em);

	/* Add [16K, 20K) following [0, 16K) */
	em = alloc_extent_map();
	if (!em)
		goto out;

	em->start = SZ_16K;
	em->len = SZ_4K;
	em->block_start = SZ_32K; /* avoid merging */
	em->block_len = SZ_4K;
	ret = add_extent_mapping(em_tree, em, 0);
	ASSERT(ret == 0);
	free_extent_map(em);

	em = alloc_extent_map();
	if (!em)
		goto out;

	/* Add [0, 8K), should return [0, 16K) instead. */
	em->start = start;
	em->len = len;
	em->block_start = start;
	em->block_len = len;
	ret = btrfs_add_extent_mapping(em_tree, &em, em->start, em->len);
	if (ret)
		test_msg("case1 [%llu %llu]: ret %d\n", start, start + len, ret);
	if (em &&
	    (em->start != 0 || extent_map_end(em) != SZ_16K ||
	     em->block_start != 0 || em->block_len != SZ_16K))
		test_msg(
"case1 [%llu %llu]: ret %d returned a wrong em (start %llu len %llu block_start %llu block_len %llu)\n",
			 start, start + len, ret, em->start, em->len,
			 em->block_start, em->block_len);
	free_extent_map(em);
out:
	/* free memory */
	free_extent_map_tree(em_tree);
}

/*
 * Test scenario:
 *
 * Reading an inline extent ends up with -EEXIST, e.g. read an inline extent,
 * discard the page cache and read it again.
 */
static void test_case_2(struct extent_map_tree *em_tree)
{
	struct extent_map *em;
	int ret;

	em = alloc_extent_map();
	if (!em)
		/* Skip the test on error. */
		return;

	/* Add [0, 1K) */
	em->start = 0;
	em->len = SZ_1K;
	em->block_start = EXTENT_MAP_INLINE;
	em->block_len = (u64)-1;
	ret = add_extent_mapping(em_tree, em, 0);
	ASSERT(ret == 0);
	free_extent_map(em);

	/* Add [4K, 8K) following [0, 1K) */
	em = alloc_extent_map();
	if (!em)
		goto out;

	em->start = SZ_4K;
	em->len = SZ_4K;
	em->block_start = SZ_4K;
	em->block_len = SZ_4K;
	ret = add_extent_mapping(em_tree, em, 0);
	ASSERT(ret == 0);
	free_extent_map(em);

	em = alloc_extent_map();
	if (!em)
		goto out;

	/* Add [0, 1K) again, should return the existing [0, 1K) inline em. */
	em->start = 0;
	em->len = SZ_1K;
	em->block_start = EXTENT_MAP_INLINE;
	em->block_len = (u64)-1;
	ret = btrfs_add_extent_mapping(em_tree, &em, em->start, em->len);
	if (ret)
		test_msg("case2 [0 1K]: ret %d\n", ret);
	if (em &&
	    (em->start != 0 || extent_map_end(em) != SZ_1K ||
	     em->block_start != EXTENT_MAP_INLINE || em->block_len != (u64)-1))
		test_msg(
"case2 [0 1K]: ret %d returned a wrong em (start %llu len %llu block_start %llu block_len %llu)\n",
			 ret, em->start, em->len, em->block_start,
			 em->block_len);
	free_extent_map(em);
out:
	/* free memory */
	free_extent_map_tree(em_tree);
}

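/*
 * Helper for test_case_3: add [4K, 8K) as the buffered write (t1) would, then
 * try to add [0, 16K) while asking for the range [start, start + 4K), as the
 * dio read (t2) would after its earlier lookup found nothing, and check that
 * the extent map handed back covers the requested range.
 */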
static void __test_case_3(struct extent_map_tree *em_tree, u64 start)
{
	struct extent_map *em;
	u64 len = SZ_4K;
	int ret;

	em = alloc_extent_map();
	if (!em)
		/* Skip this test on error. */
		return;

	/* Add [4K, 8K) */
	em->start = SZ_4K;
	em->len = SZ_4K;
	em->block_start = SZ_4K;
	em->block_len = SZ_4K;
	ret = add_extent_mapping(em_tree, em, 0);
	ASSERT(ret == 0);
	free_extent_map(em);

	em = alloc_extent_map();
	if (!em)
		goto out;

	/* Add [0, 16K) */
	em->start = 0;
	em->len = SZ_16K;
	em->block_start = 0;
	em->block_len = SZ_16K;
	ret = btrfs_add_extent_mapping(em_tree, &em, start, len);
	if (ret)
		test_msg("case3 [0x%llx 0x%llx): ret %d\n",
			 start, start + len, ret);
	/*
	 * Since bytes within em are contiguous, em->block_start is identical to
	 * em->start.
	 */
	if (em &&
	    (start < em->start || start + len > extent_map_end(em) ||
	     em->start != em->block_start || em->len != em->block_len))
		test_msg(
"case3 [0x%llx 0x%llx): ret %d em (start 0x%llx len 0x%llx block_start 0x%llx block_len 0x%llx)\n",
			 start, start + len, ret, em->start, em->len,
			 em->block_start, em->block_len);
	free_extent_map(em);
out:
	/* free memory */
	free_extent_map_tree(em_tree);
}

/*
 * Test scenario:
 *
 * Suppose that no extent map has been loaded into memory yet.
 * There is a file extent [0, 16K) and two jobs are running concurrently
 * against it: t1 is doing a buffered write to [4K, 8K) and t2 is doing a
 * dio read from [0, 4K), [8K, 12K) or [12K, 16K).
 *
 * t1 goes ahead of t2 and adds em [4K, 8K) into the tree.
 *
 *         t1                       t2
 *  cow_file_range()         btrfs_get_extent()
 *                            -> lookup_extent_mapping()
 *   -> add_extent_mapping()
 *                            -> add_extent_mapping()
 */
static void test_case_3(struct extent_map_tree *em_tree)
{
	__test_case_3(em_tree, 0);
	__test_case_3(em_tree, SZ_8K);
	__test_case_3(em_tree, 12 * 1024ULL);
}

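/*
 * Helper for test_case_4: add the already split extent maps [0, 8K) and
 * [8K, 32K) as the dio write (t1) would, then try to add the stale [0, 32K)
 * mapping while asking for the range [start, start + 4K), as the dio read
 * (t2) would, and check that the extent map handed back covers that range.
 */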
static void __test_case_4(struct extent_map_tree *em_tree, u64 start)
{
	struct extent_map *em;
	u64 len = SZ_4K;
	int ret;

	em = alloc_extent_map();
	if (!em)
		/* Skip this test on error. */
		return;

	/* Add [0, 8K) */
	em->start = 0;
	em->len = SZ_8K;
	em->block_start = 0;
	em->block_len = SZ_8K;
	ret = add_extent_mapping(em_tree, em, 0);
	ASSERT(ret == 0);
	free_extent_map(em);

	em = alloc_extent_map();
	if (!em)
		goto out;

	/* Add [8K, 32K) */
	em->start = SZ_8K;
	em->len = 24 * 1024ULL;
	em->block_start = SZ_16K; /* avoid merging */
	em->block_len = 24 * 1024ULL;
	ret = add_extent_mapping(em_tree, em, 0);
	ASSERT(ret == 0);
	free_extent_map(em);

	em = alloc_extent_map();
	if (!em)
		goto out;
	/* Add [0, 32K) */
	em->start = 0;
	em->len = SZ_32K;
	em->block_start = 0;
	em->block_len = SZ_32K;
	ret = btrfs_add_extent_mapping(em_tree, &em, start, len);
	if (ret)
		test_msg("case4 [0x%llx 0x%llx): ret %d\n",
			 start, start + len, ret);
	if (em &&
	    (start < em->start || start + len > extent_map_end(em)))
		test_msg(
"case4 [0x%llx 0x%llx): ret %d, added wrong em (start 0x%llx len 0x%llx block_start 0x%llx block_len 0x%llx)\n",
			 start, start + len, ret, em->start, em->len,
			 em->block_start, em->block_len);
	free_extent_map(em);
out:
	/* free memory */
	free_extent_map_tree(em_tree);
}

/*
 * Test scenario:
 *
 * Suppose that no extent map has been loaded into memory yet.
 * There is a file extent [0, 32K) and two jobs are running concurrently
 * against it: t1 is doing a dio write to [8K, 32K) and t2 is doing a dio
 * read from [0, 4K) or [4K, 8K).
 *
 * t1 goes ahead of t2 and splits em [0, 32K) into em [0, 8K) and [8K, 32K).
 *
 *         t1                                t2
 *  btrfs_get_blocks_direct()          btrfs_get_blocks_direct()
 *   -> btrfs_get_extent()              -> btrfs_get_extent()
 *       -> lookup_extent_mapping()
 *       -> add_extent_mapping()            -> lookup_extent_mapping()
 *          # load [0, 32K)
 *   -> btrfs_new_extent_direct()
 *       -> btrfs_drop_extent_cache()
 *          # split [0, 32K)
 *       -> add_extent_mapping()
 *          # add [8K, 32K)
 *                                          -> add_extent_mapping()
 *                                             # handle -EEXIST when adding
 *                                             # [0, 32K)
 */
static void test_case_4(struct extent_map_tree *em_tree)
{
	__test_case_4(em_tree, 0);
	__test_case_4(em_tree, SZ_4K);
}

int btrfs_test_extent_map(void)
{
	struct extent_map_tree *em_tree;

	test_msg("Running extent_map tests\n");

	em_tree = kzalloc(sizeof(*em_tree), GFP_KERNEL);
	if (!em_tree)
		/* Skip the test on error. */
		return 0;

	extent_map_tree_init(em_tree);

	test_case_1(em_tree);
	test_case_2(em_tree);
	test_case_3(em_tree);
	test_case_4(em_tree);

	kfree(em_tree);
	return 0;
}