xref: /openbmc/linux/fs/ext4/extents_status.c (revision 97da55fc)
1 /*
2  *  fs/ext4/extents_status.c
3  *
4  * Written by Yongqiang Yang <xiaoqiangnk@gmail.com>
5  * Modified by
6  *	Allison Henderson <achender@linux.vnet.ibm.com>
7  *	Hugh Dickins <hughd@google.com>
8  *	Zheng Liu <wenqing.lz@taobao.com>
9  *
10  * Ext4 extents status tree core functions.
11  */
12 #include <linux/rbtree.h>
13 #include "ext4.h"
14 #include "extents_status.h"
15 #include "ext4_extents.h"
16 
17 #include <trace/events/ext4.h>
18 
19 /*
20  * According to previous discussion at the Ext4 Developer Workshop, we
21  * will introduce a new structure called the io tree to track all extent
22  * status in order to solve some problems that we have met
23  * (e.g. reservation space warning), and to provide extent-level locking.
24  * The delay extent tree is the first step to achieve this goal.  It was
25  * originally built by Yongqiang Yang.  At that time it was called the
26  * delay extent tree, whose only goal was to track delayed extents in
27  * memory to simplify the implementation of fiemap and bigalloc, and to
28  * introduce lseek SEEK_DATA/SEEK_HOLE support.  That is why it was
29  * still called the delay extent tree in the first commit.  But to better
30  * reflect what it does, it has been renamed to the extent status tree.
31  *
32  * Step1:
33  * Currently the first step has been done.  All delayed extents are
34  * tracked in the tree.  The tree is updated when a delayed allocation
35  * is issued, and when the delayed extent is written out or
36  * invalidated.  Therefore the implementations of fiemap and bigalloc
37  * are simplified, and SEEK_DATA/SEEK_HOLE are introduced.
38  *
39  * The following comment describes the implementation of the extent
40  * status tree and future work.
41  *
42  * Step2:
43  * In this step all extent status is tracked by the extent status tree.
44  * Thus, we can first try to look up a block mapping in this tree before
45  * searching the extent tree.  Hence, the single extent cache can be
46  * removed because the extent status tree can do a better job.  Extents
47  * in the status tree are loaded on demand, so the tree may not contain
48  * all of the extents in a file.  Meanwhile we define a shrinker to
49  * reclaim memory from the extent status tree because a fragmented
50  * extent tree will make the status tree cost too much memory.
51  * Written/unwritten/hole extents in the tree will be reclaimed by this
52  * shrinker when we are under heavy memory pressure.  Delayed extents will
53  * not be reclaimed because fiemap, bigalloc, and seek_data/hole need them.
54  */
55 
56 /*
57  * Extent status tree implementation for ext4.
58  *
59  *
60  * ==========================================================================
61  * Extent status tree tracks all extent status.
62  *
63  * 1. Why do we need to implement an extent status tree?
64  *
65  * Without an extent status tree, ext4 identifies a delayed extent by
66  * looking up the page cache, which leads to complicated, buggy, and
67  * inefficient code.
68  *
69  * FIEMAP, SEEK_HOLE/DATA, bigalloc, and writeout all need to know whether
70  * a block or a range of blocks belongs to a delayed extent.
71  *
72  * Let us have a look at how they work without an extent status tree.
73  *   --	FIEMAP
74  *	FIEMAP looks up the page cache to tell delayed allocations from holes.
75  *
76  *   --	SEEK_HOLE/DATA
77  *	SEEK_HOLE/DATA has the same problem as FIEMAP.
78  *
79  *   --	bigalloc
80  *	bigalloc looks up the page cache to figure out whether a block is
81  *	already under delayed allocation in order to determine whether
82  *	quota reservation is needed for the cluster.
83  *
84  *   --	writeout
85  *	Writeout looks up the whole page cache to see if a buffer is
86  *	mapped.  If there are not very many delayed buffers, this is
87  *	time consuming.
88  *
89  * With the extent status tree implementation, FIEMAP, SEEK_HOLE/DATA,
90  * bigalloc and writeout can figure out whether a block or a range of
91  * blocks is under delayed allocation (i.e. belongs to a delayed extent)
92  * by searching the extent status tree.
93  *
94  *
95  * ==========================================================================
96  * 2. Ext4 extent status tree implementation
97  *
98  *   --	extent
99  *	An extent is a range of blocks which are logically and physically
100  *	contiguous.  Unlike an extent in the extent tree, this extent is an
101  *	in-memory struct; there is no corresponding on-disk data.  There
102  *	is no limit on the length of an extent, so an extent can contain
103  *	as many blocks as are logically and physically contiguous.
104  *
105  *   --	extent status tree
106  *	Every inode has an extent status tree, and all allocated blocks
107  *	are added to the tree with their status.  The extents in the
108  *	tree are ordered by logical block number.
109  *
110  *   --	operations on an extent status tree
111  *	There are three important operations on an extent status tree: find
112  *	the next extent, add an extent (a range of blocks), and remove an extent.
113  *
114  *   --	races on an extent status tree
115  *	Extent status tree is protected by inode->i_es_lock.
116  *
117  *   --	memory consumption
118  *      A fragmented extent tree will make the extent status tree cost too
119  *      much memory.  Hence, we will reclaim written/unwritten/hole extents
120  *      from the tree under heavy memory pressure.
121  *
122  *
123  * ==========================================================================
124  * 3. Performance analysis
125  *
126  *   --	overhead
127  *	1. There is a cached extent for write access, so if writes are
128  *	not very random, add-space operations are done in O(1) time.
129  *
130  *   --	gain
131  *	2. Code is much simpler, more readable, more maintainable and
132  *	more efficient.
133  *
134  *
135  * ==========================================================================
136  * 4. TODO list
137  *
138  *   -- Refactor delayed space reservation
139  *
140  *   -- Extent-level locking
141  */
142 
143 static struct kmem_cache *ext4_es_cachep;
144 
145 static int __es_insert_extent(struct inode *inode, struct extent_status *newes);
146 static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
147 			      ext4_lblk_t end);
148 static int __es_try_to_reclaim_extents(struct ext4_inode_info *ei,
149 				       int nr_to_scan);
150 
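/*
 * Create the slab cache used to allocate struct extent_status objects.
 * Called once during ext4 initialization; ext4_exit_es() destroys the
 * cache again.
 */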
151 int __init ext4_init_es(void)
152 {
153 	ext4_es_cachep = kmem_cache_create("ext4_extent_status",
154 					   sizeof(struct extent_status),
155 					   0, (SLAB_RECLAIM_ACCOUNT), NULL);
156 	if (ext4_es_cachep == NULL)
157 		return -ENOMEM;
158 	return 0;
159 }
160 
161 void ext4_exit_es(void)
162 {
163 	if (ext4_es_cachep)
164 		kmem_cache_destroy(ext4_es_cachep);
165 }
166 
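/*
 * Initialize an inode's extent status tree: an empty rbtree with no
 * cached extent.
 */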
167 void ext4_es_init_tree(struct ext4_es_tree *tree)
168 {
169 	tree->root = RB_ROOT;
170 	tree->cache_es = NULL;
171 }
172 
173 #ifdef ES_DEBUG__
174 static void ext4_es_print_tree(struct inode *inode)
175 {
176 	struct ext4_es_tree *tree;
177 	struct rb_node *node;
178 
179 	printk(KERN_DEBUG "status extents for inode %lu:", inode->i_ino);
180 	tree = &EXT4_I(inode)->i_es_tree;
181 	node = rb_first(&tree->root);
182 	while (node) {
183 		struct extent_status *es;
184 		es = rb_entry(node, struct extent_status, rb_node);
185 		printk(KERN_DEBUG " [%u/%u) %llu %llx",
186 		       es->es_lblk, es->es_len,
187 		       ext4_es_pblock(es), ext4_es_status(es));
188 		node = rb_next(node);
189 	}
190 	printk(KERN_DEBUG "\n");
191 }
192 #else
193 #define ext4_es_print_tree(inode)
194 #endif
195 
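/* Return the last logical block covered by the extent @es. */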
196 static inline ext4_lblk_t ext4_es_end(struct extent_status *es)
197 {
198 	BUG_ON(es->es_lblk + es->es_len < es->es_lblk);
199 	return es->es_lblk + es->es_len - 1;
200 }
201 
202 /*
203  * Search through the tree for an extent covering a given offset.  If
204  * one can't be found, return the next extent after that offset.
205  */
206 static struct extent_status *__es_tree_search(struct rb_root *root,
207 					      ext4_lblk_t lblk)
208 {
209 	struct rb_node *node = root->rb_node;
210 	struct extent_status *es = NULL;
211 
212 	while (node) {
213 		es = rb_entry(node, struct extent_status, rb_node);
214 		if (lblk < es->es_lblk)
215 			node = node->rb_left;
216 		else if (lblk > ext4_es_end(es))
217 			node = node->rb_right;
218 		else
219 			return es;
220 	}
221 
222 	if (es && lblk < es->es_lblk)
223 		return es;
224 
225 	if (es && lblk > ext4_es_end(es)) {
226 		node = rb_next(&es->rb_node);
227 		return node ? rb_entry(node, struct extent_status, rb_node) :
228 			      NULL;
229 	}
230 
231 	return NULL;
232 }
233 
234 /*
235  * ext4_es_find_delayed_extent: find the 1st delayed extent covering @lblk
236  * if it exists, otherwise the first delayed extent after @lblk.
237  *
238  * @inode: the inode which owns delayed extents
239  * @lblk: the offset where we start to search
240  * @es: delayed extent that we found
241  */
242 void ext4_es_find_delayed_extent(struct inode *inode, ext4_lblk_t lblk,
243 				 struct extent_status *es)
244 {
245 	struct ext4_es_tree *tree = NULL;
246 	struct extent_status *es1 = NULL;
247 	struct rb_node *node;
248 
249 	BUG_ON(es == NULL);
250 	trace_ext4_es_find_delayed_extent_enter(inode, lblk);
251 
252 	read_lock(&EXT4_I(inode)->i_es_lock);
253 	tree = &EXT4_I(inode)->i_es_tree;
254 
255 	/* find the extent in the cache first */
256 	es->es_lblk = es->es_len = es->es_pblk = 0;
257 	if (tree->cache_es) {
258 		es1 = tree->cache_es;
259 		if (in_range(lblk, es1->es_lblk, es1->es_len)) {
260 			es_debug("%u cached by [%u/%u) %llu %llx\n",
261 				 lblk, es1->es_lblk, es1->es_len,
262 				 ext4_es_pblock(es1), ext4_es_status(es1));
263 			goto out;
264 		}
265 	}
266 
267 	es1 = __es_tree_search(&tree->root, lblk);
268 
269 out:
270 	if (es1 && !ext4_es_is_delayed(es1)) {
271 		while ((node = rb_next(&es1->rb_node)) != NULL) {
272 			es1 = rb_entry(node, struct extent_status, rb_node);
273 			if (ext4_es_is_delayed(es1))
274 				break;
275 		}
276 	}
277 
278 	if (es1 && ext4_es_is_delayed(es1)) {
279 		tree->cache_es = es1;
280 		es->es_lblk = es1->es_lblk;
281 		es->es_len = es1->es_len;
282 		es->es_pblk = es1->es_pblk;
283 	}
284 
285 	read_unlock(&EXT4_I(inode)->i_es_lock);
286 
287 	ext4_es_lru_add(inode);
288 	trace_ext4_es_find_delayed_extent_exit(inode, es);
289 }
290 
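/*
 * Allocate and initialize an extent_status object.  Written, unwritten
 * and hole extents are counted in i_es_lru_nr and in the per-sb
 * s_extent_cache_cnt counter so the shrinker knows how much is
 * reclaimable; delayed extents are never reclaimed, so they are not
 * counted.
 */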
291 static struct extent_status *
292 ext4_es_alloc_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len,
293 		     ext4_fsblk_t pblk)
294 {
295 	struct extent_status *es;
296 	es = kmem_cache_alloc(ext4_es_cachep, GFP_ATOMIC);
297 	if (es == NULL)
298 		return NULL;
299 	es->es_lblk = lblk;
300 	es->es_len = len;
301 	es->es_pblk = pblk;
302 
303 	/*
304 	 * We don't count delayed extents because we never try to reclaim them
305 	 */
306 	if (!ext4_es_is_delayed(es)) {
307 		EXT4_I(inode)->i_es_lru_nr++;
308 		percpu_counter_inc(&EXT4_SB(inode->i_sb)->s_extent_cache_cnt);
309 	}
310 
311 	return es;
312 }
313 
314 static void ext4_es_free_extent(struct inode *inode, struct extent_status *es)
315 {
316 	/* Decrease the lru counter when this es is not delayed */
317 	if (!ext4_es_is_delayed(es)) {
318 		BUG_ON(EXT4_I(inode)->i_es_lru_nr == 0);
319 		EXT4_I(inode)->i_es_lru_nr--;
320 		percpu_counter_dec(&EXT4_SB(inode->i_sb)->s_extent_cache_cnt);
321 	}
322 
323 	kmem_cache_free(ext4_es_cachep, es);
324 }
325 
326 /*
327  * Check whether or not two extents can be merged
328  * Conditions:
329  *  - logical block numbers are contiguous
330  *  - physical block numbers are contiguous
331  *  - status is equal
332  */
333 static int ext4_es_can_be_merged(struct extent_status *es1,
334 				 struct extent_status *es2)
335 {
336 	if (es1->es_lblk + es1->es_len != es2->es_lblk)
337 		return 0;
338 
339 	if (ext4_es_status(es1) != ext4_es_status(es2))
340 		return 0;
341 
342 	if ((ext4_es_is_written(es1) || ext4_es_is_unwritten(es1)) &&
343 	    (ext4_es_pblock(es1) + es1->es_len != ext4_es_pblock(es2)))
344 		return 0;
345 
346 	return 1;
347 }
348 
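/*
 * Try to merge @es with the extent immediately before it in the tree.
 * On success the predecessor absorbs @es, @es is freed and the merged
 * extent is returned; otherwise @es is returned unchanged.
 */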
349 static struct extent_status *
350 ext4_es_try_to_merge_left(struct inode *inode, struct extent_status *es)
351 {
352 	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
353 	struct extent_status *es1;
354 	struct rb_node *node;
355 
356 	node = rb_prev(&es->rb_node);
357 	if (!node)
358 		return es;
359 
360 	es1 = rb_entry(node, struct extent_status, rb_node);
361 	if (ext4_es_can_be_merged(es1, es)) {
362 		es1->es_len += es->es_len;
363 		rb_erase(&es->rb_node, &tree->root);
364 		ext4_es_free_extent(inode, es);
365 		es = es1;
366 	}
367 
368 	return es;
369 }
370 
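/*
 * Try to merge @es with the extent immediately after it in the tree.
 * On success @es absorbs its successor and the successor is freed.
 * Returns the (possibly enlarged) @es.
 */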
371 static struct extent_status *
372 ext4_es_try_to_merge_right(struct inode *inode, struct extent_status *es)
373 {
374 	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
375 	struct extent_status *es1;
376 	struct rb_node *node;
377 
378 	node = rb_next(&es->rb_node);
379 	if (!node)
380 		return es;
381 
382 	es1 = rb_entry(node, struct extent_status, rb_node);
383 	if (ext4_es_can_be_merged(es, es1)) {
384 		es->es_len += es1->es_len;
385 		rb_erase(node, &tree->root);
386 		ext4_es_free_extent(inode, es1);
387 	}
388 
389 	return es;
390 }
391 
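/*
 * Insert @newes into the tree.  If @newes is adjacent and compatible
 * with an existing extent, that extent is extended (and merged further
 * with its neighbour) instead of allocating a new node.  The caller
 * must hold i_es_lock for writing, and the range covered by @newes
 * must not overlap anything already in the tree.
 */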
392 static int __es_insert_extent(struct inode *inode, struct extent_status *newes)
393 {
394 	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
395 	struct rb_node **p = &tree->root.rb_node;
396 	struct rb_node *parent = NULL;
397 	struct extent_status *es;
398 
399 	while (*p) {
400 		parent = *p;
401 		es = rb_entry(parent, struct extent_status, rb_node);
402 
403 		if (newes->es_lblk < es->es_lblk) {
404 			if (ext4_es_can_be_merged(newes, es)) {
405 				/*
406 				 * Here we can modify es_lblk directly
407 				 * because it isn't overlapped.
408 				 */
409 				es->es_lblk = newes->es_lblk;
410 				es->es_len += newes->es_len;
411 				if (ext4_es_is_written(es) ||
412 				    ext4_es_is_unwritten(es))
413 					ext4_es_store_pblock(es,
414 							     newes->es_pblk);
415 				es = ext4_es_try_to_merge_left(inode, es);
416 				goto out;
417 			}
418 			p = &(*p)->rb_left;
419 		} else if (newes->es_lblk > ext4_es_end(es)) {
420 			if (ext4_es_can_be_merged(es, newes)) {
421 				es->es_len += newes->es_len;
422 				es = ext4_es_try_to_merge_right(inode, es);
423 				goto out;
424 			}
425 			p = &(*p)->rb_right;
426 		} else {
427 			BUG_ON(1);
428 			return -EINVAL;
429 		}
430 	}
431 
432 	es = ext4_es_alloc_extent(inode, newes->es_lblk, newes->es_len,
433 				  newes->es_pblk);
434 	if (!es)
435 		return -ENOMEM;
436 	rb_link_node(&es->rb_node, parent, p);
437 	rb_insert_color(&es->rb_node, &tree->root);
438 
439 out:
440 	tree->cache_es = es;
441 	return 0;
442 }
443 
444 /*
445  * ext4_es_insert_extent() adds a range of blocks to an extent status tree.
446  *
447  * ext4_es_insert_extent is called by ext4_da_write_begin and
448  * ext4_es_remove_extent.
449  *
450  * Return 0 on success, error code on failure.
451  */
452 int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
453 			  ext4_lblk_t len, ext4_fsblk_t pblk,
454 			  unsigned long long status)
455 {
456 	struct extent_status newes;
457 	ext4_lblk_t end = lblk + len - 1;
458 	int err = 0;
459 
460 	es_debug("add [%u/%u) %llu %llx to extent status tree of inode %lu\n",
461 		 lblk, len, pblk, status, inode->i_ino);
462 
463 	if (!len)
464 		return 0;
465 
466 	BUG_ON(end < lblk);
467 
468 	newes.es_lblk = lblk;
469 	newes.es_len = len;
470 	ext4_es_store_pblock(&newes, pblk);
471 	ext4_es_store_status(&newes, status);
472 	trace_ext4_es_insert_extent(inode, &newes);
473 
474 	write_lock(&EXT4_I(inode)->i_es_lock);
475 	err = __es_remove_extent(inode, lblk, end);
476 	if (err != 0)
477 		goto error;
478 	err = __es_insert_extent(inode, &newes);
479 
480 error:
481 	write_unlock(&EXT4_I(inode)->i_es_lock);
482 
483 	ext4_es_lru_add(inode);
484 	ext4_es_print_tree(inode);
485 
486 	return err;
487 }
488 
489 /*
490  * ext4_es_lookup_extent() looks up an extent in extent status tree.
491  * ext4_es_lookup_extent() looks up an extent in the extent status tree.
492  * ext4_es_lookup_extent is called by ext4_map_blocks/ext4_da_map_blocks.
493  *
494  * Return: 1 on found, 0 on not
495  * Return: 1 if found, 0 if not
496 int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk,
497 			  struct extent_status *es)
498 {
499 	struct ext4_es_tree *tree;
500 	struct extent_status *es1 = NULL;
501 	struct rb_node *node;
502 	int found = 0;
503 
504 	trace_ext4_es_lookup_extent_enter(inode, lblk);
505 	es_debug("lookup extent in block %u\n", lblk);
506 
507 	tree = &EXT4_I(inode)->i_es_tree;
508 	read_lock(&EXT4_I(inode)->i_es_lock);
509 
510 	/* find the extent in the cache first */
511 	es->es_lblk = es->es_len = es->es_pblk = 0;
512 	if (tree->cache_es) {
513 		es1 = tree->cache_es;
514 		if (in_range(lblk, es1->es_lblk, es1->es_len)) {
515 			es_debug("%u cached by [%u/%u)\n",
516 				 lblk, es1->es_lblk, es1->es_len);
517 			found = 1;
518 			goto out;
519 		}
520 	}
521 
522 	node = tree->root.rb_node;
523 	while (node) {
524 		es1 = rb_entry(node, struct extent_status, rb_node);
525 		if (lblk < es1->es_lblk)
526 			node = node->rb_left;
527 		else if (lblk > ext4_es_end(es1))
528 			node = node->rb_right;
529 		else {
530 			found = 1;
531 			break;
532 		}
533 	}
534 
535 out:
536 	if (found) {
537 		BUG_ON(!es1);
538 		es->es_lblk = es1->es_lblk;
539 		es->es_len = es1->es_len;
540 		es->es_pblk = es1->es_pblk;
541 	}
542 
543 	read_unlock(&EXT4_I(inode)->i_es_lock);
544 
545 	ext4_es_lru_add(inode);
546 	trace_ext4_es_lookup_extent_exit(inode, es, found);
547 	return found;
548 }
549 
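/*
 * Remove the blocks in the range [lblk, end] from the tree.  Extents
 * that partially overlap the range are trimmed, and an extent that
 * fully contains the range is split in two, which may require
 * inserting a new node.  The caller must hold i_es_lock for writing.
 */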
550 static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
551 			      ext4_lblk_t end)
552 {
553 	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
554 	struct rb_node *node;
555 	struct extent_status *es;
556 	struct extent_status orig_es;
557 	ext4_lblk_t len1, len2;
558 	ext4_fsblk_t block;
559 	int err = 0;
560 
561 	es = __es_tree_search(&tree->root, lblk);
562 	if (!es)
563 		goto out;
564 	if (es->es_lblk > end)
565 		goto out;
566 
567 	/* Simply invalidate cache_es. */
568 	tree->cache_es = NULL;
569 
570 	orig_es.es_lblk = es->es_lblk;
571 	orig_es.es_len = es->es_len;
572 	orig_es.es_pblk = es->es_pblk;
573 
574 	len1 = lblk > es->es_lblk ? lblk - es->es_lblk : 0;
575 	len2 = ext4_es_end(es) > end ? ext4_es_end(es) - end : 0;
576 	if (len1 > 0)
577 		es->es_len = len1;
578 	if (len2 > 0) {
579 		if (len1 > 0) {
580 			struct extent_status newes;
581 
582 			newes.es_lblk = end + 1;
583 			newes.es_len = len2;
584 			if (ext4_es_is_written(&orig_es) ||
585 			    ext4_es_is_unwritten(&orig_es)) {
586 				block = ext4_es_pblock(&orig_es) +
587 					orig_es.es_len - len2;
588 				ext4_es_store_pblock(&newes, block);
589 			}
590 			ext4_es_store_status(&newes, ext4_es_status(&orig_es));
591 			err = __es_insert_extent(inode, &newes);
592 			if (err) {
593 				es->es_lblk = orig_es.es_lblk;
594 				es->es_len = orig_es.es_len;
595 				goto out;
596 			}
597 		} else {
598 			es->es_lblk = end + 1;
599 			es->es_len = len2;
600 			if (ext4_es_is_written(es) ||
601 			    ext4_es_is_unwritten(es)) {
602 				block = orig_es.es_pblk + orig_es.es_len - len2;
603 				ext4_es_store_pblock(es, block);
604 			}
605 		}
606 		goto out;
607 	}
608 
609 	if (len1 > 0) {
610 		node = rb_next(&es->rb_node);
611 		if (node)
612 			es = rb_entry(node, struct extent_status, rb_node);
613 		else
614 			es = NULL;
615 	}
616 
617 	while (es && ext4_es_end(es) <= end) {
618 		node = rb_next(&es->rb_node);
619 		rb_erase(&es->rb_node, &tree->root);
620 		ext4_es_free_extent(inode, es);
621 		if (!node) {
622 			es = NULL;
623 			break;
624 		}
625 		es = rb_entry(node, struct extent_status, rb_node);
626 	}
627 
628 	if (es && es->es_lblk < end + 1) {
629 		ext4_lblk_t orig_len = es->es_len;
630 
631 		len1 = ext4_es_end(es) - end;
632 		es->es_lblk = end + 1;
633 		es->es_len = len1;
634 		if (ext4_es_is_written(es) || ext4_es_is_unwritten(es)) {
635 			block = es->es_pblk + orig_len - len1;
636 			ext4_es_store_pblock(es, block);
637 		}
638 	}
639 
640 out:
641 	return err;
642 }
643 
644 /*
645  * ext4_es_remove_extent() removes a range of blocks from an extent status tree.
646  *
647  * Return 0 on success, error code on failure.
648  */
649 int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
650 			  ext4_lblk_t len)
651 {
652 	ext4_lblk_t end;
653 	int err = 0;
654 
655 	trace_ext4_es_remove_extent(inode, lblk, len);
656 	es_debug("remove [%u/%u) from extent status tree of inode %lu\n",
657 		 lblk, len, inode->i_ino);
658 
659 	if (!len)
660 		return err;
661 
662 	end = lblk + len - 1;
663 	BUG_ON(end < lblk);
664 
665 	write_lock(&EXT4_I(inode)->i_es_lock);
666 	err = __es_remove_extent(inode, lblk, end);
667 	write_unlock(&EXT4_I(inode)->i_es_lock);
668 	ext4_es_print_tree(inode);
669 	return err;
670 }
671 
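/*
 * Shrinker callback.  Walk the per-superblock LRU list of inodes and
 * reclaim non-delayed extents from their status trees until nr_to_scan
 * extents have been freed or the list is exhausted.  Returns the number
 * of reclaimable extents still cached.
 */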
672 static int ext4_es_shrink(struct shrinker *shrink, struct shrink_control *sc)
673 {
674 	struct ext4_sb_info *sbi = container_of(shrink,
675 					struct ext4_sb_info, s_es_shrinker);
676 	struct ext4_inode_info *ei;
677 	struct list_head *cur, *tmp, scanned;
678 	int nr_to_scan = sc->nr_to_scan;
679 	int ret, nr_shrunk = 0;
680 
681 	ret = percpu_counter_read_positive(&sbi->s_extent_cache_cnt);
682 	trace_ext4_es_shrink_enter(sbi->s_sb, nr_to_scan, ret);
683 
684 	if (!nr_to_scan)
685 		return ret;
686 
687 	INIT_LIST_HEAD(&scanned);
688 
689 	spin_lock(&sbi->s_es_lru_lock);
690 	list_for_each_safe(cur, tmp, &sbi->s_es_lru) {
691 		list_move_tail(cur, &scanned);
692 
693 		ei = list_entry(cur, struct ext4_inode_info, i_es_lru);
694 
695 		read_lock(&ei->i_es_lock);
696 		if (ei->i_es_lru_nr == 0) {
697 			read_unlock(&ei->i_es_lock);
698 			continue;
699 		}
700 		read_unlock(&ei->i_es_lock);
701 
702 		write_lock(&ei->i_es_lock);
703 		ret = __es_try_to_reclaim_extents(ei, nr_to_scan);
704 		write_unlock(&ei->i_es_lock);
705 
706 		nr_shrunk += ret;
707 		nr_to_scan -= ret;
708 		if (nr_to_scan == 0)
709 			break;
710 	}
711 	list_splice_tail(&scanned, &sbi->s_es_lru);
712 	spin_unlock(&sbi->s_es_lru_lock);
713 
714 	ret = percpu_counter_read_positive(&sbi->s_extent_cache_cnt);
715 	trace_ext4_es_shrink_exit(sbi->s_sb, nr_shrunk, ret);
716 	return ret;
717 }
718 
719 void ext4_es_register_shrinker(struct super_block *sb)
720 {
721 	struct ext4_sb_info *sbi;
722 
723 	sbi = EXT4_SB(sb);
724 	INIT_LIST_HEAD(&sbi->s_es_lru);
725 	spin_lock_init(&sbi->s_es_lru_lock);
726 	sbi->s_es_shrinker.shrink = ext4_es_shrink;
727 	sbi->s_es_shrinker.seeks = DEFAULT_SEEKS;
728 	register_shrinker(&sbi->s_es_shrinker);
729 }
730 
731 void ext4_es_unregister_shrinker(struct super_block *sb)
732 {
733 	unregister_shrinker(&EXT4_SB(sb)->s_es_shrinker);
734 }
735 
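/*
 * Move an inode to the tail of the superblock's extent status LRU list
 * (i.e. mark it most recently used), adding it if it is not yet on the
 * list.
 */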
736 void ext4_es_lru_add(struct inode *inode)
737 {
738 	struct ext4_inode_info *ei = EXT4_I(inode);
739 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
740 
741 	spin_lock(&sbi->s_es_lru_lock);
742 	if (list_empty(&ei->i_es_lru))
743 		list_add_tail(&ei->i_es_lru, &sbi->s_es_lru);
744 	else
745 		list_move_tail(&ei->i_es_lru, &sbi->s_es_lru);
746 	spin_unlock(&sbi->s_es_lru_lock);
747 }
748 
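/* Remove an inode from the superblock's extent status LRU list. */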
749 void ext4_es_lru_del(struct inode *inode)
750 {
751 	struct ext4_inode_info *ei = EXT4_I(inode);
752 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
753 
754 	spin_lock(&sbi->s_es_lru_lock);
755 	if (!list_empty(&ei->i_es_lru))
756 		list_del_init(&ei->i_es_lru);
757 	spin_unlock(&sbi->s_es_lru_lock);
758 }
759 
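/*
 * Reclaim up to @nr_to_scan non-delayed extents from @ei's status tree.
 * Delayed extents are skipped because fiemap, bigalloc and
 * SEEK_DATA/SEEK_HOLE still need them.  The caller must hold i_es_lock
 * for writing.  Returns the number of extents freed.
 */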
760 static int __es_try_to_reclaim_extents(struct ext4_inode_info *ei,
761 				       int nr_to_scan)
762 {
763 	struct inode *inode = &ei->vfs_inode;
764 	struct ext4_es_tree *tree = &ei->i_es_tree;
765 	struct rb_node *node;
766 	struct extent_status *es;
767 	int nr_shrunk = 0;
768 
769 	if (ei->i_es_lru_nr == 0)
770 		return 0;
771 
772 	node = rb_first(&tree->root);
773 	while (node != NULL) {
774 		es = rb_entry(node, struct extent_status, rb_node);
775 		node = rb_next(&es->rb_node);
776 		/*
777 		 * We can't reclaim delayed extents from the status tree because
778 		 * fiemap, bigalloc, and seek_data/hole need to use them.
779 		 */
780 		if (!ext4_es_is_delayed(es)) {
781 			rb_erase(&es->rb_node, &tree->root);
782 			ext4_es_free_extent(inode, es);
783 			nr_shrunk++;
784 			if (--nr_to_scan == 0)
785 				break;
786 		}
787 	}
788 	tree->cache_es = NULL;
789 	return nr_shrunk;
790 }
791