1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * f2fs extent cache support
4  *
5  * Copyright (c) 2015 Motorola Mobility
6  * Copyright (c) 2015 Samsung Electronics
7  * Authors: Jaegeuk Kim <jaegeuk@kernel.org>
8  *          Chao Yu <chao2.yu@samsung.com>
9  */
10 
11 #include <linux/fs.h>
12 #include <linux/f2fs_fs.h>
13 
14 #include "f2fs.h"
15 #include "node.h"
16 #include <trace/events/f2fs.h>
17 
18 static void __set_extent_info(struct extent_info *ei,
19 				unsigned int fofs, unsigned int len,
20 				block_t blk, bool keep_clen,
21 				enum extent_type type)
22 {
23 	ei->fofs = fofs;
24 	ei->len = len;
25 
26 	if (type == EX_READ) {
27 		ei->blk = blk;
28 		if (keep_clen)
29 			return;
30 #ifdef CONFIG_F2FS_FS_COMPRESSION
31 		ei->c_len = 0;
32 #endif
33 	}
34 }
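
/*
 * A hypothetical note on keep_clen: when an existing compressed read
 * extent is only trimmed in place (see __update_extent_tree_range()
 * below), keep_clen is true so the cached cluster length survives the
 * update; when a brand-new extent is described, keep_clen is false and
 * c_len is reset to zero.
 */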
35 
36 static bool __may_read_extent_tree(struct inode *inode)
37 {
38 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
39 
40 	if (!test_opt(sbi, READ_EXTENT_CACHE))
41 		return false;
42 	if (is_inode_flag_set(inode, FI_NO_EXTENT))
43 		return false;
44 	if (is_inode_flag_set(inode, FI_COMPRESSED_FILE) &&
45 			 !f2fs_sb_has_readonly(sbi))
46 		return false;
47 	return S_ISREG(inode->i_mode);
48 }
49 
50 static bool __init_may_extent_tree(struct inode *inode, enum extent_type type)
51 {
52 	if (type == EX_READ)
53 		return __may_read_extent_tree(inode);
54 	return false;
55 }
56 
57 static bool __may_extent_tree(struct inode *inode, enum extent_type type)
58 {
59 	/*
60 	 * For files recovered during mount, do not create extents
61 	 * if the shrinker is not registered.
62 	 */
63 	if (list_empty(&F2FS_I_SB(inode)->s_list))
64 		return false;
65 
66 	return __init_may_extent_tree(inode, type);
67 }
68 
69 static void __try_update_largest_extent(struct extent_tree *et,
70 						struct extent_node *en)
71 {
72 	if (et->type != EX_READ)
73 		return;
74 	if (en->ei.len <= et->largest.len)
75 		return;
76 
77 	et->largest = en->ei;
78 	et->largest_updated = true;
79 }
80 
81 static bool __is_extent_mergeable(struct extent_info *back,
82 		struct extent_info *front, enum extent_type type)
83 {
84 	if (type == EX_READ) {
85 #ifdef CONFIG_F2FS_FS_COMPRESSION
86 		if (back->c_len && back->len != back->c_len)
87 			return false;
88 		if (front->c_len && front->len != front->c_len)
89 			return false;
90 #endif
91 		return (back->fofs + back->len == front->fofs &&
92 				back->blk + back->len == front->blk);
93 	}
94 	return false;
95 }
96 
97 static bool __is_back_mergeable(struct extent_info *cur,
98 		struct extent_info *back, enum extent_type type)
99 {
100 	return __is_extent_mergeable(back, cur, type);
101 }
102 
103 static bool __is_front_mergeable(struct extent_info *cur,
104 		struct extent_info *front, enum extent_type type)
105 {
106 	return __is_extent_mergeable(cur, front, type);
107 }
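
/*
 * A minimal, hypothetical example of the merge rule above: two read
 * extents are mergeable only when both their file offsets and their
 * block addresses are contiguous.
 */
#if 0	/* illustrative sketch only, not built */
static void example_merge_check(void)
{
	struct extent_info back  = { .fofs = 0, .len = 4, .blk = 100 };
	struct extent_info front = { .fofs = 4, .len = 2, .blk = 104 };

	/* 0 + 4 == 4 and 100 + 4 == 104, so the extents can merge */
	WARN_ON(!__is_extent_mergeable(&back, &front, EX_READ));

	/* a gap in the block addresses breaks mergeability */
	front.blk = 200;
	WARN_ON(__is_extent_mergeable(&back, &front, EX_READ));
}
#endif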
108 
109 static struct rb_entry *__lookup_rb_tree_fast(struct rb_entry *cached_re,
110 							unsigned int ofs)
111 {
112 	if (cached_re) {
113 		if (cached_re->ofs <= ofs &&
114 				cached_re->ofs + cached_re->len > ofs) {
115 			return cached_re;
116 		}
117 	}
118 	return NULL;
119 }
120 
121 static struct rb_entry *__lookup_rb_tree_slow(struct rb_root_cached *root,
122 							unsigned int ofs)
123 {
124 	struct rb_node *node = root->rb_root.rb_node;
125 	struct rb_entry *re;
126 
127 	while (node) {
128 		re = rb_entry(node, struct rb_entry, rb_node);
129 
130 		if (ofs < re->ofs)
131 			node = node->rb_left;
132 		else if (ofs >= re->ofs + re->len)
133 			node = node->rb_right;
134 		else
135 			return re;
136 	}
137 	return NULL;
138 }
139 
140 struct rb_entry *f2fs_lookup_rb_tree(struct rb_root_cached *root,
141 				struct rb_entry *cached_re, unsigned int ofs)
142 {
143 	struct rb_entry *re;
144 
145 	re = __lookup_rb_tree_fast(cached_re, ofs);
146 	if (!re)
147 		return __lookup_rb_tree_slow(root, ofs);
148 
149 	return re;
150 }
151 
152 struct rb_node **f2fs_lookup_rb_tree_ext(struct f2fs_sb_info *sbi,
153 					struct rb_root_cached *root,
154 					struct rb_node **parent,
155 					unsigned long long key, bool *leftmost)
156 {
157 	struct rb_node **p = &root->rb_root.rb_node;
158 	struct rb_entry *re;
159 
160 	while (*p) {
161 		*parent = *p;
162 		re = rb_entry(*parent, struct rb_entry, rb_node);
163 
164 		if (key < re->key) {
165 			p = &(*p)->rb_left;
166 		} else {
167 			p = &(*p)->rb_right;
168 			*leftmost = false;
169 		}
170 	}
171 
172 	return p;
173 }
174 
175 struct rb_node **f2fs_lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
176 				struct rb_root_cached *root,
177 				struct rb_node **parent,
178 				unsigned int ofs, bool *leftmost)
179 {
180 	struct rb_node **p = &root->rb_root.rb_node;
181 	struct rb_entry *re;
182 
183 	while (*p) {
184 		*parent = *p;
185 		re = rb_entry(*parent, struct rb_entry, rb_node);
186 
187 		if (ofs < re->ofs) {
188 			p = &(*p)->rb_left;
189 		} else if (ofs >= re->ofs + re->len) {
190 			p = &(*p)->rb_right;
191 			*leftmost = false;
192 		} else {
193 			f2fs_bug_on(sbi, 1);
194 		}
195 	}
196 
197 	return p;
198 }
199 
200 /*
201  * Look up the rb entry covering @ofs in the rb-tree.
202  * If hit, return the entry; otherwise, return NULL.
203  * @prev_entry: entry before ofs
204  * @next_entry: entry after ofs
205  * @insert_p: insert point for a new entry at ofs,
206  * in order to simplify the insertion afterwards.
207  * The tree must stay unchanged between lookup and insertion.
208  */
209 struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root,
210 				struct rb_entry *cached_re,
211 				unsigned int ofs,
212 				struct rb_entry **prev_entry,
213 				struct rb_entry **next_entry,
214 				struct rb_node ***insert_p,
215 				struct rb_node **insert_parent,
216 				bool force, bool *leftmost)
217 {
218 	struct rb_node **pnode = &root->rb_root.rb_node;
219 	struct rb_node *parent = NULL, *tmp_node;
220 	struct rb_entry *re = cached_re;
221 
222 	*insert_p = NULL;
223 	*insert_parent = NULL;
224 	*prev_entry = NULL;
225 	*next_entry = NULL;
226 
227 	if (RB_EMPTY_ROOT(&root->rb_root))
228 		return NULL;
229 
230 	if (re) {
231 		if (re->ofs <= ofs && re->ofs + re->len > ofs)
232 			goto lookup_neighbors;
233 	}
234 
235 	if (leftmost)
236 		*leftmost = true;
237 
238 	while (*pnode) {
239 		parent = *pnode;
240 		re = rb_entry(*pnode, struct rb_entry, rb_node);
241 
242 		if (ofs < re->ofs) {
243 			pnode = &(*pnode)->rb_left;
244 		} else if (ofs >= re->ofs + re->len) {
245 			pnode = &(*pnode)->rb_right;
246 			if (leftmost)
247 				*leftmost = false;
248 		} else {
249 			goto lookup_neighbors;
250 		}
251 	}
252 
253 	*insert_p = pnode;
254 	*insert_parent = parent;
255 
256 	re = rb_entry(parent, struct rb_entry, rb_node);
257 	tmp_node = parent;
258 	if (parent && ofs > re->ofs)
259 		tmp_node = rb_next(parent);
260 	*next_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
261 
262 	tmp_node = parent;
263 	if (parent && ofs < re->ofs)
264 		tmp_node = rb_prev(parent);
265 	*prev_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
266 	return NULL;
267 
268 lookup_neighbors:
269 	if (ofs == re->ofs || force) {
270 		/* lookup prev node for merging backward later */
271 		tmp_node = rb_prev(&re->rb_node);
272 		*prev_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
273 	}
274 	if (ofs == re->ofs + re->len - 1 || force) {
275 		/* lookup next node for merging frontward later */
276 		tmp_node = rb_next(&re->rb_node);
277 		*next_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
278 	}
279 	return re;
280 }
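
/*
 * A condensed, hypothetical sketch of how the hints returned above are
 * consumed (modelled on f2fs_update_read_extent_tree_range_compressed()
 * below): the neighbors feed the merge attempt, and the saved insert
 * point lets the caller insert without a second tree walk.
 */
#if 0	/* illustrative sketch only, not built */
static void example_insert_with_hints(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei)
{
	struct extent_node *en, *prev_en = NULL, *next_en = NULL;
	struct rb_node **insert_p = NULL, *insert_parent = NULL;
	bool leftmost = false;

	write_lock(&et->lock);

	en = (struct extent_node *)f2fs_lookup_rb_tree_ret(&et->root,
				(struct rb_entry *)et->cached_en, ei->fofs,
				(struct rb_entry **)&prev_en,
				(struct rb_entry **)&next_en,
				&insert_p, &insert_parent, false, &leftmost);
	if (!en) {
		/* no overlapping extent: merge into a neighbor or insert */
		if (!__try_merge_extent_node(sbi, et, ei, prev_en, next_en))
			__insert_extent_tree(sbi, et, ei,
					insert_p, insert_parent, leftmost);
	}

	write_unlock(&et->lock);
}
#endif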
281 
282 bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
283 				struct rb_root_cached *root, bool check_key)
284 {
285 #ifdef CONFIG_F2FS_CHECK_FS
286 	struct rb_node *cur = rb_first_cached(root), *next;
287 	struct rb_entry *cur_re, *next_re;
288 
289 	if (!cur)
290 		return true;
291 
292 	while (cur) {
293 		next = rb_next(cur);
294 		if (!next)
295 			return true;
296 
297 		cur_re = rb_entry(cur, struct rb_entry, rb_node);
298 		next_re = rb_entry(next, struct rb_entry, rb_node);
299 
300 		if (check_key) {
301 			if (cur_re->key > next_re->key) {
302 				f2fs_info(sbi, "inconsistent rbtree, "
303 					"cur(%llu) next(%llu)",
304 					cur_re->key, next_re->key);
305 				return false;
306 			}
307 			goto next;
308 		}
309 
310 		if (cur_re->ofs + cur_re->len > next_re->ofs) {
311 			f2fs_info(sbi, "inconsistent rbtree, cur(%u, %u) next(%u, %u)",
312 				  cur_re->ofs, cur_re->len,
313 				  next_re->ofs, next_re->len);
314 			return false;
315 		}
316 next:
317 		cur = next;
318 	}
319 #endif
320 	return true;
321 }
322 
323 static struct kmem_cache *extent_tree_slab;
324 static struct kmem_cache *extent_node_slab;
325 
326 static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
327 				struct extent_tree *et, struct extent_info *ei,
328 				struct rb_node *parent, struct rb_node **p,
329 				bool leftmost)
330 {
331 	struct extent_tree_info *eti = &sbi->extent_tree[et->type];
332 	struct extent_node *en;
333 
334 	en = f2fs_kmem_cache_alloc(extent_node_slab, GFP_ATOMIC, false, sbi);
335 	if (!en)
336 		return NULL;
337 
338 	en->ei = *ei;
339 	INIT_LIST_HEAD(&en->list);
340 	en->et = et;
341 
342 	rb_link_node(&en->rb_node, parent, p);
343 	rb_insert_color_cached(&en->rb_node, &et->root, leftmost);
344 	atomic_inc(&et->node_cnt);
345 	atomic_inc(&eti->total_ext_node);
346 	return en;
347 }
348 
349 static void __detach_extent_node(struct f2fs_sb_info *sbi,
350 				struct extent_tree *et, struct extent_node *en)
351 {
352 	struct extent_tree_info *eti = &sbi->extent_tree[et->type];
353 
354 	rb_erase_cached(&en->rb_node, &et->root);
355 	atomic_dec(&et->node_cnt);
356 	atomic_dec(&eti->total_ext_node);
357 
358 	if (et->cached_en == en)
359 		et->cached_en = NULL;
360 	kmem_cache_free(extent_node_slab, en);
361 }
362 
363 /*
364  * Flow to release an extent_node:
365  * 1. list_del_init
366  * 2. __detach_extent_node
367  * 3. kmem_cache_free.
368  */
369 static void __release_extent_node(struct f2fs_sb_info *sbi,
370 			struct extent_tree *et, struct extent_node *en)
371 {
372 	struct extent_tree_info *eti = &sbi->extent_tree[et->type];
373 
374 	spin_lock(&eti->extent_lock);
375 	f2fs_bug_on(sbi, list_empty(&en->list));
376 	list_del_init(&en->list);
377 	spin_unlock(&eti->extent_lock);
378 
379 	__detach_extent_node(sbi, et, en);
380 }
381 
382 static struct extent_tree *__grab_extent_tree(struct inode *inode,
383 						enum extent_type type)
384 {
385 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
386 	struct extent_tree_info *eti = &sbi->extent_tree[type];
387 	struct extent_tree *et;
388 	nid_t ino = inode->i_ino;
389 
390 	mutex_lock(&eti->extent_tree_lock);
391 	et = radix_tree_lookup(&eti->extent_tree_root, ino);
392 	if (!et) {
393 		et = f2fs_kmem_cache_alloc(extent_tree_slab,
394 					GFP_NOFS, true, NULL);
395 		f2fs_radix_tree_insert(&eti->extent_tree_root, ino, et);
396 		memset(et, 0, sizeof(struct extent_tree));
397 		et->ino = ino;
398 		et->type = type;
399 		et->root = RB_ROOT_CACHED;
400 		et->cached_en = NULL;
401 		rwlock_init(&et->lock);
402 		INIT_LIST_HEAD(&et->list);
403 		atomic_set(&et->node_cnt, 0);
404 		atomic_inc(&eti->total_ext_tree);
405 	} else {
406 		atomic_dec(&eti->total_zombie_tree);
407 		list_del_init(&et->list);
408 	}
409 	mutex_unlock(&eti->extent_tree_lock);
410 
411 	/* never dies until evict_inode */
412 	F2FS_I(inode)->extent_tree[type] = et;
413 
414 	return et;
415 }
416 
417 static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
418 					struct extent_tree *et)
419 {
420 	struct rb_node *node, *next;
421 	struct extent_node *en;
422 	unsigned int count = atomic_read(&et->node_cnt);
423 
424 	node = rb_first_cached(&et->root);
425 	while (node) {
426 		next = rb_next(node);
427 		en = rb_entry(node, struct extent_node, rb_node);
428 		__release_extent_node(sbi, et, en);
429 		node = next;
430 	}
431 
432 	return count - atomic_read(&et->node_cnt);
433 }
434 
435 static void __drop_largest_extent(struct extent_tree *et,
436 					pgoff_t fofs, unsigned int len)
437 {
438 	if (fofs < et->largest.fofs + et->largest.len &&
439 			fofs + len > et->largest.fofs) {
440 		et->largest.len = 0;
441 		et->largest_updated = true;
442 	}
443 }
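
/*
 * The check above is the usual half-open interval overlap test:
 * [fofs, fofs + len) intersects [largest.fofs, largest.fofs + largest.len)
 * iff each range starts before the other one ends.  A hypothetical example
 * with concrete numbers:
 */
#if 0	/* illustrative sketch only, not built */
static void example_drop_largest(struct extent_tree *et)
{
	et->largest.fofs = 100;
	et->largest.len = 50;			/* covers [100, 150) */

	__drop_largest_extent(et, 150, 10);	/* [150, 160): only adjacent, kept */
	__drop_largest_extent(et, 140, 20);	/* [140, 160): overlaps, largest dropped */
}
#endif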
444 
445 void f2fs_init_read_extent_tree(struct inode *inode, struct page *ipage)
446 {
447 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
448 	struct extent_tree_info *eti = &sbi->extent_tree[EX_READ];
449 	struct f2fs_extent *i_ext = &F2FS_INODE(ipage)->i_ext;
450 	struct extent_tree *et;
451 	struct extent_node *en;
452 	struct extent_info ei;
453 
454 	if (!__may_extent_tree(inode, EX_READ)) {
455 		/* drop largest read extent */
456 		if (i_ext && i_ext->len) {
457 			f2fs_wait_on_page_writeback(ipage, NODE, true, true);
458 			i_ext->len = 0;
459 			set_page_dirty(ipage);
460 		}
461 		goto out;
462 	}
463 
464 	et = __grab_extent_tree(inode, EX_READ);
465 
466 	if (!i_ext || !i_ext->len)
467 		goto out;
468 
469 	get_read_extent_info(&ei, i_ext);
470 
471 	write_lock(&et->lock);
472 	if (atomic_read(&et->node_cnt))
473 		goto unlock_out;
474 
475 	en = __attach_extent_node(sbi, et, &ei, NULL,
476 				&et->root.rb_root.rb_node, true);
477 	if (en) {
478 		et->largest = en->ei;
479 		et->cached_en = en;
480 
481 		spin_lock(&eti->extent_lock);
482 		list_add_tail(&en->list, &eti->extent_list);
483 		spin_unlock(&eti->extent_lock);
484 	}
485 unlock_out:
486 	write_unlock(&et->lock);
487 out:
488 	if (!F2FS_I(inode)->extent_tree[EX_READ])
489 		set_inode_flag(inode, FI_NO_EXTENT);
490 }
491 
492 void f2fs_init_extent_tree(struct inode *inode)
493 {
494 	/* initialize read cache */
495 	if (__init_may_extent_tree(inode, EX_READ))
496 		__grab_extent_tree(inode, EX_READ);
497 }
498 
499 static bool __lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
500 			struct extent_info *ei, enum extent_type type)
501 {
502 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
503 	struct extent_tree_info *eti = &sbi->extent_tree[type];
504 	struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
505 	struct extent_node *en;
506 	bool ret = false;
507 
508 	f2fs_bug_on(sbi, !et);
509 
510 	trace_f2fs_lookup_extent_tree_start(inode, pgofs, type);
511 
512 	read_lock(&et->lock);
513 
514 	if (type == EX_READ &&
515 			et->largest.fofs <= pgofs &&
516 			et->largest.fofs + et->largest.len > pgofs) {
517 		*ei = et->largest;
518 		ret = true;
519 		stat_inc_largest_node_hit(sbi);
520 		goto out;
521 	}
522 
523 	en = (struct extent_node *)f2fs_lookup_rb_tree(&et->root,
524 				(struct rb_entry *)et->cached_en, pgofs);
525 	if (!en)
526 		goto out;
527 
528 	if (en == et->cached_en)
529 		stat_inc_cached_node_hit(sbi, type);
530 	else
531 		stat_inc_rbtree_node_hit(sbi, type);
532 
533 	*ei = en->ei;
534 	spin_lock(&eti->extent_lock);
535 	if (!list_empty(&en->list)) {
536 		list_move_tail(&en->list, &eti->extent_list);
537 		et->cached_en = en;
538 	}
539 	spin_unlock(&eti->extent_lock);
540 	ret = true;
541 out:
542 	stat_inc_total_hit(sbi, type);
543 	read_unlock(&et->lock);
544 
545 	if (type == EX_READ)
546 		trace_f2fs_lookup_read_extent_tree_end(inode, pgofs, ei);
547 	return ret;
548 }
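
/*
 * A hypothetical caller of the read extent cache: on a hit, the block
 * address of any page inside the extent follows the same offset
 * arithmetic used by __update_extent_tree_range() below, i.e. block
 * numbers track file offsets one-to-one within an extent.
 */
#if 0	/* illustrative sketch only, not built */
static block_t example_map_block(struct inode *inode, pgoff_t pgofs)
{
	struct extent_info ei;

	if (f2fs_lookup_read_extent_cache(inode, pgofs, &ei))
		return ei.blk + (pgofs - ei.fofs);

	return NULL_ADDR;	/* miss: fall back to a node page lookup */
}
#endif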
549 
550 static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi,
551 				struct extent_tree *et, struct extent_info *ei,
552 				struct extent_node *prev_ex,
553 				struct extent_node *next_ex)
554 {
555 	struct extent_tree_info *eti = &sbi->extent_tree[et->type];
556 	struct extent_node *en = NULL;
557 
558 	if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei, et->type)) {
559 		prev_ex->ei.len += ei->len;
560 		ei = &prev_ex->ei;
561 		en = prev_ex;
562 	}
563 
564 	if (next_ex && __is_front_mergeable(ei, &next_ex->ei, et->type)) {
565 		next_ex->ei.fofs = ei->fofs;
566 		next_ex->ei.len += ei->len;
567 		if (et->type == EX_READ)
568 			next_ex->ei.blk = ei->blk;
569 		if (en)
570 			__release_extent_node(sbi, et, prev_ex);
571 
572 		en = next_ex;
573 	}
574 
575 	if (!en)
576 		return NULL;
577 
578 	__try_update_largest_extent(et, en);
579 
580 	spin_lock(&eti->extent_lock);
581 	if (!list_empty(&en->list)) {
582 		list_move_tail(&en->list, &eti->extent_list);
583 		et->cached_en = en;
584 	}
585 	spin_unlock(&eti->extent_lock);
586 	return en;
587 }
588 
589 static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
590 				struct extent_tree *et, struct extent_info *ei,
591 				struct rb_node **insert_p,
592 				struct rb_node *insert_parent,
593 				bool leftmost)
594 {
595 	struct extent_tree_info *eti = &sbi->extent_tree[et->type];
596 	struct rb_node **p;
597 	struct rb_node *parent = NULL;
598 	struct extent_node *en = NULL;
599 
600 	if (insert_p && insert_parent) {
601 		parent = insert_parent;
602 		p = insert_p;
603 		goto do_insert;
604 	}
605 
606 	leftmost = true;
607 
608 	p = f2fs_lookup_rb_tree_for_insert(sbi, &et->root, &parent,
609 						ei->fofs, &leftmost);
610 do_insert:
611 	en = __attach_extent_node(sbi, et, ei, parent, p, leftmost);
612 	if (!en)
613 		return NULL;
614 
615 	__try_update_largest_extent(et, en);
616 
617 	/* update in global extent list */
618 	spin_lock(&eti->extent_lock);
619 	list_add_tail(&en->list, &eti->extent_list);
620 	et->cached_en = en;
621 	spin_unlock(&eti->extent_lock);
622 	return en;
623 }
624 
625 static void __update_extent_tree_range(struct inode *inode,
626 			struct extent_info *tei, enum extent_type type)
627 {
628 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
629 	struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
630 	struct extent_node *en = NULL, *en1 = NULL;
631 	struct extent_node *prev_en = NULL, *next_en = NULL;
632 	struct extent_info ei, dei, prev;
633 	struct rb_node **insert_p = NULL, *insert_parent = NULL;
634 	unsigned int fofs = tei->fofs, len = tei->len;
635 	unsigned int end = fofs + len;
636 	bool updated = false;
637 	bool leftmost = false;
638 
639 	if (!et)
640 		return;
641 
642 	if (type == EX_READ)
643 		trace_f2fs_update_read_extent_tree_range(inode, fofs, len,
644 						tei->blk, 0);
645 	write_lock(&et->lock);
646 
647 	if (type == EX_READ) {
648 		if (is_inode_flag_set(inode, FI_NO_EXTENT)) {
649 			write_unlock(&et->lock);
650 			return;
651 		}
652 
653 		prev = et->largest;
654 		dei.len = 0;
655 
656 		/*
657 		 * drop largest extent before lookup, in case it's already
658 		 * been shrunk from extent tree
659 		 */
660 		__drop_largest_extent(et, fofs, len);
661 	}
662 
663 	/* 1. lookup first extent node in range [fofs, fofs + len - 1] */
664 	en = (struct extent_node *)f2fs_lookup_rb_tree_ret(&et->root,
665 					(struct rb_entry *)et->cached_en, fofs,
666 					(struct rb_entry **)&prev_en,
667 					(struct rb_entry **)&next_en,
668 					&insert_p, &insert_parent, false,
669 					&leftmost);
670 	if (!en)
671 		en = next_en;
672 
673 	/* 2. invalidate all extent nodes in range [fofs, fofs + len - 1] */
674 	while (en && en->ei.fofs < end) {
675 		unsigned int org_end;
676 		int parts = 0;	/* # of parts current extent split into */
677 
678 		next_en = en1 = NULL;
679 
680 		dei = en->ei;
681 		org_end = dei.fofs + dei.len;
682 		f2fs_bug_on(sbi, fofs >= org_end);
683 
684 		if (fofs > dei.fofs && (type != EX_READ ||
685 				fofs - dei.fofs >= F2FS_MIN_EXTENT_LEN)) {
686 			en->ei.len = fofs - en->ei.fofs;
687 			prev_en = en;
688 			parts = 1;
689 		}
690 
691 		if (end < org_end && (type != EX_READ ||
692 				org_end - end >= F2FS_MIN_EXTENT_LEN)) {
693 			if (parts) {
694 				__set_extent_info(&ei,
695 					end, org_end - end,
696 					end - dei.fofs + dei.blk, false,
697 					type);
698 				en1 = __insert_extent_tree(sbi, et, &ei,
699 							NULL, NULL, true);
700 				next_en = en1;
701 			} else {
702 				__set_extent_info(&en->ei,
703 					end, en->ei.len - (end - dei.fofs),
704 					en->ei.blk + (end - dei.fofs), true,
705 					type);
706 				next_en = en;
707 			}
708 			parts++;
709 		}
710 
711 		if (!next_en) {
712 			struct rb_node *node = rb_next(&en->rb_node);
713 
714 			next_en = rb_entry_safe(node, struct extent_node,
715 						rb_node);
716 		}
717 
718 		if (parts)
719 			__try_update_largest_extent(et, en);
720 		else
721 			__release_extent_node(sbi, et, en);
722 
723 		/*
724 	 * If the original extent was split into zero or two parts, the
725 	 * extent tree has been altered by deletion or insertion; therefore,
726 	 * invalidate the cached pointers into the tree.
727 		 */
728 		if (parts != 1) {
729 			insert_p = NULL;
730 			insert_parent = NULL;
731 		}
732 		en = next_en;
733 	}
734 
735 	/* 3. update extent in read extent cache */
736 	BUG_ON(type != EX_READ);
737 
738 	if (tei->blk) {
739 		__set_extent_info(&ei, fofs, len, tei->blk, false, EX_READ);
740 		if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
741 			__insert_extent_tree(sbi, et, &ei,
742 					insert_p, insert_parent, leftmost);
743 
744 		/* give up extent_cache if splits and small updates happen */
745 		if (dei.len >= 1 &&
746 				prev.len < F2FS_MIN_EXTENT_LEN &&
747 				et->largest.len < F2FS_MIN_EXTENT_LEN) {
748 			et->largest.len = 0;
749 			et->largest_updated = true;
750 			set_inode_flag(inode, FI_NO_EXTENT);
751 		}
752 	}
753 
754 	if (is_inode_flag_set(inode, FI_NO_EXTENT))
755 		__free_extent_tree(sbi, et);
756 
757 	if (et->largest_updated) {
758 		et->largest_updated = false;
759 		updated = true;
760 	}
761 
762 	write_unlock(&et->lock);
763 
764 	if (updated)
765 		f2fs_mark_inode_dirty_sync(inode, true);
766 }
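
/*
 * A hypothetical worked example of the split logic above, assuming both
 * remaining parts are at least F2FS_MIN_EXTENT_LEN pages long:
 *
 *   existing read extent:   fofs = 0,   len = 256, blk = 1000
 *   updated range:          [100, 120)
 *
 *   left part (reuses en):  fofs = 0,   len = 100, blk = 1000
 *   right part (new node):  fofs = 120, len = 136, blk = 1120
 *                                       (256 - 120) (120 - 0 + 1000)
 *
 * A part shorter than F2FS_MIN_EXTENT_LEN is not kept; if neither part
 * survives, the original node is released instead of being shrunk.
 */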
767 
768 #ifdef CONFIG_F2FS_FS_COMPRESSION
769 void f2fs_update_read_extent_tree_range_compressed(struct inode *inode,
770 				pgoff_t fofs, block_t blkaddr, unsigned int llen,
771 				unsigned int c_len)
772 {
773 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
774 	struct extent_tree *et = F2FS_I(inode)->extent_tree[EX_READ];
775 	struct extent_node *en = NULL;
776 	struct extent_node *prev_en = NULL, *next_en = NULL;
777 	struct extent_info ei;
778 	struct rb_node **insert_p = NULL, *insert_parent = NULL;
779 	bool leftmost = false;
780 
781 	trace_f2fs_update_read_extent_tree_range(inode, fofs, llen,
782 						blkaddr, c_len);
783 
784 	/* it is safe here to check FI_NO_EXTENT w/o et->lock in ro image */
785 	if (is_inode_flag_set(inode, FI_NO_EXTENT))
786 		return;
787 
788 	write_lock(&et->lock);
789 
790 	en = (struct extent_node *)f2fs_lookup_rb_tree_ret(&et->root,
791 				(struct rb_entry *)et->cached_en, fofs,
792 				(struct rb_entry **)&prev_en,
793 				(struct rb_entry **)&next_en,
794 				&insert_p, &insert_parent, false,
795 				&leftmost);
796 	if (en)
797 		goto unlock_out;
798 
799 	__set_extent_info(&ei, fofs, llen, blkaddr, true, EX_READ);
800 	ei.c_len = c_len;
801 
802 	if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
803 		__insert_extent_tree(sbi, et, &ei,
804 				insert_p, insert_parent, leftmost);
805 unlock_out:
806 	write_unlock(&et->lock);
807 }
808 #endif
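
/*
 * A hypothetical example for the compressed-range helper above: a cluster
 * of llen = 4 logical pages stored in c_len = 2 physical blocks starting
 * at blkaddr is cached as { fofs = cluster start, len = 4, blk = blkaddr,
 * c_len = 2 }.  Because len != c_len, __is_extent_mergeable() refuses to
 * merge it with its neighbors, so each compressed cluster keeps its own
 * extent node.
 */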
809 
810 static void __update_extent_cache(struct dnode_of_data *dn, enum extent_type type)
811 {
812 	struct extent_info ei;
813 
814 	if (!__may_extent_tree(dn->inode, type))
815 		return;
816 
817 	ei.fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
818 								dn->ofs_in_node;
819 	ei.len = 1;
820 
821 	if (type == EX_READ) {
822 		if (dn->data_blkaddr == NEW_ADDR)
823 			ei.blk = NULL_ADDR;
824 		else
825 			ei.blk = dn->data_blkaddr;
826 	}
827 	__update_extent_tree_range(dn->inode, &ei, type);
828 }
829 
830 static unsigned int __shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink,
831 					enum extent_type type)
832 {
833 	struct extent_tree_info *eti = &sbi->extent_tree[type];
834 	struct extent_tree *et, *next;
835 	struct extent_node *en;
836 	unsigned int node_cnt = 0, tree_cnt = 0;
837 	int remained;
838 
839 	if (!atomic_read(&eti->total_zombie_tree))
840 		goto free_node;
841 
842 	if (!mutex_trylock(&eti->extent_tree_lock))
843 		goto out;
844 
845 	/* 1. remove unreferenced extent tree */
846 	list_for_each_entry_safe(et, next, &eti->zombie_list, list) {
847 		if (atomic_read(&et->node_cnt)) {
848 			write_lock(&et->lock);
849 			node_cnt += __free_extent_tree(sbi, et);
850 			write_unlock(&et->lock);
851 		}
852 		f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
853 		list_del_init(&et->list);
854 		radix_tree_delete(&eti->extent_tree_root, et->ino);
855 		kmem_cache_free(extent_tree_slab, et);
856 		atomic_dec(&eti->total_ext_tree);
857 		atomic_dec(&eti->total_zombie_tree);
858 		tree_cnt++;
859 
860 		if (node_cnt + tree_cnt >= nr_shrink)
861 			goto unlock_out;
862 		cond_resched();
863 	}
864 	mutex_unlock(&eti->extent_tree_lock);
865 
866 free_node:
867 	/* 2. remove LRU extent entries */
868 	if (!mutex_trylock(&eti->extent_tree_lock))
869 		goto out;
870 
871 	remained = nr_shrink - (node_cnt + tree_cnt);
872 
873 	spin_lock(&eti->extent_lock);
874 	for (; remained > 0; remained--) {
875 		if (list_empty(&eti->extent_list))
876 			break;
877 		en = list_first_entry(&eti->extent_list,
878 					struct extent_node, list);
879 		et = en->et;
880 		if (!write_trylock(&et->lock)) {
881 			/* refresh this extent node's position in extent list */
882 			list_move_tail(&en->list, &eti->extent_list);
883 			continue;
884 		}
885 
886 		list_del_init(&en->list);
887 		spin_unlock(&eti->extent_lock);
888 
889 		__detach_extent_node(sbi, et, en);
890 
891 		write_unlock(&et->lock);
892 		node_cnt++;
893 		spin_lock(&eti->extent_lock);
894 	}
895 	spin_unlock(&eti->extent_lock);
896 
897 unlock_out:
898 	mutex_unlock(&eti->extent_tree_lock);
899 out:
900 	trace_f2fs_shrink_extent_tree(sbi, node_cnt, tree_cnt, type);
901 
902 	return node_cnt + tree_cnt;
903 }
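
/*
 * A short note on the two phases above: zombie trees (trees left behind by
 * evicted inodes that still hold extent nodes) are reclaimed first, since
 * nothing references them any more, then individual nodes are reclaimed
 * from the head of the global LRU list.  When a tree's lock cannot be
 * taken without blocking, its node is rotated to the list tail so the
 * scan keeps making progress instead of stalling behind a busy inode.
 */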
904 
905 /* read extent cache operations */
906 bool f2fs_lookup_read_extent_cache(struct inode *inode, pgoff_t pgofs,
907 				struct extent_info *ei)
908 {
909 	if (!__may_extent_tree(inode, EX_READ))
910 		return false;
911 
912 	return __lookup_extent_tree(inode, pgofs, ei, EX_READ);
913 }
914 
915 void f2fs_update_read_extent_cache(struct dnode_of_data *dn)
916 {
917 	return __update_extent_cache(dn, EX_READ);
918 }
919 
920 void f2fs_update_read_extent_cache_range(struct dnode_of_data *dn,
921 				pgoff_t fofs, block_t blkaddr, unsigned int len)
922 {
923 	struct extent_info ei = {
924 		.fofs = fofs,
925 		.len = len,
926 		.blk = blkaddr,
927 	};
928 
929 	if (!__may_extent_tree(dn->inode, EX_READ))
930 		return;
931 
932 	__update_extent_tree_range(dn->inode, &ei, EX_READ);
933 }
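
/*
 * A hypothetical usage note for the two update helpers above: the
 * single-page variant derives the offset and block address from the
 * dnode, while the range variant records a whole contiguous run
 * [blkaddr, blkaddr + len) for file offsets [fofs, fofs + len) in one
 * call.  Passing NULL_ADDR as blkaddr only invalidates the cached range,
 * since __update_extent_tree_range() inserts a new extent only when
 * tei->blk is non-zero.
 */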
934 
935 unsigned int f2fs_shrink_read_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
936 {
937 	if (!test_opt(sbi, READ_EXTENT_CACHE))
938 		return 0;
939 
940 	return __shrink_extent_tree(sbi, nr_shrink, EX_READ);
941 }
942 
943 static unsigned int __destroy_extent_node(struct inode *inode,
944 					enum extent_type type)
945 {
946 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
947 	struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
948 	unsigned int node_cnt = 0;
949 
950 	if (!et || !atomic_read(&et->node_cnt))
951 		return 0;
952 
953 	write_lock(&et->lock);
954 	node_cnt = __free_extent_tree(sbi, et);
955 	write_unlock(&et->lock);
956 
957 	return node_cnt;
958 }
959 
960 void f2fs_destroy_extent_node(struct inode *inode)
961 {
962 	__destroy_extent_node(inode, EX_READ);
963 }
964 
965 static void __drop_extent_tree(struct inode *inode, enum extent_type type)
966 {
967 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
968 	struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
969 	bool updated = false;
970 
971 	if (!__may_extent_tree(inode, type))
972 		return;
973 
974 	write_lock(&et->lock);
975 	__free_extent_tree(sbi, et);
976 	if (type == EX_READ) {
977 		set_inode_flag(inode, FI_NO_EXTENT);
978 		if (et->largest.len) {
979 			et->largest.len = 0;
980 			updated = true;
981 		}
982 	}
983 	write_unlock(&et->lock);
984 	if (updated)
985 		f2fs_mark_inode_dirty_sync(inode, true);
986 }
987 
988 void f2fs_drop_extent_tree(struct inode *inode)
989 {
990 	__drop_extent_tree(inode, EX_READ);
991 }
992 
993 static void __destroy_extent_tree(struct inode *inode, enum extent_type type)
994 {
995 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
996 	struct extent_tree_info *eti = &sbi->extent_tree[type];
997 	struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
998 	unsigned int node_cnt = 0;
999 
1000 	if (!et)
1001 		return;
1002 
1003 	if (inode->i_nlink && !is_bad_inode(inode) &&
1004 					atomic_read(&et->node_cnt)) {
1005 		mutex_lock(&eti->extent_tree_lock);
1006 		list_add_tail(&et->list, &eti->zombie_list);
1007 		atomic_inc(&eti->total_zombie_tree);
1008 		mutex_unlock(&eti->extent_tree_lock);
1009 		return;
1010 	}
1011 
1012 	/* free all extent info belonging to this extent tree */
1013 	node_cnt = __destroy_extent_node(inode, type);
1014 
1015 	/* delete extent tree entry in radix tree */
1016 	mutex_lock(&eti->extent_tree_lock);
1017 	f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
1018 	radix_tree_delete(&eti->extent_tree_root, inode->i_ino);
1019 	kmem_cache_free(extent_tree_slab, et);
1020 	atomic_dec(&eti->total_ext_tree);
1021 	mutex_unlock(&eti->extent_tree_lock);
1022 
1023 	F2FS_I(inode)->extent_tree[type] = NULL;
1024 
1025 	trace_f2fs_destroy_extent_tree(inode, node_cnt, type);
1026 }
1027 
1028 void f2fs_destroy_extent_tree(struct inode *inode)
1029 {
1030 	__destroy_extent_tree(inode, EX_READ);
1031 }
1032 
1033 static void __init_extent_tree_info(struct extent_tree_info *eti)
1034 {
1035 	INIT_RADIX_TREE(&eti->extent_tree_root, GFP_NOIO);
1036 	mutex_init(&eti->extent_tree_lock);
1037 	INIT_LIST_HEAD(&eti->extent_list);
1038 	spin_lock_init(&eti->extent_lock);
1039 	atomic_set(&eti->total_ext_tree, 0);
1040 	INIT_LIST_HEAD(&eti->zombie_list);
1041 	atomic_set(&eti->total_zombie_tree, 0);
1042 	atomic_set(&eti->total_ext_node, 0);
1043 }
1044 
1045 void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi)
1046 {
1047 	__init_extent_tree_info(&sbi->extent_tree[EX_READ]);
1048 }
1049 
1050 int __init f2fs_create_extent_cache(void)
1051 {
1052 	extent_tree_slab = f2fs_kmem_cache_create("f2fs_extent_tree",
1053 			sizeof(struct extent_tree));
1054 	if (!extent_tree_slab)
1055 		return -ENOMEM;
1056 	extent_node_slab = f2fs_kmem_cache_create("f2fs_extent_node",
1057 			sizeof(struct extent_node));
1058 	if (!extent_node_slab) {
1059 		kmem_cache_destroy(extent_tree_slab);
1060 		return -ENOMEM;
1061 	}
1062 	return 0;
1063 }
1064 
1065 void f2fs_destroy_extent_cache(void)
1066 {
1067 	kmem_cache_destroy(extent_node_slab);
1068 	kmem_cache_destroy(extent_tree_slab);
1069 }
1070