xref: /openbmc/linux/fs/f2fs/segment.c (revision 0f4b20ef)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * fs/f2fs/segment.c
4  *
5  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6  *             http://www.samsung.com/
7  */
8 #include <linux/fs.h>
9 #include <linux/f2fs_fs.h>
10 #include <linux/bio.h>
11 #include <linux/blkdev.h>
12 #include <linux/sched/mm.h>
13 #include <linux/prefetch.h>
14 #include <linux/kthread.h>
15 #include <linux/swap.h>
16 #include <linux/timer.h>
17 #include <linux/freezer.h>
18 #include <linux/sched/signal.h>
19 #include <linux/random.h>
20 
21 #include "f2fs.h"
22 #include "segment.h"
23 #include "node.h"
24 #include "gc.h"
25 #include "iostat.h"
26 #include <trace/events/f2fs.h>
27 
28 #define __reverse_ffz(x) __reverse_ffs(~(x))
29 
30 static struct kmem_cache *discard_entry_slab;
31 static struct kmem_cache *discard_cmd_slab;
32 static struct kmem_cache *sit_entry_set_slab;
33 static struct kmem_cache *inmem_entry_slab;
34 
35 static unsigned long __reverse_ulong(unsigned char *str)
36 {
37 	unsigned long tmp = 0;
38 	int shift = 24, idx = 0;
39 
40 #if BITS_PER_LONG == 64
41 	shift = 56;
42 #endif
43 	while (shift >= 0) {
44 		tmp |= (unsigned long)str[idx++] << shift;
45 		shift -= BITS_PER_BYTE;
46 	}
47 	return tmp;
48 }
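
/*
 * Example: __reverse_ulong() loads BITS_PER_LONG/8 bytes MSB-first, so on
 * a 64-bit machine str[0] = 0x80 (f2fs_set_bit(0) on the first byte)
 * yields tmp = 0x8000000000000000UL, i.e. the f2fs bit order is preserved
 * when the word is scanned from the most significant bit downwards.
 */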
49 
50 /*
51  * __reverse_ffs is copied from include/asm-generic/bitops/__ffs.h since
52  * MSB and LSB are reversed in a byte by f2fs_set_bit.
53  */
54 static inline unsigned long __reverse_ffs(unsigned long word)
55 {
56 	int num = 0;
57 
58 #if BITS_PER_LONG == 64
59 	if ((word & 0xffffffff00000000UL) == 0)
60 		num += 32;
61 	else
62 		word >>= 32;
63 #endif
64 	if ((word & 0xffff0000) == 0)
65 		num += 16;
66 	else
67 		word >>= 16;
68 
69 	if ((word & 0xff00) == 0)
70 		num += 8;
71 	else
72 		word >>= 8;
73 
74 	if ((word & 0xf0) == 0)
75 		num += 4;
76 	else
77 		word >>= 4;
78 
79 	if ((word & 0xc) == 0)
80 		num += 2;
81 	else
82 		word >>= 2;
83 
84 	if ((word & 0x2) == 0)
85 		num += 1;
86 	return num;
87 }
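
/*
 * A worked example, for illustration: with word = 0x0000000080000000UL,
 * the high 32 bits are zero (num += 32) and the remaining low word is
 * 0x80000000, whose top bit is set, so no further increments happen and
 * __reverse_ffs() returns 32. The result is the number of leading zero
 * bits, i.e. the offset of the first set bit counted from the MSB side,
 * matching the f2fs_set_bit() bit order.
 */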
88 
89 /*
90  * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because
91  * f2fs_set_bit makes MSB and LSB reversed in a byte.
92  * @size must be a multiple of the number of bits in an unsigned long.
93  * Example:
94  *                             MSB <--> LSB
95  *   f2fs_set_bit(0, bitmap) => 1000 0000
96  *   f2fs_set_bit(7, bitmap) => 0000 0001
97  */
98 static unsigned long __find_rev_next_bit(const unsigned long *addr,
99 			unsigned long size, unsigned long offset)
100 {
101 	const unsigned long *p = addr + BIT_WORD(offset);
102 	unsigned long result = size;
103 	unsigned long tmp;
104 
105 	if (offset >= size)
106 		return size;
107 
108 	size -= (offset & ~(BITS_PER_LONG - 1));
109 	offset %= BITS_PER_LONG;
110 
111 	while (1) {
112 		if (*p == 0)
113 			goto pass;
114 
115 		tmp = __reverse_ulong((unsigned char *)p);
116 
117 		tmp &= ~0UL >> offset;
118 		if (size < BITS_PER_LONG)
119 			tmp &= (~0UL << (BITS_PER_LONG - size));
120 		if (tmp)
121 			goto found;
122 pass:
123 		if (size <= BITS_PER_LONG)
124 			break;
125 		size -= BITS_PER_LONG;
126 		offset = 0;
127 		p++;
128 	}
129 	return result;
130 found:
131 	return result - size + __reverse_ffs(tmp);
132 }
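
/*
 * Usage sketch (illustrative): after f2fs_set_bit(2, bitmap) the first
 * byte is 0010 0000b, __reverse_ulong() turns the containing word into
 * 0x2000000000000000UL, and __find_rev_next_bit(bitmap, 64, 0) returns
 * 2 -- the same index that was passed to f2fs_set_bit().
 */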
133 
134 static unsigned long __find_rev_next_zero_bit(const unsigned long *addr,
135 			unsigned long size, unsigned long offset)
136 {
137 	const unsigned long *p = addr + BIT_WORD(offset);
138 	unsigned long result = size;
139 	unsigned long tmp;
140 
141 	if (offset >= size)
142 		return size;
143 
144 	size -= (offset & ~(BITS_PER_LONG - 1));
145 	offset %= BITS_PER_LONG;
146 
147 	while (1) {
148 		if (*p == ~0UL)
149 			goto pass;
150 
151 		tmp = __reverse_ulong((unsigned char *)p);
152 
153 		if (offset)
154 			tmp |= ~0UL << (BITS_PER_LONG - offset);
155 		if (size < BITS_PER_LONG)
156 			tmp |= ~0UL >> size;
157 		if (tmp != ~0UL)
158 			goto found;
159 pass:
160 		if (size <= BITS_PER_LONG)
161 			break;
162 		size -= BITS_PER_LONG;
163 		offset = 0;
164 		p++;
165 	}
166 	return result;
167 found:
168 	return result - size + __reverse_ffz(tmp);
169 }
170 
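/*
 * SSR (Slack Space Recycling) is chosen when free sections run low: the
 * check below is, roughly, free_sections <= sections pinned by dirty
 * node/dentry/imeta pages plus the SSR and reserved watermarks. Dentry
 * sections are counted twice, since flushing dentries dirties node pages
 * as well (a plausible reading of the formula).
 */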
171 bool f2fs_need_SSR(struct f2fs_sb_info *sbi)
172 {
173 	int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
174 	int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
175 	int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);
176 
177 	if (f2fs_lfs_mode(sbi))
178 		return false;
179 	if (sbi->gc_mode == GC_URGENT_HIGH)
180 		return true;
181 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
182 		return true;
183 
184 	return free_sections(sbi) <= (node_secs + 2 * dent_secs + imeta_secs +
185 			SM_I(sbi)->min_ssr_sections + reserved_sections(sbi));
186 }
187 
188 void f2fs_register_inmem_page(struct inode *inode, struct page *page)
189 {
190 	struct inmem_pages *new;
191 
192 	set_page_private_atomic(page);
193 
194 	new = f2fs_kmem_cache_alloc(inmem_entry_slab,
195 					GFP_NOFS, true, NULL);
196 
197 	/* add the atomic page to the inmem list */
198 	new->page = page;
199 	INIT_LIST_HEAD(&new->list);
200 
201 	/* increase reference count with clean state */
202 	get_page(page);
203 	mutex_lock(&F2FS_I(inode)->inmem_lock);
204 	list_add_tail(&new->list, &F2FS_I(inode)->inmem_pages);
205 	inc_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
206 	mutex_unlock(&F2FS_I(inode)->inmem_lock);
207 
208 	trace_f2fs_register_inmem_page(page, INMEM);
209 }
210 
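/*
 * Flag semantics, as can be read from the body below: @drop discards the
 * in-memory updates outright, @recover rewinds each block to its
 * pre-commit address (cur->old_addr), and @trylock avoids an ABBA
 * deadlock between the page lock and inmem_lock by skipping pages whose
 * lock cannot be taken immediately.
 */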
211 static int __revoke_inmem_pages(struct inode *inode,
212 				struct list_head *head, bool drop, bool recover,
213 				bool trylock)
214 {
215 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
216 	struct inmem_pages *cur, *tmp;
217 	int err = 0;
218 
219 	list_for_each_entry_safe(cur, tmp, head, list) {
220 		struct page *page = cur->page;
221 
222 		if (drop)
223 			trace_f2fs_commit_inmem_page(page, INMEM_DROP);
224 
225 		if (trylock) {
226 			/*
227 			 * to avoid a deadlock between the page lock and
228 			 * inmem_lock.
229 			 */
230 			if (!trylock_page(page))
231 				continue;
232 		} else {
233 			lock_page(page);
234 		}
235 
236 		f2fs_wait_on_page_writeback(page, DATA, true, true);
237 
238 		if (recover) {
239 			struct dnode_of_data dn;
240 			struct node_info ni;
241 
242 			trace_f2fs_commit_inmem_page(page, INMEM_REVOKE);
243 retry:
244 			set_new_dnode(&dn, inode, NULL, NULL, 0);
245 			err = f2fs_get_dnode_of_data(&dn, page->index,
246 								LOOKUP_NODE);
247 			if (err) {
248 				if (err == -ENOMEM) {
249 					memalloc_retry_wait(GFP_NOFS);
250 					goto retry;
251 				}
252 				err = -EAGAIN;
253 				goto next;
254 			}
255 
256 			err = f2fs_get_node_info(sbi, dn.nid, &ni, false);
257 			if (err) {
258 				f2fs_put_dnode(&dn);
259 				return err;
260 			}
261 
262 			if (cur->old_addr == NEW_ADDR) {
263 				f2fs_invalidate_blocks(sbi, dn.data_blkaddr);
264 				f2fs_update_data_blkaddr(&dn, NEW_ADDR);
265 			} else
266 				f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
267 					cur->old_addr, ni.version, true, true);
268 			f2fs_put_dnode(&dn);
269 		}
270 next:
271 		/* we don't need to invalidate this in the successful case */
272 		if (drop || recover) {
273 			ClearPageUptodate(page);
274 			clear_page_private_gcing(page);
275 		}
276 		detach_page_private(page);
277 		set_page_private(page, 0);
278 		f2fs_put_page(page, 1);
279 
280 		list_del(&cur->list);
281 		kmem_cache_free(inmem_entry_slab, cur);
282 		dec_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
283 	}
284 	return err;
285 }
286 
287 void f2fs_drop_inmem_pages_all(struct f2fs_sb_info *sbi, bool gc_failure)
288 {
289 	struct list_head *head = &sbi->inode_list[ATOMIC_FILE];
290 	struct inode *inode;
291 	struct f2fs_inode_info *fi;
292 	unsigned int count = sbi->atomic_files;
293 	unsigned int looped = 0;
294 next:
295 	spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
296 	if (list_empty(head)) {
297 		spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
298 		return;
299 	}
300 	fi = list_first_entry(head, struct f2fs_inode_info, inmem_ilist);
301 	inode = igrab(&fi->vfs_inode);
302 	if (inode)
303 		list_move_tail(&fi->inmem_ilist, head);
304 	spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
305 
306 	if (inode) {
307 		if (gc_failure) {
308 			if (!fi->i_gc_failures[GC_FAILURE_ATOMIC])
309 				goto skip;
310 		}
311 		set_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
312 		f2fs_drop_inmem_pages(inode);
313 skip:
314 		iput(inode);
315 	}
316 	f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
317 	if (gc_failure) {
318 		if (++looped >= count)
319 			return;
320 	}
321 	goto next;
322 }
323 
324 void f2fs_drop_inmem_pages(struct inode *inode)
325 {
326 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
327 	struct f2fs_inode_info *fi = F2FS_I(inode);
328 
329 	do {
330 		mutex_lock(&fi->inmem_lock);
331 		if (list_empty(&fi->inmem_pages)) {
332 			fi->i_gc_failures[GC_FAILURE_ATOMIC] = 0;
333 
334 			spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
335 			if (!list_empty(&fi->inmem_ilist))
336 				list_del_init(&fi->inmem_ilist);
337 			if (f2fs_is_atomic_file(inode)) {
338 				clear_inode_flag(inode, FI_ATOMIC_FILE);
339 				sbi->atomic_files--;
340 			}
341 			spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
342 
343 			mutex_unlock(&fi->inmem_lock);
344 			break;
345 		}
346 		__revoke_inmem_pages(inode, &fi->inmem_pages,
347 						true, false, true);
348 		mutex_unlock(&fi->inmem_lock);
349 	} while (1);
350 }
351 
352 void f2fs_drop_inmem_page(struct inode *inode, struct page *page)
353 {
354 	struct f2fs_inode_info *fi = F2FS_I(inode);
355 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
356 	struct list_head *head = &fi->inmem_pages;
357 	struct inmem_pages *cur = NULL;
358 
359 	f2fs_bug_on(sbi, !page_private_atomic(page));
360 
361 	mutex_lock(&fi->inmem_lock);
362 	list_for_each_entry(cur, head, list) {
363 		if (cur->page == page)
364 			break;
365 	}
366 
367 	f2fs_bug_on(sbi, list_empty(head) || cur->page != page);
368 	list_del(&cur->list);
369 	mutex_unlock(&fi->inmem_lock);
370 
371 	dec_page_count(sbi, F2FS_INMEM_PAGES);
372 	kmem_cache_free(inmem_entry_slab, cur);
373 
374 	ClearPageUptodate(page);
375 	clear_page_private_atomic(page);
376 	f2fs_put_page(page, 0);
377 
378 	detach_page_private(page);
379 	set_page_private(page, 0);
380 
381 	trace_f2fs_commit_inmem_page(page, INMEM_INVALIDATE);
382 }
383 
384 static int __f2fs_commit_inmem_pages(struct inode *inode)
385 {
386 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
387 	struct f2fs_inode_info *fi = F2FS_I(inode);
388 	struct inmem_pages *cur, *tmp;
389 	struct f2fs_io_info fio = {
390 		.sbi = sbi,
391 		.ino = inode->i_ino,
392 		.type = DATA,
393 		.op = REQ_OP_WRITE,
394 		.op_flags = REQ_SYNC | REQ_PRIO,
395 		.io_type = FS_DATA_IO,
396 	};
397 	struct list_head revoke_list;
398 	bool submit_bio = false;
399 	int err = 0;
400 
401 	INIT_LIST_HEAD(&revoke_list);
402 
403 	list_for_each_entry_safe(cur, tmp, &fi->inmem_pages, list) {
404 		struct page *page = cur->page;
405 
406 		lock_page(page);
407 		if (page->mapping == inode->i_mapping) {
408 			trace_f2fs_commit_inmem_page(page, INMEM);
409 
410 			f2fs_wait_on_page_writeback(page, DATA, true, true);
411 
412 			set_page_dirty(page);
413 			if (clear_page_dirty_for_io(page)) {
414 				inode_dec_dirty_pages(inode);
415 				f2fs_remove_dirty_inode(inode);
416 			}
417 retry:
418 			fio.page = page;
419 			fio.old_blkaddr = NULL_ADDR;
420 			fio.encrypted_page = NULL;
421 			fio.need_lock = LOCK_DONE;
422 			err = f2fs_do_write_data_page(&fio);
423 			if (err) {
424 				if (err == -ENOMEM) {
425 					memalloc_retry_wait(GFP_NOFS);
426 					goto retry;
427 				}
428 				unlock_page(page);
429 				break;
430 			}
431 			/* record old blkaddr for revoking */
432 			cur->old_addr = fio.old_blkaddr;
433 			submit_bio = true;
434 		}
435 		unlock_page(page);
436 		list_move_tail(&cur->list, &revoke_list);
437 	}
438 
439 	if (submit_bio)
440 		f2fs_submit_merged_write_cond(sbi, inode, NULL, 0, DATA);
441 
442 	if (err) {
443 		/*
444 		 * try to revoke all committed pages, but we could still fail
445 		 * due to lack of memory or some other reason. If that happens,
446 		 * EAGAIN is returned; the transaction has then lost its
447 		 * integrity, and the caller should use a journal to recover,
448 		 * or rewrite & commit the last transaction. For any other
449 		 * error number, the filesystem has already done the revoking.
450 		 */
451 		err = __revoke_inmem_pages(inode, &revoke_list,
452 						false, true, false);
453 
454 		/* drop all uncommitted pages */
455 		__revoke_inmem_pages(inode, &fi->inmem_pages,
456 						true, false, false);
457 	} else {
458 		__revoke_inmem_pages(inode, &revoke_list,
459 						false, false, false);
460 	}
461 
462 	return err;
463 }
464 
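/*
 * Lock ordering here is i_gc_rwsem[WRITE] -> cp_rwsem (via f2fs_lock_op)
 * -> inmem_lock, so both checkpoint and GC are excluded while the atomic
 * pages are written back and, on failure, revoked.
 */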
465 int f2fs_commit_inmem_pages(struct inode *inode)
466 {
467 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
468 	struct f2fs_inode_info *fi = F2FS_I(inode);
469 	int err;
470 
471 	f2fs_balance_fs(sbi, true);
472 
473 	f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
474 
475 	f2fs_lock_op(sbi);
476 	set_inode_flag(inode, FI_ATOMIC_COMMIT);
477 
478 	mutex_lock(&fi->inmem_lock);
479 	err = __f2fs_commit_inmem_pages(inode);
480 	mutex_unlock(&fi->inmem_lock);
481 
482 	clear_inode_flag(inode, FI_ATOMIC_COMMIT);
483 
484 	f2fs_unlock_op(sbi);
485 	f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
486 
487 	return err;
488 }
489 
490 /*
491  * This function balances dirty node and dentry pages.
492  * In addition, it controls garbage collection.
493  */
494 void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
495 {
496 	if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
497 		f2fs_show_injection_info(sbi, FAULT_CHECKPOINT);
498 		f2fs_stop_checkpoint(sbi, false);
499 	}
500 
501 	/* balance_fs_bg may be left pending */
502 	if (need && excess_cached_nats(sbi))
503 		f2fs_balance_fs_bg(sbi, false);
504 
505 	if (!f2fs_is_checkpoint_ready(sbi))
506 		return;
507 
508 	/*
509 	 * We should do GC, or end up with a checkpoint, if there are too many
510 	 * dirty dir/node pages and not enough free segments.
511 	 */
512 	if (has_not_enough_free_secs(sbi, 0, 0)) {
513 		if (test_opt(sbi, GC_MERGE) && sbi->gc_thread &&
514 					sbi->gc_thread->f2fs_gc_task) {
515 			DEFINE_WAIT(wait);
516 
517 			prepare_to_wait(&sbi->gc_thread->fggc_wq, &wait,
518 						TASK_UNINTERRUPTIBLE);
519 			wake_up(&sbi->gc_thread->gc_wait_queue_head);
520 			io_schedule();
521 			finish_wait(&sbi->gc_thread->fggc_wq, &wait);
522 		} else {
523 			f2fs_down_write(&sbi->gc_lock);
524 			f2fs_gc(sbi, false, false, false, NULL_SEGNO);
525 		}
526 	}
527 }
528 
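/*
 * Example of the threshold below (illustrative numbers): with 512 blocks
 * per segment and no checkpoint in flight (factor = 2), each dirty-page
 * counter is compared against 512 * 2 * DEFAULT_DIRTY_THRESHOLD blocks,
 * and the combined total against 1.5x that value.
 */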
529 static inline bool excess_dirty_threshold(struct f2fs_sb_info *sbi)
530 {
531 	int factor = f2fs_rwsem_is_locked(&sbi->cp_rwsem) ? 3 : 2;
532 	unsigned int dents = get_pages(sbi, F2FS_DIRTY_DENTS);
533 	unsigned int qdata = get_pages(sbi, F2FS_DIRTY_QDATA);
534 	unsigned int nodes = get_pages(sbi, F2FS_DIRTY_NODES);
535 	unsigned int meta = get_pages(sbi, F2FS_DIRTY_META);
536 	unsigned int imeta = get_pages(sbi, F2FS_DIRTY_IMETA);
537 	unsigned int threshold = sbi->blocks_per_seg * factor *
538 					DEFAULT_DIRTY_THRESHOLD;
539 	unsigned int global_threshold = threshold * 3 / 2;
540 
541 	if (dents >= threshold || qdata >= threshold ||
542 		nodes >= threshold || meta >= threshold ||
543 		imeta >= threshold)
544 		return true;
545 	return dents + qdata + nodes + meta + imeta > global_threshold;
546 }
547 
548 void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg)
549 {
550 	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
551 		return;
552 
553 	/* try to shrink the extent cache when there is not enough memory */
554 	if (!f2fs_available_free_memory(sbi, EXTENT_CACHE))
555 		f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER);
556 
557 	/* check the # of cached NAT entries */
558 	if (!f2fs_available_free_memory(sbi, NAT_ENTRIES))
559 		f2fs_try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK);
560 
561 	if (!f2fs_available_free_memory(sbi, FREE_NIDS))
562 		f2fs_try_to_free_nids(sbi, MAX_FREE_NIDS);
563 	else
564 		f2fs_build_free_nids(sbi, false, false);
565 
566 	if (excess_dirty_nats(sbi) || excess_dirty_threshold(sbi) ||
567 		excess_prefree_segs(sbi) || !f2fs_space_for_roll_forward(sbi))
568 		goto do_sync;
569 
570 	/* there is in-flight background IO, or a recent foreground operation */
571 	if (is_inflight_io(sbi, REQ_TIME) ||
572 		(!f2fs_time_over(sbi, REQ_TIME) && f2fs_rwsem_is_locked(&sbi->cp_rwsem)))
573 		return;
574 
575 	/* the periodic checkpoint timeout threshold has been exceeded */
576 	if (f2fs_time_over(sbi, CP_TIME))
577 		goto do_sync;
578 
579 	/* checkpoint is the only way to shrink partial cached entries */
580 	if (f2fs_available_free_memory(sbi, NAT_ENTRIES) &&
581 		f2fs_available_free_memory(sbi, INO_ENTRIES))
582 		return;
583 
584 do_sync:
585 	if (test_opt(sbi, DATA_FLUSH) && from_bg) {
586 		struct blk_plug plug;
587 
588 		mutex_lock(&sbi->flush_lock);
589 
590 		blk_start_plug(&plug);
591 		f2fs_sync_dirty_inodes(sbi, FILE_INODE);
592 		blk_finish_plug(&plug);
593 
594 		mutex_unlock(&sbi->flush_lock);
595 	}
596 	f2fs_sync_fs(sbi->sb, true);
597 	stat_inc_bg_cp_count(sbi->stat_info);
598 }
599 
600 static int __submit_flush_wait(struct f2fs_sb_info *sbi,
601 				struct block_device *bdev)
602 {
603 	int ret = blkdev_issue_flush(bdev);
604 
605 	trace_f2fs_issue_flush(bdev, test_opt(sbi, NOBARRIER),
606 				test_opt(sbi, FLUSH_MERGE), ret);
607 	return ret;
608 }
609 
610 static int submit_flush_wait(struct f2fs_sb_info *sbi, nid_t ino)
611 {
612 	int ret = 0;
613 	int i;
614 
615 	if (!f2fs_is_multi_device(sbi))
616 		return __submit_flush_wait(sbi, sbi->sb->s_bdev);
617 
618 	for (i = 0; i < sbi->s_ndevs; i++) {
619 		if (!f2fs_is_dirty_device(sbi, ino, i, FLUSH_INO))
620 			continue;
621 		ret = __submit_flush_wait(sbi, FDEV(i).bdev);
622 		if (ret)
623 			break;
624 	}
625 	return ret;
626 }
627 
628 static int issue_flush_thread(void *data)
629 {
630 	struct f2fs_sb_info *sbi = data;
631 	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
632 	wait_queue_head_t *q = &fcc->flush_wait_queue;
633 repeat:
634 	if (kthread_should_stop())
635 		return 0;
636 
637 	if (!llist_empty(&fcc->issue_list)) {
638 		struct flush_cmd *cmd, *next;
639 		int ret;
640 
641 		fcc->dispatch_list = llist_del_all(&fcc->issue_list);
642 		fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);
643 
644 		cmd = llist_entry(fcc->dispatch_list, struct flush_cmd, llnode);
645 
646 		ret = submit_flush_wait(sbi, cmd->ino);
647 		atomic_inc(&fcc->issued_flush);
648 
649 		llist_for_each_entry_safe(cmd, next,
650 					  fcc->dispatch_list, llnode) {
651 			cmd->ret = ret;
652 			complete(&cmd->wait);
653 		}
654 		fcc->dispatch_list = NULL;
655 	}
656 
657 	wait_event_interruptible(*q,
658 		kthread_should_stop() || !llist_empty(&fcc->issue_list));
659 	goto repeat;
660 }
661 
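/*
 * With FLUSH_MERGE enabled, concurrent callers funnel their requests
 * into fcc->issue_list; a single flush issued by the kthread (or by a
 * caller that grabs the whole list when the kthread is gone) then
 * completes every queued waiter with the same return code, so one
 * device flush can serve many fsync()s.
 */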
662 int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino)
663 {
664 	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
665 	struct flush_cmd cmd;
666 	int ret;
667 
668 	if (test_opt(sbi, NOBARRIER))
669 		return 0;
670 
671 	if (!test_opt(sbi, FLUSH_MERGE)) {
672 		atomic_inc(&fcc->queued_flush);
673 		ret = submit_flush_wait(sbi, ino);
674 		atomic_dec(&fcc->queued_flush);
675 		atomic_inc(&fcc->issued_flush);
676 		return ret;
677 	}
678 
679 	if (atomic_inc_return(&fcc->queued_flush) == 1 ||
680 	    f2fs_is_multi_device(sbi)) {
681 		ret = submit_flush_wait(sbi, ino);
682 		atomic_dec(&fcc->queued_flush);
683 
684 		atomic_inc(&fcc->issued_flush);
685 		return ret;
686 	}
687 
688 	cmd.ino = ino;
689 	init_completion(&cmd.wait);
690 
691 	llist_add(&cmd.llnode, &fcc->issue_list);
692 
693 	/*
694 	 * make sure issue_list is updated before we wake up the issue_flush
695 	 * thread; this smp_mb() pairs with another barrier in
696 	 * ___wait_event(), see the comments of waitqueue_active() for details.
697 	 */
698 	smp_mb();
699 
700 	if (waitqueue_active(&fcc->flush_wait_queue))
701 		wake_up(&fcc->flush_wait_queue);
702 
703 	if (fcc->f2fs_issue_flush) {
704 		wait_for_completion(&cmd.wait);
705 		atomic_dec(&fcc->queued_flush);
706 	} else {
707 		struct llist_node *list;
708 
709 		list = llist_del_all(&fcc->issue_list);
710 		if (!list) {
711 			wait_for_completion(&cmd.wait);
712 			atomic_dec(&fcc->queued_flush);
713 		} else {
714 			struct flush_cmd *tmp, *next;
715 
716 			ret = submit_flush_wait(sbi, ino);
717 
718 			llist_for_each_entry_safe(tmp, next, list, llnode) {
719 				if (tmp == &cmd) {
720 					cmd.ret = ret;
721 					atomic_dec(&fcc->queued_flush);
722 					continue;
723 				}
724 				tmp->ret = ret;
725 				complete(&tmp->wait);
726 			}
727 		}
728 	}
729 
730 	return cmd.ret;
731 }
732 
733 int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi)
734 {
735 	dev_t dev = sbi->sb->s_bdev->bd_dev;
736 	struct flush_cmd_control *fcc;
737 	int err = 0;
738 
739 	if (SM_I(sbi)->fcc_info) {
740 		fcc = SM_I(sbi)->fcc_info;
741 		if (fcc->f2fs_issue_flush)
742 			return err;
743 		goto init_thread;
744 	}
745 
746 	fcc = f2fs_kzalloc(sbi, sizeof(struct flush_cmd_control), GFP_KERNEL);
747 	if (!fcc)
748 		return -ENOMEM;
749 	atomic_set(&fcc->issued_flush, 0);
750 	atomic_set(&fcc->queued_flush, 0);
751 	init_waitqueue_head(&fcc->flush_wait_queue);
752 	init_llist_head(&fcc->issue_list);
753 	SM_I(sbi)->fcc_info = fcc;
754 	if (!test_opt(sbi, FLUSH_MERGE))
755 		return err;
756 
757 init_thread:
758 	fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
759 				"f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
760 	if (IS_ERR(fcc->f2fs_issue_flush)) {
761 		err = PTR_ERR(fcc->f2fs_issue_flush);
762 		kfree(fcc);
763 		SM_I(sbi)->fcc_info = NULL;
764 		return err;
765 	}
766 
767 	return err;
768 }
769 
770 void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free)
771 {
772 	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
773 
774 	if (fcc && fcc->f2fs_issue_flush) {
775 		struct task_struct *flush_thread = fcc->f2fs_issue_flush;
776 
777 		fcc->f2fs_issue_flush = NULL;
778 		kthread_stop(flush_thread);
779 	}
780 	if (free) {
781 		kfree(fcc);
782 		SM_I(sbi)->fcc_info = NULL;
783 	}
784 }
785 
786 int f2fs_flush_device_cache(struct f2fs_sb_info *sbi)
787 {
788 	int ret = 0, i;
789 
790 	if (!f2fs_is_multi_device(sbi))
791 		return 0;
792 
793 	if (test_opt(sbi, NOBARRIER))
794 		return 0;
795 
796 	for (i = 1; i < sbi->s_ndevs; i++) {
797 		int count = DEFAULT_RETRY_IO_COUNT;
798 
799 		if (!f2fs_test_bit(i, (char *)&sbi->dirty_device))
800 			continue;
801 
802 		do {
803 			ret = __submit_flush_wait(sbi, FDEV(i).bdev);
804 			if (ret)
805 				f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
806 		} while (ret && --count);
807 
808 		if (ret) {
809 			f2fs_stop_checkpoint(sbi, false);
810 			break;
811 		}
812 
813 		spin_lock(&sbi->dev_lock);
814 		f2fs_clear_bit(i, (char *)&sbi->dirty_device);
815 		spin_unlock(&sbi->dev_lock);
816 	}
817 
818 	return ret;
819 }
820 
821 static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
822 		enum dirty_type dirty_type)
823 {
824 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
825 
826 	/* need not be added */
827 	if (IS_CURSEG(sbi, segno))
828 		return;
829 
830 	if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
831 		dirty_i->nr_dirty[dirty_type]++;
832 
833 	if (dirty_type == DIRTY) {
834 		struct seg_entry *sentry = get_seg_entry(sbi, segno);
835 		enum dirty_type t = sentry->type;
836 
837 		if (unlikely(t >= DIRTY)) {
838 			f2fs_bug_on(sbi, 1);
839 			return;
840 		}
841 		if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t]))
842 			dirty_i->nr_dirty[t]++;
843 
844 		if (__is_large_section(sbi)) {
845 			unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
846 			block_t valid_blocks =
847 				get_valid_blocks(sbi, segno, true);
848 
849 			f2fs_bug_on(sbi, unlikely(!valid_blocks ||
850 					valid_blocks == BLKS_PER_SEC(sbi)));
851 
852 			if (!IS_CURSEC(sbi, secno))
853 				set_bit(secno, dirty_i->dirty_secmap);
854 		}
855 	}
856 }
857 
858 static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
859 		enum dirty_type dirty_type)
860 {
861 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
862 	block_t valid_blocks;
863 
864 	if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type]))
865 		dirty_i->nr_dirty[dirty_type]--;
866 
867 	if (dirty_type == DIRTY) {
868 		struct seg_entry *sentry = get_seg_entry(sbi, segno);
869 		enum dirty_type t = sentry->type;
870 
871 		if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
872 			dirty_i->nr_dirty[t]--;
873 
874 		valid_blocks = get_valid_blocks(sbi, segno, true);
875 		if (valid_blocks == 0) {
876 			clear_bit(GET_SEC_FROM_SEG(sbi, segno),
877 						dirty_i->victim_secmap);
878 #ifdef CONFIG_F2FS_CHECK_FS
879 			clear_bit(segno, SIT_I(sbi)->invalid_segmap);
880 #endif
881 		}
882 		if (__is_large_section(sbi)) {
883 			unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
884 
885 			if (!valid_blocks ||
886 					valid_blocks == BLKS_PER_SEC(sbi)) {
887 				clear_bit(secno, dirty_i->dirty_secmap);
888 				return;
889 			}
890 
891 			if (!IS_CURSEC(sbi, secno))
892 				set_bit(secno, dirty_i->dirty_secmap);
893 		}
894 	}
895 }
896 
897 /*
898  * This should not fail with an error such as -ENOMEM.
899  * Adding a dirty entry to the seglist is not a critical operation.
900  * If a given segment is one of the current working segments, it won't be added.
901  */
902 static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
903 {
904 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
905 	unsigned short valid_blocks, ckpt_valid_blocks;
906 	unsigned int usable_blocks;
907 
908 	if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
909 		return;
910 
911 	usable_blocks = f2fs_usable_blks_in_seg(sbi, segno);
912 	mutex_lock(&dirty_i->seglist_lock);
913 
914 	valid_blocks = get_valid_blocks(sbi, segno, false);
915 	ckpt_valid_blocks = get_ckpt_valid_blocks(sbi, segno, false);
916 
917 	if (valid_blocks == 0 && (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) ||
918 		ckpt_valid_blocks == usable_blocks)) {
919 		__locate_dirty_segment(sbi, segno, PRE);
920 		__remove_dirty_segment(sbi, segno, DIRTY);
921 	} else if (valid_blocks < usable_blocks) {
922 		__locate_dirty_segment(sbi, segno, DIRTY);
923 	} else {
924 		/* Recovery routine with SSR needs this */
925 		__remove_dirty_segment(sbi, segno, DIRTY);
926 	}
927 
928 	mutex_unlock(&dirty_i->seglist_lock);
929 }
930 
931 /* This moves currently empty dirty segments to prefree; it takes seglist_lock itself */
932 void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi)
933 {
934 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
935 	unsigned int segno;
936 
937 	mutex_lock(&dirty_i->seglist_lock);
938 	for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
939 		if (get_valid_blocks(sbi, segno, false))
940 			continue;
941 		if (IS_CURSEG(sbi, segno))
942 			continue;
943 		__locate_dirty_segment(sbi, segno, PRE);
944 		__remove_dirty_segment(sbi, segno, DIRTY);
945 	}
946 	mutex_unlock(&dirty_i->seglist_lock);
947 }
948 
949 block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi)
950 {
951 	int ovp_hole_segs =
952 		(overprovision_segments(sbi) - reserved_segments(sbi));
953 	block_t ovp_holes = ovp_hole_segs << sbi->log_blocks_per_seg;
954 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
955 	block_t holes[2] = {0, 0};	/* DATA and NODE */
956 	block_t unusable;
957 	struct seg_entry *se;
958 	unsigned int segno;
959 
960 	mutex_lock(&dirty_i->seglist_lock);
961 	for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
962 		se = get_seg_entry(sbi, segno);
963 		if (IS_NODESEG(se->type))
964 			holes[NODE] += f2fs_usable_blks_in_seg(sbi, segno) -
965 							se->valid_blocks;
966 		else
967 			holes[DATA] += f2fs_usable_blks_in_seg(sbi, segno) -
968 							se->valid_blocks;
969 	}
970 	mutex_unlock(&dirty_i->seglist_lock);
971 
972 	unusable = holes[DATA] > holes[NODE] ? holes[DATA] : holes[NODE];
973 	if (unusable > ovp_holes)
974 		return unusable - ovp_holes;
975 	return 0;
976 }
977 
978 int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable)
979 {
980 	int ovp_hole_segs =
981 		(overprovision_segments(sbi) - reserved_segments(sbi));
982 	if (unusable > F2FS_OPTION(sbi).unusable_cap)
983 		return -EAGAIN;
984 	if (is_sbi_flag_set(sbi, SBI_CP_DISABLED_QUICK) &&
985 		dirty_segments(sbi) > ovp_hole_segs)
986 		return -EAGAIN;
987 	return 0;
988 }
989 
990 /* This is only used by SBI_CP_DISABLED */
991 static unsigned int get_free_segment(struct f2fs_sb_info *sbi)
992 {
993 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
994 	unsigned int segno = 0;
995 
996 	mutex_lock(&dirty_i->seglist_lock);
997 	for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
998 		if (get_valid_blocks(sbi, segno, false))
999 			continue;
1000 		if (get_ckpt_valid_blocks(sbi, segno, false))
1001 			continue;
1002 		mutex_unlock(&dirty_i->seglist_lock);
1003 		return segno;
1004 	}
1005 	mutex_unlock(&dirty_i->seglist_lock);
1006 	return NULL_SEGNO;
1007 }
1008 
1009 static struct discard_cmd *__create_discard_cmd(struct f2fs_sb_info *sbi,
1010 		struct block_device *bdev, block_t lstart,
1011 		block_t start, block_t len)
1012 {
1013 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1014 	struct list_head *pend_list;
1015 	struct discard_cmd *dc;
1016 
1017 	f2fs_bug_on(sbi, !len);
1018 
1019 	pend_list = &dcc->pend_list[plist_idx(len)];
1020 
1021 	dc = f2fs_kmem_cache_alloc(discard_cmd_slab, GFP_NOFS, true, NULL);
1022 	INIT_LIST_HEAD(&dc->list);
1023 	dc->bdev = bdev;
1024 	dc->lstart = lstart;
1025 	dc->start = start;
1026 	dc->len = len;
1027 	dc->ref = 0;
1028 	dc->state = D_PREP;
1029 	dc->queued = 0;
1030 	dc->error = 0;
1031 	init_completion(&dc->wait);
1032 	list_add_tail(&dc->list, pend_list);
1033 	spin_lock_init(&dc->lock);
1034 	dc->bio_ref = 0;
1035 	atomic_inc(&dcc->discard_cmd_cnt);
1036 	dcc->undiscard_blks += len;
1037 
1038 	return dc;
1039 }
1040 
1041 static struct discard_cmd *__attach_discard_cmd(struct f2fs_sb_info *sbi,
1042 				struct block_device *bdev, block_t lstart,
1043 				block_t start, block_t len,
1044 				struct rb_node *parent, struct rb_node **p,
1045 				bool leftmost)
1046 {
1047 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1048 	struct discard_cmd *dc;
1049 
1050 	dc = __create_discard_cmd(sbi, bdev, lstart, start, len);
1051 
1052 	rb_link_node(&dc->rb_node, parent, p);
1053 	rb_insert_color_cached(&dc->rb_node, &dcc->root, leftmost);
1054 
1055 	return dc;
1056 }
1057 
1058 static void __detach_discard_cmd(struct discard_cmd_control *dcc,
1059 							struct discard_cmd *dc)
1060 {
1061 	if (dc->state == D_DONE)
1062 		atomic_sub(dc->queued, &dcc->queued_discard);
1063 
1064 	list_del(&dc->list);
1065 	rb_erase_cached(&dc->rb_node, &dcc->root);
1066 	dcc->undiscard_blks -= dc->len;
1067 
1068 	kmem_cache_free(discard_cmd_slab, dc);
1069 
1070 	atomic_dec(&dcc->discard_cmd_cnt);
1071 }
1072 
1073 static void __remove_discard_cmd(struct f2fs_sb_info *sbi,
1074 							struct discard_cmd *dc)
1075 {
1076 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1077 	unsigned long flags;
1078 
1079 	trace_f2fs_remove_discard(dc->bdev, dc->start, dc->len);
1080 
1081 	spin_lock_irqsave(&dc->lock, flags);
1082 	if (dc->bio_ref) {
1083 		spin_unlock_irqrestore(&dc->lock, flags);
1084 		return;
1085 	}
1086 	spin_unlock_irqrestore(&dc->lock, flags);
1087 
1088 	f2fs_bug_on(sbi, dc->ref);
1089 
1090 	if (dc->error == -EOPNOTSUPP)
1091 		dc->error = 0;
1092 
1093 	if (dc->error)
1094 		printk_ratelimited(
1095 			"%sF2FS-fs (%s): Issue discard(%u, %u, %u) failed, ret: %d",
1096 			KERN_INFO, sbi->sb->s_id,
1097 			dc->lstart, dc->start, dc->len, dc->error);
1098 	__detach_discard_cmd(dcc, dc);
1099 }
1100 
1101 static void f2fs_submit_discard_endio(struct bio *bio)
1102 {
1103 	struct discard_cmd *dc = (struct discard_cmd *)bio->bi_private;
1104 	unsigned long flags;
1105 
1106 	spin_lock_irqsave(&dc->lock, flags);
1107 	if (!dc->error)
1108 		dc->error = blk_status_to_errno(bio->bi_status);
1109 	dc->bio_ref--;
1110 	if (!dc->bio_ref && dc->state == D_SUBMIT) {
1111 		dc->state = D_DONE;
1112 		complete_all(&dc->wait);
1113 	}
1114 	spin_unlock_irqrestore(&dc->lock, flags);
1115 	bio_put(bio);
1116 }
1117 
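/*
 * Debug-only sanity check (compiled under CONFIG_F2FS_CHECK_FS): walk
 * the SIT bitmaps and assert that no block inside the range about to be
 * discarded is still marked valid.
 */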
1118 static void __check_sit_bitmap(struct f2fs_sb_info *sbi,
1119 				block_t start, block_t end)
1120 {
1121 #ifdef CONFIG_F2FS_CHECK_FS
1122 	struct seg_entry *sentry;
1123 	unsigned int segno;
1124 	block_t blk = start;
1125 	unsigned long offset, size, max_blocks = sbi->blocks_per_seg;
1126 	unsigned long *map;
1127 
1128 	while (blk < end) {
1129 		segno = GET_SEGNO(sbi, blk);
1130 		sentry = get_seg_entry(sbi, segno);
1131 		offset = GET_BLKOFF_FROM_SEG0(sbi, blk);
1132 
1133 		if (end < START_BLOCK(sbi, segno + 1))
1134 			size = GET_BLKOFF_FROM_SEG0(sbi, end);
1135 		else
1136 			size = max_blocks;
1137 		map = (unsigned long *)(sentry->cur_valid_map);
1138 		offset = __find_rev_next_bit(map, size, offset);
1139 		f2fs_bug_on(sbi, offset != size);
1140 		blk = START_BLOCK(sbi, segno + 1);
1141 	}
1142 #endif
1143 }
1144 
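/*
 * Rough summary of the policies set up below: DPOLICY_BG is async,
 * ordered and io-aware for background trimming; DPOLICY_FORCE keeps the
 * BG intervals but ignores idle-IO checks; DPOLICY_FSTRIM serves FITRIM;
 * DPOLICY_UMOUNT issues everything with granularity 1 (under a timeout)
 * so CP_TRIMMED_FLAG can be kept.
 */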
1145 static void __init_discard_policy(struct f2fs_sb_info *sbi,
1146 				struct discard_policy *dpolicy,
1147 				int discard_type, unsigned int granularity)
1148 {
1149 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1150 
1151 	/* common policy */
1152 	dpolicy->type = discard_type;
1153 	dpolicy->sync = true;
1154 	dpolicy->ordered = false;
1155 	dpolicy->granularity = granularity;
1156 
1157 	dpolicy->max_requests = dcc->max_discard_request;
1158 	dpolicy->io_aware_gran = MAX_PLIST_NUM;
1159 	dpolicy->timeout = false;
1160 
1161 	if (discard_type == DPOLICY_BG) {
1162 		dpolicy->min_interval = dcc->min_discard_issue_time;
1163 		dpolicy->mid_interval = dcc->mid_discard_issue_time;
1164 		dpolicy->max_interval = dcc->max_discard_issue_time;
1165 		dpolicy->io_aware = true;
1166 		dpolicy->sync = false;
1167 		dpolicy->ordered = true;
1168 		if (utilization(sbi) > DEF_DISCARD_URGENT_UTIL) {
1169 			dpolicy->granularity = 1;
1170 			if (atomic_read(&dcc->discard_cmd_cnt))
1171 				dpolicy->max_interval =
1172 					dcc->min_discard_issue_time;
1173 		}
1174 	} else if (discard_type == DPOLICY_FORCE) {
1175 		dpolicy->min_interval = dcc->min_discard_issue_time;
1176 		dpolicy->mid_interval = dcc->mid_discard_issue_time;
1177 		dpolicy->max_interval = dcc->max_discard_issue_time;
1178 		dpolicy->io_aware = false;
1179 	} else if (discard_type == DPOLICY_FSTRIM) {
1180 		dpolicy->io_aware = false;
1181 	} else if (discard_type == DPOLICY_UMOUNT) {
1182 		dpolicy->io_aware = false;
1183 		/* we need to issue all to keep CP_TRIMMED_FLAG */
1184 		dpolicy->granularity = 1;
1185 		dpolicy->timeout = true;
1186 	}
1187 }
1188 
1189 static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
1190 				struct block_device *bdev, block_t lstart,
1191 				block_t start, block_t len);
1192 /* this function is copied from blkdev_issue_discard in block/blk-lib.c */
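/*
 * A command longer than the device's max_discard_sectors is split into
 * several bios: intermediate pieces leave the command in D_PARTIAL and
 * only the last one moves it to D_SUBMIT, so the endio handler signals
 * completion once bio_ref drops to zero on a submitted command.
 */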
1193 static int __submit_discard_cmd(struct f2fs_sb_info *sbi,
1194 						struct discard_policy *dpolicy,
1195 						struct discard_cmd *dc,
1196 						unsigned int *issued)
1197 {
1198 	struct block_device *bdev = dc->bdev;
1199 	struct request_queue *q = bdev_get_queue(bdev);
1200 	unsigned int max_discard_blocks =
1201 			SECTOR_TO_BLOCK(q->limits.max_discard_sectors);
1202 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1203 	struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ?
1204 					&(dcc->fstrim_list) : &(dcc->wait_list);
1205 	int flag = dpolicy->sync ? REQ_SYNC : 0;
1206 	block_t lstart, start, len, total_len;
1207 	int err = 0;
1208 
1209 	if (dc->state != D_PREP)
1210 		return 0;
1211 
1212 	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
1213 		return 0;
1214 
1215 	trace_f2fs_issue_discard(bdev, dc->start, dc->len);
1216 
1217 	lstart = dc->lstart;
1218 	start = dc->start;
1219 	len = dc->len;
1220 	total_len = len;
1221 
1222 	dc->len = 0;
1223 
1224 	while (total_len && *issued < dpolicy->max_requests && !err) {
1225 		struct bio *bio = NULL;
1226 		unsigned long flags;
1227 		bool last = true;
1228 
1229 		if (len > max_discard_blocks) {
1230 			len = max_discard_blocks;
1231 			last = false;
1232 		}
1233 
1234 		(*issued)++;
1235 		if (*issued == dpolicy->max_requests)
1236 			last = true;
1237 
1238 		dc->len += len;
1239 
1240 		if (time_to_inject(sbi, FAULT_DISCARD)) {
1241 			f2fs_show_injection_info(sbi, FAULT_DISCARD);
1242 			err = -EIO;
1243 			goto submit;
1244 		}
1245 		err = __blkdev_issue_discard(bdev,
1246 					SECTOR_FROM_BLOCK(start),
1247 					SECTOR_FROM_BLOCK(len),
1248 					GFP_NOFS, 0, &bio);
1249 submit:
1250 		if (err) {
1251 			spin_lock_irqsave(&dc->lock, flags);
1252 			if (dc->state == D_PARTIAL)
1253 				dc->state = D_SUBMIT;
1254 			spin_unlock_irqrestore(&dc->lock, flags);
1255 
1256 			break;
1257 		}
1258 
1259 		f2fs_bug_on(sbi, !bio);
1260 
1261 		/*
1262 		 * these must be set before submission, to avoid the command
1263 		 * reaching D_DONE right away
1264 		 */
1265 		spin_lock_irqsave(&dc->lock, flags);
1266 		if (last)
1267 			dc->state = D_SUBMIT;
1268 		else
1269 			dc->state = D_PARTIAL;
1270 		dc->bio_ref++;
1271 		spin_unlock_irqrestore(&dc->lock, flags);
1272 
1273 		atomic_inc(&dcc->queued_discard);
1274 		dc->queued++;
1275 		list_move_tail(&dc->list, wait_list);
1276 
1277 		/* sanity check on discard range */
1278 		__check_sit_bitmap(sbi, lstart, lstart + len);
1279 
1280 		bio->bi_private = dc;
1281 		bio->bi_end_io = f2fs_submit_discard_endio;
1282 		bio->bi_opf |= flag;
1283 		submit_bio(bio);
1284 
1285 		atomic_inc(&dcc->issued_discard);
1286 
1287 		f2fs_update_iostat(sbi, FS_DISCARD, 1);
1288 
1289 		lstart += len;
1290 		start += len;
1291 		total_len -= len;
1292 		len = total_len;
1293 	}
1294 
1295 	if (!err && len) {
1296 		dcc->undiscard_blks -= len;
1297 		__update_discard_tree_range(sbi, bdev, lstart, start, len);
1298 	}
1299 	return err;
1300 }
1301 
1302 static void __insert_discard_tree(struct f2fs_sb_info *sbi,
1303 				struct block_device *bdev, block_t lstart,
1304 				block_t start, block_t len,
1305 				struct rb_node **insert_p,
1306 				struct rb_node *insert_parent)
1307 {
1308 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1309 	struct rb_node **p;
1310 	struct rb_node *parent = NULL;
1311 	bool leftmost = true;
1312 
1313 	if (insert_p && insert_parent) {
1314 		parent = insert_parent;
1315 		p = insert_p;
1316 		goto do_insert;
1317 	}
1318 
1319 	p = f2fs_lookup_rb_tree_for_insert(sbi, &dcc->root, &parent,
1320 							lstart, &leftmost);
1321 do_insert:
1322 	__attach_discard_cmd(sbi, bdev, lstart, start, len, parent,
1323 								p, leftmost);
1324 }
1325 
1326 static void __relocate_discard_cmd(struct discard_cmd_control *dcc,
1327 						struct discard_cmd *dc)
1328 {
1329 	list_move_tail(&dc->list, &dcc->pend_list[plist_idx(dc->len)]);
1330 }
1331 
1332 static void __punch_discard_cmd(struct f2fs_sb_info *sbi,
1333 				struct discard_cmd *dc, block_t blkaddr)
1334 {
1335 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1336 	struct discard_info di = dc->di;
1337 	bool modified = false;
1338 
1339 	if (dc->state == D_DONE || dc->len == 1) {
1340 		__remove_discard_cmd(sbi, dc);
1341 		return;
1342 	}
1343 
1344 	dcc->undiscard_blks -= di.len;
1345 
1346 	if (blkaddr > di.lstart) {
1347 		dc->len = blkaddr - dc->lstart;
1348 		dcc->undiscard_blks += dc->len;
1349 		__relocate_discard_cmd(dcc, dc);
1350 		modified = true;
1351 	}
1352 
1353 	if (blkaddr < di.lstart + di.len - 1) {
1354 		if (modified) {
1355 			__insert_discard_tree(sbi, dc->bdev, blkaddr + 1,
1356 					di.start + blkaddr + 1 - di.lstart,
1357 					di.lstart + di.len - 1 - blkaddr,
1358 					NULL, NULL);
1359 		} else {
1360 			dc->lstart++;
1361 			dc->len--;
1362 			dc->start++;
1363 			dcc->undiscard_blks += dc->len;
1364 			__relocate_discard_cmd(dcc, dc);
1365 		}
1366 	}
1367 }
1368 
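/*
 * Insert [lstart, lstart + len) into the discard rb-tree, carving the
 * range around commands that already cover parts of it and merging with
 * a D_PREP neighbour on the same bdev when the combined length stays
 * within max_discard_blocks.
 */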
1369 static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
1370 				struct block_device *bdev, block_t lstart,
1371 				block_t start, block_t len)
1372 {
1373 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1374 	struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
1375 	struct discard_cmd *dc;
1376 	struct discard_info di = {0};
1377 	struct rb_node **insert_p = NULL, *insert_parent = NULL;
1378 	struct request_queue *q = bdev_get_queue(bdev);
1379 	unsigned int max_discard_blocks =
1380 			SECTOR_TO_BLOCK(q->limits.max_discard_sectors);
1381 	block_t end = lstart + len;
1382 
1383 	dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root,
1384 					NULL, lstart,
1385 					(struct rb_entry **)&prev_dc,
1386 					(struct rb_entry **)&next_dc,
1387 					&insert_p, &insert_parent, true, NULL);
1388 	if (dc)
1389 		prev_dc = dc;
1390 
1391 	if (!prev_dc) {
1392 		di.lstart = lstart;
1393 		di.len = next_dc ? next_dc->lstart - lstart : len;
1394 		di.len = min(di.len, len);
1395 		di.start = start;
1396 	}
1397 
1398 	while (1) {
1399 		struct rb_node *node;
1400 		bool merged = false;
1401 		struct discard_cmd *tdc = NULL;
1402 
1403 		if (prev_dc) {
1404 			di.lstart = prev_dc->lstart + prev_dc->len;
1405 			if (di.lstart < lstart)
1406 				di.lstart = lstart;
1407 			if (di.lstart >= end)
1408 				break;
1409 
1410 			if (!next_dc || next_dc->lstart > end)
1411 				di.len = end - di.lstart;
1412 			else
1413 				di.len = next_dc->lstart - di.lstart;
1414 			di.start = start + di.lstart - lstart;
1415 		}
1416 
1417 		if (!di.len)
1418 			goto next;
1419 
1420 		if (prev_dc && prev_dc->state == D_PREP &&
1421 			prev_dc->bdev == bdev &&
1422 			__is_discard_back_mergeable(&di, &prev_dc->di,
1423 							max_discard_blocks)) {
1424 			prev_dc->di.len += di.len;
1425 			dcc->undiscard_blks += di.len;
1426 			__relocate_discard_cmd(dcc, prev_dc);
1427 			di = prev_dc->di;
1428 			tdc = prev_dc;
1429 			merged = true;
1430 		}
1431 
1432 		if (next_dc && next_dc->state == D_PREP &&
1433 			next_dc->bdev == bdev &&
1434 			__is_discard_front_mergeable(&di, &next_dc->di,
1435 							max_discard_blocks)) {
1436 			next_dc->di.lstart = di.lstart;
1437 			next_dc->di.len += di.len;
1438 			next_dc->di.start = di.start;
1439 			dcc->undiscard_blks += di.len;
1440 			__relocate_discard_cmd(dcc, next_dc);
1441 			if (tdc)
1442 				__remove_discard_cmd(sbi, tdc);
1443 			merged = true;
1444 		}
1445 
1446 		if (!merged) {
1447 			__insert_discard_tree(sbi, bdev, di.lstart, di.start,
1448 							di.len, NULL, NULL);
1449 		}
1450 next:
1451 		prev_dc = next_dc;
1452 		if (!prev_dc)
1453 			break;
1454 
1455 		node = rb_next(&prev_dc->rb_node);
1456 		next_dc = rb_entry_safe(node, struct discard_cmd, rb_node);
1457 	}
1458 }
1459 
1460 static int __queue_discard_cmd(struct f2fs_sb_info *sbi,
1461 		struct block_device *bdev, block_t blkstart, block_t blklen)
1462 {
1463 	block_t lblkstart = blkstart;
1464 
1465 	if (!f2fs_bdev_support_discard(bdev))
1466 		return 0;
1467 
1468 	trace_f2fs_queue_discard(bdev, blkstart, blklen);
1469 
1470 	if (f2fs_is_multi_device(sbi)) {
1471 		int devi = f2fs_target_device_index(sbi, blkstart);
1472 
1473 		blkstart -= FDEV(devi).start_blk;
1474 	}
1475 	mutex_lock(&SM_I(sbi)->dcc_info->cmd_lock);
1476 	__update_discard_tree_range(sbi, bdev, lblkstart, blkstart, blklen);
1477 	mutex_unlock(&SM_I(sbi)->dcc_info->cmd_lock);
1478 	return 0;
1479 }
1480 
1481 static unsigned int __issue_discard_cmd_orderly(struct f2fs_sb_info *sbi,
1482 					struct discard_policy *dpolicy)
1483 {
1484 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1485 	struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
1486 	struct rb_node **insert_p = NULL, *insert_parent = NULL;
1487 	struct discard_cmd *dc;
1488 	struct blk_plug plug;
1489 	unsigned int pos = dcc->next_pos;
1490 	unsigned int issued = 0;
1491 	bool io_interrupted = false;
1492 
1493 	mutex_lock(&dcc->cmd_lock);
1494 	dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root,
1495 					NULL, pos,
1496 					(struct rb_entry **)&prev_dc,
1497 					(struct rb_entry **)&next_dc,
1498 					&insert_p, &insert_parent, true, NULL);
1499 	if (!dc)
1500 		dc = next_dc;
1501 
1502 	blk_start_plug(&plug);
1503 
1504 	while (dc) {
1505 		struct rb_node *node;
1506 		int err = 0;
1507 
1508 		if (dc->state != D_PREP)
1509 			goto next;
1510 
1511 		if (dpolicy->io_aware && !is_idle(sbi, DISCARD_TIME)) {
1512 			io_interrupted = true;
1513 			break;
1514 		}
1515 
1516 		dcc->next_pos = dc->lstart + dc->len;
1517 		err = __submit_discard_cmd(sbi, dpolicy, dc, &issued);
1518 
1519 		if (issued >= dpolicy->max_requests)
1520 			break;
1521 next:
1522 		node = rb_next(&dc->rb_node);
1523 		if (err)
1524 			__remove_discard_cmd(sbi, dc);
1525 		dc = rb_entry_safe(node, struct discard_cmd, rb_node);
1526 	}
1527 
1528 	blk_finish_plug(&plug);
1529 
1530 	if (!dc)
1531 		dcc->next_pos = 0;
1532 
1533 	mutex_unlock(&dcc->cmd_lock);
1534 
1535 	if (!issued && io_interrupted)
1536 		issued = -1;
1537 
1538 	return issued;
1539 }
1540 static unsigned int __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
1541 					struct discard_policy *dpolicy);
1542 
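/*
 * Walk the pending lists from the largest length class down to
 * dpolicy->granularity, stopping early when the device stops being idle
 * (io_aware) or max_requests commands have been issued; returns -1 when
 * nothing was issued purely because IO interrupted us.
 */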
1543 static int __issue_discard_cmd(struct f2fs_sb_info *sbi,
1544 					struct discard_policy *dpolicy)
1545 {
1546 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1547 	struct list_head *pend_list;
1548 	struct discard_cmd *dc, *tmp;
1549 	struct blk_plug plug;
1550 	int i, issued;
1551 	bool io_interrupted = false;
1552 
1553 	if (dpolicy->timeout)
1554 		f2fs_update_time(sbi, UMOUNT_DISCARD_TIMEOUT);
1555 
1556 retry:
1557 	issued = 0;
1558 	for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
1559 		if (dpolicy->timeout &&
1560 				f2fs_time_over(sbi, UMOUNT_DISCARD_TIMEOUT))
1561 			break;
1562 
1563 		if (i + 1 < dpolicy->granularity)
1564 			break;
1565 
1566 		if (i < DEFAULT_DISCARD_GRANULARITY && dpolicy->ordered)
1567 			return __issue_discard_cmd_orderly(sbi, dpolicy);
1568 
1569 		pend_list = &dcc->pend_list[i];
1570 
1571 		mutex_lock(&dcc->cmd_lock);
1572 		if (list_empty(pend_list))
1573 			goto next;
1574 		if (unlikely(dcc->rbtree_check))
1575 			f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi,
1576 							&dcc->root, false));
1577 		blk_start_plug(&plug);
1578 		list_for_each_entry_safe(dc, tmp, pend_list, list) {
1579 			f2fs_bug_on(sbi, dc->state != D_PREP);
1580 
1581 			if (dpolicy->timeout &&
1582 				f2fs_time_over(sbi, UMOUNT_DISCARD_TIMEOUT))
1583 				break;
1584 
1585 			if (dpolicy->io_aware && i < dpolicy->io_aware_gran &&
1586 						!is_idle(sbi, DISCARD_TIME)) {
1587 				io_interrupted = true;
1588 				break;
1589 			}
1590 
1591 			__submit_discard_cmd(sbi, dpolicy, dc, &issued);
1592 
1593 			if (issued >= dpolicy->max_requests)
1594 				break;
1595 		}
1596 		blk_finish_plug(&plug);
1597 next:
1598 		mutex_unlock(&dcc->cmd_lock);
1599 
1600 		if (issued >= dpolicy->max_requests || io_interrupted)
1601 			break;
1602 	}
1603 
1604 	if (dpolicy->type == DPOLICY_UMOUNT && issued) {
1605 		__wait_all_discard_cmd(sbi, dpolicy);
1606 		goto retry;
1607 	}
1608 
1609 	if (!issued && io_interrupted)
1610 		issued = -1;
1611 
1612 	return issued;
1613 }
1614 
1615 static bool __drop_discard_cmd(struct f2fs_sb_info *sbi)
1616 {
1617 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1618 	struct list_head *pend_list;
1619 	struct discard_cmd *dc, *tmp;
1620 	int i;
1621 	bool dropped = false;
1622 
1623 	mutex_lock(&dcc->cmd_lock);
1624 	for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
1625 		pend_list = &dcc->pend_list[i];
1626 		list_for_each_entry_safe(dc, tmp, pend_list, list) {
1627 			f2fs_bug_on(sbi, dc->state != D_PREP);
1628 			__remove_discard_cmd(sbi, dc);
1629 			dropped = true;
1630 		}
1631 	}
1632 	mutex_unlock(&dcc->cmd_lock);
1633 
1634 	return dropped;
1635 }
1636 
1637 void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi)
1638 {
1639 	__drop_discard_cmd(sbi);
1640 }
1641 
1642 static unsigned int __wait_one_discard_bio(struct f2fs_sb_info *sbi,
1643 							struct discard_cmd *dc)
1644 {
1645 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1646 	unsigned int len = 0;
1647 
1648 	wait_for_completion_io(&dc->wait);
1649 	mutex_lock(&dcc->cmd_lock);
1650 	f2fs_bug_on(sbi, dc->state != D_DONE);
1651 	dc->ref--;
1652 	if (!dc->ref) {
1653 		if (!dc->error)
1654 			len = dc->len;
1655 		__remove_discard_cmd(sbi, dc);
1656 	}
1657 	mutex_unlock(&dcc->cmd_lock);
1658 
1659 	return len;
1660 }
1661 
1662 static unsigned int __wait_discard_cmd_range(struct f2fs_sb_info *sbi,
1663 						struct discard_policy *dpolicy,
1664 						block_t start, block_t end)
1665 {
1666 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1667 	struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ?
1668 					&(dcc->fstrim_list) : &(dcc->wait_list);
1669 	struct discard_cmd *dc, *tmp;
1670 	bool need_wait;
1671 	unsigned int trimmed = 0;
1672 
1673 next:
1674 	need_wait = false;
1675 
1676 	mutex_lock(&dcc->cmd_lock);
1677 	list_for_each_entry_safe(dc, tmp, wait_list, list) {
1678 		if (dc->lstart + dc->len <= start || end <= dc->lstart)
1679 			continue;
1680 		if (dc->len < dpolicy->granularity)
1681 			continue;
1682 		if (dc->state == D_DONE && !dc->ref) {
1683 			wait_for_completion_io(&dc->wait);
1684 			if (!dc->error)
1685 				trimmed += dc->len;
1686 			__remove_discard_cmd(sbi, dc);
1687 		} else {
1688 			dc->ref++;
1689 			need_wait = true;
1690 			break;
1691 		}
1692 	}
1693 	mutex_unlock(&dcc->cmd_lock);
1694 
1695 	if (need_wait) {
1696 		trimmed += __wait_one_discard_bio(sbi, dc);
1697 		goto next;
1698 	}
1699 
1700 	return trimmed;
1701 }
1702 
1703 static unsigned int __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
1704 						struct discard_policy *dpolicy)
1705 {
1706 	struct discard_policy dp;
1707 	unsigned int discard_blks;
1708 
1709 	if (dpolicy)
1710 		return __wait_discard_cmd_range(sbi, dpolicy, 0, UINT_MAX);
1711 
1712 	/* wait all */
1713 	__init_discard_policy(sbi, &dp, DPOLICY_FSTRIM, 1);
1714 	discard_blks = __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
1715 	__init_discard_policy(sbi, &dp, DPOLICY_UMOUNT, 1);
1716 	discard_blks += __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
1717 
1718 	return discard_blks;
1719 }
1720 
1721 /* This should be covered by the global mutex, &sit_i->sentry_lock */
1722 static void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr)
1723 {
1724 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1725 	struct discard_cmd *dc;
1726 	bool need_wait = false;
1727 
1728 	mutex_lock(&dcc->cmd_lock);
1729 	dc = (struct discard_cmd *)f2fs_lookup_rb_tree(&dcc->root,
1730 							NULL, blkaddr);
1731 	if (dc) {
1732 		if (dc->state == D_PREP) {
1733 			__punch_discard_cmd(sbi, dc, blkaddr);
1734 		} else {
1735 			dc->ref++;
1736 			need_wait = true;
1737 		}
1738 	}
1739 	mutex_unlock(&dcc->cmd_lock);
1740 
1741 	if (need_wait)
1742 		__wait_one_discard_bio(sbi, dc);
1743 }
1744 
1745 void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi)
1746 {
1747 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1748 
1749 	if (dcc && dcc->f2fs_issue_discard) {
1750 		struct task_struct *discard_thread = dcc->f2fs_issue_discard;
1751 
1752 		dcc->f2fs_issue_discard = NULL;
1753 		kthread_stop(discard_thread);
1754 	}
1755 }
1756 
1757 /* This comes from f2fs_put_super */
1758 bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi)
1759 {
1760 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1761 	struct discard_policy dpolicy;
1762 	bool dropped;
1763 
1764 	__init_discard_policy(sbi, &dpolicy, DPOLICY_UMOUNT,
1765 					dcc->discard_granularity);
1766 	__issue_discard_cmd(sbi, &dpolicy);
1767 	dropped = __drop_discard_cmd(sbi);
1768 
1769 	/* just to make sure there are no pending discard commands */
1770 	__wait_all_discard_cmd(sbi, NULL);
1771 
1772 	f2fs_bug_on(sbi, atomic_read(&dcc->discard_cmd_cnt));
1773 	return dropped;
1774 }
1775 
1776 static int issue_discard_thread(void *data)
1777 {
1778 	struct f2fs_sb_info *sbi = data;
1779 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1780 	wait_queue_head_t *q = &dcc->discard_wait_queue;
1781 	struct discard_policy dpolicy;
1782 	unsigned int wait_ms = dcc->min_discard_issue_time;
1783 	int issued;
1784 
1785 	set_freezable();
1786 
1787 	do {
1788 		if (sbi->gc_mode == GC_URGENT_HIGH ||
1789 			!f2fs_available_free_memory(sbi, DISCARD_CACHE))
1790 			__init_discard_policy(sbi, &dpolicy, DPOLICY_FORCE, 1);
1791 		else
1792 			__init_discard_policy(sbi, &dpolicy, DPOLICY_BG,
1793 						dcc->discard_granularity);
1794 
1795 		if (!atomic_read(&dcc->discard_cmd_cnt))
1796 			wait_ms = dpolicy.max_interval;
1797 
1798 		wait_event_interruptible_timeout(*q,
1799 				kthread_should_stop() || freezing(current) ||
1800 				dcc->discard_wake,
1801 				msecs_to_jiffies(wait_ms));
1802 
1803 		if (dcc->discard_wake)
1804 			dcc->discard_wake = 0;
1805 
1806 		/* clean up pending candidates before going to sleep */
1807 		if (atomic_read(&dcc->queued_discard))
1808 			__wait_all_discard_cmd(sbi, NULL);
1809 
1810 		if (try_to_freeze())
1811 			continue;
1812 		if (f2fs_readonly(sbi->sb))
1813 			continue;
1814 		if (kthread_should_stop())
1815 			return 0;
1816 		if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
1817 			wait_ms = dpolicy.max_interval;
1818 			continue;
1819 		}
1820 		if (!atomic_read(&dcc->discard_cmd_cnt))
1821 			continue;
1822 
1823 		sb_start_intwrite(sbi->sb);
1824 
1825 		issued = __issue_discard_cmd(sbi, &dpolicy);
1826 		if (issued > 0) {
1827 			__wait_all_discard_cmd(sbi, &dpolicy);
1828 			wait_ms = dpolicy.min_interval;
1829 		} else if (issued == -1) {
1830 			wait_ms = f2fs_time_to_wait(sbi, DISCARD_TIME);
1831 			if (!wait_ms)
1832 				wait_ms = dpolicy.mid_interval;
1833 		} else {
1834 			wait_ms = dpolicy.max_interval;
1835 		}
1836 
1837 		sb_end_intwrite(sbi->sb);
1838 
1839 	} while (!kthread_should_stop());
1840 	return 0;
1841 }
1842 
1843 #ifdef CONFIG_BLK_DEV_ZONED
1844 static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
1845 		struct block_device *bdev, block_t blkstart, block_t blklen)
1846 {
1847 	sector_t sector, nr_sects;
1848 	block_t lblkstart = blkstart;
1849 	int devi = 0;
1850 
1851 	if (f2fs_is_multi_device(sbi)) {
1852 		devi = f2fs_target_device_index(sbi, blkstart);
1853 		if (blkstart < FDEV(devi).start_blk ||
1854 		    blkstart > FDEV(devi).end_blk) {
1855 			f2fs_err(sbi, "Invalid block %x", blkstart);
1856 			return -EIO;
1857 		}
1858 		blkstart -= FDEV(devi).start_blk;
1859 	}
1860 
1861 	/* For sequential zones, reset the zone write pointer */
1862 	if (f2fs_blkz_is_seq(sbi, devi, blkstart)) {
1863 		sector = SECTOR_FROM_BLOCK(blkstart);
1864 		nr_sects = SECTOR_FROM_BLOCK(blklen);
1865 
1866 		if (sector & (bdev_zone_sectors(bdev) - 1) ||
1867 				nr_sects != bdev_zone_sectors(bdev)) {
1868 			f2fs_err(sbi, "(%d) %s: Unaligned zone reset attempted (block %x + %x)",
1869 				 devi, sbi->s_ndevs ? FDEV(devi).path : "",
1870 				 blkstart, blklen);
1871 			return -EIO;
1872 		}
1873 		trace_f2fs_issue_reset_zone(bdev, blkstart);
1874 		return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
1875 					sector, nr_sects, GFP_NOFS);
1876 	}
1877 
1878 	/* For conventional zones, use regular discard if supported */
1879 	return __queue_discard_cmd(sbi, bdev, lblkstart, blklen);
1880 }
1881 #endif
1882 
1883 static int __issue_discard_async(struct f2fs_sb_info *sbi,
1884 		struct block_device *bdev, block_t blkstart, block_t blklen)
1885 {
1886 #ifdef CONFIG_BLK_DEV_ZONED
1887 	if (f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(bdev))
1888 		return __f2fs_issue_discard_zone(sbi, bdev, blkstart, blklen);
1889 #endif
1890 	return __queue_discard_cmd(sbi, bdev, blkstart, blklen);
1891 }
1892 
1893 static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
1894 				block_t blkstart, block_t blklen)
1895 {
1896 	sector_t start = blkstart, len = 0;
1897 	struct block_device *bdev;
1898 	struct seg_entry *se;
1899 	unsigned int offset;
1900 	block_t i;
1901 	int err = 0;
1902 
1903 	bdev = f2fs_target_device(sbi, blkstart, NULL);
1904 
1905 	for (i = blkstart; i < blkstart + blklen; i++, len++) {
1906 		if (i != start) {
1907 			struct block_device *bdev2 =
1908 				f2fs_target_device(sbi, i, NULL);
1909 
1910 			if (bdev2 != bdev) {
1911 				err = __issue_discard_async(sbi, bdev,
1912 						start, len);
1913 				if (err)
1914 					return err;
1915 				bdev = bdev2;
1916 				start = i;
1917 				len = 0;
1918 			}
1919 		}
1920 
1921 		se = get_seg_entry(sbi, GET_SEGNO(sbi, i));
1922 		offset = GET_BLKOFF_FROM_SEG0(sbi, i);
1923 
1924 		if (f2fs_block_unit_discard(sbi) &&
1925 				!f2fs_test_and_set_bit(offset, se->discard_map))
1926 			sbi->discard_blks--;
1927 	}
1928 
1929 	if (len)
1930 		err = __issue_discard_async(sbi, bdev, start, len);
1931 	return err;
1932 }
1933 
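/*
 * Sketch of the bitmap arithmetic below: in CP_DISCARD mode the
 * candidate map is ~ckpt_map & ~discard_map (blocks invalid at the last
 * checkpoint and not yet discarded); otherwise it is
 * (cur_map ^ ckpt_map) & ckpt_map, i.e. blocks freed since that
 * checkpoint.
 */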
1934 static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc,
1935 							bool check_only)
1936 {
1937 	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
1938 	int max_blocks = sbi->blocks_per_seg;
1939 	struct seg_entry *se = get_seg_entry(sbi, cpc->trim_start);
1940 	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
1941 	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
1942 	unsigned long *discard_map = (unsigned long *)se->discard_map;
1943 	unsigned long *dmap = SIT_I(sbi)->tmp_map;
1944 	unsigned int start = 0, end = -1;
1945 	bool force = (cpc->reason & CP_DISCARD);
1946 	struct discard_entry *de = NULL;
1947 	struct list_head *head = &SM_I(sbi)->dcc_info->entry_list;
1948 	int i;
1949 
1950 	if (se->valid_blocks == max_blocks || !f2fs_hw_support_discard(sbi) ||
1951 			!f2fs_block_unit_discard(sbi))
1952 		return false;
1953 
1954 	if (!force) {
1955 		if (!f2fs_realtime_discard_enable(sbi) || !se->valid_blocks ||
1956 			SM_I(sbi)->dcc_info->nr_discards >=
1957 				SM_I(sbi)->dcc_info->max_discards)
1958 			return false;
1959 	}
1960 
1961 	/* SIT_VBLOCK_MAP_SIZE should be a multiple of sizeof(unsigned long) */
1962 	for (i = 0; i < entries; i++)
1963 		dmap[i] = force ? ~ckpt_map[i] & ~discard_map[i] :
1964 				(cur_map[i] ^ ckpt_map[i]) & ckpt_map[i];
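	/*
	 * Sketch of the candidate map (illustrative, 4 bits shown): in the
	 * non-force case a block is a discard candidate iff it was valid at
	 * the last checkpoint but is no longer valid:
	 *   cur_map  : 0 1 0 1
	 *   ckpt_map : 1 1 0 0
	 *   dmap     : 1 0 0 0   ((cur ^ ckpt) & ckpt)
	 */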
1965 
1966 	while (force || SM_I(sbi)->dcc_info->nr_discards <=
1967 				SM_I(sbi)->dcc_info->max_discards) {
1968 		start = __find_rev_next_bit(dmap, max_blocks, end + 1);
1969 		if (start >= max_blocks)
1970 			break;
1971 
1972 		end = __find_rev_next_zero_bit(dmap, max_blocks, start + 1);
1973 		if (force && start && end != max_blocks
1974 					&& (end - start) < cpc->trim_minlen)
1975 			continue;
1976 
1977 		if (check_only)
1978 			return true;
1979 
1980 		if (!de) {
1981 			de = f2fs_kmem_cache_alloc(discard_entry_slab,
1982 						GFP_F2FS_ZERO, true, NULL);
1983 			de->start_blkaddr = START_BLOCK(sbi, cpc->trim_start);
1984 			list_add_tail(&de->list, head);
1985 		}
1986 
1987 		for (i = start; i < end; i++)
1988 			__set_bit_le(i, (void *)de->discard_map);
1989 
1990 		SM_I(sbi)->dcc_info->nr_discards += end - start;
1991 	}
1992 	return false;
1993 }
1994 
1995 static void release_discard_addr(struct discard_entry *entry)
1996 {
1997 	list_del(&entry->list);
1998 	kmem_cache_free(discard_entry_slab, entry);
1999 }
2000 
2001 void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi)
2002 {
2003 	struct list_head *head = &(SM_I(sbi)->dcc_info->entry_list);
2004 	struct discard_entry *entry, *this;
2005 
2006 	/* drop caches */
2007 	list_for_each_entry_safe(entry, this, head, list)
2008 		release_discard_addr(entry);
2009 }
2010 
2011 /*
2012  * f2fs_clear_prefree_segments should be called after the checkpoint is done.
2013  */
2014 static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
2015 {
2016 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2017 	unsigned int segno;
2018 
2019 	mutex_lock(&dirty_i->seglist_lock);
2020 	for_each_set_bit(segno, dirty_i->dirty_segmap[PRE], MAIN_SEGS(sbi))
2021 		__set_test_and_free(sbi, segno, false);
2022 	mutex_unlock(&dirty_i->seglist_lock);
2023 }
2024 
2025 void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
2026 						struct cp_control *cpc)
2027 {
2028 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
2029 	struct list_head *head = &dcc->entry_list;
2030 	struct discard_entry *entry, *this;
2031 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2032 	unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
2033 	unsigned int start = 0, end = -1;
2034 	unsigned int secno, start_segno;
2035 	bool force = (cpc->reason & CP_DISCARD);
2036 	bool section_alignment = F2FS_OPTION(sbi).discard_unit ==
2037 						DISCARD_UNIT_SECTION;
2038 
2039 	if (f2fs_lfs_mode(sbi) && __is_large_section(sbi))
2040 		section_alignment = true;
2041 
2042 	mutex_lock(&dirty_i->seglist_lock);
2043 
2044 	while (1) {
2045 		int i;
2046 
2047 		if (section_alignment && end != -1)
2048 			end--;
2049 		start = find_next_bit(prefree_map, MAIN_SEGS(sbi), end + 1);
2050 		if (start >= MAIN_SEGS(sbi))
2051 			break;
2052 		end = find_next_zero_bit(prefree_map, MAIN_SEGS(sbi),
2053 								start + 1);
2054 
2055 		if (section_alignment) {
2056 			start = rounddown(start, sbi->segs_per_sec);
2057 			end = roundup(end, sbi->segs_per_sec);
2058 		}
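		/*
		 * e.g. (illustrative): with segs_per_sec == 4, a prefree run
		 * covering segments [5, 7) is widened to [4, 8) so that
		 * discards stay section-aligned.
		 */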
2059 
2060 		for (i = start; i < end; i++) {
2061 			if (test_and_clear_bit(i, prefree_map))
2062 				dirty_i->nr_dirty[PRE]--;
2063 		}
2064 
2065 		if (!f2fs_realtime_discard_enable(sbi))
2066 			continue;
2067 
2068 		if (force && start >= cpc->trim_start &&
2069 					(end - 1) <= cpc->trim_end)
2070 				continue;
2071 
2072 		if (!f2fs_lfs_mode(sbi) || !__is_large_section(sbi)) {
2073 			f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
2074 				(end - start) << sbi->log_blocks_per_seg);
2075 			continue;
2076 		}
2077 next:
2078 		secno = GET_SEC_FROM_SEG(sbi, start);
2079 		start_segno = GET_SEG_FROM_SEC(sbi, secno);
2080 		if (!IS_CURSEC(sbi, secno) &&
2081 			!get_valid_blocks(sbi, start, true))
2082 			f2fs_issue_discard(sbi, START_BLOCK(sbi, start_segno),
2083 				sbi->segs_per_sec << sbi->log_blocks_per_seg);
2084 
2085 		start = start_segno + sbi->segs_per_sec;
2086 		if (start < end)
2087 			goto next;
2088 		else
2089 			end = start - 1;
2090 	}
2091 	mutex_unlock(&dirty_i->seglist_lock);
2092 
2093 	if (!f2fs_block_unit_discard(sbi))
2094 		goto wakeup;
2095 
2096 	/* send small discards */
2097 	list_for_each_entry_safe(entry, this, head, list) {
2098 		unsigned int cur_pos = 0, next_pos, len, total_len = 0;
2099 		bool is_valid = test_bit_le(0, entry->discard_map);
2100 
2101 find_next:
2102 		if (is_valid) {
2103 			next_pos = find_next_zero_bit_le(entry->discard_map,
2104 					sbi->blocks_per_seg, cur_pos);
2105 			len = next_pos - cur_pos;
2106 
2107 			if (f2fs_sb_has_blkzoned(sbi) ||
2108 			    (force && len < cpc->trim_minlen))
2109 				goto skip;
2110 
2111 			f2fs_issue_discard(sbi, entry->start_blkaddr + cur_pos,
2112 									len);
2113 			total_len += len;
2114 		} else {
2115 			next_pos = find_next_bit_le(entry->discard_map,
2116 					sbi->blocks_per_seg, cur_pos);
2117 		}
2118 skip:
2119 		cur_pos = next_pos;
2120 		is_valid = !is_valid;
2121 
2122 		if (cur_pos < sbi->blocks_per_seg)
2123 			goto find_next;
2124 
2125 		release_discard_addr(entry);
2126 		dcc->nr_discards -= total_len;
2127 	}
2128 
2129 wakeup:
2130 	wake_up_discard_thread(sbi, false);
2131 }
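/*
 * Sketch of the small-discard walk above (illustrative): for a per-segment
 * discard map of 1 1 0 1 and blocks_per_seg == 4, the loop alternates
 * is_valid and visits runs [0,2) (discarded, len 2), [2,3) (skipped) and
 * [3,4) (discarded, len 1) before releasing the entry.
 */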
2132 
2133 int f2fs_start_discard_thread(struct f2fs_sb_info *sbi)
2134 {
2135 	dev_t dev = sbi->sb->s_bdev->bd_dev;
2136 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
2137 	int err = 0;
2138 
2139 	if (!f2fs_realtime_discard_enable(sbi))
2140 		return 0;
2141 
2142 	dcc->f2fs_issue_discard = kthread_run(issue_discard_thread, sbi,
2143 				"f2fs_discard-%u:%u", MAJOR(dev), MINOR(dev));
2144 	if (IS_ERR(dcc->f2fs_issue_discard))
2145 		err = PTR_ERR(dcc->f2fs_issue_discard);
2146 
2147 	return err;
2148 }
2149 
2150 static int create_discard_cmd_control(struct f2fs_sb_info *sbi)
2151 {
2152 	struct discard_cmd_control *dcc;
2153 	int err = 0, i;
2154 
2155 	if (SM_I(sbi)->dcc_info) {
2156 		dcc = SM_I(sbi)->dcc_info;
2157 		goto init_thread;
2158 	}
2159 
2160 	dcc = f2fs_kzalloc(sbi, sizeof(struct discard_cmd_control), GFP_KERNEL);
2161 	if (!dcc)
2162 		return -ENOMEM;
2163 
2164 	dcc->discard_granularity = DEFAULT_DISCARD_GRANULARITY;
2165 	if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SEGMENT)
2166 		dcc->discard_granularity = sbi->blocks_per_seg;
2167 	else if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SECTION)
2168 		dcc->discard_granularity = BLKS_PER_SEC(sbi);
2169 
2170 	INIT_LIST_HEAD(&dcc->entry_list);
2171 	for (i = 0; i < MAX_PLIST_NUM; i++)
2172 		INIT_LIST_HEAD(&dcc->pend_list[i]);
2173 	INIT_LIST_HEAD(&dcc->wait_list);
2174 	INIT_LIST_HEAD(&dcc->fstrim_list);
2175 	mutex_init(&dcc->cmd_lock);
2176 	atomic_set(&dcc->issued_discard, 0);
2177 	atomic_set(&dcc->queued_discard, 0);
2178 	atomic_set(&dcc->discard_cmd_cnt, 0);
2179 	dcc->nr_discards = 0;
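	/* cap pending discards at one per block in the main area */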
2180 	dcc->max_discards = MAIN_SEGS(sbi) << sbi->log_blocks_per_seg;
2181 	dcc->max_discard_request = DEF_MAX_DISCARD_REQUEST;
2182 	dcc->min_discard_issue_time = DEF_MIN_DISCARD_ISSUE_TIME;
2183 	dcc->mid_discard_issue_time = DEF_MID_DISCARD_ISSUE_TIME;
2184 	dcc->max_discard_issue_time = DEF_MAX_DISCARD_ISSUE_TIME;
2185 	dcc->undiscard_blks = 0;
2186 	dcc->next_pos = 0;
2187 	dcc->root = RB_ROOT_CACHED;
2188 	dcc->rbtree_check = false;
2189 
2190 	init_waitqueue_head(&dcc->discard_wait_queue);
2191 	SM_I(sbi)->dcc_info = dcc;
2192 init_thread:
2193 	err = f2fs_start_discard_thread(sbi);
2194 	if (err) {
2195 		kfree(dcc);
2196 		SM_I(sbi)->dcc_info = NULL;
2197 	}
2198 
2199 	return err;
2200 }
2201 
2202 static void destroy_discard_cmd_control(struct f2fs_sb_info *sbi)
2203 {
2204 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
2205 
2206 	if (!dcc)
2207 		return;
2208 
2209 	f2fs_stop_discard_thread(sbi);
2210 
2211 	/*
2212 	 * Recovery can cache discard commands, so in the error path of
2213 	 * fill_super(), give them a chance to be issued.
2214 	 */
2215 	if (unlikely(atomic_read(&dcc->discard_cmd_cnt)))
2216 		f2fs_issue_discard_timeout(sbi);
2217 
2218 	kfree(dcc);
2219 	SM_I(sbi)->dcc_info = NULL;
2220 }
2221 
2222 static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
2223 {
2224 	struct sit_info *sit_i = SIT_I(sbi);
2225 
2226 	if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap)) {
2227 		sit_i->dirty_sentries++;
2228 		return false;
2229 	}
2230 
2231 	return true;
2232 }
2233 
2234 static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type,
2235 					unsigned int segno, int modified)
2236 {
2237 	struct seg_entry *se = get_seg_entry(sbi, segno);
2238 
2239 	se->type = type;
2240 	if (modified)
2241 		__mark_sit_entry_dirty(sbi, segno);
2242 }
2243 
2244 static inline unsigned long long get_segment_mtime(struct f2fs_sb_info *sbi,
2245 								block_t blkaddr)
2246 {
2247 	unsigned int segno = GET_SEGNO(sbi, blkaddr);
2248 
2249 	if (segno == NULL_SEGNO)
2250 		return 0;
2251 	return get_seg_entry(sbi, segno)->mtime;
2252 }
2253 
2254 static void update_segment_mtime(struct f2fs_sb_info *sbi, block_t blkaddr,
2255 						unsigned long long old_mtime)
2256 {
2257 	struct seg_entry *se;
2258 	unsigned int segno = GET_SEGNO(sbi, blkaddr);
2259 	unsigned long long ctime = get_mtime(sbi, false);
2260 	unsigned long long mtime = old_mtime ? old_mtime : ctime;
2261 
2262 	if (segno == NULL_SEGNO)
2263 		return;
2264 
2265 	se = get_seg_entry(sbi, segno);
2266 
2267 	if (!se->mtime)
2268 		se->mtime = mtime;
2269 	else
2270 		se->mtime = div_u64(se->mtime * se->valid_blocks + mtime,
2271 						se->valid_blocks + 1);
2272 
2273 	if (ctime > SIT_I(sbi)->max_mtime)
2274 		SIT_I(sbi)->max_mtime = ctime;
2275 }
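/*
 * e.g. (illustrative): a segment with mtime 100 over 3 valid blocks that
 * gains a block written at time 200 ends up with
 * mtime = (100 * 3 + 200) / (3 + 1) = 125, i.e. a running average
 * weighted by the valid block count.
 */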
2276 
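/*
 * del is +1 when the block at blkaddr becomes valid and -1 when it is
 * invalidated; callers hold sentry_lock.
 */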
2277 static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
2278 {
2279 	struct seg_entry *se;
2280 	unsigned int segno, offset;
2281 	long int new_vblocks;
2282 	bool exist;
2283 #ifdef CONFIG_F2FS_CHECK_FS
2284 	bool mir_exist;
2285 #endif
2286 
2287 	segno = GET_SEGNO(sbi, blkaddr);
2288 
2289 	se = get_seg_entry(sbi, segno);
2290 	new_vblocks = se->valid_blocks + del;
2291 	offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
2292 
2293 	f2fs_bug_on(sbi, (new_vblocks < 0 ||
2294 			(new_vblocks > f2fs_usable_blks_in_seg(sbi, segno))));
2295 
2296 	se->valid_blocks = new_vblocks;
2297 
2298 	/* Update valid block bitmap */
2299 	if (del > 0) {
2300 		exist = f2fs_test_and_set_bit(offset, se->cur_valid_map);
2301 #ifdef CONFIG_F2FS_CHECK_FS
2302 		mir_exist = f2fs_test_and_set_bit(offset,
2303 						se->cur_valid_map_mir);
2304 		if (unlikely(exist != mir_exist)) {
2305 			f2fs_err(sbi, "Inconsistent error when setting bitmap, blk:%u, old bit:%d",
2306 				 blkaddr, exist);
2307 			f2fs_bug_on(sbi, 1);
2308 		}
2309 #endif
2310 		if (unlikely(exist)) {
2311 			f2fs_err(sbi, "Bitmap was wrongly set, blk:%u",
2312 				 blkaddr);
2313 			f2fs_bug_on(sbi, 1);
2314 			se->valid_blocks--;
2315 			del = 0;
2316 		}
2317 
2318 		if (f2fs_block_unit_discard(sbi) &&
2319 				!f2fs_test_and_set_bit(offset, se->discard_map))
2320 			sbi->discard_blks--;
2321 
2322 		/*
2323 		 * SSR should never reuse a block which is checkpointed
2324 		 * or newly invalidated.
2325 		 */
2326 		if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
2327 			if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map))
2328 				se->ckpt_valid_blocks++;
2329 		}
2330 	} else {
2331 		exist = f2fs_test_and_clear_bit(offset, se->cur_valid_map);
2332 #ifdef CONFIG_F2FS_CHECK_FS
2333 		mir_exist = f2fs_test_and_clear_bit(offset,
2334 						se->cur_valid_map_mir);
2335 		if (unlikely(exist != mir_exist)) {
2336 			f2fs_err(sbi, "Inconsistent error when clearing bitmap, blk:%u, old bit:%d",
2337 				 blkaddr, exist);
2338 			f2fs_bug_on(sbi, 1);
2339 		}
2340 #endif
2341 		if (unlikely(!exist)) {
2342 			f2fs_err(sbi, "Bitmap was wrongly cleared, blk:%u",
2343 				 blkaddr);
2344 			f2fs_bug_on(sbi, 1);
2345 			se->valid_blocks++;
2346 			del = 0;
2347 		} else if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
2348 			/*
2349 			 * If checkpoints are off, we must not reuse data that
2350 			 * was used in the previous checkpoint. If it was used
2351 			 * before, we must track that to know how much space we
2352 			 * really have.
2353 			 */
2354 			if (f2fs_test_bit(offset, se->ckpt_valid_map)) {
2355 				spin_lock(&sbi->stat_lock);
2356 				sbi->unusable_block_count++;
2357 				spin_unlock(&sbi->stat_lock);
2358 			}
2359 		}
2360 
2361 		if (f2fs_block_unit_discard(sbi) &&
2362 			f2fs_test_and_clear_bit(offset, se->discard_map))
2363 			sbi->discard_blks++;
2364 	}
2365 	if (!f2fs_test_bit(offset, se->ckpt_valid_map))
2366 		se->ckpt_valid_blocks += del;
2367 
2368 	__mark_sit_entry_dirty(sbi, segno);
2369 
2370 	/* update total number of valid blocks to be written in ckpt area */
2371 	SIT_I(sbi)->written_valid_blocks += del;
2372 
2373 	if (__is_large_section(sbi))
2374 		get_sec_entry(sbi, segno)->valid_blocks += del;
2375 }
2376 
2377 void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
2378 {
2379 	unsigned int segno = GET_SEGNO(sbi, addr);
2380 	struct sit_info *sit_i = SIT_I(sbi);
2381 
2382 	f2fs_bug_on(sbi, addr == NULL_ADDR);
2383 	if (addr == NEW_ADDR || addr == COMPRESS_ADDR)
2384 		return;
2385 
2386 	invalidate_mapping_pages(META_MAPPING(sbi), addr, addr);
2387 	f2fs_invalidate_compress_page(sbi, addr);
2388 
2389 	/* add it into sit main buffer */
2390 	down_write(&sit_i->sentry_lock);
2391 
2392 	update_segment_mtime(sbi, addr, 0);
2393 	update_sit_entry(sbi, addr, -1);
2394 
2395 	/* add it into dirty seglist */
2396 	locate_dirty_segment(sbi, segno);
2397 
2398 	up_write(&sit_i->sentry_lock);
2399 }
2400 
2401 bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr)
2402 {
2403 	struct sit_info *sit_i = SIT_I(sbi);
2404 	unsigned int segno, offset;
2405 	struct seg_entry *se;
2406 	bool is_cp = false;
2407 
2408 	if (!__is_valid_data_blkaddr(blkaddr))
2409 		return true;
2410 
2411 	down_read(&sit_i->sentry_lock);
2412 
2413 	segno = GET_SEGNO(sbi, blkaddr);
2414 	se = get_seg_entry(sbi, segno);
2415 	offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
2416 
2417 	if (f2fs_test_bit(offset, se->ckpt_valid_map))
2418 		is_cp = true;
2419 
2420 	up_read(&sit_i->sentry_lock);
2421 
2422 	return is_cp;
2423 }
2424 
2425 /*
2426  * This function must be called with curseg_mutex held.
2427  */
2428 static void __add_sum_entry(struct f2fs_sb_info *sbi, int type,
2429 					struct f2fs_summary *sum)
2430 {
2431 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2432 	void *addr = curseg->sum_blk;
2433 
2434 	addr += curseg->next_blkoff * sizeof(struct f2fs_summary);
2435 	memcpy(addr, sum, sizeof(struct f2fs_summary));
2436 }
2437 
2438 /*
2439  * Calculate the number of current summary pages for writing
2440  */
2441 int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra)
2442 {
2443 	int valid_sum_count = 0;
2444 	int i, sum_in_page;
2445 
2446 	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
2447 		if (sbi->ckpt->alloc_type[i] == SSR)
2448 			valid_sum_count += sbi->blocks_per_seg;
2449 		else {
2450 			if (for_ra)
2451 				valid_sum_count += le16_to_cpu(
2452 					F2FS_CKPT(sbi)->cur_data_blkoff[i]);
2453 			else
2454 				valid_sum_count += curseg_blkoff(sbi, i);
2455 		}
2456 	}
2457 
2458 	sum_in_page = (PAGE_SIZE - 2 * SUM_JOURNAL_SIZE -
2459 			SUM_FOOTER_SIZE) / SUMMARY_SIZE;
2460 	if (valid_sum_count <= sum_in_page)
2461 		return 1;
2462 	else if ((valid_sum_count - sum_in_page) <=
2463 		(PAGE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
2464 		return 2;
2465 	return 3;
2466 }
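/*
 * Illustrative reading of the math above: the first page holds sum_in_page
 * entries after reserving two journals and a footer; a second page
 * (reserving only a footer) absorbs the overflow; three pages is the worst
 * case, e.g. when all three data logs use SSR and contribute
 * blocks_per_seg entries each.
 */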
2467 
2468 /*
2469  * Caller should put this summary page
2470  */
2471 struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
2472 {
2473 	if (unlikely(f2fs_cp_error(sbi)))
2474 		return ERR_PTR(-EIO);
2475 	return f2fs_get_meta_page_retry(sbi, GET_SUM_BLOCK(sbi, segno));
2476 }
2477 
2478 void f2fs_update_meta_page(struct f2fs_sb_info *sbi,
2479 					void *src, block_t blk_addr)
2480 {
2481 	struct page *page = f2fs_grab_meta_page(sbi, blk_addr);
2482 
2483 	memcpy(page_address(page), src, PAGE_SIZE);
2484 	set_page_dirty(page);
2485 	f2fs_put_page(page, 1);
2486 }
2487 
2488 static void write_sum_page(struct f2fs_sb_info *sbi,
2489 			struct f2fs_summary_block *sum_blk, block_t blk_addr)
2490 {
2491 	f2fs_update_meta_page(sbi, (void *)sum_blk, blk_addr);
2492 }
2493 
2494 static void write_current_sum_page(struct f2fs_sb_info *sbi,
2495 						int type, block_t blk_addr)
2496 {
2497 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2498 	struct page *page = f2fs_grab_meta_page(sbi, blk_addr);
2499 	struct f2fs_summary_block *src = curseg->sum_blk;
2500 	struct f2fs_summary_block *dst;
2501 
2502 	dst = (struct f2fs_summary_block *)page_address(page);
2503 	memset(dst, 0, PAGE_SIZE);
2504 
2505 	mutex_lock(&curseg->curseg_mutex);
2506 
2507 	down_read(&curseg->journal_rwsem);
2508 	memcpy(&dst->journal, curseg->journal, SUM_JOURNAL_SIZE);
2509 	up_read(&curseg->journal_rwsem);
2510 
2511 	memcpy(dst->entries, src->entries, SUM_ENTRY_SIZE);
2512 	memcpy(&dst->footer, &src->footer, SUM_FOOTER_SIZE);
2513 
2514 	mutex_unlock(&curseg->curseg_mutex);
2515 
2516 	set_page_dirty(page);
2517 	f2fs_put_page(page, 1);
2518 }
2519 
2520 static int is_next_segment_free(struct f2fs_sb_info *sbi,
2521 				struct curseg_info *curseg, int type)
2522 {
2523 	unsigned int segno = curseg->segno + 1;
2524 	struct free_segmap_info *free_i = FREE_I(sbi);
2525 
2526 	if (segno < MAIN_SEGS(sbi) && segno % sbi->segs_per_sec)
2527 		return !test_bit(segno, free_i->free_segmap);
2528 	return 0;
2529 }
2530 
2531 /*
2532  * Find a new segment in the free segment bitmap, in the right order.
2533  * This function must succeed; otherwise it is a BUG.
2534  */
2535 static void get_new_segment(struct f2fs_sb_info *sbi,
2536 			unsigned int *newseg, bool new_sec, int dir)
2537 {
2538 	struct free_segmap_info *free_i = FREE_I(sbi);
2539 	unsigned int segno, secno, zoneno;
2540 	unsigned int total_zones = MAIN_SECS(sbi) / sbi->secs_per_zone;
2541 	unsigned int hint = GET_SEC_FROM_SEG(sbi, *newseg);
2542 	unsigned int old_zoneno = GET_ZONE_FROM_SEG(sbi, *newseg);
2543 	unsigned int left_start = hint;
2544 	bool init = true;
2545 	int go_left = 0;
2546 	int i;
2547 
2548 	spin_lock(&free_i->segmap_lock);
2549 
2550 	if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
2551 		segno = find_next_zero_bit(free_i->free_segmap,
2552 			GET_SEG_FROM_SEC(sbi, hint + 1), *newseg + 1);
2553 		if (segno < GET_SEG_FROM_SEC(sbi, hint + 1))
2554 			goto got_it;
2555 	}
2556 find_other_zone:
2557 	secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
2558 	if (secno >= MAIN_SECS(sbi)) {
2559 		if (dir == ALLOC_RIGHT) {
2560 			secno = find_first_zero_bit(free_i->free_secmap,
2561 							MAIN_SECS(sbi));
2562 			f2fs_bug_on(sbi, secno >= MAIN_SECS(sbi));
2563 		} else {
2564 			go_left = 1;
2565 			left_start = hint - 1;
2566 		}
2567 	}
2568 	if (go_left == 0)
2569 		goto skip_left;
2570 
2571 	while (test_bit(left_start, free_i->free_secmap)) {
2572 		if (left_start > 0) {
2573 			left_start--;
2574 			continue;
2575 		}
2576 		left_start = find_first_zero_bit(free_i->free_secmap,
2577 							MAIN_SECS(sbi));
2578 		f2fs_bug_on(sbi, left_start >= MAIN_SECS(sbi));
2579 		break;
2580 	}
2581 	secno = left_start;
2582 skip_left:
2583 	segno = GET_SEG_FROM_SEC(sbi, secno);
2584 	zoneno = GET_ZONE_FROM_SEC(sbi, secno);
2585 
2586 	/* give up on finding another zone */
2587 	if (!init)
2588 		goto got_it;
2589 	if (sbi->secs_per_zone == 1)
2590 		goto got_it;
2591 	if (zoneno == old_zoneno)
2592 		goto got_it;
2593 	if (dir == ALLOC_LEFT) {
2594 		if (!go_left && zoneno + 1 >= total_zones)
2595 			goto got_it;
2596 		if (go_left && zoneno == 0)
2597 			goto got_it;
2598 	}
2599 	for (i = 0; i < NR_CURSEG_TYPE; i++)
2600 		if (CURSEG_I(sbi, i)->zone == zoneno)
2601 			break;
2602 
2603 	if (i < NR_CURSEG_TYPE) {
2604 		/* zone is in use, try another */
2605 		if (go_left)
2606 			hint = zoneno * sbi->secs_per_zone - 1;
2607 		else if (zoneno + 1 >= total_zones)
2608 			hint = 0;
2609 		else
2610 			hint = (zoneno + 1) * sbi->secs_per_zone;
2611 		init = false;
2612 		goto find_other_zone;
2613 	}
2614 got_it:
2615 	/* set it as dirty segment in free segmap */
2616 	f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap));
2617 	__set_inuse(sbi, segno);
2618 	*newseg = segno;
2619 	spin_unlock(&free_i->segmap_lock);
2620 }
2621 
2622 static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
2623 {
2624 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2625 	struct summary_footer *sum_footer;
2626 	unsigned short seg_type = curseg->seg_type;
2627 
2628 	curseg->inited = true;
2629 	curseg->segno = curseg->next_segno;
2630 	curseg->zone = GET_ZONE_FROM_SEG(sbi, curseg->segno);
2631 	curseg->next_blkoff = 0;
2632 	curseg->next_segno = NULL_SEGNO;
2633 
2634 	sum_footer = &(curseg->sum_blk->footer);
2635 	memset(sum_footer, 0, sizeof(struct summary_footer));
2636 
2637 	sanity_check_seg_type(sbi, seg_type);
2638 
2639 	if (IS_DATASEG(seg_type))
2640 		SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
2641 	if (IS_NODESEG(seg_type))
2642 		SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
2643 	__set_sit_entry_type(sbi, seg_type, curseg->segno, modified);
2644 }
2645 
2646 static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type)
2647 {
2648 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2649 	unsigned short seg_type = curseg->seg_type;
2650 
2651 	sanity_check_seg_type(sbi, seg_type);
2652 	if (f2fs_need_rand_seg(sbi))
2653 		return prandom_u32() % (MAIN_SECS(sbi) * sbi->segs_per_sec);
2654 
2655 	/* if segs_per_sec is larger than 1, we need to keep the original policy. */
2656 	if (__is_large_section(sbi))
2657 		return curseg->segno;
2658 
2659 	/* the in-memory log may not be located on any segment after mount */
2660 	if (!curseg->inited)
2661 		return 0;
2662 
2663 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2664 		return 0;
2665 
2666 	if (test_opt(sbi, NOHEAP) &&
2667 		(seg_type == CURSEG_HOT_DATA || IS_NODESEG(seg_type)))
2668 		return 0;
2669 
2670 	if (SIT_I(sbi)->last_victim[ALLOC_NEXT])
2671 		return SIT_I(sbi)->last_victim[ALLOC_NEXT];
2672 
2673 	/* find segments from 0 to reuse freed segments */
2674 	if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE)
2675 		return 0;
2676 
2677 	return curseg->segno;
2678 }
2679 
2680 /*
2681  * Allocate a current working segment.
2682  * This function always allocates a free segment in LFS manner.
2683  */
2684 static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
2685 {
2686 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2687 	unsigned short seg_type = curseg->seg_type;
2688 	unsigned int segno = curseg->segno;
2689 	int dir = ALLOC_LEFT;
2690 
2691 	if (curseg->inited)
2692 		write_sum_page(sbi, curseg->sum_blk,
2693 				GET_SUM_BLOCK(sbi, segno));
2694 	if (seg_type == CURSEG_WARM_DATA || seg_type == CURSEG_COLD_DATA)
2695 		dir = ALLOC_RIGHT;
2696 
2697 	if (test_opt(sbi, NOHEAP))
2698 		dir = ALLOC_RIGHT;
2699 
2700 	segno = __get_next_segno(sbi, type);
2701 	get_new_segment(sbi, &segno, new_sec, dir);
2702 	curseg->next_segno = segno;
2703 	reset_curseg(sbi, type, 1);
2704 	curseg->alloc_type = LFS;
2705 	if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK)
2706 		curseg->fragment_remained_chunk =
2707 				prandom_u32() % sbi->max_fragment_chunk + 1;
2708 }
2709 
2710 static int __next_free_blkoff(struct f2fs_sb_info *sbi,
2711 					int segno, block_t start)
2712 {
2713 	struct seg_entry *se = get_seg_entry(sbi, segno);
2714 	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
2715 	unsigned long *target_map = SIT_I(sbi)->tmp_map;
2716 	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
2717 	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
2718 	int i;
2719 
2720 	for (i = 0; i < entries; i++)
2721 		target_map[i] = ckpt_map[i] | cur_map[i];
2722 
2723 	return __find_rev_next_zero_bit(target_map, sbi->blocks_per_seg, start);
2724 }
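/*
 * e.g. (illustrative, 4 bits shown): cur_valid_map 1010 and ckpt_valid_map
 * 1100 give target_map 1110, so the first free slot at or after offset 0
 * is block 3; SSR must skip blocks live in either map.
 */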
2725 
2726 /*
2727  * If a segment is written in LFS manner, the next block offset is simply
2728  * obtained by increasing the current block offset. However, if a segment is
2729  * written in SSR manner, it is obtained by calling __next_free_blkoff().
2730  */
2731 static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
2732 				struct curseg_info *seg)
2733 {
2734 	if (seg->alloc_type == SSR) {
2735 		seg->next_blkoff =
2736 			__next_free_blkoff(sbi, seg->segno,
2737 						seg->next_blkoff + 1);
2738 	} else {
2739 		seg->next_blkoff++;
2740 		if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK) {
2741 			/* To allocate block chunks of different sizes, use random numbers */
2742 			if (--seg->fragment_remained_chunk <= 0) {
2743 				seg->fragment_remained_chunk =
2744 				   prandom_u32() % sbi->max_fragment_chunk + 1;
2745 				seg->next_blkoff +=
2746 				   prandom_u32() % sbi->max_fragment_hole + 1;
2747 			}
2748 		}
2749 	}
2750 }
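/*
 * e.g. (illustrative values, intended for fragmentation testing): in
 * FS_MODE_FRAGMENT_BLK with max_fragment_chunk == 4 and
 * max_fragment_hole == 2, LFS allocation above advances through a random
 * chunk of 1..4 blocks and then skips a random hole of 1..2 blocks,
 * deliberately fragmenting the on-disk layout.
 */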
2751 
2752 bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno)
2753 {
2754 	return __next_free_blkoff(sbi, segno, 0) < sbi->blocks_per_seg;
2755 }
2756 
2757 /*
2758  * This function always allocates a used segment (from the dirty seglist) in SSR
2759  * manner, so it should recover the existing segment information of valid blocks.
2760  */
2761 static void change_curseg(struct f2fs_sb_info *sbi, int type, bool flush)
2762 {
2763 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2764 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2765 	unsigned int new_segno = curseg->next_segno;
2766 	struct f2fs_summary_block *sum_node;
2767 	struct page *sum_page;
2768 
2769 	if (flush)
2770 		write_sum_page(sbi, curseg->sum_blk,
2771 					GET_SUM_BLOCK(sbi, curseg->segno));
2772 
2773 	__set_test_and_inuse(sbi, new_segno);
2774 
2775 	mutex_lock(&dirty_i->seglist_lock);
2776 	__remove_dirty_segment(sbi, new_segno, PRE);
2777 	__remove_dirty_segment(sbi, new_segno, DIRTY);
2778 	mutex_unlock(&dirty_i->seglist_lock);
2779 
2780 	reset_curseg(sbi, type, 1);
2781 	curseg->alloc_type = SSR;
2782 	curseg->next_blkoff = __next_free_blkoff(sbi, curseg->segno, 0);
2783 
2784 	sum_page = f2fs_get_sum_page(sbi, new_segno);
2785 	if (IS_ERR(sum_page)) {
2786 		/* GC won't be able to use stale summary pages by cp_error */
2787 		memset(curseg->sum_blk, 0, SUM_ENTRY_SIZE);
2788 		return;
2789 	}
2790 	sum_node = (struct f2fs_summary_block *)page_address(sum_page);
2791 	memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
2792 	f2fs_put_page(sum_page, 1);
2793 }
2794 
2795 static int get_ssr_segment(struct f2fs_sb_info *sbi, int type,
2796 				int alloc_mode, unsigned long long age);
2797 
2798 static void get_atssr_segment(struct f2fs_sb_info *sbi, int type,
2799 					int target_type, int alloc_mode,
2800 					unsigned long long age)
2801 {
2802 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2803 
2804 	curseg->seg_type = target_type;
2805 
2806 	if (get_ssr_segment(sbi, type, alloc_mode, age)) {
2807 		struct seg_entry *se = get_seg_entry(sbi, curseg->next_segno);
2808 
2809 		curseg->seg_type = se->type;
2810 		change_curseg(sbi, type, true);
2811 	} else {
2812 		/* allocate cold segment by default */
2813 		curseg->seg_type = CURSEG_COLD_DATA;
2814 		new_curseg(sbi, type, true);
2815 	}
2816 	stat_inc_seg_type(sbi, curseg);
2817 }
2818 
2819 static void __f2fs_init_atgc_curseg(struct f2fs_sb_info *sbi)
2820 {
2821 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_ALL_DATA_ATGC);
2822 
2823 	if (!sbi->am.atgc_enabled)
2824 		return;
2825 
2826 	f2fs_down_read(&SM_I(sbi)->curseg_lock);
2827 
2828 	mutex_lock(&curseg->curseg_mutex);
2829 	down_write(&SIT_I(sbi)->sentry_lock);
2830 
2831 	get_atssr_segment(sbi, CURSEG_ALL_DATA_ATGC, CURSEG_COLD_DATA, SSR, 0);
2832 
2833 	up_write(&SIT_I(sbi)->sentry_lock);
2834 	mutex_unlock(&curseg->curseg_mutex);
2835 
2836 	f2fs_up_read(&SM_I(sbi)->curseg_lock);
2837 }
2838 
2839 void f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi)
2840 {
2841 	__f2fs_init_atgc_curseg(sbi);
2842 }
2843 
2844 static void __f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi, int type)
2845 {
2846 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2847 
2848 	mutex_lock(&curseg->curseg_mutex);
2849 	if (!curseg->inited)
2850 		goto out;
2851 
2852 	if (get_valid_blocks(sbi, curseg->segno, false)) {
2853 		write_sum_page(sbi, curseg->sum_blk,
2854 				GET_SUM_BLOCK(sbi, curseg->segno));
2855 	} else {
2856 		mutex_lock(&DIRTY_I(sbi)->seglist_lock);
2857 		__set_test_and_free(sbi, curseg->segno, true);
2858 		mutex_unlock(&DIRTY_I(sbi)->seglist_lock);
2859 	}
2860 out:
2861 	mutex_unlock(&curseg->curseg_mutex);
2862 }
2863 
2864 void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi)
2865 {
2866 	__f2fs_save_inmem_curseg(sbi, CURSEG_COLD_DATA_PINNED);
2867 
2868 	if (sbi->am.atgc_enabled)
2869 		__f2fs_save_inmem_curseg(sbi, CURSEG_ALL_DATA_ATGC);
2870 }
2871 
2872 static void __f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi, int type)
2873 {
2874 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2875 
2876 	mutex_lock(&curseg->curseg_mutex);
2877 	if (!curseg->inited)
2878 		goto out;
2879 	if (get_valid_blocks(sbi, curseg->segno, false))
2880 		goto out;
2881 
2882 	mutex_lock(&DIRTY_I(sbi)->seglist_lock);
2883 	__set_test_and_inuse(sbi, curseg->segno);
2884 	mutex_unlock(&DIRTY_I(sbi)->seglist_lock);
2885 out:
2886 	mutex_unlock(&curseg->curseg_mutex);
2887 }
2888 
2889 void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi)
2890 {
2891 	__f2fs_restore_inmem_curseg(sbi, CURSEG_COLD_DATA_PINNED);
2892 
2893 	if (sbi->am.atgc_enabled)
2894 		__f2fs_restore_inmem_curseg(sbi, CURSEG_ALL_DATA_ATGC);
2895 }
2896 
2897 static int get_ssr_segment(struct f2fs_sb_info *sbi, int type,
2898 				int alloc_mode, unsigned long long age)
2899 {
2900 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2901 	const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;
2902 	unsigned segno = NULL_SEGNO;
2903 	unsigned short seg_type = curseg->seg_type;
2904 	int i, cnt;
2905 	bool reversed = false;
2906 
2907 	sanity_check_seg_type(sbi, seg_type);
2908 
2909 	/* f2fs_need_SSR() has already forced us to do this */
2910 	if (!v_ops->get_victim(sbi, &segno, BG_GC, seg_type, alloc_mode, age)) {
2911 		curseg->next_segno = segno;
2912 		return 1;
2913 	}
2914 
2915 	/* For node segments, let's do SSR more intensively */
2916 	if (IS_NODESEG(seg_type)) {
2917 		if (seg_type >= CURSEG_WARM_NODE) {
2918 			reversed = true;
2919 			i = CURSEG_COLD_NODE;
2920 		} else {
2921 			i = CURSEG_HOT_NODE;
2922 		}
2923 		cnt = NR_CURSEG_NODE_TYPE;
2924 	} else {
2925 		if (seg_type >= CURSEG_WARM_DATA) {
2926 			reversed = true;
2927 			i = CURSEG_COLD_DATA;
2928 		} else {
2929 			i = CURSEG_HOT_DATA;
2930 		}
2931 		cnt = NR_CURSEG_DATA_TYPE;
2932 	}
2933 
2934 	for (; cnt-- > 0; reversed ? i-- : i++) {
2935 		if (i == seg_type)
2936 			continue;
2937 		if (!v_ops->get_victim(sbi, &segno, BG_GC, i, alloc_mode, age)) {
2938 			curseg->next_segno = segno;
2939 			return 1;
2940 		}
2941 	}
2942 
2943 	/* find valid_blocks=0 in dirty list */
2944 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
2945 		segno = get_free_segment(sbi);
2946 		if (segno != NULL_SEGNO) {
2947 			curseg->next_segno = segno;
2948 			return 1;
2949 		}
2950 	}
2951 	return 0;
2952 }
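/*
 * e.g.: for seg_type == CURSEG_WARM_NODE the loop above scans in reverse,
 * trying CURSEG_COLD_NODE first and ending at CURSEG_HOT_NODE, skipping
 * seg_type itself since it was already tried by the first get_victim call.
 */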
2953 
2954 /*
2955  * Flush out the current segment and replace it with a new segment.
2956  * This function must succeed; otherwise it is a BUG.
2957  */
2958 static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
2959 						int type, bool force)
2960 {
2961 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2962 
2963 	if (force)
2964 		new_curseg(sbi, type, true);
2965 	else if (!is_set_ckpt_flags(sbi, CP_CRC_RECOVERY_FLAG) &&
2966 					curseg->seg_type == CURSEG_WARM_NODE)
2967 		new_curseg(sbi, type, false);
2968 	else if (curseg->alloc_type == LFS &&
2969 			is_next_segment_free(sbi, curseg, type) &&
2970 			likely(!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2971 		new_curseg(sbi, type, false);
2972 	else if (f2fs_need_SSR(sbi) &&
2973 			get_ssr_segment(sbi, type, SSR, 0))
2974 		change_curseg(sbi, type, true);
2975 	else
2976 		new_curseg(sbi, type, false);
2977 
2978 	stat_inc_seg_type(sbi, curseg);
2979 }
2980 
2981 void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
2982 					unsigned int start, unsigned int end)
2983 {
2984 	struct curseg_info *curseg = CURSEG_I(sbi, type);
2985 	unsigned int segno;
2986 
2987 	f2fs_down_read(&SM_I(sbi)->curseg_lock);
2988 	mutex_lock(&curseg->curseg_mutex);
2989 	down_write(&SIT_I(sbi)->sentry_lock);
2990 
2991 	segno = CURSEG_I(sbi, type)->segno;
2992 	if (segno < start || segno > end)
2993 		goto unlock;
2994 
2995 	if (f2fs_need_SSR(sbi) && get_ssr_segment(sbi, type, SSR, 0))
2996 		change_curseg(sbi, type, true);
2997 	else
2998 		new_curseg(sbi, type, true);
2999 
3000 	stat_inc_seg_type(sbi, curseg);
3001 
3002 	locate_dirty_segment(sbi, segno);
3003 unlock:
3004 	up_write(&SIT_I(sbi)->sentry_lock);
3005 
3006 	if (segno != curseg->segno)
3007 		f2fs_notice(sbi, "For resize: curseg of type %d: %u ==> %u",
3008 			    type, segno, curseg->segno);
3009 
3010 	mutex_unlock(&curseg->curseg_mutex);
3011 	f2fs_up_read(&SM_I(sbi)->curseg_lock);
3012 }
3013 
3014 static void __allocate_new_segment(struct f2fs_sb_info *sbi, int type,
3015 						bool new_sec, bool force)
3016 {
3017 	struct curseg_info *curseg = CURSEG_I(sbi, type);
3018 	unsigned int old_segno;
3019 
3020 	if (!curseg->inited)
3021 		goto alloc;
3022 
3023 	if (force || curseg->next_blkoff ||
3024 		get_valid_blocks(sbi, curseg->segno, new_sec))
3025 		goto alloc;
3026 
3027 	if (!get_ckpt_valid_blocks(sbi, curseg->segno, new_sec))
3028 		return;
3029 alloc:
3030 	old_segno = curseg->segno;
3031 	SIT_I(sbi)->s_ops->allocate_segment(sbi, type, true);
3032 	locate_dirty_segment(sbi, old_segno);
3033 }
3034 
3035 static void __allocate_new_section(struct f2fs_sb_info *sbi,
3036 						int type, bool force)
3037 {
3038 	__allocate_new_segment(sbi, type, true, force);
3039 }
3040 
3041 void f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type, bool force)
3042 {
3043 	f2fs_down_read(&SM_I(sbi)->curseg_lock);
3044 	down_write(&SIT_I(sbi)->sentry_lock);
3045 	__allocate_new_section(sbi, type, force);
3046 	up_write(&SIT_I(sbi)->sentry_lock);
3047 	f2fs_up_read(&SM_I(sbi)->curseg_lock);
3048 }
3049 
3050 void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi)
3051 {
3052 	int i;
3053 
3054 	f2fs_down_read(&SM_I(sbi)->curseg_lock);
3055 	down_write(&SIT_I(sbi)->sentry_lock);
3056 	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++)
3057 		__allocate_new_segment(sbi, i, false, false);
3058 	up_write(&SIT_I(sbi)->sentry_lock);
3059 	f2fs_up_read(&SM_I(sbi)->curseg_lock);
3060 }
3061 
3062 static const struct segment_allocation default_salloc_ops = {
3063 	.allocate_segment = allocate_segment_by_default,
3064 };
3065 
3066 bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
3067 						struct cp_control *cpc)
3068 {
3069 	__u64 trim_start = cpc->trim_start;
3070 	bool has_candidate = false;
3071 
3072 	down_write(&SIT_I(sbi)->sentry_lock);
3073 	for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++) {
3074 		if (add_discard_addrs(sbi, cpc, true)) {
3075 			has_candidate = true;
3076 			break;
3077 		}
3078 	}
3079 	up_write(&SIT_I(sbi)->sentry_lock);
3080 
3081 	cpc->trim_start = trim_start;
3082 	return has_candidate;
3083 }
3084 
3085 static unsigned int __issue_discard_cmd_range(struct f2fs_sb_info *sbi,
3086 					struct discard_policy *dpolicy,
3087 					unsigned int start, unsigned int end)
3088 {
3089 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
3090 	struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
3091 	struct rb_node **insert_p = NULL, *insert_parent = NULL;
3092 	struct discard_cmd *dc;
3093 	struct blk_plug plug;
3094 	int issued;
3095 	unsigned int trimmed = 0;
3096 
3097 next:
3098 	issued = 0;
3099 
3100 	mutex_lock(&dcc->cmd_lock);
3101 	if (unlikely(dcc->rbtree_check))
3102 		f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi,
3103 							&dcc->root, false));
3104 
3105 	dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root,
3106 					NULL, start,
3107 					(struct rb_entry **)&prev_dc,
3108 					(struct rb_entry **)&next_dc,
3109 					&insert_p, &insert_parent, true, NULL);
3110 	if (!dc)
3111 		dc = next_dc;
3112 
3113 	blk_start_plug(&plug);
3114 
3115 	while (dc && dc->lstart <= end) {
3116 		struct rb_node *node;
3117 		int err = 0;
3118 
3119 		if (dc->len < dpolicy->granularity)
3120 			goto skip;
3121 
3122 		if (dc->state != D_PREP) {
3123 			list_move_tail(&dc->list, &dcc->fstrim_list);
3124 			goto skip;
3125 		}
3126 
3127 		err = __submit_discard_cmd(sbi, dpolicy, dc, &issued);
3128 
3129 		if (issued >= dpolicy->max_requests) {
3130 			start = dc->lstart + dc->len;
3131 
3132 			if (err)
3133 				__remove_discard_cmd(sbi, dc);
3134 
3135 			blk_finish_plug(&plug);
3136 			mutex_unlock(&dcc->cmd_lock);
3137 			trimmed += __wait_all_discard_cmd(sbi, NULL);
3138 			f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
3139 			goto next;
3140 		}
3141 skip:
3142 		node = rb_next(&dc->rb_node);
3143 		if (err)
3144 			__remove_discard_cmd(sbi, dc);
3145 		dc = rb_entry_safe(node, struct discard_cmd, rb_node);
3146 
3147 		if (fatal_signal_pending(current))
3148 			break;
3149 	}
3150 
3151 	blk_finish_plug(&plug);
3152 	mutex_unlock(&dcc->cmd_lock);
3153 
3154 	return trimmed;
3155 }
3156 
3157 int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
3158 {
3159 	__u64 start = F2FS_BYTES_TO_BLK(range->start);
3160 	__u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1;
3161 	unsigned int start_segno, end_segno;
3162 	block_t start_block, end_block;
3163 	struct cp_control cpc;
3164 	struct discard_policy dpolicy;
3165 	unsigned long long trimmed = 0;
3166 	int err = 0;
3167 	bool need_align = f2fs_lfs_mode(sbi) && __is_large_section(sbi);
3168 
3169 	if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize)
3170 		return -EINVAL;
3171 
3172 	if (end < MAIN_BLKADDR(sbi))
3173 		goto out;
3174 
3175 	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
3176 		f2fs_warn(sbi, "Found FS corruption, run fsck to fix.");
3177 		return -EFSCORRUPTED;
3178 	}
3179 
3180 	/* start/end segment number in main_area */
3181 	start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start);
3182 	end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
3183 						GET_SEGNO(sbi, end);
3184 	if (need_align) {
3185 		start_segno = rounddown(start_segno, sbi->segs_per_sec);
3186 		end_segno = roundup(end_segno + 1, sbi->segs_per_sec) - 1;
3187 	}
3188 
3189 	cpc.reason = CP_DISCARD;
3190 	cpc.trim_minlen = max_t(__u64, 1, F2FS_BYTES_TO_BLK(range->minlen));
3191 	cpc.trim_start = start_segno;
3192 	cpc.trim_end = end_segno;
3193 
3194 	if (sbi->discard_blks == 0)
3195 		goto out;
3196 
3197 	f2fs_down_write(&sbi->gc_lock);
3198 	err = f2fs_write_checkpoint(sbi, &cpc);
3199 	f2fs_up_write(&sbi->gc_lock);
3200 	if (err)
3201 		goto out;
3202 
3203 	/*
3204 	 * We queued discard candidates, but we don't actually need to wait for
3205 	 * all of them, since they'll be issued at idle time by the runtime
3206 	 * discard thread. The user configuration appears to rely on runtime
3207 	 * discard or periodic fstrim rather than waiting here.
3208 	 */
3209 	if (f2fs_realtime_discard_enable(sbi))
3210 		goto out;
3211 
3212 	start_block = START_BLOCK(sbi, start_segno);
3213 	end_block = START_BLOCK(sbi, end_segno + 1);
3214 
3215 	__init_discard_policy(sbi, &dpolicy, DPOLICY_FSTRIM, cpc.trim_minlen);
3216 	trimmed = __issue_discard_cmd_range(sbi, &dpolicy,
3217 					start_block, end_block);
3218 
3219 	trimmed += __wait_discard_cmd_range(sbi, &dpolicy,
3220 					start_block, end_block);
3221 out:
3222 	if (!err)
3223 		range->len = F2FS_BLK_TO_BYTES(trimmed);
3224 	return err;
3225 }
3226 
3227 static bool __has_curseg_space(struct f2fs_sb_info *sbi,
3228 					struct curseg_info *curseg)
3229 {
3230 	return curseg->next_blkoff < f2fs_usable_blks_in_seg(sbi,
3231 							curseg->segno);
3232 }
3233 
3234 int f2fs_rw_hint_to_seg_type(enum rw_hint hint)
3235 {
3236 	switch (hint) {
3237 	case WRITE_LIFE_SHORT:
3238 		return CURSEG_HOT_DATA;
3239 	case WRITE_LIFE_EXTREME:
3240 		return CURSEG_COLD_DATA;
3241 	default:
3242 		return CURSEG_WARM_DATA;
3243 	}
3244 }
3245 
3246 /* This returns write hints for each segment type. These hints will be
3247  * passed down to the block layer. There are mapping tables which depend on
3248  * the mount option 'whint_mode'.
3249  *
3250  * 1) whint_mode=off. F2FS only passes down WRITE_LIFE_NOT_SET.
3251  *
3252  * 2) whint_mode=user-based. F2FS tries to pass down hints given by users.
3253  *
3254  * User                  F2FS                     Block
3255  * ----                  ----                     -----
3256  *                       META                     WRITE_LIFE_NOT_SET
3257  *                       HOT_NODE                 "
3258  *                       WARM_NODE                "
3259  *                       COLD_NODE                "
3260  * ioctl(COLD)           COLD_DATA                WRITE_LIFE_EXTREME
3261  * extension list        "                        "
3262  *
3263  * -- buffered io
3264  * WRITE_LIFE_EXTREME    COLD_DATA                WRITE_LIFE_EXTREME
3265  * WRITE_LIFE_SHORT      HOT_DATA                 WRITE_LIFE_SHORT
3266  * WRITE_LIFE_NOT_SET    WARM_DATA                WRITE_LIFE_NOT_SET
3267  * WRITE_LIFE_NONE       "                        "
3268  * WRITE_LIFE_MEDIUM     "                        "
3269  * WRITE_LIFE_LONG       "                        "
3270  *
3271  * -- direct io
3272  * WRITE_LIFE_EXTREME    COLD_DATA                WRITE_LIFE_EXTREME
3273  * WRITE_LIFE_SHORT      HOT_DATA                 WRITE_LIFE_SHORT
3274  * WRITE_LIFE_NOT_SET    WARM_DATA                WRITE_LIFE_NOT_SET
3275  * WRITE_LIFE_NONE       "                        WRITE_LIFE_NONE
3276  * WRITE_LIFE_MEDIUM     "                        WRITE_LIFE_MEDIUM
3277  * WRITE_LIFE_LONG       "                        WRITE_LIFE_LONG
3278  *
3279  * 3) whint_mode=fs-based. F2FS passes down hints with its policy.
3280  *
3281  * User                  F2FS                     Block
3282  * ----                  ----                     -----
3283  *                       META                     WRITE_LIFE_MEDIUM;
3284  *                       HOT_NODE                 WRITE_LIFE_NOT_SET
3285  *                       WARM_NODE                "
3286  *                       COLD_NODE                WRITE_LIFE_NONE
3287  * ioctl(COLD)           COLD_DATA                WRITE_LIFE_EXTREME
3288  * extension list        "                        "
3289  *
3290  * -- buffered io
3291  * WRITE_LIFE_EXTREME    COLD_DATA                WRITE_LIFE_EXTREME
3292  * WRITE_LIFE_SHORT      HOT_DATA                 WRITE_LIFE_SHORT
3293  * WRITE_LIFE_NOT_SET    WARM_DATA                WRITE_LIFE_LONG
3294  * WRITE_LIFE_NONE       "                        "
3295  * WRITE_LIFE_MEDIUM     "                        "
3296  * WRITE_LIFE_LONG       "                        "
3297  *
3298  * -- direct io
3299  * WRITE_LIFE_EXTREME    COLD_DATA                WRITE_LIFE_EXTREME
3300  * WRITE_LIFE_SHORT      HOT_DATA                 WRITE_LIFE_SHORT
3301  * WRITE_LIFE_NOT_SET    WARM_DATA                WRITE_LIFE_NOT_SET
3302  * WRITE_LIFE_NONE       "                        WRITE_LIFE_NONE
3303  * WRITE_LIFE_MEDIUM     "                        WRITE_LIFE_MEDIUM
3304  * WRITE_LIFE_LONG       "                        WRITE_LIFE_LONG
3305  */
3306 
3307 enum rw_hint f2fs_io_type_to_rw_hint(struct f2fs_sb_info *sbi,
3308 				enum page_type type, enum temp_type temp)
3309 {
3310 	if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_USER) {
3311 		if (type == DATA) {
3312 			if (temp == WARM)
3313 				return WRITE_LIFE_NOT_SET;
3314 			else if (temp == HOT)
3315 				return WRITE_LIFE_SHORT;
3316 			else if (temp == COLD)
3317 				return WRITE_LIFE_EXTREME;
3318 		} else {
3319 			return WRITE_LIFE_NOT_SET;
3320 		}
3321 	} else if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_FS) {
3322 		if (type == DATA) {
3323 			if (temp == WARM)
3324 				return WRITE_LIFE_LONG;
3325 			else if (temp == HOT)
3326 				return WRITE_LIFE_SHORT;
3327 			else if (temp == COLD)
3328 				return WRITE_LIFE_EXTREME;
3329 		} else if (type == NODE) {
3330 			if (temp == WARM || temp == HOT)
3331 				return WRITE_LIFE_NOT_SET;
3332 			else if (temp == COLD)
3333 				return WRITE_LIFE_NONE;
3334 		} else if (type == META) {
3335 			return WRITE_LIFE_MEDIUM;
3336 		}
3337 	}
3338 	return WRITE_LIFE_NOT_SET;
3339 }
3340 
3341 static int __get_segment_type_2(struct f2fs_io_info *fio)
3342 {
3343 	if (fio->type == DATA)
3344 		return CURSEG_HOT_DATA;
3345 	else
3346 		return CURSEG_HOT_NODE;
3347 }
3348 
3349 static int __get_segment_type_4(struct f2fs_io_info *fio)
3350 {
3351 	if (fio->type == DATA) {
3352 		struct inode *inode = fio->page->mapping->host;
3353 
3354 		if (S_ISDIR(inode->i_mode))
3355 			return CURSEG_HOT_DATA;
3356 		else
3357 			return CURSEG_COLD_DATA;
3358 	} else {
3359 		if (IS_DNODE(fio->page) && is_cold_node(fio->page))
3360 			return CURSEG_WARM_NODE;
3361 		else
3362 			return CURSEG_COLD_NODE;
3363 	}
3364 }
3365 
3366 static int __get_segment_type_6(struct f2fs_io_info *fio)
3367 {
3368 	if (fio->type == DATA) {
3369 		struct inode *inode = fio->page->mapping->host;
3370 
3371 		if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
3372 			return CURSEG_COLD_DATA_PINNED;
3373 
3374 		if (page_private_gcing(fio->page)) {
3375 			if (fio->sbi->am.atgc_enabled &&
3376 				(fio->io_type == FS_DATA_IO) &&
3377 				(fio->sbi->gc_mode != GC_URGENT_HIGH))
3378 				return CURSEG_ALL_DATA_ATGC;
3379 			else
3380 				return CURSEG_COLD_DATA;
3381 		}
3382 		if (file_is_cold(inode) || f2fs_need_compress_data(inode))
3383 			return CURSEG_COLD_DATA;
3384 		if (file_is_hot(inode) ||
3385 				is_inode_flag_set(inode, FI_HOT_DATA) ||
3386 				f2fs_is_atomic_file(inode) ||
3387 				f2fs_is_volatile_file(inode))
3388 			return CURSEG_HOT_DATA;
3389 		return f2fs_rw_hint_to_seg_type(inode->i_write_hint);
3390 	} else {
3391 		if (IS_DNODE(fio->page))
3392 			return is_cold_node(fio->page) ? CURSEG_WARM_NODE :
3393 						CURSEG_HOT_NODE;
3394 		return CURSEG_COLD_NODE;
3395 	}
3396 }
3397 
3398 static int __get_segment_type(struct f2fs_io_info *fio)
3399 {
3400 	int type = 0;
3401 
3402 	switch (F2FS_OPTION(fio->sbi).active_logs) {
3403 	case 2:
3404 		type = __get_segment_type_2(fio);
3405 		break;
3406 	case 4:
3407 		type = __get_segment_type_4(fio);
3408 		break;
3409 	case 6:
3410 		type = __get_segment_type_6(fio);
3411 		break;
3412 	default:
3413 		f2fs_bug_on(fio->sbi, true);
3414 	}
3415 
3416 	if (IS_HOT(type))
3417 		fio->temp = HOT;
3418 	else if (IS_WARM(type))
3419 		fio->temp = WARM;
3420 	else
3421 		fio->temp = COLD;
3422 	return type;
3423 }
3424 
3425 void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
3426 		block_t old_blkaddr, block_t *new_blkaddr,
3427 		struct f2fs_summary *sum, int type,
3428 		struct f2fs_io_info *fio)
3429 {
3430 	struct sit_info *sit_i = SIT_I(sbi);
3431 	struct curseg_info *curseg = CURSEG_I(sbi, type);
3432 	unsigned long long old_mtime;
3433 	bool from_gc = (type == CURSEG_ALL_DATA_ATGC);
3434 	struct seg_entry *se = NULL;
3435 
3436 	f2fs_down_read(&SM_I(sbi)->curseg_lock);
3437 
3438 	mutex_lock(&curseg->curseg_mutex);
3439 	down_write(&sit_i->sentry_lock);
3440 
3441 	if (from_gc) {
3442 		f2fs_bug_on(sbi, GET_SEGNO(sbi, old_blkaddr) == NULL_SEGNO);
3443 		se = get_seg_entry(sbi, GET_SEGNO(sbi, old_blkaddr));
3444 		sanity_check_seg_type(sbi, se->type);
3445 		f2fs_bug_on(sbi, IS_NODESEG(se->type));
3446 	}
3447 	*new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
3448 
3449 	f2fs_bug_on(sbi, curseg->next_blkoff >= sbi->blocks_per_seg);
3450 
3451 	f2fs_wait_discard_bio(sbi, *new_blkaddr);
3452 
3453 	/*
3454 	 * __add_sum_entry must be called under the curseg_mutex
3455 	 * because this function updates a summary entry in the
3456 	 * current summary block.
3457 	 */
3458 	__add_sum_entry(sbi, type, sum);
3459 
3460 	__refresh_next_blkoff(sbi, curseg);
3461 
3462 	stat_inc_block_count(sbi, curseg);
3463 
3464 	if (from_gc) {
3465 		old_mtime = get_segment_mtime(sbi, old_blkaddr);
3466 	} else {
3467 		update_segment_mtime(sbi, old_blkaddr, 0);
3468 		old_mtime = 0;
3469 	}
3470 	update_segment_mtime(sbi, *new_blkaddr, old_mtime);
3471 
3472 	/*
3473 	 * SIT information should be updated before segment allocation,
3474 	 * since SSR needs the latest valid block information.
3475 	 */
3476 	update_sit_entry(sbi, *new_blkaddr, 1);
3477 	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
3478 		update_sit_entry(sbi, old_blkaddr, -1);
3479 
3480 	if (!__has_curseg_space(sbi, curseg)) {
3481 		if (from_gc)
3482 			get_atssr_segment(sbi, type, se->type,
3483 						AT_SSR, se->mtime);
3484 		else
3485 			sit_i->s_ops->allocate_segment(sbi, type, false);
3486 	}
3487 	/*
3488 	 * The segment dirty status should be updated after segment allocation,
3489 	 * so we only need to update the status once, after the previous
3490 	 * segment has been closed.
3491 	 */
3492 	locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
3493 	locate_dirty_segment(sbi, GET_SEGNO(sbi, *new_blkaddr));
3494 
3495 	up_write(&sit_i->sentry_lock);
3496 
3497 	if (page && IS_NODESEG(type)) {
3498 		fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));
3499 
3500 		f2fs_inode_chksum_set(sbi, page);
3501 	}
3502 
3503 	if (fio) {
3504 		struct f2fs_bio_info *io;
3505 
3506 		if (F2FS_IO_ALIGNED(sbi))
3507 			fio->retry = false;
3508 
3509 		INIT_LIST_HEAD(&fio->list);
3510 		fio->in_list = true;
3511 		io = sbi->write_io[fio->type] + fio->temp;
3512 		spin_lock(&io->io_lock);
3513 		list_add_tail(&fio->list, &io->io_list);
3514 		spin_unlock(&io->io_lock);
3515 	}
3516 
3517 	mutex_unlock(&curseg->curseg_mutex);
3518 
3519 	f2fs_up_read(&SM_I(sbi)->curseg_lock);
3520 }
3521 
3522 void f2fs_update_device_state(struct f2fs_sb_info *sbi, nid_t ino,
3523 					block_t blkaddr, unsigned int blkcnt)
3524 {
3525 	if (!f2fs_is_multi_device(sbi))
3526 		return;
3527 
3528 	while (1) {
3529 		unsigned int devidx = f2fs_target_device_index(sbi, blkaddr);
3530 		unsigned int blks = FDEV(devidx).end_blk - blkaddr + 1;
3531 
3532 		/* update device state for fsync */
3533 		f2fs_set_dirty_device(sbi, ino, devidx, FLUSH_INO);
3534 
3535 		/* update device state for checkpoint */
3536 		if (!f2fs_test_bit(devidx, (char *)&sbi->dirty_device)) {
3537 			spin_lock(&sbi->dev_lock);
3538 			f2fs_set_bit(devidx, (char *)&sbi->dirty_device);
3539 			spin_unlock(&sbi->dev_lock);
3540 		}
3541 
3542 		if (blkcnt <= blks)
3543 			break;
3544 		blkcnt -= blks;
3545 		blkaddr += blks;
3546 	}
3547 }
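/*
 * Illustrative example (hypothetical layout): with two devices mapped to
 * blocks [0..999] and [1000..1999], a call with blkaddr == 998 and
 * blkcnt == 4 marks both devices dirty for fsync/checkpoint, advancing
 * blkaddr across the device boundary on each iteration.
 */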
3548 
3549 static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
3550 {
3551 	int type = __get_segment_type(fio);
3552 	bool keep_order = (f2fs_lfs_mode(fio->sbi) && type == CURSEG_COLD_DATA);
3553 
3554 	if (keep_order)
3555 		f2fs_down_read(&fio->sbi->io_order_lock);
3556 reallocate:
3557 	f2fs_allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
3558 			&fio->new_blkaddr, sum, type, fio);
3559 	if (GET_SEGNO(fio->sbi, fio->old_blkaddr) != NULL_SEGNO) {
3560 		invalidate_mapping_pages(META_MAPPING(fio->sbi),
3561 					fio->old_blkaddr, fio->old_blkaddr);
3562 		f2fs_invalidate_compress_page(fio->sbi, fio->old_blkaddr);
3563 	}
3564 
3565 	/* write out the dirty page to the bdev */
3566 	f2fs_submit_page_write(fio);
3567 	if (fio->retry) {
3568 		fio->old_blkaddr = fio->new_blkaddr;
3569 		goto reallocate;
3570 	}
3571 
3572 	f2fs_update_device_state(fio->sbi, fio->ino, fio->new_blkaddr, 1);
3573 
3574 	if (keep_order)
3575 		f2fs_up_read(&fio->sbi->io_order_lock);
3576 }
3577 
3578 void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
3579 					enum iostat_type io_type)
3580 {
3581 	struct f2fs_io_info fio = {
3582 		.sbi = sbi,
3583 		.type = META,
3584 		.temp = HOT,
3585 		.op = REQ_OP_WRITE,
3586 		.op_flags = REQ_SYNC | REQ_META | REQ_PRIO,
3587 		.old_blkaddr = page->index,
3588 		.new_blkaddr = page->index,
3589 		.page = page,
3590 		.encrypted_page = NULL,
3591 		.in_list = false,
3592 	};
3593 
3594 	if (unlikely(page->index >= MAIN_BLKADDR(sbi)))
3595 		fio.op_flags &= ~REQ_META;
3596 
3597 	set_page_writeback(page);
3598 	ClearPageError(page);
3599 	f2fs_submit_page_write(&fio);
3600 
3601 	stat_inc_meta_count(sbi, page->index);
3602 	f2fs_update_iostat(sbi, io_type, F2FS_BLKSIZE);
3603 }
3604 
3605 void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio)
3606 {
3607 	struct f2fs_summary sum;
3608 
3609 	set_summary(&sum, nid, 0, 0);
3610 	do_write_page(&sum, fio);
3611 
3612 	f2fs_update_iostat(fio->sbi, fio->io_type, F2FS_BLKSIZE);
3613 }
3614 
3615 void f2fs_outplace_write_data(struct dnode_of_data *dn,
3616 					struct f2fs_io_info *fio)
3617 {
3618 	struct f2fs_sb_info *sbi = fio->sbi;
3619 	struct f2fs_summary sum;
3620 
3621 	f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR);
3622 	set_summary(&sum, dn->nid, dn->ofs_in_node, fio->version);
3623 	do_write_page(&sum, fio);
3624 	f2fs_update_data_blkaddr(dn, fio->new_blkaddr);
3625 
3626 	f2fs_update_iostat(sbi, fio->io_type, F2FS_BLKSIZE);
3627 }
3628 
3629 int f2fs_inplace_write_data(struct f2fs_io_info *fio)
3630 {
3631 	int err;
3632 	struct f2fs_sb_info *sbi = fio->sbi;
3633 	unsigned int segno;
3634 
3635 	fio->new_blkaddr = fio->old_blkaddr;
3636 	/* i/o temperature is needed for passing down write hints */
3637 	__get_segment_type(fio);
3638 
3639 	segno = GET_SEGNO(sbi, fio->new_blkaddr);
3640 
3641 	if (!IS_DATASEG(get_seg_entry(sbi, segno)->type)) {
3642 		set_sbi_flag(sbi, SBI_NEED_FSCK);
3643 		f2fs_warn(sbi, "%s: incorrect segment(%u) type, run fsck to fix.",
3644 			  __func__, segno);
3645 		err = -EFSCORRUPTED;
3646 		goto drop_bio;
3647 	}
3648 
3649 	if (f2fs_cp_error(sbi)) {
3650 		err = -EIO;
3651 		goto drop_bio;
3652 	}
3653 
3654 	invalidate_mapping_pages(META_MAPPING(sbi),
3655 				fio->new_blkaddr, fio->new_blkaddr);
3656 
3657 	stat_inc_inplace_blocks(fio->sbi);
3658 
3659 	if (fio->bio && !(SM_I(sbi)->ipu_policy & (1 << F2FS_IPU_NOCACHE)))
3660 		err = f2fs_merge_page_bio(fio);
3661 	else
3662 		err = f2fs_submit_page_bio(fio);
3663 	if (!err) {
3664 		f2fs_update_device_state(fio->sbi, fio->ino,
3665 						fio->new_blkaddr, 1);
3666 		f2fs_update_iostat(fio->sbi, fio->io_type, F2FS_BLKSIZE);
3667 	}
3668 
3669 	return err;
3670 drop_bio:
3671 	if (fio->bio && *(fio->bio)) {
3672 		struct bio *bio = *(fio->bio);
3673 
3674 		bio->bi_status = BLK_STS_IOERR;
3675 		bio_endio(bio);
3676 		*(fio->bio) = NULL;
3677 	}
3678 	return err;
3679 }
3680 
3681 static inline int __f2fs_get_curseg(struct f2fs_sb_info *sbi,
3682 						unsigned int segno)
3683 {
3684 	int i;
3685 
3686 	for (i = CURSEG_HOT_DATA; i < NO_CHECK_TYPE; i++) {
3687 		if (CURSEG_I(sbi, i)->segno == segno)
3688 			break;
3689 	}
3690 	return i;
3691 }
3692 
3693 void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
3694 				block_t old_blkaddr, block_t new_blkaddr,
3695 				bool recover_curseg, bool recover_newaddr,
3696 				bool from_gc)
3697 {
3698 	struct sit_info *sit_i = SIT_I(sbi);
3699 	struct curseg_info *curseg;
3700 	unsigned int segno, old_cursegno;
3701 	struct seg_entry *se;
3702 	int type;
3703 	unsigned short old_blkoff;
3704 	unsigned char old_alloc_type;
3705 
3706 	segno = GET_SEGNO(sbi, new_blkaddr);
3707 	se = get_seg_entry(sbi, segno);
3708 	type = se->type;
3709 
3710 	f2fs_down_write(&SM_I(sbi)->curseg_lock);
3711 
3712 	if (!recover_curseg) {
3713 		/* for recovery flow */
3714 		if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) {
3715 			if (old_blkaddr == NULL_ADDR)
3716 				type = CURSEG_COLD_DATA;
3717 			else
3718 				type = CURSEG_WARM_DATA;
3719 		}
3720 	} else {
3721 		if (IS_CURSEG(sbi, segno)) {
3722 			/* se->type is volatile due to SSR allocation */
3723 			type = __f2fs_get_curseg(sbi, segno);
3724 			f2fs_bug_on(sbi, type == NO_CHECK_TYPE);
3725 		} else {
3726 			type = CURSEG_WARM_DATA;
3727 		}
3728 	}
3729 
3730 	f2fs_bug_on(sbi, !IS_DATASEG(type));
3731 	curseg = CURSEG_I(sbi, type);
3732 
3733 	mutex_lock(&curseg->curseg_mutex);
3734 	down_write(&sit_i->sentry_lock);
3735 
3736 	old_cursegno = curseg->segno;
3737 	old_blkoff = curseg->next_blkoff;
3738 	old_alloc_type = curseg->alloc_type;
3739 
3740 	/* change the current segment */
3741 	if (segno != curseg->segno) {
3742 		curseg->next_segno = segno;
3743 		change_curseg(sbi, type, true);
3744 	}
3745 
3746 	curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr);
3747 	__add_sum_entry(sbi, type, sum);
3748 
3749 	if (!recover_curseg || recover_newaddr) {
3750 		if (!from_gc)
3751 			update_segment_mtime(sbi, new_blkaddr, 0);
3752 		update_sit_entry(sbi, new_blkaddr, 1);
3753 	}
3754 	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) {
3755 		invalidate_mapping_pages(META_MAPPING(sbi),
3756 					old_blkaddr, old_blkaddr);
3757 		f2fs_invalidate_compress_page(sbi, old_blkaddr);
3758 		if (!from_gc)
3759 			update_segment_mtime(sbi, old_blkaddr, 0);
3760 		update_sit_entry(sbi, old_blkaddr, -1);
3761 	}
3762 
3763 	locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
3764 	locate_dirty_segment(sbi, GET_SEGNO(sbi, new_blkaddr));
3765 
3766 	locate_dirty_segment(sbi, old_cursegno);
3767 
3768 	if (recover_curseg) {
3769 		if (old_cursegno != curseg->segno) {
3770 			curseg->next_segno = old_cursegno;
3771 			change_curseg(sbi, type, true);
3772 		}
3773 		curseg->next_blkoff = old_blkoff;
3774 		curseg->alloc_type = old_alloc_type;
3775 	}
3776 
3777 	up_write(&sit_i->sentry_lock);
3778 	mutex_unlock(&curseg->curseg_mutex);
3779 	f2fs_up_write(&SM_I(sbi)->curseg_lock);
3780 }
3781 
3782 void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
3783 				block_t old_addr, block_t new_addr,
3784 				unsigned char version, bool recover_curseg,
3785 				bool recover_newaddr)
3786 {
3787 	struct f2fs_summary sum;
3788 
3789 	set_summary(&sum, dn->nid, dn->ofs_in_node, version);
3790 
3791 	f2fs_do_replace_block(sbi, &sum, old_addr, new_addr,
3792 					recover_curseg, recover_newaddr, false);
3793 
3794 	f2fs_update_data_blkaddr(dn, new_addr);
3795 }
3796 
3797 void f2fs_wait_on_page_writeback(struct page *page,
3798 				enum page_type type, bool ordered, bool locked)
3799 {
3800 	if (PageWriteback(page)) {
3801 		struct f2fs_sb_info *sbi = F2FS_P_SB(page);
3802 
3803 		/* submit cached LFS IO */
3804 		f2fs_submit_merged_write_cond(sbi, NULL, page, 0, type);
3805 		/* submit cached IPU IO */
3806 		f2fs_submit_merged_ipu_write(sbi, NULL, page);
3807 		if (ordered) {
3808 			wait_on_page_writeback(page);
3809 			f2fs_bug_on(sbi, locked && PageWriteback(page));
3810 		} else {
3811 			wait_for_stable_page(page);
3812 		}
3813 	}
3814 }
3815 
3816 void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr)
3817 {
3818 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3819 	struct page *cpage;
3820 
3821 	if (!f2fs_post_read_required(inode))
3822 		return;
3823 
3824 	if (!__is_valid_data_blkaddr(blkaddr))
3825 		return;
3826 
3827 	cpage = find_lock_page(META_MAPPING(sbi), blkaddr);
3828 	if (cpage) {
3829 		f2fs_wait_on_page_writeback(cpage, DATA, true, true);
3830 		f2fs_put_page(cpage, 1);
3831 	}
3832 }
3833 
3834 void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr,
3835 								block_t len)
3836 {
3837 	block_t i;
3838 
3839 	for (i = 0; i < len; i++)
3840 		f2fs_wait_on_block_writeback(inode, blkaddr + i);
3841 }
3842 
3843 static int read_compacted_summaries(struct f2fs_sb_info *sbi)
3844 {
3845 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
3846 	struct curseg_info *seg_i;
3847 	unsigned char *kaddr;
3848 	struct page *page;
3849 	block_t start;
3850 	int i, j, offset;
3851 
3852 	start = start_sum_block(sbi);
3853 
3854 	page = f2fs_get_meta_page(sbi, start++);
3855 	if (IS_ERR(page))
3856 		return PTR_ERR(page);
3857 	kaddr = (unsigned char *)page_address(page);
3858 
3859 	/* Step 1: restore nat cache */
3860 	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
3861 	memcpy(seg_i->journal, kaddr, SUM_JOURNAL_SIZE);
3862 
3863 	/* Step 2: restore sit cache */
3864 	seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
3865 	memcpy(seg_i->journal, kaddr + SUM_JOURNAL_SIZE, SUM_JOURNAL_SIZE);
3866 	offset = 2 * SUM_JOURNAL_SIZE;
3867 
3868 	/* Step 3: restore summary entries */
3869 	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
3870 		unsigned short blk_off;
3871 		unsigned int segno;
3872 
3873 		seg_i = CURSEG_I(sbi, i);
3874 		segno = le32_to_cpu(ckpt->cur_data_segno[i]);
3875 		blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
3876 		seg_i->next_segno = segno;
3877 		reset_curseg(sbi, i, 0);
3878 		seg_i->alloc_type = ckpt->alloc_type[i];
3879 		seg_i->next_blkoff = blk_off;
3880 
3881 		if (seg_i->alloc_type == SSR)
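		/*
		 * with SSR, blocks beyond next_blkoff may still be valid,
		 * so read summaries for the whole segment
		 */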
3882 			blk_off = sbi->blocks_per_seg;
3883 
3884 		for (j = 0; j < blk_off; j++) {
3885 			struct f2fs_summary *s;
3886 
3887 			s = (struct f2fs_summary *)(kaddr + offset);
3888 			seg_i->sum_blk->entries[j] = *s;
3889 			offset += SUMMARY_SIZE;
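			/*
			 * move to the next summary page once appending another
			 * entry would run into the reserved footer area
			 */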
3890 			if (offset + SUMMARY_SIZE <= PAGE_SIZE -
3891 						SUM_FOOTER_SIZE)
3892 				continue;
3893 
3894 			f2fs_put_page(page, 1);
3895 			page = NULL;
3896 
3897 			page = f2fs_get_meta_page(sbi, start++);
3898 			if (IS_ERR(page))
3899 				return PTR_ERR(page);
3900 			kaddr = (unsigned char *)page_address(page);
3901 			offset = 0;
3902 		}
3903 	}
3904 	f2fs_put_page(page, 1);
3905 	return 0;
3906 }
3907 
3908 static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
3909 {
3910 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
3911 	struct f2fs_summary_block *sum;
3912 	struct curseg_info *curseg;
3913 	struct page *new;
3914 	unsigned short blk_off;
3915 	unsigned int segno = 0;
3916 	block_t blk_addr = 0;
3917 	int err = 0;
3918 
3919 	/* get segment number and block addr */
3920 	if (IS_DATASEG(type)) {
3921 		segno = le32_to_cpu(ckpt->cur_data_segno[type]);
3922 		blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type -
3923 							CURSEG_HOT_DATA]);
3924 		if (__exist_node_summaries(sbi))
3925 			blk_addr = sum_blk_addr(sbi, NR_CURSEG_PERSIST_TYPE, type);
3926 		else
3927 			blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
3928 	} else {
3929 		segno = le32_to_cpu(ckpt->cur_node_segno[type -
3930 							CURSEG_HOT_NODE]);
3931 		blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type -
3932 							CURSEG_HOT_NODE]);
3933 		if (__exist_node_summaries(sbi))
3934 			blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
3935 							type - CURSEG_HOT_NODE);
3936 		else
3937 			blk_addr = GET_SUM_BLOCK(sbi, segno);
3938 	}
3939 
3940 	new = f2fs_get_meta_page(sbi, blk_addr);
3941 	if (IS_ERR(new))
3942 		return PTR_ERR(new);
3943 	sum = (struct f2fs_summary_block *)page_address(new);
3944 
3945 	if (IS_NODESEG(type)) {
3946 		if (__exist_node_summaries(sbi)) {
3947 			struct f2fs_summary *ns = &sum->entries[0];
3948 			int i;
3949 
3950 			for (i = 0; i < sbi->blocks_per_seg; i++, ns++) {
3951 				ns->version = 0;
3952 				ns->ofs_in_node = 0;
3953 			}
3954 		} else {
3955 			err = f2fs_restore_node_summary(sbi, segno, sum);
3956 			if (err)
3957 				goto out;
3958 		}
3959 	}
3960 
3961 	/* set the uncompleted segment as the current segment */
3962 	curseg = CURSEG_I(sbi, type);
3963 	mutex_lock(&curseg->curseg_mutex);
3964 
3965 	/* update journal info */
3966 	down_write(&curseg->journal_rwsem);
3967 	memcpy(curseg->journal, &sum->journal, SUM_JOURNAL_SIZE);
3968 	up_write(&curseg->journal_rwsem);
3969 
3970 	memcpy(curseg->sum_blk->entries, sum->entries, SUM_ENTRY_SIZE);
3971 	memcpy(&curseg->sum_blk->footer, &sum->footer, SUM_FOOTER_SIZE);
3972 	curseg->next_segno = segno;
3973 	reset_curseg(sbi, type, 0);
3974 	curseg->alloc_type = ckpt->alloc_type[type];
3975 	curseg->next_blkoff = blk_off;
3976 	mutex_unlock(&curseg->curseg_mutex);
3977 out:
3978 	f2fs_put_page(new, 1);
3979 	return err;
3980 }
3981 
3982 static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
3983 {
3984 	struct f2fs_journal *sit_j = CURSEG_I(sbi, CURSEG_COLD_DATA)->journal;
3985 	struct f2fs_journal *nat_j = CURSEG_I(sbi, CURSEG_HOT_DATA)->journal;
3986 	int type = CURSEG_HOT_DATA;
3987 	int err;
3988 
3989 	if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG)) {
3990 		int npages = f2fs_npages_for_summary_flush(sbi, true);
3991 
3992 		if (npages >= 2)
3993 			f2fs_ra_meta_pages(sbi, start_sum_block(sbi), npages,
3994 							META_CP, true);
3995 
3996 		/* restore for compacted data summary */
3997 		err = read_compacted_summaries(sbi);
3998 		if (err)
3999 			return err;
4000 		type = CURSEG_HOT_NODE;
4001 	}
4002 
4003 	if (__exist_node_summaries(sbi))
4004 		f2fs_ra_meta_pages(sbi,
4005 				sum_blk_addr(sbi, NR_CURSEG_PERSIST_TYPE, type),
4006 				NR_CURSEG_PERSIST_TYPE - type, META_CP, true);
4007 
4008 	for (; type <= CURSEG_COLD_NODE; type++) {
4009 		err = read_normal_summaries(sbi, type);
4010 		if (err)
4011 			return err;
4012 	}
4013 
4014 	/* sanity check for summary blocks */
4015 	if (nats_in_cursum(nat_j) > NAT_JOURNAL_ENTRIES ||
4016 			sits_in_cursum(sit_j) > SIT_JOURNAL_ENTRIES) {
4017 		f2fs_err(sbi, "invalid journal entries nats %u sits %u",
4018 			 nats_in_cursum(nat_j), sits_in_cursum(sit_j));
4019 		return -EINVAL;
4020 	}
4021 
4022 	return 0;
4023 }
4024 
4025 static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
4026 {
4027 	struct page *page;
4028 	unsigned char *kaddr;
4029 	struct f2fs_summary *summary;
4030 	struct curseg_info *seg_i;
4031 	int written_size = 0;
4032 	int i, j;
4033 
4034 	page = f2fs_grab_meta_page(sbi, blkaddr++);
4035 	kaddr = (unsigned char *)page_address(page);
4036 	memset(kaddr, 0, PAGE_SIZE);
4037 
4038 	/* Step 1: write nat cache */
4039 	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
4040 	memcpy(kaddr, seg_i->journal, SUM_JOURNAL_SIZE);
4041 	written_size += SUM_JOURNAL_SIZE;
4042 
4043 	/* Step 2: write sit cache */
4044 	seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
4045 	memcpy(kaddr + written_size, seg_i->journal, SUM_JOURNAL_SIZE);
4046 	written_size += SUM_JOURNAL_SIZE;
4047 
4048 	/* Step 3: write summary entries */
4049 	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
4050 		unsigned short blkoff;
4051 
4052 		seg_i = CURSEG_I(sbi, i);
4053 		if (sbi->ckpt->alloc_type[i] == SSR)
4054 			blkoff = sbi->blocks_per_seg;
4055 		else
4056 			blkoff = curseg_blkoff(sbi, i);
4057 
4058 		for (j = 0; j < blkoff; j++) {
4059 			if (!page) {
4060 				page = f2fs_grab_meta_page(sbi, blkaddr++);
4061 				kaddr = (unsigned char *)page_address(page);
4062 				memset(kaddr, 0, PAGE_SIZE);
4063 				written_size = 0;
4064 			}
4065 			summary = (struct f2fs_summary *)(kaddr + written_size);
4066 			*summary = seg_i->sum_blk->entries[j];
4067 			written_size += SUMMARY_SIZE;
4068 
4069 			if (written_size + SUMMARY_SIZE <= PAGE_SIZE -
4070 							SUM_FOOTER_SIZE)
4071 				continue;
4072 
4073 			set_page_dirty(page);
4074 			f2fs_put_page(page, 1);
4075 			page = NULL;
4076 		}
4077 	}
4078 	if (page) {
4079 		set_page_dirty(page);
4080 		f2fs_put_page(page, 1);
4081 	}
4082 }
4083 
4084 static void write_normal_summaries(struct f2fs_sb_info *sbi,
4085 					block_t blkaddr, int type)
4086 {
4087 	int i, end;
4088 
4089 	if (IS_DATASEG(type))
4090 		end = type + NR_CURSEG_DATA_TYPE;
4091 	else
4092 		end = type + NR_CURSEG_NODE_TYPE;
4093 
4094 	for (i = type; i < end; i++)
4095 		write_current_sum_page(sbi, i, blkaddr + (i - type));
4096 }
4097 
4098 void f2fs_write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
4099 {
4100 	if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG))
4101 		write_compacted_summaries(sbi, start_blk);
4102 	else
4103 		write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
4104 }
4105 
4106 void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
4107 {
4108 	write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
4109 }
4110 
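/*
 * Look up @val (a nid for NAT_JOURNAL, a segno for SIT_JOURNAL) in the
 * in-memory journal and return its index.  On a miss, append a fresh
 * slot and return its index if @alloc is set and space remains;
 * otherwise return -1.
 */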
4111 int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
4112 					unsigned int val, int alloc)
4113 {
4114 	int i;
4115 
4116 	if (type == NAT_JOURNAL) {
4117 		for (i = 0; i < nats_in_cursum(journal); i++) {
4118 			if (le32_to_cpu(nid_in_journal(journal, i)) == val)
4119 				return i;
4120 		}
4121 		if (alloc && __has_cursum_space(journal, 1, NAT_JOURNAL))
4122 			return update_nats_in_cursum(journal, 1);
4123 	} else if (type == SIT_JOURNAL) {
4124 		for (i = 0; i < sits_in_cursum(journal); i++)
4125 			if (le32_to_cpu(segno_in_journal(journal, i)) == val)
4126 				return i;
4127 		if (alloc && __has_cursum_space(journal, 1, SIT_JOURNAL))
4128 			return update_sits_in_cursum(journal, 1);
4129 	}
4130 	return -1;
4131 }
4132 
4133 static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
4134 					unsigned int segno)
4135 {
4136 	return f2fs_get_meta_page(sbi, current_sit_addr(sbi, segno));
4137 }
4138 
4139 static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
4140 					unsigned int start)
4141 {
4142 	struct sit_info *sit_i = SIT_I(sbi);
4143 	struct page *page;
4144 	pgoff_t src_off, dst_off;
4145 
4146 	src_off = current_sit_addr(sbi, start);
4147 	dst_off = next_sit_addr(sbi, src_off);
4148 
4149 	page = f2fs_grab_meta_page(sbi, dst_off);
4150 	seg_info_to_sit_page(sbi, page, start);
4151 
4152 	set_page_dirty(page);
4153 	set_to_next_sit(sit_i, start);
4154 
4155 	return page;
4156 }
4157 
4158 static struct sit_entry_set *grab_sit_entry_set(void)
4159 {
4160 	struct sit_entry_set *ses =
4161 			f2fs_kmem_cache_alloc(sit_entry_set_slab,
4162 						GFP_NOFS, true, NULL);
4163 
4164 	ses->entry_cnt = 0;
4165 	INIT_LIST_HEAD(&ses->set_list);
4166 	return ses;
4167 }
4168 
4169 static void release_sit_entry_set(struct sit_entry_set *ses)
4170 {
4171 	list_del(&ses->set_list);
4172 	kmem_cache_free(sit_entry_set_slab, ses);
4173 }
4174 
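/*
 * Keep the set list sorted by ascending entry_cnt after @ses has grown,
 * so that f2fs_flush_sit_entries() visits small sets first and can
 * journal them while cursum space lasts, writing SIT pages only for the
 * large ones.
 */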
4175 static void adjust_sit_entry_set(struct sit_entry_set *ses,
4176 						struct list_head *head)
4177 {
4178 	struct sit_entry_set *next = ses;
4179 
4180 	if (list_is_last(&ses->set_list, head))
4181 		return;
4182 
4183 	list_for_each_entry_continue(next, head, set_list)
4184 		if (ses->entry_cnt <= next->entry_cnt)
4185 			break;
4186 
4187 	list_move_tail(&ses->set_list, &next->set_list);
4188 }
4189 
4190 static void add_sit_entry(unsigned int segno, struct list_head *head)
4191 {
4192 	struct sit_entry_set *ses;
4193 	unsigned int start_segno = START_SEGNO(segno);
4194 
4195 	list_for_each_entry(ses, head, set_list) {
4196 		if (ses->start_segno == start_segno) {
4197 			ses->entry_cnt++;
4198 			adjust_sit_entry_set(ses, head);
4199 			return;
4200 		}
4201 	}
4202 
4203 	ses = grab_sit_entry_set();
4204 
4205 	ses->start_segno = start_segno;
4206 	ses->entry_cnt++;
4207 	list_add(&ses->set_list, head);
4208 }
4209 
4210 static void add_sits_in_set(struct f2fs_sb_info *sbi)
4211 {
4212 	struct f2fs_sm_info *sm_info = SM_I(sbi);
4213 	struct list_head *set_list = &sm_info->sit_entry_set;
4214 	unsigned long *bitmap = SIT_I(sbi)->dirty_sentries_bitmap;
4215 	unsigned int segno;
4216 
4217 	for_each_set_bit(segno, bitmap, MAIN_SEGS(sbi))
4218 		add_sit_entry(segno, set_list);
4219 }
4220 
4221 static void remove_sits_in_journal(struct f2fs_sb_info *sbi)
4222 {
4223 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
4224 	struct f2fs_journal *journal = curseg->journal;
4225 	int i;
4226 
4227 	down_write(&curseg->journal_rwsem);
4228 	for (i = 0; i < sits_in_cursum(journal); i++) {
4229 		unsigned int segno;
4230 		bool dirtied;
4231 
4232 		segno = le32_to_cpu(segno_in_journal(journal, i));
4233 		dirtied = __mark_sit_entry_dirty(sbi, segno);
4234 
4235 		if (!dirtied)
4236 			add_sit_entry(segno, &SM_I(sbi)->sit_entry_set);
4237 	}
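	/* every journalled entry is now tracked in a set; empty the journal */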
4238 	update_sits_in_cursum(journal, -i);
4239 	up_write(&curseg->journal_rwsem);
4240 }
4241 
4242 /*
4243  * CP calls this function, which flushes SIT entries including sit_journal,
4244  * and moves prefree segs to free segs.
4245  */
4246 void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
4247 {
4248 	struct sit_info *sit_i = SIT_I(sbi);
4249 	unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
4250 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
4251 	struct f2fs_journal *journal = curseg->journal;
4252 	struct sit_entry_set *ses, *tmp;
4253 	struct list_head *head = &SM_I(sbi)->sit_entry_set;
4254 	bool to_journal = !is_sbi_flag_set(sbi, SBI_IS_RESIZEFS);
4255 	struct seg_entry *se;
4256 
4257 	down_write(&sit_i->sentry_lock);
4258 
4259 	if (!sit_i->dirty_sentries)
4260 		goto out;
4261 
4262 	/*
4263 	 * temporarily add and account the sit entries of the dirty
4264 	 * bitmap in sit entry sets
4265 	 */
4266 	add_sits_in_set(sbi);
4267 
4268 	/*
4269 	 * if there is not enough space in the journal to store dirty sit
4270 	 * entries, remove all entries from the journal and add and account
4271 	 * them in the sit entry set.
4272 	 */
4273 	if (!__has_cursum_space(journal, sit_i->dirty_sentries, SIT_JOURNAL) ||
4274 								!to_journal)
4275 		remove_sits_in_journal(sbi);
4276 
4277 	/*
4278 	 * there are two steps to flush sit entries:
4279 	 * #1, flush sit entries to journal in current cold data summary block.
4280 	 * #2, flush sit entries to sit page.
4281 	 */
4282 	list_for_each_entry_safe(ses, tmp, head, set_list) {
4283 		struct page *page = NULL;
4284 		struct f2fs_sit_block *raw_sit = NULL;
4285 		unsigned int start_segno = ses->start_segno;
4286 		unsigned int end = min(start_segno + SIT_ENTRY_PER_BLOCK,
4287 						(unsigned long)MAIN_SEGS(sbi));
4288 		unsigned int segno = start_segno;
4289 
4290 		if (to_journal &&
4291 			!__has_cursum_space(journal, ses->entry_cnt, SIT_JOURNAL))
4292 			to_journal = false;
4293 
4294 		if (to_journal) {
4295 			down_write(&curseg->journal_rwsem);
4296 		} else {
4297 			page = get_next_sit_page(sbi, start_segno);
4298 			raw_sit = page_address(page);
4299 		}
4300 
4301 		/* flush dirty sit entries in region of current sit set */
4302 		for_each_set_bit_from(segno, bitmap, end) {
4303 			int offset, sit_offset;
4304 
4305 			se = get_seg_entry(sbi, segno);
4306 #ifdef CONFIG_F2FS_CHECK_FS
4307 			if (memcmp(se->cur_valid_map, se->cur_valid_map_mir,
4308 						SIT_VBLOCK_MAP_SIZE))
4309 				f2fs_bug_on(sbi, 1);
4310 #endif
4311 
4312 			/* add discard candidates */
4313 			if (!(cpc->reason & CP_DISCARD)) {
4314 				cpc->trim_start = segno;
4315 				add_discard_addrs(sbi, cpc, false);
4316 			}
4317 
4318 			if (to_journal) {
4319 				offset = f2fs_lookup_journal_in_cursum(journal,
4320 							SIT_JOURNAL, segno, 1);
4321 				f2fs_bug_on(sbi, offset < 0);
4322 				segno_in_journal(journal, offset) =
4323 							cpu_to_le32(segno);
4324 				seg_info_to_raw_sit(se,
4325 					&sit_in_journal(journal, offset));
4326 				check_block_count(sbi, segno,
4327 					&sit_in_journal(journal, offset));
4328 			} else {
4329 				sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);
4330 				seg_info_to_raw_sit(se,
4331 						&raw_sit->entries[sit_offset]);
4332 				check_block_count(sbi, segno,
4333 						&raw_sit->entries[sit_offset]);
4334 			}
4335 
4336 			__clear_bit(segno, bitmap);
4337 			sit_i->dirty_sentries--;
4338 			ses->entry_cnt--;
4339 		}
4340 
4341 		if (to_journal)
4342 			up_write(&curseg->journal_rwsem);
4343 		else
4344 			f2fs_put_page(page, 1);
4345 
4346 		f2fs_bug_on(sbi, ses->entry_cnt);
4347 		release_sit_entry_set(ses);
4348 	}
4349 
4350 	f2fs_bug_on(sbi, !list_empty(head));
4351 	f2fs_bug_on(sbi, sit_i->dirty_sentries);
4352 out:
4353 	if (cpc->reason & CP_DISCARD) {
4354 		__u64 trim_start = cpc->trim_start;
4355 
4356 		for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++)
4357 			add_discard_addrs(sbi, cpc, false);
4358 
4359 		cpc->trim_start = trim_start;
4360 	}
4361 	up_write(&sit_i->sentry_lock);
4362 
4363 	set_prefree_as_free_segments(sbi);
4364 }
4365 
4366 static int build_sit_info(struct f2fs_sb_info *sbi)
4367 {
4368 	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
4369 	struct sit_info *sit_i;
4370 	unsigned int sit_segs, start;
4371 	char *src_bitmap, *bitmap;
4372 	unsigned int bitmap_size, main_bitmap_size, sit_bitmap_size;
4373 	unsigned int discard_map = f2fs_block_unit_discard(sbi) ? 1 : 0;
4374 
4375 	/* allocate memory for SIT information */
4376 	sit_i = f2fs_kzalloc(sbi, sizeof(struct sit_info), GFP_KERNEL);
4377 	if (!sit_i)
4378 		return -ENOMEM;
4379 
4380 	SM_I(sbi)->sit_info = sit_i;
4381 
4382 	sit_i->sentries =
4383 		f2fs_kvzalloc(sbi, array_size(sizeof(struct seg_entry),
4384 					      MAIN_SEGS(sbi)),
4385 			      GFP_KERNEL);
4386 	if (!sit_i->sentries)
4387 		return -ENOMEM;
4388 
4389 	main_bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
4390 	sit_i->dirty_sentries_bitmap = f2fs_kvzalloc(sbi, main_bitmap_size,
4391 								GFP_KERNEL);
4392 	if (!sit_i->dirty_sentries_bitmap)
4393 		return -ENOMEM;
4394 
4395 #ifdef CONFIG_F2FS_CHECK_FS
4396 	bitmap_size = MAIN_SEGS(sbi) * SIT_VBLOCK_MAP_SIZE * (3 + discard_map);
4397 #else
4398 	bitmap_size = MAIN_SEGS(sbi) * SIT_VBLOCK_MAP_SIZE * (2 + discard_map);
4399 #endif
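	/*
	 * a single allocation provides SIT_VBLOCK_MAP_SIZE bytes per segment
	 * for each map: cur_valid_map and ckpt_valid_map always,
	 * cur_valid_map_mir under CONFIG_F2FS_CHECK_FS, and discard_map only
	 * with block-unit discard; the loop below slices it up per segment
	 */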
4400 	sit_i->bitmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
4401 	if (!sit_i->bitmap)
4402 		return -ENOMEM;
4403 
4404 	bitmap = sit_i->bitmap;
4405 
4406 	for (start = 0; start < MAIN_SEGS(sbi); start++) {
4407 		sit_i->sentries[start].cur_valid_map = bitmap;
4408 		bitmap += SIT_VBLOCK_MAP_SIZE;
4409 
4410 		sit_i->sentries[start].ckpt_valid_map = bitmap;
4411 		bitmap += SIT_VBLOCK_MAP_SIZE;
4412 
4413 #ifdef CONFIG_F2FS_CHECK_FS
4414 		sit_i->sentries[start].cur_valid_map_mir = bitmap;
4415 		bitmap += SIT_VBLOCK_MAP_SIZE;
4416 #endif
4417 
4418 		if (discard_map) {
4419 			sit_i->sentries[start].discard_map = bitmap;
4420 			bitmap += SIT_VBLOCK_MAP_SIZE;
4421 		}
4422 	}
4423 
4424 	sit_i->tmp_map = f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
4425 	if (!sit_i->tmp_map)
4426 		return -ENOMEM;
4427 
4428 	if (__is_large_section(sbi)) {
4429 		sit_i->sec_entries =
4430 			f2fs_kvzalloc(sbi, array_size(sizeof(struct sec_entry),
4431 						      MAIN_SECS(sbi)),
4432 				      GFP_KERNEL);
4433 		if (!sit_i->sec_entries)
4434 			return -ENOMEM;
4435 	}
4436 
4437 	/* get information related to SIT */
4438 	sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1;
4439 
4440 	/* set up the SIT bitmap from the checkpoint pack */
4441 	sit_bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
4442 	src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);
4443 
4444 	sit_i->sit_bitmap = kmemdup(src_bitmap, sit_bitmap_size, GFP_KERNEL);
4445 	if (!sit_i->sit_bitmap)
4446 		return -ENOMEM;
4447 
4448 #ifdef CONFIG_F2FS_CHECK_FS
4449 	sit_i->sit_bitmap_mir = kmemdup(src_bitmap,
4450 					sit_bitmap_size, GFP_KERNEL);
4451 	if (!sit_i->sit_bitmap_mir)
4452 		return -ENOMEM;
4453 
4454 	sit_i->invalid_segmap = f2fs_kvzalloc(sbi,
4455 					main_bitmap_size, GFP_KERNEL);
4456 	if (!sit_i->invalid_segmap)
4457 		return -ENOMEM;
4458 #endif
4459 
4460 	/* init SIT information */
4461 	sit_i->s_ops = &default_salloc_ops;
4462 
4463 	sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
4464 	sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
4465 	sit_i->written_valid_blocks = 0;
4466 	sit_i->bitmap_size = sit_bitmap_size;
4467 	sit_i->dirty_sentries = 0;
4468 	sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
4469 	sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
4470 	sit_i->mounted_time = ktime_get_boottime_seconds();
4471 	init_rwsem(&sit_i->sentry_lock);
4472 	return 0;
4473 }
4474 
4475 static int build_free_segmap(struct f2fs_sb_info *sbi)
4476 {
4477 	struct free_segmap_info *free_i;
4478 	unsigned int bitmap_size, sec_bitmap_size;
4479 
4480 	/* allocate memory for free segmap information */
4481 	free_i = f2fs_kzalloc(sbi, sizeof(struct free_segmap_info), GFP_KERNEL);
4482 	if (!free_i)
4483 		return -ENOMEM;
4484 
4485 	SM_I(sbi)->free_info = free_i;
4486 
4487 	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
4488 	free_i->free_segmap = f2fs_kvmalloc(sbi, bitmap_size, GFP_KERNEL);
4489 	if (!free_i->free_segmap)
4490 		return -ENOMEM;
4491 
4492 	sec_bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
4493 	free_i->free_secmap = f2fs_kvmalloc(sbi, sec_bitmap_size, GFP_KERNEL);
4494 	if (!free_i->free_secmap)
4495 		return -ENOMEM;
4496 
4497 	/* set all segments as dirty temporarily */
4498 	memset(free_i->free_segmap, 0xff, bitmap_size);
4499 	memset(free_i->free_secmap, 0xff, sec_bitmap_size);
4500 
4501 	/* init free segmap information */
4502 	free_i->start_segno = GET_SEGNO_FROM_SEG0(sbi, MAIN_BLKADDR(sbi));
4503 	free_i->free_segments = 0;
4504 	free_i->free_sections = 0;
4505 	spin_lock_init(&free_i->segmap_lock);
4506 	return 0;
4507 }
4508 
4509 static int build_curseg(struct f2fs_sb_info *sbi)
4510 {
4511 	struct curseg_info *array;
4512 	int i;
4513 
4514 	array = f2fs_kzalloc(sbi, array_size(NR_CURSEG_TYPE,
4515 					sizeof(*array)), GFP_KERNEL);
4516 	if (!array)
4517 		return -ENOMEM;
4518 
4519 	SM_I(sbi)->curseg_array = array;
4520 
4521 	for (i = 0; i < NO_CHECK_TYPE; i++) {
4522 		mutex_init(&array[i].curseg_mutex);
4523 		array[i].sum_blk = f2fs_kzalloc(sbi, PAGE_SIZE, GFP_KERNEL);
4524 		if (!array[i].sum_blk)
4525 			return -ENOMEM;
4526 		init_rwsem(&array[i].journal_rwsem);
4527 		array[i].journal = f2fs_kzalloc(sbi,
4528 				sizeof(struct f2fs_journal), GFP_KERNEL);
4529 		if (!array[i].journal)
4530 			return -ENOMEM;
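		/* persistent logs map 1:1; pinned and ATGC logs allocate as cold data */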
4531 		if (i < NR_PERSISTENT_LOG)
4532 			array[i].seg_type = CURSEG_HOT_DATA + i;
4533 		else if (i == CURSEG_COLD_DATA_PINNED)
4534 			array[i].seg_type = CURSEG_COLD_DATA;
4535 		else if (i == CURSEG_ALL_DATA_ATGC)
4536 			array[i].seg_type = CURSEG_COLD_DATA;
4537 		array[i].segno = NULL_SEGNO;
4538 		array[i].next_blkoff = 0;
4539 		array[i].inited = false;
4540 	}
4541 	return restore_curseg_summaries(sbi);
4542 }
4543 
4544 static int build_sit_entries(struct f2fs_sb_info *sbi)
4545 {
4546 	struct sit_info *sit_i = SIT_I(sbi);
4547 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
4548 	struct f2fs_journal *journal = curseg->journal;
4549 	struct seg_entry *se;
4550 	struct f2fs_sit_entry sit;
4551 	int sit_blk_cnt = SIT_BLK_CNT(sbi);
4552 	unsigned int i, start, end;
4553 	unsigned int readed, start_blk = 0;
4554 	int err = 0;
4555 	block_t total_node_blocks = 0;
4556 
4557 	do {
4558 		readed = f2fs_ra_meta_pages(sbi, start_blk, BIO_MAX_VECS,
4559 							META_SIT, true);
4560 
4561 		start = start_blk * sit_i->sents_per_block;
4562 		end = (start_blk + readed) * sit_i->sents_per_block;
4563 
4564 		for (; start < end && start < MAIN_SEGS(sbi); start++) {
4565 			struct f2fs_sit_block *sit_blk;
4566 			struct page *page;
4567 
4568 			se = &sit_i->sentries[start];
4569 			page = get_current_sit_page(sbi, start);
4570 			if (IS_ERR(page))
4571 				return PTR_ERR(page);
4572 			sit_blk = (struct f2fs_sit_block *)page_address(page);
4573 			sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
4574 			f2fs_put_page(page, 1);
4575 
4576 			err = check_block_count(sbi, start, &sit);
4577 			if (err)
4578 				return err;
4579 			seg_info_from_raw_sit(se, &sit);
4580 			if (IS_NODESEG(se->type))
4581 				total_node_blocks += se->valid_blocks;
4582 
4583 			if (f2fs_block_unit_discard(sbi)) {
4584 				/* build discard map only one time */
4585 				if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
4586 					memset(se->discard_map, 0xff,
4587 						SIT_VBLOCK_MAP_SIZE);
4588 				} else {
4589 					memcpy(se->discard_map,
4590 						se->cur_valid_map,
4591 						SIT_VBLOCK_MAP_SIZE);
4592 					sbi->discard_blks +=
4593 						sbi->blocks_per_seg -
4594 						se->valid_blocks;
4595 				}
4596 			}
4597 
4598 			if (__is_large_section(sbi))
4599 				get_sec_entry(sbi, start)->valid_blocks +=
4600 							se->valid_blocks;
4601 		}
4602 		start_blk += readed;
4603 	} while (start_blk < sit_blk_cnt);
4604 
4605 	down_read(&curseg->journal_rwsem);
4606 	for (i = 0; i < sits_in_cursum(journal); i++) {
4607 		unsigned int old_valid_blocks;
4608 
4609 		start = le32_to_cpu(segno_in_journal(journal, i));
4610 		if (start >= MAIN_SEGS(sbi)) {
4611 			f2fs_err(sbi, "Wrong journal entry on segno %u",
4612 				 start);
4613 			err = -EFSCORRUPTED;
4614 			break;
4615 		}
4616 
4617 		se = &sit_i->sentries[start];
4618 		sit = sit_in_journal(journal, i);
4619 
4620 		old_valid_blocks = se->valid_blocks;
4621 		if (IS_NODESEG(se->type))
4622 			total_node_blocks -= old_valid_blocks;
4623 
4624 		err = check_block_count(sbi, start, &sit);
4625 		if (err)
4626 			break;
4627 		seg_info_from_raw_sit(se, &sit);
4628 		if (IS_NODESEG(se->type))
4629 			total_node_blocks += se->valid_blocks;
4630 
4631 		if (f2fs_block_unit_discard(sbi)) {
4632 			if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
4633 				memset(se->discard_map, 0xff, SIT_VBLOCK_MAP_SIZE);
4634 			} else {
4635 				memcpy(se->discard_map, se->cur_valid_map,
4636 							SIT_VBLOCK_MAP_SIZE);
4637 				sbi->discard_blks += old_valid_blocks;
4638 				sbi->discard_blks -= se->valid_blocks;
4639 			}
4640 		}
4641 
4642 		if (__is_large_section(sbi)) {
4643 			get_sec_entry(sbi, start)->valid_blocks +=
4644 							se->valid_blocks;
4645 			get_sec_entry(sbi, start)->valid_blocks -=
4646 							old_valid_blocks;
4647 		}
4648 	}
4649 	up_read(&curseg->journal_rwsem);
4650 
4651 	if (!err && total_node_blocks != valid_node_count(sbi)) {
4652 		f2fs_err(sbi, "SIT is corrupted node# %u vs %u",
4653 			 total_node_blocks, valid_node_count(sbi));
4654 		err = -EFSCORRUPTED;
4655 	}
4656 
4657 	return err;
4658 }
4659 
4660 static void init_free_segmap(struct f2fs_sb_info *sbi)
4661 {
4662 	unsigned int start;
4663 	int type;
4664 	struct seg_entry *sentry;
4665 
4666 	for (start = 0; start < MAIN_SEGS(sbi); start++) {
4667 		if (f2fs_usable_blks_in_seg(sbi, start) == 0)
4668 			continue;
4669 		sentry = get_seg_entry(sbi, start);
4670 		if (!sentry->valid_blocks)
4671 			__set_free(sbi, start);
4672 		else
4673 			SIT_I(sbi)->written_valid_blocks +=
4674 						sentry->valid_blocks;
4675 	}
4676 
4677 	/* mark the current segments as in use */
4678 	for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) {
4679 		struct curseg_info *curseg_t = CURSEG_I(sbi, type);
4680 
4681 		__set_test_and_inuse(sbi, curseg_t->segno);
4682 	}
4683 }
4684 
4685 static void init_dirty_segmap(struct f2fs_sb_info *sbi)
4686 {
4687 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
4688 	struct free_segmap_info *free_i = FREE_I(sbi);
4689 	unsigned int segno = 0, offset = 0, secno;
4690 	block_t valid_blocks, usable_blks_in_seg;
4691 	block_t blks_per_sec = BLKS_PER_SEC(sbi);
4692 
4693 	while (1) {
4694 		/* find dirty segment based on free segmap */
4695 		segno = find_next_inuse(free_i, MAIN_SEGS(sbi), offset);
4696 		if (segno >= MAIN_SEGS(sbi))
4697 			break;
4698 		offset = segno + 1;
4699 		valid_blocks = get_valid_blocks(sbi, segno, false);
4700 		usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);
4701 		if (valid_blocks == usable_blks_in_seg || !valid_blocks)
4702 			continue;
4703 		if (valid_blocks > usable_blks_in_seg) {
4704 			f2fs_bug_on(sbi, 1);
4705 			continue;
4706 		}
4707 		mutex_lock(&dirty_i->seglist_lock);
4708 		__locate_dirty_segment(sbi, segno, DIRTY);
4709 		mutex_unlock(&dirty_i->seglist_lock);
4710 	}
4711 
4712 	if (!__is_large_section(sbi))
4713 		return;
4714 
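	/*
	 * for large sections, additionally mark partially valid sections
	 * that are not currently in use in dirty_secmap
	 */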
4715 	mutex_lock(&dirty_i->seglist_lock);
4716 	for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
4717 		valid_blocks = get_valid_blocks(sbi, segno, true);
4718 		secno = GET_SEC_FROM_SEG(sbi, segno);
4719 
4720 		if (!valid_blocks || valid_blocks == blks_per_sec)
4721 			continue;
4722 		if (IS_CURSEC(sbi, secno))
4723 			continue;
4724 		set_bit(secno, dirty_i->dirty_secmap);
4725 	}
4726 	mutex_unlock(&dirty_i->seglist_lock);
4727 }
4728 
4729 static int init_victim_secmap(struct f2fs_sb_info *sbi)
4730 {
4731 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
4732 	unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
4733 
4734 	dirty_i->victim_secmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
4735 	if (!dirty_i->victim_secmap)
4736 		return -ENOMEM;
4737 	return 0;
4738 }
4739 
4740 static int build_dirty_segmap(struct f2fs_sb_info *sbi)
4741 {
4742 	struct dirty_seglist_info *dirty_i;
4743 	unsigned int bitmap_size, i;
4744 
4745 	/* allocate memory for dirty segments list information */
4746 	dirty_i = f2fs_kzalloc(sbi, sizeof(struct dirty_seglist_info),
4747 								GFP_KERNEL);
4748 	if (!dirty_i)
4749 		return -ENOMEM;
4750 
4751 	SM_I(sbi)->dirty_info = dirty_i;
4752 	mutex_init(&dirty_i->seglist_lock);
4753 
4754 	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
4755 
4756 	for (i = 0; i < NR_DIRTY_TYPE; i++) {
4757 		dirty_i->dirty_segmap[i] = f2fs_kvzalloc(sbi, bitmap_size,
4758 								GFP_KERNEL);
4759 		if (!dirty_i->dirty_segmap[i])
4760 			return -ENOMEM;
4761 	}
4762 
4763 	if (__is_large_section(sbi)) {
4764 		bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
4765 		dirty_i->dirty_secmap = f2fs_kvzalloc(sbi,
4766 						bitmap_size, GFP_KERNEL);
4767 		if (!dirty_i->dirty_secmap)
4768 			return -ENOMEM;
4769 	}
4770 
4771 	init_dirty_segmap(sbi);
4772 	return init_victim_secmap(sbi);
4773 }
4774 
4775 static int sanity_check_curseg(struct f2fs_sb_info *sbi)
4776 {
4777 	int i;
4778 
4779 	/*
4780 	 * In an LFS/SSR curseg, .next_blkoff should point to an unused blkaddr;
4781 	 * in an LFS curseg, all blkaddrs after .next_blkoff should be unused.
4782 	 */
4783 	for (i = 0; i < NR_PERSISTENT_LOG; i++) {
4784 		struct curseg_info *curseg = CURSEG_I(sbi, i);
4785 		struct seg_entry *se = get_seg_entry(sbi, curseg->segno);
4786 		unsigned int blkofs = curseg->next_blkoff;
4787 
4788 		if (f2fs_sb_has_readonly(sbi) &&
4789 			i != CURSEG_HOT_DATA && i != CURSEG_HOT_NODE)
4790 			continue;
4791 
4792 		sanity_check_seg_type(sbi, curseg->seg_type);
4793 
4794 		if (curseg->alloc_type != LFS && curseg->alloc_type != SSR) {
4795 			f2fs_err(sbi,
4796 				 "Current segment has invalid alloc_type:%d",
4797 				 curseg->alloc_type);
4798 			return -EFSCORRUPTED;
4799 		}
4800 
4801 		if (f2fs_test_bit(blkofs, se->cur_valid_map))
4802 			goto out;
4803 
4804 		if (curseg->alloc_type == SSR)
4805 			continue;
4806 
4807 		for (blkofs += 1; blkofs < sbi->blocks_per_seg; blkofs++) {
4808 			if (!f2fs_test_bit(blkofs, se->cur_valid_map))
4809 				continue;
4810 out:
4811 			f2fs_err(sbi,
4812 				 "Current segment's next free block offset is inconsistent with bitmap, logtype:%u, segno:%u, type:%u, next_blkoff:%u, blkofs:%u",
4813 				 i, curseg->segno, curseg->alloc_type,
4814 				 curseg->next_blkoff, blkofs);
4815 			return -EFSCORRUPTED;
4816 		}
4817 	}
4818 	return 0;
4819 }
4820 
4821 #ifdef CONFIG_BLK_DEV_ZONED
4822 
4823 static int check_zone_write_pointer(struct f2fs_sb_info *sbi,
4824 				    struct f2fs_dev_info *fdev,
4825 				    struct blk_zone *zone)
4826 {
4827 	unsigned int wp_segno, wp_blkoff, zone_secno, zone_segno, segno;
4828 	block_t zone_block, wp_block, last_valid_block;
4829 	unsigned int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
4830 	int i, s, b, ret;
4831 	struct seg_entry *se;
4832 
4833 	if (zone->type != BLK_ZONE_TYPE_SEQWRITE_REQ)
4834 		return 0;
4835 
4836 	wp_block = fdev->start_blk + (zone->wp >> log_sectors_per_block);
4837 	wp_segno = GET_SEGNO(sbi, wp_block);
4838 	wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno);
4839 	zone_block = fdev->start_blk + (zone->start >> log_sectors_per_block);
4840 	zone_segno = GET_SEGNO(sbi, zone_block);
4841 	zone_secno = GET_SEC_FROM_SEG(sbi, zone_segno);
4842 
4843 	if (zone_segno >= MAIN_SEGS(sbi))
4844 		return 0;
4845 
4846 	/*
4847 	 * Skip checking the zones that cursegs point to, since
4848 	 * fix_curseg_write_pointer() checks them.
4849 	 */
4850 	for (i = 0; i < NO_CHECK_TYPE; i++)
4851 		if (zone_secno == GET_SEC_FROM_SEG(sbi,
4852 						   CURSEG_I(sbi, i)->segno))
4853 			return 0;
4854 
4855 	/*
4856 	 * Get last valid block of the zone.
4857 	 */
4858 	last_valid_block = zone_block - 1;
4859 	for (s = sbi->segs_per_sec - 1; s >= 0; s--) {
4860 		segno = zone_segno + s;
4861 		se = get_seg_entry(sbi, segno);
4862 		for (b = sbi->blocks_per_seg - 1; b >= 0; b--)
4863 			if (f2fs_test_bit(b, se->cur_valid_map)) {
4864 				last_valid_block = START_BLOCK(sbi, segno) + b;
4865 				break;
4866 			}
4867 		if (last_valid_block >= zone_block)
4868 			break;
4869 	}
4870 
4871 	/*
4872 	 * If last valid block is beyond the write pointer, report the
4873 	 * inconsistency. This inconsistency does not cause a write error
4874 	 * because the zone will not be selected for a write operation until
4875 	 * it gets discarded. Just report it.
4876 	 */
4877 	if (last_valid_block >= wp_block) {
4878 		f2fs_notice(sbi, "Valid block beyond write pointer: "
4879 			    "valid block[0x%x,0x%x] wp[0x%x,0x%x]",
4880 			    GET_SEGNO(sbi, last_valid_block),
4881 			    GET_BLKOFF_FROM_SEG0(sbi, last_valid_block),
4882 			    wp_segno, wp_blkoff);
4883 		return 0;
4884 	}
4885 
4886 	/*
4887 	 * If there is no valid block in the zone and the write pointer is
4888 	 * not at the zone start, reset the write pointer.
4889 	 */
4890 	if (last_valid_block + 1 == zone_block && zone->wp != zone->start) {
4891 		f2fs_notice(sbi,
4892 			    "Zone without valid block has non-zero write "
4893 			    "pointer. Reset the write pointer: wp[0x%x,0x%x]",
4894 			    wp_segno, wp_blkoff);
4895 		ret = __f2fs_issue_discard_zone(sbi, fdev->bdev, zone_block,
4896 					zone->len >> log_sectors_per_block);
4897 		if (ret) {
4898 			f2fs_err(sbi, "Discard zone failed: %s (errno=%d)",
4899 				 fdev->path, ret);
4900 			return ret;
4901 		}
4902 	}
4903 
4904 	return 0;
4905 }
4906 
4907 static struct f2fs_dev_info *get_target_zoned_dev(struct f2fs_sb_info *sbi,
4908 						  block_t zone_blkaddr)
4909 {
4910 	int i;
4911 
4912 	for (i = 0; i < sbi->s_ndevs; i++) {
4913 		if (!bdev_is_zoned(FDEV(i).bdev))
4914 			continue;
4915 		if (sbi->s_ndevs == 1 || (FDEV(i).start_blk <= zone_blkaddr &&
4916 				zone_blkaddr <= FDEV(i).end_blk))
4917 			return &FDEV(i);
4918 	}
4919 
4920 	return NULL;
4921 }
4922 
4923 static int report_one_zone_cb(struct blk_zone *zone, unsigned int idx,
4924 			      void *data)
4925 {
4926 	memcpy(data, zone, sizeof(struct blk_zone));
4927 	return 0;
4928 }
4929 
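/*
 * If the zone write pointer disagrees with (cs->segno, cs->next_blkoff)
 * of curseg @type, move the log to a freshly allocated section and make
 * sure both the old and the newly assigned zone end up in a writable,
 * consistent state.
 */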
4930 static int fix_curseg_write_pointer(struct f2fs_sb_info *sbi, int type)
4931 {
4932 	struct curseg_info *cs = CURSEG_I(sbi, type);
4933 	struct f2fs_dev_info *zbd;
4934 	struct blk_zone zone;
4935 	unsigned int cs_section, wp_segno, wp_blkoff, wp_sector_off;
4936 	block_t cs_zone_block, wp_block;
4937 	unsigned int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
4938 	sector_t zone_sector;
4939 	int err;
4940 
4941 	cs_section = GET_SEC_FROM_SEG(sbi, cs->segno);
4942 	cs_zone_block = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, cs_section));
4943 
4944 	zbd = get_target_zoned_dev(sbi, cs_zone_block);
4945 	if (!zbd)
4946 		return 0;
4947 
4948 	/* report zone for the sector the curseg points to */
4949 	zone_sector = (sector_t)(cs_zone_block - zbd->start_blk)
4950 		<< log_sectors_per_block;
4951 	err = blkdev_report_zones(zbd->bdev, zone_sector, 1,
4952 				  report_one_zone_cb, &zone);
4953 	if (err != 1) {
4954 		f2fs_err(sbi, "Report zone failed: %s errno=(%d)",
4955 			 zbd->path, err);
4956 		return err;
4957 	}
4958 
4959 	if (zone.type != BLK_ZONE_TYPE_SEQWRITE_REQ)
4960 		return 0;
4961 
4962 	wp_block = zbd->start_blk + (zone.wp >> log_sectors_per_block);
4963 	wp_segno = GET_SEGNO(sbi, wp_block);
4964 	wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno);
4965 	wp_sector_off = zone.wp & GENMASK(log_sectors_per_block - 1, 0);
4966 
4967 	if (cs->segno == wp_segno && cs->next_blkoff == wp_blkoff &&
4968 		wp_sector_off == 0)
4969 		return 0;
4970 
4971 	f2fs_notice(sbi, "Unaligned curseg[%d] with write pointer: "
4972 		    "curseg[0x%x,0x%x] wp[0x%x,0x%x]",
4973 		    type, cs->segno, cs->next_blkoff, wp_segno, wp_blkoff);
4974 
4975 	f2fs_notice(sbi, "Assign new section to curseg[%d]: "
4976 		    "curseg[0x%x,0x%x]", type, cs->segno, cs->next_blkoff);
4977 
4978 	f2fs_allocate_new_section(sbi, type, true);
4979 
4980 	/* check consistency of the zone the curseg pointed to */
4981 	if (check_zone_write_pointer(sbi, zbd, &zone))
4982 		return -EIO;
4983 
4984 	/* check newly assigned zone */
4985 	cs_section = GET_SEC_FROM_SEG(sbi, cs->segno);
4986 	cs_zone_block = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, cs_section));
4987 
4988 	zbd = get_target_zoned_dev(sbi, cs_zone_block);
4989 	if (!zbd)
4990 		return 0;
4991 
4992 	zone_sector = (sector_t)(cs_zone_block - zbd->start_blk)
4993 		<< log_sectors_per_block;
4994 	err = blkdev_report_zones(zbd->bdev, zone_sector, 1,
4995 				  report_one_zone_cb, &zone);
4996 	if (err != 1) {
4997 		f2fs_err(sbi, "Report zone failed: %s errno=(%d)",
4998 			 zbd->path, err);
4999 		return err;
5000 	}
5001 
5002 	if (zone.type != BLK_ZONE_TYPE_SEQWRITE_REQ)
5003 		return 0;
5004 
5005 	if (zone.wp != zone.start) {
5006 		f2fs_notice(sbi,
5007 			    "New zone for curseg[%d] is not yet discarded. "
5008 			    "Reset the zone: curseg[0x%x,0x%x]",
5009 			    type, cs->segno, cs->next_blkoff);
5010 		err = __f2fs_issue_discard_zone(sbi, zbd->bdev,
5011 				zone_sector >> log_sectors_per_block,
5012 				zone.len >> log_sectors_per_block);
5013 		if (err) {
5014 			f2fs_err(sbi, "Discard zone failed: %s (errno=%d)",
5015 				 zbd->path, err);
5016 			return err;
5017 		}
5018 	}
5019 
5020 	return 0;
5021 }
5022 
5023 int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi)
5024 {
5025 	int i, ret;
5026 
5027 	for (i = 0; i < NR_PERSISTENT_LOG; i++) {
5028 		ret = fix_curseg_write_pointer(sbi, i);
5029 		if (ret)
5030 			return ret;
5031 	}
5032 
5033 	return 0;
5034 }
5035 
5036 struct check_zone_write_pointer_args {
5037 	struct f2fs_sb_info *sbi;
5038 	struct f2fs_dev_info *fdev;
5039 };
5040 
5041 static int check_zone_write_pointer_cb(struct blk_zone *zone, unsigned int idx,
5042 				      void *data)
5043 {
5044 	struct check_zone_write_pointer_args *args;
5045 
5046 	args = (struct check_zone_write_pointer_args *)data;
5047 
5048 	return check_zone_write_pointer(args->sbi, args->fdev, zone);
5049 }
5050 
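/*
 * For every sequential zone on every zoned device, verify that the zone
 * write pointer is consistent with the valid block bitmaps, resetting
 * empty zones whose write pointer has drifted (see
 * check_zone_write_pointer()).
 */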
5051 int f2fs_check_write_pointer(struct f2fs_sb_info *sbi)
5052 {
5053 	int i, ret;
5054 	struct check_zone_write_pointer_args args;
5055 
5056 	for (i = 0; i < sbi->s_ndevs; i++) {
5057 		if (!bdev_is_zoned(FDEV(i).bdev))
5058 			continue;
5059 
5060 		args.sbi = sbi;
5061 		args.fdev = &FDEV(i);
5062 		ret = blkdev_report_zones(FDEV(i).bdev, 0, BLK_ALL_ZONES,
5063 					  check_zone_write_pointer_cb, &args);
5064 		if (ret < 0)
5065 			return ret;
5066 	}
5067 
5068 	return 0;
5069 }
5070 
5071 static bool is_conv_zone(struct f2fs_sb_info *sbi, unsigned int zone_idx,
5072 						unsigned int dev_idx)
5073 {
5074 	if (!bdev_is_zoned(FDEV(dev_idx).bdev))
5075 		return true;
5076 	return !test_bit(zone_idx, FDEV(dev_idx).blkz_seq);
5077 }
5078 
5079 /* Return the zone index in the given device */
5080 static unsigned int get_zone_idx(struct f2fs_sb_info *sbi, unsigned int secno,
5081 					int dev_idx)
5082 {
5083 	block_t sec_start_blkaddr = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, secno));
5084 
5085 	return (sec_start_blkaddr - FDEV(dev_idx).start_blk) >>
5086 						sbi->log_blocks_per_blkz;
5087 }
5088 
5089 /*
5090  * Return the usable segments in a section based on the zone's
5091  * corresponding zone capacity. A zone is equal to a section.
5092  */
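/*
 * Worked example (hypothetical geometry): with 2MB segments and a 256MB
 * zone (segs_per_sec = 128) whose reported capacity is 250MB, the 6MB
 * tail is 3 unusable segments, so the section has 128 - 3 = 125 usable
 * segments.
 */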
5093 static inline unsigned int f2fs_usable_zone_segs_in_sec(
5094 		struct f2fs_sb_info *sbi, unsigned int segno)
5095 {
5096 	unsigned int dev_idx, zone_idx, unusable_segs_in_sec;
5097 
5098 	dev_idx = f2fs_target_device_index(sbi, START_BLOCK(sbi, segno));
5099 	zone_idx = get_zone_idx(sbi, GET_SEC_FROM_SEG(sbi, segno), dev_idx);
5100 
5101 	/* Conventional zone's capacity is always equal to zone size */
5102 	if (is_conv_zone(sbi, zone_idx, dev_idx))
5103 		return sbi->segs_per_sec;
5104 
5105 	/*
5106 	 * If the zone_capacity_blocks array is NULL, then zone capacity
5107 	 * is equal to the zone size for all zones
5108 	 */
5109 	if (!FDEV(dev_idx).zone_capacity_blocks)
5110 		return sbi->segs_per_sec;
5111 
5112 	/* Get the segment count beyond zone capacity block */
5113 	unusable_segs_in_sec = (sbi->blocks_per_blkz -
5114 				FDEV(dev_idx).zone_capacity_blocks[zone_idx]) >>
5115 				sbi->log_blocks_per_seg;
5116 	return sbi->segs_per_sec - unusable_segs_in_sec;
5117 }
5118 
5119 /*
5120  * Return the number of usable blocks in a segment. The number of blocks
5121  * returned is always equal to the number of blocks in a segment for
5122  * segments fully contained within a sequential zone capacity or a
5123  * conventional zone. For segments partially contained in a sequential
5124  * zone capacity, the number of usable blocks up to the zone capacity
5125  * is returned. 0 is returned in all other cases.
5126  */
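/*
 * Worked example (hypothetical geometry, 512-block segments): if the
 * zone capacity ends 100 blocks into a segment, that segment reports
 * 100 usable blocks, every earlier segment reports 512, and any segment
 * starting at or past the capacity reports 0.
 */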
5127 static inline unsigned int f2fs_usable_zone_blks_in_seg(
5128 			struct f2fs_sb_info *sbi, unsigned int segno)
5129 {
5130 	block_t seg_start, sec_start_blkaddr, sec_cap_blkaddr;
5131 	unsigned int zone_idx, dev_idx, secno;
5132 
5133 	secno = GET_SEC_FROM_SEG(sbi, segno);
5134 	seg_start = START_BLOCK(sbi, segno);
5135 	dev_idx = f2fs_target_device_index(sbi, seg_start);
5136 	zone_idx = get_zone_idx(sbi, secno, dev_idx);
5137 
5138 	/*
5139 	 * A conventional zone's capacity is always equal to its size,
5140 	 * so the number of blocks per segment is unchanged.
5141 	 */
5142 	if (is_conv_zone(sbi, zone_idx, dev_idx))
5143 		return sbi->blocks_per_seg;
5144 
5145 	if (!FDEV(dev_idx).zone_capacity_blocks)
5146 		return sbi->blocks_per_seg;
5147 
5148 	sec_start_blkaddr = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, secno));
5149 	sec_cap_blkaddr = sec_start_blkaddr +
5150 				FDEV(dev_idx).zone_capacity_blocks[zone_idx];
5151 
5152 	/*
5153 	 * If segment starts before zone capacity and spans beyond
5154 	 * zone capacity, then usable blocks are from seg start to
5155 	 * zone capacity. If the segment starts after the zone capacity,
5156 	 * then there are no usable blocks.
5157 	 */
5158 	if (seg_start >= sec_cap_blkaddr)
5159 		return 0;
5160 	if (seg_start + sbi->blocks_per_seg > sec_cap_blkaddr)
5161 		return sec_cap_blkaddr - seg_start;
5162 
5163 	return sbi->blocks_per_seg;
5164 }
5165 #else
5166 int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi)
5167 {
5168 	return 0;
5169 }
5170 
5171 int f2fs_check_write_pointer(struct f2fs_sb_info *sbi)
5172 {
5173 	return 0;
5174 }
5175 
5176 static inline unsigned int f2fs_usable_zone_blks_in_seg(struct f2fs_sb_info *sbi,
5177 							unsigned int segno)
5178 {
5179 	return 0;
5180 }
5181 
5182 static inline unsigned int f2fs_usable_zone_segs_in_sec(struct f2fs_sb_info *sbi,
5183 							unsigned int segno)
5184 {
5185 	return 0;
5186 }
5187 #endif
5188 unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
5189 					unsigned int segno)
5190 {
5191 	if (f2fs_sb_has_blkzoned(sbi))
5192 		return f2fs_usable_zone_blks_in_seg(sbi, segno);
5193 
5194 	return sbi->blocks_per_seg;
5195 }
5196 
5197 unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi,
5198 					unsigned int segno)
5199 {
5200 	if (f2fs_sb_has_blkzoned(sbi))
5201 		return f2fs_usable_zone_segs_in_sec(sbi, segno);
5202 
5203 	return sbi->segs_per_sec;
5204 }
5205 
5206 /*
5207  * Update min, max modified time for cost-benefit GC algorithm
5208  */
5209 static void init_min_max_mtime(struct f2fs_sb_info *sbi)
5210 {
5211 	struct sit_info *sit_i = SIT_I(sbi);
5212 	unsigned int segno;
5213 
5214 	down_write(&sit_i->sentry_lock);
5215 
5216 	sit_i->min_mtime = ULLONG_MAX;
5217 
5218 	for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
5219 		unsigned int i;
5220 		unsigned long long mtime = 0;
5221 
5222 		for (i = 0; i < sbi->segs_per_sec; i++)
5223 			mtime += get_seg_entry(sbi, segno + i)->mtime;
5224 
5225 		mtime = div_u64(mtime, sbi->segs_per_sec);
5226 
5227 		if (sit_i->min_mtime > mtime)
5228 			sit_i->min_mtime = mtime;
5229 	}
5230 	sit_i->max_mtime = get_mtime(sbi, false);
5231 	sit_i->dirty_max_mtime = 0;
5232 	up_write(&sit_i->sentry_lock);
5233 }
5234 
5235 int f2fs_build_segment_manager(struct f2fs_sb_info *sbi)
5236 {
5237 	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
5238 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
5239 	struct f2fs_sm_info *sm_info;
5240 	int err;
5241 
5242 	sm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_sm_info), GFP_KERNEL);
5243 	if (!sm_info)
5244 		return -ENOMEM;
5245 
5246 	/* init sm info */
5247 	sbi->sm_info = sm_info;
5248 	sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
5249 	sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
5250 	sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
5251 	sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
5252 	sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
5253 	sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
5254 	sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
5255 	sm_info->rec_prefree_segments = sm_info->main_segments *
5256 					DEF_RECLAIM_PREFREE_SEGMENTS / 100;
5257 	if (sm_info->rec_prefree_segments > DEF_MAX_RECLAIM_PREFREE_SEGMENTS)
5258 		sm_info->rec_prefree_segments = DEF_MAX_RECLAIM_PREFREE_SEGMENTS;
5259 
5260 	if (!f2fs_lfs_mode(sbi))
5261 		sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC;
5262 	sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
5263 	sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
5264 	sm_info->min_seq_blocks = sbi->blocks_per_seg;
5265 	sm_info->min_hot_blocks = DEF_MIN_HOT_BLOCKS;
5266 	sm_info->min_ssr_sections = reserved_sections(sbi);
5267 
5268 	INIT_LIST_HEAD(&sm_info->sit_entry_set);
5269 
5270 	init_f2fs_rwsem(&sm_info->curseg_lock);
5271 
5272 	if (!f2fs_readonly(sbi->sb)) {
5273 		err = f2fs_create_flush_cmd_control(sbi);
5274 		if (err)
5275 			return err;
5276 	}
5277 
5278 	err = create_discard_cmd_control(sbi);
5279 	if (err)
5280 		return err;
5281 
5282 	err = build_sit_info(sbi);
5283 	if (err)
5284 		return err;
5285 	err = build_free_segmap(sbi);
5286 	if (err)
5287 		return err;
5288 	err = build_curseg(sbi);
5289 	if (err)
5290 		return err;
5291 
5292 	/* reinit free segmap based on SIT */
5293 	err = build_sit_entries(sbi);
5294 	if (err)
5295 		return err;
5296 
5297 	init_free_segmap(sbi);
5298 	err = build_dirty_segmap(sbi);
5299 	if (err)
5300 		return err;
5301 
5302 	err = sanity_check_curseg(sbi);
5303 	if (err)
5304 		return err;
5305 
5306 	init_min_max_mtime(sbi);
5307 	return 0;
5308 }
5309 
5310 static void discard_dirty_segmap(struct f2fs_sb_info *sbi,
5311 		enum dirty_type dirty_type)
5312 {
5313 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
5314 
5315 	mutex_lock(&dirty_i->seglist_lock);
5316 	kvfree(dirty_i->dirty_segmap[dirty_type]);
5317 	dirty_i->nr_dirty[dirty_type] = 0;
5318 	mutex_unlock(&dirty_i->seglist_lock);
5319 }
5320 
5321 static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
5322 {
5323 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
5324 
5325 	kvfree(dirty_i->victim_secmap);
5326 }
5327 
5328 static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
5329 {
5330 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
5331 	int i;
5332 
5333 	if (!dirty_i)
5334 		return;
5335 
5336 	/* discard pre-free/dirty segments list */
5337 	for (i = 0; i < NR_DIRTY_TYPE; i++)
5338 		discard_dirty_segmap(sbi, i);
5339 
5340 	if (__is_large_section(sbi)) {
5341 		mutex_lock(&dirty_i->seglist_lock);
5342 		kvfree(dirty_i->dirty_secmap);
5343 		mutex_unlock(&dirty_i->seglist_lock);
5344 	}
5345 
5346 	destroy_victim_secmap(sbi);
5347 	SM_I(sbi)->dirty_info = NULL;
5348 	kfree(dirty_i);
5349 }
5350 
5351 static void destroy_curseg(struct f2fs_sb_info *sbi)
5352 {
5353 	struct curseg_info *array = SM_I(sbi)->curseg_array;
5354 	int i;
5355 
5356 	if (!array)
5357 		return;
5358 	SM_I(sbi)->curseg_array = NULL;
5359 	for (i = 0; i < NR_CURSEG_TYPE; i++) {
5360 		kfree(array[i].sum_blk);
5361 		kfree(array[i].journal);
5362 	}
5363 	kfree(array);
5364 }
5365 
5366 static void destroy_free_segmap(struct f2fs_sb_info *sbi)
5367 {
5368 	struct free_segmap_info *free_i = SM_I(sbi)->free_info;
5369 
5370 	if (!free_i)
5371 		return;
5372 	SM_I(sbi)->free_info = NULL;
5373 	kvfree(free_i->free_segmap);
5374 	kvfree(free_i->free_secmap);
5375 	kfree(free_i);
5376 }
5377 
5378 static void destroy_sit_info(struct f2fs_sb_info *sbi)
5379 {
5380 	struct sit_info *sit_i = SIT_I(sbi);
5381 
5382 	if (!sit_i)
5383 		return;
5384 
5385 	if (sit_i->sentries)
5386 		kvfree(sit_i->bitmap);
5387 	kfree(sit_i->tmp_map);
5388 
5389 	kvfree(sit_i->sentries);
5390 	kvfree(sit_i->sec_entries);
5391 	kvfree(sit_i->dirty_sentries_bitmap);
5392 
5393 	SM_I(sbi)->sit_info = NULL;
5394 	kvfree(sit_i->sit_bitmap);
5395 #ifdef CONFIG_F2FS_CHECK_FS
5396 	kvfree(sit_i->sit_bitmap_mir);
5397 	kvfree(sit_i->invalid_segmap);
5398 #endif
5399 	kfree(sit_i);
5400 }
5401 
5402 void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi)
5403 {
5404 	struct f2fs_sm_info *sm_info = SM_I(sbi);
5405 
5406 	if (!sm_info)
5407 		return;
5408 	f2fs_destroy_flush_cmd_control(sbi, true);
5409 	destroy_discard_cmd_control(sbi);
5410 	destroy_dirty_segmap(sbi);
5411 	destroy_curseg(sbi);
5412 	destroy_free_segmap(sbi);
5413 	destroy_sit_info(sbi);
5414 	sbi->sm_info = NULL;
5415 	kfree(sm_info);
5416 }
5417 
5418 int __init f2fs_create_segment_manager_caches(void)
5419 {
5420 	discard_entry_slab = f2fs_kmem_cache_create("f2fs_discard_entry",
5421 			sizeof(struct discard_entry));
5422 	if (!discard_entry_slab)
5423 		goto fail;
5424 
5425 	discard_cmd_slab = f2fs_kmem_cache_create("f2fs_discard_cmd",
5426 			sizeof(struct discard_cmd));
5427 	if (!discard_cmd_slab)
5428 		goto destroy_discard_entry;
5429 
5430 	sit_entry_set_slab = f2fs_kmem_cache_create("f2fs_sit_entry_set",
5431 			sizeof(struct sit_entry_set));
5432 	if (!sit_entry_set_slab)
5433 		goto destroy_discard_cmd;
5434 
5435 	inmem_entry_slab = f2fs_kmem_cache_create("f2fs_inmem_page_entry",
5436 			sizeof(struct inmem_pages));
5437 	if (!inmem_entry_slab)
5438 		goto destroy_sit_entry_set;
5439 	return 0;
5440 
5441 destroy_sit_entry_set:
5442 	kmem_cache_destroy(sit_entry_set_slab);
5443 destroy_discard_cmd:
5444 	kmem_cache_destroy(discard_cmd_slab);
5445 destroy_discard_entry:
5446 	kmem_cache_destroy(discard_entry_slab);
5447 fail:
5448 	return -ENOMEM;
5449 }
5450 
5451 void f2fs_destroy_segment_manager_caches(void)
5452 {
5453 	kmem_cache_destroy(sit_entry_set_slab);
5454 	kmem_cache_destroy(discard_cmd_slab);
5455 	kmem_cache_destroy(discard_entry_slab);
5456 	kmem_cache_destroy(inmem_entry_slab);
5457 }
5458