segment.c: diff of e98bdb3059cbf2b1cd4261e126b08429f64466c3 (old) vs ad4d307fce0909a5f70635826f779321ab95b469 (new)
1/*
2 * fs/f2fs/segment.c
3 *
4 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
5 * http://www.samsung.com/
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as

--- 12 unchanged lines hidden ---

21#include "segment.h"
22#include "node.h"
23#include "trace.h"
24#include <trace/events/f2fs.h>
25
26#define __reverse_ffz(x) __reverse_ffs(~(x))
27
28static struct kmem_cache *discard_entry_slab;
29static struct kmem_cache *bio_entry_slab;
29static struct kmem_cache *discard_cmd_slab;
30static struct kmem_cache *sit_entry_set_slab;
31static struct kmem_cache *inmem_entry_slab;
32
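Note: the new version (right column) retires bio_entry in favour of a dedicated discard_cmd object. A reconstructed sketch of that object, with fields inferred from their uses later in this file (the real definition lives in f2fs.h, which this diff does not show):

enum {
	D_PREP,		/* queued, bio not yet submitted */
	D_SUBMIT,	/* bio submitted, completion pending */
	D_DONE,		/* endio has run, safe to reap */
};

struct discard_cmd {
	struct list_head list;		/* chained on dcc->discard_cmd_list */
	struct bio *bio;		/* bio->bi_private points back at this */
	block_t lstart;			/* logical start block */
	block_t len;			/* length in blocks */
	int state;			/* D_PREP -> D_SUBMIT -> D_DONE */
	struct completion wait;		/* completed by f2fs_submit_discard_endio() */
};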
33static unsigned long __reverse_ulong(unsigned char *str)
34{
35 unsigned long tmp = 0;
36 int shift = 24, idx = 0;
37

--- 199 unchanged lines hidden ---

237 }
238 return err;
239}
240
241void drop_inmem_pages(struct inode *inode)
242{
243 struct f2fs_inode_info *fi = F2FS_I(inode);
244
245 clear_inode_flag(inode, FI_ATOMIC_FILE);
246
247 mutex_lock(&fi->inmem_lock);
248 __revoke_inmem_pages(inode, &fi->inmem_pages, true, false);
249 mutex_unlock(&fi->inmem_lock);
245 mutex_lock(&fi->inmem_lock);
246 __revoke_inmem_pages(inode, &fi->inmem_pages, true, false);
247 mutex_unlock(&fi->inmem_lock);
248
249 clear_inode_flag(inode, FI_ATOMIC_FILE);
250 stat_dec_atomic_write(inode);
250}
251
252static int __commit_inmem_pages(struct inode *inode,
253 struct list_head *revoke_list)
254{
255 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
256 struct f2fs_inode_info *fi = F2FS_I(inode);
257 struct inmem_pages *cur, *tmp;
258 struct f2fs_io_info fio = {
259 .sbi = sbi,
260 .type = DATA,
261 .op = REQ_OP_WRITE,
262 .op_flags = REQ_SYNC | REQ_PRIO,
263 .encrypted_page = NULL,
264 };
265 bool submit_bio = false;
266 pgoff_t last_idx = ULONG_MAX;
266 int err = 0;
267
268 list_for_each_entry_safe(cur, tmp, &fi->inmem_pages, list) {
269 struct page *page = cur->page;
270
271 lock_page(page);
272 if (page->mapping == inode->i_mapping) {
273 trace_f2fs_commit_inmem_page(page, INMEM);

--- 9 unchanged lines hidden ---

283 err = do_write_data_page(&fio);
284 if (err) {
285 unlock_page(page);
286 break;
287 }
288
289 /* record old blkaddr for revoking */
290 cur->old_addr = fio.old_blkaddr;
291
292 submit_bio = true;
292 last_idx = page->index;
293 }
294 unlock_page(page);
295 list_move_tail(&cur->list, revoke_list);
296 }
297
298 if (submit_bio)
299 f2fs_submit_merged_bio_cond(sbi, inode, NULL, 0, DATA, WRITE);
298 if (last_idx != ULONG_MAX)
299 f2fs_submit_merged_bio_cond(sbi, inode, 0, last_idx,
300 DATA, WRITE);
300
301 if (!err)
302 __revoke_inmem_pages(inode, revoke_list, false, false);
303
304 return err;
305}
306
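A reviewer's summary of the contract between the two functions around this point (mine, not text from the file):

/*
 * __commit_inmem_pages() writes each cached atomic page in place and
 * records the page's previous block address (cur->old_addr).  If the
 * commit fails midway, commit_inmem_pages() re-maps the pages that were
 * already written back to those old addresses via __revoke_inmem_pages();
 * if that revoke itself fails (e.g. out of memory), -EAGAIN is returned
 * and the transaction has to be treated as partially committed.
 */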
307int commit_inmem_pages(struct inode *inode)
308{
309 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
310 struct f2fs_inode_info *fi = F2FS_I(inode);
311 struct list_head revoke_list;
312 int err;
313
314 INIT_LIST_HEAD(&revoke_list);
315 f2fs_balance_fs(sbi, true);
316 f2fs_lock_op(sbi);
317
319 set_inode_flag(inode, FI_ATOMIC_COMMIT);
320
318 mutex_lock(&fi->inmem_lock);
319 err = __commit_inmem_pages(inode, &revoke_list);
320 if (err) {
321 int ret;
322 /*
323 * try to revoke all committed pages, but still we could fail
324 * due to no memory or other reason, if that happened, EAGAIN
325 * will be returned, which means in such case, transaction is

--- 5 unchanged lines hidden ---

331 if (ret)
332 err = ret;
333
334 /* drop all uncommitted pages */
335 __revoke_inmem_pages(inode, &fi->inmem_pages, true, false);
336 }
337 mutex_unlock(&fi->inmem_lock);
338
342 clear_inode_flag(inode, FI_ATOMIC_COMMIT);
343
339 f2fs_unlock_op(sbi);
340 return err;
341}
342
343/*
344 * This function balances dirty node and dentry pages.
345 * In addition, it controls garbage collection.
346 */

--- 71 unchanged lines hidden ---

418
419static int submit_flush_wait(struct f2fs_sb_info *sbi)
420{
421 int ret = __submit_flush_wait(sbi->sb->s_bdev);
422 int i;
423
424 if (sbi->s_ndevs && !ret) {
425 for (i = 1; i < sbi->s_ndevs; i++) {
431 trace_f2fs_issue_flush(FDEV(i).bdev,
432 test_opt(sbi, NOBARRIER),
433 test_opt(sbi, FLUSH_MERGE));
426 ret = __submit_flush_wait(FDEV(i).bdev);
427 if (ret)
428 break;
429 }
430 }
431 return ret;
432}
433
434static int issue_flush_thread(void *data)
435{
436 struct f2fs_sb_info *sbi = data;
437 struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info;
445 struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
438 wait_queue_head_t *q = &fcc->flush_wait_queue;
439repeat:
440 if (kthread_should_stop())
441 return 0;
442
443 if (!llist_empty(&fcc->issue_list)) {
444 struct flush_cmd *cmd, *next;
445 int ret;

--- 12 unchanged lines hidden ---

458
459 wait_event_interruptible(*q,
460 kthread_should_stop() || !llist_empty(&fcc->issue_list));
461 goto repeat;
462}
463
464int f2fs_issue_flush(struct f2fs_sb_info *sbi)
465{
466 struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info;
474 struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
467 struct flush_cmd cmd;
468
469 trace_f2fs_issue_flush(sbi->sb, test_opt(sbi, NOBARRIER),
470 test_opt(sbi, FLUSH_MERGE));
471
472 if (test_opt(sbi, NOBARRIER))
473 return 0;
474
475 if (!test_opt(sbi, FLUSH_MERGE) || !atomic_read(&fcc->submit_flush)) {
480 if (!test_opt(sbi, FLUSH_MERGE))
481 return submit_flush_wait(sbi);
482
483 if (!atomic_read(&fcc->submit_flush)) {
476 int ret;
477
478 atomic_inc(&fcc->submit_flush);
479 ret = submit_flush_wait(sbi);
480 atomic_dec(&fcc->submit_flush);
481 return ret;
482 }
483

--- 17 unchanged lines hidden ---

501}
502
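The merge fast path itself sits in the hidden region above. Its shape, as a hedged sketch (queue_merged_flush is a hypothetical name; the struct matches the fields this file touches, issue_list and flush_wait_queue, but the hidden code may differ in detail):

struct flush_cmd {
	struct completion wait;
	struct llist_node llnode;
	int ret;
};

static int queue_merged_flush(struct flush_cmd_control *fcc)
{
	struct flush_cmd cmd;

	init_completion(&cmd.wait);
	llist_add(&cmd.llnode, &fcc->issue_list);	/* lock-free enqueue */
	wake_up(&fcc->flush_wait_queue);		/* kick issue_flush_thread() */
	wait_for_completion(&cmd.wait);			/* thread flushes once for all waiters */
	return cmd.ret;
}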
503int create_flush_cmd_control(struct f2fs_sb_info *sbi)
504{
505 dev_t dev = sbi->sb->s_bdev->bd_dev;
506 struct flush_cmd_control *fcc;
507 int err = 0;
508
509 if (SM_I(sbi)->cmd_control_info) {
510 fcc = SM_I(sbi)->cmd_control_info;
517 if (SM_I(sbi)->fcc_info) {
518 fcc = SM_I(sbi)->fcc_info;
511 goto init_thread;
512 }
513
514 fcc = kzalloc(sizeof(struct flush_cmd_control), GFP_KERNEL);
515 if (!fcc)
516 return -ENOMEM;
517 atomic_set(&fcc->submit_flush, 0);
518 init_waitqueue_head(&fcc->flush_wait_queue);
519 init_llist_head(&fcc->issue_list);
520 SM_I(sbi)->cmd_control_info = fcc;
528 SM_I(sbi)->fcc_info = fcc;
521init_thread:
522 fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
523 "f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
524 if (IS_ERR(fcc->f2fs_issue_flush)) {
525 err = PTR_ERR(fcc->f2fs_issue_flush);
526 kfree(fcc);
527 SM_I(sbi)->cmd_control_info = NULL;
535 SM_I(sbi)->fcc_info = NULL;
528 return err;
529 }
530
531 return err;
532}
533
534void destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free)
535{
536 struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info;
544 struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
537
538 if (fcc && fcc->f2fs_issue_flush) {
539 struct task_struct *flush_thread = fcc->f2fs_issue_flush;
540
541 fcc->f2fs_issue_flush = NULL;
542 kthread_stop(flush_thread);
543 }
544 if (free) {
545 kfree(fcc);
546 SM_I(sbi)->cmd_control_info = NULL;
554 SM_I(sbi)->fcc_info = NULL;
547 }
548}
549
550static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
551 enum dirty_type dirty_type)
552{
553 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
554

--- 63 unchanged lines hidden ---

618 } else {
619 /* Recovery routine with SSR needs this */
620 __remove_dirty_segment(sbi, segno, DIRTY);
621 }
622
623 mutex_unlock(&dirty_i->seglist_lock);
624}
625
626static struct bio_entry *__add_bio_entry(struct f2fs_sb_info *sbi,
627 struct bio *bio)
628{
629 struct list_head *wait_list = &(SM_I(sbi)->wait_list);
630 struct bio_entry *be = f2fs_kmem_cache_alloc(bio_entry_slab, GFP_NOFS);
631
632 INIT_LIST_HEAD(&be->list);
633 be->bio = bio;
634 init_completion(&be->event);
635 list_add_tail(&be->list, wait_list);
636
637 return be;
638}
639
634static void __add_discard_cmd(struct f2fs_sb_info *sbi,
635 struct bio *bio, block_t lstart, block_t len)
636{
637 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
638 struct list_head *cmd_list = &(dcc->discard_cmd_list);
639 struct discard_cmd *dc;
640
641 dc = f2fs_kmem_cache_alloc(discard_cmd_slab, GFP_NOFS);
642 INIT_LIST_HEAD(&dc->list);
643 dc->bio = bio;
644 bio->bi_private = dc;
645 dc->lstart = lstart;
646 dc->len = len;
647 dc->state = D_PREP;
648 init_completion(&dc->wait);
649
650 mutex_lock(&dcc->cmd_lock);
651 list_add_tail(&dc->list, cmd_list);
652 mutex_unlock(&dcc->cmd_lock);
653}
654
640void f2fs_wait_all_discard_bio(struct f2fs_sb_info *sbi)
641{
642 struct list_head *wait_list = &(SM_I(sbi)->wait_list);
643 struct bio_entry *be, *tmp;
644
645 list_for_each_entry_safe(be, tmp, wait_list, list) {
646 struct bio *bio = be->bio;
647 int err;
648
649 wait_for_completion_io(&be->event);
650 err = be->error;
651 if (err == -EOPNOTSUPP)
652 err = 0;
653
654 if (err)
655 f2fs_msg(sbi->sb, KERN_INFO,
656 "Issue discard failed, ret: %d", err);
657
658 bio_put(bio);
659 list_del(&be->list);
660 kmem_cache_free(bio_entry_slab, be);
661 }
662}
663
655static void __remove_discard_cmd(struct f2fs_sb_info *sbi, struct discard_cmd *dc)
656{
657 int err = dc->bio->bi_error;
658
659 if (dc->state == D_DONE)
660 atomic_dec(&(SM_I(sbi)->dcc_info->submit_discard));
661
662 if (err == -EOPNOTSUPP)
663 err = 0;
664
665 if (err)
666 f2fs_msg(sbi->sb, KERN_INFO,
667 "Issue discard failed, ret: %d", err);
668 bio_put(dc->bio);
669 list_del(&dc->list);
670 kmem_cache_free(discard_cmd_slab, dc);
671}
672
673/* This should be covered by global mutex, &sit_i->sentry_lock */
674void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr)
675{
676 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
677 struct list_head *wait_list = &(dcc->discard_cmd_list);
678 struct discard_cmd *dc, *tmp;
679
680 mutex_lock(&dcc->cmd_lock);
681 list_for_each_entry_safe(dc, tmp, wait_list, list) {
682
683 if (blkaddr == NULL_ADDR) {
684 if (dc->state == D_PREP) {
685 dc->state = D_SUBMIT;
686 submit_bio(dc->bio);
687 atomic_inc(&dcc->submit_discard);
688 }
689 wait_for_completion_io(&dc->wait);
690
691 __remove_discard_cmd(sbi, dc);
692 continue;
693 }
694
695 if (dc->lstart <= blkaddr && blkaddr < dc->lstart + dc->len) {
696 if (dc->state == D_SUBMIT)
697 wait_for_completion_io(&dc->wait);
698 else
699 __remove_discard_cmd(sbi, dc);
700 }
701 }
702 mutex_unlock(&dcc->cmd_lock);
703}
704
664static void f2fs_submit_bio_wait_endio(struct bio *bio)
665{
666 struct bio_entry *be = (struct bio_entry *)bio->bi_private;
667
668 be->error = bio->bi_error;
669 complete(&be->event);
670}
671
705static void f2fs_submit_discard_endio(struct bio *bio)
706{
707 struct discard_cmd *dc = (struct discard_cmd *)bio->bi_private;
708
709 complete(&dc->wait);
710 dc->state = D_DONE;
711}
712
713static int issue_discard_thread(void *data)
714{
715 struct f2fs_sb_info *sbi = data;
716 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
717 wait_queue_head_t *q = &dcc->discard_wait_queue;
718 struct list_head *cmd_list = &dcc->discard_cmd_list;
719 struct discard_cmd *dc, *tmp;
720 struct blk_plug plug;
721 int iter = 0;
722repeat:
723 if (kthread_should_stop())
724 return 0;
725
726 blk_start_plug(&plug);
727
728 mutex_lock(&dcc->cmd_lock);
729 list_for_each_entry_safe(dc, tmp, cmd_list, list) {
730 if (dc->state == D_PREP) {
731 dc->state = D_SUBMIT;
732 submit_bio(dc->bio);
733 atomic_inc(&dcc->submit_discard);
734 if (iter++ > DISCARD_ISSUE_RATE)
735 break;
736 } else if (dc->state == D_DONE) {
737 __remove_discard_cmd(sbi, dc);
738 }
739 }
740 mutex_unlock(&dcc->cmd_lock);
741
742 blk_finish_plug(&plug);
743
744 iter = 0;
745 congestion_wait(BLK_RW_SYNC, HZ/50);
746
747 wait_event_interruptible(*q,
748 kthread_should_stop() || !list_empty(&dcc->discard_cmd_list));
749 goto repeat;
750}
751
752
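Reviewer note: the new functions above, together with __f2fs_issue_discard_async() below, form a single producer/consumer state machine. Summarized (my annotation, not from the file):

/*
 *  __f2fs_issue_discard_async()  builds the bio; __add_discard_cmd()
 *                                queues it in state D_PREP
 *  issue_discard_thread()        D_PREP -> D_SUBMIT via submit_bio(),
 *                                throttled by DISCARD_ISSUE_RATE, and
 *                                reaps D_DONE entries
 *  f2fs_submit_discard_endio()   D_SUBMIT -> D_DONE, complete(&dc->wait)
 *  f2fs_wait_discard_bio()       force-submits D_PREP entries and waits,
 *                                for one block address or for all of them
 */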
672/* this function is copied from blkdev_issue_discard from block/blk-lib.c */
673static int __f2fs_issue_discard_async(struct f2fs_sb_info *sbi,
674 struct block_device *bdev, block_t blkstart, block_t blklen)
675{
676 struct bio *bio = NULL;
677 int err;
678
679 trace_f2fs_issue_discard(sbi->sb, blkstart, blklen);
680
681 if (sbi->s_ndevs) {
682 int devi = f2fs_target_device_index(sbi, blkstart);
683
684 blkstart -= FDEV(devi).start_blk;
685 }
686 err = __blkdev_issue_discard(bdev,
687 SECTOR_FROM_BLOCK(blkstart),
688 SECTOR_FROM_BLOCK(blklen),
689 GFP_NOFS, 0, &bio);
690 if (!err && bio) {
691 struct bio_entry *be = __add_bio_entry(sbi, bio);
692
693 bio->bi_private = be;
694 bio->bi_end_io = f2fs_submit_bio_wait_endio;
695 bio->bi_opf |= REQ_SYNC;
696 submit_bio(bio);
697 }
698
699 return err;
700}
701
753/* this function is copied from blkdev_issue_discard from block/blk-lib.c */
754static int __f2fs_issue_discard_async(struct f2fs_sb_info *sbi,
755 struct block_device *bdev, block_t blkstart, block_t blklen)
756{
757 struct bio *bio = NULL;
758 block_t lblkstart = blkstart;
759 int err;
760
761 trace_f2fs_issue_discard(bdev, blkstart, blklen);
762
763 if (sbi->s_ndevs) {
764 int devi = f2fs_target_device_index(sbi, blkstart);
765
766 blkstart -= FDEV(devi).start_blk;
767 }
768 err = __blkdev_issue_discard(bdev,
769 SECTOR_FROM_BLOCK(blkstart),
770 SECTOR_FROM_BLOCK(blklen),
771 GFP_NOFS, 0, &bio);
772 if (!err && bio) {
773 bio->bi_end_io = f2fs_submit_discard_endio;
774 bio->bi_opf |= REQ_SYNC;
775
776 __add_discard_cmd(sbi, bio, lblkstart, blklen);
777 wake_up(&SM_I(sbi)->dcc_info->discard_wait_queue);
778 }
779 return err;
780}
781

702#ifdef CONFIG_BLK_DEV_ZONED
703static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
704 struct block_device *bdev, block_t blkstart, block_t blklen)
705{
706 sector_t nr_sects = SECTOR_FROM_BLOCK(blklen);

--- 23 unchanged lines hidden ---

730 switch (get_blkz_type(sbi, bdev, blkstart)) {
731
732 case BLK_ZONE_TYPE_CONVENTIONAL:
733 if (!blk_queue_discard(bdev_get_queue(bdev)))
734 return 0;
735 return __f2fs_issue_discard_async(sbi, bdev, blkstart, blklen);
736 case BLK_ZONE_TYPE_SEQWRITE_REQ:
737 case BLK_ZONE_TYPE_SEQWRITE_PREF:
738 trace_f2fs_issue_reset_zone(sbi->sb, blkstart);
818 trace_f2fs_issue_reset_zone(bdev, blkstart);
739 return blkdev_reset_zones(bdev, sector,
740 nr_sects, GFP_NOFS);
741 default:
742 /* Unknown zone type: broken device ? */
743 return -EIO;
744 }
745}
746#endif

--- 48 unchanged lines hidden ---

795 err = __issue_discard_async(sbi, bdev, start, len);
796 return err;
797}
798
799static void __add_discard_entry(struct f2fs_sb_info *sbi,
800 struct cp_control *cpc, struct seg_entry *se,
801 unsigned int start, unsigned int end)
802{
803 struct list_head *head = &SM_I(sbi)->discard_list;
883 struct list_head *head = &SM_I(sbi)->dcc_info->discard_entry_list;
804 struct discard_entry *new, *last;
805
806 if (!list_empty(head)) {
807 last = list_last_entry(head, struct discard_entry, list);
808 if (START_BLOCK(sbi, cpc->trim_start) + start ==
809 last->blkaddr + last->len) {
889 last->blkaddr + last->len &&
890 last->len < MAX_DISCARD_BLOCKS(sbi)) {
810 last->len += end - start;
811 goto done;
812 }
813 }
814
815 new = f2fs_kmem_cache_alloc(discard_entry_slab, GFP_NOFS);
816 INIT_LIST_HEAD(&new->list);
817 new->blkaddr = START_BLOCK(sbi, cpc->trim_start) + start;
818 new->len = end - start;
819 list_add_tail(&new->list, head);
820done:
821 SM_I(sbi)->nr_discards += end - start;
902 SM_I(sbi)->dcc_info->nr_discards += end - start;
822}
823
824static void add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc)
905static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc,
906 bool check_only)
825{
826 int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
827 int max_blocks = sbi->blocks_per_seg;
828 struct seg_entry *se = get_seg_entry(sbi, cpc->trim_start);
829 unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
830 unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
831 unsigned long *discard_map = (unsigned long *)se->discard_map;
832 unsigned long *dmap = SIT_I(sbi)->tmp_map;
833 unsigned int start = 0, end = -1;
834 bool force = (cpc->reason == CP_DISCARD);
835 int i;
836
837 if (se->valid_blocks == max_blocks || !f2fs_discard_en(sbi))
838 return;
920 return false;
839
840 if (!force) {
841 if (!test_opt(sbi, DISCARD) || !se->valid_blocks ||
842 SM_I(sbi)->nr_discards >= SM_I(sbi)->max_discards)
843 return;
924 SM_I(sbi)->dcc_info->nr_discards >=
925 SM_I(sbi)->dcc_info->max_discards)
926 return false;
844 }
845
846 /* SIT_VBLOCK_MAP_SIZE should be multiple of sizeof(unsigned long) */
847 for (i = 0; i < entries; i++)
848 dmap[i] = force ? ~ckpt_map[i] & ~discard_map[i] :
849 (cur_map[i] ^ ckpt_map[i]) & ckpt_map[i];
850
851 while (force || SM_I(sbi)->nr_discards <= SM_I(sbi)->max_discards) {
934 while (force || SM_I(sbi)->dcc_info->nr_discards <=
935 SM_I(sbi)->dcc_info->max_discards) {
852 start = __find_rev_next_bit(dmap, max_blocks, end + 1);
853 if (start >= max_blocks)
854 break;
855
856 end = __find_rev_next_zero_bit(dmap, max_blocks, start + 1);
857 if (force && start && end != max_blocks
858 && (end - start) < cpc->trim_minlen)
859 continue;
860
945 if (check_only)
946 return true;
947
861 __add_discard_entry(sbi, cpc, se, start, end);
862 }
950 return false;
863}
864
865void release_discard_addrs(struct f2fs_sb_info *sbi)
866{
867 struct list_head *head = &(SM_I(sbi)->discard_list);
955 struct list_head *head = &(SM_I(sbi)->dcc_info->discard_entry_list);
868 struct discard_entry *entry, *this;
869
870 /* drop caches */
871 list_for_each_entry_safe(entry, this, head, list) {
872 list_del(&entry->list);
873 kmem_cache_free(discard_entry_slab, entry);
874 }
875}

--- 9 unchanged lines hidden ---

885 mutex_lock(&dirty_i->seglist_lock);
886 for_each_set_bit(segno, dirty_i->dirty_segmap[PRE], MAIN_SEGS(sbi))
887 __set_test_and_free(sbi, segno);
888 mutex_unlock(&dirty_i->seglist_lock);
889}
890
891void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc)
892{
893 struct list_head *head = &(SM_I(sbi)->discard_list);
981 struct list_head *head = &(SM_I(sbi)->dcc_info->discard_entry_list);
894 struct discard_entry *entry, *this;
895 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
896 struct blk_plug plug;
897 unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
898 unsigned int start = 0, end = -1;
899 unsigned int secno, start_segno;
900 bool force = (cpc->reason == CP_DISCARD);
901
902 blk_start_plug(&plug);
903
904 mutex_lock(&dirty_i->seglist_lock);
905
906 while (1) {
907 int i;
908 start = find_next_bit(prefree_map, MAIN_SEGS(sbi), end + 1);
909 if (start >= MAIN_SEGS(sbi))
910 break;
911 end = find_next_zero_bit(prefree_map, MAIN_SEGS(sbi),
912 start + 1);
913
914 for (i = start; i < end; i++)
915 clear_bit(i, prefree_map);
916
917 dirty_i->nr_dirty[PRE] -= end - start;
918
919 if (force || !test_opt(sbi, DISCARD))
1004 if (!test_opt(sbi, DISCARD))
920 continue;
921
1007 if (force && start >= cpc->trim_start &&
1008 (end - 1) <= cpc->trim_end)
1009 continue;
1010
922 if (!test_opt(sbi, LFS) || sbi->segs_per_sec == 1) {
923 f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
924 (end - start) << sbi->log_blocks_per_seg);
925 continue;
926 }
927next:
928 secno = GET_SECNO(sbi, start);
929 start_segno = secno * sbi->segs_per_sec;

--- 11 unchanged lines hidden ---

941 /* send small discards */
942 list_for_each_entry_safe(entry, this, head, list) {
943 if (force && entry->len < cpc->trim_minlen)
944 goto skip;
945 f2fs_issue_discard(sbi, entry->blkaddr, entry->len);
946 cpc->trimmed += entry->len;
947skip:
948 list_del(&entry->list);
949 SM_I(sbi)->nr_discards -= entry->len;
1038 SM_I(sbi)->dcc_info->nr_discards -= entry->len;
950 kmem_cache_free(discard_entry_slab, entry);
951 }
952
953 blk_finish_plug(&plug);
954}
955
1041}
1042
1043static int create_discard_cmd_control(struct f2fs_sb_info *sbi)
1044{
1045 dev_t dev = sbi->sb->s_bdev->bd_dev;
1046 struct discard_cmd_control *dcc;
1047 int err = 0;
1048
1049 if (SM_I(sbi)->dcc_info) {
1050 dcc = SM_I(sbi)->dcc_info;
1051 goto init_thread;
1052 }
1053
1054 dcc = kzalloc(sizeof(struct discard_cmd_control), GFP_KERNEL);
1055 if (!dcc)
1056 return -ENOMEM;
1057
1058 INIT_LIST_HEAD(&dcc->discard_entry_list);
1059 INIT_LIST_HEAD(&dcc->discard_cmd_list);
1060 mutex_init(&dcc->cmd_lock);
1061 atomic_set(&dcc->submit_discard, 0);
1062 dcc->nr_discards = 0;
1063 dcc->max_discards = 0;
1064
1065 init_waitqueue_head(&dcc->discard_wait_queue);
1066 SM_I(sbi)->dcc_info = dcc;
1067init_thread:
1068 dcc->f2fs_issue_discard = kthread_run(issue_discard_thread, sbi,
1069 "f2fs_discard-%u:%u", MAJOR(dev), MINOR(dev));
1070 if (IS_ERR(dcc->f2fs_issue_discard)) {
1071 err = PTR_ERR(dcc->f2fs_issue_discard);
1072 kfree(dcc);
1073 SM_I(sbi)->dcc_info = NULL;
1074 return err;
1075 }
1076
1077 return err;
1078}
1079
1080static void destroy_discard_cmd_control(struct f2fs_sb_info *sbi, bool free)
1081{
1082 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1083
1084 if (dcc && dcc->f2fs_issue_discard) {
1085 struct task_struct *discard_thread = dcc->f2fs_issue_discard;
1086
1087 dcc->f2fs_issue_discard = NULL;
1088 kthread_stop(discard_thread);
1089 }
1090 if (free) {
1091 kfree(dcc);
1092 SM_I(sbi)->dcc_info = NULL;
1093 }
1094}
1095
956static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
957{
958 struct sit_info *sit_i = SIT_I(sbi);
959
960 if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap)) {
961 sit_i->dirty_sentries++;
962 return false;
963 }

--- 26 unchanged lines hidden ---

990 (new_vblocks > sbi->blocks_per_seg)));
991
992 se->valid_blocks = new_vblocks;
993 se->mtime = get_mtime(sbi);
994 SIT_I(sbi)->max_mtime = se->mtime;
995
996 /* Update valid block bitmap */
997 if (del > 0) {
998 if (f2fs_test_and_set_bit(offset, se->cur_valid_map))
1138 if (f2fs_test_and_set_bit(offset, se->cur_valid_map)) {
1139#ifdef CONFIG_F2FS_CHECK_FS
1140 if (f2fs_test_and_set_bit(offset,
1141 se->cur_valid_map_mir))
1142 f2fs_bug_on(sbi, 1);
1143 else
1144 WARN_ON(1);
1145#else
999 f2fs_bug_on(sbi, 1);
1146 f2fs_bug_on(sbi, 1);
1147#endif
1148 }
1000 if (f2fs_discard_en(sbi) &&
1001 !f2fs_test_and_set_bit(offset, se->discard_map))
1002 sbi->discard_blks--;
1003 } else {
1004 if (!f2fs_test_and_clear_bit(offset, se->cur_valid_map))
1153 if (!f2fs_test_and_clear_bit(offset, se->cur_valid_map)) {
1154#ifdef CONFIG_F2FS_CHECK_FS
1155 if (!f2fs_test_and_clear_bit(offset,
1156 se->cur_valid_map_mir))
1157 f2fs_bug_on(sbi, 1);
1158 else
1159 WARN_ON(1);
1160#else
1005 f2fs_bug_on(sbi, 1);
1161 f2fs_bug_on(sbi, 1);
1162#endif
1163 }
1006 if (f2fs_discard_en(sbi) &&
1007 f2fs_test_and_clear_bit(offset, se->discard_map))
1008 sbi->discard_blks++;
1009 }
1010 if (!f2fs_test_bit(offset, se->ckpt_valid_map))
1011 se->ckpt_valid_blocks += del;
1012
1013 __mark_sit_entry_dirty(sbi, segno);

--- 148 unchanged lines hidden ---

1162 memcpy(&dst->footer, &src->footer, SUM_FOOTER_SIZE);
1163
1164 mutex_unlock(&curseg->curseg_mutex);
1165
1166 set_page_dirty(page);
1167 f2fs_put_page(page, 1);
1168}
1169
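The CONFIG_F2FS_CHECK_FS hunks above keep a mirror copy of cur_valid_map and complain loudly when the two copies diverge. The pattern in isolation, as a hypothetical helper (the diff open-codes this at both call sites; it runs only after the primary map already reported an unexpected double set/clear):

#ifdef CONFIG_F2FS_CHECK_FS
static inline void f2fs_check_bitmap_skew(struct f2fs_sb_info *sbi,
				unsigned int offset, struct seg_entry *se)
{
	/* primary map said the bit was already set; ask the mirror */
	if (f2fs_test_and_set_bit(offset, se->cur_valid_map_mir))
		f2fs_bug_on(sbi, 1);	/* both copies agree: genuine double alloc */
	else
		WARN_ON(1);		/* copies disagree: bitmap corruption */
}
#endif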
1170static int is_next_segment_free(struct f2fs_sb_info *sbi, int type)
1171{
1172 struct curseg_info *curseg = CURSEG_I(sbi, type);
1173 unsigned int segno = curseg->segno + 1;
1174 struct free_segmap_info *free_i = FREE_I(sbi);
1175
1176 if (segno < MAIN_SEGS(sbi) && segno % sbi->segs_per_sec)
1177 return !test_bit(segno, free_i->free_segmap);
1178 return 0;
1179}
1180
1181/*
1182 * Find a new segment from the free segments bitmap to right order
1183 * This function should be returned with success, otherwise BUG
1184 */
1185static void get_new_segment(struct f2fs_sb_info *sbi,
1186 unsigned int *newseg, bool new_sec, int dir)
1187{
1188 struct free_segmap_info *free_i = FREE_I(sbi);

--- 188 unchanged lines hidden ---

1377 f2fs_put_page(sum_page, 1);
1378 }
1379}
1380
1381static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
1382{
1383 struct curseg_info *curseg = CURSEG_I(sbi, type);
1384 const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;
1532 int i, n;
1385
1386 if (IS_NODESEG(type) || !has_not_enough_free_secs(sbi, 0, 0))
1387 return v_ops->get_victim(sbi,
1388 &(curseg)->next_segno, BG_GC, type, SSR);
1389
1390 /* For data segments, let's do SSR more intensively */
1391 for (; type >= CURSEG_HOT_DATA; type--)
1392 if (v_ops->get_victim(sbi, &(curseg)->next_segno,
1393 BG_GC, type, SSR))
1394 return 1;
1395 return 0;
1396}
1397
1398/*
1399 * flush out current segment and replace it with new segment
1400 * This function should be returned with success, otherwise BUG
1401 */
1402static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
1403 int type, bool force)
1404{
1405 struct curseg_info *curseg = CURSEG_I(sbi, type);
1406
1407 if (force)
1408 new_curseg(sbi, type, true);
1409 else if (type == CURSEG_WARM_NODE)
1410 new_curseg(sbi, type, false);
1411 else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type))
1412 new_curseg(sbi, type, false);
1413 else if (need_SSR(sbi) && get_ssr_segment(sbi, type))
1414 change_curseg(sbi, type, true);
1415 else
1416 new_curseg(sbi, type, false);
1417
1418 stat_inc_seg_type(sbi, curseg);
1419}
1420
1421void allocate_new_segments(struct f2fs_sb_info *sbi)
1422{
1423 struct curseg_info *curseg;
1424 unsigned int old_segno;
1425 int i;
1426
1533
1534 /* need_SSR() already forces to do this */
1535 if (v_ops->get_victim(sbi, &(curseg)->next_segno, BG_GC, type, SSR))
1536 return 1;
1537
1538 /* For node segments, let's do SSR more intensively */
1539 if (IS_NODESEG(type)) {
1540 i = CURSEG_HOT_NODE;
1541 n = CURSEG_COLD_NODE;
1542 } else {
1543 i = CURSEG_HOT_DATA;
1544 n = CURSEG_COLD_DATA;
1545 }
1546
1547 for (; i <= n; i++) {
1548 if (i == type)
1549 continue;
1550 if (v_ops->get_victim(sbi, &(curseg)->next_segno,
1551 BG_GC, i, SSR))
1552 return 1;
1553 }
1554 return 0;
1555}
1556
1557/*
1558 * flush out current segment and replace it with new segment
1559 * This function should be returned with success, otherwise BUG
1560 */
1561static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
1562 int type, bool force)
1563{
1564 if (force)
1565 new_curseg(sbi, type, true);
1566 else if (!is_set_ckpt_flags(sbi, CP_CRC_RECOVERY_FLAG) &&
1567 type == CURSEG_WARM_NODE)
1568 new_curseg(sbi, type, false);
1569 else if (need_SSR(sbi) && get_ssr_segment(sbi, type))
1570 change_curseg(sbi, type, true);
1571 else
1572 new_curseg(sbi, type, false);
1573
1574 stat_inc_seg_type(sbi, CURSEG_I(sbi, type));
1575}
1576
1577void allocate_new_segments(struct f2fs_sb_info *sbi)
1578{
1579 struct curseg_info *curseg;
1580 unsigned int old_segno;
1581 int i;
1582
1427 if (test_opt(sbi, LFS))
1428 return;
1429
1430 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
1431 curseg = CURSEG_I(sbi, i);
1432 old_segno = curseg->segno;
1433 SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true);
1434 locate_dirty_segment(sbi, old_segno);
1435 }
1436}
1437
1438static const struct segment_allocation default_salloc_ops = {
1439 .allocate_segment = allocate_segment_by_default,
1440};
1441
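For reference, the rewritten get_ssr_segment() above changes the SSR fallback order; a worked example (my illustration):

/*
 * Example, type == CURSEG_COLD_DATA under the new code:
 *   1. try a COLD_DATA victim directly (need_SSR() already held);
 *   2. failing that, scan i = HOT_DATA, WARM_DATA (i == type is
 *      skipped) and reuse the first victim found.
 * Node logs now run the same loop over HOT_NODE..COLD_NODE, whereas
 * the old code never tried cross-type SSR for node segments.
 */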
1595bool exist_trim_candidates(struct f2fs_sb_info *sbi, struct cp_control *cpc)
1596{
1597 __u64 trim_start = cpc->trim_start;
1598 bool has_candidate = false;
1599
1600 mutex_lock(&SIT_I(sbi)->sentry_lock);
1601 for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++) {
1602 if (add_discard_addrs(sbi, cpc, true)) {
1603 has_candidate = true;
1604 break;
1605 }
1606 }
1607 mutex_unlock(&SIT_I(sbi)->sentry_lock);
1608
1609 cpc->trim_start = trim_start;
1610 return has_candidate;
1611}
1612
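exist_trim_candidates() above gives the checkpoint path a cheap dry run, via add_discard_addrs(..., check_only = true), before committing to a full discard walk. For orientation, this whole path is driven from userspace by the standard FITRIM ioctl, which lands in f2fs_trim_fs() below. A minimal caller (standard Linux API, not part of this diff):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>		/* FITRIM, struct fstrim_range */

int trim_whole_fs(const char *mountpoint)
{
	struct fstrim_range range = {
		.start = 0,
		.len = (__u64)-1,	/* whole filesystem */
		.minlen = 0,		/* let the fs choose its floor */
	};
	int fd = open(mountpoint, O_RDONLY);

	if (fd < 0)
		return -1;
	if (ioctl(fd, FITRIM, &range) < 0) {	/* reaches f2fs_trim_fs() */
		perror("FITRIM");
		close(fd);
		return -1;
	}
	close(fd);
	printf("trimmed %llu bytes\n", (unsigned long long)range.len);
	return 0;
}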
1442int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
1443{
1444 __u64 start = F2FS_BYTES_TO_BLK(range->start);
1445 __u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1;
1446 unsigned int start_segno, end_segno;
1447 struct cp_control cpc;
1448 int err = 0;
1449

--- 118 unchanged lines hidden ---

1568 struct sit_info *sit_i = SIT_I(sbi);
1569 struct curseg_info *curseg = CURSEG_I(sbi, type);
1570
1571 mutex_lock(&curseg->curseg_mutex);
1572 mutex_lock(&sit_i->sentry_lock);
1573
1574 *new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
1575
1747 f2fs_wait_discard_bio(sbi, *new_blkaddr);
1748
1576 /*
1577 * __add_sum_entry should be resided under the curseg_mutex
1578 * because, this function updates a summary entry in the
1579 * current summary block.
1580 */
1581 __add_sum_entry(sbi, type, sum);
1582
1583 __refresh_next_blkoff(sbi, curseg);
1584
1585 stat_inc_block_count(sbi, curseg);
1586
1587 if (!__has_curseg_space(sbi, type))
1588 sit_i->s_ops->allocate_segment(sbi, type, false);
1589 /*
1590 * SIT information should be updated before segment allocation,
1591 * since SSR needs latest valid block information.
1592 */
1593 refresh_sit_entry(sbi, old_blkaddr, *new_blkaddr);
1594
1760 /*
1761 * SIT information should be updated before segment allocation,
1762 * since SSR needs latest valid block information.
1763 */
1764 refresh_sit_entry(sbi, old_blkaddr, *new_blkaddr);
1765
1766 if (!__has_curseg_space(sbi, type))
1767 sit_i->s_ops->allocate_segment(sbi, type, false);
1768
1595 mutex_unlock(&sit_i->sentry_lock);
1596
1597 if (page && IS_NODESEG(type))
1598 fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));
1599
1600 mutex_unlock(&curseg->curseg_mutex);
1601}
1602
1603static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
1604{
1605 int type = __get_segment_type(fio->page, fio->type);
1780 int err;
1606
1607 if (fio->type == NODE || fio->type == DATA)
1608 mutex_lock(&fio->sbi->wio_mutex[fio->type]);
1609
1784reallocate:
1610 allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
1611 &fio->new_blkaddr, sum, type);
1612
1613 /* writeout dirty page into bdev */
1614 f2fs_submit_page_mbio(fio);
1789 err = f2fs_submit_page_mbio(fio);
1790 if (err == -EAGAIN) {
1791 fio->old_blkaddr = fio->new_blkaddr;
1792 goto reallocate;
1793 }
1615
1616 if (fio->type == NODE || fio->type == DATA)
1617 mutex_unlock(&fio->sbi->wio_mutex[fio->type]);
1618}
1619
1620void write_meta_page(struct f2fs_sb_info *sbi, struct page *page)
1621{
1622 struct f2fs_io_info fio = {

--- 125 unchanged lines hidden ---

1748}
1749
1750void f2fs_wait_on_page_writeback(struct page *page,
1751 enum page_type type, bool ordered)
1752{
1753 if (PageWriteback(page)) {
1754 struct f2fs_sb_info *sbi = F2FS_P_SB(page);
1755
1756 f2fs_submit_merged_bio_cond(sbi, NULL, page, 0, type, WRITE);
1935 f2fs_submit_merged_bio_cond(sbi, page->mapping->host,
1936 0, page->index, type, WRITE);
1757 if (ordered)
1758 wait_on_page_writeback(page);
1759 else
1760 wait_for_stable_page(page);
1761 }
1762}
1763
1764void f2fs_wait_on_encrypted_page_writeback(struct f2fs_sb_info *sbi,

--- 458 unchanged lines hidden ---

2223 for_each_set_bit_from(segno, bitmap, end) {
2224 int offset, sit_offset;
2225
2226 se = get_seg_entry(sbi, segno);
2227
2228 /* add discard candidates */
2229 if (cpc->reason != CP_DISCARD) {
2230 cpc->trim_start = segno;
2231 add_discard_addrs(sbi, cpc);
2411 add_discard_addrs(sbi, cpc, false);
2232 }
2233
2234 if (to_journal) {
2235 offset = lookup_journal_in_cursum(journal,
2236 SIT_JOURNAL, segno, 1);
2237 f2fs_bug_on(sbi, offset < 0);
2238 segno_in_journal(journal, offset) =
2239 cpu_to_le32(segno);

--- 18 unchanged lines hidden ---

2258 f2fs_bug_on(sbi, ses->entry_cnt);
2259 release_sit_entry_set(ses);
2260 }
2261
2262 f2fs_bug_on(sbi, !list_empty(head));
2263 f2fs_bug_on(sbi, sit_i->dirty_sentries);
2264out:
2265 if (cpc->reason == CP_DISCARD) {
2446 __u64 trim_start = cpc->trim_start;
2447
2266 for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++)
2267 add_discard_addrs(sbi, cpc);
2448 for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++)
2449 add_discard_addrs(sbi, cpc, false);
2450
2451 cpc->trim_start = trim_start;
2268 }
2269 mutex_unlock(&sit_i->sentry_lock);
2270
2271 set_prefree_as_free_segments(sbi);
2272}
2273
2274static int build_sit_info(struct f2fs_sb_info *sbi)
2275{
2276 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
2277 struct sit_info *sit_i;
2278 unsigned int sit_segs, start;
2279 char *src_bitmap, *dst_bitmap;
2463 char *src_bitmap;
2280 unsigned int bitmap_size;
2281
2282 /* allocate memory for SIT information */
2283 sit_i = kzalloc(sizeof(struct sit_info), GFP_KERNEL);
2284 if (!sit_i)
2285 return -ENOMEM;
2286
2287 SM_I(sbi)->sit_info = sit_i;

--- 12 unchanged lines hidden ---

2300 sit_i->sentries[start].cur_valid_map
2301 = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
2302 sit_i->sentries[start].ckpt_valid_map
2303 = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
2304 if (!sit_i->sentries[start].cur_valid_map ||
2305 !sit_i->sentries[start].ckpt_valid_map)
2306 return -ENOMEM;
2307
2492#ifdef CONFIG_F2FS_CHECK_FS
2493 sit_i->sentries[start].cur_valid_map_mir
2494 = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
2495 if (!sit_i->sentries[start].cur_valid_map_mir)
2496 return -ENOMEM;
2497#endif
2498
2308 if (f2fs_discard_en(sbi)) {
2309 sit_i->sentries[start].discard_map
2310 = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
2311 if (!sit_i->sentries[start].discard_map)
2312 return -ENOMEM;
2313 }
2314 }
2315

--- 10 unchanged lines hidden ---

2326
2327 /* get information related with SIT */
2328 sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1;
2329
2330 /* setup SIT bitmap from checkpoint pack */
2331 bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
2332 src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);
2333
2334 dst_bitmap = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
2335 if (!dst_bitmap)
2525 sit_i->sit_bitmap = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
2526 if (!sit_i->sit_bitmap)
2336 return -ENOMEM;
2337
2529#ifdef CONFIG_F2FS_CHECK_FS
2530 sit_i->sit_bitmap_mir = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
2531 if (!sit_i->sit_bitmap_mir)
2532 return -ENOMEM;
2533#endif
2534
2338 /* init SIT information */
2339 sit_i->s_ops = &default_salloc_ops;
2340
2341 sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
2342 sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
2343 sit_i->written_valid_blocks = 0;
2344 sit_i->sit_bitmap = dst_bitmap;
2345 sit_i->bitmap_size = bitmap_size;
2346 sit_i->dirty_sentries = 0;
2347 sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
2348 sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
2349 sit_i->mounted_time = CURRENT_TIME_SEC.tv_sec;
2350 mutex_init(&sit_i->sentry_lock);
2351 return 0;
2352}

--- 268 unchanged lines hidden ---

2621 if (sm_info->rec_prefree_segments > DEF_MAX_RECLAIM_PREFREE_SEGMENTS)
2622 sm_info->rec_prefree_segments = DEF_MAX_RECLAIM_PREFREE_SEGMENTS;
2623
2624 if (!test_opt(sbi, LFS))
2625 sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC;
2626 sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
2627 sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
2628
2629 INIT_LIST_HEAD(&sm_info->discard_list);
2630 INIT_LIST_HEAD(&sm_info->wait_list);
2631 sm_info->nr_discards = 0;
2632 sm_info->max_discards = 0;
2633
2634 sm_info->trim_sections = DEF_BATCHED_TRIM_SECTIONS;
2635
2636 INIT_LIST_HEAD(&sm_info->sit_entry_set);
2637
2638 if (test_opt(sbi, FLUSH_MERGE) && !f2fs_readonly(sbi->sb)) {
2639 err = create_flush_cmd_control(sbi);
2640 if (err)
2641 return err;
2642 }
2643
2835 err = create_discard_cmd_control(sbi);
2836 if (err)
2837 return err;
2838
2644 err = build_sit_info(sbi);
2645 if (err)
2646 return err;
2647 err = build_free_segmap(sbi);
2648 if (err)
2649 return err;
2650 err = build_curseg(sbi);
2651 if (err)

--- 77 unchanged lines hidden ---

2729 unsigned int start;
2730
2731 if (!sit_i)
2732 return;
2733
2734 if (sit_i->sentries) {
2735 for (start = 0; start < MAIN_SEGS(sbi); start++) {
2736 kfree(sit_i->sentries[start].cur_valid_map);
2932#ifdef CONFIG_F2FS_CHECK_FS
2933 kfree(sit_i->sentries[start].cur_valid_map_mir);
2934#endif
2737 kfree(sit_i->sentries[start].ckpt_valid_map);
2738 kfree(sit_i->sentries[start].discard_map);
2739 }
2740 }
2741 kfree(sit_i->tmp_map);
2742
2743 kvfree(sit_i->sentries);
2744 kvfree(sit_i->sec_entries);
2745 kvfree(sit_i->dirty_sentries_bitmap);
2746
2747 SM_I(sbi)->sit_info = NULL;
2748 kfree(sit_i->sit_bitmap);
2947#ifdef CONFIG_F2FS_CHECK_FS
2948 kfree(sit_i->sit_bitmap_mir);
2949#endif
2749 kfree(sit_i);
2750}
2751
2752void destroy_segment_manager(struct f2fs_sb_info *sbi)
2753{
2754 struct f2fs_sm_info *sm_info = SM_I(sbi);
2755
2756 if (!sm_info)
2757 return;
2758 destroy_flush_cmd_control(sbi, true);
2960 destroy_discard_cmd_control(sbi, true);
2759 destroy_dirty_segmap(sbi);
2760 destroy_curseg(sbi);
2761 destroy_free_segmap(sbi);
2762 destroy_sit_info(sbi);
2763 sbi->sm_info = NULL;
2764 kfree(sm_info);
2765}
2766
2767int __init create_segment_manager_caches(void)
2768{
2769 discard_entry_slab = f2fs_kmem_cache_create("discard_entry",
2770 sizeof(struct discard_entry));
2771 if (!discard_entry_slab)
2772 goto fail;
2773
2774 bio_entry_slab = f2fs_kmem_cache_create("bio_entry",
2775 sizeof(struct bio_entry));
2776 if (!bio_entry_slab)
2976 discard_cmd_slab = f2fs_kmem_cache_create("discard_cmd",
2977 sizeof(struct discard_cmd));
2978 if (!discard_cmd_slab)
2777 goto destroy_discard_entry;
2778
2779 sit_entry_set_slab = f2fs_kmem_cache_create("sit_entry_set",
2780 sizeof(struct sit_entry_set));
2781 if (!sit_entry_set_slab)
2782 goto destroy_bio_entry;
2984 goto destroy_discard_cmd;
2783
2784 inmem_entry_slab = f2fs_kmem_cache_create("inmem_page_entry",
2785 sizeof(struct inmem_pages));
2786 if (!inmem_entry_slab)
2787 goto destroy_sit_entry_set;
2788 return 0;
2789
2790destroy_sit_entry_set:
2791 kmem_cache_destroy(sit_entry_set_slab);
2792destroy_bio_entry:
2793 kmem_cache_destroy(bio_entry_slab);
2994destroy_discard_cmd:
2995 kmem_cache_destroy(discard_cmd_slab);
2794destroy_discard_entry:
2795 kmem_cache_destroy(discard_entry_slab);
2796fail:
2797 return -ENOMEM;
2798}
2799
2800void destroy_segment_manager_caches(void)
2801{
2802 kmem_cache_destroy(sit_entry_set_slab);
2803 kmem_cache_destroy(bio_entry_slab);
3005 kmem_cache_destroy(discard_cmd_slab);
2804 kmem_cache_destroy(discard_entry_slab);
2805 kmem_cache_destroy(inmem_entry_slab);
2806}