segment.c (8df22a4d6f5b81c9c1703579d4907b57002689ed, older version, left column) | segment.c (88b88a66797159949cec32eaab12b4968f6fae2d, newer version, right column) |
---|---|
1/* 2 * fs/f2fs/segment.c 3 * 4 * Copyright (c) 2012 Samsung Electronics Co., Ltd. 5 * http://www.samsung.com/ 6 * 7 * This program is free software; you can redistribute it and/or modify 8 * it under the terms of the GNU General Public License version 2 as --- 11 unchanged lines hidden (view full) --- 20#include "f2fs.h" 21#include "segment.h" 22#include "node.h" 23#include <trace/events/f2fs.h> 24 25#define __reverse_ffz(x) __reverse_ffs(~(x)) 26 27static struct kmem_cache *discard_entry_slab; | 1/* 2 * fs/f2fs/segment.c 3 * 4 * Copyright (c) 2012 Samsung Electronics Co., Ltd. 5 * http://www.samsung.com/ 6 * 7 * This program is free software; you can redistribute it and/or modify 8 * it under the terms of the GNU General Public License version 2 as --- 11 unchanged lines hidden (view full) --- 20#include "f2fs.h" 21#include "segment.h" 22#include "node.h" 23#include <trace/events/f2fs.h> 24 25#define __reverse_ffz(x) __reverse_ffs(~(x)) 26 27static struct kmem_cache *discard_entry_slab; |
28static struct kmem_cache *sit_entry_set_slab; 29static struct kmem_cache *inmem_entry_slab; |
|
28 29/* 30 * __reverse_ffs is copied from include/asm-generic/bitops/__ffs.h since 31 * MSB and LSB are reversed in a byte by f2fs_set_bit. 32 */ 33static inline unsigned long __reverse_ffs(unsigned long word) 34{ 35 int num = 0; --- 131 unchanged lines hidden (view full) --- 167found_first: 168 tmp |= ~0UL << size; 169 if (tmp == ~0UL) /* Are any bits zero? */ 170 return result + size; /* Nope. */ 171found_middle: 172 return result + __reverse_ffz(tmp); 173} 174 | 30 31/* 32 * __reverse_ffs is copied from include/asm-generic/bitops/__ffs.h since 33 * MSB and LSB are reversed in a byte by f2fs_set_bit. 34 */ 35static inline unsigned long __reverse_ffs(unsigned long word) 36{ 37 int num = 0; --- 131 unchanged lines hidden (view full) --- 169found_first: 170 tmp |= ~0UL << size; 171 if (tmp == ~0UL) /* Are any bits zero? */ 172 return result + size; /* Nope. */ 173found_middle: 174 return result + __reverse_ffz(tmp); 175} 176 |
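The helpers above exist because f2fs's per-segment validity bitmaps number bits MSB-first within each byte (as the comment on __reverse_ffs notes, f2fs_set_bit reverses MSB and LSB), so the generic __ffs()/ffz() and find_next_*_bit() routines cannot be used directly. A minimal user-space model of that numbering, illustrative only and not the kernel implementation:

```c
#include <stdio.h>

/* Bit 0 is the most-significant bit of byte 0, mirroring what the
 * f2fs bitmap helpers assume; this is a toy model, not kernel code. */
static void set_bit_msb_first(unsigned int nr, unsigned char *addr)
{
	addr[nr >> 3] |= 1u << (7 - (nr & 7));
}

int main(void)
{
	unsigned char map[2] = { 0, 0 };

	set_bit_msb_first(0, map);	/* byte 0 becomes 0x80, not 0x01 */
	set_bit_msb_first(9, map);	/* byte 1 becomes 0x40 */
	printf("%02x %02x\n", map[0], map[1]);	/* prints: 80 40 */
	return 0;
}
```

A plain __ffs() on such a word would report the wrong bit index, which is why __reverse_ffs() and the __find_rev_next_*_bit() variants used later re-derive the search with the byte order reversed.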
177void register_inmem_page(struct inode *inode, struct page *page) 178{ 179 struct f2fs_inode_info *fi = F2FS_I(inode); 180 struct inmem_pages *new; 181 182 new = f2fs_kmem_cache_alloc(inmem_entry_slab, GFP_NOFS); 183 184 /* add atomic page indices to the list */ 185 new->page = page; 186 INIT_LIST_HEAD(&new->list); 187 188 /* increase reference count with clean state */ 189 mutex_lock(&fi->inmem_lock); 190 get_page(page); 191 list_add_tail(&new->list, &fi->inmem_pages); 192 mutex_unlock(&fi->inmem_lock); 193} 194 195void commit_inmem_pages(struct inode *inode, bool abort) 196{ 197 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 198 struct f2fs_inode_info *fi = F2FS_I(inode); 199 struct inmem_pages *cur, *tmp; 200 bool submit_bio = false; 201 struct f2fs_io_info fio = { 202 .type = DATA, 203 .rw = WRITE_SYNC, 204 }; 205 206 f2fs_balance_fs(sbi); 207 f2fs_lock_op(sbi); 208 209 mutex_lock(&fi->inmem_lock); 210 list_for_each_entry_safe(cur, tmp, &fi->inmem_pages, list) { 211 lock_page(cur->page); 212 if (!abort && cur->page->mapping == inode->i_mapping) { 213 f2fs_wait_on_page_writeback(cur->page, DATA); 214 if (clear_page_dirty_for_io(cur->page)) 215 inode_dec_dirty_pages(inode); 216 do_write_data_page(cur->page, &fio); 217 submit_bio = true; 218 } 219 f2fs_put_page(cur->page, 1); 220 list_del(&cur->list); 221 kmem_cache_free(inmem_entry_slab, cur); 222 } 223 if (submit_bio) 224 f2fs_submit_merged_bio(sbi, DATA, WRITE); 225 mutex_unlock(&fi->inmem_lock); 226 227 filemap_fdatawait_range(inode->i_mapping, 0, LLONG_MAX); 228 f2fs_unlock_op(sbi); 229} 230 |
|
175/* 176 * This function balances dirty node and dentry pages. 177 * In addition, it controls garbage collection. 178 */ 179void f2fs_balance_fs(struct f2fs_sb_info *sbi) 180{ 181 /* 182 * We should do GC or end up with checkpoint, if there are so many dirty --- 17 unchanged lines hidden (view full) --- 200{ 201 struct f2fs_sb_info *sbi = data; 202 struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info; 203 wait_queue_head_t *q = &fcc->flush_wait_queue; 204repeat: 205 if (kthread_should_stop()) 206 return 0; 207 | 231/* 232 * This function balances dirty node and dentry pages. 233 * In addition, it controls garbage collection. 234 */ 235void f2fs_balance_fs(struct f2fs_sb_info *sbi) 236{ 237 /* 238 * We should do GC or end up with checkpoint, if there are so many dirty --- 17 unchanged lines hidden (view full) --- 256{ 257 struct f2fs_sb_info *sbi = data; 258 struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info; 259 wait_queue_head_t *q = &fcc->flush_wait_queue; 260repeat: 261 if (kthread_should_stop()) 262 return 0; 263 |
208 spin_lock(&fcc->issue_lock); 209 if (fcc->issue_list) { 210 fcc->dispatch_list = fcc->issue_list; 211 fcc->issue_list = fcc->issue_tail = NULL; 212 } 213 spin_unlock(&fcc->issue_lock); 214 215 if (fcc->dispatch_list) { | 264 if (!llist_empty(&fcc->issue_list)) { |
216 struct bio *bio = bio_alloc(GFP_NOIO, 0); 217 struct flush_cmd *cmd, *next; 218 int ret; 219 | 265 struct bio *bio = bio_alloc(GFP_NOIO, 0); 266 struct flush_cmd *cmd, *next; 267 int ret; 268 |
269 fcc->dispatch_list = llist_del_all(&fcc->issue_list); 270 fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list); 271 |
|
220 bio->bi_bdev = sbi->sb->s_bdev; 221 ret = submit_bio_wait(WRITE_FLUSH, bio); 222 | 272 bio->bi_bdev = sbi->sb->s_bdev; 273 ret = submit_bio_wait(WRITE_FLUSH, bio); 274 |
223 for (cmd = fcc->dispatch_list; cmd; cmd = next) { | 275 llist_for_each_entry_safe(cmd, next, 276 fcc->dispatch_list, llnode) { |
224 cmd->ret = ret; | 277 cmd->ret = ret; |
225 next = cmd->next; | |
226 complete(&cmd->wait); 227 } 228 bio_put(bio); 229 fcc->dispatch_list = NULL; 230 } 231 232 wait_event_interruptible(*q, | 278 complete(&cmd->wait); 279 } 280 bio_put(bio); 281 fcc->dispatch_list = NULL; 282 } 283 284 wait_event_interruptible(*q, |
233 kthread_should_stop() || fcc->issue_list); | 285 kthread_should_stop() || !llist_empty(&fcc->issue_list)); |
234 goto repeat; 235} 236 237int f2fs_issue_flush(struct f2fs_sb_info *sbi) 238{ 239 struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info; 240 struct flush_cmd cmd; 241 242 trace_f2fs_issue_flush(sbi->sb, test_opt(sbi, NOBARRIER), 243 test_opt(sbi, FLUSH_MERGE)); 244 245 if (test_opt(sbi, NOBARRIER)) 246 return 0; 247 248 if (!test_opt(sbi, FLUSH_MERGE)) 249 return blkdev_issue_flush(sbi->sb->s_bdev, GFP_KERNEL, NULL); 250 251 init_completion(&cmd.wait); | 286 goto repeat; 287} 288 289int f2fs_issue_flush(struct f2fs_sb_info *sbi) 290{ 291 struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info; 292 struct flush_cmd cmd; 293 294 trace_f2fs_issue_flush(sbi->sb, test_opt(sbi, NOBARRIER), 295 test_opt(sbi, FLUSH_MERGE)); 296 297 if (test_opt(sbi, NOBARRIER)) 298 return 0; 299 300 if (!test_opt(sbi, FLUSH_MERGE)) 301 return blkdev_issue_flush(sbi->sb->s_bdev, GFP_KERNEL, NULL); 302 303 init_completion(&cmd.wait); |
252 cmd.next = NULL; | |
253 | 304 |
254 spin_lock(&fcc->issue_lock); 255 if (fcc->issue_list) 256 fcc->issue_tail->next = &cmd; 257 else 258 fcc->issue_list = &cmd; 259 fcc->issue_tail = &cmd; 260 spin_unlock(&fcc->issue_lock); | 305 llist_add(&cmd.llnode, &fcc->issue_list); |
261 262 if (!fcc->dispatch_list) 263 wake_up(&fcc->flush_wait_queue); 264 265 wait_for_completion(&cmd.wait); 266 267 return cmd.ret; 268} 269 270int create_flush_cmd_control(struct f2fs_sb_info *sbi) 271{ 272 dev_t dev = sbi->sb->s_bdev->bd_dev; 273 struct flush_cmd_control *fcc; 274 int err = 0; 275 276 fcc = kzalloc(sizeof(struct flush_cmd_control), GFP_KERNEL); 277 if (!fcc) 278 return -ENOMEM; | 306 307 if (!fcc->dispatch_list) 308 wake_up(&fcc->flush_wait_queue); 309 310 wait_for_completion(&cmd.wait); 311 312 return cmd.ret; 313} 314 315int create_flush_cmd_control(struct f2fs_sb_info *sbi) 316{ 317 dev_t dev = sbi->sb->s_bdev->bd_dev; 318 struct flush_cmd_control *fcc; 319 int err = 0; 320 321 fcc = kzalloc(sizeof(struct flush_cmd_control), GFP_KERNEL); 322 if (!fcc) 323 return -ENOMEM; |
279 spin_lock_init(&fcc->issue_lock); | |
280 init_waitqueue_head(&fcc->flush_wait_queue); | 324 init_waitqueue_head(&fcc->flush_wait_queue); |
325 init_llist_head(&fcc->issue_list); |
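In the newer version on the right, the flush-merge machinery drops the issue_lock spinlock and the hand-rolled issue_list/issue_tail pointers in favour of a lock-free llist: each caller of f2fs_issue_flush() pushes its flush_cmd with llist_add(), and the flush thread drains the whole list at once with llist_del_all(), restoring FIFO order via llist_reverse_order() before completing the waiters. A condensed kernel-style sketch of that producer/consumer pattern (struct my_cmd and the function names are illustrative, not the f2fs symbols):

```c
#include <linux/llist.h>
#include <linux/completion.h>
#include <linux/wait.h>

struct my_cmd {
	struct completion wait;
	int ret;
	struct llist_node llnode;
};

/* producer side: many tasks queue a command and sleep on it */
static int my_issue(struct llist_head *issue_list, wait_queue_head_t *wq)
{
	struct my_cmd cmd;

	init_completion(&cmd.wait);
	llist_add(&cmd.llnode, issue_list);	/* atomic push, no spinlock */
	wake_up(wq);
	wait_for_completion(&cmd.wait);
	return cmd.ret;
}

/* consumer side: a single thread grabs everything that was queued */
static void my_drain(struct llist_head *issue_list, int ret)
{
	struct llist_node *list = llist_del_all(issue_list);
	struct my_cmd *cmd, *next;

	list = llist_reverse_order(list);	/* oldest request first */
	llist_for_each_entry_safe(cmd, next, list, llnode) {
		cmd->ret = ret;
		complete(&cmd->wait);
	}
}
```

Because llist_add() and llist_del_all() are atomic, the cmd.next bookkeeping and spin_lock_init() seen in the left column become unnecessary, which is exactly what the removed lines show.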
|
281 SM_I(sbi)->cmd_control_info = fcc; 282 fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi, 283 "f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev)); 284 if (IS_ERR(fcc->f2fs_issue_flush)) { 285 err = PTR_ERR(fcc->f2fs_issue_flush); 286 kfree(fcc); 287 SM_I(sbi)->cmd_control_info = NULL; 288 return err; --- 23 unchanged lines hidden (view full) --- 312 313 if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type])) 314 dirty_i->nr_dirty[dirty_type]++; 315 316 if (dirty_type == DIRTY) { 317 struct seg_entry *sentry = get_seg_entry(sbi, segno); 318 enum dirty_type t = sentry->type; 319 | 326 SM_I(sbi)->cmd_control_info = fcc; 327 fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi, 328 "f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev)); 329 if (IS_ERR(fcc->f2fs_issue_flush)) { 330 err = PTR_ERR(fcc->f2fs_issue_flush); 331 kfree(fcc); 332 SM_I(sbi)->cmd_control_info = NULL; 333 return err; --- 23 unchanged lines hidden (view full) --- 357 358 if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type])) 359 dirty_i->nr_dirty[dirty_type]++; 360 361 if (dirty_type == DIRTY) { 362 struct seg_entry *sentry = get_seg_entry(sbi, segno); 363 enum dirty_type t = sentry->type; 364 |
365 if (unlikely(t >= DIRTY)) { 366 f2fs_bug_on(sbi, 1); 367 return; 368 } |
|
320 if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t])) 321 dirty_i->nr_dirty[t]++; 322 } 323} 324 325static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno, 326 enum dirty_type dirty_type) 327{ --- 43 unchanged lines hidden (view full) --- 371 } 372 373 mutex_unlock(&dirty_i->seglist_lock); 374} 375 376static int f2fs_issue_discard(struct f2fs_sb_info *sbi, 377 block_t blkstart, block_t blklen) 378{ | 369 if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t])) 370 dirty_i->nr_dirty[t]++; 371 } 372} 373 374static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno, 375 enum dirty_type dirty_type) 376{ --- 43 unchanged lines hidden (view full) --- 420 } 421 422 mutex_unlock(&dirty_i->seglist_lock); 423} 424 425static int f2fs_issue_discard(struct f2fs_sb_info *sbi, 426 block_t blkstart, block_t blklen) 427{ |
379 sector_t start = SECTOR_FROM_BLOCK(sbi, blkstart); 380 sector_t len = SECTOR_FROM_BLOCK(sbi, blklen); | 428 sector_t start = SECTOR_FROM_BLOCK(blkstart); 429 sector_t len = SECTOR_FROM_BLOCK(blklen); |
381 trace_f2fs_issue_discard(sbi->sb, blkstart, blklen); 382 return blkdev_issue_discard(sbi->sb->s_bdev, start, len, GFP_NOFS, 0); 383} 384 385void discard_next_dnode(struct f2fs_sb_info *sbi, block_t blkaddr) 386{ 387 if (f2fs_issue_discard(sbi, blkaddr, 1)) { 388 struct page *page = grab_meta_page(sbi, blkaddr); 389 /* zero-filled page */ 390 set_page_dirty(page); 391 f2fs_put_page(page, 1); 392 } 393} 394 | 430 trace_f2fs_issue_discard(sbi->sb, blkstart, blklen); 431 return blkdev_issue_discard(sbi->sb->s_bdev, start, len, GFP_NOFS, 0); 432} 433 434void discard_next_dnode(struct f2fs_sb_info *sbi, block_t blkaddr) 435{ 436 if (f2fs_issue_discard(sbi, blkaddr, 1)) { 437 struct page *page = grab_meta_page(sbi, blkaddr); 438 /* zero-filled page */ 439 set_page_dirty(page); 440 f2fs_put_page(page, 1); 441 } 442} 443 |
395static void add_discard_addrs(struct f2fs_sb_info *sbi, 396 unsigned int segno, struct seg_entry *se) | 444static void add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc) |
397{ 398 struct list_head *head = &SM_I(sbi)->discard_list; 399 struct discard_entry *new; 400 int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long); 401 int max_blocks = sbi->blocks_per_seg; | 445{ 446 struct list_head *head = &SM_I(sbi)->discard_list; 447 struct discard_entry *new; 448 int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long); 449 int max_blocks = sbi->blocks_per_seg; |
450 struct seg_entry *se = get_seg_entry(sbi, cpc->trim_start); |
|
402 unsigned long *cur_map = (unsigned long *)se->cur_valid_map; 403 unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map; 404 unsigned long dmap[entries]; 405 unsigned int start = 0, end = -1; | 451 unsigned long *cur_map = (unsigned long *)se->cur_valid_map; 452 unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map; 453 unsigned long dmap[entries]; 454 unsigned int start = 0, end = -1; |
455 bool force = (cpc->reason == CP_DISCARD); |
|
406 int i; 407 | 456 int i; 457 |
408 if (!test_opt(sbi, DISCARD)) | 458 if (!force && !test_opt(sbi, DISCARD)) |
409 return; 410 | 459 return; 460 |
461 if (force && !se->valid_blocks) { 462 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 463 /* 464 * if this segment is registered in the prefree list, then 465 * we should skip adding a discard candidate, and let the 466 * checkpoint do that later. 467 */ 468 mutex_lock(&dirty_i->seglist_lock); 469 if (test_bit(cpc->trim_start, dirty_i->dirty_segmap[PRE])) { 470 mutex_unlock(&dirty_i->seglist_lock); 471 cpc->trimmed += sbi->blocks_per_seg; 472 return; 473 } 474 mutex_unlock(&dirty_i->seglist_lock); 475 476 new = f2fs_kmem_cache_alloc(discard_entry_slab, GFP_NOFS); 477 INIT_LIST_HEAD(&new->list); 478 new->blkaddr = START_BLOCK(sbi, cpc->trim_start); 479 new->len = sbi->blocks_per_seg; 480 list_add_tail(&new->list, head); 481 SM_I(sbi)->nr_discards += sbi->blocks_per_seg; 482 cpc->trimmed += sbi->blocks_per_seg; 483 return; 484 } 485 |
|
411 /* zero block will be discarded through the prefree list */ 412 if (!se->valid_blocks || se->valid_blocks == max_blocks) 413 return; 414 415 /* SIT_VBLOCK_MAP_SIZE should be multiple of sizeof(unsigned long) */ 416 for (i = 0; i < entries; i++) 417 dmap[i] = (cur_map[i] ^ ckpt_map[i]) & ckpt_map[i]; 418 | 486 /* zero block will be discarded through the prefree list */ 487 if (!se->valid_blocks || se->valid_blocks == max_blocks) 488 return; 489 490 /* SIT_VBLOCK_MAP_SIZE should be multiple of sizeof(unsigned long) */ 491 for (i = 0; i < entries; i++) 492 dmap[i] = (cur_map[i] ^ ckpt_map[i]) & ckpt_map[i]; 493 |
419 while (SM_I(sbi)->nr_discards <= SM_I(sbi)->max_discards) { | 494 while (force || SM_I(sbi)->nr_discards <= SM_I(sbi)->max_discards) { |
420 start = __find_rev_next_bit(dmap, max_blocks, end + 1); 421 if (start >= max_blocks) 422 break; 423 424 end = __find_rev_next_zero_bit(dmap, max_blocks, start + 1); 425 | 495 start = __find_rev_next_bit(dmap, max_blocks, end + 1); 496 if (start >= max_blocks) 497 break; 498 499 end = __find_rev_next_zero_bit(dmap, max_blocks, start + 1); 500 |
501 if (end - start < cpc->trim_minlen) 502 continue; 503 |
|
426 new = f2fs_kmem_cache_alloc(discard_entry_slab, GFP_NOFS); 427 INIT_LIST_HEAD(&new->list); | 504 new = f2fs_kmem_cache_alloc(discard_entry_slab, GFP_NOFS); 505 INIT_LIST_HEAD(&new->list); |
428 new->blkaddr = START_BLOCK(sbi, segno) + start; | 506 new->blkaddr = START_BLOCK(sbi, cpc->trim_start) + start; |
429 new->len = end - start; | 507 new->len = end - start; |
508 cpc->trimmed += end - start; |
|
430 431 list_add_tail(&new->list, head); 432 SM_I(sbi)->nr_discards += end - start; 433 } 434} 435 | 509 510 list_add_tail(&new->list, head); 511 SM_I(sbi)->nr_discards += end - start; 512 } 513} 514 |
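add_discard_addrs() finds its candidates with dmap[i] = (cur_map[i] ^ ckpt_map[i]) & ckpt_map[i]: a block is discardable only if it was valid at the last checkpoint (ckpt bit set) but is no longer valid now (cur bit clear). A stand-alone illustration of that expression with made-up bit values, using the MSB-first numbering described earlier:

```c
#include <stdio.h>

int main(void)
{
	/* one 8-bit slice of a segment's validity maps (hypothetical) */
	unsigned char ckpt = 0xF0;	/* valid at last checkpoint: blocks 0-3 */
	unsigned char cur  = 0x90;	/* still valid now:          blocks 0, 3 */

	/* valid then but invalid now -> safe to discard (blocks 1 and 2) */
	unsigned char dmap = (cur ^ ckpt) & ckpt;

	printf("dmap = 0x%02x\n", dmap);	/* prints: dmap = 0x60 */
	return 0;
}
```

The while loop then walks dmap with __find_rev_next_bit()/__find_rev_next_zero_bit() to turn consecutive discardable blocks into (blkaddr, len) discard_entry records, skipping runs shorter than cpc->trim_minlen.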
515void release_discard_addrs(struct f2fs_sb_info *sbi) 516{ 517 struct list_head *head = &(SM_I(sbi)->discard_list); 518 struct discard_entry *entry, *this; 519 520 /* drop caches */ 521 list_for_each_entry_safe(entry, this, head, list) { 522 list_del(&entry->list); 523 kmem_cache_free(discard_entry_slab, entry); 524 } 525} 526 |
|
436/* 437 * Should call clear_prefree_segments after checkpoint is done. 438 */ 439static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi) 440{ 441 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 442 unsigned int segno; | 527/* 528 * Should call clear_prefree_segments after checkpoint is done. 529 */ 530static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi) 531{ 532 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 533 unsigned int segno; |
443 unsigned int total_segs = TOTAL_SEGS(sbi); | |
444 445 mutex_lock(&dirty_i->seglist_lock); | 534 535 mutex_lock(&dirty_i->seglist_lock); |
446 for_each_set_bit(segno, dirty_i->dirty_segmap[PRE], total_segs) | 536 for_each_set_bit(segno, dirty_i->dirty_segmap[PRE], MAIN_SEGS(sbi)) |
447 __set_test_and_free(sbi, segno); 448 mutex_unlock(&dirty_i->seglist_lock); 449} 450 451void clear_prefree_segments(struct f2fs_sb_info *sbi) 452{ 453 struct list_head *head = &(SM_I(sbi)->discard_list); 454 struct discard_entry *entry, *this; 455 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 456 unsigned long *prefree_map = dirty_i->dirty_segmap[PRE]; | 537 __set_test_and_free(sbi, segno); 538 mutex_unlock(&dirty_i->seglist_lock); 539} 540 541void clear_prefree_segments(struct f2fs_sb_info *sbi) 542{ 543 struct list_head *head = &(SM_I(sbi)->discard_list); 544 struct discard_entry *entry, *this; 545 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 546 unsigned long *prefree_map = dirty_i->dirty_segmap[PRE]; |
457 unsigned int total_segs = TOTAL_SEGS(sbi); | |
458 unsigned int start = 0, end = -1; 459 460 mutex_lock(&dirty_i->seglist_lock); 461 462 while (1) { 463 int i; | 547 unsigned int start = 0, end = -1; 548 549 mutex_lock(&dirty_i->seglist_lock); 550 551 while (1) { 552 int i; |
464 start = find_next_bit(prefree_map, total_segs, end + 1); 465 if (start >= total_segs) | 553 start = find_next_bit(prefree_map, MAIN_SEGS(sbi), end + 1); 554 if (start >= MAIN_SEGS(sbi)) |
466 break; | 555 break; |
467 end = find_next_zero_bit(prefree_map, total_segs, start + 1); | 556 end = find_next_zero_bit(prefree_map, MAIN_SEGS(sbi), 557 start + 1); |
468 469 for (i = start; i < end; i++) 470 clear_bit(i, prefree_map); 471 472 dirty_i->nr_dirty[PRE] -= end - start; 473 474 if (!test_opt(sbi, DISCARD)) 475 continue; --- 7 unchanged lines hidden (view full) --- 483 list_for_each_entry_safe(entry, this, head, list) { 484 f2fs_issue_discard(sbi, entry->blkaddr, entry->len); 485 list_del(&entry->list); 486 SM_I(sbi)->nr_discards -= entry->len; 487 kmem_cache_free(discard_entry_slab, entry); 488 } 489} 490 | 558 559 for (i = start; i < end; i++) 560 clear_bit(i, prefree_map); 561 562 dirty_i->nr_dirty[PRE] -= end - start; 563 564 if (!test_opt(sbi, DISCARD)) 565 continue; --- 7 unchanged lines hidden (view full) --- 573 list_for_each_entry_safe(entry, this, head, list) { 574 f2fs_issue_discard(sbi, entry->blkaddr, entry->len); 575 list_del(&entry->list); 576 SM_I(sbi)->nr_discards -= entry->len; 577 kmem_cache_free(discard_entry_slab, entry); 578 } 579} 580 |
491static void __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno) | 581static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno) |
492{ 493 struct sit_info *sit_i = SIT_I(sbi); | 582{ 583 struct sit_info *sit_i = SIT_I(sbi); |
494 if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap)) | 584 585 if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap)) { |
495 sit_i->dirty_sentries++; | 586 sit_i->dirty_sentries++; |
587 return false; 588 } 589 590 return true; |
|
496} 497 498static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type, 499 unsigned int segno, int modified) 500{ 501 struct seg_entry *se = get_seg_entry(sbi, segno); 502 se->type = type; 503 if (modified) --- 7 unchanged lines hidden (view full) --- 511 long int new_vblocks; 512 513 segno = GET_SEGNO(sbi, blkaddr); 514 515 se = get_seg_entry(sbi, segno); 516 new_vblocks = se->valid_blocks + del; 517 offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr); 518 | 591} 592 593static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type, 594 unsigned int segno, int modified) 595{ 596 struct seg_entry *se = get_seg_entry(sbi, segno); 597 se->type = type; 598 if (modified) --- 7 unchanged lines hidden (view full) --- 606 long int new_vblocks; 607 608 segno = GET_SEGNO(sbi, blkaddr); 609 610 se = get_seg_entry(sbi, segno); 611 new_vblocks = se->valid_blocks + del; 612 offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr); 613 |
519 f2fs_bug_on((new_vblocks >> (sizeof(unsigned short) << 3) || | 614 f2fs_bug_on(sbi, (new_vblocks >> (sizeof(unsigned short) << 3) || |
520 (new_vblocks > sbi->blocks_per_seg))); 521 522 se->valid_blocks = new_vblocks; 523 se->mtime = get_mtime(sbi); 524 SIT_I(sbi)->max_mtime = se->mtime; 525 526 /* Update valid block bitmap */ 527 if (del > 0) { 528 if (f2fs_set_bit(offset, se->cur_valid_map)) | 615 (new_vblocks > sbi->blocks_per_seg))); 616 617 se->valid_blocks = new_vblocks; 618 se->mtime = get_mtime(sbi); 619 SIT_I(sbi)->max_mtime = se->mtime; 620 621 /* Update valid block bitmap */ 622 if (del > 0) { 623 if (f2fs_set_bit(offset, se->cur_valid_map)) |
529 BUG(); | 624 f2fs_bug_on(sbi, 1); |
530 } else { 531 if (!f2fs_clear_bit(offset, se->cur_valid_map)) | 625 } else { 626 if (!f2fs_clear_bit(offset, se->cur_valid_map)) |
532 BUG(); | 627 f2fs_bug_on(sbi, 1); |
533 } 534 if (!f2fs_test_bit(offset, se->ckpt_valid_map)) 535 se->ckpt_valid_blocks += del; 536 537 __mark_sit_entry_dirty(sbi, segno); 538 539 /* update total number of valid blocks to be written in ckpt area */ 540 SIT_I(sbi)->written_valid_blocks += del; --- 12 unchanged lines hidden (view full) --- 553 locate_dirty_segment(sbi, GET_SEGNO(sbi, new)); 554} 555 556void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr) 557{ 558 unsigned int segno = GET_SEGNO(sbi, addr); 559 struct sit_info *sit_i = SIT_I(sbi); 560 | 628 } 629 if (!f2fs_test_bit(offset, se->ckpt_valid_map)) 630 se->ckpt_valid_blocks += del; 631 632 __mark_sit_entry_dirty(sbi, segno); 633 634 /* update total number of valid blocks to be written in ckpt area */ 635 SIT_I(sbi)->written_valid_blocks += del; --- 12 unchanged lines hidden (view full) --- 648 locate_dirty_segment(sbi, GET_SEGNO(sbi, new)); 649} 650 651void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr) 652{ 653 unsigned int segno = GET_SEGNO(sbi, addr); 654 struct sit_info *sit_i = SIT_I(sbi); 655 |
561 f2fs_bug_on(addr == NULL_ADDR); | 656 f2fs_bug_on(sbi, addr == NULL_ADDR); |
562 if (addr == NEW_ADDR) 563 return; 564 565 /* add it into sit main buffer */ 566 mutex_lock(&sit_i->sentry_lock); 567 568 update_sit_entry(sbi, addr, -1); 569 --- 59 unchanged lines hidden (view full) --- 629} 630 631static int is_next_segment_free(struct f2fs_sb_info *sbi, int type) 632{ 633 struct curseg_info *curseg = CURSEG_I(sbi, type); 634 unsigned int segno = curseg->segno + 1; 635 struct free_segmap_info *free_i = FREE_I(sbi); 636 | 657 if (addr == NEW_ADDR) 658 return; 659 660 /* add it into sit main buffer */ 661 mutex_lock(&sit_i->sentry_lock); 662 663 update_sit_entry(sbi, addr, -1); 664 --- 59 unchanged lines hidden (view full) --- 724} 725 726static int is_next_segment_free(struct f2fs_sb_info *sbi, int type) 727{ 728 struct curseg_info *curseg = CURSEG_I(sbi, type); 729 unsigned int segno = curseg->segno + 1; 730 struct free_segmap_info *free_i = FREE_I(sbi); 731 |
637 if (segno < TOTAL_SEGS(sbi) && segno % sbi->segs_per_sec) | 732 if (segno < MAIN_SEGS(sbi) && segno % sbi->segs_per_sec) |
638 return !test_bit(segno, free_i->free_segmap); 639 return 0; 640} 641 642/* 643 * Find a new segment from the free segments bitmap to right order 644 * This function should be returned with success, otherwise BUG 645 */ 646static void get_new_segment(struct f2fs_sb_info *sbi, 647 unsigned int *newseg, bool new_sec, int dir) 648{ 649 struct free_segmap_info *free_i = FREE_I(sbi); 650 unsigned int segno, secno, zoneno; | 733 return !test_bit(segno, free_i->free_segmap); 734 return 0; 735} 736 737/* 738 * Find a new segment from the free segments bitmap to right order 739 * This function should be returned with success, otherwise BUG 740 */ 741static void get_new_segment(struct f2fs_sb_info *sbi, 742 unsigned int *newseg, bool new_sec, int dir) 743{ 744 struct free_segmap_info *free_i = FREE_I(sbi); 745 unsigned int segno, secno, zoneno; |
651 unsigned int total_zones = TOTAL_SECS(sbi) / sbi->secs_per_zone; | 746 unsigned int total_zones = MAIN_SECS(sbi) / sbi->secs_per_zone; |
652 unsigned int hint = *newseg / sbi->segs_per_sec; 653 unsigned int old_zoneno = GET_ZONENO_FROM_SEGNO(sbi, *newseg); 654 unsigned int left_start = hint; 655 bool init = true; 656 int go_left = 0; 657 int i; 658 659 write_lock(&free_i->segmap_lock); 660 661 if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) { 662 segno = find_next_zero_bit(free_i->free_segmap, | 747 unsigned int hint = *newseg / sbi->segs_per_sec; 748 unsigned int old_zoneno = GET_ZONENO_FROM_SEGNO(sbi, *newseg); 749 unsigned int left_start = hint; 750 bool init = true; 751 int go_left = 0; 752 int i; 753 754 write_lock(&free_i->segmap_lock); 755 756 if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) { 757 segno = find_next_zero_bit(free_i->free_segmap, |
663 TOTAL_SEGS(sbi), *newseg + 1); | 758 MAIN_SEGS(sbi), *newseg + 1); |
664 if (segno - *newseg < sbi->segs_per_sec - 665 (*newseg % sbi->segs_per_sec)) 666 goto got_it; 667 } 668find_other_zone: | 759 if (segno - *newseg < sbi->segs_per_sec - 760 (*newseg % sbi->segs_per_sec)) 761 goto got_it; 762 } 763find_other_zone: |
669 secno = find_next_zero_bit(free_i->free_secmap, TOTAL_SECS(sbi), hint); 670 if (secno >= TOTAL_SECS(sbi)) { | 764 secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint); 765 if (secno >= MAIN_SECS(sbi)) { |
671 if (dir == ALLOC_RIGHT) { 672 secno = find_next_zero_bit(free_i->free_secmap, | 766 if (dir == ALLOC_RIGHT) { 767 secno = find_next_zero_bit(free_i->free_secmap, |
673 TOTAL_SECS(sbi), 0); 674 f2fs_bug_on(secno >= TOTAL_SECS(sbi)); | 768 MAIN_SECS(sbi), 0); 769 f2fs_bug_on(sbi, secno >= MAIN_SECS(sbi)); |
675 } else { 676 go_left = 1; 677 left_start = hint - 1; 678 } 679 } 680 if (go_left == 0) 681 goto skip_left; 682 683 while (test_bit(left_start, free_i->free_secmap)) { 684 if (left_start > 0) { 685 left_start--; 686 continue; 687 } 688 left_start = find_next_zero_bit(free_i->free_secmap, | 770 } else { 771 go_left = 1; 772 left_start = hint - 1; 773 } 774 } 775 if (go_left == 0) 776 goto skip_left; 777 778 while (test_bit(left_start, free_i->free_secmap)) { 779 if (left_start > 0) { 780 left_start--; 781 continue; 782 } 783 left_start = find_next_zero_bit(free_i->free_secmap, |
689 TOTAL_SECS(sbi), 0); 690 f2fs_bug_on(left_start >= TOTAL_SECS(sbi)); | 784 MAIN_SECS(sbi), 0); 785 f2fs_bug_on(sbi, left_start >= MAIN_SECS(sbi)); |
691 break; 692 } 693 secno = left_start; 694skip_left: 695 hint = secno; 696 segno = secno * sbi->segs_per_sec; 697 zoneno = secno / sbi->secs_per_zone; 698 --- 22 unchanged lines hidden (view full) --- 721 hint = 0; 722 else 723 hint = (zoneno + 1) * sbi->secs_per_zone; 724 init = false; 725 goto find_other_zone; 726 } 727got_it: 728 /* set it as dirty segment in free segmap */ | 786 break; 787 } 788 secno = left_start; 789skip_left: 790 hint = secno; 791 segno = secno * sbi->segs_per_sec; 792 zoneno = secno / sbi->secs_per_zone; 793 --- 22 unchanged lines hidden (view full) --- 816 hint = 0; 817 else 818 hint = (zoneno + 1) * sbi->secs_per_zone; 819 init = false; 820 goto find_other_zone; 821 } 822got_it: 823 /* set it as dirty segment in free segmap */ |
729 f2fs_bug_on(test_bit(segno, free_i->free_segmap)); | 824 f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap)); |
730 __set_inuse(sbi, segno); 731 *newseg = segno; 732 write_unlock(&free_i->segmap_lock); 733} 734 735static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified) 736{ 737 struct curseg_info *curseg = CURSEG_I(sbi, type); --- 155 unchanged lines hidden (view full) --- 893 locate_dirty_segment(sbi, old_curseg); 894 } 895} 896 897static const struct segment_allocation default_salloc_ops = { 898 .allocate_segment = allocate_segment_by_default, 899}; 900 | 825 __set_inuse(sbi, segno); 826 *newseg = segno; 827 write_unlock(&free_i->segmap_lock); 828} 829 830static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified) 831{ 832 struct curseg_info *curseg = CURSEG_I(sbi, type); --- 155 unchanged lines hidden (view full) --- 988 locate_dirty_segment(sbi, old_curseg); 989 } 990} 991 992static const struct segment_allocation default_salloc_ops = { 993 .allocate_segment = allocate_segment_by_default, 994}; 995 |
996int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range) 997{ 998 __u64 start = range->start >> sbi->log_blocksize; 999 __u64 end = start + (range->len >> sbi->log_blocksize) - 1; 1000 unsigned int start_segno, end_segno; 1001 struct cp_control cpc; 1002 1003 if (range->minlen > SEGMENT_SIZE(sbi) || start >= MAX_BLKADDR(sbi) || 1004 range->len < sbi->blocksize) 1005 return -EINVAL; 1006 1007 if (end <= MAIN_BLKADDR(sbi)) 1008 goto out; 1009 1010 /* start/end segment number in main_area */ 1011 start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start); 1012 end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 : 1013 GET_SEGNO(sbi, end); 1014 cpc.reason = CP_DISCARD; 1015 cpc.trim_start = start_segno; 1016 cpc.trim_end = end_segno; 1017 cpc.trim_minlen = range->minlen >> sbi->log_blocksize; 1018 cpc.trimmed = 0; 1019 1020 /* do checkpoint to issue discard commands safely */ 1021 write_checkpoint(sbi, &cpc); 1022out: 1023 range->len = cpc.trimmed << sbi->log_blocksize; 1024 return 0; 1025} 1026 |
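f2fs_trim_fs() is new in the right-hand version: it converts the byte-based fstrim_range into a span of main-area segments, records it in a cp_control with reason CP_DISCARD, and lets write_checkpoint() issue the discards safely, reporting the trimmed byte count back through range->len. struct fstrim_range is the argument of the generic FITRIM ioctl, so a request would typically originate from user space roughly like the sketch below (the mount-point path and minlen value are placeholders, and the exact wiring from the ioctl to this function depends on the f2fs ioctl handler):

```c
#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/fs.h>		/* FITRIM, struct fstrim_range */

int main(void)
{
	struct fstrim_range range;
	int fd = open("/mnt/f2fs", O_RDONLY);	/* any file/dir on the fs */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&range, 0, sizeof(range));
	range.len = ULLONG_MAX;		/* trim the whole filesystem */
	range.minlen = 1u << 20;	/* ignore extents smaller than 1MB */

	if (ioctl(fd, FITRIM, &range) < 0)
		perror("FITRIM");
	else
		printf("trimmed %llu bytes\n", (unsigned long long)range.len);
	return 0;
}
```

Note how the kernel side rejects ranges whose minlen exceeds a segment or whose length is below one block, mirroring the -EINVAL check at the top of f2fs_trim_fs().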
|
901static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type) 902{ 903 struct curseg_info *curseg = CURSEG_I(sbi, type); 904 if (curseg->next_blkoff < sbi->blocks_per_seg) 905 return true; 906 return false; 907} 908 --- 39 unchanged lines hidden (view full) --- 948 CURSEG_HOT_NODE; 949 else 950 return CURSEG_COLD_NODE; 951 } 952} 953 954static int __get_segment_type(struct page *page, enum page_type p_type) 955{ | 1027static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type) 1028{ 1029 struct curseg_info *curseg = CURSEG_I(sbi, type); 1030 if (curseg->next_blkoff < sbi->blocks_per_seg) 1031 return true; 1032 return false; 1033} 1034 --- 39 unchanged lines hidden (view full) --- 1074 CURSEG_HOT_NODE; 1075 else 1076 return CURSEG_COLD_NODE; 1077 } 1078} 1079 1080static int __get_segment_type(struct page *page, enum page_type p_type) 1081{ |
956 struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb); 957 switch (sbi->active_logs) { | 1082 switch (F2FS_P_SB(page)->active_logs) { |
958 case 2: 959 return __get_segment_type_2(page, p_type); 960 case 4: 961 return __get_segment_type_4(page, p_type); 962 } 963 /* NR_CURSEG_TYPE(6) logs by default */ | 1083 case 2: 1084 return __get_segment_type_2(page, p_type); 1085 case 4: 1086 return __get_segment_type_4(page, p_type); 1087 } 1088 /* NR_CURSEG_TYPE(6) logs by default */ |
964 f2fs_bug_on(sbi->active_logs != NR_CURSEG_TYPE); | 1089 f2fs_bug_on(F2FS_P_SB(page), 1090 F2FS_P_SB(page)->active_logs != NR_CURSEG_TYPE); |
965 return __get_segment_type_6(page, p_type); 966} 967 968void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page, 969 block_t old_blkaddr, block_t *new_blkaddr, 970 struct f2fs_summary *sum, int type) 971{ 972 struct sit_info *sit_i = SIT_I(sbi); --- 63 unchanged lines hidden (view full) --- 1036 struct f2fs_summary sum; 1037 set_summary(&sum, nid, 0, 0); 1038 do_write_page(sbi, page, old_blkaddr, new_blkaddr, &sum, fio); 1039} 1040 1041void write_data_page(struct page *page, struct dnode_of_data *dn, 1042 block_t *new_blkaddr, struct f2fs_io_info *fio) 1043{ | 1091 return __get_segment_type_6(page, p_type); 1092} 1093 1094void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page, 1095 block_t old_blkaddr, block_t *new_blkaddr, 1096 struct f2fs_summary *sum, int type) 1097{ 1098 struct sit_info *sit_i = SIT_I(sbi); --- 63 unchanged lines hidden (view full) --- 1162 struct f2fs_summary sum; 1163 set_summary(&sum, nid, 0, 0); 1164 do_write_page(sbi, page, old_blkaddr, new_blkaddr, &sum, fio); 1165} 1166 1167void write_data_page(struct page *page, struct dnode_of_data *dn, 1168 block_t *new_blkaddr, struct f2fs_io_info *fio) 1169{ |
1044 struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb); | 1170 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); |
1045 struct f2fs_summary sum; 1046 struct node_info ni; 1047 | 1171 struct f2fs_summary sum; 1172 struct node_info ni; 1173 |
1048 f2fs_bug_on(dn->data_blkaddr == NULL_ADDR); | 1174 f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR); |
1049 get_node_info(sbi, dn->nid, &ni); 1050 set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version); 1051 1052 do_write_page(sbi, page, dn->data_blkaddr, new_blkaddr, &sum, fio); 1053} 1054 1055void rewrite_data_page(struct page *page, block_t old_blkaddr, 1056 struct f2fs_io_info *fio) 1057{ | 1175 get_node_info(sbi, dn->nid, &ni); 1176 set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version); 1177 1178 do_write_page(sbi, page, dn->data_blkaddr, new_blkaddr, &sum, fio); 1179} 1180 1181void rewrite_data_page(struct page *page, block_t old_blkaddr, 1182 struct f2fs_io_info *fio) 1183{ |
1058 struct inode *inode = page->mapping->host; 1059 struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); 1060 f2fs_submit_page_mbio(sbi, page, old_blkaddr, fio); | 1184 f2fs_submit_page_mbio(F2FS_P_SB(page), page, old_blkaddr, fio); |
1061} 1062 1063void recover_data_page(struct f2fs_sb_info *sbi, 1064 struct page *page, struct f2fs_summary *sum, 1065 block_t old_blkaddr, block_t new_blkaddr) 1066{ 1067 struct sit_info *sit_i = SIT_I(sbi); 1068 struct curseg_info *curseg; --- 56 unchanged lines hidden (view full) --- 1125out: 1126 up_read(&io->io_rwsem); 1127 return false; 1128} 1129 1130void f2fs_wait_on_page_writeback(struct page *page, 1131 enum page_type type) 1132{ | 1185} 1186 1187void recover_data_page(struct f2fs_sb_info *sbi, 1188 struct page *page, struct f2fs_summary *sum, 1189 block_t old_blkaddr, block_t new_blkaddr) 1190{ 1191 struct sit_info *sit_i = SIT_I(sbi); 1192 struct curseg_info *curseg; --- 56 unchanged lines hidden (view full) --- 1249out: 1250 up_read(&io->io_rwsem); 1251 return false; 1252} 1253 1254void f2fs_wait_on_page_writeback(struct page *page, 1255 enum page_type type) 1256{ |
1133 struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb); | |
1134 if (PageWriteback(page)) { | 1257 if (PageWriteback(page)) { |
1258 struct f2fs_sb_info *sbi = F2FS_P_SB(page); 1259 |
|
1135 if (is_merged_page(sbi, page, type)) 1136 f2fs_submit_merged_bio(sbi, type, WRITE); 1137 wait_on_page_writeback(page); 1138 } 1139} 1140 1141static int read_compacted_summaries(struct f2fs_sb_info *sbi) 1142{ --- 252 unchanged lines hidden (view full) --- 1395 } 1396 return -1; 1397} 1398 1399static struct page *get_current_sit_page(struct f2fs_sb_info *sbi, 1400 unsigned int segno) 1401{ 1402 struct sit_info *sit_i = SIT_I(sbi); | 1260 if (is_merged_page(sbi, page, type)) 1261 f2fs_submit_merged_bio(sbi, type, WRITE); 1262 wait_on_page_writeback(page); 1263 } 1264} 1265 1266static int read_compacted_summaries(struct f2fs_sb_info *sbi) 1267{ --- 252 unchanged lines hidden (view full) --- 1520 } 1521 return -1; 1522} 1523 1524static struct page *get_current_sit_page(struct f2fs_sb_info *sbi, 1525 unsigned int segno) 1526{ 1527 struct sit_info *sit_i = SIT_I(sbi); |
1403 unsigned int offset = SIT_BLOCK_OFFSET(sit_i, segno); | 1528 unsigned int offset = SIT_BLOCK_OFFSET(segno); |
1404 block_t blk_addr = sit_i->sit_base_addr + offset; 1405 1406 check_seg_range(sbi, segno); 1407 1408 /* calculate sit block address */ 1409 if (f2fs_test_bit(offset, sit_i->sit_bitmap)) 1410 blk_addr += sit_i->sit_blocks; 1411 --- 9 unchanged lines hidden (view full) --- 1421 void *src_addr, *dst_addr; 1422 1423 src_off = current_sit_addr(sbi, start); 1424 dst_off = next_sit_addr(sbi, src_off); 1425 1426 /* get current sit block page without lock */ 1427 src_page = get_meta_page(sbi, src_off); 1428 dst_page = grab_meta_page(sbi, dst_off); | 1529 block_t blk_addr = sit_i->sit_base_addr + offset; 1530 1531 check_seg_range(sbi, segno); 1532 1533 /* calculate sit block address */ 1534 if (f2fs_test_bit(offset, sit_i->sit_bitmap)) 1535 blk_addr += sit_i->sit_blocks; 1536 --- 9 unchanged lines hidden (view full) --- 1546 void *src_addr, *dst_addr; 1547 1548 src_off = current_sit_addr(sbi, start); 1549 dst_off = next_sit_addr(sbi, src_off); 1550 1551 /* get current sit block page without lock */ 1552 src_page = get_meta_page(sbi, src_off); 1553 dst_page = grab_meta_page(sbi, dst_off); |
1429 f2fs_bug_on(PageDirty(src_page)); | 1554 f2fs_bug_on(sbi, PageDirty(src_page)); |
1430 1431 src_addr = page_address(src_page); 1432 dst_addr = page_address(dst_page); 1433 memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE); 1434 1435 set_page_dirty(dst_page); 1436 f2fs_put_page(src_page, 1); 1437 1438 set_to_next_sit(sit_i, start); 1439 1440 return dst_page; 1441} 1442 | 1555 1556 src_addr = page_address(src_page); 1557 dst_addr = page_address(dst_page); 1558 memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE); 1559 1560 set_page_dirty(dst_page); 1561 f2fs_put_page(src_page, 1); 1562 1563 set_to_next_sit(sit_i, start); 1564 1565 return dst_page; 1566} 1567 |
1443static bool flush_sits_in_journal(struct f2fs_sb_info *sbi) | 1568static struct sit_entry_set *grab_sit_entry_set(void) |
1444{ | 1569{ |
1570 struct sit_entry_set *ses = 1571 f2fs_kmem_cache_alloc(sit_entry_set_slab, GFP_ATOMIC); 1572 1573 ses->entry_cnt = 0; 1574 INIT_LIST_HEAD(&ses->set_list); 1575 return ses; 1576} 1577 1578static void release_sit_entry_set(struct sit_entry_set *ses) 1579{ 1580 list_del(&ses->set_list); 1581 kmem_cache_free(sit_entry_set_slab, ses); 1582} 1583 1584static void adjust_sit_entry_set(struct sit_entry_set *ses, 1585 struct list_head *head) 1586{ 1587 struct sit_entry_set *next = ses; 1588 1589 if (list_is_last(&ses->set_list, head)) 1590 return; 1591 1592 list_for_each_entry_continue(next, head, set_list) 1593 if (ses->entry_cnt <= next->entry_cnt) 1594 break; 1595 1596 list_move_tail(&ses->set_list, &next->set_list); 1597} 1598 1599static void add_sit_entry(unsigned int segno, struct list_head *head) 1600{ 1601 struct sit_entry_set *ses; 1602 unsigned int start_segno = START_SEGNO(segno); 1603 1604 list_for_each_entry(ses, head, set_list) { 1605 if (ses->start_segno == start_segno) { 1606 ses->entry_cnt++; 1607 adjust_sit_entry_set(ses, head); 1608 return; 1609 } 1610 } 1611 1612 ses = grab_sit_entry_set(); 1613 1614 ses->start_segno = start_segno; 1615 ses->entry_cnt++; 1616 list_add(&ses->set_list, head); 1617} 1618 1619static void add_sits_in_set(struct f2fs_sb_info *sbi) 1620{ 1621 struct f2fs_sm_info *sm_info = SM_I(sbi); 1622 struct list_head *set_list = &sm_info->sit_entry_set; 1623 unsigned long *bitmap = SIT_I(sbi)->dirty_sentries_bitmap; 1624 unsigned int segno; 1625 1626 for_each_set_bit(segno, bitmap, MAIN_SEGS(sbi)) 1627 add_sit_entry(segno, set_list); 1628} 1629 1630static void remove_sits_in_journal(struct f2fs_sb_info *sbi) 1631{ |
|
1445 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA); 1446 struct f2fs_summary_block *sum = curseg->sum_blk; 1447 int i; 1448 | 1632 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA); 1633 struct f2fs_summary_block *sum = curseg->sum_blk; 1634 int i; 1635 |
1449 /* 1450 * If the journal area in the current summary is full of sit entries, 1451 * all the sit entries will be flushed. Otherwise the sit entries 1452 * are not able to replace with newly hot sit entries. 1453 */ 1454 if (sits_in_cursum(sum) >= SIT_JOURNAL_ENTRIES) { 1455 for (i = sits_in_cursum(sum) - 1; i >= 0; i--) { 1456 unsigned int segno; 1457 segno = le32_to_cpu(segno_in_journal(sum, i)); 1458 __mark_sit_entry_dirty(sbi, segno); 1459 } 1460 update_sits_in_cursum(sum, -sits_in_cursum(sum)); 1461 return true; | 1636 for (i = sits_in_cursum(sum) - 1; i >= 0; i--) { 1637 unsigned int segno; 1638 bool dirtied; 1639 1640 segno = le32_to_cpu(segno_in_journal(sum, i)); 1641 dirtied = __mark_sit_entry_dirty(sbi, segno); 1642 1643 if (!dirtied) 1644 add_sit_entry(segno, &SM_I(sbi)->sit_entry_set); |
1462 } | 1645 } |
1463 return false; | 1646 update_sits_in_cursum(sum, -sits_in_cursum(sum)); |
1464} 1465 1466/* 1467 * CP calls this function, which flushes SIT entries including sit_journal, 1468 * and moves prefree segs to free segs. 1469 */ | 1647} 1648 1649/* 1650 * CP calls this function, which flushes SIT entries including sit_journal, 1651 * and moves prefree segs to free segs. 1652 */ |
1470void flush_sit_entries(struct f2fs_sb_info *sbi) | 1653void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc) |
1471{ 1472 struct sit_info *sit_i = SIT_I(sbi); 1473 unsigned long *bitmap = sit_i->dirty_sentries_bitmap; 1474 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA); 1475 struct f2fs_summary_block *sum = curseg->sum_blk; | 1654{ 1655 struct sit_info *sit_i = SIT_I(sbi); 1656 unsigned long *bitmap = sit_i->dirty_sentries_bitmap; 1657 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA); 1658 struct f2fs_summary_block *sum = curseg->sum_blk; |
1476 unsigned long nsegs = TOTAL_SEGS(sbi); 1477 struct page *page = NULL; 1478 struct f2fs_sit_block *raw_sit = NULL; 1479 unsigned int start = 0, end = 0; 1480 unsigned int segno; 1481 bool flushed; | 1659 struct sit_entry_set *ses, *tmp; 1660 struct list_head *head = &SM_I(sbi)->sit_entry_set; 1661 bool to_journal = true; 1662 struct seg_entry *se; |
1482 1483 mutex_lock(&curseg->curseg_mutex); 1484 mutex_lock(&sit_i->sentry_lock); 1485 1486 /* | 1663 1664 mutex_lock(&curseg->curseg_mutex); 1665 mutex_lock(&sit_i->sentry_lock); 1666 1667 /* |
1487 * "flushed" indicates whether sit entries in journal are flushed 1488 * to the SIT area or not. | 1668 * add and account sit entries of dirty bitmap in sit entry 1669 * set temporarily |
1489 */ | 1670 */ |
1490 flushed = flush_sits_in_journal(sbi); | 1671 add_sits_in_set(sbi); |
1491 | 1672 |
1492 for_each_set_bit(segno, bitmap, nsegs) { 1493 struct seg_entry *se = get_seg_entry(sbi, segno); 1494 int sit_offset, offset; | 1673 /* 1674 * if there are no enough space in journal to store dirty sit 1675 * entries, remove all entries from journal and add and account 1676 * them in sit entry set. 1677 */ 1678 if (!__has_cursum_space(sum, sit_i->dirty_sentries, SIT_JOURNAL)) 1679 remove_sits_in_journal(sbi); |
1495 | 1680 |
1496 sit_offset = SIT_ENTRY_OFFSET(sit_i, segno); | 1681 if (!sit_i->dirty_sentries) 1682 goto out; |
1497 | 1683 |
1498 /* add discard candidates */ 1499 if (SM_I(sbi)->nr_discards < SM_I(sbi)->max_discards) 1500 add_discard_addrs(sbi, segno, se); | 1684 /* 1685 * there are two steps to flush sit entries: 1686 * #1, flush sit entries to journal in current cold data summary block. 1687 * #2, flush sit entries to sit page. 1688 */ 1689 list_for_each_entry_safe(ses, tmp, head, set_list) { 1690 struct page *page; 1691 struct f2fs_sit_block *raw_sit = NULL; 1692 unsigned int start_segno = ses->start_segno; 1693 unsigned int end = min(start_segno + SIT_ENTRY_PER_BLOCK, 1694 (unsigned long)MAIN_SEGS(sbi)); 1695 unsigned int segno = start_segno; |
1501 | 1696 |
1502 if (flushed) 1503 goto to_sit_page; | 1697 if (to_journal && 1698 !__has_cursum_space(sum, ses->entry_cnt, SIT_JOURNAL)) 1699 to_journal = false; |
1504 | 1700 |
1505 offset = lookup_journal_in_cursum(sum, SIT_JOURNAL, segno, 1); 1506 if (offset >= 0) { 1507 segno_in_journal(sum, offset) = cpu_to_le32(segno); 1508 seg_info_to_raw_sit(se, &sit_in_journal(sum, offset)); 1509 goto flush_done; | 1701 if (!to_journal) { 1702 page = get_next_sit_page(sbi, start_segno); 1703 raw_sit = page_address(page); |
1510 } | 1704 } |
1511to_sit_page: 1512 if (!page || (start > segno) || (segno > end)) { 1513 if (page) { 1514 f2fs_put_page(page, 1); 1515 page = NULL; | 1705 1706 /* flush dirty sit entries in region of current sit set */ 1707 for_each_set_bit_from(segno, bitmap, end) { 1708 int offset, sit_offset; 1709 1710 se = get_seg_entry(sbi, segno); 1711 1712 /* add discard candidates */ 1713 if (SM_I(sbi)->nr_discards < SM_I(sbi)->max_discards) { 1714 cpc->trim_start = segno; 1715 add_discard_addrs(sbi, cpc); |
1516 } 1517 | 1716 } 1717 |
1518 start = START_SEGNO(sit_i, segno); 1519 end = start + SIT_ENTRY_PER_BLOCK - 1; | 1718 if (to_journal) { 1719 offset = lookup_journal_in_cursum(sum, 1720 SIT_JOURNAL, segno, 1); 1721 f2fs_bug_on(sbi, offset < 0); 1722 segno_in_journal(sum, offset) = 1723 cpu_to_le32(segno); 1724 seg_info_to_raw_sit(se, 1725 &sit_in_journal(sum, offset)); 1726 } else { 1727 sit_offset = SIT_ENTRY_OFFSET(sit_i, segno); 1728 seg_info_to_raw_sit(se, 1729 &raw_sit->entries[sit_offset]); 1730 } |
1520 | 1731 |
1521 /* read sit block that will be updated */ 1522 page = get_next_sit_page(sbi, start); 1523 raw_sit = page_address(page); | 1732 __clear_bit(segno, bitmap); 1733 sit_i->dirty_sentries--; 1734 ses->entry_cnt--; |
1524 } 1525 | 1735 } 1736 |
1526 /* udpate entry in SIT block */ 1527 seg_info_to_raw_sit(se, &raw_sit->entries[sit_offset]); 1528flush_done: 1529 __clear_bit(segno, bitmap); 1530 sit_i->dirty_sentries--; | 1737 if (!to_journal) 1738 f2fs_put_page(page, 1); 1739 1740 f2fs_bug_on(sbi, ses->entry_cnt); 1741 release_sit_entry_set(ses); |
1531 } | 1742 } |
1743 1744 f2fs_bug_on(sbi, !list_empty(head)); 1745 f2fs_bug_on(sbi, sit_i->dirty_sentries); 1746out: 1747 if (cpc->reason == CP_DISCARD) { 1748 for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++) 1749 add_discard_addrs(sbi, cpc); 1750 } |
|
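The rewritten flush path above first folds every dirty segment into a sit_entry_set keyed by START_SEGNO() (one set per on-disk SIT block) and keeps the sets ordered by entry count, so small sets can be absorbed into the cold-data summary journal while larger ones are written back one SIT page per set. A toy illustration of the grouping idea; the SIT_ENTRY_PER_BLOCK value here is only an assumption for the example, and START_SEGNO is re-derived rather than taken from the f2fs macro:

```c
#include <stdio.h>

#define SIT_ENTRY_PER_BLOCK 55	/* assumed value, for illustration only */
#define START_SEGNO(segno) \
	(((segno) / SIT_ENTRY_PER_BLOCK) * SIT_ENTRY_PER_BLOCK)

int main(void)
{
	unsigned int dirty[] = { 3, 7, 60, 61, 200 };
	size_t i;

	/* 3 and 7 share the set starting at 0, 60 and 61 the set starting
	 * at 55, and 200 the set starting at 165: five dirty segments but
	 * only three SIT block pages to update at checkpoint time. */
	for (i = 0; i < sizeof(dirty) / sizeof(dirty[0]); i++)
		printf("segno %3u -> set %u\n", dirty[i], START_SEGNO(dirty[i]));
	return 0;
}
```

Compared with the left-hand loop, which decided per segment whether to use the journal or re-read a SIT page, batching by set lets __has_cursum_space() be checked once per set and replaces the flush_sits_in_journal() pre-pass with remove_sits_in_journal().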
1532 mutex_unlock(&sit_i->sentry_lock); 1533 mutex_unlock(&curseg->curseg_mutex); 1534 | 1751 mutex_unlock(&sit_i->sentry_lock); 1752 mutex_unlock(&curseg->curseg_mutex); 1753 |
1535 /* writeout last modified SIT block */ 1536 f2fs_put_page(page, 1); 1537 | |
1538 set_prefree_as_free_segments(sbi); 1539} 1540 1541static int build_sit_info(struct f2fs_sb_info *sbi) 1542{ 1543 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); 1544 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); 1545 struct sit_info *sit_i; 1546 unsigned int sit_segs, start; 1547 char *src_bitmap, *dst_bitmap; 1548 unsigned int bitmap_size; 1549 1550 /* allocate memory for SIT information */ 1551 sit_i = kzalloc(sizeof(struct sit_info), GFP_KERNEL); 1552 if (!sit_i) 1553 return -ENOMEM; 1554 1555 SM_I(sbi)->sit_info = sit_i; 1556 | 1754 set_prefree_as_free_segments(sbi); 1755} 1756 1757static int build_sit_info(struct f2fs_sb_info *sbi) 1758{ 1759 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); 1760 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); 1761 struct sit_info *sit_i; 1762 unsigned int sit_segs, start; 1763 char *src_bitmap, *dst_bitmap; 1764 unsigned int bitmap_size; 1765 1766 /* allocate memory for SIT information */ 1767 sit_i = kzalloc(sizeof(struct sit_info), GFP_KERNEL); 1768 if (!sit_i) 1769 return -ENOMEM; 1770 1771 SM_I(sbi)->sit_info = sit_i; 1772 |
1557 sit_i->sentries = vzalloc(TOTAL_SEGS(sbi) * sizeof(struct seg_entry)); | 1773 sit_i->sentries = vzalloc(MAIN_SEGS(sbi) * sizeof(struct seg_entry)); |
1558 if (!sit_i->sentries) 1559 return -ENOMEM; 1560 | 1774 if (!sit_i->sentries) 1775 return -ENOMEM; 1776 |
1561 bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi)); | 1777 bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi)); |
1562 sit_i->dirty_sentries_bitmap = kzalloc(bitmap_size, GFP_KERNEL); 1563 if (!sit_i->dirty_sentries_bitmap) 1564 return -ENOMEM; 1565 | 1778 sit_i->dirty_sentries_bitmap = kzalloc(bitmap_size, GFP_KERNEL); 1779 if (!sit_i->dirty_sentries_bitmap) 1780 return -ENOMEM; 1781 |
1566 for (start = 0; start < TOTAL_SEGS(sbi); start++) { | 1782 for (start = 0; start < MAIN_SEGS(sbi); start++) { |
1567 sit_i->sentries[start].cur_valid_map 1568 = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL); 1569 sit_i->sentries[start].ckpt_valid_map 1570 = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL); 1571 if (!sit_i->sentries[start].cur_valid_map 1572 || !sit_i->sentries[start].ckpt_valid_map) 1573 return -ENOMEM; 1574 } 1575 1576 if (sbi->segs_per_sec > 1) { | 1783 sit_i->sentries[start].cur_valid_map 1784 = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL); 1785 sit_i->sentries[start].ckpt_valid_map 1786 = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL); 1787 if (!sit_i->sentries[start].cur_valid_map 1788 || !sit_i->sentries[start].ckpt_valid_map) 1789 return -ENOMEM; 1790 } 1791 1792 if (sbi->segs_per_sec > 1) { |
1577 sit_i->sec_entries = vzalloc(TOTAL_SECS(sbi) * | 1793 sit_i->sec_entries = vzalloc(MAIN_SECS(sbi) * |
1578 sizeof(struct sec_entry)); 1579 if (!sit_i->sec_entries) 1580 return -ENOMEM; 1581 } 1582 1583 /* get information related with SIT */ 1584 sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1; 1585 --- 18 unchanged lines hidden (view full) --- 1604 sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time); 1605 sit_i->mounted_time = CURRENT_TIME_SEC.tv_sec; 1606 mutex_init(&sit_i->sentry_lock); 1607 return 0; 1608} 1609 1610static int build_free_segmap(struct f2fs_sb_info *sbi) 1611{ | 1794 sizeof(struct sec_entry)); 1795 if (!sit_i->sec_entries) 1796 return -ENOMEM; 1797 } 1798 1799 /* get information related with SIT */ 1800 sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1; 1801 --- 18 unchanged lines hidden (view full) --- 1820 sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time); 1821 sit_i->mounted_time = CURRENT_TIME_SEC.tv_sec; 1822 mutex_init(&sit_i->sentry_lock); 1823 return 0; 1824} 1825 1826static int build_free_segmap(struct f2fs_sb_info *sbi) 1827{ |
1612 struct f2fs_sm_info *sm_info = SM_I(sbi); | |
1613 struct free_segmap_info *free_i; 1614 unsigned int bitmap_size, sec_bitmap_size; 1615 1616 /* allocate memory for free segmap information */ 1617 free_i = kzalloc(sizeof(struct free_segmap_info), GFP_KERNEL); 1618 if (!free_i) 1619 return -ENOMEM; 1620 1621 SM_I(sbi)->free_info = free_i; 1622 | 1828 struct free_segmap_info *free_i; 1829 unsigned int bitmap_size, sec_bitmap_size; 1830 1831 /* allocate memory for free segmap information */ 1832 free_i = kzalloc(sizeof(struct free_segmap_info), GFP_KERNEL); 1833 if (!free_i) 1834 return -ENOMEM; 1835 1836 SM_I(sbi)->free_info = free_i; 1837 |
1623 bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi)); | 1838 bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi)); |
1624 free_i->free_segmap = kmalloc(bitmap_size, GFP_KERNEL); 1625 if (!free_i->free_segmap) 1626 return -ENOMEM; 1627 | 1839 free_i->free_segmap = kmalloc(bitmap_size, GFP_KERNEL); 1840 if (!free_i->free_segmap) 1841 return -ENOMEM; 1842 |
1628 sec_bitmap_size = f2fs_bitmap_size(TOTAL_SECS(sbi)); | 1843 sec_bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi)); |
1629 free_i->free_secmap = kmalloc(sec_bitmap_size, GFP_KERNEL); 1630 if (!free_i->free_secmap) 1631 return -ENOMEM; 1632 1633 /* set all segments as dirty temporarily */ 1634 memset(free_i->free_segmap, 0xff, bitmap_size); 1635 memset(free_i->free_secmap, 0xff, sec_bitmap_size); 1636 1637 /* init free segmap information */ | 1844 free_i->free_secmap = kmalloc(sec_bitmap_size, GFP_KERNEL); 1845 if (!free_i->free_secmap) 1846 return -ENOMEM; 1847 1848 /* set all segments as dirty temporarily */ 1849 memset(free_i->free_segmap, 0xff, bitmap_size); 1850 memset(free_i->free_secmap, 0xff, sec_bitmap_size); 1851 1852 /* init free segmap information */ |
1638 free_i->start_segno = 1639 (unsigned int) GET_SEGNO_FROM_SEG0(sbi, sm_info->main_blkaddr); | 1853 free_i->start_segno = GET_SEGNO_FROM_SEG0(sbi, MAIN_BLKADDR(sbi)); |
1640 free_i->free_segments = 0; 1641 free_i->free_sections = 0; 1642 rwlock_init(&free_i->segmap_lock); 1643 return 0; 1644} 1645 1646static int build_curseg(struct f2fs_sb_info *sbi) 1647{ --- 20 unchanged lines hidden (view full) --- 1668static void build_sit_entries(struct f2fs_sb_info *sbi) 1669{ 1670 struct sit_info *sit_i = SIT_I(sbi); 1671 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA); 1672 struct f2fs_summary_block *sum = curseg->sum_blk; 1673 int sit_blk_cnt = SIT_BLK_CNT(sbi); 1674 unsigned int i, start, end; 1675 unsigned int readed, start_blk = 0; | 1854 free_i->free_segments = 0; 1855 free_i->free_sections = 0; 1856 rwlock_init(&free_i->segmap_lock); 1857 return 0; 1858} 1859 1860static int build_curseg(struct f2fs_sb_info *sbi) 1861{ --- 20 unchanged lines hidden (view full) --- 1882static void build_sit_entries(struct f2fs_sb_info *sbi) 1883{ 1884 struct sit_info *sit_i = SIT_I(sbi); 1885 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA); 1886 struct f2fs_summary_block *sum = curseg->sum_blk; 1887 int sit_blk_cnt = SIT_BLK_CNT(sbi); 1888 unsigned int i, start, end; 1889 unsigned int readed, start_blk = 0; |
1676 int nrpages = MAX_BIO_BLOCKS(max_hw_blocks(sbi)); | 1890 int nrpages = MAX_BIO_BLOCKS(sbi); |
1677 1678 do { 1679 readed = ra_meta_pages(sbi, start_blk, nrpages, META_SIT); 1680 1681 start = start_blk * sit_i->sents_per_block; 1682 end = (start_blk + readed) * sit_i->sents_per_block; 1683 | 1891 1892 do { 1893 readed = ra_meta_pages(sbi, start_blk, nrpages, META_SIT); 1894 1895 start = start_blk * sit_i->sents_per_block; 1896 end = (start_blk + readed) * sit_i->sents_per_block; 1897 |
1684 for (; start < end && start < TOTAL_SEGS(sbi); start++) { | 1898 for (; start < end && start < MAIN_SEGS(sbi); start++) { |
1685 struct seg_entry *se = &sit_i->sentries[start]; 1686 struct f2fs_sit_block *sit_blk; 1687 struct f2fs_sit_entry sit; 1688 struct page *page; 1689 1690 mutex_lock(&curseg->curseg_mutex); 1691 for (i = 0; i < sits_in_cursum(sum); i++) { 1692 if (le32_to_cpu(segno_in_journal(sum, i)) --- 21 unchanged lines hidden (view full) --- 1714 } while (start_blk < sit_blk_cnt); 1715} 1716 1717static void init_free_segmap(struct f2fs_sb_info *sbi) 1718{ 1719 unsigned int start; 1720 int type; 1721 | 1899 struct seg_entry *se = &sit_i->sentries[start]; 1900 struct f2fs_sit_block *sit_blk; 1901 struct f2fs_sit_entry sit; 1902 struct page *page; 1903 1904 mutex_lock(&curseg->curseg_mutex); 1905 for (i = 0; i < sits_in_cursum(sum); i++) { 1906 if (le32_to_cpu(segno_in_journal(sum, i)) --- 21 unchanged lines hidden (view full) --- 1928 } while (start_blk < sit_blk_cnt); 1929} 1930 1931static void init_free_segmap(struct f2fs_sb_info *sbi) 1932{ 1933 unsigned int start; 1934 int type; 1935 |
1722 for (start = 0; start < TOTAL_SEGS(sbi); start++) { | 1936 for (start = 0; start < MAIN_SEGS(sbi); start++) { |
1723 struct seg_entry *sentry = get_seg_entry(sbi, start); 1724 if (!sentry->valid_blocks) 1725 __set_free(sbi, start); 1726 } 1727 1728 /* set use the current segments */ 1729 for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) { 1730 struct curseg_info *curseg_t = CURSEG_I(sbi, type); 1731 __set_test_and_inuse(sbi, curseg_t->segno); 1732 } 1733} 1734 1735static void init_dirty_segmap(struct f2fs_sb_info *sbi) 1736{ 1737 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 1738 struct free_segmap_info *free_i = FREE_I(sbi); | 1937 struct seg_entry *sentry = get_seg_entry(sbi, start); 1938 if (!sentry->valid_blocks) 1939 __set_free(sbi, start); 1940 } 1941 1942 /* set use the current segments */ 1943 for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) { 1944 struct curseg_info *curseg_t = CURSEG_I(sbi, type); 1945 __set_test_and_inuse(sbi, curseg_t->segno); 1946 } 1947} 1948 1949static void init_dirty_segmap(struct f2fs_sb_info *sbi) 1950{ 1951 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 1952 struct free_segmap_info *free_i = FREE_I(sbi); |
1739 unsigned int segno = 0, offset = 0, total_segs = TOTAL_SEGS(sbi); | 1953 unsigned int segno = 0, offset = 0; |
1740 unsigned short valid_blocks; 1741 1742 while (1) { 1743 /* find dirty segment based on free segmap */ | 1954 unsigned short valid_blocks; 1955 1956 while (1) { 1957 /* find dirty segment based on free segmap */ |
1744 segno = find_next_inuse(free_i, total_segs, offset); 1745 if (segno >= total_segs) | 1958 segno = find_next_inuse(free_i, MAIN_SEGS(sbi), offset); 1959 if (segno >= MAIN_SEGS(sbi)) |
1746 break; 1747 offset = segno + 1; 1748 valid_blocks = get_valid_blocks(sbi, segno, 0); | 1960 break; 1961 offset = segno + 1; 1962 valid_blocks = get_valid_blocks(sbi, segno, 0); |
1749 if (valid_blocks >= sbi->blocks_per_seg || !valid_blocks) | 1963 if (valid_blocks == sbi->blocks_per_seg || !valid_blocks) |
1750 continue; | 1964 continue; |
1965 if (valid_blocks > sbi->blocks_per_seg) { 1966 f2fs_bug_on(sbi, 1); 1967 continue; 1968 } |
|
1751 mutex_lock(&dirty_i->seglist_lock); 1752 __locate_dirty_segment(sbi, segno, DIRTY); 1753 mutex_unlock(&dirty_i->seglist_lock); 1754 } 1755} 1756 1757static int init_victim_secmap(struct f2fs_sb_info *sbi) 1758{ 1759 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); | 1969 mutex_lock(&dirty_i->seglist_lock); 1970 __locate_dirty_segment(sbi, segno, DIRTY); 1971 mutex_unlock(&dirty_i->seglist_lock); 1972 } 1973} 1974 1975static int init_victim_secmap(struct f2fs_sb_info *sbi) 1976{ 1977 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); |
1760 unsigned int bitmap_size = f2fs_bitmap_size(TOTAL_SECS(sbi)); | 1978 unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi)); |
1761 1762 dirty_i->victim_secmap = kzalloc(bitmap_size, GFP_KERNEL); 1763 if (!dirty_i->victim_secmap) 1764 return -ENOMEM; 1765 return 0; 1766} 1767 1768static int build_dirty_segmap(struct f2fs_sb_info *sbi) --- 4 unchanged lines hidden (view full) --- 1773 /* allocate memory for dirty segments list information */ 1774 dirty_i = kzalloc(sizeof(struct dirty_seglist_info), GFP_KERNEL); 1775 if (!dirty_i) 1776 return -ENOMEM; 1777 1778 SM_I(sbi)->dirty_info = dirty_i; 1779 mutex_init(&dirty_i->seglist_lock); 1780 | 1979 1980 dirty_i->victim_secmap = kzalloc(bitmap_size, GFP_KERNEL); 1981 if (!dirty_i->victim_secmap) 1982 return -ENOMEM; 1983 return 0; 1984} 1985 1986static int build_dirty_segmap(struct f2fs_sb_info *sbi) --- 4 unchanged lines hidden (view full) --- 1991 /* allocate memory for dirty segments list information */ 1992 dirty_i = kzalloc(sizeof(struct dirty_seglist_info), GFP_KERNEL); 1993 if (!dirty_i) 1994 return -ENOMEM; 1995 1996 SM_I(sbi)->dirty_info = dirty_i; 1997 mutex_init(&dirty_i->seglist_lock); 1998 |
1781 bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi)); | 1999 bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi)); |
1782 1783 for (i = 0; i < NR_DIRTY_TYPE; i++) { 1784 dirty_i->dirty_segmap[i] = kzalloc(bitmap_size, GFP_KERNEL); 1785 if (!dirty_i->dirty_segmap[i]) 1786 return -ENOMEM; 1787 } 1788 1789 init_dirty_segmap(sbi); --- 7 unchanged lines hidden (view full) --- 1797{ 1798 struct sit_info *sit_i = SIT_I(sbi); 1799 unsigned int segno; 1800 1801 mutex_lock(&sit_i->sentry_lock); 1802 1803 sit_i->min_mtime = LLONG_MAX; 1804 | 2000 2001 for (i = 0; i < NR_DIRTY_TYPE; i++) { 2002 dirty_i->dirty_segmap[i] = kzalloc(bitmap_size, GFP_KERNEL); 2003 if (!dirty_i->dirty_segmap[i]) 2004 return -ENOMEM; 2005 } 2006 2007 init_dirty_segmap(sbi); --- 7 unchanged lines hidden (view full) --- 2015{ 2016 struct sit_info *sit_i = SIT_I(sbi); 2017 unsigned int segno; 2018 2019 mutex_lock(&sit_i->sentry_lock); 2020 2021 sit_i->min_mtime = LLONG_MAX; 2022 |
1805 for (segno = 0; segno < TOTAL_SEGS(sbi); segno += sbi->segs_per_sec) { | 2023 for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) { |
1806 unsigned int i; 1807 unsigned long long mtime = 0; 1808 1809 for (i = 0; i < sbi->segs_per_sec; i++) 1810 mtime += get_seg_entry(sbi, segno + i)->mtime; 1811 1812 mtime = div_u64(mtime, sbi->segs_per_sec); 1813 --- 21 unchanged lines hidden (view full) --- 1835 sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr); 1836 sm_info->segment_count = le32_to_cpu(raw_super->segment_count); 1837 sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count); 1838 sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count); 1839 sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main); 1840 sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr); 1841 sm_info->rec_prefree_segments = sm_info->main_segments * 1842 DEF_RECLAIM_PREFREE_SEGMENTS / 100; | 2024 unsigned int i; 2025 unsigned long long mtime = 0; 2026 2027 for (i = 0; i < sbi->segs_per_sec; i++) 2028 mtime += get_seg_entry(sbi, segno + i)->mtime; 2029 2030 mtime = div_u64(mtime, sbi->segs_per_sec); 2031 --- 21 unchanged lines hidden (view full) --- 2053 sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr); 2054 sm_info->segment_count = le32_to_cpu(raw_super->segment_count); 2055 sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count); 2056 sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count); 2057 sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main); 2058 sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr); 2059 sm_info->rec_prefree_segments = sm_info->main_segments * 2060 DEF_RECLAIM_PREFREE_SEGMENTS / 100; |
1843 sm_info->ipu_policy = F2FS_IPU_DISABLE; | 2061 sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC; |
1844 sm_info->min_ipu_util = DEF_MIN_IPU_UTIL; | 2062 sm_info->min_ipu_util = DEF_MIN_IPU_UTIL; |
2063 sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS; |
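
On the newer side, build_segment_manager() no longer defaults the in-place-update policy to F2FS_IPU_DISABLE: ipu_policy starts as 1 << F2FS_IPU_FSYNC and a min_fsync_blocks threshold is initialized alongside min_ipu_util, which suggests ipu_policy is now consumed as a bitmask of enabled IPU triggers rather than a single enum value. Below is a minimal sketch of such a bitmask check; should_do_ipu() is a hypothetical helper, the enum ordering is assumed rather than copied from f2fs.h, and the real f2fs decision also considers utilization and SSR state.

	/* Hypothetical illustration of a bitmask ipu_policy; not the f2fs helper. */
	#include <stdbool.h>

	enum {
		F2FS_IPU_FORCE,		/* ordering assumed for illustration */
		F2FS_IPU_SSR,
		F2FS_IPU_UTIL,
		F2FS_IPU_SSR_UTIL,
		F2FS_IPU_FSYNC,		/* the new default is 1 << F2FS_IPU_FSYNC */
	};

	static bool should_do_ipu(unsigned int ipu_policy, bool fsync_path,
				  unsigned int fsync_dirty_blocks,
				  unsigned int min_fsync_blocks)
	{
		if (ipu_policy & (1 << F2FS_IPU_FORCE))
			return true;
		/* default policy: rewrite small fsync-driven updates in place */
		if ((ipu_policy & (1 << F2FS_IPU_FSYNC)) && fsync_path &&
		    fsync_dirty_blocks <= min_fsync_blocks)
			return true;
		return false;
	}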
|
1845 1846 INIT_LIST_HEAD(&sm_info->discard_list); 1847 sm_info->nr_discards = 0; 1848 sm_info->max_discards = 0; 1849 | 2064 2065 INIT_LIST_HEAD(&sm_info->discard_list); 2066 sm_info->nr_discards = 0; 2067 sm_info->max_discards = 0; 2068 |
2069 INIT_LIST_HEAD(&sm_info->sit_entry_set); 2070 |
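
The newer side also gives struct f2fs_sm_info a sit_entry_set list head, which pairs with the sit_entry_set_slab cache created later in this diff, presumably so that dirty SIT entries can be grouped per on-disk SIT block and flushed as a batch at checkpoint time instead of being located one segment at a time. The struct layout and helper below are an assumed sketch of that pattern, not the actual f2fs definitions.

	#include <linux/list.h>

	/* Assumed shape: one set per on-disk SIT block that has dirty entries. */
	struct sit_entry_set {
		struct list_head set_list;	/* linked on sm_info->sit_entry_set */
		unsigned int start_segno;	/* first segno covered by the SIT block */
		unsigned int entry_cnt;		/* dirty entries within that block */
	};

	/* Hypothetical helper: bump the count for segno's SIT block, if tracked. */
	static bool account_dirty_sit_entry(struct list_head *head,
					    unsigned int segno,
					    unsigned int sit_entries_per_block)
	{
		unsigned int start = segno - (segno % sit_entries_per_block);
		struct sit_entry_set *ses;

		list_for_each_entry(ses, head, set_list) {
			if (ses->start_segno == start) {
				ses->entry_cnt++;
				return true;
			}
		}
		return false;	/* caller would allocate a new set from the slab */
	}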
|
1850 if (test_opt(sbi, FLUSH_MERGE) && !f2fs_readonly(sbi->sb)) { 1851 err = create_flush_cmd_control(sbi); 1852 if (err) 1853 return err; 1854 } 1855 1856 err = build_sit_info(sbi); 1857 if (err) --- 79 unchanged lines hidden (view full) --- 1937{ 1938 struct sit_info *sit_i = SIT_I(sbi); 1939 unsigned int start; 1940 1941 if (!sit_i) 1942 return; 1943 1944 if (sit_i->sentries) { | 2071 if (test_opt(sbi, FLUSH_MERGE) && !f2fs_readonly(sbi->sb)) { 2072 err = create_flush_cmd_control(sbi); 2073 if (err) 2074 return err; 2075 } 2076 2077 err = build_sit_info(sbi); 2078 if (err) --- 79 unchanged lines hidden (view full) --- 2158{ 2159 struct sit_info *sit_i = SIT_I(sbi); 2160 unsigned int start; 2161 2162 if (!sit_i) 2163 return; 2164 2165 if (sit_i->sentries) { |
1945 for (start = 0; start < TOTAL_SEGS(sbi); start++) { | 2166 for (start = 0; start < MAIN_SEGS(sbi); start++) { |
1946 kfree(sit_i->sentries[start].cur_valid_map); 1947 kfree(sit_i->sentries[start].ckpt_valid_map); 1948 } 1949 } 1950 vfree(sit_i->sentries); 1951 vfree(sit_i->sec_entries); 1952 kfree(sit_i->dirty_sentries_bitmap); 1953 --- 17 unchanged lines hidden (view full) --- 1971 kfree(sm_info); 1972} 1973 1974int __init create_segment_manager_caches(void) 1975{ 1976 discard_entry_slab = f2fs_kmem_cache_create("discard_entry", 1977 sizeof(struct discard_entry)); 1978 if (!discard_entry_slab) | 2167 kfree(sit_i->sentries[start].cur_valid_map); 2168 kfree(sit_i->sentries[start].ckpt_valid_map); 2169 } 2170 } 2171 vfree(sit_i->sentries); 2172 vfree(sit_i->sec_entries); 2173 kfree(sit_i->dirty_sentries_bitmap); 2174 --- 17 unchanged lines hidden (view full) --- 2192 kfree(sm_info); 2193} 2194 2195int __init create_segment_manager_caches(void) 2196{ 2197 discard_entry_slab = f2fs_kmem_cache_create("discard_entry", 2198 sizeof(struct discard_entry)); 2199 if (!discard_entry_slab) |
1979 return -ENOMEM; | 2200 goto fail; 2201 2202 sit_entry_set_slab = f2fs_kmem_cache_create("sit_entry_set", 2203 sizeof(struct nat_entry_set)); 2204 if (!sit_entry_set_slab) 2205 goto destory_discard_entry; 2206 2207 inmem_entry_slab = f2fs_kmem_cache_create("inmem_page_entry", 2208 sizeof(struct inmem_pages)); 2209 if (!inmem_entry_slab) 2210 goto destroy_sit_entry_set; |
1980 return 0; | 2211 return 0; |
2212 2213destroy_sit_entry_set: 2214 kmem_cache_destroy(sit_entry_set_slab); 2215destory_discard_entry: 2216 kmem_cache_destroy(discard_entry_slab); 2217fail: 2218 return -ENOMEM; |
|
1981} 1982 1983void destroy_segment_manager_caches(void) 1984{ | 2219} 2220 2221void destroy_segment_manager_caches(void) 2222{ |
2223 kmem_cache_destroy(sit_entry_set_slab); |
|
1985 kmem_cache_destroy(discard_entry_slab); | 2224 kmem_cache_destroy(discard_entry_slab); |
2225 kmem_cache_destroy(inmem_entry_slab); |
|
1986} | 2226} |
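
In the newer version, create_segment_manager_caches() grows from one slab cache to three (discard_entry, sit_entry_set, inmem_page_entry) and replaces the bare return -ENOMEM with a goto unwind, so a failure while creating a later cache destroys the ones already created; destroy_segment_manager_caches() tears down the same three. Two details are worth flagging: the sit_entry_set cache is sized with sizeof(struct nat_entry_set), which reads like a leftover from the NAT-side setup and is harmless only as long as struct nat_entry_set is at least as large as struct sit_entry_set, and the intermediate label is spelled destory_discard_entry (consistent with its goto, so it compiles, but presumably a typo). Assuming struct sit_entry_set is the intended type, a corrected sketch of the create path would be:

	/*
	 * Sketch only: differs from the diff in using sizeof(struct
	 * sit_entry_set) and in the corrected label spelling.
	 */
	int __init create_segment_manager_caches(void)
	{
		discard_entry_slab = f2fs_kmem_cache_create("discard_entry",
				sizeof(struct discard_entry));
		if (!discard_entry_slab)
			goto fail;

		sit_entry_set_slab = f2fs_kmem_cache_create("sit_entry_set",
				sizeof(struct sit_entry_set));
		if (!sit_entry_set_slab)
			goto destroy_discard_entry;

		inmem_entry_slab = f2fs_kmem_cache_create("inmem_page_entry",
				sizeof(struct inmem_pages));
		if (!inmem_entry_slab)
			goto destroy_sit_entry_set;
		return 0;

	destroy_sit_entry_set:
		kmem_cache_destroy(sit_entry_set_slab);
	destroy_discard_entry:
		kmem_cache_destroy(discard_entry_slab);
	fail:
		return -ENOMEM;
	}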