/*
 * fs/ext4/extents_status.c
 *
 * Written by Yongqiang Yang <xiaoqiangnk@gmail.com>
 * Modified by
 *	Allison Henderson <achender@linux.vnet.ibm.com>
 *	Hugh Dickins <hughd@google.com>
 *	Zheng Liu <wenqing.lz@taobao.com>
 *
 * Ext4 extents status tree core functions.
 */
#include <linux/rbtree.h>
#include <linux/list_sort.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include "ext4.h"
#include "extents_status.h"

#include <trace/events/ext4.h>

/*
 * According to previous discussion at the Ext4 Developer Workshop, we
 * will introduce a new structure called an io tree to track all extent
 * status in order to solve some problems that we have met
 * (e.g. reservation space warnings) and to provide extent-level locking.
 * The delay extent tree is the first step toward this goal. It was
 * originally built by Yongqiang Yang. At that time it was called the
 * delay extent tree, and its only goal was to track delayed extents in
 * memory to simplify the implementation of fiemap and bigalloc, and to
 * introduce lseek SEEK_DATA/SEEK_HOLE support. That is why it was still
 * called the delay extent tree in the first commit. But to better
 * reflect what it does, it has been renamed the extent status tree.
 *
 * Step1:
 * Currently the first step has been done. All delayed extents are
 * tracked in the tree. The tree is updated when a delayed allocation
 * is issued and when the delayed extent is written out or invalidated.
 * Therefore the implementation of fiemap and bigalloc is simplified,
 * and SEEK_DATA/SEEK_HOLE are introduced.
 *
 * The following comment describes the implementation of the extent
 * status tree and future work.
 *
 * Step2:
 * In this step all extent status is tracked by the extent status tree.
 * Thus, we can first try to look up a block mapping in this tree before
 * searching the extent tree. Hence, the single extent cache can be
 * removed because the extent status tree can do a better job. Extents
 * in the status tree are loaded on demand, so the extent status tree
 * may not contain all of the extents in a file. Meanwhile we define a
 * shrinker to reclaim memory from the extent status tree because a
 * fragmented extent tree will make the status tree cost too much
 * memory. Written/unwritten/hole extents in the tree will be reclaimed
 * by this shrinker when we are under high memory pressure. Delayed
 * extents will not be reclaimed because fiemap, bigalloc, and
 * seek_data/hole need them.
 */

/*
 * Extent status tree implementation for ext4.
 *
 *
 * ==========================================================================
 * Extent status tree tracks all extent status.
 *
 * 1. Why do we need to implement an extent status tree?
 *
 * Without the extent status tree, ext4 identifies a delayed extent by
 * looking up the page cache, which has several deficiencies -
 * complicated, buggy, and inefficient code.
 *
 * FIEMAP, SEEK_HOLE/DATA, bigalloc, and writeout all need to know
 * whether a block or a range of blocks belongs to a delayed extent.
 *
 * Let us have a look at how they work without the extent status tree.
 * -- FIEMAP
 *	FIEMAP looks up the page cache to distinguish delayed
 *	allocations from holes.
 *
 * -- SEEK_HOLE/DATA
 *	SEEK_HOLE/DATA has the same problem as FIEMAP.
 *
 * -- bigalloc
 *	bigalloc looks up the page cache to figure out whether a block
 *	is already under delayed allocation, in order to determine
 *	whether quota reservation is needed for the cluster.
 *
 * -- writeout
 *	Writeout looks up the whole page cache to see if a buffer is
 *	mapped. If there are not very many delayed buffers, this is
 *	time consuming.
 *
 * With the extent status tree implemented, FIEMAP, SEEK_HOLE/DATA,
 * bigalloc and writeout can figure out whether a block or a range of
 * blocks is under delayed allocation (i.e. belongs to a delayed
 * extent) simply by searching this tree.
 *
 *
 * ==========================================================================
 * 2. Ext4 extent status tree implementation
 *
 * -- extent
 *	An extent is a range of blocks which are contiguous logically
 *	and physically. Unlike the extents in the extent tree, these
 *	extents are in-memory structs; there is no corresponding
 *	on-disk data. There is no limit on the length of an extent, so
 *	an extent can contain as many blocks as are contiguous
 *	logically and physically.
 *
 * -- extent status tree
 *	Every inode has an extent status tree and all allocated blocks
 *	are added to the tree with their status. The extents in the
 *	tree are ordered by logical block number.
 *
 * -- operations on an extent status tree
 *	There are three important operations on an extent status tree:
 *	finding the next extent, adding an extent (a range of blocks)
 *	and removing an extent. A usage sketch follows this comment.
 *
 * -- race on an extent status tree
 *	The extent status tree is protected by inode->i_es_lock.
 *
 * -- memory consumption
 *	A fragmented extent tree will make the extent status tree cost
 *	too much memory. Hence, we will reclaim written/unwritten/hole
 *	extents from the tree under heavy memory pressure.
 *
 *
 * ==========================================================================
 * 3. Performance analysis
 *
 * -- overhead
 *	There is a cached extent for write access, so if writes are not
 *	very random, adding-space operations run in O(1) time.
 *
 * -- gain
 *	The code is much simpler, more readable, more maintainable and
 *	more efficient.
 *
 *
 * ==========================================================================
 * 4. TODO list
 *
 * -- Refactor delayed space reservation
 *
 * -- Extent-level locking
 */
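
/*
 * A minimal usage sketch (illustrative only, not compiled): a caller
 * such as ext4_map_blocks() first consults the status tree and only
 * falls back to the on-disk extent tree on a miss, caching the result
 * for the next lookup. Error handling is omitted:
 *
 *	struct extent_status es;
 *
 *	if (ext4_es_lookup_extent(inode, lblk, &es))
 *		return the mapping derived from es;
 *	... consult the on-disk extent tree instead ...
 *	ext4_es_insert_extent(inode, lblk, len, pblk,
 *			      EXTENT_STATUS_WRITTEN);
 */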

static struct kmem_cache *ext4_es_cachep;

static int __es_insert_extent(struct inode *inode, struct extent_status *newes);
static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
			      ext4_lblk_t end);
static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan);
static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
		       struct ext4_inode_info *locked_ei);

int __init ext4_init_es(void)
{
	ext4_es_cachep = kmem_cache_create("ext4_extent_status",
					   sizeof(struct extent_status),
					   0, (SLAB_RECLAIM_ACCOUNT), NULL);
	if (ext4_es_cachep == NULL)
		return -ENOMEM;
	return 0;
}

void ext4_exit_es(void)
{
	if (ext4_es_cachep)
		kmem_cache_destroy(ext4_es_cachep);
}

void ext4_es_init_tree(struct ext4_es_tree *tree)
{
	tree->root = RB_ROOT;
	tree->cache_es = NULL;
}

#ifdef ES_DEBUG__
static void ext4_es_print_tree(struct inode *inode)
{
	struct ext4_es_tree *tree;
	struct rb_node *node;

	printk(KERN_DEBUG "status extents for inode %lu:", inode->i_ino);
	tree = &EXT4_I(inode)->i_es_tree;
	node = rb_first(&tree->root);
	while (node) {
		struct extent_status *es;
		es = rb_entry(node, struct extent_status, rb_node);
		printk(KERN_DEBUG " [%u/%u) %llu %x",
		       es->es_lblk, es->es_len,
		       ext4_es_pblock(es), ext4_es_status(es));
		node = rb_next(node);
	}
	printk(KERN_DEBUG "\n");
}
#else
#define ext4_es_print_tree(inode)
#endif

static inline ext4_lblk_t ext4_es_end(struct extent_status *es)
{
	BUG_ON(es->es_lblk + es->es_len < es->es_lblk);
	return es->es_lblk + es->es_len - 1;
}

/*
 * Search through the tree for an extent with a given offset. If it
 * can't be found, try to find the next extent.
 */
static struct extent_status *__es_tree_search(struct rb_root *root,
					      ext4_lblk_t lblk)
{
	struct rb_node *node = root->rb_node;
	struct extent_status *es = NULL;

	while (node) {
		es = rb_entry(node, struct extent_status, rb_node);
		if (lblk < es->es_lblk)
			node = node->rb_left;
		else if (lblk > ext4_es_end(es))
			node = node->rb_right;
		else
			return es;
	}

	if (es && lblk < es->es_lblk)
		return es;

	if (es && lblk > ext4_es_end(es)) {
		node = rb_next(&es->rb_node);
		return node ? rb_entry(node, struct extent_status, rb_node) :
			      NULL;
	}

	return NULL;
}
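
/*
 * For example (illustrative): if the tree holds [0/10) and [20/5),
 * then __es_tree_search() returns [0/10) for lblk 3 (the covering
 * extent), [20/5) for lblk 12 (the next extent after a hole), and
 * NULL for lblk 30 (nothing at or after the offset).
 */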

/*
 * ext4_es_find_delayed_extent_range: find the 1st delayed extent
 * covering @lblk if it exists, otherwise the next delayed extent
 * after @lblk.
 *
 * @inode: the inode which owns delayed extents
 * @lblk: the offset where we start to search
 * @end: the offset where we stop to search
 * @es: delayed extent that we found
 */
void ext4_es_find_delayed_extent_range(struct inode *inode,
				 ext4_lblk_t lblk, ext4_lblk_t end,
				 struct extent_status *es)
{
	struct ext4_es_tree *tree = NULL;
	struct extent_status *es1 = NULL;
	struct rb_node *node;

	BUG_ON(es == NULL);
	BUG_ON(end < lblk);
	trace_ext4_es_find_delayed_extent_range_enter(inode, lblk);

	read_lock(&EXT4_I(inode)->i_es_lock);
	tree = &EXT4_I(inode)->i_es_tree;

	/* find extent in cache first */
	es->es_lblk = es->es_len = es->es_pblk = 0;
	if (tree->cache_es) {
		es1 = tree->cache_es;
		if (in_range(lblk, es1->es_lblk, es1->es_len)) {
			es_debug("%u cached by [%u/%u) %llu %x\n",
				 lblk, es1->es_lblk, es1->es_len,
				 ext4_es_pblock(es1), ext4_es_status(es1));
			goto out;
		}
	}

	es1 = __es_tree_search(&tree->root, lblk);

out:
	if (es1 && !ext4_es_is_delayed(es1)) {
		while ((node = rb_next(&es1->rb_node)) != NULL) {
			es1 = rb_entry(node, struct extent_status, rb_node);
			if (es1->es_lblk > end) {
				es1 = NULL;
				break;
			}
			if (ext4_es_is_delayed(es1))
				break;
		}
	}

	if (es1 && ext4_es_is_delayed(es1)) {
		tree->cache_es = es1;
		es->es_lblk = es1->es_lblk;
		es->es_len = es1->es_len;
		es->es_pblk = es1->es_pblk;
	}

	read_unlock(&EXT4_I(inode)->i_es_lock);

	trace_ext4_es_find_delayed_extent_range_exit(inode, es);
}
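
/*
 * Worked example (illustrative): if the tree holds a written extent
 * [0/5) and a delayed extent [8/4), then a call with lblk == 0 and
 * end == 20 skips the written extent and returns [8/4) in *es. If no
 * delayed extent exists in the range, *es is left zeroed, i.e.
 * es->es_len == 0.
 */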

static void ext4_es_list_add(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

	if (!list_empty(&ei->i_es_list))
		return;

	spin_lock(&sbi->s_es_lock);
	if (list_empty(&ei->i_es_list)) {
		list_add_tail(&ei->i_es_list, &sbi->s_es_list);
		sbi->s_es_nr_inode++;
	}
	spin_unlock(&sbi->s_es_lock);
}

static void ext4_es_list_del(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

	spin_lock(&sbi->s_es_lock);
	if (!list_empty(&ei->i_es_list)) {
		list_del_init(&ei->i_es_list);
		sbi->s_es_nr_inode--;
		WARN_ON_ONCE(sbi->s_es_nr_inode < 0);
	}
	spin_unlock(&sbi->s_es_lock);
}

static struct extent_status *
ext4_es_alloc_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len,
		     ext4_fsblk_t pblk)
{
	struct extent_status *es;
	es = kmem_cache_alloc(ext4_es_cachep, GFP_ATOMIC);
	if (es == NULL)
		return NULL;
	es->es_lblk = lblk;
	es->es_len = len;
	es->es_pblk = pblk;

	/*
	 * We don't count delayed extents because we never try to
	 * reclaim them.
	 */
	if (!ext4_es_is_delayed(es)) {
		if (!EXT4_I(inode)->i_es_shk_nr++)
			ext4_es_list_add(inode);
		percpu_counter_inc(&EXT4_SB(inode->i_sb)->
					s_es_stats.es_stats_shk_cnt);
	}

	EXT4_I(inode)->i_es_all_nr++;
	percpu_counter_inc(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt);

	return es;
}

static void ext4_es_free_extent(struct inode *inode, struct extent_status *es)
{
	EXT4_I(inode)->i_es_all_nr--;
	percpu_counter_dec(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt);

	/* Decrease the shrink counter when this es is not delayed */
	if (!ext4_es_is_delayed(es)) {
		BUG_ON(EXT4_I(inode)->i_es_shk_nr == 0);
		if (!--EXT4_I(inode)->i_es_shk_nr)
			ext4_es_list_del(inode);
		percpu_counter_dec(&EXT4_SB(inode->i_sb)->
					s_es_stats.es_stats_shk_cnt);
	}

	kmem_cache_free(ext4_es_cachep, es);
}

/*
 * Check whether or not two extents can be merged.
 * Conditions:
 *  - logical block numbers are contiguous
 *  - physical block numbers are contiguous
 *  - status is equal
 */
static int ext4_es_can_be_merged(struct extent_status *es1,
				 struct extent_status *es2)
{
	if (ext4_es_type(es1) != ext4_es_type(es2))
		return 0;

	if (((__u64) es1->es_len) + es2->es_len > EXT_MAX_BLOCKS) {
		pr_warn("ES assertion failed when merging extents. "
			"The sum of lengths of es1 (%d) and es2 (%d) "
			"is bigger than allowed file size (%d)\n",
			es1->es_len, es2->es_len, EXT_MAX_BLOCKS);
		WARN_ON(1);
		return 0;
	}

	if (((__u64) es1->es_lblk) + es1->es_len != es2->es_lblk)
		return 0;

	if ((ext4_es_is_written(es1) || ext4_es_is_unwritten(es1)) &&
	    (ext4_es_pblock(es1) + es1->es_len == ext4_es_pblock(es2)))
		return 1;

	if (ext4_es_is_hole(es1))
		return 1;

	/* we need to check that a delayed extent is without unwritten status */
	if (ext4_es_is_delayed(es1) && !ext4_es_is_unwritten(es1))
		return 1;

	return 0;
}

static struct extent_status *
ext4_es_try_to_merge_left(struct inode *inode, struct extent_status *es)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct extent_status *es1;
	struct rb_node *node;

	node = rb_prev(&es->rb_node);
	if (!node)
		return es;

	es1 = rb_entry(node, struct extent_status, rb_node);
	if (ext4_es_can_be_merged(es1, es)) {
		es1->es_len += es->es_len;
		if (ext4_es_is_referenced(es))
			ext4_es_set_referenced(es1);
		rb_erase(&es->rb_node, &tree->root);
		ext4_es_free_extent(inode, es);
		es = es1;
	}

	return es;
}

static struct extent_status *
ext4_es_try_to_merge_right(struct inode *inode, struct extent_status *es)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct extent_status *es1;
	struct rb_node *node;

	node = rb_next(&es->rb_node);
	if (!node)
		return es;

	es1 = rb_entry(node, struct extent_status, rb_node);
	if (ext4_es_can_be_merged(es, es1)) {
		es->es_len += es1->es_len;
		if (ext4_es_is_referenced(es1))
			ext4_es_set_referenced(es);
		rb_erase(node, &tree->root);
		ext4_es_free_extent(inode, es1);
	}

	return es;
}

#ifdef ES_AGGRESSIVE_TEST
#include "ext4_extents.h"	/* Needed when ES_AGGRESSIVE_TEST is defined */

static void ext4_es_insert_extent_ext_check(struct inode *inode,
					    struct extent_status *es)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_extent *ex;
	ext4_lblk_t ee_block;
	ext4_fsblk_t ee_start;
	unsigned short ee_len;
	int depth, ee_status, es_status;

	path = ext4_find_extent(inode, es->es_lblk, NULL, EXT4_EX_NOCACHE);
	if (IS_ERR(path))
		return;

	depth = ext_depth(inode);
	ex = path[depth].p_ext;

	if (ex) {

		ee_block = le32_to_cpu(ex->ee_block);
		ee_start = ext4_ext_pblock(ex);
		ee_len = ext4_ext_get_actual_len(ex);

		ee_status = ext4_ext_is_unwritten(ex) ? 1 : 0;
		es_status = ext4_es_is_unwritten(es) ? 1 : 0;

		/*
		 * Make sure ex and es do not overlap when we try to
		 * insert a delayed/hole extent.
		 */
		if (!ext4_es_is_written(es) && !ext4_es_is_unwritten(es)) {
			if (in_range(es->es_lblk, ee_block, ee_len)) {
				pr_warn("ES insert assertion failed for "
					"inode: %lu we can find an extent "
					"at block [%d/%d/%llu/%c], but we "
					"want to add a delayed/hole extent "
					"[%d/%d/%llu/%x]\n",
					inode->i_ino, ee_block, ee_len,
					ee_start, ee_status ? 'u' : 'w',
					es->es_lblk, es->es_len,
					ext4_es_pblock(es), ext4_es_status(es));
			}
			goto out;
		}

		/*
		 * We don't check ee_block == es->es_lblk, etc. because es
		 * might be a part of a whole extent, and vice versa.
		 */
		if (es->es_lblk < ee_block ||
		    ext4_es_pblock(es) != ee_start + es->es_lblk - ee_block) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"ex_status [%d/%d/%llu/%c] != "
				"es_status [%d/%d/%llu/%c]\n", inode->i_ino,
				ee_block, ee_len, ee_start,
				ee_status ? 'u' : 'w', es->es_lblk, es->es_len,
				ext4_es_pblock(es), es_status ? 'u' : 'w');
			goto out;
		}

		if (ee_status ^ es_status) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"ex_status [%d/%d/%llu/%c] != "
				"es_status [%d/%d/%llu/%c]\n", inode->i_ino,
				ee_block, ee_len, ee_start,
				ee_status ? 'u' : 'w', es->es_lblk, es->es_len,
				ext4_es_pblock(es), es_status ? 'u' : 'w');
		}
	} else {
		/*
		 * We can't find an extent on disk. So we need to make sure
		 * that we don't want to add a written/unwritten extent.
		 */
		if (!ext4_es_is_delayed(es) && !ext4_es_is_hole(es)) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"can't find an extent at block %d but we want "
				"to add a written/unwritten extent "
				"[%d/%d/%llu/%x]\n", inode->i_ino,
				es->es_lblk, es->es_lblk, es->es_len,
				ext4_es_pblock(es), ext4_es_status(es));
		}
	}
out:
	ext4_ext_drop_refs(path);
	kfree(path);
}

static void ext4_es_insert_extent_ind_check(struct inode *inode,
					    struct extent_status *es)
{
	struct ext4_map_blocks map;
	int retval;

	/*
	 * Here we call ext4_ind_map_blocks to look up a block mapping
	 * because the 'Indirect' structure is defined in indirect.c, so
	 * we can't access the direct/indirect tree from outside. It
	 * would be too dirty to define this function in indirect.c.
	 */

	map.m_lblk = es->es_lblk;
	map.m_len = es->es_len;

	retval = ext4_ind_map_blocks(NULL, inode, &map, 0);
	if (retval > 0) {
		if (ext4_es_is_delayed(es) || ext4_es_is_hole(es)) {
			/*
			 * We want to add a delayed/hole extent but this
			 * block has been allocated.
			 */
			pr_warn("ES insert assertion failed for inode: %lu "
				"We can find blocks but we want to add a "
				"delayed/hole extent [%d/%d/%llu/%x]\n",
				inode->i_ino, es->es_lblk, es->es_len,
				ext4_es_pblock(es), ext4_es_status(es));
			return;
		} else if (ext4_es_is_written(es)) {
			if (retval != es->es_len) {
				pr_warn("ES insert assertion failed for "
					"inode: %lu retval %d != es_len %d\n",
					inode->i_ino, retval, es->es_len);
				return;
			}
			if (map.m_pblk != ext4_es_pblock(es)) {
				pr_warn("ES insert assertion failed for "
					"inode: %lu m_pblk %llu != "
					"es_pblk %llu\n",
					inode->i_ino, map.m_pblk,
					ext4_es_pblock(es));
				return;
			}
		} else {
			/*
			 * We don't need to check unwritten extents because
			 * indirect-based files don't have them.
			 */
			BUG_ON(1);
		}
	} else if (retval == 0) {
		if (ext4_es_is_written(es)) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"We can't find the block but we want to add "
				"a written extent [%d/%d/%llu/%x]\n",
				inode->i_ino, es->es_lblk, es->es_len,
				ext4_es_pblock(es), ext4_es_status(es));
			return;
		}
	}
}

static inline void ext4_es_insert_extent_check(struct inode *inode,
					       struct extent_status *es)
{
	/*
	 * We don't need to worry about the race condition because the
	 * caller holds i_data_sem.
	 */
	BUG_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem));
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		ext4_es_insert_extent_ext_check(inode, es);
	else
		ext4_es_insert_extent_ind_check(inode, es);
}
#else
static inline void ext4_es_insert_extent_check(struct inode *inode,
					       struct extent_status *es)
{
}
#endif

static int __es_insert_extent(struct inode *inode, struct extent_status *newes)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct rb_node **p = &tree->root.rb_node;
	struct rb_node *parent = NULL;
	struct extent_status *es;

	while (*p) {
		parent = *p;
		es = rb_entry(parent, struct extent_status, rb_node);

		if (newes->es_lblk < es->es_lblk) {
			if (ext4_es_can_be_merged(newes, es)) {
				/*
				 * Here we can modify es_lblk directly
				 * because it isn't overlapped.
				 */
				es->es_lblk = newes->es_lblk;
				es->es_len += newes->es_len;
				if (ext4_es_is_written(es) ||
				    ext4_es_is_unwritten(es))
					ext4_es_store_pblock(es,
							     newes->es_pblk);
				es = ext4_es_try_to_merge_left(inode, es);
				goto out;
			}
			p = &(*p)->rb_left;
		} else if (newes->es_lblk > ext4_es_end(es)) {
			if (ext4_es_can_be_merged(es, newes)) {
				es->es_len += newes->es_len;
				es = ext4_es_try_to_merge_right(inode, es);
				goto out;
			}
			p = &(*p)->rb_right;
		} else {
			BUG_ON(1);
			return -EINVAL;
		}
	}

	es = ext4_es_alloc_extent(inode, newes->es_lblk, newes->es_len,
				  newes->es_pblk);
	if (!es)
		return -ENOMEM;
	rb_link_node(&es->rb_node, parent, p);
	rb_insert_color(&es->rb_node, &tree->root);

out:
	tree->cache_es = es;
	return 0;
}
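
/*
 * Illustrative insert-with-merge: with a written extent [0/5) at pblk
 * 100 in the tree, inserting written [5/3) at pblk 105 does not
 * allocate a new node; the existing node is extended in place to
 * [0/8) and ext4_es_try_to_merge_right() is then tried as well, so a
 * contiguous file coalesces into the fewest possible nodes.
 */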

/*
 * ext4_es_insert_extent() adds information to an inode's extent
 * status tree.
 *
 * Return 0 on success, error code on failure.
 */
int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
			  ext4_lblk_t len, ext4_fsblk_t pblk,
			  unsigned int status)
{
	struct extent_status newes;
	ext4_lblk_t end = lblk + len - 1;
	int err = 0;

	es_debug("add [%u/%u) %llu %x to extent status tree of inode %lu\n",
		 lblk, len, pblk, status, inode->i_ino);

	if (!len)
		return 0;

	BUG_ON(end < lblk);

	newes.es_lblk = lblk;
	newes.es_len = len;
	ext4_es_store_pblock_status(&newes, pblk, status);
	trace_ext4_es_insert_extent(inode, &newes);

	ext4_es_insert_extent_check(inode, &newes);

	write_lock(&EXT4_I(inode)->i_es_lock);
	err = __es_remove_extent(inode, lblk, end);
	if (err != 0)
		goto error;
retry:
	err = __es_insert_extent(inode, &newes);
	if (err == -ENOMEM && __es_shrink(EXT4_SB(inode->i_sb),
					  128, EXT4_I(inode)))
		goto retry;
	if (err == -ENOMEM && !ext4_es_is_delayed(&newes))
		err = 0;

error:
	write_unlock(&EXT4_I(inode)->i_es_lock);

	ext4_es_print_tree(inode);

	return err;
}

/*
 * ext4_es_cache_extent() inserts information into the extent status
 * tree if and only if there isn't information about the range in
 * question already.
 */
void ext4_es_cache_extent(struct inode *inode, ext4_lblk_t lblk,
			  ext4_lblk_t len, ext4_fsblk_t pblk,
			  unsigned int status)
{
	struct extent_status *es;
	struct extent_status newes;
	ext4_lblk_t end = lblk + len - 1;

	newes.es_lblk = lblk;
	newes.es_len = len;
	ext4_es_store_pblock_status(&newes, pblk, status);
	trace_ext4_es_cache_extent(inode, &newes);

	if (!len)
		return;

	BUG_ON(end < lblk);

	write_lock(&EXT4_I(inode)->i_es_lock);

	es = __es_tree_search(&EXT4_I(inode)->i_es_tree.root, lblk);
	if (!es || es->es_lblk > end)
		__es_insert_extent(inode, &newes);
	write_unlock(&EXT4_I(inode)->i_es_lock);
}
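
/*
 * Unlike ext4_es_insert_extent(), which removes any overlapping
 * entries first and therefore always wins, ext4_es_cache_extent()
 * backs off as soon as the range is already described. Illustrative:
 * with [0/10) in the tree, caching [5/3) is a no-op, while inserting
 * [5/3) would carve the existing extent into [0/5) and [8/2) and then
 * add the new one.
 */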

/*
 * ext4_es_lookup_extent() looks up an extent in the extent status tree.
 *
 * ext4_es_lookup_extent is called by ext4_map_blocks/ext4_da_map_blocks.
 *
 * Return: 1 if found, 0 if not.
 */
int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk,
			  struct extent_status *es)
{
	struct ext4_es_tree *tree;
	struct ext4_es_stats *stats;
	struct extent_status *es1 = NULL;
	struct rb_node *node;
	int found = 0;

	trace_ext4_es_lookup_extent_enter(inode, lblk);
	es_debug("lookup extent in block %u\n", lblk);

	tree = &EXT4_I(inode)->i_es_tree;
	read_lock(&EXT4_I(inode)->i_es_lock);

	/* find extent in cache first */
	es->es_lblk = es->es_len = es->es_pblk = 0;
	if (tree->cache_es) {
		es1 = tree->cache_es;
		if (in_range(lblk, es1->es_lblk, es1->es_len)) {
			es_debug("%u cached by [%u/%u)\n",
				 lblk, es1->es_lblk, es1->es_len);
			found = 1;
			goto out;
		}
	}

	node = tree->root.rb_node;
	while (node) {
		es1 = rb_entry(node, struct extent_status, rb_node);
		if (lblk < es1->es_lblk)
			node = node->rb_left;
		else if (lblk > ext4_es_end(es1))
			node = node->rb_right;
		else {
			found = 1;
			break;
		}
	}

out:
	stats = &EXT4_SB(inode->i_sb)->s_es_stats;
	if (found) {
		BUG_ON(!es1);
		es->es_lblk = es1->es_lblk;
		es->es_len = es1->es_len;
		es->es_pblk = es1->es_pblk;
		/*
		 * Set the referenced bit on the node in the tree (es1),
		 * not on the caller's copy (es), so the shrinker sees it.
		 */
		if (!ext4_es_is_referenced(es1))
			ext4_es_set_referenced(es1);
		stats->es_stats_cache_hits++;
	} else {
		stats->es_stats_cache_misses++;
	}

	read_unlock(&EXT4_I(inode)->i_es_lock);

	trace_ext4_es_lookup_extent_exit(inode, es, found);
	return found;
}
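
/*
 * Illustrative: *es is a copy of the tree node, so the caller may use
 * it after i_es_lock is dropped. With written [0/10) at pblk 100
 * cached, ext4_es_lookup_extent(inode, 4, &es) returns 1 with
 * es.es_lblk == 0 and es.es_len == 10, and the caller derives the
 * mapping for block 4 as ext4_es_pblock(&es) + 4 - es.es_lblk == 104.
 */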

static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
			      ext4_lblk_t end)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct rb_node *node;
	struct extent_status *es;
	struct extent_status orig_es;
	ext4_lblk_t len1, len2;
	ext4_fsblk_t block;
	int err;

retry:
	err = 0;
	es = __es_tree_search(&tree->root, lblk);
	if (!es)
		goto out;
	if (es->es_lblk > end)
		goto out;

	/* Simply invalidate cache_es. */
	tree->cache_es = NULL;

	orig_es.es_lblk = es->es_lblk;
	orig_es.es_len = es->es_len;
	orig_es.es_pblk = es->es_pblk;

	len1 = lblk > es->es_lblk ? lblk - es->es_lblk : 0;
	len2 = ext4_es_end(es) > end ? ext4_es_end(es) - end : 0;
	if (len1 > 0)
		es->es_len = len1;
	if (len2 > 0) {
		if (len1 > 0) {
			struct extent_status newes;

			newes.es_lblk = end + 1;
			newes.es_len = len2;
			block = 0x7FDEADBEEFULL;
			if (ext4_es_is_written(&orig_es) ||
			    ext4_es_is_unwritten(&orig_es))
				block = ext4_es_pblock(&orig_es) +
					orig_es.es_len - len2;
			ext4_es_store_pblock_status(&newes, block,
						    ext4_es_status(&orig_es));
			err = __es_insert_extent(inode, &newes);
			if (err) {
				es->es_lblk = orig_es.es_lblk;
				es->es_len = orig_es.es_len;
				if ((err == -ENOMEM) &&
				    __es_shrink(EXT4_SB(inode->i_sb),
							128, EXT4_I(inode)))
					goto retry;
				goto out;
			}
		} else {
			es->es_lblk = end + 1;
			es->es_len = len2;
			if (ext4_es_is_written(es) ||
			    ext4_es_is_unwritten(es)) {
				block = orig_es.es_pblk + orig_es.es_len - len2;
				ext4_es_store_pblock(es, block);
			}
		}
		goto out;
	}

	if (len1 > 0) {
		node = rb_next(&es->rb_node);
		if (node)
			es = rb_entry(node, struct extent_status, rb_node);
		else
			es = NULL;
	}

	while (es && ext4_es_end(es) <= end) {
		node = rb_next(&es->rb_node);
		rb_erase(&es->rb_node, &tree->root);
		ext4_es_free_extent(inode, es);
		if (!node) {
			es = NULL;
			break;
		}
		es = rb_entry(node, struct extent_status, rb_node);
	}

	if (es && es->es_lblk < end + 1) {
		ext4_lblk_t orig_len = es->es_len;

		len1 = ext4_es_end(es) - end;
		es->es_lblk = end + 1;
		es->es_len = len1;
		if (ext4_es_is_written(es) || ext4_es_is_unwritten(es)) {
			block = es->es_pblk + orig_len - len1;
			ext4_es_store_pblock(es, block);
		}
	}

out:
	return err;
}

/*
 * ext4_es_remove_extent() removes a space from an extent status tree.
 *
 * Return 0 on success, error code on failure.
 */
int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
			  ext4_lblk_t len)
{
	ext4_lblk_t end;
	int err = 0;

	trace_ext4_es_remove_extent(inode, lblk, len);
	es_debug("remove [%u/%u) from extent status tree of inode %lu\n",
		 lblk, len, inode->i_ino);

	if (!len)
		return err;

	end = lblk + len - 1;
	BUG_ON(end < lblk);

	/*
	 * ext4_clear_inode() depends on us taking i_es_lock unconditionally
	 * so that we are sure __es_shrink() is done with the inode before it
	 * is reclaimed.
	 */
	write_lock(&EXT4_I(inode)->i_es_lock);
	err = __es_remove_extent(inode, lblk, end);
	write_unlock(&EXT4_I(inode)->i_es_lock);
	ext4_es_print_tree(inode);
	return err;
}
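
/*
 * Worked example (illustrative): removing [3/4) from a written extent
 * [0/10) at pblk 100 trims the existing node to [0/3) and inserts a
 * new node [7/3) at pblk 107. This is the len1/len2 double-split case
 * in __es_remove_extent() above.
 */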

static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
		       struct ext4_inode_info *locked_ei)
{
	struct ext4_inode_info *ei;
	struct ext4_es_stats *es_stats;
	ktime_t start_time;
	u64 scan_time;
	int nr_to_walk;
	int nr_shrunk = 0;
	int retried = 0, nr_skipped = 0;

	es_stats = &sbi->s_es_stats;
	start_time = ktime_get();

retry:
	spin_lock(&sbi->s_es_lock);
	nr_to_walk = sbi->s_es_nr_inode;
	while (nr_to_walk-- > 0) {
		if (list_empty(&sbi->s_es_list)) {
			spin_unlock(&sbi->s_es_lock);
			goto out;
		}
		ei = list_first_entry(&sbi->s_es_list, struct ext4_inode_info,
				      i_es_list);
		/* Move the inode to the tail */
		list_move_tail(&ei->i_es_list, &sbi->s_es_list);

		/*
		 * Normally we try hard to avoid shrinking precached inodes,
		 * but we will as a last resort.
		 */
		if (!retried && ext4_test_inode_state(&ei->vfs_inode,
						EXT4_STATE_EXT_PRECACHED)) {
			nr_skipped++;
			continue;
		}

		if (ei == locked_ei || !write_trylock(&ei->i_es_lock)) {
			nr_skipped++;
			continue;
		}
		/*
		 * Now we hold i_es_lock, which protects us from inode
		 * reclaim freeing the inode from under us.
		 */
		spin_unlock(&sbi->s_es_lock);

		nr_shrunk += es_reclaim_extents(ei, &nr_to_scan);
		write_unlock(&ei->i_es_lock);

		if (nr_to_scan <= 0)
			goto out;
		spin_lock(&sbi->s_es_lock);
	}
	spin_unlock(&sbi->s_es_lock);

	/*
	 * If we skipped any inodes, and we weren't able to make any
	 * forward progress, try again to scan precached inodes.
	 */
	if ((nr_shrunk == 0) && nr_skipped && !retried) {
		retried++;
		goto retry;
	}

	if (locked_ei && nr_shrunk == 0)
		nr_shrunk = es_reclaim_extents(locked_ei, &nr_to_scan);

out:
	scan_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));
	if (likely(es_stats->es_stats_scan_time))
		es_stats->es_stats_scan_time = (scan_time +
				es_stats->es_stats_scan_time*3) / 4;
	else
		es_stats->es_stats_scan_time = scan_time;
	if (scan_time > es_stats->es_stats_max_scan_time)
		es_stats->es_stats_max_scan_time = scan_time;
	if (likely(es_stats->es_stats_shrunk))
		es_stats->es_stats_shrunk = (nr_shrunk +
				es_stats->es_stats_shrunk*3) / 4;
	else
		es_stats->es_stats_shrunk = nr_shrunk;

	trace_ext4_es_shrink(sbi->s_sb, nr_shrunk, scan_time,
			     nr_skipped, retried);
	return nr_shrunk;
}

static unsigned long ext4_es_count(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	unsigned long nr;
	struct ext4_sb_info *sbi;

	sbi = container_of(shrink, struct ext4_sb_info, s_es_shrinker);
	nr = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt);
	trace_ext4_es_shrink_count(sbi->s_sb, sc->nr_to_scan, nr);
	return nr;
}

static unsigned long ext4_es_scan(struct shrinker *shrink,
				  struct shrink_control *sc)
{
	struct ext4_sb_info *sbi = container_of(shrink,
				struct ext4_sb_info, s_es_shrinker);
	int nr_to_scan = sc->nr_to_scan;
	int ret, nr_shrunk;

	ret = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt);
	trace_ext4_es_shrink_scan_enter(sbi->s_sb, nr_to_scan, ret);

	if (!nr_to_scan)
		return ret;

	nr_shrunk = __es_shrink(sbi, nr_to_scan, NULL);

	trace_ext4_es_shrink_scan_exit(sbi->s_sb, nr_shrunk, ret);
	return nr_shrunk;
}
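
/*
 * The two callbacks above implement the kernel shrinker contract:
 * ext4_es_count() reports how many reclaimable (non-delayed) objects
 * exist, and ext4_es_scan() is then asked to free up to
 * sc->nr_to_scan of them. Illustrative: with 10000 reclaimable
 * extents cached, memory pressure may invoke ext4_es_scan() with
 * nr_to_scan == 128, which walks s_es_list round-robin and reclaims
 * at most 128 extents across the queued inodes.
 */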

static void *ext4_es_seq_shrinker_info_start(struct seq_file *seq, loff_t *pos)
{
	return *pos ? NULL : SEQ_START_TOKEN;
}

static void *
ext4_es_seq_shrinker_info_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return NULL;
}

static int ext4_es_seq_shrinker_info_show(struct seq_file *seq, void *v)
{
	struct ext4_sb_info *sbi = seq->private;
	struct ext4_es_stats *es_stats = &sbi->s_es_stats;
	struct ext4_inode_info *ei, *max = NULL;
	unsigned int inode_cnt = 0;

	if (v != SEQ_START_TOKEN)
		return 0;

	/* here we just find an inode that has the max nr. of objects */
	spin_lock(&sbi->s_es_lock);
	list_for_each_entry(ei, &sbi->s_es_list, i_es_list) {
		inode_cnt++;
		if (max && max->i_es_all_nr < ei->i_es_all_nr)
			max = ei;
		else if (!max)
			max = ei;
	}
	spin_unlock(&sbi->s_es_lock);

	seq_printf(seq, "stats:\n %lld objects\n %lld reclaimable objects\n",
		   percpu_counter_sum_positive(&es_stats->es_stats_all_cnt),
		   percpu_counter_sum_positive(&es_stats->es_stats_shk_cnt));
	seq_printf(seq, " %lu/%lu cache hits/misses\n",
		   es_stats->es_stats_cache_hits,
		   es_stats->es_stats_cache_misses);
	if (inode_cnt)
		seq_printf(seq, " %d inodes on list\n", inode_cnt);

	seq_printf(seq, "average:\n %llu us scan time\n",
		   div_u64(es_stats->es_stats_scan_time, 1000));
	seq_printf(seq, " %lu shrunk objects\n", es_stats->es_stats_shrunk);
	if (inode_cnt)
		seq_printf(seq,
		    "maximum:\n %lu inode (%u objects, %u reclaimable)\n"
		    " %llu us max scan time\n",
		    max->vfs_inode.i_ino, max->i_es_all_nr, max->i_es_shk_nr,
		    div_u64(es_stats->es_stats_max_scan_time, 1000));

	return 0;
}

static void ext4_es_seq_shrinker_info_stop(struct seq_file *seq, void *v)
{
}

static const struct seq_operations ext4_es_seq_shrinker_info_ops = {
	.start = ext4_es_seq_shrinker_info_start,
	.next = ext4_es_seq_shrinker_info_next,
	.stop = ext4_es_seq_shrinker_info_stop,
	.show = ext4_es_seq_shrinker_info_show,
};

static int
ext4_es_seq_shrinker_info_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = seq_open(file, &ext4_es_seq_shrinker_info_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;
		m->private = PDE_DATA(inode);
	}

	return ret;
}

static int
ext4_es_seq_shrinker_info_release(struct inode *inode, struct file *file)
{
	return seq_release(inode, file);
}

static const struct file_operations ext4_es_seq_shrinker_info_fops = {
	.owner		= THIS_MODULE,
	.open		= ext4_es_seq_shrinker_info_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= ext4_es_seq_shrinker_info_release,
};

int ext4_es_register_shrinker(struct ext4_sb_info *sbi)
{
	int err;

	/* Make sure we have enough bits for physical block number */
	BUILD_BUG_ON(ES_SHIFT < 48);
	INIT_LIST_HEAD(&sbi->s_es_list);
	sbi->s_es_nr_inode = 0;
	spin_lock_init(&sbi->s_es_lock);
	sbi->s_es_stats.es_stats_shrunk = 0;
	sbi->s_es_stats.es_stats_cache_hits = 0;
	sbi->s_es_stats.es_stats_cache_misses = 0;
	sbi->s_es_stats.es_stats_scan_time = 0;
	sbi->s_es_stats.es_stats_max_scan_time = 0;
	err = percpu_counter_init(&sbi->s_es_stats.es_stats_all_cnt, 0, GFP_KERNEL);
	if (err)
		return err;
	err = percpu_counter_init(&sbi->s_es_stats.es_stats_shk_cnt, 0, GFP_KERNEL);
	if (err)
		goto err1;

	sbi->s_es_shrinker.scan_objects = ext4_es_scan;
	sbi->s_es_shrinker.count_objects = ext4_es_count;
	sbi->s_es_shrinker.seeks = DEFAULT_SEEKS;
	err = register_shrinker(&sbi->s_es_shrinker);
	if (err)
		goto err2;

	if (sbi->s_proc)
		proc_create_data("es_shrinker_info", S_IRUGO, sbi->s_proc,
				 &ext4_es_seq_shrinker_info_fops, sbi);

	return 0;

err2:
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_shk_cnt);
err1:
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_all_cnt);
	return err;
}
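
/*
 * With sbi->s_proc present, /proc/fs/ext4/<dev>/es_shrinker_info
 * renders the statistics gathered above. An illustrative sample (the
 * numbers are made up), following the seq_printf() formats in
 * ext4_es_seq_shrinker_info_show():
 *
 *	stats:
 *	 5000 objects
 *	 3000 reclaimable objects
 *	 120/30 cache hits/misses
 *	 42 inodes on list
 *	average:
 *	 500 us scan time
 *	 64 shrunk objects
 *	maximum:
 *	 131 inode (600 objects, 400 reclaimable)
 *	 2000 us max scan time
 */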

void ext4_es_unregister_shrinker(struct ext4_sb_info *sbi)
{
	if (sbi->s_proc)
		remove_proc_entry("es_shrinker_info", sbi->s_proc);
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_all_cnt);
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_shk_cnt);
	unregister_shrinker(&sbi->s_es_shrinker);
}

/*
 * Shrink extents in given inode from ei->i_es_shrink_lblk till end. Scan at
 * most *nr_to_scan extents, update *nr_to_scan accordingly.
 *
 * Return 0 if we hit the end of the tree / interval, 1 if we exhausted
 * nr_to_scan. Increment *nr_shrunk by the number of reclaimed extents. Also
 * update ei->i_es_shrink_lblk to where we should continue scanning.
 */
static int es_do_reclaim_extents(struct ext4_inode_info *ei, ext4_lblk_t end,
				 int *nr_to_scan, int *nr_shrunk)
{
	struct inode *inode = &ei->vfs_inode;
	struct ext4_es_tree *tree = &ei->i_es_tree;
	struct extent_status *es;
	struct rb_node *node;

	es = __es_tree_search(&tree->root, ei->i_es_shrink_lblk);
	if (!es)
		goto out_wrap;
	node = &es->rb_node;
	while (*nr_to_scan > 0) {
		if (es->es_lblk > end) {
			ei->i_es_shrink_lblk = end + 1;
			return 0;
		}

		(*nr_to_scan)--;
		node = rb_next(&es->rb_node);
		/*
		 * We can't reclaim delayed extents from the status tree
		 * because fiemap, bigalloc, and seek_data/hole need to
		 * use them.
		 */
		if (ext4_es_is_delayed(es))
			goto next;
		if (ext4_es_is_referenced(es)) {
			ext4_es_clear_referenced(es);
			goto next;
		}

		rb_erase(&es->rb_node, &tree->root);
		ext4_es_free_extent(inode, es);
		(*nr_shrunk)++;
next:
		if (!node)
			goto out_wrap;
		es = rb_entry(node, struct extent_status, rb_node);
	}
	ei->i_es_shrink_lblk = es->es_lblk;
	return 1;
out_wrap:
	ei->i_es_shrink_lblk = 0;
	return 0;
}

static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan)
{
	struct inode *inode = &ei->vfs_inode;
	int nr_shrunk = 0;
	ext4_lblk_t start = ei->i_es_shrink_lblk;
	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	if (ei->i_es_shk_nr == 0)
		return 0;

	if (ext4_test_inode_state(inode, EXT4_STATE_EXT_PRECACHED) &&
	    __ratelimit(&_rs))
		ext4_warning(inode->i_sb, "forced shrink of precached extents");

	/*
	 * Scan from where we stopped last time to the end of the tree,
	 * and if that wraps without exhausting nr_to_scan, scan the
	 * remainder from the start of the tree up to where we began.
	 */
	if (!es_do_reclaim_extents(ei, EXT_MAX_BLOCKS, nr_to_scan, &nr_shrunk) &&
	    start != 0)
		es_do_reclaim_extents(ei, start - 1, nr_to_scan, &nr_shrunk);

	ei->i_es_tree.cache_es = NULL;
	return nr_shrunk;
}