/*
 *  fs/ext4/extents_status.c
 *
 * Written by Yongqiang Yang <xiaoqiangnk@gmail.com>
 * Modified by
 *	Allison Henderson <achender@linux.vnet.ibm.com>
 *	Hugh Dickins <hughd@google.com>
 *	Zheng Liu <wenqing.lz@taobao.com>
 *
 * Ext4 extents status tree core functions.
 */
#include <linux/rbtree.h>
#include "ext4.h"
#include "extents_status.h"
#include "ext4_extents.h"

#include <trace/events/ext4.h>

/*
 * According to previous discussion at the Ext4 Developer Workshop, we
 * will introduce a new structure called the io tree to track all extent
 * status in order to solve some problems that we have met
 * (e.g. the reservation space warning), and to provide extent-level
 * locking.  The delayed extent tree is the first step towards this
 * goal.  It was originally built by Yongqiang Yang.  At that time it
 * was called the delay extent tree, whose only goal was to track
 * delayed extents in memory to simplify the implementation of fiemap
 * and bigalloc, and to introduce lseek SEEK_DATA/SEEK_HOLE support.
 * That is why it was still called the delay extent tree in the first
 * commit.  But to better reflect what it does, it has since been
 * renamed the extent status tree.
 *
 * Step1:
 * Currently the first step has been done.  All delayed extents are
 * tracked in the tree.  The tree maintains a delayed extent from the
 * moment a delayed allocation is issued until the delayed extent is
 * written out or invalidated.  Therefore the implementation of fiemap
 * and bigalloc is simplified, and SEEK_DATA/SEEK_HOLE are introduced.
 *
 * The following comment describes the implementation of the extent
 * status tree and future work.
 *
 * Step2:
 * In this step all extent status is tracked by the extent status tree.
 * Thus, we can first try to look up a block mapping in this tree before
 * searching the extent tree.  Hence, the single extent cache can be
 * removed because the extent status tree can do a better job.  Extents
 * in the status tree are loaded on demand, so the extent status tree
 * may not contain all of the extents in a file.  Meanwhile we define a
 * shrinker to reclaim memory from the extent status tree because a
 * fragmented extent tree will make the status tree cost too much
 * memory.  Written/unwritten/hole extents in the tree will be reclaimed
 * by this shrinker when we are under high memory pressure.  Delayed
 * extents will not be reclaimed because fiemap, bigalloc, and
 * seek_data/hole need them.
 */

/*
 * Extent status tree implementation for ext4.
 *
 *
 * ==========================================================================
 * The extent status tree tracks all extent status.
 *
 * 1. Why do we need to implement the extent status tree?
 *
 * Without the extent status tree, ext4 identifies a delayed extent by
 * looking up the page cache, which has several deficiencies:
 * complicated, buggy, and inefficient code.
 *
 * FIEMAP, SEEK_HOLE/DATA, bigalloc, and writeout all need to know
 * whether a block or a range of blocks belongs to a delayed extent.
 *
 * Let us have a look at how they work without the extent status tree.
 *
 *   -- FIEMAP
 *	FIEMAP looks up the page cache to distinguish delayed
 *	allocations from holes.
 *
 *   -- SEEK_HOLE/DATA
 *	SEEK_HOLE/DATA has the same problem as FIEMAP.
 *
 *   -- bigalloc
 *	bigalloc looks up the page cache to figure out whether a block
 *	is already under delayed allocation, in order to determine
 *	whether quota reservation is needed for the cluster.
 *
 *   -- writeout
 *	Writeout looks up the whole page cache to see whether a buffer
 *	is mapped.  Even if there are not very many delayed buffers,
 *	this is time consuming.
 *
 * With the extent status tree implementation, FIEMAP, SEEK_HOLE/DATA,
 * bigalloc and writeout can figure out whether a block or a range of
 * blocks is under delayed allocation (i.e. belongs to a delayed extent)
 * or not by searching the extent status tree.
 *
 *
 * ==========================================================================
 * 2. Ext4 extent status tree implementation
 *
 *   -- extent
 *	An extent is a range of blocks which are contiguous both
 *	logically and physically.  Unlike an extent in the extent tree,
 *	this extent is an in-memory struct; there is no corresponding
 *	on-disk data.  There is no limit on the length of an extent, so
 *	an extent can contain as many blocks as are contiguous logically
 *	and physically.
 *
 *   -- extent status tree
 *	Every inode has an extent status tree and all allocated blocks
 *	are added to the tree with different status.  The extents in the
 *	tree are ordered by logical block number.
 *
 *   -- operations on an extent status tree
 *	There are three important operations on an extent status tree:
 *	finding the next extent, adding an extent (a range of blocks)
 *	and removing an extent.
 *
 *   -- races on an extent status tree
 *	The extent status tree is protected by inode->i_es_lock.
 *
 *   -- memory consumption
 *	A fragmented extent tree will make the extent status tree cost
 *	too much memory.  Hence, we will reclaim written/unwritten/hole
 *	extents from the tree under heavy memory pressure.
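 *
 *   -- worked example
 *	(An illustrative example added for clarity; the block numbers
 *	are made up.)  Suppose an inode's status tree holds three
 *	extents: written [0/8), delayed [8/4) and hole [12/4), where
 *	[lblk/len) denotes the logical range.  A lookup of block 9 hits
 *	the delayed extent [8/4).  Once blocks 8-11 are written out at a
 *	physical location contiguous with [0/8), the delayed extent is
 *	replaced by a written one, which is then merged with its
 *	neighbour into a single written extent [0/12).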
 *
 *
 * ==========================================================================
 * 3. Performance analysis
 *
 *   -- overhead
 *	1. There is a cached extent for write access, so if writes are
 *	not very random, adding space operations run in O(1) time.
 *
 *   -- gain
 *	2. Code is much simpler, more readable, more maintainable and
 *	more efficient.
 *
 *
 * ==========================================================================
 * 4. TODO list
 *
 *   -- Refactor delayed space reservation
 *
 *   -- Extent-level locking
 */

static struct kmem_cache *ext4_es_cachep;

static int __es_insert_extent(struct inode *inode, struct extent_status *newes);
static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
			      ext4_lblk_t end);
static int __es_try_to_reclaim_extents(struct ext4_inode_info *ei,
				       int nr_to_scan);

int __init ext4_init_es(void)
{
	ext4_es_cachep = kmem_cache_create("ext4_extent_status",
					   sizeof(struct extent_status),
					   0, (SLAB_RECLAIM_ACCOUNT), NULL);
	if (ext4_es_cachep == NULL)
		return -ENOMEM;
	return 0;
}

void ext4_exit_es(void)
{
	if (ext4_es_cachep)
		kmem_cache_destroy(ext4_es_cachep);
}

void ext4_es_init_tree(struct ext4_es_tree *tree)
{
	tree->root = RB_ROOT;
	tree->cache_es = NULL;
}

#ifdef ES_DEBUG__
static void ext4_es_print_tree(struct inode *inode)
{
	struct ext4_es_tree *tree;
	struct rb_node *node;

	printk(KERN_DEBUG "status extents for inode %lu:", inode->i_ino);
	tree = &EXT4_I(inode)->i_es_tree;
	node = rb_first(&tree->root);
	while (node) {
		struct extent_status *es;
		es = rb_entry(node, struct extent_status, rb_node);
		printk(KERN_DEBUG " [%u/%u) %llu %llx",
		       es->es_lblk, es->es_len,
		       ext4_es_pblock(es), ext4_es_status(es));
		node = rb_next(node);
	}
	printk(KERN_DEBUG "\n");
}
#else
#define ext4_es_print_tree(inode)
#endif

static inline ext4_lblk_t ext4_es_end(struct extent_status *es)
{
	BUG_ON(es->es_lblk + es->es_len < es->es_lblk);
	return es->es_lblk + es->es_len - 1;
}

/*
 * Search through the tree for an extent with a given offset.  If it
 * can't be found, try to find the next extent.
 */
static struct extent_status *__es_tree_search(struct rb_root *root,
					      ext4_lblk_t lblk)
{
	struct rb_node *node = root->rb_node;
	struct extent_status *es = NULL;

	while (node) {
		es = rb_entry(node, struct extent_status, rb_node);
		if (lblk < es->es_lblk)
			node = node->rb_left;
		else if (lblk > ext4_es_end(es))
			node = node->rb_right;
		else
			return es;
	}

	if (es && lblk < es->es_lblk)
		return es;

	if (es && lblk > ext4_es_end(es)) {
		node = rb_next(&es->rb_node);
		return node ? rb_entry(node, struct extent_status, rb_node) :
			      NULL;
	}

	return NULL;
}
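/*
 * Example (illustrative, not from the original comments): with extents
 * [0/8) and [16/8) in the tree, __es_tree_search(root, 4) returns [0/8)
 * because block 4 falls inside it, __es_tree_search(root, 10) returns
 * the next extent [16/8), and __es_tree_search(root, 30) returns NULL
 * because there is no extent at or after block 30.
 */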
/*
 * ext4_es_find_delayed_extent_range: find the 1st delayed extent covering
 * @lblk if it exists, otherwise, the next extent after @lblk.
 *
 * @inode: the inode which owns delayed extents
 * @lblk: the offset where we start to search
 * @end: the offset where we stop to search
 * @es: delayed extent that we found
 */
void ext4_es_find_delayed_extent_range(struct inode *inode,
				 ext4_lblk_t lblk, ext4_lblk_t end,
				 struct extent_status *es)
{
	struct ext4_es_tree *tree = NULL;
	struct extent_status *es1 = NULL;
	struct rb_node *node;

	BUG_ON(es == NULL);
	BUG_ON(end < lblk);
	trace_ext4_es_find_delayed_extent_range_enter(inode, lblk);

	read_lock(&EXT4_I(inode)->i_es_lock);
	tree = &EXT4_I(inode)->i_es_tree;

	/* first, check the cached extent */
	es->es_lblk = es->es_len = es->es_pblk = 0;
	if (tree->cache_es) {
		es1 = tree->cache_es;
		if (in_range(lblk, es1->es_lblk, es1->es_len)) {
			es_debug("%u cached by [%u/%u) %llu %llx\n",
				 lblk, es1->es_lblk, es1->es_len,
				 ext4_es_pblock(es1), ext4_es_status(es1));
			goto out;
		}
	}

	es1 = __es_tree_search(&tree->root, lblk);

out:
	if (es1 && !ext4_es_is_delayed(es1)) {
		while ((node = rb_next(&es1->rb_node)) != NULL) {
			es1 = rb_entry(node, struct extent_status, rb_node);
			if (es1->es_lblk > end) {
				es1 = NULL;
				break;
			}
			if (ext4_es_is_delayed(es1))
				break;
		}
	}

	if (es1 && ext4_es_is_delayed(es1)) {
		tree->cache_es = es1;
		es->es_lblk = es1->es_lblk;
		es->es_len = es1->es_len;
		es->es_pblk = es1->es_pblk;
	}

	read_unlock(&EXT4_I(inode)->i_es_lock);

	ext4_es_lru_add(inode);
	trace_ext4_es_find_delayed_extent_range_exit(inode, es);
}

static struct extent_status *
ext4_es_alloc_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len,
		     ext4_fsblk_t pblk)
{
	struct extent_status *es;
	es = kmem_cache_alloc(ext4_es_cachep, GFP_ATOMIC);
	if (es == NULL)
		return NULL;
	es->es_lblk = lblk;
	es->es_len = len;
	es->es_pblk = pblk;

	/*
	 * We don't count delayed extents because we never try to reclaim
	 * them.
	 */
	if (!ext4_es_is_delayed(es)) {
		EXT4_I(inode)->i_es_lru_nr++;
		percpu_counter_inc(&EXT4_SB(inode->i_sb)->s_extent_cache_cnt);
	}

	return es;
}

static void ext4_es_free_extent(struct inode *inode, struct extent_status *es)
{
	/* Decrease the lru counter when this es is not delayed */
	if (!ext4_es_is_delayed(es)) {
		BUG_ON(EXT4_I(inode)->i_es_lru_nr == 0);
		EXT4_I(inode)->i_es_lru_nr--;
		percpu_counter_dec(&EXT4_SB(inode->i_sb)->s_extent_cache_cnt);
	}

	kmem_cache_free(ext4_es_cachep, es);
}

/*
 * Check whether or not two extents can be merged.
 * Conditions:
 *  - logical block numbers are contiguous
 *  - physical block numbers are contiguous
 *  - status is equal
 */
static int ext4_es_can_be_merged(struct extent_status *es1,
				 struct extent_status *es2)
{
	if (ext4_es_status(es1) != ext4_es_status(es2))
		return 0;

	if (((__u64) es1->es_len) + es2->es_len > 0xFFFFFFFFULL)
		return 0;

	if (((__u64) es1->es_lblk) + es1->es_len != es2->es_lblk)
		return 0;

	if ((ext4_es_is_written(es1) || ext4_es_is_unwritten(es1)) &&
	    (ext4_es_pblock(es1) + es1->es_len == ext4_es_pblock(es2)))
		return 1;

	if (ext4_es_is_hole(es1))
		return 1;

	/* a delayed extent is mergeable only if it doesn't also carry the
	 * unwritten status */
	if (ext4_es_is_delayed(es1) && !ext4_es_is_unwritten(es1))
		return 1;

	return 0;
}
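/*
 * Example (illustrative): a written extent [0/8) at physical block 100
 * can be merged with a written extent [8/4) at physical block 108.  It
 * cannot be merged with a written extent [8/4) at physical block 200
 * (physically discontiguous), with an unwritten extent [8/4) (different
 * status), or with any extent whose combined length would overflow the
 * 32-bit es_len.
 */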
static struct extent_status *
ext4_es_try_to_merge_left(struct inode *inode, struct extent_status *es)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct extent_status *es1;
	struct rb_node *node;

	node = rb_prev(&es->rb_node);
	if (!node)
		return es;

	es1 = rb_entry(node, struct extent_status, rb_node);
	if (ext4_es_can_be_merged(es1, es)) {
		es1->es_len += es->es_len;
		rb_erase(&es->rb_node, &tree->root);
		ext4_es_free_extent(inode, es);
		es = es1;
	}

	return es;
}

static struct extent_status *
ext4_es_try_to_merge_right(struct inode *inode, struct extent_status *es)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct extent_status *es1;
	struct rb_node *node;

	node = rb_next(&es->rb_node);
	if (!node)
		return es;

	es1 = rb_entry(node, struct extent_status, rb_node);
	if (ext4_es_can_be_merged(es, es1)) {
		es->es_len += es1->es_len;
		rb_erase(node, &tree->root);
		ext4_es_free_extent(inode, es1);
	}

	return es;
}

#ifdef ES_AGGRESSIVE_TEST
static void ext4_es_insert_extent_ext_check(struct inode *inode,
					    struct extent_status *es)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_extent *ex;
	ext4_lblk_t ee_block;
	ext4_fsblk_t ee_start;
	unsigned short ee_len;
	int depth, ee_status, es_status;

	path = ext4_ext_find_extent(inode, es->es_lblk, NULL);
	if (IS_ERR(path))
		return;

	depth = ext_depth(inode);
	ex = path[depth].p_ext;

	if (ex) {

		ee_block = le32_to_cpu(ex->ee_block);
		ee_start = ext4_ext_pblock(ex);
		ee_len = ext4_ext_get_actual_len(ex);

		ee_status = ext4_ext_is_uninitialized(ex) ? 1 : 0;
		es_status = ext4_es_is_unwritten(es) ? 1 : 0;

		/*
		 * Make sure ex and es don't overlap when we try to insert
		 * a delayed/hole extent.
		 */
		if (!ext4_es_is_written(es) && !ext4_es_is_unwritten(es)) {
			if (in_range(es->es_lblk, ee_block, ee_len)) {
				pr_warn("ES insert assertion failed for "
					"inode: %lu we can find an extent "
					"at block [%d/%d/%llu/%c], but we "
					"want to add a delayed/hole extent "
					"[%d/%d/%llu/%llx]\n",
					inode->i_ino, ee_block, ee_len,
					ee_start, ee_status ? 'u' : 'w',
					es->es_lblk, es->es_len,
					ext4_es_pblock(es), ext4_es_status(es));
			}
			goto out;
		}

		/*
		 * We don't check ee_block == es->es_lblk, etc. because es
		 * might be a part of a whole extent, and vice versa.
		 */
		if (es->es_lblk < ee_block ||
		    ext4_es_pblock(es) != ee_start + es->es_lblk - ee_block) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"ex_status [%d/%d/%llu/%c] != "
				"es_status [%d/%d/%llu/%c]\n", inode->i_ino,
				ee_block, ee_len, ee_start,
				ee_status ? 'u' : 'w', es->es_lblk, es->es_len,
				ext4_es_pblock(es), es_status ? 'u' : 'w');
			goto out;
		}

		if (ee_status ^ es_status) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"ex_status [%d/%d/%llu/%c] != "
				"es_status [%d/%d/%llu/%c]\n", inode->i_ino,
				ee_block, ee_len, ee_start,
				ee_status ? 'u' : 'w', es->es_lblk, es->es_len,
				ext4_es_pblock(es), es_status ? 'u' : 'w');
		}
	} else {
		/*
		 * We can't find an extent on disk.  So we need to make sure
		 * that we don't want to add a written/unwritten extent.
		 */
		if (!ext4_es_is_delayed(es) && !ext4_es_is_hole(es)) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"can't find an extent at block %d but we want "
				"to add a written/unwritten extent "
				"[%d/%d/%llu/%llx]\n", inode->i_ino,
				es->es_lblk, es->es_lblk, es->es_len,
				ext4_es_pblock(es), ext4_es_status(es));
		}
	}
out:
	if (path) {
		ext4_ext_drop_refs(path);
		kfree(path);
	}
}

static void ext4_es_insert_extent_ind_check(struct inode *inode,
					    struct extent_status *es)
{
	struct ext4_map_blocks map;
	int retval;

	/*
	 * Here we call ext4_ind_map_blocks to look up a block mapping
	 * because the 'Indirect' structure is defined in indirect.c.  So
	 * we can't access the direct/indirect tree from outside, and it
	 * would be too ugly to define this function in indirect.c.
	 */

	map.m_lblk = es->es_lblk;
	map.m_len = es->es_len;

	retval = ext4_ind_map_blocks(NULL, inode, &map, 0);
	if (retval > 0) {
		if (ext4_es_is_delayed(es) || ext4_es_is_hole(es)) {
			/*
			 * We want to add a delayed/hole extent but this
			 * block has been allocated.
			 */
			pr_warn("ES insert assertion failed for inode: %lu "
				"We can find blocks but we want to add a "
				"delayed/hole extent [%d/%d/%llu/%llx]\n",
				inode->i_ino, es->es_lblk, es->es_len,
				ext4_es_pblock(es), ext4_es_status(es));
			return;
		} else if (ext4_es_is_written(es)) {
			if (retval != es->es_len) {
				pr_warn("ES insert assertion failed for "
					"inode: %lu retval %d != es_len %d\n",
					inode->i_ino, retval, es->es_len);
				return;
			}
			if (map.m_pblk != ext4_es_pblock(es)) {
				pr_warn("ES insert assertion failed for "
					"inode: %lu m_pblk %llu != "
					"es_pblk %llu\n",
					inode->i_ino, map.m_pblk,
					ext4_es_pblock(es));
				return;
			}
		} else {
			/*
			 * We don't need to check unwritten extents because
			 * indirect-based files don't have them.
			 */
			BUG_ON(1);
		}
	} else if (retval == 0) {
		if (ext4_es_is_written(es)) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"We can't find the block but we want to add "
				"a written extent [%d/%d/%llu/%llx]\n",
				inode->i_ino, es->es_lblk, es->es_len,
				ext4_es_pblock(es), ext4_es_status(es));
			return;
		}
	}
}

static inline void ext4_es_insert_extent_check(struct inode *inode,
					       struct extent_status *es)
{
	/*
	 * We don't need to worry about the race condition because the
	 * caller holds the i_data_sem lock.
	 */
	BUG_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem));
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		ext4_es_insert_extent_ext_check(inode, es);
	else
		ext4_es_insert_extent_ind_check(inode, es);
}
#else
static inline void ext4_es_insert_extent_check(struct inode *inode,
					       struct extent_status *es)
{
}
#endif

static int __es_insert_extent(struct inode *inode, struct extent_status *newes)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct rb_node **p = &tree->root.rb_node;
	struct rb_node *parent = NULL;
	struct extent_status *es;

	while (*p) {
		parent = *p;
		es = rb_entry(parent, struct extent_status, rb_node);

		if (newes->es_lblk < es->es_lblk) {
			if (ext4_es_can_be_merged(newes, es)) {
				/*
				 * Here we can modify es_lblk directly
				 * because it isn't overlapped.
				 */
				es->es_lblk = newes->es_lblk;
				es->es_len += newes->es_len;
				if (ext4_es_is_written(es) ||
				    ext4_es_is_unwritten(es))
					ext4_es_store_pblock(es,
							     newes->es_pblk);
				es = ext4_es_try_to_merge_left(inode, es);
				goto out;
			}
			p = &(*p)->rb_left;
		} else if (newes->es_lblk > ext4_es_end(es)) {
			if (ext4_es_can_be_merged(es, newes)) {
				es->es_len += newes->es_len;
				es = ext4_es_try_to_merge_right(inode, es);
				goto out;
			}
			p = &(*p)->rb_right;
		} else {
			BUG_ON(1);
			return -EINVAL;
		}
	}

	es = ext4_es_alloc_extent(inode, newes->es_lblk, newes->es_len,
				  newes->es_pblk);
	if (!es)
		return -ENOMEM;
	rb_link_node(&es->rb_node, parent, p);
	rb_insert_color(&es->rb_node, &tree->root);

out:
	tree->cache_es = es;
	return 0;
}

/*
 * ext4_es_insert_extent() adds a space to an extent status tree.
 *
 * ext4_es_insert_extent is called by ext4_da_write_begin and
 * ext4_es_remove_extent.
 *
 * Return 0 on success, error code on failure.
 */
int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
			  ext4_lblk_t len, ext4_fsblk_t pblk,
			  unsigned long long status)
{
	struct extent_status newes;
	ext4_lblk_t end = lblk + len - 1;
	int err = 0;

	es_debug("add [%u/%u) %llu %llx to extent status tree of inode %lu\n",
		 lblk, len, pblk, status, inode->i_ino);

	if (!len)
		return 0;

	BUG_ON(end < lblk);

	newes.es_lblk = lblk;
	newes.es_len = len;
	ext4_es_store_pblock(&newes, pblk);
	ext4_es_store_status(&newes, status);
	trace_ext4_es_insert_extent(inode, &newes);

	ext4_es_insert_extent_check(inode, &newes);

	write_lock(&EXT4_I(inode)->i_es_lock);
	err = __es_remove_extent(inode, lblk, end);
	if (err != 0)
		goto error;
	err = __es_insert_extent(inode, &newes);

error:
	write_unlock(&EXT4_I(inode)->i_es_lock);

	ext4_es_lru_add(inode);
	ext4_es_print_tree(inode);

	return err;
}
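#ifdef ES_DEBUG__
/*
 * Debug-only usage sketch (a hypothetical helper, not part of the
 * original file): mark a freshly allocated range as written in the
 * status tree, mirroring what ext4_es_zeroout() does below.  As with
 * any insertion, the caller is expected to hold i_data_sem.
 */
static int __maybe_unused
ext4_es_demo_mark_written(struct inode *inode, ext4_lblk_t lblk,
			  ext4_lblk_t len, ext4_fsblk_t pblk)
{
	return ext4_es_insert_extent(inode, lblk, len, pblk,
				     EXTENT_STATUS_WRITTEN);
}
#endif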
/*
 * ext4_es_lookup_extent() looks up an extent in the extent status tree.
 *
 * ext4_es_lookup_extent is called by ext4_map_blocks/ext4_da_map_blocks.
 *
 * Return: 1 if found, 0 if not.
 */
int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk,
			  struct extent_status *es)
{
	struct ext4_es_tree *tree;
	struct extent_status *es1 = NULL;
	struct rb_node *node;
	int found = 0;

	trace_ext4_es_lookup_extent_enter(inode, lblk);
	es_debug("lookup extent in block %u\n", lblk);

	tree = &EXT4_I(inode)->i_es_tree;
	read_lock(&EXT4_I(inode)->i_es_lock);

	/* first, check the cached extent */
	es->es_lblk = es->es_len = es->es_pblk = 0;
	if (tree->cache_es) {
		es1 = tree->cache_es;
		if (in_range(lblk, es1->es_lblk, es1->es_len)) {
			es_debug("%u cached by [%u/%u)\n",
				 lblk, es1->es_lblk, es1->es_len);
			found = 1;
			goto out;
		}
	}

	node = tree->root.rb_node;
	while (node) {
		es1 = rb_entry(node, struct extent_status, rb_node);
		if (lblk < es1->es_lblk)
			node = node->rb_left;
		else if (lblk > ext4_es_end(es1))
			node = node->rb_right;
		else {
			found = 1;
			break;
		}
	}

out:
	if (found) {
		BUG_ON(!es1);
		es->es_lblk = es1->es_lblk;
		es->es_len = es1->es_len;
		es->es_pblk = es1->es_pblk;
	}

	read_unlock(&EXT4_I(inode)->i_es_lock);

	ext4_es_lru_add(inode);
	trace_ext4_es_lookup_extent_exit(inode, es, found);
	return found;
}
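/*
 * Removal example (illustrative): removing blocks 10-13 from a written
 * extent [8/8) leaves two pieces, [8/2) and [14/2).  __es_remove_extent()
 * below shrinks the existing node to the leading piece (len1) and inserts
 * a new node for the trailing piece (len2), shifting the tail's physical
 * block by orig_len - len2 so that it still maps to the right on-disk
 * blocks.
 */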
static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
			      ext4_lblk_t end)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct rb_node *node;
	struct extent_status *es;
	struct extent_status orig_es;
	ext4_lblk_t len1, len2;
	ext4_fsblk_t block;
	int err = 0;

	es = __es_tree_search(&tree->root, lblk);
	if (!es)
		goto out;
	if (es->es_lblk > end)
		goto out;

	/* Simply invalidate cache_es. */
	tree->cache_es = NULL;

	orig_es.es_lblk = es->es_lblk;
	orig_es.es_len = es->es_len;
	orig_es.es_pblk = es->es_pblk;

	len1 = lblk > es->es_lblk ? lblk - es->es_lblk : 0;
	len2 = ext4_es_end(es) > end ? ext4_es_end(es) - end : 0;
	if (len1 > 0)
		es->es_len = len1;
	if (len2 > 0) {
		if (len1 > 0) {
			struct extent_status newes;

			newes.es_lblk = end + 1;
			newes.es_len = len2;
			if (ext4_es_is_written(&orig_es) ||
			    ext4_es_is_unwritten(&orig_es)) {
				block = ext4_es_pblock(&orig_es) +
					orig_es.es_len - len2;
				ext4_es_store_pblock(&newes, block);
			}
			ext4_es_store_status(&newes, ext4_es_status(&orig_es));
			err = __es_insert_extent(inode, &newes);
			if (err) {
				es->es_lblk = orig_es.es_lblk;
				es->es_len = orig_es.es_len;
				goto out;
			}
		} else {
			es->es_lblk = end + 1;
			es->es_len = len2;
			if (ext4_es_is_written(es) ||
			    ext4_es_is_unwritten(es)) {
				block = orig_es.es_pblk + orig_es.es_len - len2;
				ext4_es_store_pblock(es, block);
			}
		}
		goto out;
	}

	if (len1 > 0) {
		node = rb_next(&es->rb_node);
		if (node)
			es = rb_entry(node, struct extent_status, rb_node);
		else
			es = NULL;
	}

	while (es && ext4_es_end(es) <= end) {
		node = rb_next(&es->rb_node);
		rb_erase(&es->rb_node, &tree->root);
		ext4_es_free_extent(inode, es);
		if (!node) {
			es = NULL;
			break;
		}
		es = rb_entry(node, struct extent_status, rb_node);
	}

	if (es && es->es_lblk < end + 1) {
		ext4_lblk_t orig_len = es->es_len;

		len1 = ext4_es_end(es) - end;
		es->es_lblk = end + 1;
		es->es_len = len1;
		if (ext4_es_is_written(es) || ext4_es_is_unwritten(es)) {
			block = es->es_pblk + orig_len - len1;
			ext4_es_store_pblock(es, block);
		}
	}

out:
	return err;
}

/*
 * ext4_es_remove_extent() removes a space from an extent status tree.
 *
 * Return 0 on success, error code on failure.
 */
int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
			  ext4_lblk_t len)
{
	ext4_lblk_t end;
	int err = 0;

	trace_ext4_es_remove_extent(inode, lblk, len);
	es_debug("remove [%u/%u) from extent status tree of inode %lu\n",
		 lblk, len, inode->i_ino);

	if (!len)
		return err;

	end = lblk + len - 1;
	BUG_ON(end < lblk);

	write_lock(&EXT4_I(inode)->i_es_lock);
	err = __es_remove_extent(inode, lblk, end);
	write_unlock(&EXT4_I(inode)->i_es_lock);
	ext4_es_print_tree(inode);
	return err;
}

int ext4_es_zeroout(struct inode *inode, struct ext4_extent *ex)
{
	ext4_lblk_t ee_block;
	ext4_fsblk_t ee_pblock;
	unsigned int ee_len;

	ee_block = le32_to_cpu(ex->ee_block);
	ee_len = ext4_ext_get_actual_len(ex);
	ee_pblock = ext4_ext_pblock(ex);

	if (ee_len == 0)
		return 0;

	return ext4_es_insert_extent(inode, ee_block, ee_len, ee_pblock,
				     EXTENT_STATUS_WRITTEN);
}
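/*
 * Shrinker example (illustrative): when the MM layer calls
 * ext4_es_shrink() below with a nonzero sc->nr_to_scan, inodes are
 * scanned in LRU order and only non-delayed extents are freed.  A tree
 * holding written [0/4), delayed [4/4) and hole [8/4) can therefore be
 * shrunk to just the delayed extent [4/4).
 */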
static int ext4_es_shrink(struct shrinker *shrink, struct shrink_control *sc)
{
	struct ext4_sb_info *sbi = container_of(shrink,
					struct ext4_sb_info, s_es_shrinker);
	struct ext4_inode_info *ei;
	struct list_head *cur, *tmp, scanned;
	int nr_to_scan = sc->nr_to_scan;
	int ret, nr_shrunk = 0;

	ret = percpu_counter_read_positive(&sbi->s_extent_cache_cnt);
	trace_ext4_es_shrink_enter(sbi->s_sb, nr_to_scan, ret);

	if (!nr_to_scan)
		return ret;

	INIT_LIST_HEAD(&scanned);

	spin_lock(&sbi->s_es_lru_lock);
	list_for_each_safe(cur, tmp, &sbi->s_es_lru) {
		list_move_tail(cur, &scanned);

		ei = list_entry(cur, struct ext4_inode_info, i_es_lru);

		read_lock(&ei->i_es_lock);
		if (ei->i_es_lru_nr == 0) {
			read_unlock(&ei->i_es_lock);
			continue;
		}
		read_unlock(&ei->i_es_lock);

		write_lock(&ei->i_es_lock);
		ret = __es_try_to_reclaim_extents(ei, nr_to_scan);
		write_unlock(&ei->i_es_lock);

		nr_shrunk += ret;
		nr_to_scan -= ret;
		if (nr_to_scan == 0)
			break;
	}
	list_splice_tail(&scanned, &sbi->s_es_lru);
	spin_unlock(&sbi->s_es_lru_lock);

	ret = percpu_counter_read_positive(&sbi->s_extent_cache_cnt);
	trace_ext4_es_shrink_exit(sbi->s_sb, nr_shrunk, ret);
	return ret;
}

void ext4_es_register_shrinker(struct super_block *sb)
{
	struct ext4_sb_info *sbi;

	sbi = EXT4_SB(sb);
	INIT_LIST_HEAD(&sbi->s_es_lru);
	spin_lock_init(&sbi->s_es_lru_lock);
	sbi->s_es_shrinker.shrink = ext4_es_shrink;
	sbi->s_es_shrinker.seeks = DEFAULT_SEEKS;
	register_shrinker(&sbi->s_es_shrinker);
}

void ext4_es_unregister_shrinker(struct super_block *sb)
{
	unregister_shrinker(&EXT4_SB(sb)->s_es_shrinker);
}

void ext4_es_lru_add(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

	spin_lock(&sbi->s_es_lru_lock);
	if (list_empty(&ei->i_es_lru))
		list_add_tail(&ei->i_es_lru, &sbi->s_es_lru);
	else
		list_move_tail(&ei->i_es_lru, &sbi->s_es_lru);
	spin_unlock(&sbi->s_es_lru_lock);
}

void ext4_es_lru_del(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

	spin_lock(&sbi->s_es_lru_lock);
	if (!list_empty(&ei->i_es_lru))
		list_del_init(&ei->i_es_lru);
	spin_unlock(&sbi->s_es_lru_lock);
}

static int __es_try_to_reclaim_extents(struct ext4_inode_info *ei,
				       int nr_to_scan)
{
	struct inode *inode = &ei->vfs_inode;
	struct ext4_es_tree *tree = &ei->i_es_tree;
	struct rb_node *node;
	struct extent_status *es;
	int nr_shrunk = 0;

	if (ei->i_es_lru_nr == 0)
		return 0;

	node = rb_first(&tree->root);
	while (node != NULL) {
		es = rb_entry(node, struct extent_status, rb_node);
		node = rb_next(&es->rb_node);
		/*
		 * We can't reclaim delayed extents from the status tree
		 * because fiemap, bigalloc, and seek_data/hole need to
		 * use them.
		 */
		if (!ext4_es_is_delayed(es)) {
			rb_erase(&es->rb_node, &tree->root);
			ext4_es_free_extent(inode, es);
			nr_shrunk++;
			if (--nr_to_scan == 0)
				break;
		}
	}
	tree->cache_es = NULL;
	return nr_shrunk;
}