// SPDX-License-Identifier: GPL-2.0
/*
 *  fs/ext4/extents_status.c
 *
 * Written by Yongqiang Yang <xiaoqiangnk@gmail.com>
 * Modified by
 *	Allison Henderson <achender@linux.vnet.ibm.com>
 *	Hugh Dickins <hughd@google.com>
 *	Zheng Liu <wenqing.lz@taobao.com>
 *
 * Ext4 extents status tree core functions.
 */
#include <linux/list_sort.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include "ext4.h"

#include <trace/events/ext4.h>

/*
 * According to previous discussion in Ext4 Developer Workshop, we
 * will introduce a new structure called io tree to track all extent
 * status in order to solve some problems that we have met
 * (e.g. Reservation space warning), and provide extent-level locking.
 * The delay extent tree is the first step to achieve this goal.  It
 * was originally built by Yongqiang Yang.  At that time it was called
 * the delay extent tree, whose only goal was to track delayed extents
 * in memory to simplify the implementation of fiemap and bigalloc,
 * and to introduce lseek SEEK_DATA/SEEK_HOLE support.  That is why it
 * is still called the delay extent tree in the first commit.  But to
 * better describe what it does, it has been renamed to the extent
 * status tree.
 *
 * Step1:
 *	Currently the first step has been done.  All delayed extents are
 *	tracked in the tree.  A delayed extent is added to the tree when a
 *	delayed allocation is issued, and removed when it is written out
 *	or invalidated.  Therefore the implementation of fiemap and
 *	bigalloc is simplified, and SEEK_DATA/SEEK_HOLE are introduced.
 *
 * The following comment describes the implementation of the extent
 * status tree and future works.
 *
 * Step2:
 *	In this step all extent status is tracked by the extent status
 *	tree.  Thus, we can first try to look up a block mapping in this
 *	tree before searching the extent tree.  Hence, the single extent
 *	cache can be removed because the extent status tree can do a
 *	better job.  Extents in the status tree are loaded on-demand.
 *	Therefore, the extent status tree may not contain all of the
 *	extents in a file.  Meanwhile we define a shrinker to reclaim
 *	memory from the extent status tree because a fragmented extent
 *	tree will make the status tree cost too much memory.
 *	Written/unwritten/hole extents in the tree will be reclaimed by
 *	this shrinker when we are under high memory pressure.  Delayed
 *	extents will not be reclaimed because fiemap, bigalloc, and
 *	seek_data/hole need them.
 */

/*
 * Extent status tree implementation for ext4.
 *
 *
 * ==========================================================================
 * Extent status tree tracks all extent status.
 *
 * 1. Why do we need to implement the extent status tree?
 *
 * Without the extent status tree, ext4 identifies a delayed extent by
 * looking up the page cache, which has several deficiencies: complicated,
 * buggy, and inefficient code.
 *
 * FIEMAP, SEEK_HOLE/DATA, bigalloc, and writeout all need to know if a
 * block or a range of blocks belongs to a delayed extent.
 *
 * Let us have a look at how they work without the extent status tree.
 *
 * --	FIEMAP
 *	FIEMAP looks up the page cache to identify delayed allocations
 *	from holes.
 *
 * --	SEEK_HOLE/DATA
 *	SEEK_HOLE/DATA has the same problem as FIEMAP.
 *
 * --	bigalloc
 *	bigalloc looks up the page cache to figure out if a block is
 *	already under delayed allocation or not in order to determine
 *	whether a quota reservation is needed for the cluster.
 *
 * --	writeout
 *	Writeout looks up the whole page cache to see if a buffer is
 *	mapped.  If there are not very many delayed buffers, this is
 *	time consuming.
 *
 * With the extent status tree implementation, FIEMAP, SEEK_HOLE/DATA,
 * bigalloc and writeout can figure out if a block or a range of
 * blocks is under delayed allocation (belongs to a delayed extent) or
 * not by searching the extent status tree.
 *
 *
 * ==========================================================================
 * 2. Ext4 extent status tree implementation
 *
 * --	extent
 *	An extent is a range of blocks which are contiguous logically and
 *	physically.  Unlike an extent in the extent tree, this extent is
 *	an in-memory struct; there is no corresponding on-disk data.
 *	There is no limit on the length of an extent, so an extent can
 *	contain as many blocks as are contiguous logically and physically.
 *
 * --	extent status tree
 *	Every inode has an extent status tree and all allocated blocks
 *	are added to the tree with different status.  The extents in the
 *	tree are ordered by logical block number.
 *
 * --	operations on an extent status tree
 *	There are three important operations on an extent status tree:
 *	finding the next extent, adding an extent (a range of blocks),
 *	and removing an extent.
 *
 * --	race on an extent status tree
 *	The extent status tree is protected by inode->i_es_lock.
 *
 * --	memory consumption
 *	A fragmented extent tree will make the extent status tree cost
 *	too much memory.  Hence, we will reclaim written/unwritten/hole
 *	extents from the tree under heavy memory pressure.
 *
 *
 * ==========================================================================
 * 3. Performance analysis
 *
 * --	overhead
 *	1. There is a cached extent for write access, so if writes are
 *	not very random, adding-space operations are in O(1) time.
 *
 * --	gain
 *	2. Code is much simpler, more readable, more maintainable and
 *	more efficient.
 *
 *
 * ==========================================================================
 * 4. TODO list
 *
 *   -- Refactor delayed space reservation
 *
 *   -- Extent-level locking
 */
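/*
 * Illustrative sketch (not part of this file): the lookup order the
 * design above implies.  A block-mapping caller such as
 * ext4_map_blocks() consults the in-memory status tree first and falls
 * back to the on-disk trees only on a miss, roughly:
 *
 *	struct extent_status es;
 *
 *	if (ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es))
 *		return ...;	(served from the extent status tree)
 *	... otherwise read the on-disk extent or indirect tree, then
 *	... cache the result with ext4_es_insert_extent()
 */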
static struct kmem_cache *ext4_es_cachep;
static struct kmem_cache *ext4_pending_cachep;

static int __es_insert_extent(struct inode *inode, struct extent_status *newes,
			      struct extent_status *prealloc);
static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
			      ext4_lblk_t end, int *reserved,
			      struct extent_status *prealloc);
static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan);
static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
		       struct ext4_inode_info *locked_ei);
static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
			     ext4_lblk_t len);

int __init ext4_init_es(void)
{
	ext4_es_cachep = KMEM_CACHE(extent_status, SLAB_RECLAIM_ACCOUNT);
	if (ext4_es_cachep == NULL)
		return -ENOMEM;
	return 0;
}

void ext4_exit_es(void)
{
	kmem_cache_destroy(ext4_es_cachep);
}

void ext4_es_init_tree(struct ext4_es_tree *tree)
{
	tree->root = RB_ROOT;
	tree->cache_es = NULL;
}

#ifdef ES_DEBUG__
static void ext4_es_print_tree(struct inode *inode)
{
	struct ext4_es_tree *tree;
	struct rb_node *node;

	printk(KERN_DEBUG "status extents for inode %lu:", inode->i_ino);
	tree = &EXT4_I(inode)->i_es_tree;
	node = rb_first(&tree->root);
	while (node) {
		struct extent_status *es;

		es = rb_entry(node, struct extent_status, rb_node);
		printk(KERN_DEBUG " [%u/%u) %llu %x",
		       es->es_lblk, es->es_len,
		       ext4_es_pblock(es), ext4_es_status(es));
		node = rb_next(node);
	}
	printk(KERN_DEBUG "\n");
}
#else
#define ext4_es_print_tree(inode)
#endif

static inline ext4_lblk_t ext4_es_end(struct extent_status *es)
{
	BUG_ON(es->es_lblk + es->es_len < es->es_lblk);
	return es->es_lblk + es->es_len - 1;
}

/*
 * Search through the tree for an extent with a given offset.  If it
 * can't be found, try to find the next extent.
 */
static struct extent_status *__es_tree_search(struct rb_root *root,
					      ext4_lblk_t lblk)
{
	struct rb_node *node = root->rb_node;
	struct extent_status *es = NULL;

	while (node) {
		es = rb_entry(node, struct extent_status, rb_node);
		if (lblk < es->es_lblk)
			node = node->rb_left;
		else if (lblk > ext4_es_end(es))
			node = node->rb_right;
		else
			return es;
	}

	if (es && lblk < es->es_lblk)
		return es;

	if (es && lblk > ext4_es_end(es)) {
		node = rb_next(&es->rb_node);
		return node ? rb_entry(node, struct extent_status, rb_node) :
			      NULL;
	}

	return NULL;
}
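/*
 * Worked example (illustrative): with extents [10/5) and [30/2) in the
 * tree, __es_tree_search() returns [10/5) for lblk 12 (a direct hit),
 * [30/2) for lblk 20 (the next extent after the search point), and
 * NULL for lblk 40 (nothing at or beyond the offset).
 */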
/*
 * ext4_es_find_extent_range - find extent with specified status within block
 *                             range or next extent following block range in
 *                             extents status tree
 *
 * @inode - file containing the range
 * @matching_fn - pointer to function that matches extents with desired status
 * @lblk - logical block defining start of range
 * @end - logical block defining end of range
 * @es - extent found, if any
 *
 * Find the first extent within the block range specified by @lblk and @end
 * in the extents status tree that satisfies @matching_fn.  If a match
 * is found, it's returned in @es.  If not, and a matching extent is found
 * beyond the block range, it's returned in @es.  If no match is found, an
 * extent is returned in @es whose es_lblk, es_len, and es_pblk components
 * are 0.
 */
static void __es_find_extent_range(struct inode *inode,
				   int (*matching_fn)(struct extent_status *es),
				   ext4_lblk_t lblk, ext4_lblk_t end,
				   struct extent_status *es)
{
	struct ext4_es_tree *tree = NULL;
	struct extent_status *es1 = NULL;
	struct rb_node *node;

	WARN_ON(es == NULL);
	WARN_ON(end < lblk);

	tree = &EXT4_I(inode)->i_es_tree;

	/* see if the extent has been cached */
	es->es_lblk = es->es_len = es->es_pblk = 0;
	es1 = READ_ONCE(tree->cache_es);
	if (es1 && in_range(lblk, es1->es_lblk, es1->es_len)) {
		es_debug("%u cached by [%u/%u) %llu %x\n",
			 lblk, es1->es_lblk, es1->es_len,
			 ext4_es_pblock(es1), ext4_es_status(es1));
		goto out;
	}

	es1 = __es_tree_search(&tree->root, lblk);

out:
	if (es1 && !matching_fn(es1)) {
		while ((node = rb_next(&es1->rb_node)) != NULL) {
			es1 = rb_entry(node, struct extent_status, rb_node);
			if (es1->es_lblk > end) {
				es1 = NULL;
				break;
			}
			if (matching_fn(es1))
				break;
		}
	}

	if (es1 && matching_fn(es1)) {
		WRITE_ONCE(tree->cache_es, es1);
		es->es_lblk = es1->es_lblk;
		es->es_len = es1->es_len;
		es->es_pblk = es1->es_pblk;
	}
}

/*
 * Locking for __es_find_extent_range() for external use
 */
void ext4_es_find_extent_range(struct inode *inode,
			       int (*matching_fn)(struct extent_status *es),
			       ext4_lblk_t lblk, ext4_lblk_t end,
			       struct extent_status *es)
{
	if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
		return;

	trace_ext4_es_find_extent_range_enter(inode, lblk);

	read_lock(&EXT4_I(inode)->i_es_lock);
	__es_find_extent_range(inode, matching_fn, lblk, end, es);
	read_unlock(&EXT4_I(inode)->i_es_lock);

	trace_ext4_es_find_extent_range_exit(inode, es);
}
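/*
 * Usage sketch (illustrative; assumes the ext4_es_is_delayed()
 * predicate from extents_status.h): to find the first delayed extent
 * at or after @lblk, a caller would do
 *
 *	struct extent_status es;
 *
 *	ext4_es_find_extent_range(inode, &ext4_es_is_delayed, lblk,
 *				  EXT_MAX_BLOCKS - 1, &es);
 *	if (es.es_len != 0)
 *		... es.es_lblk is the start of the delayed range ...
 */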
/*
 * __es_scan_range - search block range for block with specified status
 *                   in extents status tree
 *
 * @inode - file containing the range
 * @matching_fn - pointer to function that matches extents with desired status
 * @lblk - logical block defining start of range
 * @end - logical block defining end of range
 *
 * Returns true if at least one block in the specified block range satisfies
 * the criterion specified by @matching_fn, and false if not.  If at least
 * one extent has the specified status, then there is at least one block
 * in the cluster with that status.  Should only be called by code that has
 * taken i_es_lock.
 */
static bool __es_scan_range(struct inode *inode,
			    int (*matching_fn)(struct extent_status *es),
			    ext4_lblk_t start, ext4_lblk_t end)
{
	struct extent_status es;

	__es_find_extent_range(inode, matching_fn, start, end, &es);
	if (es.es_len == 0)
		return false; /* no matching extent in the tree */
	else if (es.es_lblk <= start &&
		 start < es.es_lblk + es.es_len)
		return true;
	else if (start <= es.es_lblk && es.es_lblk <= end)
		return true;
	else
		return false;
}

/*
 * Locking for __es_scan_range() for external use
 */
bool ext4_es_scan_range(struct inode *inode,
			int (*matching_fn)(struct extent_status *es),
			ext4_lblk_t lblk, ext4_lblk_t end)
{
	bool ret;

	if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
		return false;

	read_lock(&EXT4_I(inode)->i_es_lock);
	ret = __es_scan_range(inode, matching_fn, lblk, end);
	read_unlock(&EXT4_I(inode)->i_es_lock);

	return ret;
}

/*
 * __es_scan_clu - search cluster for block with specified status in
 *                 extents status tree
 *
 * @inode - file containing the cluster
 * @matching_fn - pointer to function that matches extents with desired status
 * @lblk - logical block in cluster to be searched
 *
 * Returns true if at least one extent in the cluster containing @lblk
 * satisfies the criterion specified by @matching_fn, and false if not.  If at
 * least one extent has the specified status, then there is at least one block
 * in the cluster with that status.  Should only be called by code that has
 * taken i_es_lock.
 */
static bool __es_scan_clu(struct inode *inode,
			  int (*matching_fn)(struct extent_status *es),
			  ext4_lblk_t lblk)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	ext4_lblk_t lblk_start, lblk_end;

	lblk_start = EXT4_LBLK_CMASK(sbi, lblk);
	lblk_end = lblk_start + sbi->s_cluster_ratio - 1;

	return __es_scan_range(inode, matching_fn, lblk_start, lblk_end);
}

/*
 * Locking for __es_scan_clu() for external use
 */
bool ext4_es_scan_clu(struct inode *inode,
		      int (*matching_fn)(struct extent_status *es),
		      ext4_lblk_t lblk)
{
	bool ret;

	if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
		return false;

	read_lock(&EXT4_I(inode)->i_es_lock);
	ret = __es_scan_clu(inode, matching_fn, lblk);
	read_unlock(&EXT4_I(inode)->i_es_lock);

	return ret;
}
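/*
 * Worked example for __es_scan_clu() above (illustrative): on a
 * bigalloc filesystem with s_cluster_ratio == 16, a search for lblk 37
 * masks it down to the cluster start with EXT4_LBLK_CMASK(), giving
 * lblk_start == 32 and lblk_end == 47, and then scans that whole
 * cluster with __es_scan_range().
 */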
static void ext4_es_list_add(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

	if (!list_empty(&ei->i_es_list))
		return;

	spin_lock(&sbi->s_es_lock);
	if (list_empty(&ei->i_es_list)) {
		list_add_tail(&ei->i_es_list, &sbi->s_es_list);
		sbi->s_es_nr_inode++;
	}
	spin_unlock(&sbi->s_es_lock);
}

static void ext4_es_list_del(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

	spin_lock(&sbi->s_es_lock);
	if (!list_empty(&ei->i_es_list)) {
		list_del_init(&ei->i_es_list);
		sbi->s_es_nr_inode--;
		WARN_ON_ONCE(sbi->s_es_nr_inode < 0);
	}
	spin_unlock(&sbi->s_es_lock);
}

/*
 * Returns true if we cannot fail to allocate memory for this extent_status
 * entry and cannot reclaim it until its status changes.
 */
static inline bool ext4_es_must_keep(struct extent_status *es)
{
	/* fiemap, bigalloc, and seek_data/hole need to use it. */
	if (ext4_es_is_delayed(es))
		return true;

	return false;
}

static inline struct extent_status *__es_alloc_extent(bool nofail)
{
	if (!nofail)
		return kmem_cache_alloc(ext4_es_cachep, GFP_ATOMIC);

	return kmem_cache_zalloc(ext4_es_cachep, GFP_KERNEL | __GFP_NOFAIL);
}

static void ext4_es_init_extent(struct inode *inode, struct extent_status *es,
		ext4_lblk_t lblk, ext4_lblk_t len, ext4_fsblk_t pblk)
{
	es->es_lblk = lblk;
	es->es_len = len;
	es->es_pblk = pblk;

	/* We never try to reclaim a must kept extent, so we don't count it. */
	if (!ext4_es_must_keep(es)) {
		if (!EXT4_I(inode)->i_es_shk_nr++)
			ext4_es_list_add(inode);
		percpu_counter_inc(&EXT4_SB(inode->i_sb)->
					s_es_stats.es_stats_shk_cnt);
	}

	EXT4_I(inode)->i_es_all_nr++;
	percpu_counter_inc(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt);
}

static inline void __es_free_extent(struct extent_status *es)
{
	kmem_cache_free(ext4_es_cachep, es);
}

static void ext4_es_free_extent(struct inode *inode, struct extent_status *es)
{
	EXT4_I(inode)->i_es_all_nr--;
	percpu_counter_dec(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt);

	/* Decrease the shrink counter when we can reclaim the extent. */
	if (!ext4_es_must_keep(es)) {
		BUG_ON(EXT4_I(inode)->i_es_shk_nr == 0);
		if (!--EXT4_I(inode)->i_es_shk_nr)
			ext4_es_list_del(inode);
		percpu_counter_dec(&EXT4_SB(inode->i_sb)->
					s_es_stats.es_stats_shk_cnt);
	}

	__es_free_extent(es);
}
" 528 "The sum of lengths of es1 (%d) and es2 (%d) " 529 "is bigger than allowed file size (%d)\n", 530 es1->es_len, es2->es_len, EXT_MAX_BLOCKS); 531 WARN_ON(1); 532 return 0; 533 } 534 535 if (((__u64) es1->es_lblk) + es1->es_len != es2->es_lblk) 536 return 0; 537 538 if ((ext4_es_is_written(es1) || ext4_es_is_unwritten(es1)) && 539 (ext4_es_pblock(es1) + es1->es_len == ext4_es_pblock(es2))) 540 return 1; 541 542 if (ext4_es_is_hole(es1)) 543 return 1; 544 545 /* we need to check delayed extent is without unwritten status */ 546 if (ext4_es_is_delayed(es1) && !ext4_es_is_unwritten(es1)) 547 return 1; 548 549 return 0; 550 } 551 552 static struct extent_status * 553 ext4_es_try_to_merge_left(struct inode *inode, struct extent_status *es) 554 { 555 struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree; 556 struct extent_status *es1; 557 struct rb_node *node; 558 559 node = rb_prev(&es->rb_node); 560 if (!node) 561 return es; 562 563 es1 = rb_entry(node, struct extent_status, rb_node); 564 if (ext4_es_can_be_merged(es1, es)) { 565 es1->es_len += es->es_len; 566 if (ext4_es_is_referenced(es)) 567 ext4_es_set_referenced(es1); 568 rb_erase(&es->rb_node, &tree->root); 569 ext4_es_free_extent(inode, es); 570 es = es1; 571 } 572 573 return es; 574 } 575 576 static struct extent_status * 577 ext4_es_try_to_merge_right(struct inode *inode, struct extent_status *es) 578 { 579 struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree; 580 struct extent_status *es1; 581 struct rb_node *node; 582 583 node = rb_next(&es->rb_node); 584 if (!node) 585 return es; 586 587 es1 = rb_entry(node, struct extent_status, rb_node); 588 if (ext4_es_can_be_merged(es, es1)) { 589 es->es_len += es1->es_len; 590 if (ext4_es_is_referenced(es1)) 591 ext4_es_set_referenced(es); 592 rb_erase(node, &tree->root); 593 ext4_es_free_extent(inode, es1); 594 } 595 596 return es; 597 } 598 599 #ifdef ES_AGGRESSIVE_TEST 600 #include "ext4_extents.h" /* Needed when ES_AGGRESSIVE_TEST is defined */ 601 602 static void ext4_es_insert_extent_ext_check(struct inode *inode, 603 struct extent_status *es) 604 { 605 struct ext4_ext_path *path = NULL; 606 struct ext4_extent *ex; 607 ext4_lblk_t ee_block; 608 ext4_fsblk_t ee_start; 609 unsigned short ee_len; 610 int depth, ee_status, es_status; 611 612 path = ext4_find_extent(inode, es->es_lblk, NULL, EXT4_EX_NOCACHE); 613 if (IS_ERR(path)) 614 return; 615 616 depth = ext_depth(inode); 617 ex = path[depth].p_ext; 618 619 if (ex) { 620 621 ee_block = le32_to_cpu(ex->ee_block); 622 ee_start = ext4_ext_pblock(ex); 623 ee_len = ext4_ext_get_actual_len(ex); 624 625 ee_status = ext4_ext_is_unwritten(ex) ? 1 : 0; 626 es_status = ext4_es_is_unwritten(es) ? 1 : 0; 627 628 /* 629 * Make sure ex and es are not overlap when we try to insert 630 * a delayed/hole extent. 631 */ 632 if (!ext4_es_is_written(es) && !ext4_es_is_unwritten(es)) { 633 if (in_range(es->es_lblk, ee_block, ee_len)) { 634 pr_warn("ES insert assertion failed for " 635 "inode: %lu we can find an extent " 636 "at block [%d/%d/%llu/%c], but we " 637 "want to add a delayed/hole extent " 638 "[%d/%d/%llu/%x]\n", 639 inode->i_ino, ee_block, ee_len, 640 ee_start, ee_status ? 'u' : 'w', 641 es->es_lblk, es->es_len, 642 ext4_es_pblock(es), ext4_es_status(es)); 643 } 644 goto out; 645 } 646 647 /* 648 * We don't check ee_block == es->es_lblk, etc. because es 649 * might be a part of whole extent, vice versa. 
#ifdef ES_AGGRESSIVE_TEST
#include "ext4_extents.h"	/* Needed when ES_AGGRESSIVE_TEST is defined */

static void ext4_es_insert_extent_ext_check(struct inode *inode,
					    struct extent_status *es)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_extent *ex;
	ext4_lblk_t ee_block;
	ext4_fsblk_t ee_start;
	unsigned short ee_len;
	int depth, ee_status, es_status;

	path = ext4_find_extent(inode, es->es_lblk, NULL, EXT4_EX_NOCACHE);
	if (IS_ERR(path))
		return;

	depth = ext_depth(inode);
	ex = path[depth].p_ext;

	if (ex) {
		ee_block = le32_to_cpu(ex->ee_block);
		ee_start = ext4_ext_pblock(ex);
		ee_len = ext4_ext_get_actual_len(ex);

		ee_status = ext4_ext_is_unwritten(ex) ? 1 : 0;
		es_status = ext4_es_is_unwritten(es) ? 1 : 0;

		/*
		 * Make sure ex and es do not overlap when we try to insert
		 * a delayed/hole extent.
		 */
		if (!ext4_es_is_written(es) && !ext4_es_is_unwritten(es)) {
			if (in_range(es->es_lblk, ee_block, ee_len)) {
				pr_warn("ES insert assertion failed for "
					"inode: %lu we can find an extent "
					"at block [%d/%d/%llu/%c], but we "
					"want to add a delayed/hole extent "
					"[%d/%d/%llu/%x]\n",
					inode->i_ino, ee_block, ee_len,
					ee_start, ee_status ? 'u' : 'w',
					es->es_lblk, es->es_len,
					ext4_es_pblock(es), ext4_es_status(es));
			}
			goto out;
		}

		/*
		 * We don't check ee_block == es->es_lblk, etc. because es
		 * might be a part of a whole extent, and vice versa.
		 */
		if (es->es_lblk < ee_block ||
		    ext4_es_pblock(es) != ee_start + es->es_lblk - ee_block) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"ex_status [%d/%d/%llu/%c] != "
				"es_status [%d/%d/%llu/%c]\n", inode->i_ino,
				ee_block, ee_len, ee_start,
				ee_status ? 'u' : 'w', es->es_lblk, es->es_len,
				ext4_es_pblock(es), es_status ? 'u' : 'w');
			goto out;
		}

		if (ee_status ^ es_status) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"ex_status [%d/%d/%llu/%c] != "
				"es_status [%d/%d/%llu/%c]\n", inode->i_ino,
				ee_block, ee_len, ee_start,
				ee_status ? 'u' : 'w', es->es_lblk, es->es_len,
				ext4_es_pblock(es), es_status ? 'u' : 'w');
		}
	} else {
		/*
		 * We can't find an extent on disk.  So we need to make sure
		 * that we don't want to add a written/unwritten extent.
		 */
		if (!ext4_es_is_delayed(es) && !ext4_es_is_hole(es)) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"can't find an extent at block %d but we want "
				"to add a written/unwritten extent "
				"[%d/%d/%llu/%x]\n", inode->i_ino,
				es->es_lblk, es->es_lblk, es->es_len,
				ext4_es_pblock(es), ext4_es_status(es));
		}
	}
out:
	ext4_free_ext_path(path);
}

static void ext4_es_insert_extent_ind_check(struct inode *inode,
					    struct extent_status *es)
{
	struct ext4_map_blocks map;
	int retval;

	/*
	 * Here we call ext4_ind_map_blocks to look up a block mapping
	 * because the 'Indirect' structure is defined in indirect.c, so
	 * we can't access the direct/indirect tree from outside it.  It
	 * would be too ugly to define this function in indirect.c.
	 */
	map.m_lblk = es->es_lblk;
	map.m_len = es->es_len;

	retval = ext4_ind_map_blocks(NULL, inode, &map, 0);
	if (retval > 0) {
		if (ext4_es_is_delayed(es) || ext4_es_is_hole(es)) {
			/*
			 * We want to add a delayed/hole extent but this
			 * block has been allocated.
			 */
			pr_warn("ES insert assertion failed for inode: %lu "
				"We can find blocks but we want to add a "
				"delayed/hole extent [%d/%d/%llu/%x]\n",
				inode->i_ino, es->es_lblk, es->es_len,
				ext4_es_pblock(es), ext4_es_status(es));
			return;
		} else if (ext4_es_is_written(es)) {
			if (retval != es->es_len) {
				pr_warn("ES insert assertion failed for "
					"inode: %lu retval %d != es_len %d\n",
					inode->i_ino, retval, es->es_len);
				return;
			}
			if (map.m_pblk != ext4_es_pblock(es)) {
				pr_warn("ES insert assertion failed for "
					"inode: %lu m_pblk %llu != "
					"es_pblk %llu\n",
					inode->i_ino, map.m_pblk,
					ext4_es_pblock(es));
				return;
			}
		} else {
			/*
			 * We don't need to check unwritten extents because
			 * an indirect-based file doesn't have them.
			 */
			BUG();
		}
	} else if (retval == 0) {
		if (ext4_es_is_written(es)) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"We can't find the block but we want to add "
				"a written extent [%d/%d/%llu/%x]\n",
				inode->i_ino, es->es_lblk, es->es_len,
				ext4_es_pblock(es), ext4_es_status(es));
			return;
		}
	}
}
static inline void ext4_es_insert_extent_check(struct inode *inode,
					       struct extent_status *es)
{
	/*
	 * We don't need to worry about the race condition because the
	 * caller holds i_data_sem.
	 */
	BUG_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem));
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		ext4_es_insert_extent_ext_check(inode, es);
	else
		ext4_es_insert_extent_ind_check(inode, es);
}
#else
static inline void ext4_es_insert_extent_check(struct inode *inode,
					       struct extent_status *es)
{
}
#endif

static int __es_insert_extent(struct inode *inode, struct extent_status *newes,
			      struct extent_status *prealloc)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct rb_node **p = &tree->root.rb_node;
	struct rb_node *parent = NULL;
	struct extent_status *es;

	while (*p) {
		parent = *p;
		es = rb_entry(parent, struct extent_status, rb_node);

		if (newes->es_lblk < es->es_lblk) {
			if (ext4_es_can_be_merged(newes, es)) {
				/*
				 * Here we can modify es_lblk directly
				 * because it isn't overlapped.
				 */
				es->es_lblk = newes->es_lblk;
				es->es_len += newes->es_len;
				if (ext4_es_is_written(es) ||
				    ext4_es_is_unwritten(es))
					ext4_es_store_pblock(es,
							     newes->es_pblk);
				es = ext4_es_try_to_merge_left(inode, es);
				goto out;
			}
			p = &(*p)->rb_left;
		} else if (newes->es_lblk > ext4_es_end(es)) {
			if (ext4_es_can_be_merged(es, newes)) {
				es->es_len += newes->es_len;
				es = ext4_es_try_to_merge_right(inode, es);
				goto out;
			}
			p = &(*p)->rb_right;
		} else {
			BUG();
			return -EINVAL;
		}
	}

	if (prealloc)
		es = prealloc;
	else
		es = __es_alloc_extent(false);
	if (!es)
		return -ENOMEM;
	ext4_es_init_extent(inode, es, newes->es_lblk, newes->es_len,
			    newes->es_pblk);

	rb_link_node(&es->rb_node, parent, p);
	rb_insert_color(&es->rb_node, &tree->root);

out:
	tree->cache_es = es;
	return 0;
}
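/*
 * Worked example (illustrative): if the tree holds a written extent
 * [0/8) at pblk 100 and __es_insert_extent() is asked to insert a
 * written [8/4) at pblk 108, the descent ends at [0/8) with the new
 * extent mergeable to its right, so the existing node simply grows to
 * [0/12); no new node is allocated and the rb-tree shape is unchanged.
 */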
/*
 * ext4_es_insert_extent() adds information to an inode's extent
 * status tree.
 *
 * Return 0 on success, error code on failure.
 */
int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
			  ext4_lblk_t len, ext4_fsblk_t pblk,
			  unsigned int status)
{
	struct extent_status newes;
	ext4_lblk_t end = lblk + len - 1;
	int err = 0;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

	if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
		return 0;

	es_debug("add [%u/%u) %llu %x to extent status tree of inode %lu\n",
		 lblk, len, pblk, status, inode->i_ino);

	if (!len)
		return 0;

	BUG_ON(end < lblk);

	if ((status & EXTENT_STATUS_DELAYED) &&
	    (status & EXTENT_STATUS_WRITTEN)) {
		ext4_warning(inode->i_sb, "Inserting extent [%u/%u] as "
				" delayed and written which can potentially "
				" cause data loss.", lblk, len);
		WARN_ON(1);
	}

	newes.es_lblk = lblk;
	newes.es_len = len;
	ext4_es_store_pblock_status(&newes, pblk, status);
	trace_ext4_es_insert_extent(inode, &newes);

	ext4_es_insert_extent_check(inode, &newes);

	write_lock(&EXT4_I(inode)->i_es_lock);
	err = __es_remove_extent(inode, lblk, end, NULL, NULL);
	if (err != 0)
		goto error;
retry:
	err = __es_insert_extent(inode, &newes, NULL);
	if (err == -ENOMEM && __es_shrink(EXT4_SB(inode->i_sb),
					  128, EXT4_I(inode)))
		goto retry;
	if (err == -ENOMEM && !ext4_es_must_keep(&newes))
		err = 0;

	if (sbi->s_cluster_ratio > 1 && test_opt(inode->i_sb, DELALLOC) &&
	    (status & EXTENT_STATUS_WRITTEN ||
	     status & EXTENT_STATUS_UNWRITTEN))
		__revise_pending(inode, lblk, len);

error:
	write_unlock(&EXT4_I(inode)->i_es_lock);

	ext4_es_print_tree(inode);

	return err;
}

/*
 * ext4_es_cache_extent() inserts information into the extent status
 * tree if and only if there isn't information about the range in
 * question already.
 */
void ext4_es_cache_extent(struct inode *inode, ext4_lblk_t lblk,
			  ext4_lblk_t len, ext4_fsblk_t pblk,
			  unsigned int status)
{
	struct extent_status *es;
	struct extent_status newes;
	ext4_lblk_t end = lblk + len - 1;

	if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
		return;

	newes.es_lblk = lblk;
	newes.es_len = len;
	ext4_es_store_pblock_status(&newes, pblk, status);
	trace_ext4_es_cache_extent(inode, &newes);

	if (!len)
		return;

	BUG_ON(end < lblk);

	write_lock(&EXT4_I(inode)->i_es_lock);

	es = __es_tree_search(&EXT4_I(inode)->i_es_tree.root, lblk);
	if (!es || es->es_lblk > end)
		__es_insert_extent(inode, &newes, NULL);
	write_unlock(&EXT4_I(inode)->i_es_lock);
}
/*
 * ext4_es_lookup_extent() looks up an extent in the extent status tree.
 *
 * ext4_es_lookup_extent is called by ext4_map_blocks/ext4_da_map_blocks.
 *
 * Return: 1 on found, 0 on not
 */
int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk,
			  ext4_lblk_t *next_lblk,
			  struct extent_status *es)
{
	struct ext4_es_tree *tree;
	struct ext4_es_stats *stats;
	struct extent_status *es1 = NULL;
	struct rb_node *node;
	int found = 0;

	if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
		return 0;

	trace_ext4_es_lookup_extent_enter(inode, lblk);
	es_debug("lookup extent in block %u\n", lblk);

	tree = &EXT4_I(inode)->i_es_tree;
	read_lock(&EXT4_I(inode)->i_es_lock);

	/* find the extent in the cache first */
	es->es_lblk = es->es_len = es->es_pblk = 0;
	es1 = READ_ONCE(tree->cache_es);
	if (es1 && in_range(lblk, es1->es_lblk, es1->es_len)) {
		es_debug("%u cached by [%u/%u)\n",
			 lblk, es1->es_lblk, es1->es_len);
		found = 1;
		goto out;
	}

	node = tree->root.rb_node;
	while (node) {
		es1 = rb_entry(node, struct extent_status, rb_node);
		if (lblk < es1->es_lblk)
			node = node->rb_left;
		else if (lblk > ext4_es_end(es1))
			node = node->rb_right;
		else {
			found = 1;
			break;
		}
	}

out:
	stats = &EXT4_SB(inode->i_sb)->s_es_stats;
	if (found) {
		BUG_ON(!es1);
		es->es_lblk = es1->es_lblk;
		es->es_len = es1->es_len;
		es->es_pblk = es1->es_pblk;
		if (!ext4_es_is_referenced(es1))
			ext4_es_set_referenced(es1);
		percpu_counter_inc(&stats->es_stats_cache_hits);
		if (next_lblk) {
			node = rb_next(&es1->rb_node);
			if (node) {
				es1 = rb_entry(node, struct extent_status,
					       rb_node);
				*next_lblk = es1->es_lblk;
			} else
				*next_lblk = 0;
		}
	} else {
		percpu_counter_inc(&stats->es_stats_cache_misses);
	}

	read_unlock(&EXT4_I(inode)->i_es_lock);

	trace_ext4_es_lookup_extent_exit(inode, es, found);
	return found;
}
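/*
 * Usage sketch (illustrative): a caller mapping block @lblk checks the
 * status tree before touching the on-disk metadata, e.g.
 *
 *	struct extent_status es;
 *
 *	if (ext4_es_lookup_extent(inode, lblk, NULL, &es) &&
 *	    ext4_es_is_written(&es))
 *		pblk = ext4_es_pblock(&es) + lblk - es.es_lblk;
 *
 * The offset arithmetic is needed because a hit may land anywhere
 * inside the cached extent, not necessarily at its first block.
 */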
struct rsvd_count {
	int ndelonly;
	bool first_do_lblk_found;
	ext4_lblk_t first_do_lblk;
	ext4_lblk_t last_do_lblk;
	struct extent_status *left_es;
	bool partial;
	ext4_lblk_t lclu;
};

/*
 * init_rsvd - initialize reserved count data before removing block range
 *	       in file from extent status tree
 *
 * @inode - file containing range
 * @lblk - first block in range
 * @es - pointer to first extent in range
 * @rc - pointer to reserved count data
 *
 * Assumes es is not NULL
 */
static void init_rsvd(struct inode *inode, ext4_lblk_t lblk,
		      struct extent_status *es, struct rsvd_count *rc)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct rb_node *node;

	rc->ndelonly = 0;

	/*
	 * for bigalloc, note the first delonly block in the range has not
	 * been found, record the extent containing the block to the left of
	 * the region to be removed, if any, and note that there's no partial
	 * cluster to track
	 */
	if (sbi->s_cluster_ratio > 1) {
		rc->first_do_lblk_found = false;
		if (lblk > es->es_lblk) {
			rc->left_es = es;
		} else {
			node = rb_prev(&es->rb_node);
			rc->left_es = node ? rb_entry(node,
						      struct extent_status,
						      rb_node) : NULL;
		}
		rc->partial = false;
	}
}

/*
 * count_rsvd - count the clusters containing delayed and not unwritten
 *		(delonly) blocks in a range within an extent and add to
 *		the running tally in rsvd_count
 *
 * @inode - file containing extent
 * @lblk - first block in range
 * @len - length of range in blocks
 * @es - pointer to extent containing clusters to be counted
 * @rc - pointer to reserved count data
 *
 * Tracks partial clusters found at the beginning and end of extents so
 * they aren't overcounted when they span adjacent extents
 */
static void count_rsvd(struct inode *inode, ext4_lblk_t lblk, long len,
		       struct extent_status *es, struct rsvd_count *rc)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	ext4_lblk_t i, end, nclu;

	if (!ext4_es_is_delonly(es))
		return;

	WARN_ON(len <= 0);

	if (sbi->s_cluster_ratio == 1) {
		rc->ndelonly += (int) len;
		return;
	}

	/* bigalloc */

	i = (lblk < es->es_lblk) ? es->es_lblk : lblk;
	end = lblk + (ext4_lblk_t) len - 1;
	end = (end > ext4_es_end(es)) ? ext4_es_end(es) : end;

	/* record the first block of the first delonly extent seen */
	if (!rc->first_do_lblk_found) {
		rc->first_do_lblk = i;
		rc->first_do_lblk_found = true;
	}

	/* update the last lblk in the region seen so far */
	rc->last_do_lblk = end;

	/*
	 * if we're tracking a partial cluster and the current extent
	 * doesn't start with it, count it and stop tracking
	 */
	if (rc->partial && (rc->lclu != EXT4_B2C(sbi, i))) {
		rc->ndelonly++;
		rc->partial = false;
	}

	/*
	 * if the first cluster doesn't start on a cluster boundary but
	 * ends on one, count it
	 */
	if (EXT4_LBLK_COFF(sbi, i) != 0) {
		if (end >= EXT4_LBLK_CFILL(sbi, i)) {
			rc->ndelonly++;
			rc->partial = false;
			i = EXT4_LBLK_CFILL(sbi, i) + 1;
		}
	}

	/*
	 * if the current cluster starts on a cluster boundary, count the
	 * number of whole delonly clusters in the extent
	 */
	if ((i + sbi->s_cluster_ratio - 1) <= end) {
		nclu = (end - i + 1) >> sbi->s_cluster_bits;
		rc->ndelonly += nclu;
		i += nclu << sbi->s_cluster_bits;
	}

	/*
	 * start tracking a partial cluster if there's a partial at the end
	 * of the current extent and we're not already tracking one
	 */
	if (!rc->partial && i <= end) {
		rc->partial = true;
		rc->lclu = EXT4_B2C(sbi, i);
	}
}
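/*
 * Worked example (illustrative): with s_cluster_ratio == 4, counting a
 * delonly extent covering blocks 2..9 proceeds as follows.  Block 2
 * does not sit on a cluster boundary but the range reaches the end of
 * its cluster (block 3), so that leading partial cluster is counted
 * and i advances to 4.  Blocks 4..7 form one whole cluster, growing
 * ndelonly by one more.  Blocks 8..9 are a trailing partial, so
 * tracking begins on lcluster 2, which is counted later either when
 * the next extent doesn't continue it or by get_rsvd() at the end.
 */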
/*
 * __pr_tree_search - search for a pending cluster reservation
 *
 * @root - root of pending reservation tree
 * @lclu - logical cluster to search for
 *
 * Returns the pending reservation for the cluster identified by @lclu
 * if found.  If not, returns a reservation for the next cluster if any,
 * and if not, returns NULL.
 */
static struct pending_reservation *__pr_tree_search(struct rb_root *root,
						    ext4_lblk_t lclu)
{
	struct rb_node *node = root->rb_node;
	struct pending_reservation *pr = NULL;

	while (node) {
		pr = rb_entry(node, struct pending_reservation, rb_node);
		if (lclu < pr->lclu)
			node = node->rb_left;
		else if (lclu > pr->lclu)
			node = node->rb_right;
		else
			return pr;
	}
	if (pr && lclu < pr->lclu)
		return pr;
	if (pr && lclu > pr->lclu) {
		node = rb_next(&pr->rb_node);
		return node ? rb_entry(node, struct pending_reservation,
				       rb_node) : NULL;
	}
	return NULL;
}
/*
 * get_rsvd - calculates and returns the number of cluster reservations to be
 *	      released when removing a block range from the extent status tree
 *	      and releases any pending reservations within the range
 *
 * @inode - file containing block range
 * @end - last block in range
 * @right_es - pointer to extent containing next block beyond end or NULL
 * @rc - pointer to reserved count data
 *
 * The number of reservations to be released is equal to the number of
 * clusters containing delayed and not unwritten (delonly) blocks within
 * the range, minus the number of clusters still containing delonly blocks
 * at the ends of the range, and minus the number of pending reservations
 * within the range.
 */
static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
			     struct extent_status *right_es,
			     struct rsvd_count *rc)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct pending_reservation *pr;
	struct ext4_pending_tree *tree = &EXT4_I(inode)->i_pending_tree;
	struct rb_node *node;
	ext4_lblk_t first_lclu, last_lclu;
	bool left_delonly, right_delonly, count_pending;
	struct extent_status *es;

	if (sbi->s_cluster_ratio > 1) {
		/* count any remaining partial cluster */
		if (rc->partial)
			rc->ndelonly++;

		if (rc->ndelonly == 0)
			return 0;

		first_lclu = EXT4_B2C(sbi, rc->first_do_lblk);
		last_lclu = EXT4_B2C(sbi, rc->last_do_lblk);

		/*
		 * decrease the delonly count by the number of clusters at the
		 * ends of the range that still contain delonly blocks -
		 * these clusters still need to be reserved
		 */
		left_delonly = right_delonly = false;

		es = rc->left_es;
		while (es && ext4_es_end(es) >=
		       EXT4_LBLK_CMASK(sbi, rc->first_do_lblk)) {
			if (ext4_es_is_delonly(es)) {
				rc->ndelonly--;
				left_delonly = true;
				break;
			}
			node = rb_prev(&es->rb_node);
			if (!node)
				break;
			es = rb_entry(node, struct extent_status, rb_node);
		}
		if (right_es && (!left_delonly || first_lclu != last_lclu)) {
			if (end < ext4_es_end(right_es)) {
				es = right_es;
			} else {
				node = rb_next(&right_es->rb_node);
				es = node ? rb_entry(node, struct extent_status,
						     rb_node) : NULL;
			}
			while (es && es->es_lblk <=
			       EXT4_LBLK_CFILL(sbi, rc->last_do_lblk)) {
				if (ext4_es_is_delonly(es)) {
					rc->ndelonly--;
					right_delonly = true;
					break;
				}
				node = rb_next(&es->rb_node);
				if (!node)
					break;
				es = rb_entry(node, struct extent_status,
					      rb_node);
			}
		}

		/*
		 * Determine the block range that should be searched for
		 * pending reservations, if any.  Clusters on the ends of the
		 * original removed range containing delonly blocks are
		 * excluded.  They've already been accounted for and it's not
		 * possible to determine if an associated pending reservation
		 * should be released with the information available in the
		 * extents status tree.
		 */
		if (first_lclu == last_lclu) {
			if (left_delonly | right_delonly)
				count_pending = false;
			else
				count_pending = true;
		} else {
			if (left_delonly)
				first_lclu++;
			if (right_delonly)
				last_lclu--;
			if (first_lclu <= last_lclu)
				count_pending = true;
			else
				count_pending = false;
		}

		/*
		 * a pending reservation found between first_lclu and last_lclu
		 * represents an allocated cluster that contained at least one
		 * delonly block, so the delonly total must be reduced by one
		 * for each pending reservation found and released
		 */
		if (count_pending) {
			pr = __pr_tree_search(&tree->root, first_lclu);
			while (pr && pr->lclu <= last_lclu) {
				rc->ndelonly--;
				node = rb_next(&pr->rb_node);
				rb_erase(&pr->rb_node, &tree->root);
				kmem_cache_free(ext4_pending_cachep, pr);
				if (!node)
					break;
				pr = rb_entry(node, struct pending_reservation,
					      rb_node);
			}
		}
	}
	return rc->ndelonly;
}
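/*
 * Worked example (illustrative): if removing a range counted 5 delonly
 * clusters, one cluster at each end still holds delonly blocks outside
 * the range, and one pending reservation is found (and released) in
 * between, get_rsvd() returns 5 - 2 - 1 == 2 reservations to release.
 */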
/*
 * __es_remove_extent - removes block range from extent status tree
 *
 * @inode - file containing range
 * @lblk - first block in range
 * @end - last block in range
 * @reserved - number of cluster reservations released
 * @prealloc - pre-allocated es to avoid memory allocation failures
 *
 * If @reserved is not NULL and delayed allocation is enabled, counts
 * block/cluster reservations freed by removing range and if bigalloc
 * enabled cancels pending reservations as needed.  Returns 0 on success,
 * error code on failure.
 */
static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
			      ext4_lblk_t end, int *reserved,
			      struct extent_status *prealloc)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct rb_node *node;
	struct extent_status *es;
	struct extent_status orig_es;
	ext4_lblk_t len1, len2;
	ext4_fsblk_t block;
	int err = 0;
	bool count_reserved = true;
	struct rsvd_count rc;

	if (reserved == NULL || !test_opt(inode->i_sb, DELALLOC))
		count_reserved = false;

	es = __es_tree_search(&tree->root, lblk);
	if (!es)
		goto out;
	if (es->es_lblk > end)
		goto out;

	/* Simply invalidate cache_es. */
	tree->cache_es = NULL;
	if (count_reserved)
		init_rsvd(inode, lblk, es, &rc);

	orig_es.es_lblk = es->es_lblk;
	orig_es.es_len = es->es_len;
	orig_es.es_pblk = es->es_pblk;

	len1 = lblk > es->es_lblk ? lblk - es->es_lblk : 0;
	len2 = ext4_es_end(es) > end ? ext4_es_end(es) - end : 0;
	if (len1 > 0)
		es->es_len = len1;
	if (len2 > 0) {
		if (len1 > 0) {
			struct extent_status newes;

			newes.es_lblk = end + 1;
			newes.es_len = len2;
			block = 0x7FDEADBEEFULL;
			if (ext4_es_is_written(&orig_es) ||
			    ext4_es_is_unwritten(&orig_es))
				block = ext4_es_pblock(&orig_es) +
					orig_es.es_len - len2;
			ext4_es_store_pblock_status(&newes, block,
						    ext4_es_status(&orig_es));
			err = __es_insert_extent(inode, &newes, prealloc);
			if (err) {
				if (!ext4_es_must_keep(&newes))
					return 0;

				es->es_lblk = orig_es.es_lblk;
				es->es_len = orig_es.es_len;
				goto out;
			}
		} else {
			es->es_lblk = end + 1;
			es->es_len = len2;
			if (ext4_es_is_written(es) ||
			    ext4_es_is_unwritten(es)) {
				block = orig_es.es_pblk + orig_es.es_len - len2;
				ext4_es_store_pblock(es, block);
			}
		}
		if (count_reserved)
			count_rsvd(inode, lblk, orig_es.es_len - len1 - len2,
				   &orig_es, &rc);
		goto out_get_reserved;
	}

	if (len1 > 0) {
		if (count_reserved)
			count_rsvd(inode, lblk, orig_es.es_len - len1,
				   &orig_es, &rc);
		node = rb_next(&es->rb_node);
		if (node)
			es = rb_entry(node, struct extent_status, rb_node);
		else
			es = NULL;
	}

	while (es && ext4_es_end(es) <= end) {
		if (count_reserved)
			count_rsvd(inode, es->es_lblk, es->es_len, es, &rc);
		node = rb_next(&es->rb_node);
		rb_erase(&es->rb_node, &tree->root);
		ext4_es_free_extent(inode, es);
		if (!node) {
			es = NULL;
			break;
		}
		es = rb_entry(node, struct extent_status, rb_node);
	}

	if (es && es->es_lblk < end + 1) {
		ext4_lblk_t orig_len = es->es_len;

		len1 = ext4_es_end(es) - end;
		if (count_reserved)
			count_rsvd(inode, es->es_lblk, orig_len - len1,
				   es, &rc);
		es->es_lblk = end + 1;
		es->es_len = len1;
		if (ext4_es_is_written(es) || ext4_es_is_unwritten(es)) {
			block = es->es_pblk + orig_len - len1;
			ext4_es_store_pblock(es, block);
		}
	}

out_get_reserved:
	if (count_reserved)
		*reserved = get_rsvd(inode, end, es, &rc);
out:
	return err;
}
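/*
 * Worked example (illustrative): removing blocks 8..11 from a written
 * extent [0/20) at pblk 100 leaves len1 == 8 blocks on the left and
 * len2 == 8 on the right, so the original node is trimmed to [0/8) and
 * a new node [12/8) at pblk 112 is inserted for the tail; this is the
 * case that can need the @prealloc entry when memory is tight.
 */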
/*
 * ext4_es_remove_extent - removes block range from extent status tree
 *
 * @inode - file containing range
 * @lblk - first block in range
 * @len - number of blocks to remove
 *
 * Reduces block/cluster reservation count and for bigalloc cancels pending
 * reservations as needed.  Returns 0 on success, error code on failure.
 */
int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
			  ext4_lblk_t len)
{
	ext4_lblk_t end;
	int err = 0;
	int reserved = 0;
	struct extent_status *es = NULL;

	if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
		return 0;

	trace_ext4_es_remove_extent(inode, lblk, len);
	es_debug("remove [%u/%u) from extent status tree of inode %lu\n",
		 lblk, len, inode->i_ino);

	if (!len)
		return err;

	end = lblk + len - 1;
	BUG_ON(end < lblk);

retry:
	if (err && !es)
		es = __es_alloc_extent(true);
	/*
	 * ext4_clear_inode() depends on us taking i_es_lock unconditionally
	 * so that we are sure __es_shrink() is done with the inode before it
	 * is reclaimed.
	 */
	write_lock(&EXT4_I(inode)->i_es_lock);
	err = __es_remove_extent(inode, lblk, end, &reserved, es);
	if (es && !es->es_len)
		__es_free_extent(es);
	write_unlock(&EXT4_I(inode)->i_es_lock);
	if (err)
		goto retry;

	ext4_es_print_tree(inode);
	ext4_da_release_space(inode, reserved);
	return 0;
}
static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
		       struct ext4_inode_info *locked_ei)
{
	struct ext4_inode_info *ei;
	struct ext4_es_stats *es_stats;
	ktime_t start_time;
	u64 scan_time;
	int nr_to_walk;
	int nr_shrunk = 0;
	int retried = 0, nr_skipped = 0;

	es_stats = &sbi->s_es_stats;
	start_time = ktime_get();

retry:
	spin_lock(&sbi->s_es_lock);
	nr_to_walk = sbi->s_es_nr_inode;
	while (nr_to_walk-- > 0) {
		if (list_empty(&sbi->s_es_list)) {
			spin_unlock(&sbi->s_es_lock);
			goto out;
		}
		ei = list_first_entry(&sbi->s_es_list, struct ext4_inode_info,
				      i_es_list);
		/* Move the inode to the tail */
		list_move_tail(&ei->i_es_list, &sbi->s_es_list);

		/*
		 * Normally we try hard to avoid shrinking precached inodes,
		 * but we will as a last resort.
		 */
		if (!retried && ext4_test_inode_state(&ei->vfs_inode,
						EXT4_STATE_EXT_PRECACHED)) {
			nr_skipped++;
			continue;
		}

		if (ei == locked_ei || !write_trylock(&ei->i_es_lock)) {
			nr_skipped++;
			continue;
		}
		/*
		 * Now we hold i_es_lock which protects us from inode reclaim
		 * freeing inode under us
		 */
		spin_unlock(&sbi->s_es_lock);

		nr_shrunk += es_reclaim_extents(ei, &nr_to_scan);
		write_unlock(&ei->i_es_lock);

		if (nr_to_scan <= 0)
			goto out;
		spin_lock(&sbi->s_es_lock);
	}
	spin_unlock(&sbi->s_es_lock);

	/*
	 * If we skipped any inodes, and we weren't able to make any
	 * forward progress, try again to scan precached inodes.
	 */
	if ((nr_shrunk == 0) && nr_skipped && !retried) {
		retried++;
		goto retry;
	}

	if (locked_ei && nr_shrunk == 0)
		nr_shrunk = es_reclaim_extents(locked_ei, &nr_to_scan);

out:
	scan_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));
	if (likely(es_stats->es_stats_scan_time))
		es_stats->es_stats_scan_time = (scan_time +
				es_stats->es_stats_scan_time*3) / 4;
	else
		es_stats->es_stats_scan_time = scan_time;
	if (scan_time > es_stats->es_stats_max_scan_time)
		es_stats->es_stats_max_scan_time = scan_time;
	if (likely(es_stats->es_stats_shrunk))
		es_stats->es_stats_shrunk = (nr_shrunk +
				es_stats->es_stats_shrunk*3) / 4;
	else
		es_stats->es_stats_shrunk = nr_shrunk;

	trace_ext4_es_shrink(sbi->s_sb, nr_shrunk, scan_time,
			     nr_skipped, retried);
	return nr_shrunk;
}

static unsigned long ext4_es_count(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	unsigned long nr;
	struct ext4_sb_info *sbi;

	sbi = container_of(shrink, struct ext4_sb_info, s_es_shrinker);
	nr = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt);
	trace_ext4_es_shrink_count(sbi->s_sb, sc->nr_to_scan, nr);
	return nr;
}

static unsigned long ext4_es_scan(struct shrinker *shrink,
				  struct shrink_control *sc)
{
	struct ext4_sb_info *sbi = container_of(shrink,
					struct ext4_sb_info, s_es_shrinker);
	int nr_to_scan = sc->nr_to_scan;
	int ret, nr_shrunk;

	ret = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt);
	trace_ext4_es_shrink_scan_enter(sbi->s_sb, nr_to_scan, ret);

	nr_shrunk = __es_shrink(sbi, nr_to_scan, NULL);

	ret = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt);
	trace_ext4_es_shrink_scan_exit(sbi->s_sb, nr_shrunk, ret);
	return nr_shrunk;
}
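/*
 * Worked example (illustrative): the stats above keep a 3/4-weighted
 * moving average, new_avg = (sample + old_avg * 3) / 4.  With an old
 * average scan time of 8000 ns and a new sample of 4000 ns, the stored
 * value becomes (4000 + 24000) / 4 == 7000 ns, so one fast (or slow)
 * scan only moves the average a quarter of the way toward the sample.
 */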
int ext4_seq_es_shrinker_info_show(struct seq_file *seq, void *v)
{
	struct ext4_sb_info *sbi = EXT4_SB((struct super_block *) seq->private);
	struct ext4_es_stats *es_stats = &sbi->s_es_stats;
	struct ext4_inode_info *ei, *max = NULL;
	unsigned int inode_cnt = 0;

	if (v != SEQ_START_TOKEN)
		return 0;

	/* here we just find an inode that has the max nr. of objects */
	spin_lock(&sbi->s_es_lock);
	list_for_each_entry(ei, &sbi->s_es_list, i_es_list) {
		inode_cnt++;
		if (max && max->i_es_all_nr < ei->i_es_all_nr)
			max = ei;
		else if (!max)
			max = ei;
	}
	spin_unlock(&sbi->s_es_lock);

	seq_printf(seq, "stats:\n  %lld objects\n  %lld reclaimable objects\n",
		   percpu_counter_sum_positive(&es_stats->es_stats_all_cnt),
		   percpu_counter_sum_positive(&es_stats->es_stats_shk_cnt));
	seq_printf(seq, "  %lld/%lld cache hits/misses\n",
		   percpu_counter_sum_positive(&es_stats->es_stats_cache_hits),
		   percpu_counter_sum_positive(&es_stats->es_stats_cache_misses));
	if (inode_cnt)
		seq_printf(seq, "  %d inodes on list\n", inode_cnt);

	seq_printf(seq, "average:\n  %llu us scan time\n",
		   div_u64(es_stats->es_stats_scan_time, 1000));
	seq_printf(seq, "  %lu shrunk objects\n", es_stats->es_stats_shrunk);
	if (inode_cnt)
		seq_printf(seq,
			"maximum:\n  %lu inode (%u objects, %u reclaimable)\n"
			"  %llu us max scan time\n",
			max->vfs_inode.i_ino, max->i_es_all_nr, max->i_es_shk_nr,
			div_u64(es_stats->es_stats_max_scan_time, 1000));

	return 0;
}

int ext4_es_register_shrinker(struct ext4_sb_info *sbi)
{
	int err;

	/* Make sure we have enough bits for physical block number */
	BUILD_BUG_ON(ES_SHIFT < 48);
	INIT_LIST_HEAD(&sbi->s_es_list);
	sbi->s_es_nr_inode = 0;
	spin_lock_init(&sbi->s_es_lock);
	sbi->s_es_stats.es_stats_shrunk = 0;
	err = percpu_counter_init(&sbi->s_es_stats.es_stats_cache_hits, 0,
				  GFP_KERNEL);
	if (err)
		return err;
	err = percpu_counter_init(&sbi->s_es_stats.es_stats_cache_misses, 0,
				  GFP_KERNEL);
	if (err)
		goto err1;
	sbi->s_es_stats.es_stats_scan_time = 0;
	sbi->s_es_stats.es_stats_max_scan_time = 0;
	err = percpu_counter_init(&sbi->s_es_stats.es_stats_all_cnt, 0, GFP_KERNEL);
	if (err)
		goto err2;
	err = percpu_counter_init(&sbi->s_es_stats.es_stats_shk_cnt, 0, GFP_KERNEL);
	if (err)
		goto err3;

	sbi->s_es_shrinker.scan_objects = ext4_es_scan;
	sbi->s_es_shrinker.count_objects = ext4_es_count;
	sbi->s_es_shrinker.seeks = DEFAULT_SEEKS;
	err = register_shrinker(&sbi->s_es_shrinker, "ext4-es:%s",
				sbi->s_sb->s_id);
	if (err)
		goto err4;

	return 0;
err4:
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_shk_cnt);
err3:
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_all_cnt);
err2:
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_cache_misses);
err1:
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_cache_hits);
	return err;
}

void ext4_es_unregister_shrinker(struct ext4_sb_info *sbi)
{
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_cache_hits);
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_cache_misses);
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_all_cnt);
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_shk_cnt);
	unregister_shrinker(&sbi->s_es_shrinker);
}
/*
 * Shrink extents in given inode from ei->i_es_shrink_lblk till end.  Scan at
 * most *nr_to_scan extents, update *nr_to_scan accordingly.
 *
 * Return 0 if we hit end of tree / interval, 1 if we exhausted nr_to_scan.
 * Increment *nr_shrunk by the number of reclaimed extents.  Also update
 * ei->i_es_shrink_lblk to where we should continue scanning.
 */
static int es_do_reclaim_extents(struct ext4_inode_info *ei, ext4_lblk_t end,
				 int *nr_to_scan, int *nr_shrunk)
{
	struct inode *inode = &ei->vfs_inode;
	struct ext4_es_tree *tree = &ei->i_es_tree;
	struct extent_status *es;
	struct rb_node *node;

	es = __es_tree_search(&tree->root, ei->i_es_shrink_lblk);
	if (!es)
		goto out_wrap;

	while (*nr_to_scan > 0) {
		if (es->es_lblk > end) {
			ei->i_es_shrink_lblk = end + 1;
			return 0;
		}

		(*nr_to_scan)--;
		node = rb_next(&es->rb_node);

		if (ext4_es_must_keep(es))
			goto next;
		if (ext4_es_is_referenced(es)) {
			ext4_es_clear_referenced(es);
			goto next;
		}

		rb_erase(&es->rb_node, &tree->root);
		ext4_es_free_extent(inode, es);
		(*nr_shrunk)++;
next:
		if (!node)
			goto out_wrap;
		es = rb_entry(node, struct extent_status, rb_node);
	}
	ei->i_es_shrink_lblk = es->es_lblk;
	return 1;
out_wrap:
	ei->i_es_shrink_lblk = 0;
	return 0;
}

static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan)
{
	struct inode *inode = &ei->vfs_inode;
	int nr_shrunk = 0;
	ext4_lblk_t start = ei->i_es_shrink_lblk;
	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	if (ei->i_es_shk_nr == 0)
		return 0;

	if (ext4_test_inode_state(inode, EXT4_STATE_EXT_PRECACHED) &&
	    __ratelimit(&_rs))
		ext4_warning(inode->i_sb, "forced shrink of precached extents");

	if (!es_do_reclaim_extents(ei, EXT_MAX_BLOCKS, nr_to_scan, &nr_shrunk) &&
	    start != 0)
		es_do_reclaim_extents(ei, start - 1, nr_to_scan, &nr_shrunk);

	ei->i_es_tree.cache_es = NULL;
	return nr_shrunk;
}
/*
 * Called to support EXT4_IOC_CLEAR_ES_CACHE.  We can only remove
 * discretionary entries from the extent status cache.  (Some entries
 * must be present for proper operations.)
 */
void ext4_clear_inode_es(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct extent_status *es;
	struct ext4_es_tree *tree;
	struct rb_node *node;

	write_lock(&ei->i_es_lock);
	tree = &EXT4_I(inode)->i_es_tree;
	tree->cache_es = NULL;
	node = rb_first(&tree->root);
	while (node) {
		es = rb_entry(node, struct extent_status, rb_node);
		node = rb_next(node);
		if (!ext4_es_must_keep(es)) {
			rb_erase(&es->rb_node, &tree->root);
			ext4_es_free_extent(inode, es);
		}
	}
	ext4_clear_inode_state(inode, EXT4_STATE_EXT_PRECACHED);
	write_unlock(&ei->i_es_lock);
}

#ifdef ES_DEBUG__
static void ext4_print_pending_tree(struct inode *inode)
{
	struct ext4_pending_tree *tree;
	struct rb_node *node;
	struct pending_reservation *pr;

	printk(KERN_DEBUG "pending reservations for inode %lu:", inode->i_ino);
	tree = &EXT4_I(inode)->i_pending_tree;
	node = rb_first(&tree->root);
	while (node) {
		pr = rb_entry(node, struct pending_reservation, rb_node);
		printk(KERN_DEBUG " %u", pr->lclu);
		node = rb_next(node);
	}
	printk(KERN_DEBUG "\n");
}
#else
#define ext4_print_pending_tree(inode)
#endif

int __init ext4_init_pending(void)
{
	ext4_pending_cachep = KMEM_CACHE(pending_reservation, SLAB_RECLAIM_ACCOUNT);
	if (ext4_pending_cachep == NULL)
		return -ENOMEM;
	return 0;
}

void ext4_exit_pending(void)
{
	kmem_cache_destroy(ext4_pending_cachep);
}

void ext4_init_pending_tree(struct ext4_pending_tree *tree)
{
	tree->root = RB_ROOT;
}

/*
 * __get_pending - retrieve a pointer to a pending reservation
 *
 * @inode - file containing the pending cluster reservation
 * @lclu - logical cluster of interest
 *
 * Returns a pointer to a pending reservation if it's a member of
 * the set, and NULL if not.  Must be called holding i_es_lock.
 */
static struct pending_reservation *__get_pending(struct inode *inode,
						 ext4_lblk_t lclu)
{
	struct ext4_pending_tree *tree;
	struct rb_node *node;
	struct pending_reservation *pr = NULL;

	tree = &EXT4_I(inode)->i_pending_tree;
	node = (&tree->root)->rb_node;

	while (node) {
		pr = rb_entry(node, struct pending_reservation, rb_node);
		if (lclu < pr->lclu)
			node = node->rb_left;
		else if (lclu > pr->lclu)
			node = node->rb_right;
		else if (lclu == pr->lclu)
			return pr;
	}
	return NULL;
}
/*
 * __insert_pending - adds a pending cluster reservation to the set of
 *                    pending reservations
 *
 * @inode - file containing the cluster
 * @lblk - logical block in the cluster to be added
 *
 * Returns 0 on successful insertion and -ENOMEM on failure.  If the
 * pending reservation is already in the set, returns successfully.
 */
static int __insert_pending(struct inode *inode, ext4_lblk_t lblk)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_pending_tree *tree = &EXT4_I(inode)->i_pending_tree;
	struct rb_node **p = &tree->root.rb_node;
	struct rb_node *parent = NULL;
	struct pending_reservation *pr;
	ext4_lblk_t lclu;
	int ret = 0;

	lclu = EXT4_B2C(sbi, lblk);
	/* search to find parent for insertion */
	while (*p) {
		parent = *p;
		pr = rb_entry(parent, struct pending_reservation, rb_node);

		if (lclu < pr->lclu) {
			p = &(*p)->rb_left;
		} else if (lclu > pr->lclu) {
			p = &(*p)->rb_right;
		} else {
			/* pending reservation already inserted */
			goto out;
		}
	}

	pr = kmem_cache_alloc(ext4_pending_cachep, GFP_ATOMIC);
	if (pr == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	pr->lclu = lclu;

	rb_link_node(&pr->rb_node, parent, p);
	rb_insert_color(&pr->rb_node, &tree->root);

out:
	return ret;
}

/*
 * __remove_pending - removes a pending cluster reservation from the set
 *                    of pending reservations
 *
 * @inode - file containing the cluster
 * @lblk - logical block in the pending cluster reservation to be removed
 *
 * Does nothing if the pending reservation is not a member of the set.
 */
static void __remove_pending(struct inode *inode, ext4_lblk_t lblk)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct pending_reservation *pr;
	struct ext4_pending_tree *tree;

	pr = __get_pending(inode, EXT4_B2C(sbi, lblk));
	if (pr != NULL) {
		tree = &EXT4_I(inode)->i_pending_tree;
		rb_erase(&pr->rb_node, &tree->root);
		kmem_cache_free(ext4_pending_cachep, pr);
	}
}

/*
 * ext4_remove_pending - removes a pending cluster reservation from the set
 *                       of pending reservations
 *
 * @inode - file containing the cluster
 * @lblk - logical block in the pending cluster reservation to be removed
 *
 * Locking for external use of __remove_pending().
 */
void ext4_remove_pending(struct inode *inode, ext4_lblk_t lblk)
{
	struct ext4_inode_info *ei = EXT4_I(inode);

	write_lock(&ei->i_es_lock);
	__remove_pending(inode, lblk);
	write_unlock(&ei->i_es_lock);
}
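/*
 * Locking convention note (editor's note, not part of the original
 * source): the double-underscore helpers above (__get_pending(),
 * __insert_pending(), __remove_pending()) assume the caller already
 * holds i_es_lock, while the ext4_-prefixed wrappers such as
 * ext4_remove_pending() and ext4_is_pending() take the lock themselves
 * and exist for callers outside this file.
 */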
/*
 * ext4_is_pending - determine whether a cluster has a pending reservation
 *                   on it
 *
 * @inode - file containing the cluster
 * @lblk - logical block in the cluster
 *
 * Returns true if there's a pending reservation for the cluster in the
 * set of pending reservations, and false if not.
 */
bool ext4_is_pending(struct inode *inode, ext4_lblk_t lblk)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);
	bool ret;

	read_lock(&ei->i_es_lock);
	ret = (__get_pending(inode, EXT4_B2C(sbi, lblk)) != NULL);
	read_unlock(&ei->i_es_lock);

	return ret;
}

/*
 * ext4_es_insert_delayed_block - adds a delayed block to the extents status
 *                                tree, adding a pending reservation where
 *                                needed
 *
 * @inode - file containing the newly added block
 * @lblk - logical block to be added
 * @allocated - indicates whether a physical cluster has been allocated for
 *              the logical cluster that contains the block
 *
 * Returns 0 on success, negative error code on failure.
 */
int ext4_es_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk,
				 bool allocated)
{
	struct extent_status newes;
	int err1 = 0;
	int err2 = 0;
	struct extent_status *es1 = NULL;
	struct extent_status *es2 = NULL;

	if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
		return 0;

	es_debug("add [%u/1) delayed to extent status tree of inode %lu\n",
		 lblk, inode->i_ino);

	newes.es_lblk = lblk;
	newes.es_len = 1;
	ext4_es_store_pblock_status(&newes, ~0, EXTENT_STATUS_DELAYED);
	trace_ext4_es_insert_delayed_block(inode, &newes, allocated);

	ext4_es_insert_extent_check(inode, &newes);

retry:
	if (err1 && !es1)
		es1 = __es_alloc_extent(true);
	if ((err1 || err2) && !es2)
		es2 = __es_alloc_extent(true);
	write_lock(&EXT4_I(inode)->i_es_lock);

	err1 = __es_remove_extent(inode, lblk, lblk, NULL, es1);
	if (err1 != 0)
		goto error;

	err2 = __es_insert_extent(inode, &newes, es2);
	if (err2 != 0)
		goto error;

	if (allocated)
		__insert_pending(inode, lblk);

	/* es is pre-allocated but not used, free it. */
	if (es1 && !es1->es_len)
		__es_free_extent(es1);
	if (es2 && !es2->es_len)
		__es_free_extent(es2);
error:
	write_unlock(&EXT4_I(inode)->i_es_lock);
	if (err1 || err2)
		goto retry;

	ext4_es_print_tree(inode);
	ext4_print_pending_tree(inode);
	return 0;
}
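/*
 * Note on the retry pattern above (editor's note, not part of the
 * original source): on the first pass no extents are preallocated, and
 * __es_remove_extent() or __es_insert_extent() may fail for lack of a
 * spare struct extent_status while i_es_lock is held.  In that case the
 * lock is dropped and the needed extent(s) are preallocated outside the
 * lock via __es_alloc_extent(true) (the 'true' argument is taken here to
 * request a may-not-fail allocation, given its use on the retry path),
 * after which the whole remove+insert sequence is retried.  Preallocated
 * extents that went unused (es_len == 0) are freed before the lock is
 * released.
 */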
/*
 * __es_delayed_clu - count number of clusters containing blocks that
 *                    are delayed only
 *
 * @inode - file containing block range
 * @start - logical block defining start of range
 * @end - logical block defining end of range
 *
 * Returns the number of clusters containing only delayed (not delayed
 * and unwritten) blocks in the range specified by @start and @end.  Any
 * cluster or part of a cluster within the range and containing a delayed
 * and not unwritten block within the range is counted as a whole cluster.
 */
static unsigned int __es_delayed_clu(struct inode *inode, ext4_lblk_t start,
				     ext4_lblk_t end)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct extent_status *es;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct rb_node *node;
	ext4_lblk_t first_lclu, last_lclu;
	unsigned long long last_counted_lclu;
	unsigned int n = 0;

	/* guaranteed to be unequal to any ext4_lblk_t value */
	last_counted_lclu = ~0ULL;

	es = __es_tree_search(&tree->root, start);

	while (es && (es->es_lblk <= end)) {
		if (ext4_es_is_delonly(es)) {
			if (es->es_lblk <= start)
				first_lclu = EXT4_B2C(sbi, start);
			else
				first_lclu = EXT4_B2C(sbi, es->es_lblk);

			if (ext4_es_end(es) >= end)
				last_lclu = EXT4_B2C(sbi, end);
			else
				last_lclu = EXT4_B2C(sbi, ext4_es_end(es));

			if (first_lclu == last_counted_lclu)
				n += last_lclu - first_lclu;
			else
				n += last_lclu - first_lclu + 1;
			last_counted_lclu = last_lclu;
		}
		node = rb_next(&es->rb_node);
		if (!node)
			break;
		es = rb_entry(node, struct extent_status, rb_node);
	}

	return n;
}

/*
 * ext4_es_delayed_clu - count number of clusters containing blocks that
 *                       are delayed only
 *
 * @inode - file containing block range
 * @lblk - logical block defining start of range
 * @len - number of blocks in range
 *
 * Locking for external use of __es_delayed_clu().
 */
unsigned int ext4_es_delayed_clu(struct inode *inode, ext4_lblk_t lblk,
				 ext4_lblk_t len)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	ext4_lblk_t end;
	unsigned int n;

	if (len == 0)
		return 0;

	end = lblk + len - 1;
	WARN_ON(end < lblk);

	read_lock(&ei->i_es_lock);

	n = __es_delayed_clu(inode, lblk, end);

	read_unlock(&ei->i_es_lock);

	return n;
}
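/*
 * Worked example (editor's note, illustrative only): with
 * s_cluster_ratio == 4, clusters cover blocks [0-3], [4-7], [8-11].
 * Suppose delayed-only extents cover blocks [2, 5] and [7, 9], and the
 * query range is [0, 11].  The first extent spans clusters 0-1 and
 * contributes last_lclu - first_lclu + 1 = 2.  The second extent starts
 * in cluster 1, which was already counted (first_lclu ==
 * last_counted_lclu), so it contributes only last_lclu - first_lclu =
 * 2 - 1 = 1.  The result is 3 clusters containing delayed-only blocks.
 */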
/*
 * __revise_pending - makes, cancels, or leaves unchanged pending cluster
 *                    reservations for a specified block range depending
 *                    upon the presence or absence of delayed blocks
 *                    outside the range within clusters at the ends of the
 *                    range
 *
 * @inode - file containing the range
 * @lblk - logical block defining the start of range
 * @len  - length of range in blocks
 *
 * Used after a newly allocated extent is added to the extents status tree.
 * Requires that the extents in the range have either written or unwritten
 * status.  Must be called while holding i_es_lock.
 */
static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
			     ext4_lblk_t len)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	ext4_lblk_t end = lblk + len - 1;
	ext4_lblk_t first, last;
	bool f_del = false, l_del = false;

	if (len == 0)
		return;

	/*
	 * Two cases - block range within single cluster and block range
	 * spanning two or more clusters.  Note that a cluster belonging
	 * to a range starting and/or ending on a cluster boundary is treated
	 * as if it does not contain a delayed extent.  The new range may
	 * have allocated space for previously delayed blocks out to the
	 * cluster boundary, requiring that any pre-existing pending
	 * reservation be canceled.  Because this code only looks at blocks
	 * outside the range, it should revise pending reservations
	 * correctly even if the extent represented by the range can't be
	 * inserted in the extents status tree due to ENOSPC.
	 */

	if (EXT4_B2C(sbi, lblk) == EXT4_B2C(sbi, end)) {
		first = EXT4_LBLK_CMASK(sbi, lblk);
		if (first != lblk)
			f_del = __es_scan_range(inode, &ext4_es_is_delonly,
						first, lblk - 1);
		if (f_del) {
			__insert_pending(inode, first);
		} else {
			last = EXT4_LBLK_CMASK(sbi, end) +
			       sbi->s_cluster_ratio - 1;
			if (last != end)
				l_del = __es_scan_range(inode,
							&ext4_es_is_delonly,
							end + 1, last);
			if (l_del)
				__insert_pending(inode, last);
			else
				__remove_pending(inode, last);
		}
	} else {
		first = EXT4_LBLK_CMASK(sbi, lblk);
		if (first != lblk)
			f_del = __es_scan_range(inode, &ext4_es_is_delonly,
						first, lblk - 1);
		if (f_del)
			__insert_pending(inode, first);
		else
			__remove_pending(inode, first);

		last = EXT4_LBLK_CMASK(sbi, end) + sbi->s_cluster_ratio - 1;
		if (last != end)
			l_del = __es_scan_range(inode, &ext4_es_is_delonly,
						end + 1, last);
		if (l_del)
			__insert_pending(inode, last);
		else
			__remove_pending(inode, last);
	}
}
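/*
 * Worked example (editor's note, illustrative only): with
 * s_cluster_ratio == 4, suppose blocks [5, 6] are written out, so
 * __revise_pending() runs with lblk == 5 and len == 2.  Both ends fall
 * in cluster 1 (blocks 4-7), so the single-cluster case applies: if
 * block 4 is still delayed-only, the cluster keeps (or gains) its
 * pending reservation; otherwise, if block 7 is delayed-only the
 * reservation is likewise kept, and if neither neighboring block
 * remains delayed the reservation is canceled.
 */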