// SPDX-License-Identifier: GPL-2.0
/*
 *  fs/ext4/extents_status.c
 *
 * Written by Yongqiang Yang <xiaoqiangnk@gmail.com>
 * Modified by
 *	Allison Henderson <achender@linux.vnet.ibm.com>
 *	Hugh Dickins <hughd@google.com>
 *	Zheng Liu <wenqing.lz@taobao.com>
 *
 * Ext4 extents status tree core functions.
 */
#include <linux/list_sort.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include "ext4.h"

#include <trace/events/ext4.h>

/*
 * According to previous discussion in the Ext4 Developer Workshop, we
 * will introduce a new structure called the io tree to track all extent
 * status in order to solve some problems that we have met
 * (e.g. reservation space warning), and to provide extent-level locking.
 * The delay extent tree is the first step to achieve this goal.  It was
 * originally built by Yongqiang Yang.  At that time it was called the
 * delay extent tree, whose goal was only to track delayed extents in
 * memory to simplify the implementation of fiemap and bigalloc, and to
 * introduce lseek SEEK_DATA/SEEK_HOLE support.  That is why it was still
 * called the delay extent tree in the first commit.  But to better
 * reflect what it does, it has since been renamed to the extent status
 * tree.
 *
 * Step1:
 *	Currently the first step has been done.  All delayed extents are
 *	tracked in the tree.  The tree is updated when a delayed allocation
 *	is issued and when the delayed extent is written out or
 *	invalidated.  Therefore the implementation of fiemap and bigalloc
 *	is simplified, and SEEK_DATA/SEEK_HOLE are introduced.
 *
 *	The following comment describes the implementation of the extent
 *	status tree and future work.
 *
 * Step2:
 *	In this step all extent status is tracked by the extent status
 *	tree.  Thus, we can first try to look up a block mapping in this
 *	tree before searching the extent tree.  Hence, the single extent
 *	cache can be removed because the extent status tree can do a
 *	better job.  Extents in the status tree are loaded on demand, so
 *	the extent status tree may not contain all of the extents in a
 *	file.  Meanwhile we define a shrinker to reclaim memory from the
 *	extent status tree because a fragmented extent tree will make the
 *	status tree consume too much memory.  Written/unwritten/hole
 *	extents in the tree will be reclaimed by this shrinker when we are
 *	under high memory pressure.  Delayed extents will not be reclaimed
 *	because fiemap, bigalloc, and seek_data/hole need them.
 */

/*
 * Extent status tree implementation for ext4.
 *
 *
 * ==========================================================================
 * Extent status tree tracks all extent status.
 *
 * 1. Why do we need to implement an extent status tree?
 *
 * Without the extent status tree, ext4 identifies a delayed extent by
 * looking up the page cache; this has several deficiencies: complicated,
 * buggy, and inefficient code.
 *
 * FIEMAP, SEEK_HOLE/DATA, bigalloc, and writeout all need to know if a
 * block or a range of blocks belongs to a delayed extent.
 *
 * Let us have a look at how they work without the extent status tree.
 *
 * --	FIEMAP
 *	FIEMAP looks up the page cache to identify delayed allocations
 *	from holes.
 *
 * --	SEEK_HOLE/DATA
 *	SEEK_HOLE/DATA has the same problem as FIEMAP.
 *
 * --	bigalloc
 *	bigalloc looks up the page cache to figure out if a block is
 *	already under delayed allocation or not, to determine whether
 *	quota reservation is needed for the cluster.
 *
 * --	writeout
 *	Writeout looks up the whole page cache to see if a buffer is
 *	mapped.  If there are not very many delayed buffers, this is
 *	time consuming.
 *
 * With the extent status tree implementation, FIEMAP, SEEK_HOLE/DATA,
 * bigalloc and writeout can figure out whether a block or a range of
 * blocks is under delayed allocation (belongs to a delayed extent) or
 * not by searching the extent status tree.
 *
 *
 * ==========================================================================
 * 2. Ext4 extent status tree implementation
 *
 * --	extent
 *	An extent is a range of blocks which are contiguous logically and
 *	physically.  Unlike an extent in the extent tree, this extent is
 *	an in-memory struct; there is no corresponding on-disk data.
 *	There is no limit on the length of an extent, so an extent can
 *	contain as many blocks as are contiguous logically and physically.
 *
 * --	extent status tree
 *	Every inode has an extent status tree and all allocated blocks
 *	are added to the tree with their status.  The extents in the
 *	tree are ordered by logical block number.
 *
 * --	operations on an extent status tree
 *	There are three important operations on an extent status tree:
 *	finding the next extent, adding an extent (a range of blocks),
 *	and removing an extent.
 *
 * --	race on an extent status tree
 *	The extent status tree is protected by inode->i_es_lock.
 *
 * --	memory consumption
 *	A fragmented extent tree will make the extent status tree consume
 *	too much memory.  Hence, we will reclaim written/unwritten/hole
 *	extents from the tree under heavy memory pressure.
 *
 *
 * ==========================================================================
 * 3. Performance analysis
 *
 * --	overhead
 *	1. There is a cached extent for write access, so if writes are
 *	not very random, adding-space operations complete in O(1) time.
 *
 * --	gain
 *	2. The code is much simpler, more readable, more maintainable and
 *	more efficient.
 *
 *
 * ==========================================================================
 * 4. TODO list
 *
 * -- Refactor delayed space reservation
 *
 * -- Extent-level locking
 */
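
/*
 * Illustrative sketch (an assumption for documentation, not used by the
 * code below): a minimal example of how a caller can ask whether a single
 * logical block is currently tracked as delayed, using the public helpers
 * declared in extents_status.h.
 */
static inline bool ext4_es_example_block_is_delayed(struct inode *inode,
						    ext4_lblk_t lblk)
{
	struct extent_status es;

	if (!ext4_es_lookup_extent(inode, lblk, NULL, &es))
		return false;	/* block is not cached in the status tree */
	return ext4_es_is_delayed(&es);
}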

static struct kmem_cache *ext4_es_cachep;
static struct kmem_cache *ext4_pending_cachep;

static int __es_insert_extent(struct inode *inode, struct extent_status *newes,
			      struct extent_status *prealloc);
static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
			      ext4_lblk_t end, int *reserved,
			      struct extent_status *prealloc);
static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan);
static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
		       struct ext4_inode_info *locked_ei);
static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
			     ext4_lblk_t len);

int __init ext4_init_es(void)
{
	ext4_es_cachep = KMEM_CACHE(extent_status, SLAB_RECLAIM_ACCOUNT);
	if (ext4_es_cachep == NULL)
		return -ENOMEM;
	return 0;
}

void ext4_exit_es(void)
{
	kmem_cache_destroy(ext4_es_cachep);
}

void ext4_es_init_tree(struct ext4_es_tree *tree)
{
	tree->root = RB_ROOT;
	tree->cache_es = NULL;
}

#ifdef ES_DEBUG__
static void ext4_es_print_tree(struct inode *inode)
{
	struct ext4_es_tree *tree;
	struct rb_node *node;

	printk(KERN_DEBUG "status extents for inode %lu:", inode->i_ino);
	tree = &EXT4_I(inode)->i_es_tree;
	node = rb_first(&tree->root);
	while (node) {
		struct extent_status *es;

		es = rb_entry(node, struct extent_status, rb_node);
		printk(KERN_DEBUG " [%u/%u) %llu %x",
		       es->es_lblk, es->es_len,
		       ext4_es_pblock(es), ext4_es_status(es));
		node = rb_next(node);
	}
	printk(KERN_DEBUG "\n");
}
#else
#define ext4_es_print_tree(inode)
#endif

static inline ext4_lblk_t ext4_es_end(struct extent_status *es)
{
	BUG_ON(es->es_lblk + es->es_len < es->es_lblk);
	return es->es_lblk + es->es_len - 1;
}

/*
 * Search through the tree for a delayed extent with a given offset.  If
 * it can't be found, try to find the next extent.
 */
static struct extent_status *__es_tree_search(struct rb_root *root,
					      ext4_lblk_t lblk)
{
	struct rb_node *node = root->rb_node;
	struct extent_status *es = NULL;

	while (node) {
		es = rb_entry(node, struct extent_status, rb_node);
		if (lblk < es->es_lblk)
			node = node->rb_left;
		else if (lblk > ext4_es_end(es))
			node = node->rb_right;
		else
			return es;
	}

	if (es && lblk < es->es_lblk)
		return es;

	if (es && lblk > ext4_es_end(es)) {
		node = rb_next(&es->rb_node);
		return node ? rb_entry(node, struct extent_status, rb_node) :
			      NULL;
	}

	return NULL;
}

/*
 * ext4_es_find_extent_range - find extent with specified status within block
 *                             range or next extent following block range in
 *                             extents status tree
 *
 * @inode - file containing the range
 * @matching_fn - pointer to function that matches extents with desired status
 * @lblk - logical block defining start of range
 * @end - logical block defining end of range
 * @es - extent found, if any
 *
 * Find the first extent within the block range specified by @lblk and @end
 * in the extents status tree that satisfies @matching_fn.  If a match
 * is found, it's returned in @es.  If not, and a matching extent is found
 * beyond the block range, it's returned in @es.  If no match is found, an
 * extent is returned in @es whose es_lblk, es_len, and es_pblk components
 * are 0.
 */
static void __es_find_extent_range(struct inode *inode,
				   int (*matching_fn)(struct extent_status *es),
				   ext4_lblk_t lblk, ext4_lblk_t end,
				   struct extent_status *es)
{
	struct ext4_es_tree *tree = NULL;
	struct extent_status *es1 = NULL;
	struct rb_node *node;

	WARN_ON(es == NULL);
	WARN_ON(end < lblk);

	tree = &EXT4_I(inode)->i_es_tree;

	/* see if the extent has been cached */
	es->es_lblk = es->es_len = es->es_pblk = 0;
	es1 = READ_ONCE(tree->cache_es);
	if (es1 && in_range(lblk, es1->es_lblk, es1->es_len)) {
		es_debug("%u cached by [%u/%u) %llu %x\n",
			 lblk, es1->es_lblk, es1->es_len,
			 ext4_es_pblock(es1), ext4_es_status(es1));
		goto out;
	}

	es1 = __es_tree_search(&tree->root, lblk);

out:
	if (es1 && !matching_fn(es1)) {
		while ((node = rb_next(&es1->rb_node)) != NULL) {
			es1 = rb_entry(node, struct extent_status, rb_node);
			if (es1->es_lblk > end) {
				es1 = NULL;
				break;
			}
			if (matching_fn(es1))
				break;
		}
	}

	if (es1 && matching_fn(es1)) {
		WRITE_ONCE(tree->cache_es, es1);
		es->es_lblk = es1->es_lblk;
		es->es_len = es1->es_len;
		es->es_pblk = es1->es_pblk;
	}
}

/*
 * Locking for __es_find_extent_range() for external use
 */
void ext4_es_find_extent_range(struct inode *inode,
			       int (*matching_fn)(struct extent_status *es),
			       ext4_lblk_t lblk, ext4_lblk_t end,
			       struct extent_status *es)
{
	if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
		return;

	trace_ext4_es_find_extent_range_enter(inode, lblk);

	read_lock(&EXT4_I(inode)->i_es_lock);
	__es_find_extent_range(inode, matching_fn, lblk, end, es);
	read_unlock(&EXT4_I(inode)->i_es_lock);

	trace_ext4_es_find_extent_range_exit(inode, es);
}
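
/*
 * Illustrative sketch (an assumption for documentation, not used by the
 * code below): locating the first delayed extent at or after @lblk, the
 * pattern SEEK_DATA/SEEK_HOLE style callers follow.
 */
static inline bool ext4_es_example_find_delayed(struct inode *inode,
						ext4_lblk_t lblk,
						struct extent_status *es)
{
	ext4_es_find_extent_range(inode, &ext4_es_is_delayed, lblk,
				  EXT_MAX_BLOCKS - 1, es);
	return es->es_len != 0;	/* a zeroed @es means nothing matched */
}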

/*
 * __es_scan_range - search block range for block with specified status
 *                   in extents status tree
 *
 * @inode - file containing the range
 * @matching_fn - pointer to function that matches extents with desired status
 * @lblk - logical block defining start of range
 * @end - logical block defining end of range
 *
 * Returns true if at least one block in the specified block range satisfies
 * the criterion specified by @matching_fn, and false if not.  If at least
 * one extent has the specified status, then there is at least one block
 * in the range with that status.  Should only be called by code that has
 * taken i_es_lock.
 */
static bool __es_scan_range(struct inode *inode,
			    int (*matching_fn)(struct extent_status *es),
			    ext4_lblk_t start, ext4_lblk_t end)
{
	struct extent_status es;

	__es_find_extent_range(inode, matching_fn, start, end, &es);
	if (es.es_len == 0)
		return false;   /* no matching extent in the tree */
	else if (es.es_lblk <= start &&
		 start < es.es_lblk + es.es_len)
		return true;
	else if (start <= es.es_lblk && es.es_lblk <= end)
		return true;
	else
		return false;
}

/*
 * Locking for __es_scan_range() for external use
 */
bool ext4_es_scan_range(struct inode *inode,
			int (*matching_fn)(struct extent_status *es),
			ext4_lblk_t lblk, ext4_lblk_t end)
{
	bool ret;

	if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
		return false;

	read_lock(&EXT4_I(inode)->i_es_lock);
	ret = __es_scan_range(inode, matching_fn, lblk, end);
	read_unlock(&EXT4_I(inode)->i_es_lock);

	return ret;
}

/*
 * __es_scan_clu - search cluster for block with specified status in
 *                 extents status tree
 *
 * @inode - file containing the cluster
 * @matching_fn - pointer to function that matches extents with desired status
 * @lblk - logical block in cluster to be searched
 *
 * Returns true if at least one extent in the cluster containing @lblk
 * satisfies the criterion specified by @matching_fn, and false if not.  If at
 * least one extent has the specified status, then there is at least one block
 * in the cluster with that status.  Should only be called by code that has
 * taken i_es_lock.
 */
static bool __es_scan_clu(struct inode *inode,
			  int (*matching_fn)(struct extent_status *es),
			  ext4_lblk_t lblk)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	ext4_lblk_t lblk_start, lblk_end;

	lblk_start = EXT4_LBLK_CMASK(sbi, lblk);
	lblk_end = lblk_start + sbi->s_cluster_ratio - 1;

	return __es_scan_range(inode, matching_fn, lblk_start, lblk_end);
}

/*
 * Locking for __es_scan_clu() for external use
 */
bool ext4_es_scan_clu(struct inode *inode,
		      int (*matching_fn)(struct extent_status *es),
		      ext4_lblk_t lblk)
{
	bool ret;

	if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
		return false;

	read_lock(&EXT4_I(inode)->i_es_lock);
	ret = __es_scan_clu(inode, matching_fn, lblk);
	read_unlock(&EXT4_I(inode)->i_es_lock);

	return ret;
}
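
/*
 * Illustrative sketch (an assumption for documentation, not used by the
 * code below): a bigalloc-style query asking whether the cluster that
 * contains @lblk holds any delayed-and-not-unwritten block.
 */
static inline bool ext4_es_example_clu_has_delonly(struct inode *inode,
						   ext4_lblk_t lblk)
{
	return ext4_es_scan_clu(inode, &ext4_es_is_delonly, lblk);
}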

static void ext4_es_list_add(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

	if (!list_empty(&ei->i_es_list))
		return;

	spin_lock(&sbi->s_es_lock);
	if (list_empty(&ei->i_es_list)) {
		list_add_tail(&ei->i_es_list, &sbi->s_es_list);
		sbi->s_es_nr_inode++;
	}
	spin_unlock(&sbi->s_es_lock);
}

static void ext4_es_list_del(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

	spin_lock(&sbi->s_es_lock);
	if (!list_empty(&ei->i_es_list)) {
		list_del_init(&ei->i_es_list);
		sbi->s_es_nr_inode--;
		WARN_ON_ONCE(sbi->s_es_nr_inode < 0);
	}
	spin_unlock(&sbi->s_es_lock);
}

/*
 * Returns true if we cannot fail to allocate memory for this extent_status
 * entry and cannot reclaim it until its status changes.
 */
static inline bool ext4_es_must_keep(struct extent_status *es)
{
	/* fiemap, bigalloc, and seek_data/hole need to use it. */
	if (ext4_es_is_delayed(es))
		return true;

	return false;
}

static inline struct extent_status *__es_alloc_extent(bool nofail)
{
	if (!nofail)
		return kmem_cache_alloc(ext4_es_cachep, GFP_ATOMIC);

	return kmem_cache_zalloc(ext4_es_cachep, GFP_KERNEL | __GFP_NOFAIL);
}

static void ext4_es_init_extent(struct inode *inode, struct extent_status *es,
		ext4_lblk_t lblk, ext4_lblk_t len, ext4_fsblk_t pblk)
{
	es->es_lblk = lblk;
	es->es_len = len;
	es->es_pblk = pblk;

	/* We never try to reclaim a must-keep extent, so we don't count it. */
	if (!ext4_es_must_keep(es)) {
		if (!EXT4_I(inode)->i_es_shk_nr++)
			ext4_es_list_add(inode);
		percpu_counter_inc(&EXT4_SB(inode->i_sb)->
					s_es_stats.es_stats_shk_cnt);
	}

	EXT4_I(inode)->i_es_all_nr++;
	percpu_counter_inc(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt);
}

static inline void __es_free_extent(struct extent_status *es)
{
	kmem_cache_free(ext4_es_cachep, es);
}

static void ext4_es_free_extent(struct inode *inode, struct extent_status *es)
{
	EXT4_I(inode)->i_es_all_nr--;
	percpu_counter_dec(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt);

	/* Decrease the shrink counter when we can reclaim the extent. */
	if (!ext4_es_must_keep(es)) {
		BUG_ON(EXT4_I(inode)->i_es_shk_nr == 0);
		if (!--EXT4_I(inode)->i_es_shk_nr)
			ext4_es_list_del(inode);
		percpu_counter_dec(&EXT4_SB(inode->i_sb)->
					s_es_stats.es_stats_shk_cnt);
	}

	__es_free_extent(es);
}

/*
 * Check whether or not two extents can be merged.
 * Conditions:
 *  - logical block numbers are contiguous
 *  - physical block numbers are contiguous
 *  - status is equal
 */
static int ext4_es_can_be_merged(struct extent_status *es1,
				 struct extent_status *es2)
{
	if (ext4_es_type(es1) != ext4_es_type(es2))
		return 0;

	if (((__u64) es1->es_len) + es2->es_len > EXT_MAX_BLOCKS) {
		pr_warn("ES assertion failed when merging extents. "
			"The sum of lengths of es1 (%d) and es2 (%d) "
			"is bigger than allowed file size (%d)\n",
			es1->es_len, es2->es_len, EXT_MAX_BLOCKS);
		WARN_ON(1);
		return 0;
	}

	if (((__u64) es1->es_lblk) + es1->es_len != es2->es_lblk)
		return 0;

	if ((ext4_es_is_written(es1) || ext4_es_is_unwritten(es1)) &&
	    (ext4_es_pblock(es1) + es1->es_len == ext4_es_pblock(es2)))
		return 1;

	if (ext4_es_is_hole(es1))
		return 1;

	/* make sure that a delayed extent doesn't also carry unwritten status */
	if (ext4_es_is_delayed(es1) && !ext4_es_is_unwritten(es1))
		return 1;

	return 0;
}

static struct extent_status *
ext4_es_try_to_merge_left(struct inode *inode, struct extent_status *es)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct extent_status *es1;
	struct rb_node *node;

	node = rb_prev(&es->rb_node);
	if (!node)
		return es;

	es1 = rb_entry(node, struct extent_status, rb_node);
	if (ext4_es_can_be_merged(es1, es)) {
		es1->es_len += es->es_len;
		if (ext4_es_is_referenced(es))
			ext4_es_set_referenced(es1);
		rb_erase(&es->rb_node, &tree->root);
		ext4_es_free_extent(inode, es);
		es = es1;
	}

	return es;
}

static struct extent_status *
ext4_es_try_to_merge_right(struct inode *inode, struct extent_status *es)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct extent_status *es1;
	struct rb_node *node;

	node = rb_next(&es->rb_node);
	if (!node)
		return es;

	es1 = rb_entry(node, struct extent_status, rb_node);
	if (ext4_es_can_be_merged(es, es1)) {
		es->es_len += es1->es_len;
		if (ext4_es_is_referenced(es1))
			ext4_es_set_referenced(es);
		rb_erase(node, &tree->root);
		ext4_es_free_extent(inode, es1);
	}

	return es;
}
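
/*
 * Illustrative sketch (an assumption for documentation, not used by the
 * code below): two written extents merge only when both the logical and
 * the physical ranges are back to back and the status bits match, e.g.
 * [0, 8) -> 100 followed by [8, 4) -> 108 combines into [0, 12) -> 100,
 * which is what the merge helpers above exploit.
 */
static inline int ext4_es_example_can_merge(void)
{
	struct extent_status left = { .es_lblk = 0, .es_len = 8 };
	struct extent_status right = { .es_lblk = 8, .es_len = 4 };

	ext4_es_store_pblock_status(&left, 100, EXTENT_STATUS_WRITTEN);
	ext4_es_store_pblock_status(&right, 108, EXTENT_STATUS_WRITTEN);
	return ext4_es_can_be_merged(&left, &right);	/* returns 1 */
}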

#ifdef ES_AGGRESSIVE_TEST
#include "ext4_extents.h"	/* Needed when ES_AGGRESSIVE_TEST is defined */

static void ext4_es_insert_extent_ext_check(struct inode *inode,
					    struct extent_status *es)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_extent *ex;
	ext4_lblk_t ee_block;
	ext4_fsblk_t ee_start;
	unsigned short ee_len;
	int depth, ee_status, es_status;

	path = ext4_find_extent(inode, es->es_lblk, NULL, EXT4_EX_NOCACHE);
	if (IS_ERR(path))
		return;

	depth = ext_depth(inode);
	ex = path[depth].p_ext;

	if (ex) {
		ee_block = le32_to_cpu(ex->ee_block);
		ee_start = ext4_ext_pblock(ex);
		ee_len = ext4_ext_get_actual_len(ex);

		ee_status = ext4_ext_is_unwritten(ex) ? 1 : 0;
		es_status = ext4_es_is_unwritten(es) ? 1 : 0;

		/*
		 * Make sure ex and es do not overlap when we try to insert
		 * a delayed/hole extent.
		 */
		if (!ext4_es_is_written(es) && !ext4_es_is_unwritten(es)) {
			if (in_range(es->es_lblk, ee_block, ee_len)) {
				pr_warn("ES insert assertion failed for "
					"inode: %lu we can find an extent "
					"at block [%d/%d/%llu/%c], but we "
					"want to add a delayed/hole extent "
					"[%d/%d/%llu/%x]\n",
					inode->i_ino, ee_block, ee_len,
					ee_start, ee_status ? 'u' : 'w',
					es->es_lblk, es->es_len,
					ext4_es_pblock(es), ext4_es_status(es));
			}
			goto out;
		}

		/*
		 * We don't check ee_block == es->es_lblk, etc. because es
		 * might be a part of a whole extent, or vice versa.
		 */
		if (es->es_lblk < ee_block ||
		    ext4_es_pblock(es) != ee_start + es->es_lblk - ee_block) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"ex_status [%d/%d/%llu/%c] != "
				"es_status [%d/%d/%llu/%c]\n", inode->i_ino,
				ee_block, ee_len, ee_start,
				ee_status ? 'u' : 'w', es->es_lblk, es->es_len,
				ext4_es_pblock(es), es_status ? 'u' : 'w');
			goto out;
		}

		if (ee_status ^ es_status) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"ex_status [%d/%d/%llu/%c] != "
				"es_status [%d/%d/%llu/%c]\n", inode->i_ino,
				ee_block, ee_len, ee_start,
				ee_status ? 'u' : 'w', es->es_lblk, es->es_len,
				ext4_es_pblock(es), es_status ? 'u' : 'w');
		}
	} else {
		/*
		 * We can't find an extent on disk.  So we need to make sure
		 * that we are not trying to add a written/unwritten extent.
		 */
		if (!ext4_es_is_delayed(es) && !ext4_es_is_hole(es)) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"can't find an extent at block %d but we want "
				"to add a written/unwritten extent "
				"[%d/%d/%llu/%x]\n", inode->i_ino,
				es->es_lblk, es->es_lblk, es->es_len,
				ext4_es_pblock(es), ext4_es_status(es));
		}
	}
out:
	ext4_free_ext_path(path);
}

static void ext4_es_insert_extent_ind_check(struct inode *inode,
					    struct extent_status *es)
{
	struct ext4_map_blocks map;
	int retval;

	/*
	 * Here we call ext4_ind_map_blocks to look up a block mapping
	 * because the 'Indirect' structure is defined in indirect.c, so
	 * we cannot access the direct/indirect tree from outside it, and
	 * it would be too ugly to define this function in indirect.c.
	 */
	map.m_lblk = es->es_lblk;
	map.m_len = es->es_len;

	retval = ext4_ind_map_blocks(NULL, inode, &map, 0);
	if (retval > 0) {
		if (ext4_es_is_delayed(es) || ext4_es_is_hole(es)) {
			/*
			 * We want to add a delayed/hole extent but this
			 * block has been allocated.
			 */
			pr_warn("ES insert assertion failed for inode: %lu "
				"We can find blocks but we want to add a "
				"delayed/hole extent [%d/%d/%llu/%x]\n",
				inode->i_ino, es->es_lblk, es->es_len,
				ext4_es_pblock(es), ext4_es_status(es));
			return;
		} else if (ext4_es_is_written(es)) {
			if (retval != es->es_len) {
				pr_warn("ES insert assertion failed for "
					"inode: %lu retval %d != es_len %d\n",
					inode->i_ino, retval, es->es_len);
				return;
			}
			if (map.m_pblk != ext4_es_pblock(es)) {
				pr_warn("ES insert assertion failed for "
					"inode: %lu m_pblk %llu != "
					"es_pblk %llu\n",
					inode->i_ino, map.m_pblk,
					ext4_es_pblock(es));
				return;
			}
		} else {
			/*
			 * We don't need to check unwritten extents because
			 * indirect-based files don't have them.
			 */
			BUG();
		}
	} else if (retval == 0) {
		if (ext4_es_is_written(es)) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"We can't find the block but we want to add "
				"a written extent [%d/%d/%llu/%x]\n",
				inode->i_ino, es->es_lblk, es->es_len,
				ext4_es_pblock(es), ext4_es_status(es));
			return;
		}
	}
}

static inline void ext4_es_insert_extent_check(struct inode *inode,
					       struct extent_status *es)
{
	/*
	 * We don't need to worry about the race condition because the
	 * caller takes i_data_sem locking.
	 */
	BUG_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem));
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		ext4_es_insert_extent_ext_check(inode, es);
	else
		ext4_es_insert_extent_ind_check(inode, es);
}
#else
static inline void ext4_es_insert_extent_check(struct inode *inode,
					       struct extent_status *es)
{
}
#endif

static int __es_insert_extent(struct inode *inode, struct extent_status *newes,
			      struct extent_status *prealloc)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct rb_node **p = &tree->root.rb_node;
	struct rb_node *parent = NULL;
	struct extent_status *es;

	while (*p) {
		parent = *p;
		es = rb_entry(parent, struct extent_status, rb_node);

		if (newes->es_lblk < es->es_lblk) {
			if (ext4_es_can_be_merged(newes, es)) {
				/*
				 * Here we can modify es_lblk directly
				 * because it isn't overlapped.
				 */
				es->es_lblk = newes->es_lblk;
				es->es_len += newes->es_len;
				if (ext4_es_is_written(es) ||
				    ext4_es_is_unwritten(es))
					ext4_es_store_pblock(es,
							     newes->es_pblk);
				es = ext4_es_try_to_merge_left(inode, es);
				goto out;
			}
			p = &(*p)->rb_left;
		} else if (newes->es_lblk > ext4_es_end(es)) {
			if (ext4_es_can_be_merged(es, newes)) {
				es->es_len += newes->es_len;
				es = ext4_es_try_to_merge_right(inode, es);
				goto out;
			}
			p = &(*p)->rb_right;
		} else {
			BUG();
			return -EINVAL;
		}
	}

	if (prealloc)
		es = prealloc;
	else
		es = __es_alloc_extent(false);
	if (!es)
		return -ENOMEM;
	ext4_es_init_extent(inode, es, newes->es_lblk, newes->es_len,
			    newes->es_pblk);

	rb_link_node(&es->rb_node, parent, p);
	rb_insert_color(&es->rb_node, &tree->root);

out:
	tree->cache_es = es;
	return 0;
}

/*
 * ext4_es_insert_extent() adds information to an inode's extent
 * status tree.
 *
 * Return 0 on success, error code on failure.
 */
int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
			  ext4_lblk_t len, ext4_fsblk_t pblk,
			  unsigned int status)
{
	struct extent_status newes;
	ext4_lblk_t end = lblk + len - 1;
	int err1 = 0;
	int err2 = 0;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct extent_status *es1 = NULL;
	struct extent_status *es2 = NULL;

	if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
		return 0;

	es_debug("add [%u/%u) %llu %x to extent status tree of inode %lu\n",
		 lblk, len, pblk, status, inode->i_ino);

	if (!len)
		return 0;

	BUG_ON(end < lblk);

	if ((status & EXTENT_STATUS_DELAYED) &&
	    (status & EXTENT_STATUS_WRITTEN)) {
		ext4_warning(inode->i_sb, "Inserting extent [%u/%u] as "
				" delayed and written which can potentially "
				" cause data loss.", lblk, len);
		WARN_ON(1);
	}

	newes.es_lblk = lblk;
	newes.es_len = len;
	ext4_es_store_pblock_status(&newes, pblk, status);
	trace_ext4_es_insert_extent(inode, &newes);

	ext4_es_insert_extent_check(inode, &newes);

retry:
	if (err1 && !es1)
		es1 = __es_alloc_extent(true);
	if ((err1 || err2) && !es2)
		es2 = __es_alloc_extent(true);
	write_lock(&EXT4_I(inode)->i_es_lock);

	err1 = __es_remove_extent(inode, lblk, end, NULL, es1);
	if (err1 != 0)
		goto error;

	err2 = __es_insert_extent(inode, &newes, es2);
	if (err2 == -ENOMEM && !ext4_es_must_keep(&newes))
		err2 = 0;
	if (err2 != 0)
		goto error;

	if (sbi->s_cluster_ratio > 1 && test_opt(inode->i_sb, DELALLOC) &&
	    (status & EXTENT_STATUS_WRITTEN ||
	     status & EXTENT_STATUS_UNWRITTEN))
		__revise_pending(inode, lblk, len);

	/* es is pre-allocated but not used, free it. */
	if (es1 && !es1->es_len)
		__es_free_extent(es1);
	if (es2 && !es2->es_len)
		__es_free_extent(es2);
error:
	write_unlock(&EXT4_I(inode)->i_es_lock);
	if (err1 || err2)
		goto retry;

	ext4_es_print_tree(inode);
	return 0;
}
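
/*
 * Illustrative sketch (an assumption for documentation, not used by the
 * code below): recording a freshly mapped range the way a mapping path
 * would.  Note the retry loop above: on -ENOMEM the insert falls back to
 * __GFP_NOFAIL pre-allocations, so it eventually succeeds.
 */
static inline void ext4_es_example_record_written(struct inode *inode,
						  ext4_lblk_t lblk,
						  ext4_lblk_t len,
						  ext4_fsblk_t pblk)
{
	ext4_es_insert_extent(inode, lblk, len, pblk, EXTENT_STATUS_WRITTEN);
}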

/*
 * ext4_es_cache_extent() inserts information into the extent status
 * tree if and only if there isn't information about the range in
 * question already.
 */
void ext4_es_cache_extent(struct inode *inode, ext4_lblk_t lblk,
			  ext4_lblk_t len, ext4_fsblk_t pblk,
			  unsigned int status)
{
	struct extent_status *es;
	struct extent_status newes;
	ext4_lblk_t end = lblk + len - 1;

	if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
		return;

	newes.es_lblk = lblk;
	newes.es_len = len;
	ext4_es_store_pblock_status(&newes, pblk, status);
	trace_ext4_es_cache_extent(inode, &newes);

	if (!len)
		return;

	BUG_ON(end < lblk);

	write_lock(&EXT4_I(inode)->i_es_lock);

	es = __es_tree_search(&EXT4_I(inode)->i_es_tree.root, lblk);
	if (!es || es->es_lblk > end)
		__es_insert_extent(inode, &newes, NULL);
	write_unlock(&EXT4_I(inode)->i_es_lock);
}

/*
 * ext4_es_lookup_extent() looks up an extent in the extent status tree.
 *
 * ext4_es_lookup_extent is called by ext4_map_blocks/ext4_da_map_blocks.
 *
 * Return: 1 if found, 0 if not.
 */
int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk,
			  ext4_lblk_t *next_lblk,
			  struct extent_status *es)
{
	struct ext4_es_tree *tree;
	struct ext4_es_stats *stats;
	struct extent_status *es1 = NULL;
	struct rb_node *node;
	int found = 0;

	if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
		return 0;

	trace_ext4_es_lookup_extent_enter(inode, lblk);
	es_debug("lookup extent in block %u\n", lblk);

	tree = &EXT4_I(inode)->i_es_tree;
	read_lock(&EXT4_I(inode)->i_es_lock);

	/* find extent in cache first */
	es->es_lblk = es->es_len = es->es_pblk = 0;
	es1 = READ_ONCE(tree->cache_es);
	if (es1 && in_range(lblk, es1->es_lblk, es1->es_len)) {
		es_debug("%u cached by [%u/%u)\n",
			 lblk, es1->es_lblk, es1->es_len);
		found = 1;
		goto out;
	}

	node = tree->root.rb_node;
	while (node) {
		es1 = rb_entry(node, struct extent_status, rb_node);
		if (lblk < es1->es_lblk)
			node = node->rb_left;
		else if (lblk > ext4_es_end(es1))
			node = node->rb_right;
		else {
			found = 1;
			break;
		}
	}

out:
	stats = &EXT4_SB(inode->i_sb)->s_es_stats;
	if (found) {
		BUG_ON(!es1);
		es->es_lblk = es1->es_lblk;
		es->es_len = es1->es_len;
		es->es_pblk = es1->es_pblk;
		if (!ext4_es_is_referenced(es1))
			ext4_es_set_referenced(es1);
		percpu_counter_inc(&stats->es_stats_cache_hits);
		if (next_lblk) {
			node = rb_next(&es1->rb_node);
			if (node) {
				es1 = rb_entry(node, struct extent_status,
					       rb_node);
				*next_lblk = es1->es_lblk;
			} else
				*next_lblk = 0;
		}
	} else {
		percpu_counter_inc(&stats->es_stats_cache_misses);
	}

	read_unlock(&EXT4_I(inode)->i_es_lock);

	trace_ext4_es_lookup_extent_exit(inode, es, found);
	return found;
}
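
/*
 * Illustrative sketch (an assumption for documentation, not used by the
 * code below): walking the cached extents of a file with the @next_lblk
 * cursor that ext4_es_lookup_extent() hands back.
 */
static inline void ext4_es_example_walk_cache(struct inode *inode)
{
	struct extent_status es;
	ext4_lblk_t lblk = 0, next;

	while (ext4_es_lookup_extent(inode, lblk, &next, &es)) {
		/* [es.es_lblk, es.es_lblk + es.es_len) is cached here */
		if (!next)
			break;	/* no extent follows the one just found */
		lblk = next;
	}
}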

struct rsvd_count {
	int ndelonly;
	bool first_do_lblk_found;
	ext4_lblk_t first_do_lblk;
	ext4_lblk_t last_do_lblk;
	struct extent_status *left_es;
	bool partial;
	ext4_lblk_t lclu;
};

/*
 * init_rsvd - initialize reserved count data before removing block range
 *	       in file from extent status tree
 *
 * @inode - file containing range
 * @lblk - first block in range
 * @es - pointer to first extent in range
 * @rc - pointer to reserved count data
 *
 * Assumes es is not NULL
 */
static void init_rsvd(struct inode *inode, ext4_lblk_t lblk,
		      struct extent_status *es, struct rsvd_count *rc)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct rb_node *node;

	rc->ndelonly = 0;

	/*
	 * for bigalloc, note the first delonly block in the range has not
	 * been found, record the extent containing the block to the left of
	 * the region to be removed, if any, and note that there's no partial
	 * cluster to track
	 */
	if (sbi->s_cluster_ratio > 1) {
		rc->first_do_lblk_found = false;
		if (lblk > es->es_lblk) {
			rc->left_es = es;
		} else {
			node = rb_prev(&es->rb_node);
			rc->left_es = node ? rb_entry(node,
						      struct extent_status,
						      rb_node) : NULL;
		}
		rc->partial = false;
	}
}

/*
 * count_rsvd - count the clusters containing delayed and not unwritten
 *		(delonly) blocks in a range within an extent and add to
 *		the running tally in rsvd_count
 *
 * @inode - file containing extent
 * @lblk - first block in range
 * @len - length of range in blocks
 * @es - pointer to extent containing clusters to be counted
 * @rc - pointer to reserved count data
 *
 * Tracks partial clusters found at the beginning and end of extents so
 * they aren't overcounted when they span adjacent extents
 */
static void count_rsvd(struct inode *inode, ext4_lblk_t lblk, long len,
		       struct extent_status *es, struct rsvd_count *rc)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	ext4_lblk_t i, end, nclu;

	if (!ext4_es_is_delonly(es))
		return;

	WARN_ON(len <= 0);

	if (sbi->s_cluster_ratio == 1) {
		rc->ndelonly += (int) len;
		return;
	}

	/* bigalloc */

	i = (lblk < es->es_lblk) ? es->es_lblk : lblk;
	end = lblk + (ext4_lblk_t) len - 1;
	end = (end > ext4_es_end(es)) ? ext4_es_end(es) : end;

	/* record the first block of the first delonly extent seen */
	if (!rc->first_do_lblk_found) {
		rc->first_do_lblk = i;
		rc->first_do_lblk_found = true;
	}

	/* update the last lblk in the region seen so far */
	rc->last_do_lblk = end;

	/*
	 * if we're tracking a partial cluster and the current extent
	 * doesn't start with it, count it and stop tracking
	 */
	if (rc->partial && (rc->lclu != EXT4_B2C(sbi, i))) {
		rc->ndelonly++;
		rc->partial = false;
	}

	/*
	 * if the first cluster doesn't start on a cluster boundary but
	 * ends on one, count it
	 */
	if (EXT4_LBLK_COFF(sbi, i) != 0) {
		if (end >= EXT4_LBLK_CFILL(sbi, i)) {
			rc->ndelonly++;
			rc->partial = false;
			i = EXT4_LBLK_CFILL(sbi, i) + 1;
		}
	}

	/*
	 * if the current cluster starts on a cluster boundary, count the
	 * number of whole delonly clusters in the extent
	 */
	if ((i + sbi->s_cluster_ratio - 1) <= end) {
		nclu = (end - i + 1) >> sbi->s_cluster_bits;
		rc->ndelonly += nclu;
		i += nclu << sbi->s_cluster_bits;
	}

	/*
	 * start tracking a partial cluster if there's a partial at the end
	 * of the current extent and we're not already tracking one
	 */
	if (!rc->partial && i <= end) {
		rc->partial = true;
		rc->lclu = EXT4_B2C(sbi, i);
	}
}
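
/*
 * Worked example (an illustration, not kernel behavior beyond the code
 * above): with a cluster ratio of 4, counting a delonly extent covering
 * blocks [2, 10) visits clusters 0-2.  Cluster 0 is a partial head that
 * ends on a boundary, so it's counted; cluster 1 (blocks 4-7) is whole,
 * so it's counted; cluster 2 holds only the partial tail (blocks 8-9),
 * so it's left in rc->partial/rc->lclu in case the next extent continues
 * it.  Running total: rc->ndelonly += 2.
 */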

/*
 * __pr_tree_search - search for a pending cluster reservation
 *
 * @root - root of pending reservation tree
 * @lclu - logical cluster to search for
 *
 * Returns the pending reservation for the cluster identified by @lclu
 * if found.  If not, returns a reservation for the next cluster if any,
 * and if not, returns NULL.
 */
static struct pending_reservation *__pr_tree_search(struct rb_root *root,
						    ext4_lblk_t lclu)
{
	struct rb_node *node = root->rb_node;
	struct pending_reservation *pr = NULL;

	while (node) {
		pr = rb_entry(node, struct pending_reservation, rb_node);
		if (lclu < pr->lclu)
			node = node->rb_left;
		else if (lclu > pr->lclu)
			node = node->rb_right;
		else
			return pr;
	}
	if (pr && lclu < pr->lclu)
		return pr;
	if (pr && lclu > pr->lclu) {
		node = rb_next(&pr->rb_node);
		return node ? rb_entry(node, struct pending_reservation,
				       rb_node) : NULL;
	}
	return NULL;
}

/*
 * get_rsvd - calculates and returns the number of cluster reservations to be
 *	      released when removing a block range from the extent status tree
 *	      and releases any pending reservations within the range
 *
 * @inode - file containing block range
 * @end - last block in range
 * @right_es - pointer to extent containing next block beyond end or NULL
 * @rc - pointer to reserved count data
 *
 * The number of reservations to be released is equal to the number of
 * clusters containing delayed and not unwritten (delonly) blocks within
 * the range, minus the number of clusters still containing delonly blocks
 * at the ends of the range, and minus the number of pending reservations
 * within the range.
 */
static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
			     struct extent_status *right_es,
			     struct rsvd_count *rc)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct pending_reservation *pr;
	struct ext4_pending_tree *tree = &EXT4_I(inode)->i_pending_tree;
	struct rb_node *node;
	ext4_lblk_t first_lclu, last_lclu;
	bool left_delonly, right_delonly, count_pending;
	struct extent_status *es;

	if (sbi->s_cluster_ratio > 1) {
		/* count any remaining partial cluster */
		if (rc->partial)
			rc->ndelonly++;

		if (rc->ndelonly == 0)
			return 0;

		first_lclu = EXT4_B2C(sbi, rc->first_do_lblk);
		last_lclu = EXT4_B2C(sbi, rc->last_do_lblk);

		/*
		 * decrease the delonly count by the number of clusters at the
		 * ends of the range that still contain delonly blocks -
		 * these clusters still need to be reserved
		 */
		left_delonly = right_delonly = false;

		es = rc->left_es;
		while (es && ext4_es_end(es) >=
		       EXT4_LBLK_CMASK(sbi, rc->first_do_lblk)) {
			if (ext4_es_is_delonly(es)) {
				rc->ndelonly--;
				left_delonly = true;
				break;
			}
			node = rb_prev(&es->rb_node);
			if (!node)
				break;
			es = rb_entry(node, struct extent_status, rb_node);
		}
		if (right_es && (!left_delonly || first_lclu != last_lclu)) {
			if (end < ext4_es_end(right_es)) {
				es = right_es;
			} else {
				node = rb_next(&right_es->rb_node);
				es = node ? rb_entry(node, struct extent_status,
						     rb_node) : NULL;
			}
			while (es && es->es_lblk <=
			       EXT4_LBLK_CFILL(sbi, rc->last_do_lblk)) {
				if (ext4_es_is_delonly(es)) {
					rc->ndelonly--;
					right_delonly = true;
					break;
				}
				node = rb_next(&es->rb_node);
				if (!node)
					break;
				es = rb_entry(node, struct extent_status,
					      rb_node);
			}
		}

		/*
		 * Determine the block range that should be searched for
		 * pending reservations, if any.  Clusters on the ends of the
		 * original removed range containing delonly blocks are
		 * excluded.  They've already been accounted for and it's not
		 * possible to determine if an associated pending reservation
		 * should be released with the information available in the
		 * extents status tree.
		 */
		if (first_lclu == last_lclu) {
			if (left_delonly | right_delonly)
				count_pending = false;
			else
				count_pending = true;
		} else {
			if (left_delonly)
				first_lclu++;
			if (right_delonly)
				last_lclu--;
			if (first_lclu <= last_lclu)
				count_pending = true;
			else
				count_pending = false;
		}

		/*
		 * a pending reservation found between first_lclu and last_lclu
		 * represents an allocated cluster that contained at least one
		 * delonly block, so the delonly total must be reduced by one
		 * for each pending reservation found and released
		 */
		if (count_pending) {
			pr = __pr_tree_search(&tree->root, first_lclu);
			while (pr && pr->lclu <= last_lclu) {
				rc->ndelonly--;
				node = rb_next(&pr->rb_node);
				rb_erase(&pr->rb_node, &tree->root);
				kmem_cache_free(ext4_pending_cachep, pr);
				if (!node)
					break;
				pr = rb_entry(node, struct pending_reservation,
					      rb_node);
			}
		}
	}
	return rc->ndelonly;
}


/*
 * __es_remove_extent - removes block range from extent status tree
 *
 * @inode - file containing range
 * @lblk - first block in range
 * @end - last block in range
 * @reserved - number of cluster reservations released
 * @prealloc - pre-allocated es to avoid memory allocation failures
 *
 * If @reserved is not NULL and delayed allocation is enabled, counts
 * block/cluster reservations freed by removing range and if bigalloc
 * enabled cancels pending reservations as needed.  Returns 0 on success,
 * error code on failure.
 */
static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
			      ext4_lblk_t end, int *reserved,
			      struct extent_status *prealloc)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct rb_node *node;
	struct extent_status *es;
	struct extent_status orig_es;
	ext4_lblk_t len1, len2;
	ext4_fsblk_t block;
	int err = 0;
	bool count_reserved = true;
	struct rsvd_count rc;

	if (reserved == NULL || !test_opt(inode->i_sb, DELALLOC))
		count_reserved = false;

	es = __es_tree_search(&tree->root, lblk);
	if (!es)
		goto out;
	if (es->es_lblk > end)
		goto out;

	/* Simply invalidate cache_es. */
	tree->cache_es = NULL;
	if (count_reserved)
		init_rsvd(inode, lblk, es, &rc);

	orig_es.es_lblk = es->es_lblk;
	orig_es.es_len = es->es_len;
	orig_es.es_pblk = es->es_pblk;

	len1 = lblk > es->es_lblk ? lblk - es->es_lblk : 0;
	len2 = ext4_es_end(es) > end ? ext4_es_end(es) - end : 0;
	if (len1 > 0)
		es->es_len = len1;
	if (len2 > 0) {
		if (len1 > 0) {
			struct extent_status newes;

			newes.es_lblk = end + 1;
			newes.es_len = len2;
			block = 0x7FDEADBEEFULL;
			if (ext4_es_is_written(&orig_es) ||
			    ext4_es_is_unwritten(&orig_es))
				block = ext4_es_pblock(&orig_es) +
					orig_es.es_len - len2;
			ext4_es_store_pblock_status(&newes, block,
						    ext4_es_status(&orig_es));
			err = __es_insert_extent(inode, &newes, prealloc);
			if (err) {
				if (!ext4_es_must_keep(&newes))
					return 0;

				es->es_lblk = orig_es.es_lblk;
				es->es_len = orig_es.es_len;
				goto out;
			}
		} else {
			es->es_lblk = end + 1;
			es->es_len = len2;
			if (ext4_es_is_written(es) ||
			    ext4_es_is_unwritten(es)) {
				block = orig_es.es_pblk + orig_es.es_len - len2;
				ext4_es_store_pblock(es, block);
			}
		}
		if (count_reserved)
			count_rsvd(inode, lblk, orig_es.es_len - len1 - len2,
				   &orig_es, &rc);
		goto out_get_reserved;
	}

	if (len1 > 0) {
		if (count_reserved)
			count_rsvd(inode, lblk, orig_es.es_len - len1,
				   &orig_es, &rc);
		node = rb_next(&es->rb_node);
		if (node)
			es = rb_entry(node, struct extent_status, rb_node);
		else
			es = NULL;
	}

	while (es && ext4_es_end(es) <= end) {
		if (count_reserved)
			count_rsvd(inode, es->es_lblk, es->es_len, es, &rc);
		node = rb_next(&es->rb_node);
		rb_erase(&es->rb_node, &tree->root);
		ext4_es_free_extent(inode, es);
		if (!node) {
			es = NULL;
			break;
		}
		es = rb_entry(node, struct extent_status, rb_node);
	}

	if (es && es->es_lblk < end + 1) {
		ext4_lblk_t orig_len = es->es_len;

		len1 = ext4_es_end(es) - end;
		if (count_reserved)
			count_rsvd(inode, es->es_lblk, orig_len - len1,
				   es, &rc);
		es->es_lblk = end + 1;
		es->es_len = len1;
		if (ext4_es_is_written(es) || ext4_es_is_unwritten(es)) {
			block = es->es_pblk + orig_len - len1;
			ext4_es_store_pblock(es, block);
		}
	}

out_get_reserved:
	if (count_reserved)
		*reserved = get_rsvd(inode, end, es, &rc);
out:
	return err;
}

/*
 * ext4_es_remove_extent - removes block range from extent status tree
 *
 * @inode - file containing range
 * @lblk - first block in range
 * @len - number of blocks to remove
 *
 * Reduces block/cluster reservation count and for bigalloc cancels pending
 * reservations as needed.  Returns 0 on success, error code on failure.
 */
int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
			  ext4_lblk_t len)
{
	ext4_lblk_t end;
	int err = 0;
	int reserved = 0;
	struct extent_status *es = NULL;

	if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
		return 0;

	trace_ext4_es_remove_extent(inode, lblk, len);
	es_debug("remove [%u/%u) from extent status tree of inode %lu\n",
		 lblk, len, inode->i_ino);

	if (!len)
		return err;

	end = lblk + len - 1;
	BUG_ON(end < lblk);

retry:
	if (err && !es)
		es = __es_alloc_extent(true);
	/*
	 * ext4_clear_inode() depends on us taking i_es_lock unconditionally
	 * so that we are sure __es_shrink() is done with the inode before it
	 * is reclaimed.
	 */
	write_lock(&EXT4_I(inode)->i_es_lock);
	err = __es_remove_extent(inode, lblk, end, &reserved, es);
	if (es && !es->es_len)
		__es_free_extent(es);
	write_unlock(&EXT4_I(inode)->i_es_lock);
	if (err)
		goto retry;

	ext4_es_print_tree(inode);
	ext4_da_release_space(inode, reserved);
	return 0;
}
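
/*
 * Illustrative sketch (an assumption for documentation, not used by the
 * code below): dropping the cached status of an entire file, releasing
 * any delayed-allocation reservations the removed range still held.
 */
static inline void ext4_es_example_drop_all(struct inode *inode)
{
	ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
}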

static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
		       struct ext4_inode_info *locked_ei)
{
	struct ext4_inode_info *ei;
	struct ext4_es_stats *es_stats;
	ktime_t start_time;
	u64 scan_time;
	int nr_to_walk;
	int nr_shrunk = 0;
	int retried = 0, nr_skipped = 0;

	es_stats = &sbi->s_es_stats;
	start_time = ktime_get();

retry:
	spin_lock(&sbi->s_es_lock);
	nr_to_walk = sbi->s_es_nr_inode;
	while (nr_to_walk-- > 0) {
		if (list_empty(&sbi->s_es_list)) {
			spin_unlock(&sbi->s_es_lock);
			goto out;
		}
		ei = list_first_entry(&sbi->s_es_list, struct ext4_inode_info,
				      i_es_list);
		/* Move the inode to the tail */
		list_move_tail(&ei->i_es_list, &sbi->s_es_list);

		/*
		 * Normally we try hard to avoid shrinking precached inodes,
		 * but we will as a last resort.
		 */
		if (!retried && ext4_test_inode_state(&ei->vfs_inode,
						EXT4_STATE_EXT_PRECACHED)) {
			nr_skipped++;
			continue;
		}

		if (ei == locked_ei || !write_trylock(&ei->i_es_lock)) {
			nr_skipped++;
			continue;
		}
		/*
		 * Now we hold i_es_lock which protects us from inode reclaim
		 * freeing inode under us
		 */
		spin_unlock(&sbi->s_es_lock);

		nr_shrunk += es_reclaim_extents(ei, &nr_to_scan);
		write_unlock(&ei->i_es_lock);

		if (nr_to_scan <= 0)
			goto out;
		spin_lock(&sbi->s_es_lock);
	}
	spin_unlock(&sbi->s_es_lock);

	/*
	 * If we skipped any inodes, and we weren't able to make any
	 * forward progress, try again to scan precached inodes.
	 */
	if ((nr_shrunk == 0) && nr_skipped && !retried) {
		retried++;
		goto retry;
	}

	if (locked_ei && nr_shrunk == 0)
		nr_shrunk = es_reclaim_extents(locked_ei, &nr_to_scan);

out:
	scan_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));
	if (likely(es_stats->es_stats_scan_time))
		es_stats->es_stats_scan_time = (scan_time +
				es_stats->es_stats_scan_time*3) / 4;
	else
		es_stats->es_stats_scan_time = scan_time;
	if (scan_time > es_stats->es_stats_max_scan_time)
		es_stats->es_stats_max_scan_time = scan_time;
	if (likely(es_stats->es_stats_shrunk))
		es_stats->es_stats_shrunk = (nr_shrunk +
				es_stats->es_stats_shrunk*3) / 4;
	else
		es_stats->es_stats_shrunk = nr_shrunk;

	trace_ext4_es_shrink(sbi->s_sb, nr_shrunk, scan_time,
			     nr_skipped, retried);
	return nr_shrunk;
}

static unsigned long ext4_es_count(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	unsigned long nr;
	struct ext4_sb_info *sbi;

	sbi = container_of(shrink, struct ext4_sb_info, s_es_shrinker);
	nr = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt);
	trace_ext4_es_shrink_count(sbi->s_sb, sc->nr_to_scan, nr);
	return nr;
}

static unsigned long ext4_es_scan(struct shrinker *shrink,
				  struct shrink_control *sc)
{
	struct ext4_sb_info *sbi = container_of(shrink,
					struct ext4_sb_info, s_es_shrinker);
	int nr_to_scan = sc->nr_to_scan;
	int ret, nr_shrunk;

	ret = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt);
	trace_ext4_es_shrink_scan_enter(sbi->s_sb, nr_to_scan, ret);

	nr_shrunk = __es_shrink(sbi, nr_to_scan, NULL);

	ret = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt);
	trace_ext4_es_shrink_scan_exit(sbi->s_sb, nr_shrunk, ret);
	return nr_shrunk;
}

int ext4_seq_es_shrinker_info_show(struct seq_file *seq, void *v)
{
	struct ext4_sb_info *sbi = EXT4_SB((struct super_block *) seq->private);
	struct ext4_es_stats *es_stats = &sbi->s_es_stats;
	struct ext4_inode_info *ei, *max = NULL;
	unsigned int inode_cnt = 0;

	if (v != SEQ_START_TOKEN)
		return 0;

	/* here we just find an inode that has the max nr. of objects */
	spin_lock(&sbi->s_es_lock);
	list_for_each_entry(ei, &sbi->s_es_list, i_es_list) {
		inode_cnt++;
		if (max && max->i_es_all_nr < ei->i_es_all_nr)
			max = ei;
		else if (!max)
			max = ei;
	}
	spin_unlock(&sbi->s_es_lock);

	seq_printf(seq, "stats:\n  %lld objects\n  %lld reclaimable objects\n",
		   percpu_counter_sum_positive(&es_stats->es_stats_all_cnt),
		   percpu_counter_sum_positive(&es_stats->es_stats_shk_cnt));
	seq_printf(seq, "  %lld/%lld cache hits/misses\n",
		   percpu_counter_sum_positive(&es_stats->es_stats_cache_hits),
		   percpu_counter_sum_positive(&es_stats->es_stats_cache_misses));
	if (inode_cnt)
		seq_printf(seq, "  %d inodes on list\n", inode_cnt);

	seq_printf(seq, "average:\n  %llu us scan time\n",
		   div_u64(es_stats->es_stats_scan_time, 1000));
	seq_printf(seq, "  %lu shrunk objects\n", es_stats->es_stats_shrunk);
	if (inode_cnt)
		seq_printf(seq,
			"maximum:\n  %lu inode (%u objects, %u reclaimable)\n"
			"  %llu us max scan time\n",
			max->vfs_inode.i_ino, max->i_es_all_nr,
			max->i_es_shk_nr,
			div_u64(es_stats->es_stats_max_scan_time, 1000));

	return 0;
}

int ext4_es_register_shrinker(struct ext4_sb_info *sbi)
{
	int err;

	/* Make sure we have enough bits for physical block number */
	BUILD_BUG_ON(ES_SHIFT < 48);
	INIT_LIST_HEAD(&sbi->s_es_list);
	sbi->s_es_nr_inode = 0;
	spin_lock_init(&sbi->s_es_lock);
	sbi->s_es_stats.es_stats_shrunk = 0;
	err = percpu_counter_init(&sbi->s_es_stats.es_stats_cache_hits, 0,
				  GFP_KERNEL);
	if (err)
		return err;
	err = percpu_counter_init(&sbi->s_es_stats.es_stats_cache_misses, 0,
				  GFP_KERNEL);
	if (err)
		goto err1;
	sbi->s_es_stats.es_stats_scan_time = 0;
	sbi->s_es_stats.es_stats_max_scan_time = 0;
	err = percpu_counter_init(&sbi->s_es_stats.es_stats_all_cnt, 0,
				  GFP_KERNEL);
	if (err)
		goto err2;
	err = percpu_counter_init(&sbi->s_es_stats.es_stats_shk_cnt, 0,
				  GFP_KERNEL);
	if (err)
		goto err3;

	sbi->s_es_shrinker.scan_objects = ext4_es_scan;
	sbi->s_es_shrinker.count_objects = ext4_es_count;
	sbi->s_es_shrinker.seeks = DEFAULT_SEEKS;
	err = register_shrinker(&sbi->s_es_shrinker, "ext4-es:%s",
				sbi->s_sb->s_id);
	if (err)
		goto err4;

	return 0;
err4:
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_shk_cnt);
err3:
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_all_cnt);
err2:
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_cache_misses);
err1:
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_cache_hits);
	return err;
}

void ext4_es_unregister_shrinker(struct ext4_sb_info *sbi)
{
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_cache_hits);
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_cache_misses);
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_all_cnt);
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_shk_cnt);
	unregister_shrinker(&sbi->s_es_shrinker);
}

/*
 * Shrink extents in given inode from ei->i_es_shrink_lblk till end.  Scan at
 * most *nr_to_scan extents, update *nr_to_scan accordingly.
 *
 * Return 0 if we hit end of tree / interval, 1 if we exhausted nr_to_scan.
 * Increment *nr_shrunk by the number of reclaimed extents.  Also update
 * ei->i_es_shrink_lblk to where we should continue scanning.
 */
static int es_do_reclaim_extents(struct ext4_inode_info *ei, ext4_lblk_t end,
				 int *nr_to_scan, int *nr_shrunk)
{
	struct inode *inode = &ei->vfs_inode;
	struct ext4_es_tree *tree = &ei->i_es_tree;
	struct extent_status *es;
	struct rb_node *node;

	es = __es_tree_search(&tree->root, ei->i_es_shrink_lblk);
	if (!es)
		goto out_wrap;

	while (*nr_to_scan > 0) {
		if (es->es_lblk > end) {
			ei->i_es_shrink_lblk = end + 1;
			return 0;
		}

		(*nr_to_scan)--;
		node = rb_next(&es->rb_node);

		if (ext4_es_must_keep(es))
			goto next;
		if (ext4_es_is_referenced(es)) {
			ext4_es_clear_referenced(es);
			goto next;
		}

		rb_erase(&es->rb_node, &tree->root);
		ext4_es_free_extent(inode, es);
		(*nr_shrunk)++;
next:
		if (!node)
			goto out_wrap;
		es = rb_entry(node, struct extent_status, rb_node);
	}
	ei->i_es_shrink_lblk = es->es_lblk;
	return 1;
out_wrap:
	ei->i_es_shrink_lblk = 0;
	return 0;
}

static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan)
{
	struct inode *inode = &ei->vfs_inode;
	int nr_shrunk = 0;
	ext4_lblk_t start = ei->i_es_shrink_lblk;
	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	if (ei->i_es_shk_nr == 0)
		return 0;

	if (ext4_test_inode_state(inode, EXT4_STATE_EXT_PRECACHED) &&
	    __ratelimit(&_rs))
		ext4_warning(inode->i_sb, "forced shrink of precached extents");

	if (!es_do_reclaim_extents(ei, EXT_MAX_BLOCKS, nr_to_scan, &nr_shrunk) &&
	    start != 0)
		es_do_reclaim_extents(ei, start - 1, nr_to_scan, &nr_shrunk);

	ei->i_es_tree.cache_es = NULL;
	return nr_shrunk;
}
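
/*
 * Illustrative sketch (an assumption for documentation, not used by the
 * code below): the per-extent reclaim policy applied above.  An entry is
 * only reclaimable when it isn't a must-keep (delayed) extent and hasn't
 * been referenced since the last scan; referenced entries survive one
 * more pass after their referenced bit is cleared.
 */
static inline bool ext4_es_example_is_reclaimable(struct extent_status *es)
{
	return !ext4_es_must_keep(es) && !ext4_es_is_referenced(es);
}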

/*
 * Called to support EXT4_IOC_CLEAR_ES_CACHE.  We can only remove
 * discretionary entries from the extent status cache.  (Some entries
 * must be present for proper operations.)
 */
void ext4_clear_inode_es(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct extent_status *es;
	struct ext4_es_tree *tree;
	struct rb_node *node;

	write_lock(&ei->i_es_lock);
	tree = &EXT4_I(inode)->i_es_tree;
	tree->cache_es = NULL;
	node = rb_first(&tree->root);
	while (node) {
		es = rb_entry(node, struct extent_status, rb_node);
		node = rb_next(node);
		if (!ext4_es_must_keep(es)) {
			rb_erase(&es->rb_node, &tree->root);
			ext4_es_free_extent(inode, es);
		}
	}
	ext4_clear_inode_state(inode, EXT4_STATE_EXT_PRECACHED);
	write_unlock(&ei->i_es_lock);
}

#ifdef ES_DEBUG__
static void ext4_print_pending_tree(struct inode *inode)
{
	struct ext4_pending_tree *tree;
	struct rb_node *node;
	struct pending_reservation *pr;

	printk(KERN_DEBUG "pending reservations for inode %lu:", inode->i_ino);
	tree = &EXT4_I(inode)->i_pending_tree;
	node = rb_first(&tree->root);
	while (node) {
		pr = rb_entry(node, struct pending_reservation, rb_node);
		printk(KERN_DEBUG " %u", pr->lclu);
		node = rb_next(node);
	}
	printk(KERN_DEBUG "\n");
}
#else
#define ext4_print_pending_tree(inode)
#endif

int __init ext4_init_pending(void)
{
	ext4_pending_cachep = KMEM_CACHE(pending_reservation,
					 SLAB_RECLAIM_ACCOUNT);
	if (ext4_pending_cachep == NULL)
		return -ENOMEM;
	return 0;
}

void ext4_exit_pending(void)
{
	kmem_cache_destroy(ext4_pending_cachep);
}

void ext4_init_pending_tree(struct ext4_pending_tree *tree)
{
	tree->root = RB_ROOT;
}

/*
 * __get_pending - retrieve a pointer to a pending reservation
 *
 * @inode - file containing the pending cluster reservation
 * @lclu - logical cluster of interest
 *
 * Returns a pointer to a pending reservation if it's a member of
 * the set, and NULL if not.  Must be called holding i_es_lock.
 */
static struct pending_reservation *__get_pending(struct inode *inode,
						 ext4_lblk_t lclu)
{
	struct ext4_pending_tree *tree;
	struct rb_node *node;
	struct pending_reservation *pr = NULL;

	tree = &EXT4_I(inode)->i_pending_tree;
	node = (&tree->root)->rb_node;

	while (node) {
		pr = rb_entry(node, struct pending_reservation, rb_node);
		if (lclu < pr->lclu)
			node = node->rb_left;
		else if (lclu > pr->lclu)
			node = node->rb_right;
		else if (lclu == pr->lclu)
			return pr;
	}
	return NULL;
}

/*
 * __insert_pending - adds a pending cluster reservation to the set of
 *		      pending reservations
 *
 * @inode - file containing the cluster
 * @lblk - logical block in the cluster to be added
 *
 * Returns 0 on successful insertion and -ENOMEM on failure.  If the
 * pending reservation is already in the set, returns successfully.
/*
 * __insert_pending - adds a pending cluster reservation to the set of
 *                    pending reservations
 *
 * @inode - file containing the cluster
 * @lblk - logical block in the cluster to be added
 *
 * Returns 0 on successful insertion and -ENOMEM on failure.  If the
 * pending reservation is already in the set, returns successfully.
 */
static int __insert_pending(struct inode *inode, ext4_lblk_t lblk)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_pending_tree *tree = &EXT4_I(inode)->i_pending_tree;
	struct rb_node **p = &tree->root.rb_node;
	struct rb_node *parent = NULL;
	struct pending_reservation *pr;
	ext4_lblk_t lclu;
	int ret = 0;

	lclu = EXT4_B2C(sbi, lblk);
	/* search to find parent for insertion */
	while (*p) {
		parent = *p;
		pr = rb_entry(parent, struct pending_reservation, rb_node);

		if (lclu < pr->lclu) {
			p = &(*p)->rb_left;
		} else if (lclu > pr->lclu) {
			p = &(*p)->rb_right;
		} else {
			/* pending reservation already inserted */
			goto out;
		}
	}

	pr = kmem_cache_alloc(ext4_pending_cachep, GFP_ATOMIC);
	if (pr == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	pr->lclu = lclu;

	rb_link_node(&pr->rb_node, parent, p);
	rb_insert_color(&pr->rb_node, &tree->root);

out:
	return ret;
}

/*
 * __remove_pending - removes a pending cluster reservation from the set
 *                    of pending reservations
 *
 * @inode - file containing the cluster
 * @lblk - logical block in the pending cluster reservation to be removed
 *
 * A no-op if the pending reservation is not a member of the set.
 */
static void __remove_pending(struct inode *inode, ext4_lblk_t lblk)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct pending_reservation *pr;
	struct ext4_pending_tree *tree;

	pr = __get_pending(inode, EXT4_B2C(sbi, lblk));
	if (pr != NULL) {
		tree = &EXT4_I(inode)->i_pending_tree;
		rb_erase(&pr->rb_node, &tree->root);
		kmem_cache_free(ext4_pending_cachep, pr);
	}
}

/*
 * ext4_remove_pending - removes a pending cluster reservation from the set
 *                       of pending reservations
 *
 * @inode - file containing the cluster
 * @lblk - logical block in the pending cluster reservation to be removed
 *
 * Locking for external use of __remove_pending().
 */
void ext4_remove_pending(struct inode *inode, ext4_lblk_t lblk)
{
	struct ext4_inode_info *ei = EXT4_I(inode);

	write_lock(&ei->i_es_lock);
	__remove_pending(inode, lblk);
	write_unlock(&ei->i_es_lock);
}
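#ifdef ES_DEBUG__
/*
 * Illustrative sketch only (hypothetical helper, never called): exercises
 * the pending tree helpers as a unit.  Inserts a reservation for the
 * cluster containing @lblk, verifies it can be found, then removes it
 * again.  All three helpers expect i_es_lock to be held, so the sketch
 * takes it for writing.  Note that if the cluster already had a
 * reservation, this would drop it, which is one reason it is a sketch
 * rather than real code.
 */
static void __maybe_unused es_pending_example(struct inode *inode,
					      ext4_lblk_t lblk)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	write_lock(&ei->i_es_lock);
	/* GFP_ATOMIC allocation inside __insert_pending() can fail */
	if (__insert_pending(inode, lblk) == 0) {
		WARN_ON(!__get_pending(inode, EXT4_B2C(sbi, lblk)));
		__remove_pending(inode, lblk);
	}
	write_unlock(&ei->i_es_lock);
}
#endif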
/*
 * ext4_is_pending - determine whether a cluster has a pending reservation
 *                   on it
 *
 * @inode - file containing the cluster
 * @lblk - logical block in the cluster
 *
 * Returns true if there's a pending reservation for the cluster in the
 * set of pending reservations, and false if not.
 */
bool ext4_is_pending(struct inode *inode, ext4_lblk_t lblk)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);
	bool ret;

	read_lock(&ei->i_es_lock);
	ret = (__get_pending(inode, EXT4_B2C(sbi, lblk)) != NULL);
	read_unlock(&ei->i_es_lock);

	return ret;
}

/*
 * ext4_es_insert_delayed_block - adds a delayed block to the extents status
 *                                tree, adding a pending reservation where
 *                                needed
 *
 * @inode - file containing the newly added block
 * @lblk - logical block to be added
 * @allocated - indicates whether a physical cluster has been allocated for
 *              the logical cluster that contains the block
 *
 * Returns 0 on success, negative error code on failure.
 */
int ext4_es_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk,
				 bool allocated)
{
	struct extent_status newes;
	int err1 = 0;
	int err2 = 0;
	struct extent_status *es1 = NULL;
	struct extent_status *es2 = NULL;

	if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
		return 0;

	es_debug("add [%u/1) delayed to extent status tree of inode %lu\n",
		 lblk, inode->i_ino);

	newes.es_lblk = lblk;
	newes.es_len = 1;
	ext4_es_store_pblock_status(&newes, ~0, EXTENT_STATUS_DELAYED);
	trace_ext4_es_insert_delayed_block(inode, &newes, allocated);

	ext4_es_insert_extent_check(inode, &newes);

retry:
	if (err1 && !es1)
		es1 = __es_alloc_extent(true);
	if ((err1 || err2) && !es2)
		es2 = __es_alloc_extent(true);
	write_lock(&EXT4_I(inode)->i_es_lock);

	err1 = __es_remove_extent(inode, lblk, lblk, NULL, es1);
	if (err1 != 0)
		goto error;

	err2 = __es_insert_extent(inode, &newes, es2);
	if (err2 != 0)
		goto error;

	if (allocated)
		__insert_pending(inode, lblk);

	/* es is pre-allocated but not used, free it. */
	if (es1 && !es1->es_len)
		__es_free_extent(es1);
	if (es2 && !es2->es_len)
		__es_free_extent(es2);
error:
	write_unlock(&EXT4_I(inode)->i_es_lock);
	if (err1 || err2)
		goto retry;

	ext4_es_print_tree(inode);
	ext4_print_pending_tree(inode);
	return 0;
}
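/*
 * Note on the retry loop above (descriptive only): the first pass runs
 * without preallocated entries, since err1 and err2 start out zero and
 * es1/es2 stay NULL.  Only when the remove or insert step fails are
 * entries preallocated with __es_alloc_extent(true) before the lock is
 * retaken and the whole remove-then-insert sequence is retried, the
 * intent being that the retried pass already has the memory it needs in
 * hand rather than allocating under i_es_lock.
 */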
/*
 * __es_delayed_clu - count number of clusters containing blocks that
 *                    are delayed only
 *
 * @inode - file containing block range
 * @start - logical block defining start of range
 * @end - logical block defining end of range
 *
 * Returns the number of clusters containing only delayed (not delayed
 * and unwritten) blocks in the range specified by @start and @end.  Any
 * cluster or part of a cluster within the range and containing a delayed
 * and not unwritten block within the range is counted as a whole cluster.
 */
static unsigned int __es_delayed_clu(struct inode *inode, ext4_lblk_t start,
				     ext4_lblk_t end)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct extent_status *es;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct rb_node *node;
	ext4_lblk_t first_lclu, last_lclu;
	unsigned long long last_counted_lclu;
	unsigned int n = 0;

	/* guaranteed to be unequal to any ext4_lblk_t value */
	last_counted_lclu = ~0ULL;

	es = __es_tree_search(&tree->root, start);

	while (es && (es->es_lblk <= end)) {
		if (ext4_es_is_delonly(es)) {
			if (es->es_lblk <= start)
				first_lclu = EXT4_B2C(sbi, start);
			else
				first_lclu = EXT4_B2C(sbi, es->es_lblk);

			if (ext4_es_end(es) >= end)
				last_lclu = EXT4_B2C(sbi, end);
			else
				last_lclu = EXT4_B2C(sbi, ext4_es_end(es));

			/*
			 * Avoid double-counting a cluster shared with the
			 * previous delayed-only extent.
			 */
			if (first_lclu == last_counted_lclu)
				n += last_lclu - first_lclu;
			else
				n += last_lclu - first_lclu + 1;
			last_counted_lclu = last_lclu;
		}
		node = rb_next(&es->rb_node);
		if (!node)
			break;
		es = rb_entry(node, struct extent_status, rb_node);
	}

	return n;
}

/*
 * ext4_es_delayed_clu - count number of clusters containing blocks that
 *                       are delayed only
 *
 * @inode - file containing block range
 * @lblk - logical block defining start of range
 * @len - number of blocks in range
 *
 * Locking for external use of __es_delayed_clu().
 */
unsigned int ext4_es_delayed_clu(struct inode *inode, ext4_lblk_t lblk,
				 ext4_lblk_t len)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	ext4_lblk_t end;
	unsigned int n;

	if (len == 0)
		return 0;

	end = lblk + len - 1;
	WARN_ON(end < lblk);

	read_lock(&ei->i_es_lock);

	n = __es_delayed_clu(inode, lblk, end);

	read_unlock(&ei->i_es_lock);

	return n;
}
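/*
 * Worked example (illustrative values): with a cluster ratio of 4 and
 * delayed-only extents covering blocks [1, 2] and [6, 11],
 * ext4_es_delayed_clu(inode, 0, 9) scans blocks 0..8 and returns 3:
 * blocks 1-2 lie in cluster 0, and blocks 6-8 add clusters 1 and 2.
 */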
/*
 * __revise_pending - makes, cancels, or leaves unchanged pending cluster
 *                    reservations for a specified block range depending
 *                    upon the presence or absence of delayed blocks
 *                    outside the range within clusters at the ends of the
 *                    range
 *
 * @inode - file containing the range
 * @lblk - logical block defining the start of range
 * @len - length of range in blocks
 *
 * Used after a newly allocated extent is added to the extents status tree.
 * Requires that the extents in the range have either written or unwritten
 * status.  Must be called while holding i_es_lock.
 */
static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
			     ext4_lblk_t len)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	ext4_lblk_t end = lblk + len - 1;
	ext4_lblk_t first, last;
	bool f_del = false, l_del = false;

	if (len == 0)
		return;

	/*
	 * Two cases - block range within single cluster and block range
	 * spanning two or more clusters.  Note that a cluster belonging
	 * to a range starting and/or ending on a cluster boundary is treated
	 * as if it does not contain a delayed extent.  The new range may
	 * have allocated space for previously delayed blocks out to the
	 * cluster boundary, requiring that any pre-existing pending
	 * reservation be canceled.  Because this code only looks at blocks
	 * outside the range, it should revise pending reservations
	 * correctly even if the extent represented by the range can't be
	 * inserted in the extents status tree due to ENOSPC.
	 */

	if (EXT4_B2C(sbi, lblk) == EXT4_B2C(sbi, end)) {
		first = EXT4_LBLK_CMASK(sbi, lblk);
		if (first != lblk)
			f_del = __es_scan_range(inode, &ext4_es_is_delonly,
						first, lblk - 1);
		if (f_del) {
			__insert_pending(inode, first);
		} else {
			last = EXT4_LBLK_CMASK(sbi, end) +
			       sbi->s_cluster_ratio - 1;
			if (last != end)
				l_del = __es_scan_range(inode,
							&ext4_es_is_delonly,
							end + 1, last);
			if (l_del)
				__insert_pending(inode, last);
			else
				__remove_pending(inode, last);
		}
	} else {
		first = EXT4_LBLK_CMASK(sbi, lblk);
		if (first != lblk)
			f_del = __es_scan_range(inode, &ext4_es_is_delonly,
						first, lblk - 1);
		if (f_del)
			__insert_pending(inode, first);
		else
			__remove_pending(inode, first);

		last = EXT4_LBLK_CMASK(sbi, end) + sbi->s_cluster_ratio - 1;
		if (last != end)
			l_del = __es_scan_range(inode, &ext4_es_is_delonly,
						end + 1, last);
		if (l_del)
			__insert_pending(inode, last);
		else
			__remove_pending(inode, last);
	}
}
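/*
 * Worked example (illustrative values): with a cluster ratio of 4, a newly
 * mapped block range [5, 6] lies entirely inside cluster 1 (blocks 4..7).
 * Block 4 is examined first: if it is delayed-only, cluster 1 keeps (or
 * gains) a pending reservation.  Otherwise block 7 decides the outcome:
 * delayed-only keeps the reservation, anything else cancels it, since the
 * cluster then holds no delayed blocks outside the newly mapped range.
 */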