// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_inode_item.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_bmap_util.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_reflink.h"

#include <linux/iversion.h>

/*
 * Allocate and initialise an xfs_inode.
 */
struct xfs_inode *
xfs_inode_alloc(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;

	/*
	 * if this didn't occur in transactions, we could use
	 * KM_MAYFAIL and return NULL here on ENOMEM. Set the
	 * code up to do this anyway.
	 */
	ip = kmem_zone_alloc(xfs_inode_zone, 0);
	if (!ip)
		return NULL;
	if (inode_init_always(mp->m_super, VFS_I(ip))) {
		kmem_cache_free(xfs_inode_zone, ip);
		return NULL;
	}

	/* VFS doesn't initialise i_mode! */
	VFS_I(ip)->i_mode = 0;

	XFS_STATS_INC(mp, vn_active);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!xfs_isiflocked(ip));
	ASSERT(ip->i_ino == 0);

	/* initialise the xfs inode */
	ip->i_ino = ino;
	ip->i_mount = mp;
	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
	ip->i_afp = NULL;
	ip->i_cowfp = NULL;
	ip->i_cnextents = 0;
	ip->i_cformat = XFS_DINODE_FMT_EXTENTS;
	memset(&ip->i_df, 0, sizeof(ip->i_df));
	ip->i_flags = 0;
	ip->i_delayed_blks = 0;
	memset(&ip->i_d, 0, sizeof(ip->i_d));
	ip->i_sick = 0;
	ip->i_checked = 0;
	INIT_WORK(&ip->i_ioend_work, xfs_end_io);
	INIT_LIST_HEAD(&ip->i_ioend_list);
	spin_lock_init(&ip->i_ioend_lock);

	return ip;
}

STATIC void
xfs_inode_free_callback(
	struct rcu_head		*head)
{
	struct inode		*inode = container_of(head, struct inode, i_rcu);
	struct xfs_inode	*ip = XFS_I(inode);

	switch (VFS_I(ip)->i_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		xfs_idestroy_fork(ip, XFS_DATA_FORK);
		break;
	}

	if (ip->i_afp)
		xfs_idestroy_fork(ip, XFS_ATTR_FORK);
	if (ip->i_cowfp)
		xfs_idestroy_fork(ip, XFS_COW_FORK);

	if (ip->i_itemp) {
		ASSERT(!test_bit(XFS_LI_IN_AIL,
				 &ip->i_itemp->ili_item.li_flags));
		xfs_inode_item_destroy(ip);
		ip->i_itemp = NULL;
	}

	kmem_cache_free(xfs_inode_zone, ip);
}

static void
__xfs_inode_free(
	struct xfs_inode	*ip)
{
	/* asserts to verify all state is correct here */
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	XFS_STATS_DEC(ip->i_mount, vn_active);

	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
}

void
xfs_inode_free(
	struct xfs_inode	*ip)
{
	ASSERT(!xfs_isiflocked(ip));

	/*
	 * Because we use RCU freeing we need to ensure the inode always
	 * appears to be reclaimed with an invalid inode number when in the
	 * free state. The ip->i_flags_lock provides the barrier against lookup
	 * races.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	spin_unlock(&ip->i_flags_lock);

	__xfs_inode_free(ip);
}

/*
 * Queue a new inode reclaim pass if there are reclaimable inodes and there
 * isn't a reclaim pass already in progress. By default it runs every 5s based
 * on the xfs periodic sync default of 30s. Perhaps this should have its own
 * tunable, but that can be done if this method proves to be ineffective or too
 * aggressive.
 */
static void
xfs_reclaim_work_queue(
	struct xfs_mount	*mp)
{

	rcu_read_lock();
	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
		queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
			msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
	}
	rcu_read_unlock();
}
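/*
 * Illustration, not part of the original source: making the cadence above
 * explicit. Assuming the usual default of xfs_syncd_centisecs = 3000
 * (30 seconds), the delay passed to queue_delayed_work() works out as
 *
 *	msecs_to_jiffies(3000 / 6 * 10) = msecs_to_jiffies(5000)
 *
 * i.e. one background reclaim pass roughly every 5 seconds, a sixth of the
 * periodic sync interval.
 */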

/*
 * This is a fast pass over the inode cache to try to get reclaim moving on as
 * many inodes as possible in a short period of time. It kicks itself every few
 * seconds, as well as being kicked by the inode cache shrinker when memory
 * goes low. It scans as quickly as possible avoiding locked inodes or those
 * already being flushed, and once done schedules a future pass.
 */
void
xfs_reclaim_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(to_delayed_work(work),
					struct xfs_mount, m_reclaim_work);

	xfs_reclaim_inodes(mp, SYNC_TRYLOCK);
	xfs_reclaim_work_queue(mp);
}

static void
xfs_perag_set_reclaim_tag(
	struct xfs_perag	*pag)
{
	struct xfs_mount	*mp = pag->pag_mount;

	lockdep_assert_held(&pag->pag_ici_lock);
	if (pag->pag_ici_reclaimable++)
		return;

	/* propagate the reclaim tag up into the perag radix tree */
	spin_lock(&mp->m_perag_lock);
	radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno,
			   XFS_ICI_RECLAIM_TAG);
	spin_unlock(&mp->m_perag_lock);

	/* schedule periodic background inode reclaim */
	xfs_reclaim_work_queue(mp);

	trace_xfs_perag_set_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
}

static void
xfs_perag_clear_reclaim_tag(
	struct xfs_perag	*pag)
{
	struct xfs_mount	*mp = pag->pag_mount;

	lockdep_assert_held(&pag->pag_ici_lock);
	if (--pag->pag_ici_reclaimable)
		return;

	/* clear the reclaim tag from the perag radix tree */
	spin_lock(&mp->m_perag_lock);
	radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno,
			     XFS_ICI_RECLAIM_TAG);
	spin_unlock(&mp->m_perag_lock);
	trace_xfs_perag_clear_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
}


/*
 * We set the inode flag atomically with the radix tree tag.
 * Once we get tag lookups on the radix tree, this inode flag
 * can go away.
 */
void
xfs_inode_set_reclaim_tag(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_perag	*pag;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);

	radix_tree_tag_set(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino),
			   XFS_ICI_RECLAIM_TAG);
	xfs_perag_set_reclaim_tag(pag);
	__xfs_iflags_set(ip, XFS_IRECLAIMABLE);

	spin_unlock(&ip->i_flags_lock);
	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}

STATIC void
xfs_inode_clear_reclaim_tag(
	struct xfs_perag	*pag,
	xfs_ino_t		ino)
{
	radix_tree_tag_clear(&pag->pag_ici_root,
			     XFS_INO_TO_AGINO(pag->pag_mount, ino),
			     XFS_ICI_RECLAIM_TAG);
	xfs_perag_clear_reclaim_tag(pag);
}

static void
xfs_inew_wait(
	struct xfs_inode	*ip)
{
	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_INEW_BIT);
	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_INEW_BIT);

	do {
		prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
		if (!xfs_iflags_test(ip, XFS_INEW))
			break;
		schedule();
	} while (true);
	finish_wait(wq, &wait.wq_entry);
}

/*
 * When we recycle a reclaimable inode, we need to re-initialise the VFS inode
 * part of the structure. This is made more complex by the fact we store
 * information about the on-disk values in the VFS inode and so we can't just
 * overwrite the values unconditionally. Hence we save the parameters we
 * need to retain across reinitialisation, and rewrite them into the VFS inode
 * after reinitialisation even if it fails.
 */
static int
xfs_reinit_inode(
	struct xfs_mount	*mp,
	struct inode		*inode)
{
	int		error;
	uint32_t	nlink = inode->i_nlink;
	uint32_t	generation = inode->i_generation;
	uint64_t	version = inode_peek_iversion(inode);
	umode_t		mode = inode->i_mode;
	dev_t		dev = inode->i_rdev;
	kuid_t		uid = inode->i_uid;
	kgid_t		gid = inode->i_gid;

	error = inode_init_always(mp->m_super, inode);

	set_nlink(inode, nlink);
	inode->i_generation = generation;
	inode_set_iversion_queried(inode, version);
	inode->i_mode = mode;
	inode->i_rdev = dev;
	inode->i_uid = uid;
	inode->i_gid = gid;
	return error;
}

/*
 * If we are allocating a new inode, then check that what was returned is
 * actually a free, empty inode. If we are not allocating an inode,
 * then check we didn't find a free inode.
 *
 * Returns:
 *	0		if the inode free state matches the lookup context
 *	-ENOENT		if the inode is free and we are not allocating
 *	-EFSCORRUPTED	if there is any state mismatch at all
 */
static int
xfs_iget_check_free_state(
	struct xfs_inode	*ip,
	int			flags)
{
	if (flags & XFS_IGET_CREATE) {
		/* should be a free inode */
		if (VFS_I(ip)->i_mode != 0) {
			xfs_warn(ip->i_mount,
"Corruption detected! Free inode 0x%llx not marked free! (mode 0x%x)",
				ip->i_ino, VFS_I(ip)->i_mode);
			return -EFSCORRUPTED;
		}

		if (ip->i_d.di_nblocks != 0) {
			xfs_warn(ip->i_mount,
"Corruption detected! Free inode 0x%llx has blocks allocated!",
				ip->i_ino);
			return -EFSCORRUPTED;
		}
		return 0;
	}

	/* should be an allocated inode */
	if (VFS_I(ip)->i_mode == 0)
		return -ENOENT;

	return 0;
}

/*
 * Check the validity of the inode we just found in the cache
 */
static int
xfs_iget_cache_hit(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip,
	xfs_ino_t		ino,
	int			flags,
	int			lock_flags) __releases(RCU)
{
	struct inode		*inode = VFS_I(ip);
	struct xfs_mount	*mp = ip->i_mount;
	int			error;

	/*
	 * check for re-use of an inode within an RCU grace period due to the
	 * radix tree nodes not being updated yet. We monitor for this by
	 * setting the inode number to zero before freeing the inode structure.
	 * If the inode has been reallocated and set up, then the inode number
	 * will not match, so check for that, too.
	 */
	spin_lock(&ip->i_flags_lock);
	if (ip->i_ino != ino) {
		trace_xfs_iget_skip(ip);
		XFS_STATS_INC(mp, xs_ig_frecycle);
		error = -EAGAIN;
		goto out_error;
	}


	/*
	 * If we are racing with another cache hit that is currently
	 * instantiating this inode or currently recycling it out of
	 * reclaimable state, wait for the initialisation to complete
	 * before continuing.
	 *
	 * XXX(hch): eventually we should do something equivalent to
	 *	     wait_on_inode to wait for these flags to be cleared
	 *	     instead of polling for it.
	 */
	if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
		trace_xfs_iget_skip(ip);
		XFS_STATS_INC(mp, xs_ig_frecycle);
		error = -EAGAIN;
		goto out_error;
	}

	/*
	 * Check the inode free state is valid. This also detects lookup
	 * racing with unlinks.
	 */
	error = xfs_iget_check_free_state(ip, flags);
	if (error)
		goto out_error;

	/*
	 * If IRECLAIMABLE is set, we've torn down the VFS inode already.
	 * Need to carefully get it back into usable state.
	 */
	if (ip->i_flags & XFS_IRECLAIMABLE) {
		trace_xfs_iget_reclaim(ip);

		if (flags & XFS_IGET_INCORE) {
			error = -EAGAIN;
			goto out_error;
		}

		/*
		 * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode
		 * from stomping over us while we recycle the inode. We can't
		 * clear the radix tree reclaimable tag yet as it requires
		 * pag_ici_lock to be held exclusive.
		 */
		ip->i_flags |= XFS_IRECLAIM;

		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();

		error = xfs_reinit_inode(mp, inode);
		if (error) {
			bool wake;
			/*
			 * Re-initializing the inode failed, and we are in deep
			 * trouble.  Try to re-add it to the reclaim list.
			 */
			rcu_read_lock();
			spin_lock(&ip->i_flags_lock);
			wake = !!__xfs_iflags_test(ip, XFS_INEW);
			ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
			if (wake)
				wake_up_bit(&ip->i_flags, __XFS_INEW_BIT);
			ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
			trace_xfs_iget_reclaim_fail(ip);
			goto out_error;
		}

		spin_lock(&pag->pag_ici_lock);
		spin_lock(&ip->i_flags_lock);

		/*
		 * Clear the per-lifetime state in the inode as we are now
		 * effectively a new inode and need to return to the initial
		 * state before reuse occurs.
		 */
		ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
		ip->i_flags |= XFS_INEW;
		xfs_inode_clear_reclaim_tag(pag, ip->i_ino);
		inode->i_state = I_NEW;
		ip->i_sick = 0;
		ip->i_checked = 0;

		ASSERT(!rwsem_is_locked(&inode->i_rwsem));
		init_rwsem(&inode->i_rwsem);

		spin_unlock(&ip->i_flags_lock);
		spin_unlock(&pag->pag_ici_lock);
	} else {
		/* If the VFS inode is being torn down, pause and try again. */
		if (!igrab(inode)) {
			trace_xfs_iget_skip(ip);
			error = -EAGAIN;
			goto out_error;
		}

		/* We've got a live one. */
		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();
		trace_xfs_iget_hit(ip);
	}

	if (lock_flags != 0)
		xfs_ilock(ip, lock_flags);

	if (!(flags & XFS_IGET_INCORE))
		xfs_iflags_clear(ip, XFS_ISTALE | XFS_IDONTCACHE);
	XFS_STATS_INC(mp, xs_ig_found);

	return 0;

out_error:
	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();
	return error;
}


static int
xfs_iget_cache_miss(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	xfs_trans_t		*tp,
	xfs_ino_t		ino,
	struct xfs_inode	**ipp,
	int			flags,
	int			lock_flags)
{
	struct xfs_inode	*ip;
	int			error;
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);
	int			iflags;

	ip = xfs_inode_alloc(mp, ino);
	if (!ip)
		return -ENOMEM;

	error = xfs_iread(mp, tp, ip, flags);
	if (error)
		goto out_destroy;

	if (!xfs_inode_verify_forks(ip)) {
		error = -EFSCORRUPTED;
		goto out_destroy;
	}

	trace_xfs_iget_miss(ip);


	/*
	 * Check the inode free state is valid. This also detects lookup
	 * racing with unlinks.
	 */
	error = xfs_iget_check_free_state(ip, flags);
	if (error)
		goto out_destroy;

	/*
	 * Preload the radix tree so we can insert safely under the
	 * write spinlock. Note that we cannot sleep inside the preload
	 * region. Since we can be called from transaction context, don't
	 * recurse into the file system.
	 */
	if (radix_tree_preload(GFP_NOFS)) {
		error = -EAGAIN;
		goto out_destroy;
	}

	/*
	 * Because the inode hasn't been added to the radix-tree yet it can't
	 * be found by another thread, so we can do the non-sleeping lock here.
	 */
	if (lock_flags) {
		if (!xfs_ilock_nowait(ip, lock_flags))
			BUG();
	}

	/*
	 * These values must be set before inserting the inode into the radix
	 * tree as the moment it is inserted a concurrent lookup (allowed by the
	 * RCU locking mechanism) can find it and that lookup must see that this
	 * is an inode currently under construction (i.e. that XFS_INEW is set).
	 * The ip->i_flags_lock that protects the XFS_INEW flag forms the
	 * memory barrier that ensures this detection works correctly at lookup
	 * time.
	 */
	iflags = XFS_INEW;
	if (flags & XFS_IGET_DONTCACHE)
		iflags |= XFS_IDONTCACHE;
	ip->i_udquot = NULL;
	ip->i_gdquot = NULL;
	ip->i_pdquot = NULL;
	xfs_iflags_set(ip, iflags);

	/* insert the new inode */
	spin_lock(&pag->pag_ici_lock);
	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
	if (unlikely(error)) {
		WARN_ON(error != -EEXIST);
		XFS_STATS_INC(mp, xs_ig_dup);
		error = -EAGAIN;
		goto out_preload_end;
	}
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();

	*ipp = ip;
	return 0;

out_preload_end:
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
out_destroy:
	__destroy_inode(VFS_I(ip));
	xfs_inode_free(ip);
	return error;
}

/*
 * Look up an inode by number in the given file system.
 * The inode is looked up in the cache held in each AG.
 * If the inode is found in the cache, initialise the vfs inode
 * if necessary.
 *
 * If it is not in core, read it in from the file system's device,
 * add it to the cache and initialise the vfs inode.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * This flag parameter indicates how and if the inode's IO lock and inode lock
 * should be taken.
 *
 * mp -- the mount point structure for the current file system.  It points
 *       to the inode hash table.
 * tp -- a pointer to the current transaction if there is one.  This is
 *       simply passed through to the xfs_iread() call.
 * ino -- the number of the inode desired.  This is the unique identifier
 *        within the file system for the inode being requested.
 * lock_flags -- flags indicating how to lock the inode.  See the comment
 *		 for xfs_ilock() for a list of valid values.
 */
int
xfs_iget(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	uint		flags,
	uint		lock_flags,
	xfs_inode_t	**ipp)
{
	xfs_inode_t	*ip;
	int		error;
	xfs_perag_t	*pag;
	xfs_agino_t	agino;

	/*
	 * xfs_reclaim_inode() uses the ILOCK to ensure an inode
	 * doesn't get freed while it's being referenced during a
	 * radix tree traversal here.  It assumes this function
	 * acquires only the ILOCK (and therefore it has no need to
	 * involve the IOLOCK in this synchronization).
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);

	/* reject inode numbers outside existing AGs */
	if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
		return -EINVAL;

	XFS_STATS_INC(mp, xs_ig_attempts);

	/* get the perag structure and ensure that it's inode capable */
	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
	agino = XFS_INO_TO_AGINO(mp, ino);

again:
	error = 0;
	rcu_read_lock();
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);

	if (ip) {
		error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
		if (error)
			goto out_error_or_again;
	} else {
		rcu_read_unlock();
		if (flags & XFS_IGET_INCORE) {
			error = -ENODATA;
			goto out_error_or_again;
		}
		XFS_STATS_INC(mp, xs_ig_missed);

		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
							flags, lock_flags);
		if (error)
			goto out_error_or_again;
	}
	xfs_perag_put(pag);

	*ipp = ip;

	/*
	 * If we have a real type for an on-disk inode, we can setup the inode
	 * now.	 If it's a new inode being created, xfs_ialloc will handle it.
	 */
	if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0)
		xfs_setup_existing_inode(ip);
	return 0;

out_error_or_again:
	if (!(flags & XFS_IGET_INCORE) && error == -EAGAIN) {
		delay(1);
		goto again;
	}
	xfs_perag_put(pag);
	return error;
}
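
/*
 * Illustrative sketch, not part of the original source: a minimal example of
 * how a caller typically drives the lookup above. The function name
 * xfs_example_iget_user() is hypothetical; the helpers it calls (xfs_iget(),
 * xfs_irele()) are the ones defined or used in this file. Compiled out so it
 * cannot affect the build.
 */
#if 0
static int
xfs_example_iget_user(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;
	int			error;

	/* no transaction, no inode locks held across the lookup */
	error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
	if (error)
		return error;

	/* ... operate on the now-referenced inode ... */

	/* drop the reference taken by xfs_iget() */
	xfs_irele(ip);
	return 0;
}
#endif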

/*
 * "Is this a cached inode that's also allocated?"
 *
 * Look up an inode by number in the given file system.  If the inode is
 * in cache and isn't in purgatory, return 1 if the inode is allocated
 * and 0 if it is not.  For all other cases (not in cache, being torn
 * down, etc.), return a negative error code.
 *
 * The caller has to prevent inode allocation and freeing activity,
 * presumably by locking the AGI buffer.   This is to ensure that an
 * inode cannot transition from allocated to freed until the caller is
 * ready to allow that.  If the inode is in an intermediate state (new,
 * reclaimable, or being reclaimed), -EAGAIN will be returned; if the
 * inode is not in the cache, -ENOENT will be returned.  The caller must
 * deal with these scenarios appropriately.
 *
 * This is a specialized use case for the online scrubber; if you're
 * reading this, you probably want xfs_iget.
 */
int
xfs_icache_inode_is_allocated(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_ino_t		ino,
	bool			*inuse)
{
	struct xfs_inode	*ip;
	int			error;

	error = xfs_iget(mp, tp, ino, XFS_IGET_INCORE, 0, &ip);
	if (error)
		return error;

	*inuse = !!(VFS_I(ip)->i_mode);
	xfs_irele(ip);
	return 0;
}

/*
 * The inode lookup is done in batches to keep the amount of lock traffic and
 * radix tree lookups to a minimum. The batch size is a trade off between
 * lookup reduction and stack usage. This is in the reclaim path, so we can't
 * be too greedy.
 */
#define XFS_LOOKUP_BATCH	32

STATIC int
xfs_inode_ag_walk_grab(
	struct xfs_inode	*ip,
	int			flags)
{
	struct inode		*inode = VFS_I(ip);
	bool			newinos = !!(flags & XFS_AGITER_INEW_WAIT);

	ASSERT(rcu_read_lock_held());

	/*
	 * check for stale RCU freed inode
	 *
	 * If the inode has been reallocated, it doesn't matter if it's not in
	 * the AG we are walking - we are walking for writeback, so if it
	 * passes all the "valid inode" checks and is dirty, then we'll write
	 * it back anyway.  If it has been reallocated and is still being
	 * initialised, the XFS_INEW check below will catch it.
	 */
	spin_lock(&ip->i_flags_lock);
	if (!ip->i_ino)
		goto out_unlock_noent;

	/* avoid new or reclaimable inodes. Leave for reclaim code to flush */
	if ((!newinos && __xfs_iflags_test(ip, XFS_INEW)) ||
	    __xfs_iflags_test(ip, XFS_IRECLAIMABLE | XFS_IRECLAIM))
		goto out_unlock_noent;
	spin_unlock(&ip->i_flags_lock);

	/* nothing to sync during shutdown */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EFSCORRUPTED;

	/* If we can't grab the inode, it must be on its way to reclaim. */
	if (!igrab(inode))
		return -ENOENT;

	/* inode is valid */
	return 0;

out_unlock_noent:
	spin_unlock(&ip->i_flags_lock);
	return -ENOENT;
}

STATIC int
xfs_inode_ag_walk(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	int			(*execute)(struct xfs_inode *ip, int flags,
					   void *args),
	int			flags,
	void			*args,
	int			tag,
	int			iter_flags)
{
	uint32_t		first_index;
	int			last_error = 0;
	int			skipped;
	int			done;
	int			nr_found;

restart:
	done = 0;
	skipped = 0;
	first_index = 0;
	nr_found = 0;
	do {
		struct xfs_inode *batch[XFS_LOOKUP_BATCH];
		int		error = 0;
		int		i;

		rcu_read_lock();

		if (tag == -1)
			nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH);
		else
			nr_found = radix_tree_gang_lookup_tag(
					&pag->pag_ici_root,
					(void **) batch, first_index,
					XFS_LOOKUP_BATCH, tag);

		if (!nr_found) {
			rcu_read_unlock();
			break;
		}

		/*
		 * Grab the inodes before we drop the lock. If we found
		 * nothing, nr == 0 and the loop will be skipped.
		 */
		for (i = 0; i < nr_found; i++) {
			struct xfs_inode *ip = batch[i];

			if (done || xfs_inode_ag_walk_grab(ip, iter_flags))
				batch[i] = NULL;

			/*
			 * Update the index for the next lookup. Catch
			 * overflows into the next AG range which can occur if
			 * we have inodes in the last block of the AG and we
			 * are currently pointing to the last inode.
			 *
			 * Because we may see inodes that are from the wrong AG
			 * due to RCU freeing and reallocation, only update the
			 * index if it lies in this AG. It was a race that led
			 * us to see this inode, so another lookup from the
			 * same index will not find it again.
			 */
			if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
				continue;
			first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
			if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
				done = 1;
		}

		/* unlock now we've grabbed the inodes. */
		rcu_read_unlock();

		for (i = 0; i < nr_found; i++) {
			if (!batch[i])
				continue;
			if ((iter_flags & XFS_AGITER_INEW_WAIT) &&
			    xfs_iflags_test(batch[i], XFS_INEW))
				xfs_inew_wait(batch[i]);
			error = execute(batch[i], flags, args);
			xfs_irele(batch[i]);
			if (error == -EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != -EFSCORRUPTED)
				last_error = error;
		}

		/* bail out if the filesystem is corrupted. */
		if (error == -EFSCORRUPTED)
			break;

		cond_resched();

	} while (nr_found && !done);

	if (skipped) {
		delay(1);
		goto restart;
	}
	return last_error;
}

/*
 * Background scanning to trim post-EOF preallocated space. This is queued
 * based on the 'speculative_prealloc_lifetime' tunable (5m by default).
 */
void
xfs_queue_eofblocks(
	struct xfs_mount *mp)
{
	rcu_read_lock();
	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_EOFBLOCKS_TAG))
		queue_delayed_work(mp->m_eofblocks_workqueue,
				   &mp->m_eofblocks_work,
				   msecs_to_jiffies(xfs_eofb_secs * 1000));
	rcu_read_unlock();
}

void
xfs_eofblocks_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(to_delayed_work(work),
				struct xfs_mount, m_eofblocks_work);
	xfs_icache_free_eofblocks(mp, NULL);
	xfs_queue_eofblocks(mp);
}

/*
 * Background scanning to trim preallocated CoW space. This is queued
 * based on the 'speculative_cow_prealloc_lifetime' tunable (5m by default).
 * (We'll just piggyback on the post-EOF prealloc space workqueue.)
 */
void
xfs_queue_cowblocks(
	struct xfs_mount *mp)
{
	rcu_read_lock();
	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_COWBLOCKS_TAG))
		queue_delayed_work(mp->m_eofblocks_workqueue,
				   &mp->m_cowblocks_work,
				   msecs_to_jiffies(xfs_cowb_secs * 1000));
	rcu_read_unlock();
}

void
xfs_cowblocks_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(to_delayed_work(work),
				struct xfs_mount, m_cowblocks_work);
	xfs_icache_free_cowblocks(mp, NULL);
	xfs_queue_cowblocks(mp);
}

int
xfs_inode_ag_iterator_flags(
	struct xfs_mount	*mp,
	int			(*execute)(struct xfs_inode *ip, int flags,
					   void *args),
	int			flags,
	void			*args,
	int			iter_flags)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;

	ag = 0;
	while ((pag = xfs_perag_get(mp, ag))) {
		ag = pag->pag_agno + 1;
		error = xfs_inode_ag_walk(mp, pag, execute, flags, args, -1,
					  iter_flags);
		xfs_perag_put(pag);
		if (error) {
			last_error = error;
			if (error == -EFSCORRUPTED)
				break;
		}
	}
	return last_error;
}

int
xfs_inode_ag_iterator(
	struct xfs_mount	*mp,
	int			(*execute)(struct xfs_inode *ip, int flags,
					   void *args),
	int			flags,
	void			*args)
{
	return xfs_inode_ag_iterator_flags(mp, execute, flags, args, 0);
}

int
xfs_inode_ag_iterator_tag(
	struct xfs_mount	*mp,
	int			(*execute)(struct xfs_inode *ip, int flags,
					   void *args),
	int			flags,
	void			*args,
	int			tag)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;

	ag = 0;
	while ((pag = xfs_perag_get_tag(mp, ag, tag))) {
		ag = pag->pag_agno + 1;
		error = xfs_inode_ag_walk(mp, pag, execute, flags, args, tag,
					  0);
		xfs_perag_put(pag);
		if (error) {
			last_error = error;
			if (error == -EFSCORRUPTED)
				break;
		}
	}
	return last_error;
}
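
/*
 * Illustrative sketch, not part of the original source: the shape of an
 * execute callback as consumed by the AG iterators above, plus a call site.
 * The callback and wrapper names are hypothetical; the iterator signature
 * matches xfs_inode_ag_iterator() in this file. Returning -EAGAIN from the
 * callback makes xfs_inode_ag_walk() count the inode as skipped and restart
 * the AG pass after a short delay. Compiled out so it cannot affect the
 * build.
 */
#if 0
STATIC int
xfs_example_execute(
	struct xfs_inode	*ip,
	int			flags,
	void			*args)
{
	/* per-inode work goes here; the walker already holds a reference */
	return 0;
}

static int
xfs_example_walk_all(
	struct xfs_mount	*mp)
{
	return xfs_inode_ag_iterator(mp, xfs_example_execute, 0, NULL);
}
#endif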

/*
 * Grab the inode for reclaim exclusively.
 * Return 0 if we grabbed it, non-zero otherwise.
 */
STATIC int
xfs_reclaim_inode_grab(
	struct xfs_inode	*ip,
	int			flags)
{
	ASSERT(rcu_read_lock_held());

	/* quick check for stale RCU freed inode */
	if (!ip->i_ino)
		return 1;

	/*
	 * If we are asked for non-blocking operation, do unlocked checks to
	 * see if the inode already is being flushed or in reclaim to avoid
	 * lock traffic.
	 */
	if ((flags & SYNC_TRYLOCK) &&
	    __xfs_iflags_test(ip, XFS_IFLOCK | XFS_IRECLAIM))
		return 1;

	/*
	 * The radix tree lock here protects a thread in xfs_iget from racing
	 * with us starting reclaim on the inode.  Once we have the
	 * XFS_IRECLAIM flag set it will not touch us.
	 *
	 * Due to RCU lookup, we may find inodes that have been freed and only
	 * have XFS_IRECLAIM set.  Indeed, we may see reallocated inodes that
	 * aren't candidates for reclaim at all, so we must check that
	 * XFS_IRECLAIMABLE is set first before proceeding to reclaim.
	 */
	spin_lock(&ip->i_flags_lock);
	if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
	    __xfs_iflags_test(ip, XFS_IRECLAIM)) {
		/* not a reclaim candidate. */
		spin_unlock(&ip->i_flags_lock);
		return 1;
	}
	__xfs_iflags_set(ip, XFS_IRECLAIM);
	spin_unlock(&ip->i_flags_lock);
	return 0;
}

/*
 * Inodes in different states need to be treated differently. The following
 * table lists the inode states and the reclaim actions necessary:
 *
 *	inode state	     iflush ret		required action
 *      ---------------      ----------         ---------------
 *	bad			-		reclaim
 *	shutdown		EIO		unpin and reclaim
 *	clean, unpinned		0		reclaim
 *	stale, unpinned		0		reclaim
 *	clean, pinned(*)	0		requeue
 *	stale, pinned		EAGAIN		requeue
 *	dirty, async		-		requeue
 *	dirty, sync		0		reclaim
 *
 * (*) dgc: I don't think the clean, pinned state is possible but it gets
 * handled anyway given the order of checks implemented.
 *
 * Also, because we get the flush lock first, we know that any inode that has
 * been flushed delwri has had the flush completed by the time we check that
 * the inode is clean.
 *
 * Note that because the inode is flushed delayed write by AIL pushing, the
 * flush lock may already be held here and waiting on it can result in very
 * long latencies.  Hence for sync reclaims, where we wait on the flush lock,
 * the caller should push the AIL first before trying to reclaim inodes to
 * minimise the amount of time spent waiting.  For background reclaim, we only
 * bother to reclaim clean inodes anyway.
 *
 * Hence the order of actions after gaining the locks should be:
 *	bad		=> reclaim
 *	shutdown	=> unpin and reclaim
 *	pinned, async	=> requeue
 *	pinned, sync	=> unpin
 *	stale		=> reclaim
 *	clean		=> reclaim
 *	dirty, async	=> requeue
 *	dirty, sync	=> flush, wait and reclaim
 */
STATIC int
xfs_reclaim_inode(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			sync_mode)
{
	struct xfs_buf		*bp = NULL;
	xfs_ino_t		ino = ip->i_ino; /* for radix_tree_delete */
	int			error;

restart:
	error = 0;
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (!xfs_iflock_nowait(ip)) {
		if (!(sync_mode & SYNC_WAIT))
			goto out;
		xfs_iflock(ip);
	}

	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		xfs_iunpin_wait(ip);
		/* xfs_iflush_abort() drops the flush lock */
		xfs_iflush_abort(ip, false);
		goto reclaim;
	}
	if (xfs_ipincount(ip)) {
		if (!(sync_mode & SYNC_WAIT))
			goto out_ifunlock;
		xfs_iunpin_wait(ip);
	}
	if (xfs_iflags_test(ip, XFS_ISTALE) || xfs_inode_clean(ip)) {
		xfs_ifunlock(ip);
		goto reclaim;
	}

	/*
	 * Never flush out dirty data during non-blocking reclaim, as it would
	 * just contend with AIL pushing trying to do the same job.
	 */
	if (!(sync_mode & SYNC_WAIT))
		goto out_ifunlock;

	/*
	 * Now we have an inode that needs flushing.
	 *
	 * Note that xfs_iflush will never block on the inode buffer lock, as
	 * xfs_ifree_cluster() can lock the inode buffer before it locks the
	 * ip->i_lock, and we are doing the exact opposite here.  As a result,
	 * doing a blocking xfs_imap_to_bp() to get the cluster buffer would
	 * result in an ABBA deadlock with xfs_ifree_cluster().
	 *
	 * As xfs_ifree_cluster() must gather all inodes that are active in the
	 * cache to mark them stale, if we hit this case we don't actually want
	 * to do IO here - we want the inode marked stale so we can simply
	 * reclaim it.  Hence if we get an EAGAIN error here, just unlock the
	 * inode, back off and try again.  Hopefully the next pass through will
	 * see the stale flag set on the inode.
	 */
	error = xfs_iflush(ip, &bp);
	if (error == -EAGAIN) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		/* backoff longer than in xfs_ifree_cluster */
		delay(2);
		goto restart;
	}

	if (!error) {
		error = xfs_bwrite(bp);
		xfs_buf_relse(bp);
	}

reclaim:
	ASSERT(!xfs_isiflocked(ip));

	/*
	 * Because we use RCU freeing we need to ensure the inode always appears
	 * to be reclaimed with an invalid inode number when in the free state.
	 * We do this as early as possible under the ILOCK so that
	 * xfs_iflush_cluster() and xfs_ifree_cluster() can be guaranteed to
	 * detect races with us here. By doing this, we guarantee that once
	 * xfs_iflush_cluster() or xfs_ifree_cluster() has locked XFS_ILOCK that
	 * it will see either a valid inode that will serialise correctly, or it
	 * will see an invalid inode that it can skip.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	spin_unlock(&ip->i_flags_lock);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	XFS_STATS_INC(ip->i_mount, xs_ig_reclaims);
	/*
	 * Remove the inode from the per-AG radix tree.
	 *
	 * Because radix_tree_delete won't complain even if the item was never
	 * added to the tree, assert that it's been there before to catch
	 * problems with the inode life time early on.
	 */
	spin_lock(&pag->pag_ici_lock);
	if (!radix_tree_delete(&pag->pag_ici_root,
				XFS_INO_TO_AGINO(ip->i_mount, ino)))
		ASSERT(0);
	xfs_perag_clear_reclaim_tag(pag);
	spin_unlock(&pag->pag_ici_lock);

	/*
	 * Here we do an (almost) spurious inode lock in order to coordinate
	 * with inode cache radix tree lookups.  This is because the lookup
	 * can reference the inodes in the cache without taking references.
	 *
	 * We make that OK here by ensuring that we wait until the inode is
	 * unlocked after the lookup before we go ahead and free it.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_qm_dqdetach(ip);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	__xfs_inode_free(ip);
	return error;

out_ifunlock:
	xfs_ifunlock(ip);
out:
	xfs_iflags_clear(ip, XFS_IRECLAIM);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	/*
	 * We could return -EAGAIN here to make reclaim rescan the inode tree in
	 * a short while. However, this just burns CPU time scanning the tree
	 * waiting for IO to complete and the reclaim work never goes back to
	 * the idle state. Instead, return 0 to let the next scheduled
	 * background reclaim attempt to reclaim the inode again.
	 */
	return 0;
}

/*
 * Walk the AGs and reclaim the inodes in them. Even if the filesystem is
 * corrupted, we still want to try to reclaim all the inodes. If we don't,
 * then a shut down during filesystem unmount reclaim walk will leak all the
 * unreclaimed inodes.
 */
STATIC int
xfs_reclaim_inodes_ag(
	struct xfs_mount	*mp,
	int			flags,
	int			*nr_to_scan)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;
	int			trylock = flags & SYNC_TRYLOCK;
	int			skipped;

restart:
	ag = 0;
	skipped = 0;
	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		unsigned long	first_index = 0;
		int		done = 0;
		int		nr_found = 0;

		ag = pag->pag_agno + 1;

		if (trylock) {
			if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) {
				skipped++;
				xfs_perag_put(pag);
				continue;
			}
			first_index = pag->pag_ici_reclaim_cursor;
		} else
			mutex_lock(&pag->pag_ici_reclaim_lock);

		do {
			struct xfs_inode *batch[XFS_LOOKUP_BATCH];
			int	i;

			rcu_read_lock();
			nr_found = radix_tree_gang_lookup_tag(
					&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH,
					XFS_ICI_RECLAIM_TAG);
			if (!nr_found) {
				done = 1;
				rcu_read_unlock();
				break;
			}

			/*
			 * Grab the inodes before we drop the lock. If we found
			 * nothing, nr == 0 and the loop will be skipped.
			 */
			for (i = 0; i < nr_found; i++) {
				struct xfs_inode *ip = batch[i];

				if (done || xfs_reclaim_inode_grab(ip, flags))
					batch[i] = NULL;

				/*
				 * Update the index for the next lookup. Catch
				 * overflows into the next AG range which can
				 * occur if we have inodes in the last block of
				 * the AG and we are currently pointing to the
				 * last inode.
				 *
				 * Because we may see inodes that are from the
				 * wrong AG due to RCU freeing and
				 * reallocation, only update the index if it
				 * lies in this AG. It was a race that led us
				 * to see this inode, so another lookup from
				 * the same index will not find it again.
				 */
				if (XFS_INO_TO_AGNO(mp, ip->i_ino) !=
								pag->pag_agno)
					continue;
				first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
				if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
					done = 1;
			}

			/* unlock now we've grabbed the inodes. */
			rcu_read_unlock();

			for (i = 0; i < nr_found; i++) {
				if (!batch[i])
					continue;
				error = xfs_reclaim_inode(batch[i], pag, flags);
				if (error && last_error != -EFSCORRUPTED)
					last_error = error;
			}

			*nr_to_scan -= XFS_LOOKUP_BATCH;

			cond_resched();

		} while (nr_found && !done && *nr_to_scan > 0);

		if (trylock && !done)
			pag->pag_ici_reclaim_cursor = first_index;
		else
			pag->pag_ici_reclaim_cursor = 0;
		mutex_unlock(&pag->pag_ici_reclaim_lock);
		xfs_perag_put(pag);
	}

	/*
	 * if we skipped any AG, and we still have scan count remaining, do
	 * another pass this time using blocking reclaim semantics (i.e.
	 * waiting on the reclaim locks and ignoring the reclaim cursors). This
	 * ensures that when we get more reclaimers than AGs we block rather
	 * than spin trying to execute reclaim.
	 */
	if (skipped && (flags & SYNC_WAIT) && *nr_to_scan > 0) {
		trylock = 0;
		goto restart;
	}
	return last_error;
}

int
xfs_reclaim_inodes(
	xfs_mount_t	*mp,
	int		mode)
{
	int		nr_to_scan = INT_MAX;

	return xfs_reclaim_inodes_ag(mp, mode, &nr_to_scan);
}

/*
 * Scan a certain number of inodes for reclaim.
 *
 * When called we make sure that there is a background (fast) inode reclaim in
 * progress, while we will throttle the speed of reclaim via doing synchronous
 * reclaim of inodes. That means if we come across dirty inodes, we wait for
 * them to be cleaned, which we hope will not be very long due to the
 * background walker having already kicked the IO off on those dirty inodes.
 */
long
xfs_reclaim_inodes_nr(
	struct xfs_mount	*mp,
	int			nr_to_scan)
{
	/* kick background reclaimer and push the AIL */
	xfs_reclaim_work_queue(mp);
	xfs_ail_push_all(mp->m_ail);

	return xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT, &nr_to_scan);
}

/*
 * Return the number of reclaimable inodes in the filesystem for
 * the shrinker to determine how much to reclaim.
 */
int
xfs_reclaim_inodes_count(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		ag = 0;
	int			reclaimable = 0;

	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		ag = pag->pag_agno + 1;
		reclaimable += pag->pag_ici_reclaimable;
		xfs_perag_put(pag);
	}
	return reclaimable;
}

STATIC int
xfs_inode_match_id(
	struct xfs_inode	*ip,
	struct xfs_eofblocks	*eofb)
{
	if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
	    !uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
		return 0;

	if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
	    !gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
		return 0;

	if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
	    ip->i_d.di_projid != eofb->eof_prid)
		return 0;

	return 1;
}

/*
 * A union-based inode filtering algorithm. Process the inode if any of the
 * criteria match. This is for global/internal scans only.
 */
STATIC int
xfs_inode_match_id_union(
	struct xfs_inode	*ip,
	struct xfs_eofblocks	*eofb)
{
	if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
	    uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
		return 1;

	if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
	    gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
		return 1;

	if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
	    ip->i_d.di_projid == eofb->eof_prid)
		return 1;

	return 0;
}
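
/*
 * Illustrative note, not part of the original source: as an example of the
 * difference between the two filters above, an eofb filter with both
 * XFS_EOF_FLAGS_UID and XFS_EOF_FLAGS_GID set matches an inode in
 * xfs_inode_match_id() only when its uid AND gid both match, while
 * xfs_inode_match_id_union() matches when either one does. The quota
 * low-space scan further down relies on the union behaviour.
 */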

STATIC int
xfs_inode_free_eofblocks(
	struct xfs_inode	*ip,
	int			flags,
	void			*args)
{
	int ret = 0;
	struct xfs_eofblocks *eofb = args;
	int match;

	if (!xfs_can_free_eofblocks(ip, false)) {
		/* inode could be preallocated or append-only */
		trace_xfs_inode_free_eofblocks_invalid(ip);
		xfs_inode_clear_eofblocks_tag(ip);
		return 0;
	}

	/*
	 * If the mapping is dirty the operation can block and wait for some
	 * time. Unless we are waiting, skip it.
	 */
	if (!(flags & SYNC_WAIT) &&
	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
		return 0;

	if (eofb) {
		if (eofb->eof_flags & XFS_EOF_FLAGS_UNION)
			match = xfs_inode_match_id_union(ip, eofb);
		else
			match = xfs_inode_match_id(ip, eofb);
		if (!match)
			return 0;

		/* skip the inode if the file size is too small */
		if (eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE &&
		    XFS_ISIZE(ip) < eofb->eof_min_file_size)
			return 0;
	}

	/*
	 * If the caller is waiting, return -EAGAIN to keep the background
	 * scanner moving and revisit the inode in a subsequent pass.
	 */
	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
		if (flags & SYNC_WAIT)
			ret = -EAGAIN;
		return ret;
	}
	ret = xfs_free_eofblocks(ip);
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);

	return ret;
}

static int
__xfs_icache_free_eofblocks(
	struct xfs_mount	*mp,
	struct xfs_eofblocks	*eofb,
	int			(*execute)(struct xfs_inode *ip, int flags,
					   void *args),
	int			tag)
{
	int flags = SYNC_TRYLOCK;

	if (eofb && (eofb->eof_flags & XFS_EOF_FLAGS_SYNC))
		flags = SYNC_WAIT;

	return xfs_inode_ag_iterator_tag(mp, execute, flags,
					 eofb, tag);
}

int
xfs_icache_free_eofblocks(
	struct xfs_mount	*mp,
	struct xfs_eofblocks	*eofb)
{
	return __xfs_icache_free_eofblocks(mp, eofb, xfs_inode_free_eofblocks,
			XFS_ICI_EOFBLOCKS_TAG);
}
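
/*
 * Illustrative sketch, not part of the original source: roughly how a caller
 * might fill in struct xfs_eofblocks to request a synchronous scan limited
 * to one project ID. The wrapper name and the project ID value are made up
 * for the example; the flag and field names are the ones used by the filters
 * above. Compiled out so it cannot affect the build.
 */
#if 0
static int
xfs_example_trim_project(
	struct xfs_mount	*mp)
{
	struct xfs_eofblocks	eofb = {
		.eof_flags	= XFS_EOF_FLAGS_SYNC | XFS_EOF_FLAGS_PRID,
		.eof_prid	= 42,
	};

	/* walk every tagged inode and trim post-EOF preallocations */
	return xfs_icache_free_eofblocks(mp, &eofb);
}
#endif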

/*
 * Run eofblocks scans on the quotas applicable to the inode. For inodes with
 * multiple quotas, we don't know exactly which quota caused an allocation
 * failure. We make a best effort by including each quota under low free space
 * conditions (less than 1% free space) in the scan.
 */
static int
__xfs_inode_free_quota_eofblocks(
	struct xfs_inode	*ip,
	int			(*execute)(struct xfs_mount *mp,
					   struct xfs_eofblocks	*eofb))
{
	int scan = 0;
	struct xfs_eofblocks eofb = {0};
	struct xfs_dquot *dq;

	/*
	 * Run a sync scan to increase effectiveness and use the union filter to
	 * cover all applicable quotas in a single scan.
	 */
	eofb.eof_flags = XFS_EOF_FLAGS_UNION|XFS_EOF_FLAGS_SYNC;

	if (XFS_IS_UQUOTA_ENFORCED(ip->i_mount)) {
		dq = xfs_inode_dquot(ip, XFS_DQ_USER);
		if (dq && xfs_dquot_lowsp(dq)) {
			eofb.eof_uid = VFS_I(ip)->i_uid;
			eofb.eof_flags |= XFS_EOF_FLAGS_UID;
			scan = 1;
		}
	}

	if (XFS_IS_GQUOTA_ENFORCED(ip->i_mount)) {
		dq = xfs_inode_dquot(ip, XFS_DQ_GROUP);
		if (dq && xfs_dquot_lowsp(dq)) {
			eofb.eof_gid = VFS_I(ip)->i_gid;
			eofb.eof_flags |= XFS_EOF_FLAGS_GID;
			scan = 1;
		}
	}

	if (scan)
		execute(ip->i_mount, &eofb);

	return scan;
}

int
xfs_inode_free_quota_eofblocks(
	struct xfs_inode *ip)
{
	return __xfs_inode_free_quota_eofblocks(ip, xfs_icache_free_eofblocks);
}

static inline unsigned long
xfs_iflag_for_tag(
	int		tag)
{
	switch (tag) {
	case XFS_ICI_EOFBLOCKS_TAG:
		return XFS_IEOFBLOCKS;
	case XFS_ICI_COWBLOCKS_TAG:
		return XFS_ICOWBLOCKS;
	default:
		ASSERT(0);
		return 0;
	}
}

static void
__xfs_inode_set_blocks_tag(
	xfs_inode_t	*ip,
	void		(*execute)(struct xfs_mount *mp),
	void		(*set_tp)(struct xfs_mount *mp, xfs_agnumber_t agno,
				  int error, unsigned long caller_ip),
	int		tag)
{
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_perag *pag;
	int tagged;

	/*
	 * Don't bother locking the AG and looking up in the radix trees
	 * if we already know that we have the tag set.
	 */
	if (ip->i_flags & xfs_iflag_for_tag(tag))
		return;
	spin_lock(&ip->i_flags_lock);
	ip->i_flags |= xfs_iflag_for_tag(tag);
	spin_unlock(&ip->i_flags_lock);

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);

	tagged = radix_tree_tagged(&pag->pag_ici_root, tag);
	radix_tree_tag_set(&pag->pag_ici_root,
			   XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), tag);
	if (!tagged) {
		/* propagate the eofblocks tag up into the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_set(&ip->i_mount->m_perag_tree,
				   XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				   tag);
		spin_unlock(&ip->i_mount->m_perag_lock);

		/* kick off background trimming */
		execute(ip->i_mount);

		set_tp(ip->i_mount, pag->pag_agno, -1, _RET_IP_);
	}

	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}

void
xfs_inode_set_eofblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_set_eofblocks_tag(ip);
	return __xfs_inode_set_blocks_tag(ip, xfs_queue_eofblocks,
			trace_xfs_perag_set_eofblocks,
			XFS_ICI_EOFBLOCKS_TAG);
}

static void
__xfs_inode_clear_blocks_tag(
	xfs_inode_t	*ip,
	void		(*clear_tp)(struct xfs_mount *mp, xfs_agnumber_t agno,
				    int error, unsigned long caller_ip),
	int		tag)
{
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_perag *pag;

	spin_lock(&ip->i_flags_lock);
	ip->i_flags &= ~xfs_iflag_for_tag(tag);
	spin_unlock(&ip->i_flags_lock);

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);

	radix_tree_tag_clear(&pag->pag_ici_root,
			     XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), tag);
	if (!radix_tree_tagged(&pag->pag_ici_root, tag)) {
		/* clear the eofblocks tag from the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
				     XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				     tag);
		spin_unlock(&ip->i_mount->m_perag_lock);
		clear_tp(ip->i_mount, pag->pag_agno, -1, _RET_IP_);
	}

	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}

void
xfs_inode_clear_eofblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_clear_eofblocks_tag(ip);
	return __xfs_inode_clear_blocks_tag(ip,
			trace_xfs_perag_clear_eofblocks, XFS_ICI_EOFBLOCKS_TAG);
}

/*
 * Set ourselves up to free CoW blocks from this file.  If it's already clean
 * then we can bail out quickly, but otherwise we must back off if the file
 * is undergoing some kind of write.
 */
static bool
xfs_prep_free_cowblocks(
	struct xfs_inode	*ip)
{
	/*
	 * Just clear the tag if we have an empty cow fork or none at all. It's
	 * possible the inode was fully unshared since it was originally tagged.
	 */
	if (!xfs_inode_has_cow_data(ip)) {
		trace_xfs_inode_free_cowblocks_invalid(ip);
		xfs_inode_clear_cowblocks_tag(ip);
		return false;
	}

	/*
	 * If the mapping is dirty or under writeback we cannot touch the
	 * CoW fork.  Leave it alone if we're in the midst of a directio.
	 */
	if ((VFS_I(ip)->i_state & I_DIRTY_PAGES) ||
	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) ||
	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_WRITEBACK) ||
	    atomic_read(&VFS_I(ip)->i_dio_count))
		return false;

	return true;
}

/*
 * Automatic CoW Reservation Freeing
 *
 * These functions automatically garbage collect leftover CoW reservations
 * that were made on behalf of a cowextsize hint when we start to run out
 * of quota or when the reservations sit around for too long.  If the file
 * has dirty pages or is undergoing writeback, its CoW reservations will
 * be retained.
 *
 * The actual garbage collection piggybacks off the same code that runs
 * the speculative EOF preallocation garbage collector.
 */
STATIC int
xfs_inode_free_cowblocks(
	struct xfs_inode	*ip,
	int			flags,
	void			*args)
{
	struct xfs_eofblocks *eofb = args;
	int match;
	int ret = 0;

	if (!xfs_prep_free_cowblocks(ip))
		return 0;

	if (eofb) {
		if (eofb->eof_flags & XFS_EOF_FLAGS_UNION)
			match = xfs_inode_match_id_union(ip, eofb);
		else
			match = xfs_inode_match_id(ip, eofb);
		if (!match)
			return 0;

		/* skip the inode if the file size is too small */
		if (eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE &&
		    XFS_ISIZE(ip) < eofb->eof_min_file_size)
			return 0;
	}

	/* Free the CoW blocks */
	xfs_ilock(ip, XFS_IOLOCK_EXCL);
	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);

	/*
	 * Check again, nobody else should be able to dirty blocks or change
	 * the reflink iflag now that we have the first two locks held.
	 */
	if (xfs_prep_free_cowblocks(ip))
		ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false);

	xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);

	return ret;
}

int
xfs_icache_free_cowblocks(
	struct xfs_mount	*mp,
	struct xfs_eofblocks	*eofb)
{
	return __xfs_icache_free_eofblocks(mp, eofb, xfs_inode_free_cowblocks,
			XFS_ICI_COWBLOCKS_TAG);
}

int
xfs_inode_free_quota_cowblocks(
	struct xfs_inode *ip)
{
	return __xfs_inode_free_quota_eofblocks(ip, xfs_icache_free_cowblocks);
}

void
xfs_inode_set_cowblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_set_cowblocks_tag(ip);
	return __xfs_inode_set_blocks_tag(ip, xfs_queue_cowblocks,
			trace_xfs_perag_set_cowblocks,
			XFS_ICI_COWBLOCKS_TAG);
}

void
xfs_inode_clear_cowblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_clear_cowblocks_tag(ip);
	return __xfs_inode_clear_blocks_tag(ip,
			trace_xfs_perag_clear_cowblocks, XFS_ICI_COWBLOCKS_TAG);
}

/* Disable post-EOF and CoW block auto-reclamation. */
void
xfs_stop_block_reaping(
	struct xfs_mount *mp)
{
	cancel_delayed_work_sync(&mp->m_eofblocks_work);
	cancel_delayed_work_sync(&mp->m_cowblocks_work);
}

/* Enable post-EOF and CoW block auto-reclamation. */
void
xfs_start_block_reaping(
	struct xfs_mount *mp)
{
	xfs_queue_eofblocks(mp);
	xfs_queue_cowblocks(mp);
}