1 /* 2 * Copyright (c) 2000-2005 Silicon Graphics, Inc. 3 * All Rights Reserved. 4 * 5 * This program is free software; you can redistribute it and/or 6 * modify it under the terms of the GNU General Public License as 7 * published by the Free Software Foundation. 8 * 9 * This program is distributed in the hope that it would be useful, 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * GNU General Public License for more details. 13 * 14 * You should have received a copy of the GNU General Public License 15 * along with this program; if not, write the Free Software Foundation, 16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 17 */ 18 #include "xfs.h" 19 #include "xfs_fs.h" 20 #include "xfs_format.h" 21 #include "xfs_log_format.h" 22 #include "xfs_trans_resv.h" 23 #include "xfs_sb.h" 24 #include "xfs_mount.h" 25 #include "xfs_inode.h" 26 #include "xfs_error.h" 27 #include "xfs_trans.h" 28 #include "xfs_trans_priv.h" 29 #include "xfs_inode_item.h" 30 #include "xfs_quota.h" 31 #include "xfs_trace.h" 32 #include "xfs_icache.h" 33 #include "xfs_bmap_util.h" 34 #include "xfs_dquot_item.h" 35 #include "xfs_dquot.h" 36 #include "xfs_reflink.h" 37 38 #include <linux/kthread.h> 39 #include <linux/freezer.h> 40 #include <linux/iversion.h> 41 42 /* 43 * Allocate and initialise an xfs_inode. 44 */ 45 struct xfs_inode * 46 xfs_inode_alloc( 47 struct xfs_mount *mp, 48 xfs_ino_t ino) 49 { 50 struct xfs_inode *ip; 51 52 /* 53 * if this didn't occur in transactions, we could use 54 * KM_MAYFAIL and return NULL here on ENOMEM. Set the 55 * code up to do this anyway. 56 */ 57 ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP); 58 if (!ip) 59 return NULL; 60 if (inode_init_always(mp->m_super, VFS_I(ip))) { 61 kmem_zone_free(xfs_inode_zone, ip); 62 return NULL; 63 } 64 65 /* VFS doesn't initialise i_mode! */ 66 VFS_I(ip)->i_mode = 0; 67 68 XFS_STATS_INC(mp, vn_active); 69 ASSERT(atomic_read(&ip->i_pincount) == 0); 70 ASSERT(!xfs_isiflocked(ip)); 71 ASSERT(ip->i_ino == 0); 72 73 /* initialise the xfs inode */ 74 ip->i_ino = ino; 75 ip->i_mount = mp; 76 memset(&ip->i_imap, 0, sizeof(struct xfs_imap)); 77 ip->i_afp = NULL; 78 ip->i_cowfp = NULL; 79 ip->i_cnextents = 0; 80 ip->i_cformat = XFS_DINODE_FMT_EXTENTS; 81 memset(&ip->i_df, 0, sizeof(xfs_ifork_t)); 82 ip->i_flags = 0; 83 ip->i_delayed_blks = 0; 84 memset(&ip->i_d, 0, sizeof(ip->i_d)); 85 86 return ip; 87 } 88 89 STATIC void 90 xfs_inode_free_callback( 91 struct rcu_head *head) 92 { 93 struct inode *inode = container_of(head, struct inode, i_rcu); 94 struct xfs_inode *ip = XFS_I(inode); 95 96 switch (VFS_I(ip)->i_mode & S_IFMT) { 97 case S_IFREG: 98 case S_IFDIR: 99 case S_IFLNK: 100 xfs_idestroy_fork(ip, XFS_DATA_FORK); 101 break; 102 } 103 104 if (ip->i_afp) 105 xfs_idestroy_fork(ip, XFS_ATTR_FORK); 106 if (ip->i_cowfp) 107 xfs_idestroy_fork(ip, XFS_COW_FORK); 108 109 if (ip->i_itemp) { 110 ASSERT(!(ip->i_itemp->ili_item.li_flags & XFS_LI_IN_AIL)); 111 xfs_inode_item_destroy(ip); 112 ip->i_itemp = NULL; 113 } 114 115 kmem_zone_free(xfs_inode_zone, ip); 116 } 117 118 static void 119 __xfs_inode_free( 120 struct xfs_inode *ip) 121 { 122 /* asserts to verify all state is correct here */ 123 ASSERT(atomic_read(&ip->i_pincount) == 0); 124 XFS_STATS_DEC(ip->i_mount, vn_active); 125 126 call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback); 127 } 128 129 void 130 xfs_inode_free( 131 struct xfs_inode *ip) 132 { 133 ASSERT(!xfs_isiflocked(ip)); 134 135 /* 136 * Because we use RCU freeing we need to ensure the inode always 137 * appears to be reclaimed with an invalid inode number when in the 138 * free state. The ip->i_flags_lock provides the barrier against lookup 139 * races. 140 */ 141 spin_lock(&ip->i_flags_lock); 142 ip->i_flags = XFS_IRECLAIM; 143 ip->i_ino = 0; 144 spin_unlock(&ip->i_flags_lock); 145 146 __xfs_inode_free(ip); 147 } 148 149 /* 150 * Queue a new inode reclaim pass if there are reclaimable inodes and there 151 * isn't a reclaim pass already in progress. By default it runs every 5s based 152 * on the xfs periodic sync default of 30s. Perhaps this should have it's own 153 * tunable, but that can be done if this method proves to be ineffective or too 154 * aggressive. 155 */ 156 static void 157 xfs_reclaim_work_queue( 158 struct xfs_mount *mp) 159 { 160 161 rcu_read_lock(); 162 if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) { 163 queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work, 164 msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10)); 165 } 166 rcu_read_unlock(); 167 } 168 169 /* 170 * This is a fast pass over the inode cache to try to get reclaim moving on as 171 * many inodes as possible in a short period of time. It kicks itself every few 172 * seconds, as well as being kicked by the inode cache shrinker when memory 173 * goes low. It scans as quickly as possible avoiding locked inodes or those 174 * already being flushed, and once done schedules a future pass. 175 */ 176 void 177 xfs_reclaim_worker( 178 struct work_struct *work) 179 { 180 struct xfs_mount *mp = container_of(to_delayed_work(work), 181 struct xfs_mount, m_reclaim_work); 182 183 xfs_reclaim_inodes(mp, SYNC_TRYLOCK); 184 xfs_reclaim_work_queue(mp); 185 } 186 187 static void 188 xfs_perag_set_reclaim_tag( 189 struct xfs_perag *pag) 190 { 191 struct xfs_mount *mp = pag->pag_mount; 192 193 lockdep_assert_held(&pag->pag_ici_lock); 194 if (pag->pag_ici_reclaimable++) 195 return; 196 197 /* propagate the reclaim tag up into the perag radix tree */ 198 spin_lock(&mp->m_perag_lock); 199 radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno, 200 XFS_ICI_RECLAIM_TAG); 201 spin_unlock(&mp->m_perag_lock); 202 203 /* schedule periodic background inode reclaim */ 204 xfs_reclaim_work_queue(mp); 205 206 trace_xfs_perag_set_reclaim(mp, pag->pag_agno, -1, _RET_IP_); 207 } 208 209 static void 210 xfs_perag_clear_reclaim_tag( 211 struct xfs_perag *pag) 212 { 213 struct xfs_mount *mp = pag->pag_mount; 214 215 lockdep_assert_held(&pag->pag_ici_lock); 216 if (--pag->pag_ici_reclaimable) 217 return; 218 219 /* clear the reclaim tag from the perag radix tree */ 220 spin_lock(&mp->m_perag_lock); 221 radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno, 222 XFS_ICI_RECLAIM_TAG); 223 spin_unlock(&mp->m_perag_lock); 224 trace_xfs_perag_clear_reclaim(mp, pag->pag_agno, -1, _RET_IP_); 225 } 226 227 228 /* 229 * We set the inode flag atomically with the radix tree tag. 230 * Once we get tag lookups on the radix tree, this inode flag 231 * can go away. 232 */ 233 void 234 xfs_inode_set_reclaim_tag( 235 struct xfs_inode *ip) 236 { 237 struct xfs_mount *mp = ip->i_mount; 238 struct xfs_perag *pag; 239 240 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); 241 spin_lock(&pag->pag_ici_lock); 242 spin_lock(&ip->i_flags_lock); 243 244 radix_tree_tag_set(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino), 245 XFS_ICI_RECLAIM_TAG); 246 xfs_perag_set_reclaim_tag(pag); 247 __xfs_iflags_set(ip, XFS_IRECLAIMABLE); 248 249 spin_unlock(&ip->i_flags_lock); 250 spin_unlock(&pag->pag_ici_lock); 251 xfs_perag_put(pag); 252 } 253 254 STATIC void 255 xfs_inode_clear_reclaim_tag( 256 struct xfs_perag *pag, 257 xfs_ino_t ino) 258 { 259 radix_tree_tag_clear(&pag->pag_ici_root, 260 XFS_INO_TO_AGINO(pag->pag_mount, ino), 261 XFS_ICI_RECLAIM_TAG); 262 xfs_perag_clear_reclaim_tag(pag); 263 } 264 265 static void 266 xfs_inew_wait( 267 struct xfs_inode *ip) 268 { 269 wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_INEW_BIT); 270 DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_INEW_BIT); 271 272 do { 273 prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE); 274 if (!xfs_iflags_test(ip, XFS_INEW)) 275 break; 276 schedule(); 277 } while (true); 278 finish_wait(wq, &wait.wq_entry); 279 } 280 281 /* 282 * When we recycle a reclaimable inode, we need to re-initialise the VFS inode 283 * part of the structure. This is made more complex by the fact we store 284 * information about the on-disk values in the VFS inode and so we can't just 285 * overwrite the values unconditionally. Hence we save the parameters we 286 * need to retain across reinitialisation, and rewrite them into the VFS inode 287 * after reinitialisation even if it fails. 288 */ 289 static int 290 xfs_reinit_inode( 291 struct xfs_mount *mp, 292 struct inode *inode) 293 { 294 int error; 295 uint32_t nlink = inode->i_nlink; 296 uint32_t generation = inode->i_generation; 297 uint64_t version = inode_peek_iversion(inode); 298 umode_t mode = inode->i_mode; 299 dev_t dev = inode->i_rdev; 300 301 error = inode_init_always(mp->m_super, inode); 302 303 set_nlink(inode, nlink); 304 inode->i_generation = generation; 305 inode_set_iversion_queried(inode, version); 306 inode->i_mode = mode; 307 inode->i_rdev = dev; 308 return error; 309 } 310 311 /* 312 * Check the validity of the inode we just found it the cache 313 */ 314 static int 315 xfs_iget_cache_hit( 316 struct xfs_perag *pag, 317 struct xfs_inode *ip, 318 xfs_ino_t ino, 319 int flags, 320 int lock_flags) __releases(RCU) 321 { 322 struct inode *inode = VFS_I(ip); 323 struct xfs_mount *mp = ip->i_mount; 324 int error; 325 326 /* 327 * check for re-use of an inode within an RCU grace period due to the 328 * radix tree nodes not being updated yet. We monitor for this by 329 * setting the inode number to zero before freeing the inode structure. 330 * If the inode has been reallocated and set up, then the inode number 331 * will not match, so check for that, too. 332 */ 333 spin_lock(&ip->i_flags_lock); 334 if (ip->i_ino != ino) { 335 trace_xfs_iget_skip(ip); 336 XFS_STATS_INC(mp, xs_ig_frecycle); 337 error = -EAGAIN; 338 goto out_error; 339 } 340 341 342 /* 343 * If we are racing with another cache hit that is currently 344 * instantiating this inode or currently recycling it out of 345 * reclaimabe state, wait for the initialisation to complete 346 * before continuing. 347 * 348 * XXX(hch): eventually we should do something equivalent to 349 * wait_on_inode to wait for these flags to be cleared 350 * instead of polling for it. 351 */ 352 if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) { 353 trace_xfs_iget_skip(ip); 354 XFS_STATS_INC(mp, xs_ig_frecycle); 355 error = -EAGAIN; 356 goto out_error; 357 } 358 359 /* 360 * If lookup is racing with unlink return an error immediately. 361 */ 362 if (VFS_I(ip)->i_mode == 0 && !(flags & XFS_IGET_CREATE)) { 363 error = -ENOENT; 364 goto out_error; 365 } 366 367 /* 368 * If IRECLAIMABLE is set, we've torn down the VFS inode already. 369 * Need to carefully get it back into useable state. 370 */ 371 if (ip->i_flags & XFS_IRECLAIMABLE) { 372 trace_xfs_iget_reclaim(ip); 373 374 if (flags & XFS_IGET_INCORE) { 375 error = -EAGAIN; 376 goto out_error; 377 } 378 379 /* 380 * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode 381 * from stomping over us while we recycle the inode. We can't 382 * clear the radix tree reclaimable tag yet as it requires 383 * pag_ici_lock to be held exclusive. 384 */ 385 ip->i_flags |= XFS_IRECLAIM; 386 387 spin_unlock(&ip->i_flags_lock); 388 rcu_read_unlock(); 389 390 error = xfs_reinit_inode(mp, inode); 391 if (error) { 392 bool wake; 393 /* 394 * Re-initializing the inode failed, and we are in deep 395 * trouble. Try to re-add it to the reclaim list. 396 */ 397 rcu_read_lock(); 398 spin_lock(&ip->i_flags_lock); 399 wake = !!__xfs_iflags_test(ip, XFS_INEW); 400 ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM); 401 if (wake) 402 wake_up_bit(&ip->i_flags, __XFS_INEW_BIT); 403 ASSERT(ip->i_flags & XFS_IRECLAIMABLE); 404 trace_xfs_iget_reclaim_fail(ip); 405 goto out_error; 406 } 407 408 spin_lock(&pag->pag_ici_lock); 409 spin_lock(&ip->i_flags_lock); 410 411 /* 412 * Clear the per-lifetime state in the inode as we are now 413 * effectively a new inode and need to return to the initial 414 * state before reuse occurs. 415 */ 416 ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS; 417 ip->i_flags |= XFS_INEW; 418 xfs_inode_clear_reclaim_tag(pag, ip->i_ino); 419 inode->i_state = I_NEW; 420 421 ASSERT(!rwsem_is_locked(&inode->i_rwsem)); 422 init_rwsem(&inode->i_rwsem); 423 424 spin_unlock(&ip->i_flags_lock); 425 spin_unlock(&pag->pag_ici_lock); 426 } else { 427 /* If the VFS inode is being torn down, pause and try again. */ 428 if (!igrab(inode)) { 429 trace_xfs_iget_skip(ip); 430 error = -EAGAIN; 431 goto out_error; 432 } 433 434 /* We've got a live one. */ 435 spin_unlock(&ip->i_flags_lock); 436 rcu_read_unlock(); 437 trace_xfs_iget_hit(ip); 438 } 439 440 if (lock_flags != 0) 441 xfs_ilock(ip, lock_flags); 442 443 if (!(flags & XFS_IGET_INCORE)) 444 xfs_iflags_clear(ip, XFS_ISTALE | XFS_IDONTCACHE); 445 XFS_STATS_INC(mp, xs_ig_found); 446 447 return 0; 448 449 out_error: 450 spin_unlock(&ip->i_flags_lock); 451 rcu_read_unlock(); 452 return error; 453 } 454 455 456 static int 457 xfs_iget_cache_miss( 458 struct xfs_mount *mp, 459 struct xfs_perag *pag, 460 xfs_trans_t *tp, 461 xfs_ino_t ino, 462 struct xfs_inode **ipp, 463 int flags, 464 int lock_flags) 465 { 466 struct xfs_inode *ip; 467 int error; 468 xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ino); 469 int iflags; 470 471 ip = xfs_inode_alloc(mp, ino); 472 if (!ip) 473 return -ENOMEM; 474 475 error = xfs_iread(mp, tp, ip, flags); 476 if (error) 477 goto out_destroy; 478 479 if (!xfs_inode_verify_forks(ip)) { 480 error = -EFSCORRUPTED; 481 goto out_destroy; 482 } 483 484 trace_xfs_iget_miss(ip); 485 486 487 /* 488 * If we are allocating a new inode, then check what was returned is 489 * actually a free, empty inode. If we are not allocating an inode, 490 * the check we didn't find a free inode. 491 */ 492 if (flags & XFS_IGET_CREATE) { 493 if (VFS_I(ip)->i_mode != 0) { 494 xfs_warn(mp, 495 "Corruption detected! Free inode 0x%llx not marked free on disk", 496 ino); 497 error = -EFSCORRUPTED; 498 goto out_destroy; 499 } 500 if (ip->i_d.di_nblocks != 0) { 501 xfs_warn(mp, 502 "Corruption detected! Free inode 0x%llx has blocks allocated!", 503 ino); 504 error = -EFSCORRUPTED; 505 goto out_destroy; 506 } 507 } else if (VFS_I(ip)->i_mode == 0) { 508 error = -ENOENT; 509 goto out_destroy; 510 } 511 512 /* 513 * Preload the radix tree so we can insert safely under the 514 * write spinlock. Note that we cannot sleep inside the preload 515 * region. Since we can be called from transaction context, don't 516 * recurse into the file system. 517 */ 518 if (radix_tree_preload(GFP_NOFS)) { 519 error = -EAGAIN; 520 goto out_destroy; 521 } 522 523 /* 524 * Because the inode hasn't been added to the radix-tree yet it can't 525 * be found by another thread, so we can do the non-sleeping lock here. 526 */ 527 if (lock_flags) { 528 if (!xfs_ilock_nowait(ip, lock_flags)) 529 BUG(); 530 } 531 532 /* 533 * These values must be set before inserting the inode into the radix 534 * tree as the moment it is inserted a concurrent lookup (allowed by the 535 * RCU locking mechanism) can find it and that lookup must see that this 536 * is an inode currently under construction (i.e. that XFS_INEW is set). 537 * The ip->i_flags_lock that protects the XFS_INEW flag forms the 538 * memory barrier that ensures this detection works correctly at lookup 539 * time. 540 */ 541 iflags = XFS_INEW; 542 if (flags & XFS_IGET_DONTCACHE) 543 iflags |= XFS_IDONTCACHE; 544 ip->i_udquot = NULL; 545 ip->i_gdquot = NULL; 546 ip->i_pdquot = NULL; 547 xfs_iflags_set(ip, iflags); 548 549 /* insert the new inode */ 550 spin_lock(&pag->pag_ici_lock); 551 error = radix_tree_insert(&pag->pag_ici_root, agino, ip); 552 if (unlikely(error)) { 553 WARN_ON(error != -EEXIST); 554 XFS_STATS_INC(mp, xs_ig_dup); 555 error = -EAGAIN; 556 goto out_preload_end; 557 } 558 spin_unlock(&pag->pag_ici_lock); 559 radix_tree_preload_end(); 560 561 *ipp = ip; 562 return 0; 563 564 out_preload_end: 565 spin_unlock(&pag->pag_ici_lock); 566 radix_tree_preload_end(); 567 if (lock_flags) 568 xfs_iunlock(ip, lock_flags); 569 out_destroy: 570 __destroy_inode(VFS_I(ip)); 571 xfs_inode_free(ip); 572 return error; 573 } 574 575 /* 576 * Look up an inode by number in the given file system. 577 * The inode is looked up in the cache held in each AG. 578 * If the inode is found in the cache, initialise the vfs inode 579 * if necessary. 580 * 581 * If it is not in core, read it in from the file system's device, 582 * add it to the cache and initialise the vfs inode. 583 * 584 * The inode is locked according to the value of the lock_flags parameter. 585 * This flag parameter indicates how and if the inode's IO lock and inode lock 586 * should be taken. 587 * 588 * mp -- the mount point structure for the current file system. It points 589 * to the inode hash table. 590 * tp -- a pointer to the current transaction if there is one. This is 591 * simply passed through to the xfs_iread() call. 592 * ino -- the number of the inode desired. This is the unique identifier 593 * within the file system for the inode being requested. 594 * lock_flags -- flags indicating how to lock the inode. See the comment 595 * for xfs_ilock() for a list of valid values. 596 */ 597 int 598 xfs_iget( 599 xfs_mount_t *mp, 600 xfs_trans_t *tp, 601 xfs_ino_t ino, 602 uint flags, 603 uint lock_flags, 604 xfs_inode_t **ipp) 605 { 606 xfs_inode_t *ip; 607 int error; 608 xfs_perag_t *pag; 609 xfs_agino_t agino; 610 611 /* 612 * xfs_reclaim_inode() uses the ILOCK to ensure an inode 613 * doesn't get freed while it's being referenced during a 614 * radix tree traversal here. It assumes this function 615 * aqcuires only the ILOCK (and therefore it has no need to 616 * involve the IOLOCK in this synchronization). 617 */ 618 ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0); 619 620 /* reject inode numbers outside existing AGs */ 621 if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount) 622 return -EINVAL; 623 624 XFS_STATS_INC(mp, xs_ig_attempts); 625 626 /* get the perag structure and ensure that it's inode capable */ 627 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino)); 628 agino = XFS_INO_TO_AGINO(mp, ino); 629 630 again: 631 error = 0; 632 rcu_read_lock(); 633 ip = radix_tree_lookup(&pag->pag_ici_root, agino); 634 635 if (ip) { 636 error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags); 637 if (error) 638 goto out_error_or_again; 639 } else { 640 rcu_read_unlock(); 641 if (flags & XFS_IGET_INCORE) { 642 error = -ENODATA; 643 goto out_error_or_again; 644 } 645 XFS_STATS_INC(mp, xs_ig_missed); 646 647 error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip, 648 flags, lock_flags); 649 if (error) 650 goto out_error_or_again; 651 } 652 xfs_perag_put(pag); 653 654 *ipp = ip; 655 656 /* 657 * If we have a real type for an on-disk inode, we can setup the inode 658 * now. If it's a new inode being created, xfs_ialloc will handle it. 659 */ 660 if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0) 661 xfs_setup_existing_inode(ip); 662 return 0; 663 664 out_error_or_again: 665 if (!(flags & XFS_IGET_INCORE) && error == -EAGAIN) { 666 delay(1); 667 goto again; 668 } 669 xfs_perag_put(pag); 670 return error; 671 } 672 673 /* 674 * "Is this a cached inode that's also allocated?" 675 * 676 * Look up an inode by number in the given file system. If the inode is 677 * in cache and isn't in purgatory, return 1 if the inode is allocated 678 * and 0 if it is not. For all other cases (not in cache, being torn 679 * down, etc.), return a negative error code. 680 * 681 * The caller has to prevent inode allocation and freeing activity, 682 * presumably by locking the AGI buffer. This is to ensure that an 683 * inode cannot transition from allocated to freed until the caller is 684 * ready to allow that. If the inode is in an intermediate state (new, 685 * reclaimable, or being reclaimed), -EAGAIN will be returned; if the 686 * inode is not in the cache, -ENOENT will be returned. The caller must 687 * deal with these scenarios appropriately. 688 * 689 * This is a specialized use case for the online scrubber; if you're 690 * reading this, you probably want xfs_iget. 691 */ 692 int 693 xfs_icache_inode_is_allocated( 694 struct xfs_mount *mp, 695 struct xfs_trans *tp, 696 xfs_ino_t ino, 697 bool *inuse) 698 { 699 struct xfs_inode *ip; 700 int error; 701 702 error = xfs_iget(mp, tp, ino, XFS_IGET_INCORE, 0, &ip); 703 if (error) 704 return error; 705 706 *inuse = !!(VFS_I(ip)->i_mode); 707 IRELE(ip); 708 return 0; 709 } 710 711 /* 712 * The inode lookup is done in batches to keep the amount of lock traffic and 713 * radix tree lookups to a minimum. The batch size is a trade off between 714 * lookup reduction and stack usage. This is in the reclaim path, so we can't 715 * be too greedy. 716 */ 717 #define XFS_LOOKUP_BATCH 32 718 719 STATIC int 720 xfs_inode_ag_walk_grab( 721 struct xfs_inode *ip, 722 int flags) 723 { 724 struct inode *inode = VFS_I(ip); 725 bool newinos = !!(flags & XFS_AGITER_INEW_WAIT); 726 727 ASSERT(rcu_read_lock_held()); 728 729 /* 730 * check for stale RCU freed inode 731 * 732 * If the inode has been reallocated, it doesn't matter if it's not in 733 * the AG we are walking - we are walking for writeback, so if it 734 * passes all the "valid inode" checks and is dirty, then we'll write 735 * it back anyway. If it has been reallocated and still being 736 * initialised, the XFS_INEW check below will catch it. 737 */ 738 spin_lock(&ip->i_flags_lock); 739 if (!ip->i_ino) 740 goto out_unlock_noent; 741 742 /* avoid new or reclaimable inodes. Leave for reclaim code to flush */ 743 if ((!newinos && __xfs_iflags_test(ip, XFS_INEW)) || 744 __xfs_iflags_test(ip, XFS_IRECLAIMABLE | XFS_IRECLAIM)) 745 goto out_unlock_noent; 746 spin_unlock(&ip->i_flags_lock); 747 748 /* nothing to sync during shutdown */ 749 if (XFS_FORCED_SHUTDOWN(ip->i_mount)) 750 return -EFSCORRUPTED; 751 752 /* If we can't grab the inode, it must on it's way to reclaim. */ 753 if (!igrab(inode)) 754 return -ENOENT; 755 756 /* inode is valid */ 757 return 0; 758 759 out_unlock_noent: 760 spin_unlock(&ip->i_flags_lock); 761 return -ENOENT; 762 } 763 764 STATIC int 765 xfs_inode_ag_walk( 766 struct xfs_mount *mp, 767 struct xfs_perag *pag, 768 int (*execute)(struct xfs_inode *ip, int flags, 769 void *args), 770 int flags, 771 void *args, 772 int tag, 773 int iter_flags) 774 { 775 uint32_t first_index; 776 int last_error = 0; 777 int skipped; 778 int done; 779 int nr_found; 780 781 restart: 782 done = 0; 783 skipped = 0; 784 first_index = 0; 785 nr_found = 0; 786 do { 787 struct xfs_inode *batch[XFS_LOOKUP_BATCH]; 788 int error = 0; 789 int i; 790 791 rcu_read_lock(); 792 793 if (tag == -1) 794 nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, 795 (void **)batch, first_index, 796 XFS_LOOKUP_BATCH); 797 else 798 nr_found = radix_tree_gang_lookup_tag( 799 &pag->pag_ici_root, 800 (void **) batch, first_index, 801 XFS_LOOKUP_BATCH, tag); 802 803 if (!nr_found) { 804 rcu_read_unlock(); 805 break; 806 } 807 808 /* 809 * Grab the inodes before we drop the lock. if we found 810 * nothing, nr == 0 and the loop will be skipped. 811 */ 812 for (i = 0; i < nr_found; i++) { 813 struct xfs_inode *ip = batch[i]; 814 815 if (done || xfs_inode_ag_walk_grab(ip, iter_flags)) 816 batch[i] = NULL; 817 818 /* 819 * Update the index for the next lookup. Catch 820 * overflows into the next AG range which can occur if 821 * we have inodes in the last block of the AG and we 822 * are currently pointing to the last inode. 823 * 824 * Because we may see inodes that are from the wrong AG 825 * due to RCU freeing and reallocation, only update the 826 * index if it lies in this AG. It was a race that lead 827 * us to see this inode, so another lookup from the 828 * same index will not find it again. 829 */ 830 if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno) 831 continue; 832 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1); 833 if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) 834 done = 1; 835 } 836 837 /* unlock now we've grabbed the inodes. */ 838 rcu_read_unlock(); 839 840 for (i = 0; i < nr_found; i++) { 841 if (!batch[i]) 842 continue; 843 if ((iter_flags & XFS_AGITER_INEW_WAIT) && 844 xfs_iflags_test(batch[i], XFS_INEW)) 845 xfs_inew_wait(batch[i]); 846 error = execute(batch[i], flags, args); 847 IRELE(batch[i]); 848 if (error == -EAGAIN) { 849 skipped++; 850 continue; 851 } 852 if (error && last_error != -EFSCORRUPTED) 853 last_error = error; 854 } 855 856 /* bail out if the filesystem is corrupted. */ 857 if (error == -EFSCORRUPTED) 858 break; 859 860 cond_resched(); 861 862 } while (nr_found && !done); 863 864 if (skipped) { 865 delay(1); 866 goto restart; 867 } 868 return last_error; 869 } 870 871 /* 872 * Background scanning to trim post-EOF preallocated space. This is queued 873 * based on the 'speculative_prealloc_lifetime' tunable (5m by default). 874 */ 875 void 876 xfs_queue_eofblocks( 877 struct xfs_mount *mp) 878 { 879 rcu_read_lock(); 880 if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_EOFBLOCKS_TAG)) 881 queue_delayed_work(mp->m_eofblocks_workqueue, 882 &mp->m_eofblocks_work, 883 msecs_to_jiffies(xfs_eofb_secs * 1000)); 884 rcu_read_unlock(); 885 } 886 887 void 888 xfs_eofblocks_worker( 889 struct work_struct *work) 890 { 891 struct xfs_mount *mp = container_of(to_delayed_work(work), 892 struct xfs_mount, m_eofblocks_work); 893 xfs_icache_free_eofblocks(mp, NULL); 894 xfs_queue_eofblocks(mp); 895 } 896 897 /* 898 * Background scanning to trim preallocated CoW space. This is queued 899 * based on the 'speculative_cow_prealloc_lifetime' tunable (5m by default). 900 * (We'll just piggyback on the post-EOF prealloc space workqueue.) 901 */ 902 void 903 xfs_queue_cowblocks( 904 struct xfs_mount *mp) 905 { 906 rcu_read_lock(); 907 if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_COWBLOCKS_TAG)) 908 queue_delayed_work(mp->m_eofblocks_workqueue, 909 &mp->m_cowblocks_work, 910 msecs_to_jiffies(xfs_cowb_secs * 1000)); 911 rcu_read_unlock(); 912 } 913 914 void 915 xfs_cowblocks_worker( 916 struct work_struct *work) 917 { 918 struct xfs_mount *mp = container_of(to_delayed_work(work), 919 struct xfs_mount, m_cowblocks_work); 920 xfs_icache_free_cowblocks(mp, NULL); 921 xfs_queue_cowblocks(mp); 922 } 923 924 int 925 xfs_inode_ag_iterator_flags( 926 struct xfs_mount *mp, 927 int (*execute)(struct xfs_inode *ip, int flags, 928 void *args), 929 int flags, 930 void *args, 931 int iter_flags) 932 { 933 struct xfs_perag *pag; 934 int error = 0; 935 int last_error = 0; 936 xfs_agnumber_t ag; 937 938 ag = 0; 939 while ((pag = xfs_perag_get(mp, ag))) { 940 ag = pag->pag_agno + 1; 941 error = xfs_inode_ag_walk(mp, pag, execute, flags, args, -1, 942 iter_flags); 943 xfs_perag_put(pag); 944 if (error) { 945 last_error = error; 946 if (error == -EFSCORRUPTED) 947 break; 948 } 949 } 950 return last_error; 951 } 952 953 int 954 xfs_inode_ag_iterator( 955 struct xfs_mount *mp, 956 int (*execute)(struct xfs_inode *ip, int flags, 957 void *args), 958 int flags, 959 void *args) 960 { 961 return xfs_inode_ag_iterator_flags(mp, execute, flags, args, 0); 962 } 963 964 int 965 xfs_inode_ag_iterator_tag( 966 struct xfs_mount *mp, 967 int (*execute)(struct xfs_inode *ip, int flags, 968 void *args), 969 int flags, 970 void *args, 971 int tag) 972 { 973 struct xfs_perag *pag; 974 int error = 0; 975 int last_error = 0; 976 xfs_agnumber_t ag; 977 978 ag = 0; 979 while ((pag = xfs_perag_get_tag(mp, ag, tag))) { 980 ag = pag->pag_agno + 1; 981 error = xfs_inode_ag_walk(mp, pag, execute, flags, args, tag, 982 0); 983 xfs_perag_put(pag); 984 if (error) { 985 last_error = error; 986 if (error == -EFSCORRUPTED) 987 break; 988 } 989 } 990 return last_error; 991 } 992 993 /* 994 * Grab the inode for reclaim exclusively. 995 * Return 0 if we grabbed it, non-zero otherwise. 996 */ 997 STATIC int 998 xfs_reclaim_inode_grab( 999 struct xfs_inode *ip, 1000 int flags) 1001 { 1002 ASSERT(rcu_read_lock_held()); 1003 1004 /* quick check for stale RCU freed inode */ 1005 if (!ip->i_ino) 1006 return 1; 1007 1008 /* 1009 * If we are asked for non-blocking operation, do unlocked checks to 1010 * see if the inode already is being flushed or in reclaim to avoid 1011 * lock traffic. 1012 */ 1013 if ((flags & SYNC_TRYLOCK) && 1014 __xfs_iflags_test(ip, XFS_IFLOCK | XFS_IRECLAIM)) 1015 return 1; 1016 1017 /* 1018 * The radix tree lock here protects a thread in xfs_iget from racing 1019 * with us starting reclaim on the inode. Once we have the 1020 * XFS_IRECLAIM flag set it will not touch us. 1021 * 1022 * Due to RCU lookup, we may find inodes that have been freed and only 1023 * have XFS_IRECLAIM set. Indeed, we may see reallocated inodes that 1024 * aren't candidates for reclaim at all, so we must check the 1025 * XFS_IRECLAIMABLE is set first before proceeding to reclaim. 1026 */ 1027 spin_lock(&ip->i_flags_lock); 1028 if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) || 1029 __xfs_iflags_test(ip, XFS_IRECLAIM)) { 1030 /* not a reclaim candidate. */ 1031 spin_unlock(&ip->i_flags_lock); 1032 return 1; 1033 } 1034 __xfs_iflags_set(ip, XFS_IRECLAIM); 1035 spin_unlock(&ip->i_flags_lock); 1036 return 0; 1037 } 1038 1039 /* 1040 * Inodes in different states need to be treated differently. The following 1041 * table lists the inode states and the reclaim actions necessary: 1042 * 1043 * inode state iflush ret required action 1044 * --------------- ---------- --------------- 1045 * bad - reclaim 1046 * shutdown EIO unpin and reclaim 1047 * clean, unpinned 0 reclaim 1048 * stale, unpinned 0 reclaim 1049 * clean, pinned(*) 0 requeue 1050 * stale, pinned EAGAIN requeue 1051 * dirty, async - requeue 1052 * dirty, sync 0 reclaim 1053 * 1054 * (*) dgc: I don't think the clean, pinned state is possible but it gets 1055 * handled anyway given the order of checks implemented. 1056 * 1057 * Also, because we get the flush lock first, we know that any inode that has 1058 * been flushed delwri has had the flush completed by the time we check that 1059 * the inode is clean. 1060 * 1061 * Note that because the inode is flushed delayed write by AIL pushing, the 1062 * flush lock may already be held here and waiting on it can result in very 1063 * long latencies. Hence for sync reclaims, where we wait on the flush lock, 1064 * the caller should push the AIL first before trying to reclaim inodes to 1065 * minimise the amount of time spent waiting. For background relaim, we only 1066 * bother to reclaim clean inodes anyway. 1067 * 1068 * Hence the order of actions after gaining the locks should be: 1069 * bad => reclaim 1070 * shutdown => unpin and reclaim 1071 * pinned, async => requeue 1072 * pinned, sync => unpin 1073 * stale => reclaim 1074 * clean => reclaim 1075 * dirty, async => requeue 1076 * dirty, sync => flush, wait and reclaim 1077 */ 1078 STATIC int 1079 xfs_reclaim_inode( 1080 struct xfs_inode *ip, 1081 struct xfs_perag *pag, 1082 int sync_mode) 1083 { 1084 struct xfs_buf *bp = NULL; 1085 xfs_ino_t ino = ip->i_ino; /* for radix_tree_delete */ 1086 int error; 1087 1088 restart: 1089 error = 0; 1090 xfs_ilock(ip, XFS_ILOCK_EXCL); 1091 if (!xfs_iflock_nowait(ip)) { 1092 if (!(sync_mode & SYNC_WAIT)) 1093 goto out; 1094 xfs_iflock(ip); 1095 } 1096 1097 if (XFS_FORCED_SHUTDOWN(ip->i_mount)) { 1098 xfs_iunpin_wait(ip); 1099 /* xfs_iflush_abort() drops the flush lock */ 1100 xfs_iflush_abort(ip, false); 1101 goto reclaim; 1102 } 1103 if (xfs_ipincount(ip)) { 1104 if (!(sync_mode & SYNC_WAIT)) 1105 goto out_ifunlock; 1106 xfs_iunpin_wait(ip); 1107 } 1108 if (xfs_iflags_test(ip, XFS_ISTALE) || xfs_inode_clean(ip)) { 1109 xfs_ifunlock(ip); 1110 goto reclaim; 1111 } 1112 1113 /* 1114 * Never flush out dirty data during non-blocking reclaim, as it would 1115 * just contend with AIL pushing trying to do the same job. 1116 */ 1117 if (!(sync_mode & SYNC_WAIT)) 1118 goto out_ifunlock; 1119 1120 /* 1121 * Now we have an inode that needs flushing. 1122 * 1123 * Note that xfs_iflush will never block on the inode buffer lock, as 1124 * xfs_ifree_cluster() can lock the inode buffer before it locks the 1125 * ip->i_lock, and we are doing the exact opposite here. As a result, 1126 * doing a blocking xfs_imap_to_bp() to get the cluster buffer would 1127 * result in an ABBA deadlock with xfs_ifree_cluster(). 1128 * 1129 * As xfs_ifree_cluser() must gather all inodes that are active in the 1130 * cache to mark them stale, if we hit this case we don't actually want 1131 * to do IO here - we want the inode marked stale so we can simply 1132 * reclaim it. Hence if we get an EAGAIN error here, just unlock the 1133 * inode, back off and try again. Hopefully the next pass through will 1134 * see the stale flag set on the inode. 1135 */ 1136 error = xfs_iflush(ip, &bp); 1137 if (error == -EAGAIN) { 1138 xfs_iunlock(ip, XFS_ILOCK_EXCL); 1139 /* backoff longer than in xfs_ifree_cluster */ 1140 delay(2); 1141 goto restart; 1142 } 1143 1144 if (!error) { 1145 error = xfs_bwrite(bp); 1146 xfs_buf_relse(bp); 1147 } 1148 1149 reclaim: 1150 ASSERT(!xfs_isiflocked(ip)); 1151 1152 /* 1153 * Because we use RCU freeing we need to ensure the inode always appears 1154 * to be reclaimed with an invalid inode number when in the free state. 1155 * We do this as early as possible under the ILOCK so that 1156 * xfs_iflush_cluster() and xfs_ifree_cluster() can be guaranteed to 1157 * detect races with us here. By doing this, we guarantee that once 1158 * xfs_iflush_cluster() or xfs_ifree_cluster() has locked XFS_ILOCK that 1159 * it will see either a valid inode that will serialise correctly, or it 1160 * will see an invalid inode that it can skip. 1161 */ 1162 spin_lock(&ip->i_flags_lock); 1163 ip->i_flags = XFS_IRECLAIM; 1164 ip->i_ino = 0; 1165 spin_unlock(&ip->i_flags_lock); 1166 1167 xfs_iunlock(ip, XFS_ILOCK_EXCL); 1168 1169 XFS_STATS_INC(ip->i_mount, xs_ig_reclaims); 1170 /* 1171 * Remove the inode from the per-AG radix tree. 1172 * 1173 * Because radix_tree_delete won't complain even if the item was never 1174 * added to the tree assert that it's been there before to catch 1175 * problems with the inode life time early on. 1176 */ 1177 spin_lock(&pag->pag_ici_lock); 1178 if (!radix_tree_delete(&pag->pag_ici_root, 1179 XFS_INO_TO_AGINO(ip->i_mount, ino))) 1180 ASSERT(0); 1181 xfs_perag_clear_reclaim_tag(pag); 1182 spin_unlock(&pag->pag_ici_lock); 1183 1184 /* 1185 * Here we do an (almost) spurious inode lock in order to coordinate 1186 * with inode cache radix tree lookups. This is because the lookup 1187 * can reference the inodes in the cache without taking references. 1188 * 1189 * We make that OK here by ensuring that we wait until the inode is 1190 * unlocked after the lookup before we go ahead and free it. 1191 */ 1192 xfs_ilock(ip, XFS_ILOCK_EXCL); 1193 xfs_qm_dqdetach(ip); 1194 xfs_iunlock(ip, XFS_ILOCK_EXCL); 1195 1196 __xfs_inode_free(ip); 1197 return error; 1198 1199 out_ifunlock: 1200 xfs_ifunlock(ip); 1201 out: 1202 xfs_iflags_clear(ip, XFS_IRECLAIM); 1203 xfs_iunlock(ip, XFS_ILOCK_EXCL); 1204 /* 1205 * We could return -EAGAIN here to make reclaim rescan the inode tree in 1206 * a short while. However, this just burns CPU time scanning the tree 1207 * waiting for IO to complete and the reclaim work never goes back to 1208 * the idle state. Instead, return 0 to let the next scheduled 1209 * background reclaim attempt to reclaim the inode again. 1210 */ 1211 return 0; 1212 } 1213 1214 /* 1215 * Walk the AGs and reclaim the inodes in them. Even if the filesystem is 1216 * corrupted, we still want to try to reclaim all the inodes. If we don't, 1217 * then a shut down during filesystem unmount reclaim walk leak all the 1218 * unreclaimed inodes. 1219 */ 1220 STATIC int 1221 xfs_reclaim_inodes_ag( 1222 struct xfs_mount *mp, 1223 int flags, 1224 int *nr_to_scan) 1225 { 1226 struct xfs_perag *pag; 1227 int error = 0; 1228 int last_error = 0; 1229 xfs_agnumber_t ag; 1230 int trylock = flags & SYNC_TRYLOCK; 1231 int skipped; 1232 1233 restart: 1234 ag = 0; 1235 skipped = 0; 1236 while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) { 1237 unsigned long first_index = 0; 1238 int done = 0; 1239 int nr_found = 0; 1240 1241 ag = pag->pag_agno + 1; 1242 1243 if (trylock) { 1244 if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) { 1245 skipped++; 1246 xfs_perag_put(pag); 1247 continue; 1248 } 1249 first_index = pag->pag_ici_reclaim_cursor; 1250 } else 1251 mutex_lock(&pag->pag_ici_reclaim_lock); 1252 1253 do { 1254 struct xfs_inode *batch[XFS_LOOKUP_BATCH]; 1255 int i; 1256 1257 rcu_read_lock(); 1258 nr_found = radix_tree_gang_lookup_tag( 1259 &pag->pag_ici_root, 1260 (void **)batch, first_index, 1261 XFS_LOOKUP_BATCH, 1262 XFS_ICI_RECLAIM_TAG); 1263 if (!nr_found) { 1264 done = 1; 1265 rcu_read_unlock(); 1266 break; 1267 } 1268 1269 /* 1270 * Grab the inodes before we drop the lock. if we found 1271 * nothing, nr == 0 and the loop will be skipped. 1272 */ 1273 for (i = 0; i < nr_found; i++) { 1274 struct xfs_inode *ip = batch[i]; 1275 1276 if (done || xfs_reclaim_inode_grab(ip, flags)) 1277 batch[i] = NULL; 1278 1279 /* 1280 * Update the index for the next lookup. Catch 1281 * overflows into the next AG range which can 1282 * occur if we have inodes in the last block of 1283 * the AG and we are currently pointing to the 1284 * last inode. 1285 * 1286 * Because we may see inodes that are from the 1287 * wrong AG due to RCU freeing and 1288 * reallocation, only update the index if it 1289 * lies in this AG. It was a race that lead us 1290 * to see this inode, so another lookup from 1291 * the same index will not find it again. 1292 */ 1293 if (XFS_INO_TO_AGNO(mp, ip->i_ino) != 1294 pag->pag_agno) 1295 continue; 1296 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1); 1297 if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) 1298 done = 1; 1299 } 1300 1301 /* unlock now we've grabbed the inodes. */ 1302 rcu_read_unlock(); 1303 1304 for (i = 0; i < nr_found; i++) { 1305 if (!batch[i]) 1306 continue; 1307 error = xfs_reclaim_inode(batch[i], pag, flags); 1308 if (error && last_error != -EFSCORRUPTED) 1309 last_error = error; 1310 } 1311 1312 *nr_to_scan -= XFS_LOOKUP_BATCH; 1313 1314 cond_resched(); 1315 1316 } while (nr_found && !done && *nr_to_scan > 0); 1317 1318 if (trylock && !done) 1319 pag->pag_ici_reclaim_cursor = first_index; 1320 else 1321 pag->pag_ici_reclaim_cursor = 0; 1322 mutex_unlock(&pag->pag_ici_reclaim_lock); 1323 xfs_perag_put(pag); 1324 } 1325 1326 /* 1327 * if we skipped any AG, and we still have scan count remaining, do 1328 * another pass this time using blocking reclaim semantics (i.e 1329 * waiting on the reclaim locks and ignoring the reclaim cursors). This 1330 * ensure that when we get more reclaimers than AGs we block rather 1331 * than spin trying to execute reclaim. 1332 */ 1333 if (skipped && (flags & SYNC_WAIT) && *nr_to_scan > 0) { 1334 trylock = 0; 1335 goto restart; 1336 } 1337 return last_error; 1338 } 1339 1340 int 1341 xfs_reclaim_inodes( 1342 xfs_mount_t *mp, 1343 int mode) 1344 { 1345 int nr_to_scan = INT_MAX; 1346 1347 return xfs_reclaim_inodes_ag(mp, mode, &nr_to_scan); 1348 } 1349 1350 /* 1351 * Scan a certain number of inodes for reclaim. 1352 * 1353 * When called we make sure that there is a background (fast) inode reclaim in 1354 * progress, while we will throttle the speed of reclaim via doing synchronous 1355 * reclaim of inodes. That means if we come across dirty inodes, we wait for 1356 * them to be cleaned, which we hope will not be very long due to the 1357 * background walker having already kicked the IO off on those dirty inodes. 1358 */ 1359 long 1360 xfs_reclaim_inodes_nr( 1361 struct xfs_mount *mp, 1362 int nr_to_scan) 1363 { 1364 /* kick background reclaimer and push the AIL */ 1365 xfs_reclaim_work_queue(mp); 1366 xfs_ail_push_all(mp->m_ail); 1367 1368 return xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT, &nr_to_scan); 1369 } 1370 1371 /* 1372 * Return the number of reclaimable inodes in the filesystem for 1373 * the shrinker to determine how much to reclaim. 1374 */ 1375 int 1376 xfs_reclaim_inodes_count( 1377 struct xfs_mount *mp) 1378 { 1379 struct xfs_perag *pag; 1380 xfs_agnumber_t ag = 0; 1381 int reclaimable = 0; 1382 1383 while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) { 1384 ag = pag->pag_agno + 1; 1385 reclaimable += pag->pag_ici_reclaimable; 1386 xfs_perag_put(pag); 1387 } 1388 return reclaimable; 1389 } 1390 1391 STATIC int 1392 xfs_inode_match_id( 1393 struct xfs_inode *ip, 1394 struct xfs_eofblocks *eofb) 1395 { 1396 if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) && 1397 !uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid)) 1398 return 0; 1399 1400 if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) && 1401 !gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid)) 1402 return 0; 1403 1404 if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) && 1405 xfs_get_projid(ip) != eofb->eof_prid) 1406 return 0; 1407 1408 return 1; 1409 } 1410 1411 /* 1412 * A union-based inode filtering algorithm. Process the inode if any of the 1413 * criteria match. This is for global/internal scans only. 1414 */ 1415 STATIC int 1416 xfs_inode_match_id_union( 1417 struct xfs_inode *ip, 1418 struct xfs_eofblocks *eofb) 1419 { 1420 if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) && 1421 uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid)) 1422 return 1; 1423 1424 if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) && 1425 gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid)) 1426 return 1; 1427 1428 if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) && 1429 xfs_get_projid(ip) == eofb->eof_prid) 1430 return 1; 1431 1432 return 0; 1433 } 1434 1435 STATIC int 1436 xfs_inode_free_eofblocks( 1437 struct xfs_inode *ip, 1438 int flags, 1439 void *args) 1440 { 1441 int ret = 0; 1442 struct xfs_eofblocks *eofb = args; 1443 int match; 1444 1445 if (!xfs_can_free_eofblocks(ip, false)) { 1446 /* inode could be preallocated or append-only */ 1447 trace_xfs_inode_free_eofblocks_invalid(ip); 1448 xfs_inode_clear_eofblocks_tag(ip); 1449 return 0; 1450 } 1451 1452 /* 1453 * If the mapping is dirty the operation can block and wait for some 1454 * time. Unless we are waiting, skip it. 1455 */ 1456 if (!(flags & SYNC_WAIT) && 1457 mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY)) 1458 return 0; 1459 1460 if (eofb) { 1461 if (eofb->eof_flags & XFS_EOF_FLAGS_UNION) 1462 match = xfs_inode_match_id_union(ip, eofb); 1463 else 1464 match = xfs_inode_match_id(ip, eofb); 1465 if (!match) 1466 return 0; 1467 1468 /* skip the inode if the file size is too small */ 1469 if (eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE && 1470 XFS_ISIZE(ip) < eofb->eof_min_file_size) 1471 return 0; 1472 } 1473 1474 /* 1475 * If the caller is waiting, return -EAGAIN to keep the background 1476 * scanner moving and revisit the inode in a subsequent pass. 1477 */ 1478 if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) { 1479 if (flags & SYNC_WAIT) 1480 ret = -EAGAIN; 1481 return ret; 1482 } 1483 ret = xfs_free_eofblocks(ip); 1484 xfs_iunlock(ip, XFS_IOLOCK_EXCL); 1485 1486 return ret; 1487 } 1488 1489 static int 1490 __xfs_icache_free_eofblocks( 1491 struct xfs_mount *mp, 1492 struct xfs_eofblocks *eofb, 1493 int (*execute)(struct xfs_inode *ip, int flags, 1494 void *args), 1495 int tag) 1496 { 1497 int flags = SYNC_TRYLOCK; 1498 1499 if (eofb && (eofb->eof_flags & XFS_EOF_FLAGS_SYNC)) 1500 flags = SYNC_WAIT; 1501 1502 return xfs_inode_ag_iterator_tag(mp, execute, flags, 1503 eofb, tag); 1504 } 1505 1506 int 1507 xfs_icache_free_eofblocks( 1508 struct xfs_mount *mp, 1509 struct xfs_eofblocks *eofb) 1510 { 1511 return __xfs_icache_free_eofblocks(mp, eofb, xfs_inode_free_eofblocks, 1512 XFS_ICI_EOFBLOCKS_TAG); 1513 } 1514 1515 /* 1516 * Run eofblocks scans on the quotas applicable to the inode. For inodes with 1517 * multiple quotas, we don't know exactly which quota caused an allocation 1518 * failure. We make a best effort by including each quota under low free space 1519 * conditions (less than 1% free space) in the scan. 1520 */ 1521 static int 1522 __xfs_inode_free_quota_eofblocks( 1523 struct xfs_inode *ip, 1524 int (*execute)(struct xfs_mount *mp, 1525 struct xfs_eofblocks *eofb)) 1526 { 1527 int scan = 0; 1528 struct xfs_eofblocks eofb = {0}; 1529 struct xfs_dquot *dq; 1530 1531 /* 1532 * Run a sync scan to increase effectiveness and use the union filter to 1533 * cover all applicable quotas in a single scan. 1534 */ 1535 eofb.eof_flags = XFS_EOF_FLAGS_UNION|XFS_EOF_FLAGS_SYNC; 1536 1537 if (XFS_IS_UQUOTA_ENFORCED(ip->i_mount)) { 1538 dq = xfs_inode_dquot(ip, XFS_DQ_USER); 1539 if (dq && xfs_dquot_lowsp(dq)) { 1540 eofb.eof_uid = VFS_I(ip)->i_uid; 1541 eofb.eof_flags |= XFS_EOF_FLAGS_UID; 1542 scan = 1; 1543 } 1544 } 1545 1546 if (XFS_IS_GQUOTA_ENFORCED(ip->i_mount)) { 1547 dq = xfs_inode_dquot(ip, XFS_DQ_GROUP); 1548 if (dq && xfs_dquot_lowsp(dq)) { 1549 eofb.eof_gid = VFS_I(ip)->i_gid; 1550 eofb.eof_flags |= XFS_EOF_FLAGS_GID; 1551 scan = 1; 1552 } 1553 } 1554 1555 if (scan) 1556 execute(ip->i_mount, &eofb); 1557 1558 return scan; 1559 } 1560 1561 int 1562 xfs_inode_free_quota_eofblocks( 1563 struct xfs_inode *ip) 1564 { 1565 return __xfs_inode_free_quota_eofblocks(ip, xfs_icache_free_eofblocks); 1566 } 1567 1568 static inline unsigned long 1569 xfs_iflag_for_tag( 1570 int tag) 1571 { 1572 switch (tag) { 1573 case XFS_ICI_EOFBLOCKS_TAG: 1574 return XFS_IEOFBLOCKS; 1575 case XFS_ICI_COWBLOCKS_TAG: 1576 return XFS_ICOWBLOCKS; 1577 default: 1578 ASSERT(0); 1579 return 0; 1580 } 1581 } 1582 1583 static void 1584 __xfs_inode_set_blocks_tag( 1585 xfs_inode_t *ip, 1586 void (*execute)(struct xfs_mount *mp), 1587 void (*set_tp)(struct xfs_mount *mp, xfs_agnumber_t agno, 1588 int error, unsigned long caller_ip), 1589 int tag) 1590 { 1591 struct xfs_mount *mp = ip->i_mount; 1592 struct xfs_perag *pag; 1593 int tagged; 1594 1595 /* 1596 * Don't bother locking the AG and looking up in the radix trees 1597 * if we already know that we have the tag set. 1598 */ 1599 if (ip->i_flags & xfs_iflag_for_tag(tag)) 1600 return; 1601 spin_lock(&ip->i_flags_lock); 1602 ip->i_flags |= xfs_iflag_for_tag(tag); 1603 spin_unlock(&ip->i_flags_lock); 1604 1605 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); 1606 spin_lock(&pag->pag_ici_lock); 1607 1608 tagged = radix_tree_tagged(&pag->pag_ici_root, tag); 1609 radix_tree_tag_set(&pag->pag_ici_root, 1610 XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), tag); 1611 if (!tagged) { 1612 /* propagate the eofblocks tag up into the perag radix tree */ 1613 spin_lock(&ip->i_mount->m_perag_lock); 1614 radix_tree_tag_set(&ip->i_mount->m_perag_tree, 1615 XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino), 1616 tag); 1617 spin_unlock(&ip->i_mount->m_perag_lock); 1618 1619 /* kick off background trimming */ 1620 execute(ip->i_mount); 1621 1622 set_tp(ip->i_mount, pag->pag_agno, -1, _RET_IP_); 1623 } 1624 1625 spin_unlock(&pag->pag_ici_lock); 1626 xfs_perag_put(pag); 1627 } 1628 1629 void 1630 xfs_inode_set_eofblocks_tag( 1631 xfs_inode_t *ip) 1632 { 1633 trace_xfs_inode_set_eofblocks_tag(ip); 1634 return __xfs_inode_set_blocks_tag(ip, xfs_queue_eofblocks, 1635 trace_xfs_perag_set_eofblocks, 1636 XFS_ICI_EOFBLOCKS_TAG); 1637 } 1638 1639 static void 1640 __xfs_inode_clear_blocks_tag( 1641 xfs_inode_t *ip, 1642 void (*clear_tp)(struct xfs_mount *mp, xfs_agnumber_t agno, 1643 int error, unsigned long caller_ip), 1644 int tag) 1645 { 1646 struct xfs_mount *mp = ip->i_mount; 1647 struct xfs_perag *pag; 1648 1649 spin_lock(&ip->i_flags_lock); 1650 ip->i_flags &= ~xfs_iflag_for_tag(tag); 1651 spin_unlock(&ip->i_flags_lock); 1652 1653 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); 1654 spin_lock(&pag->pag_ici_lock); 1655 1656 radix_tree_tag_clear(&pag->pag_ici_root, 1657 XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), tag); 1658 if (!radix_tree_tagged(&pag->pag_ici_root, tag)) { 1659 /* clear the eofblocks tag from the perag radix tree */ 1660 spin_lock(&ip->i_mount->m_perag_lock); 1661 radix_tree_tag_clear(&ip->i_mount->m_perag_tree, 1662 XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino), 1663 tag); 1664 spin_unlock(&ip->i_mount->m_perag_lock); 1665 clear_tp(ip->i_mount, pag->pag_agno, -1, _RET_IP_); 1666 } 1667 1668 spin_unlock(&pag->pag_ici_lock); 1669 xfs_perag_put(pag); 1670 } 1671 1672 void 1673 xfs_inode_clear_eofblocks_tag( 1674 xfs_inode_t *ip) 1675 { 1676 trace_xfs_inode_clear_eofblocks_tag(ip); 1677 return __xfs_inode_clear_blocks_tag(ip, 1678 trace_xfs_perag_clear_eofblocks, XFS_ICI_EOFBLOCKS_TAG); 1679 } 1680 1681 /* 1682 * Set ourselves up to free CoW blocks from this file. If it's already clean 1683 * then we can bail out quickly, but otherwise we must back off if the file 1684 * is undergoing some kind of write. 1685 */ 1686 static bool 1687 xfs_prep_free_cowblocks( 1688 struct xfs_inode *ip, 1689 struct xfs_ifork *ifp) 1690 { 1691 /* 1692 * Just clear the tag if we have an empty cow fork or none at all. It's 1693 * possible the inode was fully unshared since it was originally tagged. 1694 */ 1695 if (!xfs_is_reflink_inode(ip) || !ifp->if_bytes) { 1696 trace_xfs_inode_free_cowblocks_invalid(ip); 1697 xfs_inode_clear_cowblocks_tag(ip); 1698 return false; 1699 } 1700 1701 /* 1702 * If the mapping is dirty or under writeback we cannot touch the 1703 * CoW fork. Leave it alone if we're in the midst of a directio. 1704 */ 1705 if ((VFS_I(ip)->i_state & I_DIRTY_PAGES) || 1706 mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) || 1707 mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_WRITEBACK) || 1708 atomic_read(&VFS_I(ip)->i_dio_count)) 1709 return false; 1710 1711 return true; 1712 } 1713 1714 /* 1715 * Automatic CoW Reservation Freeing 1716 * 1717 * These functions automatically garbage collect leftover CoW reservations 1718 * that were made on behalf of a cowextsize hint when we start to run out 1719 * of quota or when the reservations sit around for too long. If the file 1720 * has dirty pages or is undergoing writeback, its CoW reservations will 1721 * be retained. 1722 * 1723 * The actual garbage collection piggybacks off the same code that runs 1724 * the speculative EOF preallocation garbage collector. 1725 */ 1726 STATIC int 1727 xfs_inode_free_cowblocks( 1728 struct xfs_inode *ip, 1729 int flags, 1730 void *args) 1731 { 1732 struct xfs_eofblocks *eofb = args; 1733 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK); 1734 int match; 1735 int ret = 0; 1736 1737 if (!xfs_prep_free_cowblocks(ip, ifp)) 1738 return 0; 1739 1740 if (eofb) { 1741 if (eofb->eof_flags & XFS_EOF_FLAGS_UNION) 1742 match = xfs_inode_match_id_union(ip, eofb); 1743 else 1744 match = xfs_inode_match_id(ip, eofb); 1745 if (!match) 1746 return 0; 1747 1748 /* skip the inode if the file size is too small */ 1749 if (eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE && 1750 XFS_ISIZE(ip) < eofb->eof_min_file_size) 1751 return 0; 1752 } 1753 1754 /* Free the CoW blocks */ 1755 xfs_ilock(ip, XFS_IOLOCK_EXCL); 1756 xfs_ilock(ip, XFS_MMAPLOCK_EXCL); 1757 1758 /* 1759 * Check again, nobody else should be able to dirty blocks or change 1760 * the reflink iflag now that we have the first two locks held. 1761 */ 1762 if (xfs_prep_free_cowblocks(ip, ifp)) 1763 ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false); 1764 1765 xfs_iunlock(ip, XFS_MMAPLOCK_EXCL); 1766 xfs_iunlock(ip, XFS_IOLOCK_EXCL); 1767 1768 return ret; 1769 } 1770 1771 int 1772 xfs_icache_free_cowblocks( 1773 struct xfs_mount *mp, 1774 struct xfs_eofblocks *eofb) 1775 { 1776 return __xfs_icache_free_eofblocks(mp, eofb, xfs_inode_free_cowblocks, 1777 XFS_ICI_COWBLOCKS_TAG); 1778 } 1779 1780 int 1781 xfs_inode_free_quota_cowblocks( 1782 struct xfs_inode *ip) 1783 { 1784 return __xfs_inode_free_quota_eofblocks(ip, xfs_icache_free_cowblocks); 1785 } 1786 1787 void 1788 xfs_inode_set_cowblocks_tag( 1789 xfs_inode_t *ip) 1790 { 1791 trace_xfs_inode_set_cowblocks_tag(ip); 1792 return __xfs_inode_set_blocks_tag(ip, xfs_queue_cowblocks, 1793 trace_xfs_perag_set_cowblocks, 1794 XFS_ICI_COWBLOCKS_TAG); 1795 } 1796 1797 void 1798 xfs_inode_clear_cowblocks_tag( 1799 xfs_inode_t *ip) 1800 { 1801 trace_xfs_inode_clear_cowblocks_tag(ip); 1802 return __xfs_inode_clear_blocks_tag(ip, 1803 trace_xfs_perag_clear_cowblocks, XFS_ICI_COWBLOCKS_TAG); 1804 } 1805