1 /* 2 * Copyright (c) 2000-2005 Silicon Graphics, Inc. 3 * All Rights Reserved. 4 * 5 * This program is free software; you can redistribute it and/or 6 * modify it under the terms of the GNU General Public License as 7 * published by the Free Software Foundation. 8 * 9 * This program is distributed in the hope that it would be useful, 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * GNU General Public License for more details. 13 * 14 * You should have received a copy of the GNU General Public License 15 * along with this program; if not, write the Free Software Foundation, 16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 17 */ 18 #include "xfs.h" 19 #include "xfs_fs.h" 20 #include "xfs_format.h" 21 #include "xfs_log_format.h" 22 #include "xfs_trans_resv.h" 23 #include "xfs_sb.h" 24 #include "xfs_mount.h" 25 #include "xfs_inode.h" 26 #include "xfs_error.h" 27 #include "xfs_trans.h" 28 #include "xfs_trans_priv.h" 29 #include "xfs_inode_item.h" 30 #include "xfs_quota.h" 31 #include "xfs_trace.h" 32 #include "xfs_icache.h" 33 #include "xfs_bmap_util.h" 34 #include "xfs_dquot_item.h" 35 #include "xfs_dquot.h" 36 #include "xfs_reflink.h" 37 38 #include <linux/kthread.h> 39 #include <linux/freezer.h> 40 #include <linux/iversion.h> 41 42 /* 43 * Allocate and initialise an xfs_inode. 44 */ 45 struct xfs_inode * 46 xfs_inode_alloc( 47 struct xfs_mount *mp, 48 xfs_ino_t ino) 49 { 50 struct xfs_inode *ip; 51 52 /* 53 * if this didn't occur in transactions, we could use 54 * KM_MAYFAIL and return NULL here on ENOMEM. Set the 55 * code up to do this anyway. 56 */ 57 ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP); 58 if (!ip) 59 return NULL; 60 if (inode_init_always(mp->m_super, VFS_I(ip))) { 61 kmem_zone_free(xfs_inode_zone, ip); 62 return NULL; 63 } 64 65 /* VFS doesn't initialise i_mode! */ 66 VFS_I(ip)->i_mode = 0; 67 68 XFS_STATS_INC(mp, vn_active); 69 ASSERT(atomic_read(&ip->i_pincount) == 0); 70 ASSERT(!xfs_isiflocked(ip)); 71 ASSERT(ip->i_ino == 0); 72 73 /* initialise the xfs inode */ 74 ip->i_ino = ino; 75 ip->i_mount = mp; 76 memset(&ip->i_imap, 0, sizeof(struct xfs_imap)); 77 ip->i_afp = NULL; 78 ip->i_cowfp = NULL; 79 ip->i_cnextents = 0; 80 ip->i_cformat = XFS_DINODE_FMT_EXTENTS; 81 memset(&ip->i_df, 0, sizeof(xfs_ifork_t)); 82 ip->i_flags = 0; 83 ip->i_delayed_blks = 0; 84 memset(&ip->i_d, 0, sizeof(ip->i_d)); 85 86 return ip; 87 } 88 89 STATIC void 90 xfs_inode_free_callback( 91 struct rcu_head *head) 92 { 93 struct inode *inode = container_of(head, struct inode, i_rcu); 94 struct xfs_inode *ip = XFS_I(inode); 95 96 switch (VFS_I(ip)->i_mode & S_IFMT) { 97 case S_IFREG: 98 case S_IFDIR: 99 case S_IFLNK: 100 xfs_idestroy_fork(ip, XFS_DATA_FORK); 101 break; 102 } 103 104 if (ip->i_afp) 105 xfs_idestroy_fork(ip, XFS_ATTR_FORK); 106 if (ip->i_cowfp) 107 xfs_idestroy_fork(ip, XFS_COW_FORK); 108 109 if (ip->i_itemp) { 110 ASSERT(!(ip->i_itemp->ili_item.li_flags & XFS_LI_IN_AIL)); 111 xfs_inode_item_destroy(ip); 112 ip->i_itemp = NULL; 113 } 114 115 kmem_zone_free(xfs_inode_zone, ip); 116 } 117 118 static void 119 __xfs_inode_free( 120 struct xfs_inode *ip) 121 { 122 /* asserts to verify all state is correct here */ 123 ASSERT(atomic_read(&ip->i_pincount) == 0); 124 XFS_STATS_DEC(ip->i_mount, vn_active); 125 126 call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback); 127 } 128 129 void 130 xfs_inode_free( 131 struct xfs_inode *ip) 132 { 133 ASSERT(!xfs_isiflocked(ip)); 134 135 /* 136 * Because we use RCU freeing we need to ensure the inode always 137 * appears to be reclaimed with an invalid inode number when in the 138 * free state. The ip->i_flags_lock provides the barrier against lookup 139 * races. 140 */ 141 spin_lock(&ip->i_flags_lock); 142 ip->i_flags = XFS_IRECLAIM; 143 ip->i_ino = 0; 144 spin_unlock(&ip->i_flags_lock); 145 146 __xfs_inode_free(ip); 147 } 148 149 /* 150 * Queue a new inode reclaim pass if there are reclaimable inodes and there 151 * isn't a reclaim pass already in progress. By default it runs every 5s based 152 * on the xfs periodic sync default of 30s. Perhaps this should have it's own 153 * tunable, but that can be done if this method proves to be ineffective or too 154 * aggressive. 155 */ 156 static void 157 xfs_reclaim_work_queue( 158 struct xfs_mount *mp) 159 { 160 161 rcu_read_lock(); 162 if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) { 163 queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work, 164 msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10)); 165 } 166 rcu_read_unlock(); 167 } 168 169 /* 170 * This is a fast pass over the inode cache to try to get reclaim moving on as 171 * many inodes as possible in a short period of time. It kicks itself every few 172 * seconds, as well as being kicked by the inode cache shrinker when memory 173 * goes low. It scans as quickly as possible avoiding locked inodes or those 174 * already being flushed, and once done schedules a future pass. 175 */ 176 void 177 xfs_reclaim_worker( 178 struct work_struct *work) 179 { 180 struct xfs_mount *mp = container_of(to_delayed_work(work), 181 struct xfs_mount, m_reclaim_work); 182 183 xfs_reclaim_inodes(mp, SYNC_TRYLOCK); 184 xfs_reclaim_work_queue(mp); 185 } 186 187 static void 188 xfs_perag_set_reclaim_tag( 189 struct xfs_perag *pag) 190 { 191 struct xfs_mount *mp = pag->pag_mount; 192 193 lockdep_assert_held(&pag->pag_ici_lock); 194 if (pag->pag_ici_reclaimable++) 195 return; 196 197 /* propagate the reclaim tag up into the perag radix tree */ 198 spin_lock(&mp->m_perag_lock); 199 radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno, 200 XFS_ICI_RECLAIM_TAG); 201 spin_unlock(&mp->m_perag_lock); 202 203 /* schedule periodic background inode reclaim */ 204 xfs_reclaim_work_queue(mp); 205 206 trace_xfs_perag_set_reclaim(mp, pag->pag_agno, -1, _RET_IP_); 207 } 208 209 static void 210 xfs_perag_clear_reclaim_tag( 211 struct xfs_perag *pag) 212 { 213 struct xfs_mount *mp = pag->pag_mount; 214 215 lockdep_assert_held(&pag->pag_ici_lock); 216 if (--pag->pag_ici_reclaimable) 217 return; 218 219 /* clear the reclaim tag from the perag radix tree */ 220 spin_lock(&mp->m_perag_lock); 221 radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno, 222 XFS_ICI_RECLAIM_TAG); 223 spin_unlock(&mp->m_perag_lock); 224 trace_xfs_perag_clear_reclaim(mp, pag->pag_agno, -1, _RET_IP_); 225 } 226 227 228 /* 229 * We set the inode flag atomically with the radix tree tag. 230 * Once we get tag lookups on the radix tree, this inode flag 231 * can go away. 232 */ 233 void 234 xfs_inode_set_reclaim_tag( 235 struct xfs_inode *ip) 236 { 237 struct xfs_mount *mp = ip->i_mount; 238 struct xfs_perag *pag; 239 240 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); 241 spin_lock(&pag->pag_ici_lock); 242 spin_lock(&ip->i_flags_lock); 243 244 radix_tree_tag_set(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino), 245 XFS_ICI_RECLAIM_TAG); 246 xfs_perag_set_reclaim_tag(pag); 247 __xfs_iflags_set(ip, XFS_IRECLAIMABLE); 248 249 spin_unlock(&ip->i_flags_lock); 250 spin_unlock(&pag->pag_ici_lock); 251 xfs_perag_put(pag); 252 } 253 254 STATIC void 255 xfs_inode_clear_reclaim_tag( 256 struct xfs_perag *pag, 257 xfs_ino_t ino) 258 { 259 radix_tree_tag_clear(&pag->pag_ici_root, 260 XFS_INO_TO_AGINO(pag->pag_mount, ino), 261 XFS_ICI_RECLAIM_TAG); 262 xfs_perag_clear_reclaim_tag(pag); 263 } 264 265 static void 266 xfs_inew_wait( 267 struct xfs_inode *ip) 268 { 269 wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_INEW_BIT); 270 DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_INEW_BIT); 271 272 do { 273 prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE); 274 if (!xfs_iflags_test(ip, XFS_INEW)) 275 break; 276 schedule(); 277 } while (true); 278 finish_wait(wq, &wait.wq_entry); 279 } 280 281 /* 282 * When we recycle a reclaimable inode, we need to re-initialise the VFS inode 283 * part of the structure. This is made more complex by the fact we store 284 * information about the on-disk values in the VFS inode and so we can't just 285 * overwrite the values unconditionally. Hence we save the parameters we 286 * need to retain across reinitialisation, and rewrite them into the VFS inode 287 * after reinitialisation even if it fails. 288 */ 289 static int 290 xfs_reinit_inode( 291 struct xfs_mount *mp, 292 struct inode *inode) 293 { 294 int error; 295 uint32_t nlink = inode->i_nlink; 296 uint32_t generation = inode->i_generation; 297 uint64_t version = inode_peek_iversion(inode); 298 umode_t mode = inode->i_mode; 299 dev_t dev = inode->i_rdev; 300 301 error = inode_init_always(mp->m_super, inode); 302 303 set_nlink(inode, nlink); 304 inode->i_generation = generation; 305 inode_set_iversion_queried(inode, version); 306 inode->i_mode = mode; 307 inode->i_rdev = dev; 308 return error; 309 } 310 311 /* 312 * Check the validity of the inode we just found it the cache 313 */ 314 static int 315 xfs_iget_cache_hit( 316 struct xfs_perag *pag, 317 struct xfs_inode *ip, 318 xfs_ino_t ino, 319 int flags, 320 int lock_flags) __releases(RCU) 321 { 322 struct inode *inode = VFS_I(ip); 323 struct xfs_mount *mp = ip->i_mount; 324 int error; 325 326 /* 327 * check for re-use of an inode within an RCU grace period due to the 328 * radix tree nodes not being updated yet. We monitor for this by 329 * setting the inode number to zero before freeing the inode structure. 330 * If the inode has been reallocated and set up, then the inode number 331 * will not match, so check for that, too. 332 */ 333 spin_lock(&ip->i_flags_lock); 334 if (ip->i_ino != ino) { 335 trace_xfs_iget_skip(ip); 336 XFS_STATS_INC(mp, xs_ig_frecycle); 337 error = -EAGAIN; 338 goto out_error; 339 } 340 341 342 /* 343 * If we are racing with another cache hit that is currently 344 * instantiating this inode or currently recycling it out of 345 * reclaimabe state, wait for the initialisation to complete 346 * before continuing. 347 * 348 * XXX(hch): eventually we should do something equivalent to 349 * wait_on_inode to wait for these flags to be cleared 350 * instead of polling for it. 351 */ 352 if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) { 353 trace_xfs_iget_skip(ip); 354 XFS_STATS_INC(mp, xs_ig_frecycle); 355 error = -EAGAIN; 356 goto out_error; 357 } 358 359 /* 360 * If lookup is racing with unlink return an error immediately. 361 */ 362 if (VFS_I(ip)->i_mode == 0 && !(flags & XFS_IGET_CREATE)) { 363 error = -ENOENT; 364 goto out_error; 365 } 366 367 /* 368 * If IRECLAIMABLE is set, we've torn down the VFS inode already. 369 * Need to carefully get it back into useable state. 370 */ 371 if (ip->i_flags & XFS_IRECLAIMABLE) { 372 trace_xfs_iget_reclaim(ip); 373 374 if (flags & XFS_IGET_INCORE) { 375 error = -EAGAIN; 376 goto out_error; 377 } 378 379 /* 380 * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode 381 * from stomping over us while we recycle the inode. We can't 382 * clear the radix tree reclaimable tag yet as it requires 383 * pag_ici_lock to be held exclusive. 384 */ 385 ip->i_flags |= XFS_IRECLAIM; 386 387 spin_unlock(&ip->i_flags_lock); 388 rcu_read_unlock(); 389 390 error = xfs_reinit_inode(mp, inode); 391 if (error) { 392 bool wake; 393 /* 394 * Re-initializing the inode failed, and we are in deep 395 * trouble. Try to re-add it to the reclaim list. 396 */ 397 rcu_read_lock(); 398 spin_lock(&ip->i_flags_lock); 399 wake = !!__xfs_iflags_test(ip, XFS_INEW); 400 ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM); 401 if (wake) 402 wake_up_bit(&ip->i_flags, __XFS_INEW_BIT); 403 ASSERT(ip->i_flags & XFS_IRECLAIMABLE); 404 trace_xfs_iget_reclaim_fail(ip); 405 goto out_error; 406 } 407 408 spin_lock(&pag->pag_ici_lock); 409 spin_lock(&ip->i_flags_lock); 410 411 /* 412 * Clear the per-lifetime state in the inode as we are now 413 * effectively a new inode and need to return to the initial 414 * state before reuse occurs. 415 */ 416 ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS; 417 ip->i_flags |= XFS_INEW; 418 xfs_inode_clear_reclaim_tag(pag, ip->i_ino); 419 inode->i_state = I_NEW; 420 421 ASSERT(!rwsem_is_locked(&inode->i_rwsem)); 422 init_rwsem(&inode->i_rwsem); 423 424 spin_unlock(&ip->i_flags_lock); 425 spin_unlock(&pag->pag_ici_lock); 426 } else { 427 /* If the VFS inode is being torn down, pause and try again. */ 428 if (!igrab(inode)) { 429 trace_xfs_iget_skip(ip); 430 error = -EAGAIN; 431 goto out_error; 432 } 433 434 /* We've got a live one. */ 435 spin_unlock(&ip->i_flags_lock); 436 rcu_read_unlock(); 437 trace_xfs_iget_hit(ip); 438 } 439 440 if (lock_flags != 0) 441 xfs_ilock(ip, lock_flags); 442 443 if (!(flags & XFS_IGET_INCORE)) 444 xfs_iflags_clear(ip, XFS_ISTALE | XFS_IDONTCACHE); 445 XFS_STATS_INC(mp, xs_ig_found); 446 447 return 0; 448 449 out_error: 450 spin_unlock(&ip->i_flags_lock); 451 rcu_read_unlock(); 452 return error; 453 } 454 455 456 static int 457 xfs_iget_cache_miss( 458 struct xfs_mount *mp, 459 struct xfs_perag *pag, 460 xfs_trans_t *tp, 461 xfs_ino_t ino, 462 struct xfs_inode **ipp, 463 int flags, 464 int lock_flags) 465 { 466 struct xfs_inode *ip; 467 int error; 468 xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ino); 469 int iflags; 470 471 ip = xfs_inode_alloc(mp, ino); 472 if (!ip) 473 return -ENOMEM; 474 475 error = xfs_iread(mp, tp, ip, flags); 476 if (error) 477 goto out_destroy; 478 479 if (!xfs_inode_verify_forks(ip)) { 480 error = -EFSCORRUPTED; 481 goto out_destroy; 482 } 483 484 trace_xfs_iget_miss(ip); 485 486 if ((VFS_I(ip)->i_mode == 0) && !(flags & XFS_IGET_CREATE)) { 487 error = -ENOENT; 488 goto out_destroy; 489 } 490 491 /* 492 * Preload the radix tree so we can insert safely under the 493 * write spinlock. Note that we cannot sleep inside the preload 494 * region. Since we can be called from transaction context, don't 495 * recurse into the file system. 496 */ 497 if (radix_tree_preload(GFP_NOFS)) { 498 error = -EAGAIN; 499 goto out_destroy; 500 } 501 502 /* 503 * Because the inode hasn't been added to the radix-tree yet it can't 504 * be found by another thread, so we can do the non-sleeping lock here. 505 */ 506 if (lock_flags) { 507 if (!xfs_ilock_nowait(ip, lock_flags)) 508 BUG(); 509 } 510 511 /* 512 * These values must be set before inserting the inode into the radix 513 * tree as the moment it is inserted a concurrent lookup (allowed by the 514 * RCU locking mechanism) can find it and that lookup must see that this 515 * is an inode currently under construction (i.e. that XFS_INEW is set). 516 * The ip->i_flags_lock that protects the XFS_INEW flag forms the 517 * memory barrier that ensures this detection works correctly at lookup 518 * time. 519 */ 520 iflags = XFS_INEW; 521 if (flags & XFS_IGET_DONTCACHE) 522 iflags |= XFS_IDONTCACHE; 523 ip->i_udquot = NULL; 524 ip->i_gdquot = NULL; 525 ip->i_pdquot = NULL; 526 xfs_iflags_set(ip, iflags); 527 528 /* insert the new inode */ 529 spin_lock(&pag->pag_ici_lock); 530 error = radix_tree_insert(&pag->pag_ici_root, agino, ip); 531 if (unlikely(error)) { 532 WARN_ON(error != -EEXIST); 533 XFS_STATS_INC(mp, xs_ig_dup); 534 error = -EAGAIN; 535 goto out_preload_end; 536 } 537 spin_unlock(&pag->pag_ici_lock); 538 radix_tree_preload_end(); 539 540 *ipp = ip; 541 return 0; 542 543 out_preload_end: 544 spin_unlock(&pag->pag_ici_lock); 545 radix_tree_preload_end(); 546 if (lock_flags) 547 xfs_iunlock(ip, lock_flags); 548 out_destroy: 549 __destroy_inode(VFS_I(ip)); 550 xfs_inode_free(ip); 551 return error; 552 } 553 554 /* 555 * Look up an inode by number in the given file system. 556 * The inode is looked up in the cache held in each AG. 557 * If the inode is found in the cache, initialise the vfs inode 558 * if necessary. 559 * 560 * If it is not in core, read it in from the file system's device, 561 * add it to the cache and initialise the vfs inode. 562 * 563 * The inode is locked according to the value of the lock_flags parameter. 564 * This flag parameter indicates how and if the inode's IO lock and inode lock 565 * should be taken. 566 * 567 * mp -- the mount point structure for the current file system. It points 568 * to the inode hash table. 569 * tp -- a pointer to the current transaction if there is one. This is 570 * simply passed through to the xfs_iread() call. 571 * ino -- the number of the inode desired. This is the unique identifier 572 * within the file system for the inode being requested. 573 * lock_flags -- flags indicating how to lock the inode. See the comment 574 * for xfs_ilock() for a list of valid values. 575 */ 576 int 577 xfs_iget( 578 xfs_mount_t *mp, 579 xfs_trans_t *tp, 580 xfs_ino_t ino, 581 uint flags, 582 uint lock_flags, 583 xfs_inode_t **ipp) 584 { 585 xfs_inode_t *ip; 586 int error; 587 xfs_perag_t *pag; 588 xfs_agino_t agino; 589 590 /* 591 * xfs_reclaim_inode() uses the ILOCK to ensure an inode 592 * doesn't get freed while it's being referenced during a 593 * radix tree traversal here. It assumes this function 594 * aqcuires only the ILOCK (and therefore it has no need to 595 * involve the IOLOCK in this synchronization). 596 */ 597 ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0); 598 599 /* reject inode numbers outside existing AGs */ 600 if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount) 601 return -EINVAL; 602 603 XFS_STATS_INC(mp, xs_ig_attempts); 604 605 /* get the perag structure and ensure that it's inode capable */ 606 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino)); 607 agino = XFS_INO_TO_AGINO(mp, ino); 608 609 again: 610 error = 0; 611 rcu_read_lock(); 612 ip = radix_tree_lookup(&pag->pag_ici_root, agino); 613 614 if (ip) { 615 error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags); 616 if (error) 617 goto out_error_or_again; 618 } else { 619 rcu_read_unlock(); 620 if (flags & XFS_IGET_INCORE) { 621 error = -ENODATA; 622 goto out_error_or_again; 623 } 624 XFS_STATS_INC(mp, xs_ig_missed); 625 626 error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip, 627 flags, lock_flags); 628 if (error) 629 goto out_error_or_again; 630 } 631 xfs_perag_put(pag); 632 633 *ipp = ip; 634 635 /* 636 * If we have a real type for an on-disk inode, we can setup the inode 637 * now. If it's a new inode being created, xfs_ialloc will handle it. 638 */ 639 if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0) 640 xfs_setup_existing_inode(ip); 641 return 0; 642 643 out_error_or_again: 644 if (!(flags & XFS_IGET_INCORE) && error == -EAGAIN) { 645 delay(1); 646 goto again; 647 } 648 xfs_perag_put(pag); 649 return error; 650 } 651 652 /* 653 * "Is this a cached inode that's also allocated?" 654 * 655 * Look up an inode by number in the given file system. If the inode is 656 * in cache and isn't in purgatory, return 1 if the inode is allocated 657 * and 0 if it is not. For all other cases (not in cache, being torn 658 * down, etc.), return a negative error code. 659 * 660 * The caller has to prevent inode allocation and freeing activity, 661 * presumably by locking the AGI buffer. This is to ensure that an 662 * inode cannot transition from allocated to freed until the caller is 663 * ready to allow that. If the inode is in an intermediate state (new, 664 * reclaimable, or being reclaimed), -EAGAIN will be returned; if the 665 * inode is not in the cache, -ENOENT will be returned. The caller must 666 * deal with these scenarios appropriately. 667 * 668 * This is a specialized use case for the online scrubber; if you're 669 * reading this, you probably want xfs_iget. 670 */ 671 int 672 xfs_icache_inode_is_allocated( 673 struct xfs_mount *mp, 674 struct xfs_trans *tp, 675 xfs_ino_t ino, 676 bool *inuse) 677 { 678 struct xfs_inode *ip; 679 int error; 680 681 error = xfs_iget(mp, tp, ino, XFS_IGET_INCORE, 0, &ip); 682 if (error) 683 return error; 684 685 *inuse = !!(VFS_I(ip)->i_mode); 686 IRELE(ip); 687 return 0; 688 } 689 690 /* 691 * The inode lookup is done in batches to keep the amount of lock traffic and 692 * radix tree lookups to a minimum. The batch size is a trade off between 693 * lookup reduction and stack usage. This is in the reclaim path, so we can't 694 * be too greedy. 695 */ 696 #define XFS_LOOKUP_BATCH 32 697 698 STATIC int 699 xfs_inode_ag_walk_grab( 700 struct xfs_inode *ip, 701 int flags) 702 { 703 struct inode *inode = VFS_I(ip); 704 bool newinos = !!(flags & XFS_AGITER_INEW_WAIT); 705 706 ASSERT(rcu_read_lock_held()); 707 708 /* 709 * check for stale RCU freed inode 710 * 711 * If the inode has been reallocated, it doesn't matter if it's not in 712 * the AG we are walking - we are walking for writeback, so if it 713 * passes all the "valid inode" checks and is dirty, then we'll write 714 * it back anyway. If it has been reallocated and still being 715 * initialised, the XFS_INEW check below will catch it. 716 */ 717 spin_lock(&ip->i_flags_lock); 718 if (!ip->i_ino) 719 goto out_unlock_noent; 720 721 /* avoid new or reclaimable inodes. Leave for reclaim code to flush */ 722 if ((!newinos && __xfs_iflags_test(ip, XFS_INEW)) || 723 __xfs_iflags_test(ip, XFS_IRECLAIMABLE | XFS_IRECLAIM)) 724 goto out_unlock_noent; 725 spin_unlock(&ip->i_flags_lock); 726 727 /* nothing to sync during shutdown */ 728 if (XFS_FORCED_SHUTDOWN(ip->i_mount)) 729 return -EFSCORRUPTED; 730 731 /* If we can't grab the inode, it must on it's way to reclaim. */ 732 if (!igrab(inode)) 733 return -ENOENT; 734 735 /* inode is valid */ 736 return 0; 737 738 out_unlock_noent: 739 spin_unlock(&ip->i_flags_lock); 740 return -ENOENT; 741 } 742 743 STATIC int 744 xfs_inode_ag_walk( 745 struct xfs_mount *mp, 746 struct xfs_perag *pag, 747 int (*execute)(struct xfs_inode *ip, int flags, 748 void *args), 749 int flags, 750 void *args, 751 int tag, 752 int iter_flags) 753 { 754 uint32_t first_index; 755 int last_error = 0; 756 int skipped; 757 int done; 758 int nr_found; 759 760 restart: 761 done = 0; 762 skipped = 0; 763 first_index = 0; 764 nr_found = 0; 765 do { 766 struct xfs_inode *batch[XFS_LOOKUP_BATCH]; 767 int error = 0; 768 int i; 769 770 rcu_read_lock(); 771 772 if (tag == -1) 773 nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, 774 (void **)batch, first_index, 775 XFS_LOOKUP_BATCH); 776 else 777 nr_found = radix_tree_gang_lookup_tag( 778 &pag->pag_ici_root, 779 (void **) batch, first_index, 780 XFS_LOOKUP_BATCH, tag); 781 782 if (!nr_found) { 783 rcu_read_unlock(); 784 break; 785 } 786 787 /* 788 * Grab the inodes before we drop the lock. if we found 789 * nothing, nr == 0 and the loop will be skipped. 790 */ 791 for (i = 0; i < nr_found; i++) { 792 struct xfs_inode *ip = batch[i]; 793 794 if (done || xfs_inode_ag_walk_grab(ip, iter_flags)) 795 batch[i] = NULL; 796 797 /* 798 * Update the index for the next lookup. Catch 799 * overflows into the next AG range which can occur if 800 * we have inodes in the last block of the AG and we 801 * are currently pointing to the last inode. 802 * 803 * Because we may see inodes that are from the wrong AG 804 * due to RCU freeing and reallocation, only update the 805 * index if it lies in this AG. It was a race that lead 806 * us to see this inode, so another lookup from the 807 * same index will not find it again. 808 */ 809 if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno) 810 continue; 811 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1); 812 if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) 813 done = 1; 814 } 815 816 /* unlock now we've grabbed the inodes. */ 817 rcu_read_unlock(); 818 819 for (i = 0; i < nr_found; i++) { 820 if (!batch[i]) 821 continue; 822 if ((iter_flags & XFS_AGITER_INEW_WAIT) && 823 xfs_iflags_test(batch[i], XFS_INEW)) 824 xfs_inew_wait(batch[i]); 825 error = execute(batch[i], flags, args); 826 IRELE(batch[i]); 827 if (error == -EAGAIN) { 828 skipped++; 829 continue; 830 } 831 if (error && last_error != -EFSCORRUPTED) 832 last_error = error; 833 } 834 835 /* bail out if the filesystem is corrupted. */ 836 if (error == -EFSCORRUPTED) 837 break; 838 839 cond_resched(); 840 841 } while (nr_found && !done); 842 843 if (skipped) { 844 delay(1); 845 goto restart; 846 } 847 return last_error; 848 } 849 850 /* 851 * Background scanning to trim post-EOF preallocated space. This is queued 852 * based on the 'speculative_prealloc_lifetime' tunable (5m by default). 853 */ 854 void 855 xfs_queue_eofblocks( 856 struct xfs_mount *mp) 857 { 858 rcu_read_lock(); 859 if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_EOFBLOCKS_TAG)) 860 queue_delayed_work(mp->m_eofblocks_workqueue, 861 &mp->m_eofblocks_work, 862 msecs_to_jiffies(xfs_eofb_secs * 1000)); 863 rcu_read_unlock(); 864 } 865 866 void 867 xfs_eofblocks_worker( 868 struct work_struct *work) 869 { 870 struct xfs_mount *mp = container_of(to_delayed_work(work), 871 struct xfs_mount, m_eofblocks_work); 872 xfs_icache_free_eofblocks(mp, NULL); 873 xfs_queue_eofblocks(mp); 874 } 875 876 /* 877 * Background scanning to trim preallocated CoW space. This is queued 878 * based on the 'speculative_cow_prealloc_lifetime' tunable (5m by default). 879 * (We'll just piggyback on the post-EOF prealloc space workqueue.) 880 */ 881 void 882 xfs_queue_cowblocks( 883 struct xfs_mount *mp) 884 { 885 rcu_read_lock(); 886 if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_COWBLOCKS_TAG)) 887 queue_delayed_work(mp->m_eofblocks_workqueue, 888 &mp->m_cowblocks_work, 889 msecs_to_jiffies(xfs_cowb_secs * 1000)); 890 rcu_read_unlock(); 891 } 892 893 void 894 xfs_cowblocks_worker( 895 struct work_struct *work) 896 { 897 struct xfs_mount *mp = container_of(to_delayed_work(work), 898 struct xfs_mount, m_cowblocks_work); 899 xfs_icache_free_cowblocks(mp, NULL); 900 xfs_queue_cowblocks(mp); 901 } 902 903 int 904 xfs_inode_ag_iterator_flags( 905 struct xfs_mount *mp, 906 int (*execute)(struct xfs_inode *ip, int flags, 907 void *args), 908 int flags, 909 void *args, 910 int iter_flags) 911 { 912 struct xfs_perag *pag; 913 int error = 0; 914 int last_error = 0; 915 xfs_agnumber_t ag; 916 917 ag = 0; 918 while ((pag = xfs_perag_get(mp, ag))) { 919 ag = pag->pag_agno + 1; 920 error = xfs_inode_ag_walk(mp, pag, execute, flags, args, -1, 921 iter_flags); 922 xfs_perag_put(pag); 923 if (error) { 924 last_error = error; 925 if (error == -EFSCORRUPTED) 926 break; 927 } 928 } 929 return last_error; 930 } 931 932 int 933 xfs_inode_ag_iterator( 934 struct xfs_mount *mp, 935 int (*execute)(struct xfs_inode *ip, int flags, 936 void *args), 937 int flags, 938 void *args) 939 { 940 return xfs_inode_ag_iterator_flags(mp, execute, flags, args, 0); 941 } 942 943 int 944 xfs_inode_ag_iterator_tag( 945 struct xfs_mount *mp, 946 int (*execute)(struct xfs_inode *ip, int flags, 947 void *args), 948 int flags, 949 void *args, 950 int tag) 951 { 952 struct xfs_perag *pag; 953 int error = 0; 954 int last_error = 0; 955 xfs_agnumber_t ag; 956 957 ag = 0; 958 while ((pag = xfs_perag_get_tag(mp, ag, tag))) { 959 ag = pag->pag_agno + 1; 960 error = xfs_inode_ag_walk(mp, pag, execute, flags, args, tag, 961 0); 962 xfs_perag_put(pag); 963 if (error) { 964 last_error = error; 965 if (error == -EFSCORRUPTED) 966 break; 967 } 968 } 969 return last_error; 970 } 971 972 /* 973 * Grab the inode for reclaim exclusively. 974 * Return 0 if we grabbed it, non-zero otherwise. 975 */ 976 STATIC int 977 xfs_reclaim_inode_grab( 978 struct xfs_inode *ip, 979 int flags) 980 { 981 ASSERT(rcu_read_lock_held()); 982 983 /* quick check for stale RCU freed inode */ 984 if (!ip->i_ino) 985 return 1; 986 987 /* 988 * If we are asked for non-blocking operation, do unlocked checks to 989 * see if the inode already is being flushed or in reclaim to avoid 990 * lock traffic. 991 */ 992 if ((flags & SYNC_TRYLOCK) && 993 __xfs_iflags_test(ip, XFS_IFLOCK | XFS_IRECLAIM)) 994 return 1; 995 996 /* 997 * The radix tree lock here protects a thread in xfs_iget from racing 998 * with us starting reclaim on the inode. Once we have the 999 * XFS_IRECLAIM flag set it will not touch us. 1000 * 1001 * Due to RCU lookup, we may find inodes that have been freed and only 1002 * have XFS_IRECLAIM set. Indeed, we may see reallocated inodes that 1003 * aren't candidates for reclaim at all, so we must check the 1004 * XFS_IRECLAIMABLE is set first before proceeding to reclaim. 1005 */ 1006 spin_lock(&ip->i_flags_lock); 1007 if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) || 1008 __xfs_iflags_test(ip, XFS_IRECLAIM)) { 1009 /* not a reclaim candidate. */ 1010 spin_unlock(&ip->i_flags_lock); 1011 return 1; 1012 } 1013 __xfs_iflags_set(ip, XFS_IRECLAIM); 1014 spin_unlock(&ip->i_flags_lock); 1015 return 0; 1016 } 1017 1018 /* 1019 * Inodes in different states need to be treated differently. The following 1020 * table lists the inode states and the reclaim actions necessary: 1021 * 1022 * inode state iflush ret required action 1023 * --------------- ---------- --------------- 1024 * bad - reclaim 1025 * shutdown EIO unpin and reclaim 1026 * clean, unpinned 0 reclaim 1027 * stale, unpinned 0 reclaim 1028 * clean, pinned(*) 0 requeue 1029 * stale, pinned EAGAIN requeue 1030 * dirty, async - requeue 1031 * dirty, sync 0 reclaim 1032 * 1033 * (*) dgc: I don't think the clean, pinned state is possible but it gets 1034 * handled anyway given the order of checks implemented. 1035 * 1036 * Also, because we get the flush lock first, we know that any inode that has 1037 * been flushed delwri has had the flush completed by the time we check that 1038 * the inode is clean. 1039 * 1040 * Note that because the inode is flushed delayed write by AIL pushing, the 1041 * flush lock may already be held here and waiting on it can result in very 1042 * long latencies. Hence for sync reclaims, where we wait on the flush lock, 1043 * the caller should push the AIL first before trying to reclaim inodes to 1044 * minimise the amount of time spent waiting. For background relaim, we only 1045 * bother to reclaim clean inodes anyway. 1046 * 1047 * Hence the order of actions after gaining the locks should be: 1048 * bad => reclaim 1049 * shutdown => unpin and reclaim 1050 * pinned, async => requeue 1051 * pinned, sync => unpin 1052 * stale => reclaim 1053 * clean => reclaim 1054 * dirty, async => requeue 1055 * dirty, sync => flush, wait and reclaim 1056 */ 1057 STATIC int 1058 xfs_reclaim_inode( 1059 struct xfs_inode *ip, 1060 struct xfs_perag *pag, 1061 int sync_mode) 1062 { 1063 struct xfs_buf *bp = NULL; 1064 xfs_ino_t ino = ip->i_ino; /* for radix_tree_delete */ 1065 int error; 1066 1067 restart: 1068 error = 0; 1069 xfs_ilock(ip, XFS_ILOCK_EXCL); 1070 if (!xfs_iflock_nowait(ip)) { 1071 if (!(sync_mode & SYNC_WAIT)) 1072 goto out; 1073 xfs_iflock(ip); 1074 } 1075 1076 if (XFS_FORCED_SHUTDOWN(ip->i_mount)) { 1077 xfs_iunpin_wait(ip); 1078 /* xfs_iflush_abort() drops the flush lock */ 1079 xfs_iflush_abort(ip, false); 1080 goto reclaim; 1081 } 1082 if (xfs_ipincount(ip)) { 1083 if (!(sync_mode & SYNC_WAIT)) 1084 goto out_ifunlock; 1085 xfs_iunpin_wait(ip); 1086 } 1087 if (xfs_iflags_test(ip, XFS_ISTALE) || xfs_inode_clean(ip)) { 1088 xfs_ifunlock(ip); 1089 goto reclaim; 1090 } 1091 1092 /* 1093 * Never flush out dirty data during non-blocking reclaim, as it would 1094 * just contend with AIL pushing trying to do the same job. 1095 */ 1096 if (!(sync_mode & SYNC_WAIT)) 1097 goto out_ifunlock; 1098 1099 /* 1100 * Now we have an inode that needs flushing. 1101 * 1102 * Note that xfs_iflush will never block on the inode buffer lock, as 1103 * xfs_ifree_cluster() can lock the inode buffer before it locks the 1104 * ip->i_lock, and we are doing the exact opposite here. As a result, 1105 * doing a blocking xfs_imap_to_bp() to get the cluster buffer would 1106 * result in an ABBA deadlock with xfs_ifree_cluster(). 1107 * 1108 * As xfs_ifree_cluser() must gather all inodes that are active in the 1109 * cache to mark them stale, if we hit this case we don't actually want 1110 * to do IO here - we want the inode marked stale so we can simply 1111 * reclaim it. Hence if we get an EAGAIN error here, just unlock the 1112 * inode, back off and try again. Hopefully the next pass through will 1113 * see the stale flag set on the inode. 1114 */ 1115 error = xfs_iflush(ip, &bp); 1116 if (error == -EAGAIN) { 1117 xfs_iunlock(ip, XFS_ILOCK_EXCL); 1118 /* backoff longer than in xfs_ifree_cluster */ 1119 delay(2); 1120 goto restart; 1121 } 1122 1123 if (!error) { 1124 error = xfs_bwrite(bp); 1125 xfs_buf_relse(bp); 1126 } 1127 1128 reclaim: 1129 ASSERT(!xfs_isiflocked(ip)); 1130 1131 /* 1132 * Because we use RCU freeing we need to ensure the inode always appears 1133 * to be reclaimed with an invalid inode number when in the free state. 1134 * We do this as early as possible under the ILOCK so that 1135 * xfs_iflush_cluster() and xfs_ifree_cluster() can be guaranteed to 1136 * detect races with us here. By doing this, we guarantee that once 1137 * xfs_iflush_cluster() or xfs_ifree_cluster() has locked XFS_ILOCK that 1138 * it will see either a valid inode that will serialise correctly, or it 1139 * will see an invalid inode that it can skip. 1140 */ 1141 spin_lock(&ip->i_flags_lock); 1142 ip->i_flags = XFS_IRECLAIM; 1143 ip->i_ino = 0; 1144 spin_unlock(&ip->i_flags_lock); 1145 1146 xfs_iunlock(ip, XFS_ILOCK_EXCL); 1147 1148 XFS_STATS_INC(ip->i_mount, xs_ig_reclaims); 1149 /* 1150 * Remove the inode from the per-AG radix tree. 1151 * 1152 * Because radix_tree_delete won't complain even if the item was never 1153 * added to the tree assert that it's been there before to catch 1154 * problems with the inode life time early on. 1155 */ 1156 spin_lock(&pag->pag_ici_lock); 1157 if (!radix_tree_delete(&pag->pag_ici_root, 1158 XFS_INO_TO_AGINO(ip->i_mount, ino))) 1159 ASSERT(0); 1160 xfs_perag_clear_reclaim_tag(pag); 1161 spin_unlock(&pag->pag_ici_lock); 1162 1163 /* 1164 * Here we do an (almost) spurious inode lock in order to coordinate 1165 * with inode cache radix tree lookups. This is because the lookup 1166 * can reference the inodes in the cache without taking references. 1167 * 1168 * We make that OK here by ensuring that we wait until the inode is 1169 * unlocked after the lookup before we go ahead and free it. 1170 */ 1171 xfs_ilock(ip, XFS_ILOCK_EXCL); 1172 xfs_qm_dqdetach(ip); 1173 xfs_iunlock(ip, XFS_ILOCK_EXCL); 1174 1175 __xfs_inode_free(ip); 1176 return error; 1177 1178 out_ifunlock: 1179 xfs_ifunlock(ip); 1180 out: 1181 xfs_iflags_clear(ip, XFS_IRECLAIM); 1182 xfs_iunlock(ip, XFS_ILOCK_EXCL); 1183 /* 1184 * We could return -EAGAIN here to make reclaim rescan the inode tree in 1185 * a short while. However, this just burns CPU time scanning the tree 1186 * waiting for IO to complete and the reclaim work never goes back to 1187 * the idle state. Instead, return 0 to let the next scheduled 1188 * background reclaim attempt to reclaim the inode again. 1189 */ 1190 return 0; 1191 } 1192 1193 /* 1194 * Walk the AGs and reclaim the inodes in them. Even if the filesystem is 1195 * corrupted, we still want to try to reclaim all the inodes. If we don't, 1196 * then a shut down during filesystem unmount reclaim walk leak all the 1197 * unreclaimed inodes. 1198 */ 1199 STATIC int 1200 xfs_reclaim_inodes_ag( 1201 struct xfs_mount *mp, 1202 int flags, 1203 int *nr_to_scan) 1204 { 1205 struct xfs_perag *pag; 1206 int error = 0; 1207 int last_error = 0; 1208 xfs_agnumber_t ag; 1209 int trylock = flags & SYNC_TRYLOCK; 1210 int skipped; 1211 1212 restart: 1213 ag = 0; 1214 skipped = 0; 1215 while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) { 1216 unsigned long first_index = 0; 1217 int done = 0; 1218 int nr_found = 0; 1219 1220 ag = pag->pag_agno + 1; 1221 1222 if (trylock) { 1223 if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) { 1224 skipped++; 1225 xfs_perag_put(pag); 1226 continue; 1227 } 1228 first_index = pag->pag_ici_reclaim_cursor; 1229 } else 1230 mutex_lock(&pag->pag_ici_reclaim_lock); 1231 1232 do { 1233 struct xfs_inode *batch[XFS_LOOKUP_BATCH]; 1234 int i; 1235 1236 rcu_read_lock(); 1237 nr_found = radix_tree_gang_lookup_tag( 1238 &pag->pag_ici_root, 1239 (void **)batch, first_index, 1240 XFS_LOOKUP_BATCH, 1241 XFS_ICI_RECLAIM_TAG); 1242 if (!nr_found) { 1243 done = 1; 1244 rcu_read_unlock(); 1245 break; 1246 } 1247 1248 /* 1249 * Grab the inodes before we drop the lock. if we found 1250 * nothing, nr == 0 and the loop will be skipped. 1251 */ 1252 for (i = 0; i < nr_found; i++) { 1253 struct xfs_inode *ip = batch[i]; 1254 1255 if (done || xfs_reclaim_inode_grab(ip, flags)) 1256 batch[i] = NULL; 1257 1258 /* 1259 * Update the index for the next lookup. Catch 1260 * overflows into the next AG range which can 1261 * occur if we have inodes in the last block of 1262 * the AG and we are currently pointing to the 1263 * last inode. 1264 * 1265 * Because we may see inodes that are from the 1266 * wrong AG due to RCU freeing and 1267 * reallocation, only update the index if it 1268 * lies in this AG. It was a race that lead us 1269 * to see this inode, so another lookup from 1270 * the same index will not find it again. 1271 */ 1272 if (XFS_INO_TO_AGNO(mp, ip->i_ino) != 1273 pag->pag_agno) 1274 continue; 1275 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1); 1276 if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) 1277 done = 1; 1278 } 1279 1280 /* unlock now we've grabbed the inodes. */ 1281 rcu_read_unlock(); 1282 1283 for (i = 0; i < nr_found; i++) { 1284 if (!batch[i]) 1285 continue; 1286 error = xfs_reclaim_inode(batch[i], pag, flags); 1287 if (error && last_error != -EFSCORRUPTED) 1288 last_error = error; 1289 } 1290 1291 *nr_to_scan -= XFS_LOOKUP_BATCH; 1292 1293 cond_resched(); 1294 1295 } while (nr_found && !done && *nr_to_scan > 0); 1296 1297 if (trylock && !done) 1298 pag->pag_ici_reclaim_cursor = first_index; 1299 else 1300 pag->pag_ici_reclaim_cursor = 0; 1301 mutex_unlock(&pag->pag_ici_reclaim_lock); 1302 xfs_perag_put(pag); 1303 } 1304 1305 /* 1306 * if we skipped any AG, and we still have scan count remaining, do 1307 * another pass this time using blocking reclaim semantics (i.e 1308 * waiting on the reclaim locks and ignoring the reclaim cursors). This 1309 * ensure that when we get more reclaimers than AGs we block rather 1310 * than spin trying to execute reclaim. 1311 */ 1312 if (skipped && (flags & SYNC_WAIT) && *nr_to_scan > 0) { 1313 trylock = 0; 1314 goto restart; 1315 } 1316 return last_error; 1317 } 1318 1319 int 1320 xfs_reclaim_inodes( 1321 xfs_mount_t *mp, 1322 int mode) 1323 { 1324 int nr_to_scan = INT_MAX; 1325 1326 return xfs_reclaim_inodes_ag(mp, mode, &nr_to_scan); 1327 } 1328 1329 /* 1330 * Scan a certain number of inodes for reclaim. 1331 * 1332 * When called we make sure that there is a background (fast) inode reclaim in 1333 * progress, while we will throttle the speed of reclaim via doing synchronous 1334 * reclaim of inodes. That means if we come across dirty inodes, we wait for 1335 * them to be cleaned, which we hope will not be very long due to the 1336 * background walker having already kicked the IO off on those dirty inodes. 1337 */ 1338 long 1339 xfs_reclaim_inodes_nr( 1340 struct xfs_mount *mp, 1341 int nr_to_scan) 1342 { 1343 /* kick background reclaimer and push the AIL */ 1344 xfs_reclaim_work_queue(mp); 1345 xfs_ail_push_all(mp->m_ail); 1346 1347 return xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT, &nr_to_scan); 1348 } 1349 1350 /* 1351 * Return the number of reclaimable inodes in the filesystem for 1352 * the shrinker to determine how much to reclaim. 1353 */ 1354 int 1355 xfs_reclaim_inodes_count( 1356 struct xfs_mount *mp) 1357 { 1358 struct xfs_perag *pag; 1359 xfs_agnumber_t ag = 0; 1360 int reclaimable = 0; 1361 1362 while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) { 1363 ag = pag->pag_agno + 1; 1364 reclaimable += pag->pag_ici_reclaimable; 1365 xfs_perag_put(pag); 1366 } 1367 return reclaimable; 1368 } 1369 1370 STATIC int 1371 xfs_inode_match_id( 1372 struct xfs_inode *ip, 1373 struct xfs_eofblocks *eofb) 1374 { 1375 if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) && 1376 !uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid)) 1377 return 0; 1378 1379 if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) && 1380 !gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid)) 1381 return 0; 1382 1383 if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) && 1384 xfs_get_projid(ip) != eofb->eof_prid) 1385 return 0; 1386 1387 return 1; 1388 } 1389 1390 /* 1391 * A union-based inode filtering algorithm. Process the inode if any of the 1392 * criteria match. This is for global/internal scans only. 1393 */ 1394 STATIC int 1395 xfs_inode_match_id_union( 1396 struct xfs_inode *ip, 1397 struct xfs_eofblocks *eofb) 1398 { 1399 if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) && 1400 uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid)) 1401 return 1; 1402 1403 if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) && 1404 gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid)) 1405 return 1; 1406 1407 if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) && 1408 xfs_get_projid(ip) == eofb->eof_prid) 1409 return 1; 1410 1411 return 0; 1412 } 1413 1414 STATIC int 1415 xfs_inode_free_eofblocks( 1416 struct xfs_inode *ip, 1417 int flags, 1418 void *args) 1419 { 1420 int ret = 0; 1421 struct xfs_eofblocks *eofb = args; 1422 int match; 1423 1424 if (!xfs_can_free_eofblocks(ip, false)) { 1425 /* inode could be preallocated or append-only */ 1426 trace_xfs_inode_free_eofblocks_invalid(ip); 1427 xfs_inode_clear_eofblocks_tag(ip); 1428 return 0; 1429 } 1430 1431 /* 1432 * If the mapping is dirty the operation can block and wait for some 1433 * time. Unless we are waiting, skip it. 1434 */ 1435 if (!(flags & SYNC_WAIT) && 1436 mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY)) 1437 return 0; 1438 1439 if (eofb) { 1440 if (eofb->eof_flags & XFS_EOF_FLAGS_UNION) 1441 match = xfs_inode_match_id_union(ip, eofb); 1442 else 1443 match = xfs_inode_match_id(ip, eofb); 1444 if (!match) 1445 return 0; 1446 1447 /* skip the inode if the file size is too small */ 1448 if (eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE && 1449 XFS_ISIZE(ip) < eofb->eof_min_file_size) 1450 return 0; 1451 } 1452 1453 /* 1454 * If the caller is waiting, return -EAGAIN to keep the background 1455 * scanner moving and revisit the inode in a subsequent pass. 1456 */ 1457 if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) { 1458 if (flags & SYNC_WAIT) 1459 ret = -EAGAIN; 1460 return ret; 1461 } 1462 ret = xfs_free_eofblocks(ip); 1463 xfs_iunlock(ip, XFS_IOLOCK_EXCL); 1464 1465 return ret; 1466 } 1467 1468 static int 1469 __xfs_icache_free_eofblocks( 1470 struct xfs_mount *mp, 1471 struct xfs_eofblocks *eofb, 1472 int (*execute)(struct xfs_inode *ip, int flags, 1473 void *args), 1474 int tag) 1475 { 1476 int flags = SYNC_TRYLOCK; 1477 1478 if (eofb && (eofb->eof_flags & XFS_EOF_FLAGS_SYNC)) 1479 flags = SYNC_WAIT; 1480 1481 return xfs_inode_ag_iterator_tag(mp, execute, flags, 1482 eofb, tag); 1483 } 1484 1485 int 1486 xfs_icache_free_eofblocks( 1487 struct xfs_mount *mp, 1488 struct xfs_eofblocks *eofb) 1489 { 1490 return __xfs_icache_free_eofblocks(mp, eofb, xfs_inode_free_eofblocks, 1491 XFS_ICI_EOFBLOCKS_TAG); 1492 } 1493 1494 /* 1495 * Run eofblocks scans on the quotas applicable to the inode. For inodes with 1496 * multiple quotas, we don't know exactly which quota caused an allocation 1497 * failure. We make a best effort by including each quota under low free space 1498 * conditions (less than 1% free space) in the scan. 1499 */ 1500 static int 1501 __xfs_inode_free_quota_eofblocks( 1502 struct xfs_inode *ip, 1503 int (*execute)(struct xfs_mount *mp, 1504 struct xfs_eofblocks *eofb)) 1505 { 1506 int scan = 0; 1507 struct xfs_eofblocks eofb = {0}; 1508 struct xfs_dquot *dq; 1509 1510 /* 1511 * Run a sync scan to increase effectiveness and use the union filter to 1512 * cover all applicable quotas in a single scan. 1513 */ 1514 eofb.eof_flags = XFS_EOF_FLAGS_UNION|XFS_EOF_FLAGS_SYNC; 1515 1516 if (XFS_IS_UQUOTA_ENFORCED(ip->i_mount)) { 1517 dq = xfs_inode_dquot(ip, XFS_DQ_USER); 1518 if (dq && xfs_dquot_lowsp(dq)) { 1519 eofb.eof_uid = VFS_I(ip)->i_uid; 1520 eofb.eof_flags |= XFS_EOF_FLAGS_UID; 1521 scan = 1; 1522 } 1523 } 1524 1525 if (XFS_IS_GQUOTA_ENFORCED(ip->i_mount)) { 1526 dq = xfs_inode_dquot(ip, XFS_DQ_GROUP); 1527 if (dq && xfs_dquot_lowsp(dq)) { 1528 eofb.eof_gid = VFS_I(ip)->i_gid; 1529 eofb.eof_flags |= XFS_EOF_FLAGS_GID; 1530 scan = 1; 1531 } 1532 } 1533 1534 if (scan) 1535 execute(ip->i_mount, &eofb); 1536 1537 return scan; 1538 } 1539 1540 int 1541 xfs_inode_free_quota_eofblocks( 1542 struct xfs_inode *ip) 1543 { 1544 return __xfs_inode_free_quota_eofblocks(ip, xfs_icache_free_eofblocks); 1545 } 1546 1547 static inline unsigned long 1548 xfs_iflag_for_tag( 1549 int tag) 1550 { 1551 switch (tag) { 1552 case XFS_ICI_EOFBLOCKS_TAG: 1553 return XFS_IEOFBLOCKS; 1554 case XFS_ICI_COWBLOCKS_TAG: 1555 return XFS_ICOWBLOCKS; 1556 default: 1557 ASSERT(0); 1558 return 0; 1559 } 1560 } 1561 1562 static void 1563 __xfs_inode_set_blocks_tag( 1564 xfs_inode_t *ip, 1565 void (*execute)(struct xfs_mount *mp), 1566 void (*set_tp)(struct xfs_mount *mp, xfs_agnumber_t agno, 1567 int error, unsigned long caller_ip), 1568 int tag) 1569 { 1570 struct xfs_mount *mp = ip->i_mount; 1571 struct xfs_perag *pag; 1572 int tagged; 1573 1574 /* 1575 * Don't bother locking the AG and looking up in the radix trees 1576 * if we already know that we have the tag set. 1577 */ 1578 if (ip->i_flags & xfs_iflag_for_tag(tag)) 1579 return; 1580 spin_lock(&ip->i_flags_lock); 1581 ip->i_flags |= xfs_iflag_for_tag(tag); 1582 spin_unlock(&ip->i_flags_lock); 1583 1584 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); 1585 spin_lock(&pag->pag_ici_lock); 1586 1587 tagged = radix_tree_tagged(&pag->pag_ici_root, tag); 1588 radix_tree_tag_set(&pag->pag_ici_root, 1589 XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), tag); 1590 if (!tagged) { 1591 /* propagate the eofblocks tag up into the perag radix tree */ 1592 spin_lock(&ip->i_mount->m_perag_lock); 1593 radix_tree_tag_set(&ip->i_mount->m_perag_tree, 1594 XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino), 1595 tag); 1596 spin_unlock(&ip->i_mount->m_perag_lock); 1597 1598 /* kick off background trimming */ 1599 execute(ip->i_mount); 1600 1601 set_tp(ip->i_mount, pag->pag_agno, -1, _RET_IP_); 1602 } 1603 1604 spin_unlock(&pag->pag_ici_lock); 1605 xfs_perag_put(pag); 1606 } 1607 1608 void 1609 xfs_inode_set_eofblocks_tag( 1610 xfs_inode_t *ip) 1611 { 1612 trace_xfs_inode_set_eofblocks_tag(ip); 1613 return __xfs_inode_set_blocks_tag(ip, xfs_queue_eofblocks, 1614 trace_xfs_perag_set_eofblocks, 1615 XFS_ICI_EOFBLOCKS_TAG); 1616 } 1617 1618 static void 1619 __xfs_inode_clear_blocks_tag( 1620 xfs_inode_t *ip, 1621 void (*clear_tp)(struct xfs_mount *mp, xfs_agnumber_t agno, 1622 int error, unsigned long caller_ip), 1623 int tag) 1624 { 1625 struct xfs_mount *mp = ip->i_mount; 1626 struct xfs_perag *pag; 1627 1628 spin_lock(&ip->i_flags_lock); 1629 ip->i_flags &= ~xfs_iflag_for_tag(tag); 1630 spin_unlock(&ip->i_flags_lock); 1631 1632 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); 1633 spin_lock(&pag->pag_ici_lock); 1634 1635 radix_tree_tag_clear(&pag->pag_ici_root, 1636 XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), tag); 1637 if (!radix_tree_tagged(&pag->pag_ici_root, tag)) { 1638 /* clear the eofblocks tag from the perag radix tree */ 1639 spin_lock(&ip->i_mount->m_perag_lock); 1640 radix_tree_tag_clear(&ip->i_mount->m_perag_tree, 1641 XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino), 1642 tag); 1643 spin_unlock(&ip->i_mount->m_perag_lock); 1644 clear_tp(ip->i_mount, pag->pag_agno, -1, _RET_IP_); 1645 } 1646 1647 spin_unlock(&pag->pag_ici_lock); 1648 xfs_perag_put(pag); 1649 } 1650 1651 void 1652 xfs_inode_clear_eofblocks_tag( 1653 xfs_inode_t *ip) 1654 { 1655 trace_xfs_inode_clear_eofblocks_tag(ip); 1656 return __xfs_inode_clear_blocks_tag(ip, 1657 trace_xfs_perag_clear_eofblocks, XFS_ICI_EOFBLOCKS_TAG); 1658 } 1659 1660 /* 1661 * Set ourselves up to free CoW blocks from this file. If it's already clean 1662 * then we can bail out quickly, but otherwise we must back off if the file 1663 * is undergoing some kind of write. 1664 */ 1665 static bool 1666 xfs_prep_free_cowblocks( 1667 struct xfs_inode *ip, 1668 struct xfs_ifork *ifp) 1669 { 1670 /* 1671 * Just clear the tag if we have an empty cow fork or none at all. It's 1672 * possible the inode was fully unshared since it was originally tagged. 1673 */ 1674 if (!xfs_is_reflink_inode(ip) || !ifp->if_bytes) { 1675 trace_xfs_inode_free_cowblocks_invalid(ip); 1676 xfs_inode_clear_cowblocks_tag(ip); 1677 return false; 1678 } 1679 1680 /* 1681 * If the mapping is dirty or under writeback we cannot touch the 1682 * CoW fork. Leave it alone if we're in the midst of a directio. 1683 */ 1684 if ((VFS_I(ip)->i_state & I_DIRTY_PAGES) || 1685 mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) || 1686 mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_WRITEBACK) || 1687 atomic_read(&VFS_I(ip)->i_dio_count)) 1688 return false; 1689 1690 return true; 1691 } 1692 1693 /* 1694 * Automatic CoW Reservation Freeing 1695 * 1696 * These functions automatically garbage collect leftover CoW reservations 1697 * that were made on behalf of a cowextsize hint when we start to run out 1698 * of quota or when the reservations sit around for too long. If the file 1699 * has dirty pages or is undergoing writeback, its CoW reservations will 1700 * be retained. 1701 * 1702 * The actual garbage collection piggybacks off the same code that runs 1703 * the speculative EOF preallocation garbage collector. 1704 */ 1705 STATIC int 1706 xfs_inode_free_cowblocks( 1707 struct xfs_inode *ip, 1708 int flags, 1709 void *args) 1710 { 1711 struct xfs_eofblocks *eofb = args; 1712 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK); 1713 int match; 1714 int ret = 0; 1715 1716 if (!xfs_prep_free_cowblocks(ip, ifp)) 1717 return 0; 1718 1719 if (eofb) { 1720 if (eofb->eof_flags & XFS_EOF_FLAGS_UNION) 1721 match = xfs_inode_match_id_union(ip, eofb); 1722 else 1723 match = xfs_inode_match_id(ip, eofb); 1724 if (!match) 1725 return 0; 1726 1727 /* skip the inode if the file size is too small */ 1728 if (eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE && 1729 XFS_ISIZE(ip) < eofb->eof_min_file_size) 1730 return 0; 1731 } 1732 1733 /* Free the CoW blocks */ 1734 xfs_ilock(ip, XFS_IOLOCK_EXCL); 1735 xfs_ilock(ip, XFS_MMAPLOCK_EXCL); 1736 1737 /* 1738 * Check again, nobody else should be able to dirty blocks or change 1739 * the reflink iflag now that we have the first two locks held. 1740 */ 1741 if (xfs_prep_free_cowblocks(ip, ifp)) 1742 ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false); 1743 1744 xfs_iunlock(ip, XFS_MMAPLOCK_EXCL); 1745 xfs_iunlock(ip, XFS_IOLOCK_EXCL); 1746 1747 return ret; 1748 } 1749 1750 int 1751 xfs_icache_free_cowblocks( 1752 struct xfs_mount *mp, 1753 struct xfs_eofblocks *eofb) 1754 { 1755 return __xfs_icache_free_eofblocks(mp, eofb, xfs_inode_free_cowblocks, 1756 XFS_ICI_COWBLOCKS_TAG); 1757 } 1758 1759 int 1760 xfs_inode_free_quota_cowblocks( 1761 struct xfs_inode *ip) 1762 { 1763 return __xfs_inode_free_quota_eofblocks(ip, xfs_icache_free_cowblocks); 1764 } 1765 1766 void 1767 xfs_inode_set_cowblocks_tag( 1768 xfs_inode_t *ip) 1769 { 1770 trace_xfs_inode_set_cowblocks_tag(ip); 1771 return __xfs_inode_set_blocks_tag(ip, xfs_queue_cowblocks, 1772 trace_xfs_perag_set_cowblocks, 1773 XFS_ICI_COWBLOCKS_TAG); 1774 } 1775 1776 void 1777 xfs_inode_clear_cowblocks_tag( 1778 xfs_inode_t *ip) 1779 { 1780 trace_xfs_inode_clear_cowblocks_tag(ip); 1781 return __xfs_inode_clear_blocks_tag(ip, 1782 trace_xfs_perag_clear_cowblocks, XFS_ICI_COWBLOCKS_TAG); 1783 } 1784