/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_inode_item.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_bmap_util.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_reflink.h"

#include <linux/kthread.h>
#include <linux/freezer.h>

/*
 * Allocate and initialise an xfs_inode.
 */
struct xfs_inode *
xfs_inode_alloc(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;

	/*
	 * If this didn't occur in transactions, we could use
	 * KM_MAYFAIL and return NULL here on ENOMEM. Set the
	 * code up to do this anyway.
	 */
	ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
	if (!ip)
		return NULL;
	if (inode_init_always(mp->m_super, VFS_I(ip))) {
		kmem_zone_free(xfs_inode_zone, ip);
		return NULL;
	}

	/* VFS doesn't initialise i_mode! */
	VFS_I(ip)->i_mode = 0;

	XFS_STATS_INC(mp, vn_active);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!xfs_isiflocked(ip));
	ASSERT(ip->i_ino == 0);

	/* initialise the xfs inode */
	ip->i_ino = ino;
	ip->i_mount = mp;
	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
	ip->i_afp = NULL;
	ip->i_cowfp = NULL;
	ip->i_cnextents = 0;
	ip->i_cformat = XFS_DINODE_FMT_EXTENTS;
	memset(&ip->i_df, 0, sizeof(xfs_ifork_t));
	ip->i_flags = 0;
	ip->i_delayed_blks = 0;
	memset(&ip->i_d, 0, sizeof(ip->i_d));

	return ip;
}

STATIC void
xfs_inode_free_callback(
	struct rcu_head		*head)
{
	struct inode		*inode = container_of(head, struct inode, i_rcu);
	struct xfs_inode	*ip = XFS_I(inode);

	switch (VFS_I(ip)->i_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		xfs_idestroy_fork(ip, XFS_DATA_FORK);
		break;
	}

	if (ip->i_afp)
		xfs_idestroy_fork(ip, XFS_ATTR_FORK);
	if (ip->i_cowfp)
		xfs_idestroy_fork(ip, XFS_COW_FORK);

	if (ip->i_itemp) {
		ASSERT(!(ip->i_itemp->ili_item.li_flags & XFS_LI_IN_AIL));
		xfs_inode_item_destroy(ip);
		ip->i_itemp = NULL;
	}

	kmem_zone_free(xfs_inode_zone, ip);
}

static void
__xfs_inode_free(
	struct xfs_inode	*ip)
{
	/* asserts to verify all state is correct here */
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	XFS_STATS_DEC(ip->i_mount, vn_active);

	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
}

void
xfs_inode_free(
	struct xfs_inode	*ip)
{
	ASSERT(!xfs_isiflocked(ip));

	/*
	 * Because we use RCU freeing we need to ensure the inode always
	 * appears to be reclaimed with an invalid inode number when in the
	 * free state. The ip->i_flags_lock provides the barrier against lookup
	 * races.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	spin_unlock(&ip->i_flags_lock);

	__xfs_inode_free(ip);
}

/*
 * Queue a new inode reclaim pass if there are reclaimable inodes and there
 * isn't a reclaim pass already in progress. By default it runs every 5s based
 * on the xfs periodic sync default of 30s. Perhaps this should have its own
 * tunable, but that can be done if this method proves to be ineffective or too
 * aggressive.
 */
static void
xfs_reclaim_work_queue(
	struct xfs_mount	*mp)
{
	rcu_read_lock();
	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
		queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
			msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
	}
	rcu_read_unlock();
}

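/*
 * Worked example of the delay computed above: with the default
 * xfs_syncd_centisecs value of 3000 (the 30 second periodic sync default),
 * the queue delay is 3000 / 6 * 10 = 5000ms, i.e. a new background reclaim
 * pass every 5 seconds, matching the comment preceding this function.
 */
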
/*
 * This is a fast pass over the inode cache to try to get reclaim moving on as
 * many inodes as possible in a short period of time. It kicks itself every few
 * seconds, as well as being kicked by the inode cache shrinker when memory
 * goes low. It scans as quickly as possible avoiding locked inodes or those
 * already being flushed, and once done schedules a future pass.
 */
void
xfs_reclaim_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(to_delayed_work(work),
					struct xfs_mount, m_reclaim_work);

	xfs_reclaim_inodes(mp, SYNC_TRYLOCK);
	xfs_reclaim_work_queue(mp);
}

static void
xfs_perag_set_reclaim_tag(
	struct xfs_perag	*pag)
{
	struct xfs_mount	*mp = pag->pag_mount;

	lockdep_assert_held(&pag->pag_ici_lock);
	if (pag->pag_ici_reclaimable++)
		return;

	/* propagate the reclaim tag up into the perag radix tree */
	spin_lock(&mp->m_perag_lock);
	radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno,
			   XFS_ICI_RECLAIM_TAG);
	spin_unlock(&mp->m_perag_lock);

	/* schedule periodic background inode reclaim */
	xfs_reclaim_work_queue(mp);

	trace_xfs_perag_set_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
}

static void
xfs_perag_clear_reclaim_tag(
	struct xfs_perag	*pag)
{
	struct xfs_mount	*mp = pag->pag_mount;

	lockdep_assert_held(&pag->pag_ici_lock);
	if (--pag->pag_ici_reclaimable)
		return;

	/* clear the reclaim tag from the perag radix tree */
	spin_lock(&mp->m_perag_lock);
	radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno,
			     XFS_ICI_RECLAIM_TAG);
	spin_unlock(&mp->m_perag_lock);
	trace_xfs_perag_clear_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
}

/*
 * We set the inode flag atomically with the radix tree tag.
 * Once we get tag lookups on the radix tree, this inode flag
 * can go away.
 */
void
xfs_inode_set_reclaim_tag(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_perag	*pag;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);

	radix_tree_tag_set(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino),
			   XFS_ICI_RECLAIM_TAG);
	xfs_perag_set_reclaim_tag(pag);
	__xfs_iflags_set(ip, XFS_IRECLAIMABLE);

	spin_unlock(&ip->i_flags_lock);
	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}

STATIC void
xfs_inode_clear_reclaim_tag(
	struct xfs_perag	*pag,
	xfs_ino_t		ino)
{
	radix_tree_tag_clear(&pag->pag_ici_root,
			     XFS_INO_TO_AGINO(pag->pag_mount, ino),
			     XFS_ICI_RECLAIM_TAG);
	xfs_perag_clear_reclaim_tag(pag);
}

static void
xfs_inew_wait(
	struct xfs_inode	*ip)
{
	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_INEW_BIT);
	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_INEW_BIT);

	do {
		prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
		if (!xfs_iflags_test(ip, XFS_INEW))
			break;
		schedule();
	} while (true);
	finish_wait(wq, &wait.wq_entry);
}

/*
 * When we recycle a reclaimable inode, we need to re-initialise the VFS inode
 * part of the structure. This is made more complex by the fact that we store
 * information about the on-disk values in the VFS inode and so we can't just
 * overwrite the values unconditionally. Hence we save the parameters we
 * need to retain across reinitialisation, and rewrite them into the VFS inode
 * after reinitialisation even if it fails.
 */
static int
xfs_reinit_inode(
	struct xfs_mount	*mp,
	struct inode		*inode)
{
	int		error;
	uint32_t	nlink = inode->i_nlink;
	uint32_t	generation = inode->i_generation;
	uint64_t	version = inode->i_version;
	umode_t		mode = inode->i_mode;

	error = inode_init_always(mp->m_super, inode);

	set_nlink(inode, nlink);
	inode->i_generation = generation;
	inode->i_version = version;
	inode->i_mode = mode;
	return error;
}

/*
 * Check the validity of the inode we just found in the cache.
 */
static int
xfs_iget_cache_hit(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip,
	xfs_ino_t		ino,
	int			flags,
	int			lock_flags) __releases(RCU)
{
	struct inode		*inode = VFS_I(ip);
	struct xfs_mount	*mp = ip->i_mount;
	int			error;

	/*
	 * check for re-use of an inode within an RCU grace period due to the
	 * radix tree nodes not being updated yet. We monitor for this by
	 * setting the inode number to zero before freeing the inode structure.
	 * If the inode has been reallocated and set up, then the inode number
	 * will not match, so check for that, too.
	 */
	spin_lock(&ip->i_flags_lock);
	if (ip->i_ino != ino) {
		trace_xfs_iget_skip(ip);
		XFS_STATS_INC(mp, xs_ig_frecycle);
		error = -EAGAIN;
		goto out_error;
	}

	/*
	 * If we are racing with another cache hit that is currently
	 * instantiating this inode or currently recycling it out of
	 * reclaimable state, wait for the initialisation to complete
	 * before continuing.
	 *
	 * XXX(hch): eventually we should do something equivalent to
	 *	     wait_on_inode to wait for these flags to be cleared
	 *	     instead of polling for it.
	 */
	if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
		trace_xfs_iget_skip(ip);
		XFS_STATS_INC(mp, xs_ig_frecycle);
		error = -EAGAIN;
		goto out_error;
	}

	/*
	 * If lookup is racing with unlink return an error immediately.
	 */
	if (VFS_I(ip)->i_mode == 0 && !(flags & XFS_IGET_CREATE)) {
		error = -ENOENT;
		goto out_error;
	}

	/*
	 * If IRECLAIMABLE is set, we've torn down the VFS inode already.
	 * Need to carefully get it back into usable state.
	 */
	if (ip->i_flags & XFS_IRECLAIMABLE) {
		trace_xfs_iget_reclaim(ip);

		if (flags & XFS_IGET_INCORE) {
			error = -EAGAIN;
			goto out_error;
		}

		/*
		 * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode
		 * from stomping over us while we recycle the inode. We can't
		 * clear the radix tree reclaimable tag yet as it requires
		 * pag_ici_lock to be held exclusive.
		 */
		ip->i_flags |= XFS_IRECLAIM;

		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();

		error = xfs_reinit_inode(mp, inode);
		if (error) {
			bool wake;
			/*
			 * Re-initializing the inode failed, and we are in deep
			 * trouble.  Try to re-add it to the reclaim list.
			 */
			rcu_read_lock();
			spin_lock(&ip->i_flags_lock);
			wake = !!__xfs_iflags_test(ip, XFS_INEW);
			ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
			if (wake)
				wake_up_bit(&ip->i_flags, __XFS_INEW_BIT);
			ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
			trace_xfs_iget_reclaim_fail(ip);
			goto out_error;
		}

		spin_lock(&pag->pag_ici_lock);
		spin_lock(&ip->i_flags_lock);

		/*
		 * Clear the per-lifetime state in the inode as we are now
		 * effectively a new inode and need to return to the initial
		 * state before reuse occurs.
		 */
		ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
		ip->i_flags |= XFS_INEW;
		xfs_inode_clear_reclaim_tag(pag, ip->i_ino);
		inode->i_state = I_NEW;

		ASSERT(!rwsem_is_locked(&inode->i_rwsem));
		init_rwsem(&inode->i_rwsem);

		spin_unlock(&ip->i_flags_lock);
		spin_unlock(&pag->pag_ici_lock);
	} else {
		/* If the VFS inode is being torn down, pause and try again. */
		if (!igrab(inode)) {
			trace_xfs_iget_skip(ip);
			error = -EAGAIN;
			goto out_error;
		}

		/* We've got a live one. */
		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();
		trace_xfs_iget_hit(ip);
	}

	if (lock_flags != 0)
		xfs_ilock(ip, lock_flags);

	if (!(flags & XFS_IGET_INCORE))
		xfs_iflags_clear(ip, XFS_ISTALE | XFS_IDONTCACHE);
	XFS_STATS_INC(mp, xs_ig_found);

	return 0;

out_error:
	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();
	return error;
}

static int
xfs_iget_cache_miss(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	xfs_trans_t		*tp,
	xfs_ino_t		ino,
	struct xfs_inode	**ipp,
	int			flags,
	int			lock_flags)
{
	struct xfs_inode	*ip;
	int			error;
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);
	int			iflags;

	ip = xfs_inode_alloc(mp, ino);
	if (!ip)
		return -ENOMEM;

	error = xfs_iread(mp, tp, ip, flags);
	if (error)
		goto out_destroy;

	trace_xfs_iget_miss(ip);

	if ((VFS_I(ip)->i_mode == 0) && !(flags & XFS_IGET_CREATE)) {
		error = -ENOENT;
		goto out_destroy;
	}

	/*
	 * Preload the radix tree so we can insert safely under the
	 * write spinlock. Note that we cannot sleep inside the preload
	 * region. Since we can be called from transaction context, don't
	 * recurse into the file system.
	 */
	if (radix_tree_preload(GFP_NOFS)) {
		error = -EAGAIN;
		goto out_destroy;
	}

	/*
	 * Because the inode hasn't been added to the radix-tree yet it can't
	 * be found by another thread, so we can do the non-sleeping lock here.
	 */
	if (lock_flags) {
		if (!xfs_ilock_nowait(ip, lock_flags))
			BUG();
	}

	/*
	 * These values must be set before inserting the inode into the radix
	 * tree as the moment it is inserted a concurrent lookup (allowed by the
	 * RCU locking mechanism) can find it and that lookup must see that this
	 * is an inode currently under construction (i.e. that XFS_INEW is set).
	 * The ip->i_flags_lock that protects the XFS_INEW flag forms the
	 * memory barrier that ensures this detection works correctly at lookup
	 * time.
	 */
	iflags = XFS_INEW;
	if (flags & XFS_IGET_DONTCACHE)
		iflags |= XFS_IDONTCACHE;
	ip->i_udquot = NULL;
	ip->i_gdquot = NULL;
	ip->i_pdquot = NULL;
	xfs_iflags_set(ip, iflags);

	/* insert the new inode */
	spin_lock(&pag->pag_ici_lock);
	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
	if (unlikely(error)) {
		WARN_ON(error != -EEXIST);
		XFS_STATS_INC(mp, xs_ig_dup);
		error = -EAGAIN;
		goto out_preload_end;
	}
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();

	*ipp = ip;
	return 0;

out_preload_end:
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
out_destroy:
	__destroy_inode(VFS_I(ip));
	xfs_inode_free(ip);
	return error;
}

/*
 * Look up an inode by number in the given file system.
 * The inode is looked up in the cache held in each AG.
 * If the inode is found in the cache, initialise the vfs inode
 * if necessary.
 *
 * If it is not in core, read it in from the file system's device,
 * add it to the cache and initialise the vfs inode.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * This flag parameter indicates how and if the inode's IO lock and inode lock
 * should be taken.
 *
 * mp -- the mount point structure for the current file system.  It holds
 *       the per-AG radix trees in which the in-core inodes are cached.
 * tp -- a pointer to the current transaction if there is one.  This is
 *       simply passed through to the xfs_iread() call.
 * ino -- the number of the inode desired.  This is the unique identifier
 *        within the file system for the inode being requested.
 * lock_flags -- flags indicating how to lock the inode.  See the comment
 *		 for xfs_ilock() for a list of valid values.
 */
int
xfs_iget(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	uint		flags,
	uint		lock_flags,
	xfs_inode_t	**ipp)
{
	xfs_inode_t	*ip;
	int		error;
	xfs_perag_t	*pag;
	xfs_agino_t	agino;

	/*
	 * xfs_reclaim_inode() uses the ILOCK to ensure an inode
	 * doesn't get freed while it's being referenced during a
	 * radix tree traversal here.  It assumes this function
	 * acquires only the ILOCK (and therefore it has no need to
	 * involve the IOLOCK in this synchronization).
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);

	/* reject inode numbers outside existing AGs */
	if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
		return -EINVAL;

	XFS_STATS_INC(mp, xs_ig_attempts);

	/* get the perag structure and ensure that it's inode capable */
	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
	agino = XFS_INO_TO_AGINO(mp, ino);

again:
	error = 0;
	rcu_read_lock();
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);

	if (ip) {
		error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
		if (error)
			goto out_error_or_again;
	} else {
		rcu_read_unlock();
		if (flags & XFS_IGET_INCORE) {
			error = -ENOENT;
			goto out_error_or_again;
		}
		XFS_STATS_INC(mp, xs_ig_missed);

		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
							flags, lock_flags);
		if (error)
			goto out_error_or_again;
	}
	xfs_perag_put(pag);

	*ipp = ip;

	/*
	 * If we have a real type for an on-disk inode, we can setup the inode
	 * now.  If it's a new inode being created, xfs_ialloc will handle it.
	 */
	if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0)
		xfs_setup_existing_inode(ip);
	return 0;

out_error_or_again:
	if (!(flags & XFS_IGET_INCORE) && error == -EAGAIN) {
		delay(1);
		goto again;
	}
	xfs_perag_put(pag);
	return error;
}

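/*
 * Illustrative sketch (not part of the original file): how a hypothetical
 * caller might use xfs_iget() to look up an inode, hold it locked across
 * some work, then drop the lock and the reference. The function name and
 * the "work" placeholder are assumptions for illustration only.
 */
static inline int
example_iget_usage(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;
	int			error;

	/* look up the inode, taking the ILOCK in shared mode */
	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip);
	if (error)
		return error;	/* e.g. -EINVAL or -ENOENT */

	/* ... inspect the locked inode here ... */

	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	IRELE(ip);		/* drop the reference xfs_iget() took */
	return 0;
}
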
/*
 * "Is this a cached inode that's also allocated?"
 *
 * Look up an inode by number in the given file system.  If the inode is
 * in cache and isn't in purgatory, return 1 if the inode is allocated
 * and 0 if it is not.  For all other cases (not in cache, being torn
 * down, etc.), return a negative error code.
 *
 * The caller has to prevent inode allocation and freeing activity,
 * presumably by locking the AGI buffer.  This is to ensure that an
 * inode cannot transition from allocated to freed until the caller is
 * ready to allow that.  If the inode is in an intermediate state (new,
 * reclaimable, or being reclaimed), -EAGAIN will be returned; if the
 * inode is not in the cache, -ENOENT will be returned.  The caller must
 * deal with these scenarios appropriately.
 *
 * This is a specialized use case for the online scrubber; if you're
 * reading this, you probably want xfs_iget.
 */
int
xfs_icache_inode_is_allocated(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_ino_t		ino,
	bool			*inuse)
{
	struct xfs_inode	*ip;
	int			error;

	error = xfs_iget(mp, tp, ino, XFS_IGET_INCORE, 0, &ip);
	if (error)
		return error;

	*inuse = !!(VFS_I(ip)->i_mode);
	IRELE(ip);
	return 0;
}

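/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * scrub-style caller of xfs_icache_inode_is_allocated(). It assumes the
 * caller already holds the AGI buffer locked, as required by the comment
 * above; the function name and error handling policy are assumptions.
 */
static inline int
example_check_cached_inode(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	bool			inuse;
	int			error;

	error = xfs_icache_inode_is_allocated(mp, NULL, ino, &inuse);
	if (error == -ENOENT)
		return 0;	/* not cached: caller must check disk state */
	if (error == -EAGAIN)
		return -EAGAIN;	/* inode in flux: back off and retry */
	if (error)
		return error;

	/* inuse now says whether the cached inode is allocated */
	return inuse ? 1 : 0;
}
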
/*
 * The inode lookup is done in batches to keep the amount of lock traffic and
 * radix tree lookups to a minimum. The batch size is a trade off between
 * lookup reduction and stack usage. This is in the reclaim path, so we can't
 * be too greedy.
 */
#define XFS_LOOKUP_BATCH	32

STATIC int
xfs_inode_ag_walk_grab(
	struct xfs_inode	*ip,
	int			flags)
{
	struct inode		*inode = VFS_I(ip);
	bool			newinos = !!(flags & XFS_AGITER_INEW_WAIT);

	ASSERT(rcu_read_lock_held());

	/*
	 * Check for a stale RCU freed inode.
	 *
	 * If the inode has been reallocated, it doesn't matter if it's not in
	 * the AG we are walking - we are walking for writeback, so if it
	 * passes all the "valid inode" checks and is dirty, then we'll write
	 * it back anyway.  If it has been reallocated and still being
	 * initialised, the XFS_INEW check below will catch it.
	 */
	spin_lock(&ip->i_flags_lock);
	if (!ip->i_ino)
		goto out_unlock_noent;

	/* avoid new or reclaimable inodes. Leave for reclaim code to flush */
	if ((!newinos && __xfs_iflags_test(ip, XFS_INEW)) ||
	    __xfs_iflags_test(ip, XFS_IRECLAIMABLE | XFS_IRECLAIM))
		goto out_unlock_noent;
	spin_unlock(&ip->i_flags_lock);

	/* nothing to sync during shutdown */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EFSCORRUPTED;

	/* If we can't grab the inode, it must be on its way to reclaim. */
	if (!igrab(inode))
		return -ENOENT;

	/* inode is valid */
	return 0;

out_unlock_noent:
	spin_unlock(&ip->i_flags_lock);
	return -ENOENT;
}

STATIC int
xfs_inode_ag_walk(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	int			(*execute)(struct xfs_inode *ip, int flags,
					   void *args),
	int			flags,
	void			*args,
	int			tag,
	int			iter_flags)
{
	uint32_t		first_index;
	int			last_error = 0;
	int			skipped;
	int			done;
	int			nr_found;

restart:
	done = 0;
	skipped = 0;
	first_index = 0;
	nr_found = 0;
	do {
		struct xfs_inode *batch[XFS_LOOKUP_BATCH];
		int		error = 0;
		int		i;

		rcu_read_lock();

		if (tag == -1)
			nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH);
		else
			nr_found = radix_tree_gang_lookup_tag(
					&pag->pag_ici_root,
					(void **) batch, first_index,
					XFS_LOOKUP_BATCH, tag);

		if (!nr_found) {
			rcu_read_unlock();
			break;
		}

		/*
		 * Grab the inodes before we drop the lock. If we found
		 * nothing, nr == 0 and the loop will be skipped.
		 */
		for (i = 0; i < nr_found; i++) {
			struct xfs_inode *ip = batch[i];

			if (done || xfs_inode_ag_walk_grab(ip, iter_flags))
				batch[i] = NULL;

			/*
			 * Update the index for the next lookup. Catch
			 * overflows into the next AG range which can occur if
			 * we have inodes in the last block of the AG and we
			 * are currently pointing to the last inode.
			 *
			 * Because we may see inodes that are from the wrong AG
			 * due to RCU freeing and reallocation, only update the
			 * index if it lies in this AG. It was a race that led
			 * us to see this inode, so another lookup from the
			 * same index will not find it again.
			 */
			if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
				continue;
			first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
			if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
				done = 1;
		}

		/* unlock now we've grabbed the inodes. */
		rcu_read_unlock();

		for (i = 0; i < nr_found; i++) {
			if (!batch[i])
				continue;
			if ((iter_flags & XFS_AGITER_INEW_WAIT) &&
			    xfs_iflags_test(batch[i], XFS_INEW))
				xfs_inew_wait(batch[i]);
			error = execute(batch[i], flags, args);
			IRELE(batch[i]);
			if (error == -EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != -EFSCORRUPTED)
				last_error = error;
		}

		/* bail out if the filesystem is corrupted. */
		if (error == -EFSCORRUPTED)
			break;

		cond_resched();

	} while (nr_found && !done);

	if (skipped) {
		delay(1);
		goto restart;
	}
	return last_error;
}

/*
 * Background scanning to trim post-EOF preallocated space. This is queued
 * based on the 'speculative_prealloc_lifetime' tunable (5m by default).
 */
void
xfs_queue_eofblocks(
	struct xfs_mount *mp)
{
	rcu_read_lock();
	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_EOFBLOCKS_TAG))
		queue_delayed_work(mp->m_eofblocks_workqueue,
				   &mp->m_eofblocks_work,
				   msecs_to_jiffies(xfs_eofb_secs * 1000));
	rcu_read_unlock();
}

void
xfs_eofblocks_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(to_delayed_work(work),
				struct xfs_mount, m_eofblocks_work);
	xfs_icache_free_eofblocks(mp, NULL);
	xfs_queue_eofblocks(mp);
}

/*
 * Background scanning to trim preallocated CoW space. This is queued
 * based on the 'speculative_cow_prealloc_lifetime' tunable (5m by default).
 * (We'll just piggyback on the post-EOF prealloc space workqueue.)
 */
STATIC void
xfs_queue_cowblocks(
	struct xfs_mount *mp)
{
	rcu_read_lock();
	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_COWBLOCKS_TAG))
		queue_delayed_work(mp->m_eofblocks_workqueue,
				   &mp->m_cowblocks_work,
				   msecs_to_jiffies(xfs_cowb_secs * 1000));
	rcu_read_unlock();
}

void
xfs_cowblocks_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(to_delayed_work(work),
				struct xfs_mount, m_cowblocks_work);
	xfs_icache_free_cowblocks(mp, NULL);
	xfs_queue_cowblocks(mp);
}

int
xfs_inode_ag_iterator_flags(
	struct xfs_mount	*mp,
	int			(*execute)(struct xfs_inode *ip, int flags,
					   void *args),
	int			flags,
	void			*args,
	int			iter_flags)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;

	ag = 0;
	while ((pag = xfs_perag_get(mp, ag))) {
		ag = pag->pag_agno + 1;
		error = xfs_inode_ag_walk(mp, pag, execute, flags, args, -1,
					  iter_flags);
		xfs_perag_put(pag);
		if (error) {
			last_error = error;
			if (error == -EFSCORRUPTED)
				break;
		}
	}
	return last_error;
}

int
xfs_inode_ag_iterator(
	struct xfs_mount	*mp,
	int			(*execute)(struct xfs_inode *ip, int flags,
					   void *args),
	int			flags,
	void			*args)
{
	return xfs_inode_ag_iterator_flags(mp, execute, flags, args, 0);
}

int
xfs_inode_ag_iterator_tag(
	struct xfs_mount	*mp,
	int			(*execute)(struct xfs_inode *ip, int flags,
					   void *args),
	int			flags,
	void			*args,
	int			tag)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;

	ag = 0;
	while ((pag = xfs_perag_get_tag(mp, ag, tag))) {
		ag = pag->pag_agno + 1;
		error = xfs_inode_ag_walk(mp, pag, execute, flags, args, tag,
					  0);
		xfs_perag_put(pag);
		if (error) {
			last_error = error;
			if (error == -EFSCORRUPTED)
				break;
		}
	}
	return last_error;
}

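/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * execute() callback and a wrapper that runs it over every cached inode
 * via xfs_inode_ag_iterator(). Both names are made up for this example.
 */
static int
example_count_one_inode(
	struct xfs_inode	*ip,
	int			flags,
	void			*args)
{
	/* the walk code holds a reference on ip while we run */
	(*(unsigned long *)args)++;
	return 0;
}

static inline int
example_count_cached_inodes(
	struct xfs_mount	*mp,
	unsigned long		*count)
{
	*count = 0;
	return xfs_inode_ag_iterator(mp, example_count_one_inode, 0, count);
}
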
/*
 * Grab the inode for reclaim exclusively.
 * Return 0 if we grabbed it, non-zero otherwise.
 */
STATIC int
xfs_reclaim_inode_grab(
	struct xfs_inode	*ip,
	int			flags)
{
	ASSERT(rcu_read_lock_held());

	/* quick check for stale RCU freed inode */
	if (!ip->i_ino)
		return 1;

	/*
	 * If we are asked for non-blocking operation, do unlocked checks to
	 * see if the inode already is being flushed or in reclaim to avoid
	 * lock traffic.
	 */
	if ((flags & SYNC_TRYLOCK) &&
	    __xfs_iflags_test(ip, XFS_IFLOCK | XFS_IRECLAIM))
		return 1;

	/*
	 * The radix tree lock here protects a thread in xfs_iget from racing
	 * with us starting reclaim on the inode.  Once we have the
	 * XFS_IRECLAIM flag set it will not touch us.
	 *
	 * Due to RCU lookup, we may find inodes that have been freed and only
	 * have XFS_IRECLAIM set.  Indeed, we may see reallocated inodes that
	 * aren't candidates for reclaim at all, so we must check that
	 * XFS_IRECLAIMABLE is set first before proceeding to reclaim.
	 */
	spin_lock(&ip->i_flags_lock);
	if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
	    __xfs_iflags_test(ip, XFS_IRECLAIM)) {
		/* not a reclaim candidate. */
		spin_unlock(&ip->i_flags_lock);
		return 1;
	}
	__xfs_iflags_set(ip, XFS_IRECLAIM);
	spin_unlock(&ip->i_flags_lock);
	return 0;
}

/*
 * Inodes in different states need to be treated differently. The following
 * table lists the inode states and the reclaim actions necessary:
 *
 *	inode state	     iflush ret		required action
 *	---------------      ----------		---------------
 *	bad			-		reclaim
 *	shutdown		EIO		unpin and reclaim
 *	clean, unpinned		0		reclaim
 *	stale, unpinned		0		reclaim
 *	clean, pinned(*)	0		requeue
 *	stale, pinned		EAGAIN		requeue
 *	dirty, async		-		requeue
 *	dirty, sync		0		reclaim
 *
 * (*) dgc: I don't think the clean, pinned state is possible but it gets
 * handled anyway given the order of checks implemented.
 *
 * Also, because we get the flush lock first, we know that any inode that has
 * been flushed delwri has had the flush completed by the time we check that
 * the inode is clean.
 *
 * Note that because the inode is flushed delayed write by AIL pushing, the
 * flush lock may already be held here and waiting on it can result in very
 * long latencies.  Hence for sync reclaims, where we wait on the flush lock,
 * the caller should push the AIL first before trying to reclaim inodes to
 * minimise the amount of time spent waiting.  For background reclaim, we only
 * bother to reclaim clean inodes anyway.
 *
 * Hence the order of actions after gaining the locks should be:
 *	bad		=> reclaim
 *	shutdown	=> unpin and reclaim
 *	pinned, async	=> requeue
 *	pinned, sync	=> unpin
 *	stale		=> reclaim
 *	clean		=> reclaim
 *	dirty, async	=> requeue
 *	dirty, sync	=> flush, wait and reclaim
 */
STATIC int
xfs_reclaim_inode(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			sync_mode)
{
	struct xfs_buf		*bp = NULL;
	xfs_ino_t		ino = ip->i_ino; /* for radix_tree_delete */
	int			error;

restart:
	error = 0;
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (!xfs_iflock_nowait(ip)) {
		if (!(sync_mode & SYNC_WAIT))
			goto out;
		xfs_iflock(ip);
	}

	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		xfs_iunpin_wait(ip);
		/* xfs_iflush_abort() drops the flush lock */
		xfs_iflush_abort(ip, false);
		goto reclaim;
	}
	if (xfs_ipincount(ip)) {
		if (!(sync_mode & SYNC_WAIT))
			goto out_ifunlock;
		xfs_iunpin_wait(ip);
	}
	if (xfs_iflags_test(ip, XFS_ISTALE) || xfs_inode_clean(ip)) {
		xfs_ifunlock(ip);
		goto reclaim;
	}

	/*
	 * Never flush out dirty data during non-blocking reclaim, as it would
	 * just contend with AIL pushing trying to do the same job.
	 */
	if (!(sync_mode & SYNC_WAIT))
		goto out_ifunlock;

	/*
	 * Now we have an inode that needs flushing.
	 *
	 * Note that xfs_iflush will never block on the inode buffer lock, as
	 * xfs_ifree_cluster() can lock the inode buffer before it locks the
	 * ip->i_lock, and we are doing the exact opposite here.  As a result,
	 * doing a blocking xfs_imap_to_bp() to get the cluster buffer would
	 * result in an ABBA deadlock with xfs_ifree_cluster().
	 *
	 * As xfs_ifree_cluster() must gather all inodes that are active in the
	 * cache to mark them stale, if we hit this case we don't actually want
	 * to do IO here - we want the inode marked stale so we can simply
	 * reclaim it.  Hence if we get an EAGAIN error here, just unlock the
	 * inode, back off and try again.  Hopefully the next pass through will
	 * see the stale flag set on the inode.
	 */
	error = xfs_iflush(ip, &bp);
	if (error == -EAGAIN) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		/* backoff longer than in xfs_ifree_cluster */
		delay(2);
		goto restart;
	}

	if (!error) {
		error = xfs_bwrite(bp);
		xfs_buf_relse(bp);
	}

reclaim:
	ASSERT(!xfs_isiflocked(ip));

	/*
	 * Because we use RCU freeing we need to ensure the inode always appears
	 * to be reclaimed with an invalid inode number when in the free state.
	 * We do this as early as possible under the ILOCK so that
	 * xfs_iflush_cluster() can be guaranteed to detect races with us here.
	 * By doing this, we guarantee that once xfs_iflush_cluster() has locked
	 * XFS_ILOCK that it will see either a valid, flushable inode that will
	 * serialise correctly, or it will see a clean (and invalid) inode that
	 * it can skip.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	spin_unlock(&ip->i_flags_lock);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	XFS_STATS_INC(ip->i_mount, xs_ig_reclaims);
	/*
	 * Remove the inode from the per-AG radix tree.
	 *
	 * Because radix_tree_delete won't complain even if the item was never
	 * added to the tree, assert that it's been there before to catch
	 * problems with the inode life time early on.
	 */
	spin_lock(&pag->pag_ici_lock);
	if (!radix_tree_delete(&pag->pag_ici_root,
				XFS_INO_TO_AGINO(ip->i_mount, ino)))
		ASSERT(0);
	xfs_perag_clear_reclaim_tag(pag);
	spin_unlock(&pag->pag_ici_lock);

	/*
	 * Here we do an (almost) spurious inode lock in order to coordinate
	 * with inode cache radix tree lookups.  This is because the lookup
	 * can reference the inodes in the cache without taking references.
	 *
	 * We make that OK here by ensuring that we wait until the inode is
	 * unlocked after the lookup before we go ahead and free it.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_qm_dqdetach(ip);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	__xfs_inode_free(ip);
	return error;

out_ifunlock:
	xfs_ifunlock(ip);
out:
	xfs_iflags_clear(ip, XFS_IRECLAIM);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	/*
	 * We could return -EAGAIN here to make reclaim rescan the inode tree in
	 * a short while. However, this just burns CPU time scanning the tree
	 * waiting for IO to complete and the reclaim work never goes back to
	 * the idle state. Instead, return 0 to let the next scheduled
	 * background reclaim attempt to reclaim the inode again.
	 */
	return 0;
}

/*
 * Walk the AGs and reclaim the inodes in them. Even if the filesystem is
 * corrupted, we still want to try to reclaim all the inodes. If we don't,
 * then a shutdown during a filesystem unmount reclaim walk would leak all
 * the unreclaimed inodes.
 */
STATIC int
xfs_reclaim_inodes_ag(
	struct xfs_mount	*mp,
	int			flags,
	int			*nr_to_scan)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;
	int			trylock = flags & SYNC_TRYLOCK;
	int			skipped;

restart:
	ag = 0;
	skipped = 0;
	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		unsigned long	first_index = 0;
		int		done = 0;
		int		nr_found = 0;

		ag = pag->pag_agno + 1;

		if (trylock) {
			if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) {
				skipped++;
				xfs_perag_put(pag);
				continue;
			}
			first_index = pag->pag_ici_reclaim_cursor;
		} else
			mutex_lock(&pag->pag_ici_reclaim_lock);

		do {
			struct xfs_inode *batch[XFS_LOOKUP_BATCH];
			int	i;

			rcu_read_lock();
			nr_found = radix_tree_gang_lookup_tag(
					&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH,
					XFS_ICI_RECLAIM_TAG);
			if (!nr_found) {
				done = 1;
				rcu_read_unlock();
				break;
			}

			/*
			 * Grab the inodes before we drop the lock. If we found
			 * nothing, nr == 0 and the loop will be skipped.
			 */
			for (i = 0; i < nr_found; i++) {
				struct xfs_inode *ip = batch[i];

				if (done || xfs_reclaim_inode_grab(ip, flags))
					batch[i] = NULL;

				/*
				 * Update the index for the next lookup. Catch
				 * overflows into the next AG range which can
				 * occur if we have inodes in the last block of
				 * the AG and we are currently pointing to the
				 * last inode.
				 *
				 * Because we may see inodes that are from the
				 * wrong AG due to RCU freeing and
				 * reallocation, only update the index if it
				 * lies in this AG. It was a race that led us
				 * to see this inode, so another lookup from
				 * the same index will not find it again.
				 */
				if (XFS_INO_TO_AGNO(mp, ip->i_ino) !=
								pag->pag_agno)
					continue;
				first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
				if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
					done = 1;
			}

			/* unlock now we've grabbed the inodes. */
			rcu_read_unlock();

			for (i = 0; i < nr_found; i++) {
				if (!batch[i])
					continue;
				error = xfs_reclaim_inode(batch[i], pag, flags);
				if (error && last_error != -EFSCORRUPTED)
					last_error = error;
			}

			*nr_to_scan -= XFS_LOOKUP_BATCH;

			cond_resched();

		} while (nr_found && !done && *nr_to_scan > 0);

		if (trylock && !done)
			pag->pag_ici_reclaim_cursor = first_index;
		else
			pag->pag_ici_reclaim_cursor = 0;
		mutex_unlock(&pag->pag_ici_reclaim_lock);
		xfs_perag_put(pag);
	}

	/*
	 * If we skipped any AG, and we still have scan count remaining, do
	 * another pass this time using blocking reclaim semantics (i.e.
	 * waiting on the reclaim locks and ignoring the reclaim cursors). This
	 * ensures that when we get more reclaimers than AGs we block rather
	 * than spin trying to execute reclaim.
	 */
	if (skipped && (flags & SYNC_WAIT) && *nr_to_scan > 0) {
		trylock = 0;
		goto restart;
	}
	return last_error;
}

int
xfs_reclaim_inodes(
	xfs_mount_t	*mp,
	int		mode)
{
	int		nr_to_scan = INT_MAX;

	return xfs_reclaim_inodes_ag(mp, mode, &nr_to_scan);
}

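/*
 * Illustrative sketch (not part of the original file): the two typical
 * reclaim modes. Background reclaim uses non-blocking SYNC_TRYLOCK
 * semantics, as xfs_reclaim_worker() does above, while unmount-style
 * reclaim blocks until every inode is cleaned and freed. The wrapper
 * name is an assumption for illustration.
 */
static inline void
example_reclaim_modes(
	struct xfs_mount	*mp)
{
	/* opportunistic pass: skip locked or flushing inodes, never block */
	xfs_reclaim_inodes(mp, SYNC_TRYLOCK);

	/* blocking pass: wait on flush locks and pins until all are gone */
	xfs_reclaim_inodes(mp, SYNC_WAIT);
}
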
/*
 * Scan a certain number of inodes for reclaim.
 *
 * When called we make sure that there is a background (fast) inode reclaim in
 * progress, while we throttle the speed of reclaim by doing synchronous
 * reclaim of inodes. That means if we come across dirty inodes, we wait for
 * them to be cleaned, which we hope will not be very long due to the
 * background walker having already kicked the IO off on those dirty inodes.
 */
long
xfs_reclaim_inodes_nr(
	struct xfs_mount	*mp,
	int			nr_to_scan)
{
	/* kick background reclaimer and push the AIL */
	xfs_reclaim_work_queue(mp);
	xfs_ail_push_all(mp->m_ail);

	return xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT, &nr_to_scan);
}

/*
 * Return the number of reclaimable inodes in the filesystem for
 * the shrinker to determine how much to reclaim.
 */
int
xfs_reclaim_inodes_count(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		ag = 0;
	int			reclaimable = 0;

	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		ag = pag->pag_agno + 1;
		reclaimable += pag->pag_ici_reclaimable;
		xfs_perag_put(pag);
	}
	return reclaimable;
}

STATIC int
xfs_inode_match_id(
	struct xfs_inode	*ip,
	struct xfs_eofblocks	*eofb)
{
	if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
	    !uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
		return 0;

	if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
	    !gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
		return 0;

	if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
	    xfs_get_projid(ip) != eofb->eof_prid)
		return 0;

	return 1;
}

/*
 * A union-based inode filtering algorithm. Process the inode if any of the
 * criteria match. This is for global/internal scans only.
 */
STATIC int
xfs_inode_match_id_union(
	struct xfs_inode	*ip,
	struct xfs_eofblocks	*eofb)
{
	if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
	    uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
		return 1;

	if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
	    gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
		return 1;

	if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
	    xfs_get_projid(ip) == eofb->eof_prid)
		return 1;

	return 0;
}

STATIC int
xfs_inode_free_eofblocks(
	struct xfs_inode	*ip,
	int			flags,
	void			*args)
{
	int			ret = 0;
	struct xfs_eofblocks	*eofb = args;
	int			match;

	if (!xfs_can_free_eofblocks(ip, false)) {
		/* inode could be preallocated or append-only */
		trace_xfs_inode_free_eofblocks_invalid(ip);
		xfs_inode_clear_eofblocks_tag(ip);
		return 0;
	}

	/*
	 * If the mapping is dirty the operation can block and wait for some
	 * time. Unless we are waiting, skip it.
	 */
	if (!(flags & SYNC_WAIT) &&
	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
		return 0;

	if (eofb) {
		if (eofb->eof_flags & XFS_EOF_FLAGS_UNION)
			match = xfs_inode_match_id_union(ip, eofb);
		else
			match = xfs_inode_match_id(ip, eofb);
		if (!match)
			return 0;

		/* skip the inode if the file size is too small */
		if (eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE &&
		    XFS_ISIZE(ip) < eofb->eof_min_file_size)
			return 0;
	}

	/*
	 * If the caller is waiting, return -EAGAIN to keep the background
	 * scanner moving and revisit the inode in a subsequent pass.
	 */
	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
		if (flags & SYNC_WAIT)
			ret = -EAGAIN;
		return ret;
	}
	ret = xfs_free_eofblocks(ip);
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);

	return ret;
}

static int
__xfs_icache_free_eofblocks(
	struct xfs_mount	*mp,
	struct xfs_eofblocks	*eofb,
	int			(*execute)(struct xfs_inode *ip, int flags,
					   void *args),
	int			tag)
{
	int flags = SYNC_TRYLOCK;

	if (eofb && (eofb->eof_flags & XFS_EOF_FLAGS_SYNC))
		flags = SYNC_WAIT;

	return xfs_inode_ag_iterator_tag(mp, execute, flags,
					 eofb, tag);
}

int
xfs_icache_free_eofblocks(
	struct xfs_mount	*mp,
	struct xfs_eofblocks	*eofb)
{
	return __xfs_icache_free_eofblocks(mp, eofb, xfs_inode_free_eofblocks,
			XFS_ICI_EOFBLOCKS_TAG);
}

/*
 * Run eofblocks scans on the quotas applicable to the inode. For inodes with
 * multiple quotas, we don't know exactly which quota caused an allocation
 * failure. We make a best effort by including each quota under low free space
 * conditions (less than 1% free space) in the scan.
 */
static int
__xfs_inode_free_quota_eofblocks(
	struct xfs_inode	*ip,
	int			(*execute)(struct xfs_mount *mp,
					   struct xfs_eofblocks	*eofb))
{
	int			scan = 0;
	struct xfs_eofblocks	eofb = {0};
	struct xfs_dquot	*dq;

	/*
	 * Run a sync scan to increase effectiveness and use the union filter
	 * to cover all applicable quotas in a single scan.
	 */
	eofb.eof_flags = XFS_EOF_FLAGS_UNION|XFS_EOF_FLAGS_SYNC;

	if (XFS_IS_UQUOTA_ENFORCED(ip->i_mount)) {
		dq = xfs_inode_dquot(ip, XFS_DQ_USER);
		if (dq && xfs_dquot_lowsp(dq)) {
			eofb.eof_uid = VFS_I(ip)->i_uid;
			eofb.eof_flags |= XFS_EOF_FLAGS_UID;
			scan = 1;
		}
	}

	if (XFS_IS_GQUOTA_ENFORCED(ip->i_mount)) {
		dq = xfs_inode_dquot(ip, XFS_DQ_GROUP);
		if (dq && xfs_dquot_lowsp(dq)) {
			eofb.eof_gid = VFS_I(ip)->i_gid;
			eofb.eof_flags |= XFS_EOF_FLAGS_GID;
			scan = 1;
		}
	}

	if (scan)
		execute(ip->i_mount, &eofb);

	return scan;
}

int
xfs_inode_free_quota_eofblocks(
	struct xfs_inode *ip)
{
	return __xfs_inode_free_quota_eofblocks(ip, xfs_icache_free_eofblocks);
}

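/*
 * Illustrative sketch (not part of the original file): building an
 * xfs_eofblocks filter by hand and running a synchronous scan with it.
 * Under XFS_EOF_FLAGS_UNION an inode matching any populated criterion is
 * processed; without it, every populated criterion must match. The wrapper
 * name and parameters are assumptions for illustration.
 */
static inline int
example_trim_eofblocks_for_user(
	struct xfs_mount	*mp,
	kuid_t			uid,
	xfs_off_t		min_size)
{
	struct xfs_eofblocks	eofb = {0};

	/* wait on dirty mappings, match only this uid, skip small files */
	eofb.eof_flags = XFS_EOF_FLAGS_SYNC | XFS_EOF_FLAGS_UID |
			 XFS_EOF_FLAGS_MINFILESIZE;
	eofb.eof_uid = uid;
	eofb.eof_min_file_size = min_size;

	return xfs_icache_free_eofblocks(mp, &eofb);
}
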
static void
__xfs_inode_set_eofblocks_tag(
	xfs_inode_t	*ip,
	void		(*execute)(struct xfs_mount *mp),
	void		(*set_tp)(struct xfs_mount *mp, xfs_agnumber_t agno,
				  int error, unsigned long caller_ip),
	int		tag)
{
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_perag *pag;
	int tagged;

	/*
	 * Don't bother locking the AG and looking up in the radix trees
	 * if we already know that we have the tag set.
	 */
	if (ip->i_flags & XFS_IEOFBLOCKS)
		return;
	spin_lock(&ip->i_flags_lock);
	ip->i_flags |= XFS_IEOFBLOCKS;
	spin_unlock(&ip->i_flags_lock);

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);

	tagged = radix_tree_tagged(&pag->pag_ici_root, tag);
	radix_tree_tag_set(&pag->pag_ici_root,
			   XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), tag);
	if (!tagged) {
		/* propagate the eofblocks tag up into the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_set(&ip->i_mount->m_perag_tree,
				   XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				   tag);
		spin_unlock(&ip->i_mount->m_perag_lock);

		/* kick off background trimming */
		execute(ip->i_mount);

		set_tp(ip->i_mount, pag->pag_agno, -1, _RET_IP_);
	}

	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}

void
xfs_inode_set_eofblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_set_eofblocks_tag(ip);
	return __xfs_inode_set_eofblocks_tag(ip, xfs_queue_eofblocks,
			trace_xfs_perag_set_eofblocks,
			XFS_ICI_EOFBLOCKS_TAG);
}

static void
__xfs_inode_clear_eofblocks_tag(
	xfs_inode_t	*ip,
	void		(*clear_tp)(struct xfs_mount *mp, xfs_agnumber_t agno,
				    int error, unsigned long caller_ip),
	int		tag)
{
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_perag *pag;

	spin_lock(&ip->i_flags_lock);
	ip->i_flags &= ~XFS_IEOFBLOCKS;
	spin_unlock(&ip->i_flags_lock);

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);

	radix_tree_tag_clear(&pag->pag_ici_root,
			     XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), tag);
	if (!radix_tree_tagged(&pag->pag_ici_root, tag)) {
		/* clear the eofblocks tag from the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
				     XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				     tag);
		spin_unlock(&ip->i_mount->m_perag_lock);
		clear_tp(ip->i_mount, pag->pag_agno, -1, _RET_IP_);
	}

	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}

void
xfs_inode_clear_eofblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_clear_eofblocks_tag(ip);
	return __xfs_inode_clear_eofblocks_tag(ip,
			trace_xfs_perag_clear_eofblocks, XFS_ICI_EOFBLOCKS_TAG);
}

/*
 * Automatic CoW Reservation Freeing
 *
 * These functions automatically garbage collect leftover CoW reservations
 * that were made on behalf of a cowextsize hint when we start to run out
 * of quota or when the reservations sit around for too long. If the file
 * has dirty pages or is undergoing writeback, its CoW reservations will
 * be retained.
 *
 * The actual garbage collection piggybacks off the same code that runs
 * the speculative EOF preallocation garbage collector.
 */
STATIC int
xfs_inode_free_cowblocks(
	struct xfs_inode	*ip,
	int			flags,
	void			*args)
{
	int			ret;
	struct xfs_eofblocks	*eofb = args;
	int			match;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);

	/*
	 * Just clear the tag if we have an empty cow fork or none at all. It's
	 * possible the inode was fully unshared since it was originally tagged.
	 */
	if (!xfs_is_reflink_inode(ip) || !ifp->if_bytes) {
		trace_xfs_inode_free_cowblocks_invalid(ip);
		xfs_inode_clear_cowblocks_tag(ip);
		return 0;
	}

	/*
	 * If the mapping is dirty or under writeback we cannot touch the
	 * CoW fork.  Leave it alone if we're in the midst of a directio.
	 */
	if ((VFS_I(ip)->i_state & I_DIRTY_PAGES) ||
	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) ||
	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_WRITEBACK) ||
	    atomic_read(&VFS_I(ip)->i_dio_count))
		return 0;

	if (eofb) {
		if (eofb->eof_flags & XFS_EOF_FLAGS_UNION)
			match = xfs_inode_match_id_union(ip, eofb);
		else
			match = xfs_inode_match_id(ip, eofb);
		if (!match)
			return 0;

		/* skip the inode if the file size is too small */
		if (eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE &&
		    XFS_ISIZE(ip) < eofb->eof_min_file_size)
			return 0;
	}

	/* Free the CoW blocks */
	xfs_ilock(ip, XFS_IOLOCK_EXCL);
	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);

	ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false);

	xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);

	return ret;
}

int
xfs_icache_free_cowblocks(
	struct xfs_mount	*mp,
	struct xfs_eofblocks	*eofb)
{
	return __xfs_icache_free_eofblocks(mp, eofb, xfs_inode_free_cowblocks,
			XFS_ICI_COWBLOCKS_TAG);
}

int
xfs_inode_free_quota_cowblocks(
	struct xfs_inode *ip)
{
	return __xfs_inode_free_quota_eofblocks(ip, xfs_icache_free_cowblocks);
}

void
xfs_inode_set_cowblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_set_cowblocks_tag(ip);
	return __xfs_inode_set_eofblocks_tag(ip, xfs_queue_cowblocks,
			trace_xfs_perag_set_cowblocks,
			XFS_ICI_COWBLOCKS_TAG);
}

void
xfs_inode_clear_cowblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_clear_cowblocks_tag(ip);
	return __xfs_inode_clear_eofblocks_tag(ip,
			trace_xfs_perag_clear_cowblocks, XFS_ICI_COWBLOCKS_TAG);
}

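/*
 * Illustrative sketch (not part of the original file): how a write path
 * might react to ENOSPC/EDQUOT by running the quota-based scans above and
 * retrying if they freed anything. The function name and retry policy are
 * assumptions; this mirrors the "free quota eofblocks/cowblocks" intent
 * described in the comments above rather than any specific caller.
 */
static inline int
example_write_enospc_retry(
	struct xfs_inode	*ip,
	int			error)
{
	if (error == -EDQUOT || error == -ENOSPC) {
		int	freed;

		/* scan for space loitering on inodes charged to the same quotas */
		freed = xfs_inode_free_quota_eofblocks(ip);
		freed |= xfs_inode_free_quota_cowblocks(ip);
		if (freed)
			return -EAGAIN;	/* caller should retry the write */
	}
	return error;
}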