// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include <linux/iversion.h>

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_dir2.h"
#include "xfs_attr.h"
#include "xfs_trans_space.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
#include "xfs_iunlink_item.h"
#include "xfs_ialloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_filestream.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_symlink.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_bmap_btree.h"
#include "xfs_reflink.h"
#include "xfs_ag.h"
#include "xfs_log_priv.h"

struct kmem_cache *xfs_inode_cache;

/*
 * Used in xfs_itruncate_extents().  This is the maximum number of extents
 * freed from a file in a single transaction.
 */
#define	XFS_ITRUNC_MAX_EXTENTS	2

STATIC int xfs_iunlink(struct xfs_trans *, struct xfs_inode *);
STATIC int xfs_iunlink_remove(struct xfs_trans *tp, struct xfs_perag *pag,
	struct xfs_inode *);

/*
 * helper function to extract extent size hint from inode
 */
xfs_extlen_t
xfs_get_extsz_hint(
	struct xfs_inode	*ip)
{
	/*
	 * No point in aligning allocations if we need to COW to actually
	 * write to them.
	 */
	if (xfs_is_always_cow_inode(ip))
		return 0;
	if ((ip->i_diflags & XFS_DIFLAG_EXTSIZE) && ip->i_extsize)
		return ip->i_extsize;
	if (XFS_IS_REALTIME_INODE(ip))
		return ip->i_mount->m_sb.sb_rextsize;
	return 0;
}

/*
 * Helper function to extract CoW extent size hint from inode.
 * Between the extent size hint and the CoW extent size hint, we
 * return the greater of the two.  If the value is zero (automatic),
 * use the default size.
 */
xfs_extlen_t
xfs_get_cowextsz_hint(
	struct xfs_inode	*ip)
{
	xfs_extlen_t		a, b;

	a = 0;
	if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
		a = ip->i_cowextsize;
	b = xfs_get_extsz_hint(ip);

	a = max(a, b);
	if (a == 0)
		return XFS_DEFAULT_COWEXTSZ_HINT;
	return a;
}
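/*
 * Worked example of the hint selection above (illustrative): a file with
 * XFS_DIFLAG_EXTSIZE set and i_extsize = 16 blocks but no CoW hint gets a
 * CoW extent size hint of max(0, 16) = 16 blocks; a file with neither
 * hint set falls back to XFS_DEFAULT_COWEXTSZ_HINT.
 */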
/*
 * These two are wrapper routines around the xfs_ilock() routine used to
 * centralize some grungy code.  They are used in places that wish to lock the
 * inode solely for reading the extents.  The reason these places can't just
 * call xfs_ilock(ip, XFS_ILOCK_SHARED) is that the inode lock also guards the
 * bringing in of the extents from disk for a file in b-tree format.  If the
 * inode is in b-tree format, then we need to lock the inode exclusively until
 * the extents are read in.  Locking it exclusively all the time would limit
 * our parallelism unnecessarily, though.  What we do instead is check to see
 * if the extents have been read in yet, and only lock the inode exclusively
 * if they have not.
 *
 * The functions return a value which should be given to the corresponding
 * xfs_iunlock() call.
 */
uint
xfs_ilock_data_map_shared(
	struct xfs_inode	*ip)
{
	uint			lock_mode = XFS_ILOCK_SHARED;

	if (xfs_need_iread_extents(&ip->i_df))
		lock_mode = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lock_mode);
	return lock_mode;
}

uint
xfs_ilock_attr_map_shared(
	struct xfs_inode	*ip)
{
	uint			lock_mode = XFS_ILOCK_SHARED;

	if (xfs_inode_has_attr_fork(ip) && xfs_need_iread_extents(&ip->i_af))
		lock_mode = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lock_mode);
	return lock_mode;
}
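/*
 * Illustrative use of the wrappers above, assuming the caller already
 * holds a reference to @ip; the returned mode must be handed back to
 * xfs_iunlock():
 *
 *	uint	lock_mode;
 *
 *	lock_mode = xfs_ilock_data_map_shared(ip);
 *	... walk the data fork extent list ...
 *	xfs_iunlock(ip, lock_mode);
 */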
/*
 * You can't set both SHARED and EXCL for the same lock,
 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_MMAPLOCK_SHARED,
 * XFS_MMAPLOCK_EXCL, XFS_ILOCK_SHARED and XFS_ILOCK_EXCL are valid values
 * to set in lock_flags.
 */
static inline void
xfs_lock_flags_assert(
	uint		lock_flags)
{
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
		(XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
		(XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
		(XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
	ASSERT(lock_flags != 0);
}

/*
 * In addition to i_rwsem in the VFS inode, the xfs inode contains 2
 * multi-reader locks: invalidate_lock and the i_lock.  This routine allows
 * various combinations of the locks to be obtained.
 *
 * The 3 locks should always be ordered so that the IO lock is obtained first,
 * the mmap lock second and the ilock last in order to prevent deadlock.
 *
 * Basic locking order:
 *
 * i_rwsem -> invalidate_lock -> page_lock -> i_ilock
 *
 * mmap_lock locking order:
 *
 * i_rwsem -> page lock -> mmap_lock
 * mmap_lock -> invalidate_lock -> page_lock
 *
 * The difference in mmap_lock locking order means that we cannot hold the
 * invalidate_lock over syscall based read(2)/write(2) based IO. These IO paths
 * can fault in pages during copy in/out (for buffered IO) or require the
 * mmap_lock in get_user_pages() to map the user pages into the kernel address
 * space for direct IO. Similarly the i_rwsem cannot be taken inside a page
 * fault because page faults already hold the mmap_lock.
 *
 * Hence to serialise fully against both syscall and mmap based IO, we need to
 * take both the i_rwsem and the invalidate_lock. These locks should *only* be
 * both taken in places where we need to invalidate the page cache in a race
 * free manner (e.g. truncate, hole punch and other extent manipulation
 * functions).
 */
void
xfs_ilock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	trace_xfs_ilock(ip, lock_flags, _RET_IP_);

	xfs_lock_flags_assert(lock_flags);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		down_write_nested(&VFS_I(ip)->i_rwsem,
				XFS_IOLOCK_DEP(lock_flags));
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		down_read_nested(&VFS_I(ip)->i_rwsem,
				XFS_IOLOCK_DEP(lock_flags));
	}

	if (lock_flags & XFS_MMAPLOCK_EXCL) {
		down_write_nested(&VFS_I(ip)->i_mapping->invalidate_lock,
				XFS_MMAPLOCK_DEP(lock_flags));
	} else if (lock_flags & XFS_MMAPLOCK_SHARED) {
		down_read_nested(&VFS_I(ip)->i_mapping->invalidate_lock,
				XFS_MMAPLOCK_DEP(lock_flags));
	}

	if (lock_flags & XFS_ILOCK_EXCL)
		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
	else if (lock_flags & XFS_ILOCK_SHARED)
		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
}

/*
 * This is just like xfs_ilock(), except that the caller
 * is guaranteed not to sleep.  It returns 1 if it gets
 * the requested locks and 0 otherwise.  If the IO lock is
 * obtained but the inode lock cannot be, then the IO lock
 * is dropped before returning.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks to be locked.
 *	See the comment for xfs_ilock() for a list of valid values.
 */
int
xfs_ilock_nowait(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);

	xfs_lock_flags_assert(lock_flags);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		if (!down_write_trylock(&VFS_I(ip)->i_rwsem))
			goto out;
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		if (!down_read_trylock(&VFS_I(ip)->i_rwsem))
			goto out;
	}

	if (lock_flags & XFS_MMAPLOCK_EXCL) {
		if (!down_write_trylock(&VFS_I(ip)->i_mapping->invalidate_lock))
			goto out_undo_iolock;
	} else if (lock_flags & XFS_MMAPLOCK_SHARED) {
		if (!down_read_trylock(&VFS_I(ip)->i_mapping->invalidate_lock))
			goto out_undo_iolock;
	}

	if (lock_flags & XFS_ILOCK_EXCL) {
		if (!mrtryupdate(&ip->i_lock))
			goto out_undo_mmaplock;
	} else if (lock_flags & XFS_ILOCK_SHARED) {
		if (!mrtryaccess(&ip->i_lock))
			goto out_undo_mmaplock;
	}
	return 1;

out_undo_mmaplock:
	if (lock_flags & XFS_MMAPLOCK_EXCL)
		up_write(&VFS_I(ip)->i_mapping->invalidate_lock);
	else if (lock_flags & XFS_MMAPLOCK_SHARED)
		up_read(&VFS_I(ip)->i_mapping->invalidate_lock);
out_undo_iolock:
	if (lock_flags & XFS_IOLOCK_EXCL)
		up_write(&VFS_I(ip)->i_rwsem);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		up_read(&VFS_I(ip)->i_rwsem);
out:
	return 0;
}
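/*
 * Illustrative trylock pattern, assuming a caller that must not sleep
 * waiting for the locks and can retry later:
 *
 *	if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED))
 *		return -EAGAIN;
 *	... short critical section ...
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 */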
/*
 * xfs_iunlock() is used to drop the inode locks acquired with
 * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 * that we know which locks to drop.
 *
 * ip -- the inode being unlocked
 * lock_flags -- this parameter indicates the inode's locks to be unlocked.
 *	See the comment for xfs_ilock() for a list of valid values for
 *	this parameter.
 */
void
xfs_iunlock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	xfs_lock_flags_assert(lock_flags);

	if (lock_flags & XFS_IOLOCK_EXCL)
		up_write(&VFS_I(ip)->i_rwsem);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		up_read(&VFS_I(ip)->i_rwsem);

	if (lock_flags & XFS_MMAPLOCK_EXCL)
		up_write(&VFS_I(ip)->i_mapping->invalidate_lock);
	else if (lock_flags & XFS_MMAPLOCK_SHARED)
		up_read(&VFS_I(ip)->i_mapping->invalidate_lock);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrunlock_excl(&ip->i_lock);
	else if (lock_flags & XFS_ILOCK_SHARED)
		mrunlock_shared(&ip->i_lock);

	trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
}

/*
 * Give up write locks.  The i/o lock cannot be held nested
 * if it is being demoted.
 */
void
xfs_ilock_demote(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL));
	ASSERT((lock_flags &
		~(XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrdemote(&ip->i_lock);
	if (lock_flags & XFS_MMAPLOCK_EXCL)
		downgrade_write(&VFS_I(ip)->i_mapping->invalidate_lock);
	if (lock_flags & XFS_IOLOCK_EXCL)
		downgrade_write(&VFS_I(ip)->i_rwsem);

	trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
}
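/*
 * Illustrative demote pattern, assuming an exclusive setup phase followed
 * by a longer phase that only needs shared access:
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL);
 *	... exclusive work ...
 *	xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
 *	... continue under the shared lock ...
 *	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 */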
#if defined(DEBUG) || defined(XFS_WARN)
static inline bool
__xfs_rwsem_islocked(
	struct rw_semaphore	*rwsem,
	bool			shared)
{
	if (!debug_locks)
		return rwsem_is_locked(rwsem);

	if (!shared)
		return lockdep_is_held_type(rwsem, 0);

	/*
	 * We are checking that the lock is held at least in shared
	 * mode but don't care that it might be held exclusively
	 * (i.e. shared | excl). Hence we check if the lock is held
	 * in any mode rather than an explicit shared mode.
	 */
	return lockdep_is_held_type(rwsem, -1);
}

bool
xfs_isilocked(
	struct xfs_inode	*ip,
	uint			lock_flags)
{
	if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
		if (!(lock_flags & XFS_ILOCK_SHARED))
			return !!ip->i_lock.mr_writer;
		return rwsem_is_locked(&ip->i_lock.mr_lock);
	}

	if (lock_flags & (XFS_MMAPLOCK_EXCL|XFS_MMAPLOCK_SHARED)) {
		return __xfs_rwsem_islocked(&VFS_I(ip)->i_mapping->invalidate_lock,
				(lock_flags & XFS_MMAPLOCK_SHARED));
	}

	if (lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) {
		return __xfs_rwsem_islocked(&VFS_I(ip)->i_rwsem,
				(lock_flags & XFS_IOLOCK_SHARED));
	}

	ASSERT(0);
	return false;
}
#endif

/*
 * xfs_lockdep_subclass_ok() is only used in an ASSERT, so is only called when
 * DEBUG or XFS_WARN is set.  And MAX_LOCKDEP_SUBCLASSES is then only defined
 * when CONFIG_LOCKDEP is set. Hence the complex define below to avoid build
 * errors and warnings.
 */
#if (defined(DEBUG) || defined(XFS_WARN)) && defined(CONFIG_LOCKDEP)
static bool
xfs_lockdep_subclass_ok(
	int subclass)
{
	return subclass < MAX_LOCKDEP_SUBCLASSES;
}
#else
#define xfs_lockdep_subclass_ok(subclass)	(true)
#endif

/*
 * Bump the subclass so xfs_lock_inodes() acquires each lock with a different
 * value. This can be called for any type of inode lock combination, including
 * parent locking. Care must be taken to ensure we don't overrun the subclass
 * storage fields in the class mask we build.
 */
static inline uint
xfs_lock_inumorder(
	uint			lock_mode,
	uint			subclass)
{
	uint			class = 0;

	ASSERT(!(lock_mode & (XFS_ILOCK_PARENT | XFS_ILOCK_RTBITMAP |
			      XFS_ILOCK_RTSUM)));
	ASSERT(xfs_lockdep_subclass_ok(subclass));

	if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) {
		ASSERT(subclass <= XFS_IOLOCK_MAX_SUBCLASS);
		class += subclass << XFS_IOLOCK_SHIFT;
	}

	if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) {
		ASSERT(subclass <= XFS_MMAPLOCK_MAX_SUBCLASS);
		class += subclass << XFS_MMAPLOCK_SHIFT;
	}

	if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) {
		ASSERT(subclass <= XFS_ILOCK_MAX_SUBCLASS);
		class += subclass << XFS_ILOCK_SHIFT;
	}

	return (lock_mode & ~XFS_LOCK_SUBCLASS_MASK) | class;
}
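/*
 * For example, xfs_lock_inumorder(XFS_ILOCK_EXCL, 2) keeps the
 * XFS_ILOCK_EXCL mode bits and folds subclass 2 into the
 * XFS_ILOCK_SHIFT bits, so lockdep sees a distinct subclass for the
 * third inode locked by xfs_lock_inodes() below.
 */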
/*
 * The following routine will lock n inodes in exclusive mode.  We assume the
 * caller calls us with the inodes in i_ino order.
 *
 * We need to detect deadlock where an inode that we lock is in the AIL and we
 * start waiting for another inode that is locked by a thread in a long running
 * transaction (such as truncate). This can result in deadlock since the long
 * running trans might need to wait for the inode we just locked in order to
 * push the tail and free space in the log.
 *
 * xfs_lock_inodes() can only be used to lock one type of lock at a time -
 * the iolock, the mmaplock or the ilock, but not more than one at a time. If we
 * lock more than one at a time, lockdep will report false positives saying we
 * have violated locking orders.
 */
static void
xfs_lock_inodes(
	struct xfs_inode	**ips,
	int			inodes,
	uint			lock_mode)
{
	int			attempts = 0;
	uint			i;
	int			j;
	bool			try_lock;
	struct xfs_log_item	*lp;

	/*
	 * Currently supports between 2 and 5 inodes with exclusive locking. We
	 * support an arbitrary depth of locking here, but absolute limits on
	 * inodes depend on the type of locking and the limits placed by
	 * lockdep annotations in xfs_lock_inumorder.  These are all checked by
	 * the asserts.
	 */
	ASSERT(ips && inodes >= 2 && inodes <= 5);
	ASSERT(lock_mode & (XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL |
			    XFS_ILOCK_EXCL));
	ASSERT(!(lock_mode & (XFS_IOLOCK_SHARED | XFS_MMAPLOCK_SHARED |
			      XFS_ILOCK_SHARED)));
	ASSERT(!(lock_mode & XFS_MMAPLOCK_EXCL) ||
		inodes <= XFS_MMAPLOCK_MAX_SUBCLASS + 1);
	ASSERT(!(lock_mode & XFS_ILOCK_EXCL) ||
		inodes <= XFS_ILOCK_MAX_SUBCLASS + 1);

	if (lock_mode & XFS_IOLOCK_EXCL) {
		ASSERT(!(lock_mode & (XFS_MMAPLOCK_EXCL | XFS_ILOCK_EXCL)));
	} else if (lock_mode & XFS_MMAPLOCK_EXCL)
		ASSERT(!(lock_mode & XFS_ILOCK_EXCL));

again:
	try_lock = false;
	i = 0;
	for (; i < inodes; i++) {
		ASSERT(ips[i]);

		if (i && (ips[i] == ips[i - 1]))	/* Already locked */
			continue;

		/*
		 * If try_lock is not set yet, make sure all locked inodes are
		 * not in the AIL.  If any are, set try_lock to be used later.
		 */
		if (!try_lock) {
			for (j = (i - 1); j >= 0 && !try_lock; j--) {
				lp = &ips[j]->i_itemp->ili_item;
				if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags))
					try_lock = true;
			}
		}

		/*
		 * If any of the previous locks we have locked is in the AIL,
		 * we must TRY to get the second and subsequent locks. If
		 * we can't get any, we must release all we have
		 * and try again.
		 */
		if (!try_lock) {
			xfs_ilock(ips[i], xfs_lock_inumorder(lock_mode, i));
			continue;
		}

		/* try_lock means we have an inode locked that is in the AIL. */
		ASSERT(i != 0);
		if (xfs_ilock_nowait(ips[i], xfs_lock_inumorder(lock_mode, i)))
			continue;

		/*
		 * Unlock all previous guys and try again.  xfs_iunlock will try
		 * to push the tail if the inode is in the AIL.
		 */
		attempts++;
		for (j = i - 1; j >= 0; j--) {
			/*
			 * Check to see if we've already unlocked this one.  Not
			 * the first one going back, and the inode ptr is the
			 * same.
			 */
			if (j != (i - 1) && ips[j] == ips[j + 1])
				continue;

			xfs_iunlock(ips[j], lock_mode);
		}

		if ((attempts % 5) == 0) {
			delay(1); /* Don't just spin the CPU */
		}
		goto again;
	}
}
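/*
 * Illustrative call, assuming four distinct inodes already sorted by
 * i_ino (the order xfs_lock_inodes() requires from its caller):
 *
 *	struct xfs_inode	*ips[4] = { ip0, ip1, ip2, ip3 };
 *
 *	xfs_lock_inodes(ips, 4, XFS_ILOCK_EXCL);
 */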
/*
 * xfs_lock_two_inodes() can only be used to lock ilock. The iolock and
 * mmaplock must be double-locked separately since we use i_rwsem and
 * invalidate_lock for that. We now support taking one lock EXCL and the
 * other SHARED.
 */
void
xfs_lock_two_inodes(
	struct xfs_inode	*ip0,
	uint			ip0_mode,
	struct xfs_inode	*ip1,
	uint			ip1_mode)
{
	int			attempts = 0;
	struct xfs_log_item	*lp;

	ASSERT(hweight32(ip0_mode) == 1);
	ASSERT(hweight32(ip1_mode) == 1);
	ASSERT(!(ip0_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
	ASSERT(!(ip1_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
	ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)));
	ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)));
	ASSERT(ip0->i_ino != ip1->i_ino);

	if (ip0->i_ino > ip1->i_ino) {
		swap(ip0, ip1);
		swap(ip0_mode, ip1_mode);
	}

again:
	xfs_ilock(ip0, xfs_lock_inumorder(ip0_mode, 0));

	/*
	 * If the first lock we have locked is in the AIL, we must TRY to get
	 * the second lock. If we can't get it, we must release the first one
	 * and try again.
	 */
	lp = &ip0->i_itemp->ili_item;
	if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags)) {
		if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(ip1_mode, 1))) {
			xfs_iunlock(ip0, ip0_mode);
			if ((++attempts % 5) == 0)
				delay(1); /* Don't just spin the CPU */
			goto again;
		}
	} else {
		xfs_ilock(ip1, xfs_lock_inumorder(ip1_mode, 1));
	}
}

uint
xfs_ip2xflags(
	struct xfs_inode	*ip)
{
	uint			flags = 0;

	if (ip->i_diflags & XFS_DIFLAG_ANY) {
		if (ip->i_diflags & XFS_DIFLAG_REALTIME)
			flags |= FS_XFLAG_REALTIME;
		if (ip->i_diflags & XFS_DIFLAG_PREALLOC)
			flags |= FS_XFLAG_PREALLOC;
		if (ip->i_diflags & XFS_DIFLAG_IMMUTABLE)
			flags |= FS_XFLAG_IMMUTABLE;
		if (ip->i_diflags & XFS_DIFLAG_APPEND)
			flags |= FS_XFLAG_APPEND;
		if (ip->i_diflags & XFS_DIFLAG_SYNC)
			flags |= FS_XFLAG_SYNC;
		if (ip->i_diflags & XFS_DIFLAG_NOATIME)
			flags |= FS_XFLAG_NOATIME;
		if (ip->i_diflags & XFS_DIFLAG_NODUMP)
			flags |= FS_XFLAG_NODUMP;
		if (ip->i_diflags & XFS_DIFLAG_RTINHERIT)
			flags |= FS_XFLAG_RTINHERIT;
		if (ip->i_diflags & XFS_DIFLAG_PROJINHERIT)
			flags |= FS_XFLAG_PROJINHERIT;
		if (ip->i_diflags & XFS_DIFLAG_NOSYMLINKS)
			flags |= FS_XFLAG_NOSYMLINKS;
		if (ip->i_diflags & XFS_DIFLAG_EXTSIZE)
			flags |= FS_XFLAG_EXTSIZE;
		if (ip->i_diflags & XFS_DIFLAG_EXTSZINHERIT)
			flags |= FS_XFLAG_EXTSZINHERIT;
		if (ip->i_diflags & XFS_DIFLAG_NODEFRAG)
			flags |= FS_XFLAG_NODEFRAG;
		if (ip->i_diflags & XFS_DIFLAG_FILESTREAM)
			flags |= FS_XFLAG_FILESTREAM;
	}

	if (ip->i_diflags2 & XFS_DIFLAG2_ANY) {
		if (ip->i_diflags2 & XFS_DIFLAG2_DAX)
			flags |= FS_XFLAG_DAX;
		if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
			flags |= FS_XFLAG_COWEXTSIZE;
	}

	if (xfs_inode_has_attr_fork(ip))
		flags |= FS_XFLAG_HASATTR;
	return flags;
}

/*
 * Looks up an inode from "name".  If ci_name is not NULL, then a CI match
 * is allowed, otherwise it has to be an exact match.  If a CI match is found,
 * ci_name->name will point to the actual name (caller must free) or
 * will be set to NULL if an exact match is found.
 */
int
xfs_lookup(
	struct xfs_inode	*dp,
	const struct xfs_name	*name,
	struct xfs_inode	**ipp,
	struct xfs_name		*ci_name)
{
	xfs_ino_t		inum;
	int			error;

	trace_xfs_lookup(dp, name);

	if (xfs_is_shutdown(dp->i_mount))
		return -EIO;

	error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
	if (error)
		goto out_unlock;

	error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp);
	if (error)
		goto out_free_name;

	return 0;

out_free_name:
	if (ci_name)
		kmem_free(ci_name->name);
out_unlock:
	*ipp = NULL;
	return error;
}
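/*
 * Illustrative handling of the ci_name contract above: a caller that
 * passed a non-NULL ci_name owns the returned name buffer on a
 * case-insensitive match and must free it:
 *
 *	error = xfs_lookup(dp, name, &ip, &ci_name);
 *	if (!error && ci_name.name)
 *		kmem_free(ci_name.name);
 */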
/* Propagate di_flags from a parent inode to a child inode. */
static void
xfs_inode_inherit_flags(
	struct xfs_inode	*ip,
	const struct xfs_inode	*pip)
{
	unsigned int		di_flags = 0;
	xfs_failaddr_t		failaddr;
	umode_t			mode = VFS_I(ip)->i_mode;

	if (S_ISDIR(mode)) {
		if (pip->i_diflags & XFS_DIFLAG_RTINHERIT)
			di_flags |= XFS_DIFLAG_RTINHERIT;
		if (pip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) {
			di_flags |= XFS_DIFLAG_EXTSZINHERIT;
			ip->i_extsize = pip->i_extsize;
		}
		if (pip->i_diflags & XFS_DIFLAG_PROJINHERIT)
			di_flags |= XFS_DIFLAG_PROJINHERIT;
	} else if (S_ISREG(mode)) {
		if ((pip->i_diflags & XFS_DIFLAG_RTINHERIT) &&
		    xfs_has_realtime(ip->i_mount))
			di_flags |= XFS_DIFLAG_REALTIME;
		if (pip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) {
			di_flags |= XFS_DIFLAG_EXTSIZE;
			ip->i_extsize = pip->i_extsize;
		}
	}
	if ((pip->i_diflags & XFS_DIFLAG_NOATIME) &&
	    xfs_inherit_noatime)
		di_flags |= XFS_DIFLAG_NOATIME;
	if ((pip->i_diflags & XFS_DIFLAG_NODUMP) &&
	    xfs_inherit_nodump)
		di_flags |= XFS_DIFLAG_NODUMP;
	if ((pip->i_diflags & XFS_DIFLAG_SYNC) &&
	    xfs_inherit_sync)
		di_flags |= XFS_DIFLAG_SYNC;
	if ((pip->i_diflags & XFS_DIFLAG_NOSYMLINKS) &&
	    xfs_inherit_nosymlinks)
		di_flags |= XFS_DIFLAG_NOSYMLINKS;
	if ((pip->i_diflags & XFS_DIFLAG_NODEFRAG) &&
	    xfs_inherit_nodefrag)
		di_flags |= XFS_DIFLAG_NODEFRAG;
	if (pip->i_diflags & XFS_DIFLAG_FILESTREAM)
		di_flags |= XFS_DIFLAG_FILESTREAM;

	ip->i_diflags |= di_flags;

	/*
	 * Inode verifiers on older kernels only check that the extent size
	 * hint is an integer multiple of the rt extent size on realtime files.
	 * They did not check the hint alignment on a directory with both
	 * rtinherit and extszinherit flags set.  If the misaligned hint is
	 * propagated from a directory into a new realtime file, new file
	 * allocations will fail due to math errors in the rt allocator and/or
	 * trip the verifiers.  Validate the hint settings in the new file so
	 * that we don't let broken hints propagate.
	 */
	failaddr = xfs_inode_validate_extsize(ip->i_mount, ip->i_extsize,
			VFS_I(ip)->i_mode, ip->i_diflags);
	if (failaddr) {
		ip->i_diflags &= ~(XFS_DIFLAG_EXTSIZE |
				   XFS_DIFLAG_EXTSZINHERIT);
		ip->i_extsize = 0;
	}
}

/* Propagate di_flags2 from a parent inode to a child inode. */
static void
xfs_inode_inherit_flags2(
	struct xfs_inode	*ip,
	const struct xfs_inode	*pip)
{
	xfs_failaddr_t		failaddr;

	if (pip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE) {
		ip->i_diflags2 |= XFS_DIFLAG2_COWEXTSIZE;
		ip->i_cowextsize = pip->i_cowextsize;
	}
	if (pip->i_diflags2 & XFS_DIFLAG2_DAX)
		ip->i_diflags2 |= XFS_DIFLAG2_DAX;

	/* Don't let invalid cowextsize hints propagate. */
	failaddr = xfs_inode_validate_cowextsize(ip->i_mount, ip->i_cowextsize,
			VFS_I(ip)->i_mode, ip->i_diflags, ip->i_diflags2);
	if (failaddr) {
		ip->i_diflags2 &= ~XFS_DIFLAG2_COWEXTSIZE;
		ip->i_cowextsize = 0;
	}
}
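/*
 * For example, a directory with XFS_DIFLAG_EXTSZINHERIT set and
 * i_extsize = 16 blocks produces a regular-file child with
 * XFS_DIFLAG_EXTSIZE set and i_extsize = 16, while a directory child
 * inherits XFS_DIFLAG_EXTSZINHERIT itself so the hint keeps
 * propagating down the tree.
 */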
/*
 * Initialise a newly allocated inode and return the in-core inode to the
 * caller locked exclusively.
 */
int
xfs_init_new_inode(
	struct user_namespace	*mnt_userns,
	struct xfs_trans	*tp,
	struct xfs_inode	*pip,
	xfs_ino_t		ino,
	umode_t			mode,
	xfs_nlink_t		nlink,
	dev_t			rdev,
	prid_t			prid,
	bool			init_xattrs,
	struct xfs_inode	**ipp)
{
	struct inode		*dir = pip ? VFS_I(pip) : NULL;
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_inode	*ip;
	unsigned int		flags;
	int			error;
	struct timespec64	tv;
	struct inode		*inode;

	/*
	 * Protect against obviously corrupt allocation btree records. Later
	 * xfs_iget checks will catch re-allocation of other active in-memory
	 * and on-disk inodes. If we don't catch reallocating the parent inode
	 * here we will deadlock in xfs_iget() so we have to do these checks
	 * first.
	 */
	if ((pip && ino == pip->i_ino) || !xfs_verify_dir_ino(mp, ino)) {
		xfs_alert(mp, "Allocated a known in-use inode 0x%llx!", ino);
		return -EFSCORRUPTED;
	}

	/*
	 * Get the in-core inode with the lock held exclusively to prevent
	 * others from looking at it until we're done.
	 */
	error = xfs_iget(mp, tp, ino, XFS_IGET_CREATE, XFS_ILOCK_EXCL, &ip);
	if (error)
		return error;

	ASSERT(ip != NULL);
	inode = VFS_I(ip);
	set_nlink(inode, nlink);
	inode->i_rdev = rdev;
	ip->i_projid = prid;

	if (dir && !(dir->i_mode & S_ISGID) && xfs_has_grpid(mp)) {
		inode_fsuid_set(inode, mnt_userns);
		inode->i_gid = dir->i_gid;
		inode->i_mode = mode;
	} else {
		inode_init_owner(mnt_userns, inode, dir, mode);
	}

	/*
	 * If the group ID of the new file does not match the effective group
	 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
	 * (and only if the irix_sgid_inherit compatibility variable is set).
	 */
	if (irix_sgid_inherit && (inode->i_mode & S_ISGID) &&
	    !vfsgid_in_group_p(i_gid_into_vfsgid(mnt_userns, inode)))
		inode->i_mode &= ~S_ISGID;

	ip->i_disk_size = 0;
	ip->i_df.if_nextents = 0;
	ASSERT(ip->i_nblocks == 0);

	tv = current_time(inode);
	inode->i_mtime = tv;
	inode->i_atime = tv;
	inode->i_ctime = tv;

	ip->i_extsize = 0;
	ip->i_diflags = 0;

	if (xfs_has_v3inodes(mp)) {
		inode_set_iversion(inode, 1);
		ip->i_cowextsize = 0;
		ip->i_crtime = tv;
	}

	flags = XFS_ILOG_CORE;
	switch (mode & S_IFMT) {
	case S_IFIFO:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFSOCK:
		ip->i_df.if_format = XFS_DINODE_FMT_DEV;
		flags |= XFS_ILOG_DEV;
		break;
	case S_IFREG:
	case S_IFDIR:
		if (pip && (pip->i_diflags & XFS_DIFLAG_ANY))
			xfs_inode_inherit_flags(ip, pip);
		if (pip && (pip->i_diflags2 & XFS_DIFLAG2_ANY))
			xfs_inode_inherit_flags2(ip, pip);
		fallthrough;
	case S_IFLNK:
		ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
		ip->i_df.if_bytes = 0;
		ip->i_df.if_u1.if_root = NULL;
		break;
	default:
		ASSERT(0);
	}

	/*
	 * If we need to create attributes immediately after allocating the
	 * inode, initialise an empty attribute fork right now. We use the
	 * default fork offset for attributes here as we don't know exactly what
	 * size or how many attributes we might be adding. We can do this
	 * safely here because we know the data fork is completely empty and
	 * this saves us from needing to run a separate transaction to set the
	 * fork offset in the immediate future.
	 */
	if (init_xattrs && xfs_has_attr(mp)) {
		ip->i_forkoff = xfs_default_attroffset(ip) >> 3;
		xfs_ifork_init_attr(ip, XFS_DINODE_FMT_EXTENTS, 0);
	}

	/*
	 * Log the new values stuffed into the inode.
	 */
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, flags);

	/* now that we have an i_mode we can setup the inode structure */
	xfs_setup_inode(ip);

	*ipp = ip;
	return 0;
}
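/*
 * Illustrative allocation sequence (this is the pairing xfs_create()
 * uses below), assuming a transaction with an inode reservation:
 *
 *	error = xfs_dialloc(&tp, dp->i_ino, mode, &ino);
 *	if (!error)
 *		error = xfs_init_new_inode(mnt_userns, tp, dp, ino, mode,
 *				1, 0, prid, false, &ip);
 */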
/*
 * Decrement the link count on an inode & log the change.  If this causes the
 * link count to go to zero, move the inode to the AGI unlinked list so that it
 * can be freed when the last active reference goes away via xfs_inactive().
 */
static int			/* error */
xfs_droplink(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip)
{
	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);

	drop_nlink(VFS_I(ip));
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	if (VFS_I(ip)->i_nlink)
		return 0;

	return xfs_iunlink(tp, ip);
}

/*
 * Increment the link count on an inode & log the change.
 */
static void
xfs_bumplink(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip)
{
	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);

	inc_nlink(VFS_I(ip));
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
}
int
xfs_create(
	struct user_namespace	*mnt_userns,
	xfs_inode_t		*dp,
	struct xfs_name		*name,
	umode_t			mode,
	dev_t			rdev,
	bool			init_xattrs,
	xfs_inode_t		**ipp)
{
	int			is_dir = S_ISDIR(mode);
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_inode	*ip = NULL;
	struct xfs_trans	*tp = NULL;
	int			error;
	bool			unlock_dp_on_error = false;
	prid_t			prid;
	struct xfs_dquot	*udqp = NULL;
	struct xfs_dquot	*gdqp = NULL;
	struct xfs_dquot	*pdqp = NULL;
	struct xfs_trans_res	*tres;
	uint			resblks;
	xfs_ino_t		ino;

	trace_xfs_create(dp, name);

	if (xfs_is_shutdown(mp))
		return -EIO;

	prid = xfs_get_initial_prid(dp);

	/*
	 * Make sure that we have allocated dquot(s) on disk.
	 */
	error = xfs_qm_vop_dqalloc(dp, mapped_fsuid(mnt_userns, &init_user_ns),
			mapped_fsgid(mnt_userns, &init_user_ns), prid,
			XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
			&udqp, &gdqp, &pdqp);
	if (error)
		return error;

	if (is_dir) {
		resblks = XFS_MKDIR_SPACE_RES(mp, name->len);
		tres = &M_RES(mp)->tr_mkdir;
	} else {
		resblks = XFS_CREATE_SPACE_RES(mp, name->len);
		tres = &M_RES(mp)->tr_create;
	}

	/*
	 * Initially assume that the file does not exist and
	 * reserve the resources for that case.  If that is not
	 * the case we'll drop the one we have and get a more
	 * appropriate transaction later.
	 */
	error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp, resblks,
			&tp);
	if (error == -ENOSPC) {
		/* flush outstanding delalloc blocks and retry */
		xfs_flush_inodes(mp);
		error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp,
				resblks, &tp);
	}
	if (error)
		goto out_release_dquots;

	xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
	unlock_dp_on_error = true;

	/*
	 * A newly created regular or special file just has one directory
	 * entry pointing to it, but a directory also has the "." entry
	 * pointing to itself.
	 */
	error = xfs_dialloc(&tp, dp->i_ino, mode, &ino);
	if (!error)
		error = xfs_init_new_inode(mnt_userns, tp, dp, ino, mode,
				is_dir ? 2 : 1, rdev, prid, init_xattrs, &ip);
	if (error)
		goto out_trans_cancel;

	/*
	 * Now we join the directory inode to the transaction.  We do not do it
	 * earlier because xfs_dialloc might commit the previous transaction
	 * (and release all the locks).  An error from here on will result in
	 * the transaction cancel unlocking dp so don't do it explicitly in the
	 * error path.
	 */
	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
	unlock_dp_on_error = false;

	error = xfs_dir_createname(tp, dp, name, ip->i_ino,
				   resblks - XFS_IALLOC_SPACE_RES(mp));
	if (error) {
		ASSERT(error != -ENOSPC);
		goto out_trans_cancel;
	}
	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);

	if (is_dir) {
		error = xfs_dir_init(tp, ip, dp);
		if (error)
			goto out_trans_cancel;

		xfs_bumplink(tp, dp);
	}

	/*
	 * If this is a synchronous mount, make sure that the
	 * create transaction goes to disk before returning to
	 * the user.
	 */
	if (xfs_has_wsync(mp) || xfs_has_dirsync(mp))
		xfs_trans_set_sync(tp);

	/*
	 * Attach the dquot(s) to the inodes and modify them incore.
	 * The ids of the inode couldn't have changed since the new
	 * inode has been locked ever since it was created.
	 */
	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);

	error = xfs_trans_commit(tp);
	if (error)
		goto out_release_inode;

	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	*ipp = ip;
	return 0;

 out_trans_cancel:
	xfs_trans_cancel(tp);
 out_release_inode:
	/*
	 * Wait until after the current transaction is aborted to finish the
	 * setup of the inode and release the inode.  This prevents recursive
	 * transactions and deadlocks from xfs_inactive.
	 */
	if (ip) {
		xfs_finish_inode_setup(ip);
		xfs_irele(ip);
	}
 out_release_dquots:
	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	if (unlock_dp_on_error)
		xfs_iunlock(dp, XFS_ILOCK_EXCL);
	return error;
}
int
xfs_create_tmpfile(
	struct user_namespace	*mnt_userns,
	struct xfs_inode	*dp,
	umode_t			mode,
	struct xfs_inode	**ipp)
{
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_inode	*ip = NULL;
	struct xfs_trans	*tp = NULL;
	int			error;
	prid_t			prid;
	struct xfs_dquot	*udqp = NULL;
	struct xfs_dquot	*gdqp = NULL;
	struct xfs_dquot	*pdqp = NULL;
	struct xfs_trans_res	*tres;
	uint			resblks;
	xfs_ino_t		ino;

	if (xfs_is_shutdown(mp))
		return -EIO;

	prid = xfs_get_initial_prid(dp);

	/*
	 * Make sure that we have allocated dquot(s) on disk.
	 */
	error = xfs_qm_vop_dqalloc(dp, mapped_fsuid(mnt_userns, &init_user_ns),
			mapped_fsgid(mnt_userns, &init_user_ns), prid,
			XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
			&udqp, &gdqp, &pdqp);
	if (error)
		return error;

	resblks = XFS_IALLOC_SPACE_RES(mp);
	tres = &M_RES(mp)->tr_create_tmpfile;

	error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp, resblks,
			&tp);
	if (error)
		goto out_release_dquots;

	error = xfs_dialloc(&tp, dp->i_ino, mode, &ino);
	if (!error)
		error = xfs_init_new_inode(mnt_userns, tp, dp, ino, mode,
				0, 0, prid, false, &ip);
	if (error)
		goto out_trans_cancel;

	if (xfs_has_wsync(mp))
		xfs_trans_set_sync(tp);

	/*
	 * Attach the dquot(s) to the inodes and modify them incore.
	 * The ids of the inode couldn't have changed since the new
	 * inode has been locked ever since it was created.
	 */
	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);

	error = xfs_iunlink(tp, ip);
	if (error)
		goto out_trans_cancel;

	error = xfs_trans_commit(tp);
	if (error)
		goto out_release_inode;

	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	*ipp = ip;
	return 0;

 out_trans_cancel:
	xfs_trans_cancel(tp);
 out_release_inode:
	/*
	 * Wait until after the current transaction is aborted to finish the
	 * setup of the inode and release the inode.  This prevents recursive
	 * transactions and deadlocks from xfs_inactive.
	 */
	if (ip) {
		xfs_finish_inode_setup(ip);
		xfs_irele(ip);
	}
 out_release_dquots:
	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	return error;
}

int
xfs_link(
	xfs_inode_t		*tdp,
	xfs_inode_t		*sip,
	struct xfs_name		*target_name)
{
	xfs_mount_t		*mp = tdp->i_mount;
	xfs_trans_t		*tp;
	int			error, nospace_error = 0;
	int			resblks;

	trace_xfs_link(tdp, target_name);

	ASSERT(!S_ISDIR(VFS_I(sip)->i_mode));

	if (xfs_is_shutdown(mp))
		return -EIO;

	error = xfs_qm_dqattach(sip);
	if (error)
		goto std_return;

	error = xfs_qm_dqattach(tdp);
	if (error)
		goto std_return;

	resblks = XFS_LINK_SPACE_RES(mp, target_name->len);
	error = xfs_trans_alloc_dir(tdp, &M_RES(mp)->tr_link, sip, &resblks,
			&tp, &nospace_error);
	if (error)
		goto std_return;

	/*
	 * If we are using project inheritance, we only allow hard link
	 * creation in our tree when the project IDs are the same; else
	 * the tree quota mechanism could be circumvented.
	 */
	if (unlikely((tdp->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
		     tdp->i_projid != sip->i_projid)) {
		error = -EXDEV;
		goto error_return;
	}

	if (!resblks) {
		error = xfs_dir_canenter(tp, tdp, target_name);
		if (error)
			goto error_return;
	}

	/*
	 * Handle the initial link state of an O_TMPFILE inode.
	 */
	if (VFS_I(sip)->i_nlink == 0) {
		struct xfs_perag	*pag;

		pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, sip->i_ino));
		error = xfs_iunlink_remove(tp, pag, sip);
		xfs_perag_put(pag);
		if (error)
			goto error_return;
	}

	error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino,
				   resblks);
	if (error)
		goto error_return;
	xfs_trans_ichgtime(tp, tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE);

	xfs_bumplink(tp, sip);

	/*
	 * If this is a synchronous mount, make sure that the
	 * link transaction goes to disk before returning to
	 * the user.
	 */
	if (xfs_has_wsync(mp) || xfs_has_dirsync(mp))
		xfs_trans_set_sync(tp);

	return xfs_trans_commit(tp);

 error_return:
	xfs_trans_cancel(tp);
 std_return:
	if (error == -ENOSPC && nospace_error)
		error = nospace_error;
	return error;
}
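/*
 * Illustrative O_TMPFILE life cycle using the two functions above:
 * xfs_create_tmpfile() allocates the inode with nlink == 0 and places it
 * on the AGI unlinked list via xfs_iunlink(); if the file is later given
 * a name with linkat(), xfs_link() pulls it back off the unlinked list
 * with xfs_iunlink_remove() before bumping the link count.
 */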
/* Clear the reflink flag and the cowblocks tag if possible. */
static void
xfs_itruncate_clear_reflink_flags(
	struct xfs_inode	*ip)
{
	struct xfs_ifork	*dfork;
	struct xfs_ifork	*cfork;

	if (!xfs_is_reflink_inode(ip))
		return;
	dfork = xfs_ifork_ptr(ip, XFS_DATA_FORK);
	cfork = xfs_ifork_ptr(ip, XFS_COW_FORK);
	if (dfork->if_bytes == 0 && cfork->if_bytes == 0)
		ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
	if (cfork->if_bytes == 0)
		xfs_inode_clear_cowblocks_tag(ip);
}
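/*
 * For example, truncating a reflinked file to zero leaves both the data
 * fork and the CoW fork empty, so the function above can drop the
 * XFS_DIFLAG2_REFLINK flag and clear the cowblocks tag in one pass.
 */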
/*
 * Free up the underlying blocks past new_size.  The new size must be smaller
 * than the current size.  This routine can be used both for the attribute and
 * data fork, and does not modify the inode size, which is left to the caller.
 *
 * The transaction passed to this routine must have made a permanent log
 * reservation of at least XFS_ITRUNCATE_LOG_RES.  This routine may commit the
 * given transaction and start new ones, so make sure everything involved in
 * the transaction is tidy before calling here.  Some transaction will be
 * returned to the caller to be committed.  The incoming transaction must
 * already include the inode, and both inode locks must be held exclusively.
 * The inode must also be "held" within the transaction.  On return the inode
 * will be "held" within the returned transaction.  This routine does NOT
 * require any disk space to be reserved for it within the transaction.
 *
 * If we get an error, we must return with the inode locked and linked into the
 * current transaction. This keeps things simple for the higher level code,
 * because it always knows that the inode is locked and held in the transaction
 * that returns to it whether errors occur or not.  We don't mark the inode
 * dirty on error so that transactions can be easily aborted if possible.
 */
int
xfs_itruncate_extents_flags(
	struct xfs_trans	**tpp,
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_fsize_t		new_size,
	int			flags)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp = *tpp;
	xfs_fileoff_t		first_unmap_block;
	xfs_filblks_t		unmap_len;
	int			error = 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(!atomic_read(&VFS_I(ip)->i_count) ||
	       xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(new_size <= XFS_ISIZE(ip));
	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(ip->i_itemp != NULL);
	ASSERT(ip->i_itemp->ili_lock_flags == 0);
	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));

	trace_xfs_itruncate_extents_start(ip, new_size);

	flags |= xfs_bmapi_aflag(whichfork);

	/*
	 * Since it is possible for space to become allocated beyond
	 * the end of the file (in a crash where the space is allocated
	 * but the inode size is not yet updated), simply remove any
	 * blocks which show up between the new EOF and the maximum
	 * possible file size.
	 *
	 * We have to free all the blocks to the bmbt maximum offset, even if
	 * the page cache can't scale that far.
	 */
	first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
	if (!xfs_verify_fileoff(mp, first_unmap_block)) {
		WARN_ON_ONCE(first_unmap_block > XFS_MAX_FILEOFF);
		return 0;
	}

	unmap_len = XFS_MAX_FILEOFF - first_unmap_block + 1;
	while (unmap_len > 0) {
		ASSERT(tp->t_firstblock == NULLFSBLOCK);
		error = __xfs_bunmapi(tp, ip, first_unmap_block, &unmap_len,
				flags, XFS_ITRUNC_MAX_EXTENTS);
		if (error)
			goto out;

		/* free the just unmapped extents */
		error = xfs_defer_finish(&tp);
		if (error)
			goto out;
	}

	if (whichfork == XFS_DATA_FORK) {
		/* Remove all pending CoW reservations. */
		error = xfs_reflink_cancel_cow_blocks(ip, &tp,
				first_unmap_block, XFS_MAX_FILEOFF, true);
		if (error)
			goto out;

		xfs_itruncate_clear_reflink_flags(ip);
	}

	/*
	 * Always re-log the inode so that our permanent transaction can keep
	 * on rolling it forward in the log.
	 */
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	trace_xfs_itruncate_extents_end(ip, new_size);

out:
	*tpp = tp;
	return error;
}
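/*
 * Illustrative walk of the unmap loop above: with
 * XFS_ITRUNC_MAX_EXTENTS == 2, a file with five extents past the new
 * EOF needs three passes, each unmapping at most two extents and then
 * finishing the deferred frees with xfs_defer_finish() before looping.
 */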
int
xfs_release(
	xfs_inode_t	*ip)
{
	xfs_mount_t	*mp = ip->i_mount;
	int		error = 0;

	if (!S_ISREG(VFS_I(ip)->i_mode) || (VFS_I(ip)->i_mode == 0))
		return 0;

	/* If this is a read-only mount, don't do this (would generate I/O) */
	if (xfs_is_readonly(mp))
		return 0;

	if (!xfs_is_shutdown(mp)) {
		int truncated;

		/*
		 * If we previously truncated this file and removed old data
		 * in the process, we want to initiate "early" writeout on
		 * the last close.  This is an attempt to combat the notorious
		 * NULL files problem which is particularly noticeable from a
		 * truncate down, buffered (re-)write (delalloc), followed by
		 * a crash.  What we are effectively doing here is
		 * significantly reducing the time window where we'd otherwise
		 * be exposed to that problem.
		 */
		truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
		if (truncated) {
			xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE);
			if (ip->i_delayed_blks > 0) {
				error = filemap_flush(VFS_I(ip)->i_mapping);
				if (error)
					return error;
			}
		}
	}

	if (VFS_I(ip)->i_nlink == 0)
		return 0;

	/*
	 * If we can't get the iolock just skip truncating the blocks past EOF
	 * because we could deadlock with the mmap_lock otherwise. We'll get
	 * another chance to drop them once the last reference to the inode is
	 * dropped, so we'll never leak blocks permanently.
	 */
	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL))
		return 0;

	if (xfs_can_free_eofblocks(ip, false)) {
		/*
		 * If the inode is being opened, written and closed frequently
		 * and we have delayed allocation blocks outstanding
		 * (e.g. streaming writes from the NFS server), truncating the
		 * blocks past EOF will cause fragmentation to occur.
		 *
		 * In this case don't do the truncation, but we have to be
		 * careful how we detect this case. Blocks beyond EOF show up as
		 * i_delayed_blks even when the inode is clean, so we need to
		 * truncate them away first before checking for a dirty release.
		 * Hence on the first dirty close we will still remove the
		 * speculative allocation, but after that we will leave it in
		 * place.
		 */
		if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE))
			goto out_unlock;

		error = xfs_free_eofblocks(ip);
		if (error)
			goto out_unlock;

		/* delalloc blocks after truncation means it really is dirty */
		if (ip->i_delayed_blks)
			xfs_iflags_set(ip, XFS_IDIRTY_RELEASE);
	}

out_unlock:
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	return error;
}

/*
 * xfs_inactive_truncate
 *
 * Called to perform a truncate when an inode becomes unlinked.
 */
STATIC int
xfs_inactive_truncate(
	struct xfs_inode *ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
	if (error) {
		ASSERT(xfs_is_shutdown(mp));
		return error;
	}
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * Log the inode size first to prevent stale data exposure in the event
	 * of a system crash before the truncate completes.  See the related
	 * comment in xfs_vn_setattr_size() for details.
	 */
	ip->i_disk_size = 0;
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
	if (error)
		goto error_trans_cancel;

	ASSERT(ip->i_df.if_nextents == 0);

	error = xfs_trans_commit(tp);
	if (error)
		goto error_unlock;

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return 0;

error_trans_cancel:
	xfs_trans_cancel(tp);
error_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
/*
 * xfs_inactive_ifree()
 *
 * Perform the inode free when an inode is unlinked.
 */
STATIC int
xfs_inactive_ifree(
	struct xfs_inode *ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	/*
	 * We try to use a per-AG reservation for any block needed by the finobt
	 * tree, but as the finobt feature predates the per-AG reservation
	 * support a degraded file system might not have enough space for the
	 * reservation at mount time.  In that case try to dip into the reserved
	 * pool and pray.
	 *
	 * Send a warning if the reservation does happen to fail, as the inode
	 * now remains allocated and sits on the unlinked list until the fs is
	 * repaired.
	 */
	if (unlikely(mp->m_finobt_nores)) {
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree,
				XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE,
				&tp);
	} else {
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 0, 0, 0, &tp);
	}
	if (error) {
		if (error == -ENOSPC) {
			xfs_warn_ratelimited(mp,
			"Failed to remove inode(s) from unlinked list. "
			"Please free space, unmount and run xfs_repair.");
		} else {
			ASSERT(xfs_is_shutdown(mp));
		}
		return error;
	}

	/*
	 * We do not hold the inode locked across the entire rolling transaction
	 * here. We only need to hold it for the first transaction that
	 * xfs_ifree() builds, which may mark the inode XFS_ISTALE if the
	 * underlying cluster buffer is freed. Relogging an XFS_ISTALE inode
	 * here breaks the relationship between cluster buffer invalidation and
	 * stale inode invalidation on cluster buffer item journal commit
	 * completion, and can result in leaving dirty stale inodes hanging
	 * around in memory.
	 *
	 * We have no need for serialising this inode operation against other
	 * operations - we freed the inode and hence reallocation is required
	 * and that will serialise on reallocating the space the deferops need
	 * to free. Hence we can unlock the inode on the first commit of
	 * the transaction rather than roll it right through the deferops. This
	 * avoids relogging the XFS_ISTALE inode.
	 *
	 * We check that xfs_ifree() hasn't grown an internal transaction roll
	 * by asserting that the inode is still locked when it returns.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	error = xfs_ifree(tp, ip);
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	if (error) {
		/*
		 * If we fail to free the inode, shut down.  The cancel
		 * might do that, we need to make sure.  Otherwise the
		 * inode might be lost for a long time or forever.
		 */
		if (!xfs_is_shutdown(mp)) {
			xfs_notice(mp, "%s: xfs_ifree returned error %d",
				__func__, error);
			xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
		}
		xfs_trans_cancel(tp);
		return error;
	}

	/*
	 * Credit the quota account(s). The inode is gone.
	 */
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);

	/*
	 * Just ignore errors at this point.  There is nothing we can do except
	 * to try to keep going. Make sure it's not a silent error.
	 */
	error = xfs_trans_commit(tp);
	if (error)
		xfs_notice(mp, "%s: xfs_trans_commit returned error %d",
			__func__, error);

	return 0;
}

/*
 * Returns true if we need to update the on-disk metadata before we can free
 * the memory used by this inode.  Updates include freeing post-eof
 * preallocations; freeing COW staging extents; and marking the inode free in
 * the inobt if it is on the unlinked list.
 */
bool
xfs_inode_needs_inactive(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*cow_ifp = xfs_ifork_ptr(ip, XFS_COW_FORK);

	/*
	 * If the inode is already free, then there can be nothing
	 * to clean up here.
	 */
	if (VFS_I(ip)->i_mode == 0)
		return false;

	/* If this is a read-only mount, don't do this (would generate I/O) */
	if (xfs_is_readonly(mp))
		return false;

	/* If the log isn't running, push inodes straight to reclaim. */
	if (xfs_is_shutdown(mp) || xfs_has_norecovery(mp))
		return false;

	/* Metadata inodes require explicit resource cleanup. */
	if (xfs_is_metadata_inode(ip))
		return false;

	/* Want to clean out the cow blocks if there are any. */
	if (cow_ifp && cow_ifp->if_bytes > 0)
		return true;

	/* Unlinked files must be freed. */
	if (VFS_I(ip)->i_nlink == 0)
		return true;

	/*
	 * This file isn't being freed, so check if there are post-eof blocks
	 * to free.  @force is true because we are evicting an inode from the
	 * cache.  Post-eof blocks must be freed, lest we end up with broken
	 * free space accounting.
	 *
	 * Note: don't bother with iolock here since lockdep complains about
	 * acquiring it in reclaim context.  We have the only reference to the
	 * inode at this point anyways.
	 */
	return xfs_can_free_eofblocks(ip, true);
}
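/*
 * Illustrative use of the predicate above, assuming an eviction-style
 * caller deciding how to dispose of an inode:
 *
 *	if (xfs_inode_needs_inactive(ip))
 *		... queue the inode for xfs_inactive() processing ...
 *	else
 *		... reclaim the inode immediately ...
 */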
/*
 * xfs_inactive
 *
 * This is called when the reference count for the vnode
 * goes to zero.  If the file has been unlinked, then it must
 * now be truncated.  Also, we clear all of the read-ahead state
 * kept for the inode here since the file is now closed.
 */
void
xfs_inactive(
	xfs_inode_t	*ip)
{
	struct xfs_mount	*mp;
	int			error;
	int			truncate = 0;

	/*
	 * If the inode is already free, then there can be nothing
	 * to clean up here.
	 */
	if (VFS_I(ip)->i_mode == 0) {
		ASSERT(ip->i_df.if_broot_bytes == 0);
		goto out;
	}

	mp = ip->i_mount;
	ASSERT(!xfs_iflags_test(ip, XFS_IRECOVERY));

	/* If this is a read-only mount, don't do this (would generate I/O) */
	if (xfs_is_readonly(mp))
		goto out;

	/* Metadata inodes require explicit resource cleanup. */
	if (xfs_is_metadata_inode(ip))
		goto out;

	/* Try to clean out the cow blocks if there are any. */
	if (xfs_inode_has_cow_data(ip))
		xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true);

	if (VFS_I(ip)->i_nlink != 0) {
		/*
		 * force is true because we are evicting an inode from the
		 * cache. Post-eof blocks must be freed, lest we end up with
		 * broken free space accounting.
		 *
		 * Note: don't bother with iolock here since lockdep complains
		 * about acquiring it in reclaim context. We have the only
		 * reference to the inode at this point anyways.
		 */
		if (xfs_can_free_eofblocks(ip, true))
			xfs_free_eofblocks(ip);

		goto out;
	}

	if (S_ISREG(VFS_I(ip)->i_mode) &&
	    (ip->i_disk_size != 0 || XFS_ISIZE(ip) != 0 ||
	     ip->i_df.if_nextents > 0 || ip->i_delayed_blks > 0))
		truncate = 1;

	error = xfs_qm_dqattach(ip);
	if (error)
		goto out;

	if (S_ISLNK(VFS_I(ip)->i_mode))
		error = xfs_inactive_symlink(ip);
	else if (truncate)
		error = xfs_inactive_truncate(ip);
	if (error)
		goto out;

	/*
	 * If there are attributes associated with the file then blow them away
	 * now.  The code calls a routine that recursively deconstructs the
	 * attribute fork.  It also blows away the in-core attribute fork.
	 */
	if (xfs_inode_has_attr_fork(ip)) {
		error = xfs_attr_inactive(ip);
		if (error)
			goto out;
	}

	ASSERT(ip->i_forkoff == 0);

	/*
	 * Free the inode.
	 */
	xfs_inactive_ifree(ip);

out:
	/*
	 * We're done making metadata updates for this inode, so we can release
	 * the attached dquots.
	 */
	xfs_qm_dqdetach(ip);
}
/*
 * In-Core Unlinked List Lookups
 * =============================
 *
 * Every inode is supposed to be reachable from some other piece of metadata
 * with the exception of the root directory.  Inodes with a connection to a
 * file descriptor but not linked from anywhere in the on-disk directory tree
 * are collectively known as unlinked inodes, though the filesystem itself
 * maintains links to these inodes so that on-disk metadata are consistent.
 *
 * XFS implements a per-AG on-disk hash table of unlinked inodes.  The AGI
 * header contains a number of buckets that point to an inode, and each inode
 * record has a pointer to the next inode in the hash chain.  This
 * singly-linked list causes scaling problems in the iunlink remove function
 * because we must walk that list to find the inode that points to the inode
 * being removed from the unlinked hash bucket list.
 *
 * Hence we keep an in-memory double linked list to link each inode on an
 * unlinked list.  Because there are 64 unlinked lists per AGI, keeping pointer
 * based lists would require having 64 list heads in the perag, one for each
 * list.  This is expensive in terms of memory (think millions of AGs) and cache
 * misses on lookups.  Instead, use the fact that inodes on the unlinked list
 * must be referenced at the VFS level to keep them on the list and hence we
 * have an existence guarantee for inodes on the unlinked list.
 *
 * Given we have an existence guarantee, we can use lockless inode cache lookups
 * to resolve aginos to xfs inodes.  This means we only need 8 bytes per inode
 * for the double linked unlinked list, and we don't need any extra locking to
 * keep the list safe as all manipulations are done under the AGI buffer lock.
 * Keeping the list up to date does not require memory allocation, just finding
 * the XFS inode and updating the next/prev unlinked list aginos.
 */

/*
 * Find an inode on the unlinked list.  This does not take references to the
 * inode as we have existence guarantees by holding the AGI buffer lock and that
 * only unlinked, referenced inodes can be on the unlinked inode list.  If we
 * don't find the inode in cache, then let the caller handle the situation.
 */
static struct xfs_inode *
xfs_iunlink_lookup(
	struct xfs_perag	*pag,
	xfs_agino_t		agino)
{
	struct xfs_inode	*ip;

	rcu_read_lock();
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);

	/*
	 * Inode not in memory or in RCU freeing limbo should not happen.
	 * Warn about this and let the caller handle the failure.
	 */
	if (WARN_ON_ONCE(!ip || !ip->i_ino)) {
		rcu_read_unlock();
		return NULL;
	}
	ASSERT(!xfs_iflags_test(ip, XFS_IRECLAIMABLE | XFS_IRECLAIM));
	rcu_read_unlock();
	return ip;
}

/* Update the prev pointer of the next agino. */
static int
xfs_iunlink_update_backref(
	struct xfs_perag	*pag,
	xfs_agino_t		prev_agino,
	xfs_agino_t		next_agino)
{
	struct xfs_inode	*ip;

	/* No update necessary if we are at the end of the list. */
	if (next_agino == NULLAGINO)
		return 0;

	ip = xfs_iunlink_lookup(pag, next_agino);
	if (!ip)
		return -EFSCORRUPTED;
	ip->i_prev_unlinked = prev_agino;
	return 0;
}
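/*
 * Worked example of the backref update above: inserting agino 42 at the
 * head of a bucket that currently points at agino 17 calls
 * xfs_iunlink_update_backref(pag, 42, 17), which sets inode 17's
 * i_prev_unlinked to 42 so a later removal of 17 can find its neighbour
 * without walking the on-disk chain.
 */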
1894 */ 1895 if (old_value == new_agino) { 1896 xfs_buf_mark_corrupt(agibp); 1897 return -EFSCORRUPTED; 1898 } 1899 1900 agi->agi_unlinked[bucket_index] = cpu_to_be32(new_agino); 1901 offset = offsetof(struct xfs_agi, agi_unlinked) + 1902 (sizeof(xfs_agino_t) * bucket_index); 1903 xfs_trans_log_buf(tp, agibp, offset, offset + sizeof(xfs_agino_t) - 1); 1904 return 0; 1905 } 1906 1907 static int 1908 xfs_iunlink_insert_inode( 1909 struct xfs_trans *tp, 1910 struct xfs_perag *pag, 1911 struct xfs_buf *agibp, 1912 struct xfs_inode *ip) 1913 { 1914 struct xfs_mount *mp = tp->t_mountp; 1915 struct xfs_agi *agi = agibp->b_addr; 1916 xfs_agino_t next_agino; 1917 xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ip->i_ino); 1918 short bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS; 1919 int error; 1920 1921 /* 1922 * Get the index into the agi hash table for the list this inode will 1923 * go on. Make sure the pointer isn't garbage and that this inode 1924 * isn't already on the list. 1925 */ 1926 next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]); 1927 if (next_agino == agino || 1928 !xfs_verify_agino_or_null(pag, next_agino)) { 1929 xfs_buf_mark_corrupt(agibp); 1930 return -EFSCORRUPTED; 1931 } 1932 1933 /* 1934 * Update the prev pointer in the next inode to point back to this 1935 * inode. 1936 */ 1937 error = xfs_iunlink_update_backref(pag, agino, next_agino); 1938 if (error) 1939 return error; 1940 1941 if (next_agino != NULLAGINO) { 1942 /* 1943 * There is already another inode in the bucket, so point this 1944 * inode to the current head of the list. 1945 */ 1946 error = xfs_iunlink_log_inode(tp, ip, pag, next_agino); 1947 if (error) 1948 return error; 1949 ip->i_next_unlinked = next_agino; 1950 } 1951 1952 /* Point the head of the list to point to this inode. */ 1953 return xfs_iunlink_update_bucket(tp, pag, agibp, bucket_index, agino); 1954 } 1955 1956 /* 1957 * This is called when the inode's link count has gone to 0 or we are creating 1958 * a tmpfile via O_TMPFILE. The inode @ip must have nlink == 0. 1959 * 1960 * We place the on-disk inode on a list in the AGI. It will be pulled from this 1961 * list when the inode is freed. 1962 */ 1963 STATIC int 1964 xfs_iunlink( 1965 struct xfs_trans *tp, 1966 struct xfs_inode *ip) 1967 { 1968 struct xfs_mount *mp = tp->t_mountp; 1969 struct xfs_perag *pag; 1970 struct xfs_buf *agibp; 1971 int error; 1972 1973 ASSERT(VFS_I(ip)->i_nlink == 0); 1974 ASSERT(VFS_I(ip)->i_mode != 0); 1975 trace_xfs_iunlink(ip); 1976 1977 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); 1978 1979 /* Get the agi buffer first. It ensures lock ordering on the list. */ 1980 error = xfs_read_agi(pag, tp, &agibp); 1981 if (error) 1982 goto out; 1983 1984 error = xfs_iunlink_insert_inode(tp, pag, agibp, ip); 1985 out: 1986 xfs_perag_put(pag); 1987 return error; 1988 } 1989 1990 static int 1991 xfs_iunlink_remove_inode( 1992 struct xfs_trans *tp, 1993 struct xfs_perag *pag, 1994 struct xfs_buf *agibp, 1995 struct xfs_inode *ip) 1996 { 1997 struct xfs_mount *mp = tp->t_mountp; 1998 struct xfs_agi *agi = agibp->b_addr; 1999 xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ip->i_ino); 2000 xfs_agino_t head_agino; 2001 short bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS; 2002 int error; 2003 2004 trace_xfs_iunlink_remove(ip); 2005 2006 /* 2007 * Get the index into the agi hash table for the list this inode will 2008 * go on. Make sure the head pointer isn't garbage. 
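 *
 * Informally, the removal below is a standard doubly linked list unlink:
 * point the previous inode's on-disk next-unlinked field (or the AGI bucket
 * head, if we are the list head) at our next inode, and fix up the
 * in-memory i_prev_unlinked of whatever follows us.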
2009 */ 2010 head_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]); 2011 if (!xfs_verify_agino(pag, head_agino)) { 2012 XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, 2013 agi, sizeof(*agi)); 2014 return -EFSCORRUPTED; 2015 } 2016 2017 /* 2018 * Set our inode's next_unlinked pointer to NULL and then return 2019 * the old pointer value so that we can update whatever was previous 2020 * to us in the list to point to whatever was next in the list. 2021 */ 2022 error = xfs_iunlink_log_inode(tp, ip, pag, NULLAGINO); 2023 if (error) 2024 return error; 2025 2026 /* 2027 * Update the prev pointer in the next inode to point back to previous 2028 * inode in the chain. 2029 */ 2030 error = xfs_iunlink_update_backref(pag, ip->i_prev_unlinked, 2031 ip->i_next_unlinked); 2032 if (error) 2033 return error; 2034 2035 if (head_agino != agino) { 2036 struct xfs_inode *prev_ip; 2037 2038 prev_ip = xfs_iunlink_lookup(pag, ip->i_prev_unlinked); 2039 if (!prev_ip) 2040 return -EFSCORRUPTED; 2041 2042 error = xfs_iunlink_log_inode(tp, prev_ip, pag, 2043 ip->i_next_unlinked); 2044 prev_ip->i_next_unlinked = ip->i_next_unlinked; 2045 } else { 2046 /* Point the head of the list to the next unlinked inode. */ 2047 error = xfs_iunlink_update_bucket(tp, pag, agibp, bucket_index, 2048 ip->i_next_unlinked); 2049 } 2050 2051 ip->i_next_unlinked = NULLAGINO; 2052 ip->i_prev_unlinked = NULLAGINO; 2053 return error; 2054 } 2055 2056 /* 2057 * Pull the on-disk inode from the AGI unlinked list. 2058 */ 2059 STATIC int 2060 xfs_iunlink_remove( 2061 struct xfs_trans *tp, 2062 struct xfs_perag *pag, 2063 struct xfs_inode *ip) 2064 { 2065 struct xfs_buf *agibp; 2066 int error; 2067 2068 trace_xfs_iunlink_remove(ip); 2069 2070 /* Get the agi buffer first. It ensures lock ordering on the list. */ 2071 error = xfs_read_agi(pag, tp, &agibp); 2072 if (error) 2073 return error; 2074 2075 return xfs_iunlink_remove_inode(tp, pag, agibp, ip); 2076 } 2077 2078 /* 2079 * Look up the inode number specified and if it is not already marked XFS_ISTALE 2080 * mark it stale. We should only find clean inodes in this lookup that aren't 2081 * already stale. 2082 */ 2083 static void 2084 xfs_ifree_mark_inode_stale( 2085 struct xfs_perag *pag, 2086 struct xfs_inode *free_ip, 2087 xfs_ino_t inum) 2088 { 2089 struct xfs_mount *mp = pag->pag_mount; 2090 struct xfs_inode_log_item *iip; 2091 struct xfs_inode *ip; 2092 2093 retry: 2094 rcu_read_lock(); 2095 ip = radix_tree_lookup(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, inum)); 2096 2097 /* Inode not in memory, nothing to do */ 2098 if (!ip) { 2099 rcu_read_unlock(); 2100 return; 2101 } 2102 2103 /* 2104 * because this is an RCU protected lookup, we could find a recently 2105 * freed or even reallocated inode during the lookup. We need to check 2106 * under the i_flags_lock for a valid inode here. Skip it if it is not 2107 * valid, the wrong inode or stale. 2108 */ 2109 spin_lock(&ip->i_flags_lock); 2110 if (ip->i_ino != inum || __xfs_iflags_test(ip, XFS_ISTALE)) 2111 goto out_iflags_unlock; 2112 2113 /* 2114 * Don't try to lock/unlock the current inode, but we _cannot_ skip the 2115 * other inodes that we did not find in the list attached to the buffer 2116 * and are not already marked stale. If we can't lock it, back off and 2117 * retry. 
2118 */ 2119 if (ip != free_ip) { 2120 if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) { 2121 spin_unlock(&ip->i_flags_lock); 2122 rcu_read_unlock(); 2123 delay(1); 2124 goto retry; 2125 } 2126 } 2127 ip->i_flags |= XFS_ISTALE; 2128 2129 /* 2130 * If the inode is flushing, it is already attached to the buffer. All 2131 * we needed to do here is mark the inode stale so buffer IO completion 2132 * will remove it from the AIL. 2133 */ 2134 iip = ip->i_itemp; 2135 if (__xfs_iflags_test(ip, XFS_IFLUSHING)) { 2136 ASSERT(!list_empty(&iip->ili_item.li_bio_list)); 2137 ASSERT(iip->ili_last_fields); 2138 goto out_iunlock; 2139 } 2140 2141 /* 2142 * Inodes not attached to the buffer can be released immediately. 2143 * Everything else has to go through xfs_iflush_abort() on journal 2144 * commit as the flock synchronises removal of the inode from the 2145 * cluster buffer against inode reclaim. 2146 */ 2147 if (!iip || list_empty(&iip->ili_item.li_bio_list)) 2148 goto out_iunlock; 2149 2150 __xfs_iflags_set(ip, XFS_IFLUSHING); 2151 spin_unlock(&ip->i_flags_lock); 2152 rcu_read_unlock(); 2153 2154 /* we have a dirty inode in memory that has not yet been flushed. */ 2155 spin_lock(&iip->ili_lock); 2156 iip->ili_last_fields = iip->ili_fields; 2157 iip->ili_fields = 0; 2158 iip->ili_fsync_fields = 0; 2159 spin_unlock(&iip->ili_lock); 2160 ASSERT(iip->ili_last_fields); 2161 2162 if (ip != free_ip) 2163 xfs_iunlock(ip, XFS_ILOCK_EXCL); 2164 return; 2165 2166 out_iunlock: 2167 if (ip != free_ip) 2168 xfs_iunlock(ip, XFS_ILOCK_EXCL); 2169 out_iflags_unlock: 2170 spin_unlock(&ip->i_flags_lock); 2171 rcu_read_unlock(); 2172 } 2173 2174 /* 2175 * A big issue when freeing the inode cluster is that we _cannot_ skip any 2176 * inodes that are in memory - they all must be marked stale and attached to 2177 * the cluster buffer. 2178 */ 2179 static int 2180 xfs_ifree_cluster( 2181 struct xfs_trans *tp, 2182 struct xfs_perag *pag, 2183 struct xfs_inode *free_ip, 2184 struct xfs_icluster *xic) 2185 { 2186 struct xfs_mount *mp = free_ip->i_mount; 2187 struct xfs_ino_geometry *igeo = M_IGEO(mp); 2188 struct xfs_buf *bp; 2189 xfs_daddr_t blkno; 2190 xfs_ino_t inum = xic->first_ino; 2191 int nbufs; 2192 int i, j; 2193 int ioffset; 2194 int error; 2195 2196 nbufs = igeo->ialloc_blks / igeo->blocks_per_cluster; 2197 2198 for (j = 0; j < nbufs; j++, inum += igeo->inodes_per_cluster) { 2199 /* 2200 * The allocation bitmap tells us which inodes of the chunk were 2201 * physically allocated. Skip the cluster if an inode falls into 2202 * a sparse region. 2203 */ 2204 ioffset = inum - xic->first_ino; 2205 if ((xic->alloc & XFS_INOBT_MASK(ioffset)) == 0) { 2206 ASSERT(ioffset % igeo->inodes_per_cluster == 0); 2207 continue; 2208 } 2209 2210 blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum), 2211 XFS_INO_TO_AGBNO(mp, inum)); 2212 2213 /* 2214 * We obtain and lock the backing buffer first in the process 2215 * here to ensure dirty inodes attached to the buffer remain in 2216 * the flushing state while we mark them stale. 2217 * 2218 * If we scan the in-memory inodes first, then buffer IO can 2219 * complete before we get a lock on it, and hence we may fail 2220 * to mark all the active inodes on the buffer stale. 2221 */ 2222 error = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno, 2223 mp->m_bsize * igeo->blocks_per_cluster, 2224 XBF_UNMAPPED, &bp); 2225 if (error) 2226 return error; 2227 2228 /* 2229 * This buffer may not have been correctly initialised as we 2230 * didn't read it from disk. 
That's not important because we are
2231 	 * only using it to mark the buffer as stale in the log, and to
2232 	 * attach stale cached inodes on it. That means it will never be
2233 	 * dispatched for IO. If it is, we want to know about it, and we
2234 	 * want it to fail. We can achieve this by adding a write
2235 	 * verifier to the buffer.
2236 	 */
2237 	bp->b_ops = &xfs_inode_buf_ops;
2238 
2239 	/*
2240 	 * Now we need to set all the cached clean inodes as XFS_ISTALE,
2241 	 * too. This requires lookups, and will skip inodes that we've
2242 	 * already marked XFS_ISTALE.
2243 	 */
2244 	for (i = 0; i < igeo->inodes_per_cluster; i++)
2245 		xfs_ifree_mark_inode_stale(pag, free_ip, inum + i);
2246 
2247 	xfs_trans_stale_inode_buf(tp, bp);
2248 	xfs_trans_binval(tp, bp);
2249 	}
2250 	return 0;
2251 }
2252 
2253 /*
2254  * This is called to return an inode to the inode free list. The inode should
2255  * already be truncated to 0 length and have no pages associated with it. This
2256  * routine also assumes that the inode is already a part of the transaction.
2257  *
2258  * The on-disk copy of the inode will have been added to the list of unlinked
2259  * inodes in the AGI. We need to remove the inode from that list atomically with
2260  * respect to freeing it here.
2261  */
2262 int
2263 xfs_ifree(
2264 	struct xfs_trans	*tp,
2265 	struct xfs_inode	*ip)
2266 {
2267 	struct xfs_mount	*mp = ip->i_mount;
2268 	struct xfs_perag	*pag;
2269 	struct xfs_icluster	xic = { 0 };
2270 	struct xfs_inode_log_item *iip = ip->i_itemp;
2271 	int			error;
2272 
2273 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
2274 	ASSERT(VFS_I(ip)->i_nlink == 0);
2275 	ASSERT(ip->i_df.if_nextents == 0);
2276 	ASSERT(ip->i_disk_size == 0 || !S_ISREG(VFS_I(ip)->i_mode));
2277 	ASSERT(ip->i_nblocks == 0);
2278 
2279 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
2280 
2281 	/*
2282 	 * Free the inode first so that we guarantee that the AGI lock is going
2283 	 * to be taken before we remove the inode from the unlinked list. This
2284 	 * makes the AGI lock -> unlinked list modification order the same as
2285 	 * used in O_TMPFILE creation.
2286 	 */
2287 	error = xfs_difree(tp, pag, ip->i_ino, &xic);
2288 	if (error)
2289 		goto out;
2290 
2291 	error = xfs_iunlink_remove(tp, pag, ip);
2292 	if (error)
2293 		goto out;
2294 
2295 	/*
2296 	 * Free any local-format data sitting around before we reset the
2297 	 * data fork to extents format. Note that the attr fork data has
2298 	 * already been freed by xfs_attr_inactive.
2299 	 */
2300 	if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL) {
2301 		kmem_free(ip->i_df.if_u1.if_data);
2302 		ip->i_df.if_u1.if_data = NULL;
2303 		ip->i_df.if_bytes = 0;
2304 	}
2305 
2306 	VFS_I(ip)->i_mode = 0;		/* mark incore inode as free */
2307 	ip->i_diflags = 0;
2308 	ip->i_diflags2 = mp->m_ino_geo.new_diflags2;
2309 	ip->i_forkoff = 0;		/* mark the attr fork not in use */
2310 	ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
2311 	if (xfs_iflags_test(ip, XFS_IPRESERVE_DM_FIELDS))
2312 		xfs_iflags_clear(ip, XFS_IPRESERVE_DM_FIELDS);
2313 
2314 	/* Don't attempt to replay owner changes for a deleted inode */
2315 	spin_lock(&iip->ili_lock);
2316 	iip->ili_fields &= ~(XFS_ILOG_AOWNER | XFS_ILOG_DOWNER);
2317 	spin_unlock(&iip->ili_lock);
2318 
2319 	/*
2320 	 * Bump the generation count so no one will be confused
2321 	 * by reincarnations of this inode.
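 * (For example, an NFS file handle that still encodes the old generation
 * will now fail the generation check and return ESTALE rather than
 * resolving to whatever new file is reincarnated at this inode number;
 * illustrative note, the handle check itself lives in xfs_export.c.)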
2322 */ 2323 VFS_I(ip)->i_generation++; 2324 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 2325 2326 if (xic.deleted) 2327 error = xfs_ifree_cluster(tp, pag, ip, &xic); 2328 out: 2329 xfs_perag_put(pag); 2330 return error; 2331 } 2332 2333 /* 2334 * This is called to unpin an inode. The caller must have the inode locked 2335 * in at least shared mode so that the buffer cannot be subsequently pinned 2336 * once someone is waiting for it to be unpinned. 2337 */ 2338 static void 2339 xfs_iunpin( 2340 struct xfs_inode *ip) 2341 { 2342 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); 2343 2344 trace_xfs_inode_unpin_nowait(ip, _RET_IP_); 2345 2346 /* Give the log a push to start the unpinning I/O */ 2347 xfs_log_force_seq(ip->i_mount, ip->i_itemp->ili_commit_seq, 0, NULL); 2348 2349 } 2350 2351 static void 2352 __xfs_iunpin_wait( 2353 struct xfs_inode *ip) 2354 { 2355 wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT); 2356 DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT); 2357 2358 xfs_iunpin(ip); 2359 2360 do { 2361 prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE); 2362 if (xfs_ipincount(ip)) 2363 io_schedule(); 2364 } while (xfs_ipincount(ip)); 2365 finish_wait(wq, &wait.wq_entry); 2366 } 2367 2368 void 2369 xfs_iunpin_wait( 2370 struct xfs_inode *ip) 2371 { 2372 if (xfs_ipincount(ip)) 2373 __xfs_iunpin_wait(ip); 2374 } 2375 2376 /* 2377 * Removing an inode from the namespace involves removing the directory entry 2378 * and dropping the link count on the inode. Removing the directory entry can 2379 * result in locking an AGF (directory blocks were freed) and removing a link 2380 * count can result in placing the inode on an unlinked list which results in 2381 * locking an AGI. 2382 * 2383 * The big problem here is that we have an ordering constraint on AGF and AGI 2384 * locking - inode allocation locks the AGI, then can allocate a new extent for 2385 * new inodes, locking the AGF after the AGI. Similarly, freeing the inode 2386 * removes the inode from the unlinked list, requiring that we lock the AGI 2387 * first, and then freeing the inode can result in an inode chunk being freed 2388 * and hence freeing disk space requiring that we lock an AGF. 2389 * 2390 * Hence the ordering that is imposed by other parts of the code is AGI before 2391 * AGF. This means we cannot remove the directory entry before we drop the inode 2392 * reference count and put it on the unlinked list as this results in a lock 2393 * order of AGF then AGI, and this can deadlock against inode allocation and 2394 * freeing. Therefore we must drop the link counts before we remove the 2395 * directory entry. 2396 * 2397 * This is still safe from a transactional point of view - it is not until we 2398 * get to xfs_defer_finish() that we have the possibility of multiple 2399 * transactions in this operation. Hence as long as we remove the directory 2400 * entry and drop the link count in the first transaction of the remove 2401 * operation, there are no transactional constraints on the ordering here. 
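 *
 * An illustrative ABBA interleaving (hypothetical tasks) of the deadlock
 * this ordering avoids:
 *
 *	unlink task:			inode allocation task:
 *	lock AGF (free dir block)	lock AGI (allocate inode chunk)
 *	lock AGI (unlinked list)	lock AGF (chunk's disk blocks)
 *
 * where each task now blocks on the lock the other already holds.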
2402  */
2403 int
2404 xfs_remove(
2405 	xfs_inode_t		*dp,
2406 	struct xfs_name		*name,
2407 	xfs_inode_t		*ip)
2408 {
2409 	xfs_mount_t		*mp = dp->i_mount;
2410 	xfs_trans_t		*tp = NULL;
2411 	int			is_dir = S_ISDIR(VFS_I(ip)->i_mode);
2412 	int			dontcare;
2413 	int			error = 0;
2414 	uint			resblks;
2415 
2416 	trace_xfs_remove(dp, name);
2417 
2418 	if (xfs_is_shutdown(mp))
2419 		return -EIO;
2420 
2421 	error = xfs_qm_dqattach(dp);
2422 	if (error)
2423 		goto std_return;
2424 
2425 	error = xfs_qm_dqattach(ip);
2426 	if (error)
2427 		goto std_return;
2428 
2429 	/*
2430 	 * We try to get the real space reservation first, allowing for
2431 	 * directory btree deletion(s) implying possible bmap insert(s). If we
2432 	 * can't get the space reservation then we use 0 instead, and avoid the
2433 	 * bmap btree insert(s) in the directory code by, if the bmap insert
2434 	 * tries to happen, instead trimming the LAST block from the directory.
2435 	 *
2436 	 * Ignore EDQUOT and ENOSPC being returned via nospace_error because
2437 	 * the directory code can handle a reservationless update and we don't
2438 	 * want to prevent a user from trying to free space by deleting things.
2439 	 */
2440 	resblks = XFS_REMOVE_SPACE_RES(mp);
2441 	error = xfs_trans_alloc_dir(dp, &M_RES(mp)->tr_remove, ip, &resblks,
2442 			&tp, &dontcare);
2443 	if (error) {
2444 		ASSERT(error != -ENOSPC);
2445 		goto std_return;
2446 	}
2447 
2448 	/*
2449 	 * If we're removing a directory, perform some additional validation.
2450 	 */
2451 	if (is_dir) {
2452 		ASSERT(VFS_I(ip)->i_nlink >= 2);
2453 		if (VFS_I(ip)->i_nlink != 2) {
2454 			error = -ENOTEMPTY;
2455 			goto out_trans_cancel;
2456 		}
2457 		if (!xfs_dir_isempty(ip)) {
2458 			error = -ENOTEMPTY;
2459 			goto out_trans_cancel;
2460 		}
2461 
2462 		/* Drop the link from ip's "..". */
2463 		error = xfs_droplink(tp, dp);
2464 		if (error)
2465 			goto out_trans_cancel;
2466 
2467 		/* Drop the "." link from ip to self. */
2468 		error = xfs_droplink(tp, ip);
2469 		if (error)
2470 			goto out_trans_cancel;
2471 
2472 		/*
2473 		 * Point the unlinked child directory's ".." entry to the root
2474 		 * directory to eliminate back-references to inodes that may
2475 		 * get freed before the child directory is closed. If the fs
2476 		 * gets shrunk, this can lead to dirent inode validation errors.
2477 		 */
2478 		if (dp->i_ino != tp->t_mountp->m_sb.sb_rootino) {
2479 			error = xfs_dir_replace(tp, ip, &xfs_name_dotdot,
2480 					tp->t_mountp->m_sb.sb_rootino, 0);
2481 			if (error)
2482 				goto out_trans_cancel;
2483 		}
2484 	} else {
2485 		/*
2486 		 * When removing a non-directory we need to log the parent
2487 		 * inode here. For a directory this is done implicitly
2488 		 * by the xfs_droplink call for the ".." entry.
2489 		 */
2490 		xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
2491 	}
2492 	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2493 
2494 	/* Drop the link from dp to ip. */
2495 	error = xfs_droplink(tp, ip);
2496 	if (error)
2497 		goto out_trans_cancel;
2498 
2499 	error = xfs_dir_removename(tp, dp, name, ip->i_ino, resblks);
2500 	if (error) {
2501 		ASSERT(error != -ENOENT);
2502 		goto out_trans_cancel;
2503 	}
2504 
2505 	/*
2506 	 * If this is a synchronous mount, make sure that the
2507 	 * remove transaction goes to disk before returning to
2508 	 * the user.
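 * (e.g. a filesystem mounted with -o wsync or -o dirsync; illustrative
 * note, the two flag checks below map directly to those mount options.)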
2509 */ 2510 if (xfs_has_wsync(mp) || xfs_has_dirsync(mp)) 2511 xfs_trans_set_sync(tp); 2512 2513 error = xfs_trans_commit(tp); 2514 if (error) 2515 goto std_return; 2516 2517 if (is_dir && xfs_inode_is_filestream(ip)) 2518 xfs_filestream_deassociate(ip); 2519 2520 return 0; 2521 2522 out_trans_cancel: 2523 xfs_trans_cancel(tp); 2524 std_return: 2525 return error; 2526 } 2527 2528 /* 2529 * Enter all inodes for a rename transaction into a sorted array. 2530 */ 2531 #define __XFS_SORT_INODES 5 2532 STATIC void 2533 xfs_sort_for_rename( 2534 struct xfs_inode *dp1, /* in: old (source) directory inode */ 2535 struct xfs_inode *dp2, /* in: new (target) directory inode */ 2536 struct xfs_inode *ip1, /* in: inode of old entry */ 2537 struct xfs_inode *ip2, /* in: inode of new entry */ 2538 struct xfs_inode *wip, /* in: whiteout inode */ 2539 struct xfs_inode **i_tab,/* out: sorted array of inodes */ 2540 int *num_inodes) /* in/out: inodes in array */ 2541 { 2542 int i, j; 2543 2544 ASSERT(*num_inodes == __XFS_SORT_INODES); 2545 memset(i_tab, 0, *num_inodes * sizeof(struct xfs_inode *)); 2546 2547 /* 2548 * i_tab contains a list of pointers to inodes. We initialize 2549 * the table here & we'll sort it. We will then use it to 2550 * order the acquisition of the inode locks. 2551 * 2552 * Note that the table may contain duplicates. e.g., dp1 == dp2. 2553 */ 2554 i = 0; 2555 i_tab[i++] = dp1; 2556 i_tab[i++] = dp2; 2557 i_tab[i++] = ip1; 2558 if (ip2) 2559 i_tab[i++] = ip2; 2560 if (wip) 2561 i_tab[i++] = wip; 2562 *num_inodes = i; 2563 2564 /* 2565 * Sort the elements via bubble sort. (Remember, there are at 2566 * most 5 elements to sort, so this is adequate.) 2567 */ 2568 for (i = 0; i < *num_inodes; i++) { 2569 for (j = 1; j < *num_inodes; j++) { 2570 if (i_tab[j]->i_ino < i_tab[j-1]->i_ino) { 2571 struct xfs_inode *temp = i_tab[j]; 2572 i_tab[j] = i_tab[j-1]; 2573 i_tab[j-1] = temp; 2574 } 2575 } 2576 } 2577 } 2578 2579 static int 2580 xfs_finish_rename( 2581 struct xfs_trans *tp) 2582 { 2583 /* 2584 * If this is a synchronous mount, make sure that the rename transaction 2585 * goes to disk before returning to the user. 2586 */ 2587 if (xfs_has_wsync(tp->t_mountp) || xfs_has_dirsync(tp->t_mountp)) 2588 xfs_trans_set_sync(tp); 2589 2590 return xfs_trans_commit(tp); 2591 } 2592 2593 /* 2594 * xfs_cross_rename() 2595 * 2596 * responsible for handling RENAME_EXCHANGE flag in renameat2() syscall 2597 */ 2598 STATIC int 2599 xfs_cross_rename( 2600 struct xfs_trans *tp, 2601 struct xfs_inode *dp1, 2602 struct xfs_name *name1, 2603 struct xfs_inode *ip1, 2604 struct xfs_inode *dp2, 2605 struct xfs_name *name2, 2606 struct xfs_inode *ip2, 2607 int spaceres) 2608 { 2609 int error = 0; 2610 int ip1_flags = 0; 2611 int ip2_flags = 0; 2612 int dp2_flags = 0; 2613 2614 /* Swap inode number for dirent in first parent */ 2615 error = xfs_dir_replace(tp, dp1, name1, ip2->i_ino, spaceres); 2616 if (error) 2617 goto out_trans_abort; 2618 2619 /* Swap inode number for dirent in second parent */ 2620 error = xfs_dir_replace(tp, dp2, name2, ip1->i_ino, spaceres); 2621 if (error) 2622 goto out_trans_abort; 2623 2624 /* 2625 * If we're renaming one or more directories across different parents, 2626 * update the respective ".." entries (and link counts) to match the new 2627 * parents. 
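 *
 * An invented example: RENAME_EXCHANGE of /a/x (a directory, ip1) with
 * /b/y (a regular file, ip2) leaves the directory reachable at /b/y. Its
 * ".." entry must be rewritten to point at b, and b picks up the link
 * count the child directory used to contribute to a.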
2628  */
2629 	if (dp1 != dp2) {
2630 		dp2_flags = XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2631 
2632 		if (S_ISDIR(VFS_I(ip2)->i_mode)) {
2633 			error = xfs_dir_replace(tp, ip2, &xfs_name_dotdot,
2634 						dp1->i_ino, spaceres);
2635 			if (error)
2636 				goto out_trans_abort;
2637 
2638 			/* transfer ip2 ".." reference to dp1 */
2639 			if (!S_ISDIR(VFS_I(ip1)->i_mode)) {
2640 				error = xfs_droplink(tp, dp2);
2641 				if (error)
2642 					goto out_trans_abort;
2643 				xfs_bumplink(tp, dp1);
2644 			}
2645 
2646 			/*
2647 			 * Although ip1 isn't changed here, userspace needs
2648 			 * to be warned about the change, so that applications
2649 			 * relying on it (like backup ones) will be properly
2650 			 * notified of the change.
2651 			 */
2652 			ip1_flags |= XFS_ICHGTIME_CHG;
2653 			ip2_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2654 		}
2655 
2656 		if (S_ISDIR(VFS_I(ip1)->i_mode)) {
2657 			error = xfs_dir_replace(tp, ip1, &xfs_name_dotdot,
2658 						dp2->i_ino, spaceres);
2659 			if (error)
2660 				goto out_trans_abort;
2661 
2662 			/* transfer ip1 ".." reference to dp2 */
2663 			if (!S_ISDIR(VFS_I(ip2)->i_mode)) {
2664 				error = xfs_droplink(tp, dp1);
2665 				if (error)
2666 					goto out_trans_abort;
2667 				xfs_bumplink(tp, dp2);
2668 			}
2669 
2670 			/*
2671 			 * Although ip2 isn't changed here, userspace needs
2672 			 * to be warned about the change, so that applications
2673 			 * relying on it (like backup ones) will be properly
2674 			 * notified of the change.
2675 			 */
2676 			ip1_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2677 			ip2_flags |= XFS_ICHGTIME_CHG;
2678 		}
2679 	}
2680 
2681 	if (ip1_flags) {
2682 		xfs_trans_ichgtime(tp, ip1, ip1_flags);
2683 		xfs_trans_log_inode(tp, ip1, XFS_ILOG_CORE);
2684 	}
2685 	if (ip2_flags) {
2686 		xfs_trans_ichgtime(tp, ip2, ip2_flags);
2687 		xfs_trans_log_inode(tp, ip2, XFS_ILOG_CORE);
2688 	}
2689 	if (dp2_flags) {
2690 		xfs_trans_ichgtime(tp, dp2, dp2_flags);
2691 		xfs_trans_log_inode(tp, dp2, XFS_ILOG_CORE);
2692 	}
2693 	xfs_trans_ichgtime(tp, dp1, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2694 	xfs_trans_log_inode(tp, dp1, XFS_ILOG_CORE);
2695 	return xfs_finish_rename(tp);
2696 
2697 out_trans_abort:
2698 	xfs_trans_cancel(tp);
2699 	return error;
2700 }
2701 
2702 /*
2703  * xfs_rename_alloc_whiteout()
2704  *
2705  * Return a referenced, unlinked, unlocked inode that can be used as a
2706  * whiteout in a rename transaction. We use a tmpfile inode here so that if we
2707  * crash between allocating the inode and linking it into the rename
2708  * transaction, recovery will free the inode and we won't leak it.
2709  */
2710 static int
2711 xfs_rename_alloc_whiteout(
2712 	struct user_namespace	*mnt_userns,
2713 	struct xfs_name		*src_name,
2714 	struct xfs_inode	*dp,
2715 	struct xfs_inode	**wip)
2716 {
2717 	struct xfs_inode	*tmpfile;
2718 	struct qstr		name;
2719 	int			error;
2720 
2721 	error = xfs_create_tmpfile(mnt_userns, dp, S_IFCHR | WHITEOUT_MODE,
2722 				   &tmpfile);
2723 	if (error)
2724 		return error;
2725 
2726 	name.name = src_name->name;
2727 	name.len = src_name->len;
2728 	error = xfs_inode_init_security(VFS_I(tmpfile), VFS_I(dp), &name);
2729 	if (error) {
2730 		xfs_finish_inode_setup(tmpfile);
2731 		xfs_irele(tmpfile);
2732 		return error;
2733 	}
2734 
2735 	/*
2736 	 * Prepare the tmpfile inode as if it were created through the VFS.
2737 	 * Complete the inode setup and flag it as linkable. nlink is already
2738 	 * zero, so we can skip the drop_nlink.
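 *
 * For reference, this whole path is driven from userspace by something
 * like (illustrative):
 *
 *	renameat2(AT_FDCWD, "src", AT_FDCWD, "dst", RENAME_WHITEOUT);
 *
 * which atomically moves src to dst and leaves the char-device whiteout
 * inode allocated here linked in at src (overlayfs is the main user).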
2739 */ 2740 xfs_setup_iops(tmpfile); 2741 xfs_finish_inode_setup(tmpfile); 2742 VFS_I(tmpfile)->i_state |= I_LINKABLE; 2743 2744 *wip = tmpfile; 2745 return 0; 2746 } 2747 2748 /* 2749 * xfs_rename 2750 */ 2751 int 2752 xfs_rename( 2753 struct user_namespace *mnt_userns, 2754 struct xfs_inode *src_dp, 2755 struct xfs_name *src_name, 2756 struct xfs_inode *src_ip, 2757 struct xfs_inode *target_dp, 2758 struct xfs_name *target_name, 2759 struct xfs_inode *target_ip, 2760 unsigned int flags) 2761 { 2762 struct xfs_mount *mp = src_dp->i_mount; 2763 struct xfs_trans *tp; 2764 struct xfs_inode *wip = NULL; /* whiteout inode */ 2765 struct xfs_inode *inodes[__XFS_SORT_INODES]; 2766 int i; 2767 int num_inodes = __XFS_SORT_INODES; 2768 bool new_parent = (src_dp != target_dp); 2769 bool src_is_directory = S_ISDIR(VFS_I(src_ip)->i_mode); 2770 int spaceres; 2771 bool retried = false; 2772 int error, nospace_error = 0; 2773 2774 trace_xfs_rename(src_dp, target_dp, src_name, target_name); 2775 2776 if ((flags & RENAME_EXCHANGE) && !target_ip) 2777 return -EINVAL; 2778 2779 /* 2780 * If we are doing a whiteout operation, allocate the whiteout inode 2781 * we will be placing at the target and ensure the type is set 2782 * appropriately. 2783 */ 2784 if (flags & RENAME_WHITEOUT) { 2785 error = xfs_rename_alloc_whiteout(mnt_userns, src_name, 2786 target_dp, &wip); 2787 if (error) 2788 return error; 2789 2790 /* setup target dirent info as whiteout */ 2791 src_name->type = XFS_DIR3_FT_CHRDEV; 2792 } 2793 2794 xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip, wip, 2795 inodes, &num_inodes); 2796 2797 retry: 2798 nospace_error = 0; 2799 spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len); 2800 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, spaceres, 0, 0, &tp); 2801 if (error == -ENOSPC) { 2802 nospace_error = error; 2803 spaceres = 0; 2804 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, 0, 0, 0, 2805 &tp); 2806 } 2807 if (error) 2808 goto out_release_wip; 2809 2810 /* 2811 * Attach the dquots to the inodes 2812 */ 2813 error = xfs_qm_vop_rename_dqattach(inodes); 2814 if (error) 2815 goto out_trans_cancel; 2816 2817 /* 2818 * Lock all the participating inodes. Depending upon whether 2819 * the target_name exists in the target directory, and 2820 * whether the target directory is the same as the source 2821 * directory, we can lock from 2 to 5 inodes. 2822 */ 2823 xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL); 2824 2825 /* 2826 * Join all the inodes to the transaction. From this point on, 2827 * we can rely on either trans_commit or trans_cancel to unlock 2828 * them. 2829 */ 2830 xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL); 2831 if (new_parent) 2832 xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL); 2833 xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL); 2834 if (target_ip) 2835 xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL); 2836 if (wip) 2837 xfs_trans_ijoin(tp, wip, XFS_ILOCK_EXCL); 2838 2839 /* 2840 * If we are using project inheritance, we only allow renames 2841 * into our tree when the project IDs are the same; else the 2842 * tree quota mechanism would be circumvented. 2843 */ 2844 if (unlikely((target_dp->i_diflags & XFS_DIFLAG_PROJINHERIT) && 2845 target_dp->i_projid != src_ip->i_projid)) { 2846 error = -EXDEV; 2847 goto out_trans_cancel; 2848 } 2849 2850 /* RENAME_EXCHANGE is unique from here on. 
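Exchange is handled entirely by xfs_cross_rename(), which commits or cancels the transaction itself; everything below deals only with plain renames and whiteouts.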
*/
2851 	if (flags & RENAME_EXCHANGE)
2852 		return xfs_cross_rename(tp, src_dp, src_name, src_ip,
2853 					target_dp, target_name, target_ip,
2854 					spaceres);
2855 
2856 	/*
2857 	 * Try to reserve quota to handle an expansion of the target directory.
2858 	 * We'll allow the rename to continue in reservationless mode if we hit
2859 	 * a space usage constraint. If we trigger reservationless mode, save
2860 	 * the errno if there isn't any free space in the target directory.
2861 	 */
2862 	if (spaceres != 0) {
2863 		error = xfs_trans_reserve_quota_nblks(tp, target_dp, spaceres,
2864 				0, false);
2865 		if (error == -EDQUOT || error == -ENOSPC) {
2866 			if (!retried) {
2867 				xfs_trans_cancel(tp);
2868 				xfs_blockgc_free_quota(target_dp, 0);
2869 				retried = true;
2870 				goto retry;
2871 			}
2872 
2873 			nospace_error = error;
2874 			spaceres = 0;
2875 			error = 0;
2876 		}
2877 		if (error)
2878 			goto out_trans_cancel;
2879 	}
2880 
2881 	/*
2882 	 * Check for expected errors before we dirty the transaction
2883 	 * so we can return an error without a transaction abort.
2884 	 */
2885 	if (target_ip == NULL) {
2886 		/*
2887 		 * If there's no space reservation, check the entry will
2888 		 * fit before actually inserting it.
2889 		 */
2890 		if (!spaceres) {
2891 			error = xfs_dir_canenter(tp, target_dp, target_name);
2892 			if (error)
2893 				goto out_trans_cancel;
2894 		}
2895 	} else {
2896 		/*
2897 		 * If target exists and it's a directory, check whether
2898 		 * it can be destroyed.
2899 		 */
2900 		if (S_ISDIR(VFS_I(target_ip)->i_mode) &&
2901 		    (!xfs_dir_isempty(target_ip) ||
2902 		     (VFS_I(target_ip)->i_nlink > 2))) {
2903 			error = -EEXIST;
2904 			goto out_trans_cancel;
2905 		}
2906 	}
2907 
2908 	/*
2909 	 * Lock the AGI buffers we need to handle bumping the nlink of the
2910 	 * whiteout inode off the unlinked list and to handle dropping the
2911 	 * nlink of the target inode. Per locking order rules, do this in
2912 	 * increasing AG order and before directory block allocation tries to
2913 	 * grab AGFs because we grab AGIs before AGFs.
2914 	 *
2915 	 * The (vfs) caller must ensure that if src is a directory then
2916 	 * target_ip is either null or an empty directory.
2917 	 */
2918 	for (i = 0; i < num_inodes && inodes[i] != NULL; i++) {
2919 		if (inodes[i] == wip ||
2920 		    (inodes[i] == target_ip &&
2921 		     (VFS_I(target_ip)->i_nlink == 1 || src_is_directory))) {
2922 			struct xfs_perag	*pag;
2923 			struct xfs_buf		*bp;
2924 
2925 			pag = xfs_perag_get(mp,
2926 					XFS_INO_TO_AGNO(mp, inodes[i]->i_ino));
2927 			error = xfs_read_agi(pag, tp, &bp);
2928 			xfs_perag_put(pag);
2929 			if (error)
2930 				goto out_trans_cancel;
2931 		}
2932 	}
2933 
2934 	/*
2935 	 * Directory entry creation below may acquire the AGF. Remove
2936 	 * the whiteout from the unlinked list first to preserve correct
2937 	 * AGI/AGF locking order. This dirties the transaction so failures
2938 	 * after this point will abort and log recovery will clean up the
2939 	 * mess.
2940 	 *
2941 	 * For whiteouts, we need to bump the link count on the whiteout
2942 	 * inode. After this point, we have a real link, clear the tmpfile
2943 	 * state flag from the inode so it doesn't accidentally get misused
2944 	 * in future.
2945 	 */
2946 	if (wip) {
2947 		struct xfs_perag	*pag;
2948 
2949 		ASSERT(VFS_I(wip)->i_nlink == 0);
2950 
2951 		pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, wip->i_ino));
2952 		error = xfs_iunlink_remove(tp, pag, wip);
2953 		xfs_perag_put(pag);
2954 		if (error)
2955 			goto out_trans_cancel;
2956 
2957 		xfs_bumplink(tp, wip);
2958 		VFS_I(wip)->i_state &= ~I_LINKABLE;
2959 	}
2960 
2961 	/*
2962 	 * Set up the target.
2963 */ 2964 if (target_ip == NULL) { 2965 /* 2966 * If target does not exist and the rename crosses 2967 * directories, adjust the target directory link count 2968 * to account for the ".." reference from the new entry. 2969 */ 2970 error = xfs_dir_createname(tp, target_dp, target_name, 2971 src_ip->i_ino, spaceres); 2972 if (error) 2973 goto out_trans_cancel; 2974 2975 xfs_trans_ichgtime(tp, target_dp, 2976 XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); 2977 2978 if (new_parent && src_is_directory) { 2979 xfs_bumplink(tp, target_dp); 2980 } 2981 } else { /* target_ip != NULL */ 2982 /* 2983 * Link the source inode under the target name. 2984 * If the source inode is a directory and we are moving 2985 * it across directories, its ".." entry will be 2986 * inconsistent until we replace that down below. 2987 * 2988 * In case there is already an entry with the same 2989 * name at the destination directory, remove it first. 2990 */ 2991 error = xfs_dir_replace(tp, target_dp, target_name, 2992 src_ip->i_ino, spaceres); 2993 if (error) 2994 goto out_trans_cancel; 2995 2996 xfs_trans_ichgtime(tp, target_dp, 2997 XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); 2998 2999 /* 3000 * Decrement the link count on the target since the target 3001 * dir no longer points to it. 3002 */ 3003 error = xfs_droplink(tp, target_ip); 3004 if (error) 3005 goto out_trans_cancel; 3006 3007 if (src_is_directory) { 3008 /* 3009 * Drop the link from the old "." entry. 3010 */ 3011 error = xfs_droplink(tp, target_ip); 3012 if (error) 3013 goto out_trans_cancel; 3014 } 3015 } /* target_ip != NULL */ 3016 3017 /* 3018 * Remove the source. 3019 */ 3020 if (new_parent && src_is_directory) { 3021 /* 3022 * Rewrite the ".." entry to point to the new 3023 * directory. 3024 */ 3025 error = xfs_dir_replace(tp, src_ip, &xfs_name_dotdot, 3026 target_dp->i_ino, spaceres); 3027 ASSERT(error != -EEXIST); 3028 if (error) 3029 goto out_trans_cancel; 3030 } 3031 3032 /* 3033 * We always want to hit the ctime on the source inode. 3034 * 3035 * This isn't strictly required by the standards since the source 3036 * inode isn't really being changed, but old unix file systems did 3037 * it and some incremental backup programs won't work without it. 3038 */ 3039 xfs_trans_ichgtime(tp, src_ip, XFS_ICHGTIME_CHG); 3040 xfs_trans_log_inode(tp, src_ip, XFS_ILOG_CORE); 3041 3042 /* 3043 * Adjust the link count on src_dp. This is necessary when 3044 * renaming a directory, either within one parent when 3045 * the target existed, or across two parent directories. 3046 */ 3047 if (src_is_directory && (new_parent || target_ip != NULL)) { 3048 3049 /* 3050 * Decrement link count on src_directory since the 3051 * entry that's moved no longer points to it. 3052 */ 3053 error = xfs_droplink(tp, src_dp); 3054 if (error) 3055 goto out_trans_cancel; 3056 } 3057 3058 /* 3059 * For whiteouts, we only need to update the source dirent with the 3060 * inode number of the whiteout inode rather than removing it 3061 * altogether. 
3062  */
3063 	if (wip)
3064 		error = xfs_dir_replace(tp, src_dp, src_name, wip->i_ino,
3065 					spaceres);
3066 	else
3067 		error = xfs_dir_removename(tp, src_dp, src_name, src_ip->i_ino,
3068 					   spaceres);
3069 
3070 	if (error)
3071 		goto out_trans_cancel;
3072 
3073 	xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3074 	xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE);
3075 	if (new_parent)
3076 		xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE);
3077 
3078 	error = xfs_finish_rename(tp);
3079 	if (wip)
3080 		xfs_irele(wip);
3081 	return error;
3082 
3083 out_trans_cancel:
3084 	xfs_trans_cancel(tp);
3085 out_release_wip:
3086 	if (wip)
3087 		xfs_irele(wip);
3088 	if (error == -ENOSPC && nospace_error)
3089 		error = nospace_error;
3090 	return error;
3091 }
3092 
3093 static int
3094 xfs_iflush(
3095 	struct xfs_inode	*ip,
3096 	struct xfs_buf		*bp)
3097 {
3098 	struct xfs_inode_log_item *iip = ip->i_itemp;
3099 	struct xfs_dinode	*dip;
3100 	struct xfs_mount	*mp = ip->i_mount;
3101 	int			error;
3102 
3103 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
3104 	ASSERT(xfs_iflags_test(ip, XFS_IFLUSHING));
3105 	ASSERT(ip->i_df.if_format != XFS_DINODE_FMT_BTREE ||
3106 	       ip->i_df.if_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
3107 	ASSERT(iip->ili_item.li_buf == bp);
3108 
3109 	dip = xfs_buf_offset(bp, ip->i_imap.im_boffset);
3110 
3111 	/*
3112 	 * We don't flush the inode if any of the following checks fail, but we
3113 	 * do still update the log item and attach to the backing buffer as if
3114 	 * the flush happened. This is a formality to facilitate predictable
3115 	 * error handling as the caller will shutdown and fail the buffer.
3116 	 */
3117 	error = -EFSCORRUPTED;
3118 	if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC),
3119 			       mp, XFS_ERRTAG_IFLUSH_1)) {
3120 		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3121 			"%s: Bad inode %llu magic number 0x%x, ptr "PTR_FMT,
3122 			__func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
3123 		goto flush_out;
3124 	}
3125 	if (S_ISREG(VFS_I(ip)->i_mode)) {
3126 		if (XFS_TEST_ERROR(
3127 		    ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
3128 		    ip->i_df.if_format != XFS_DINODE_FMT_BTREE,
3129 		    mp, XFS_ERRTAG_IFLUSH_3)) {
3130 			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3131 				"%s: Bad regular inode %llu, ptr "PTR_FMT,
3132 				__func__, ip->i_ino, ip);
3133 			goto flush_out;
3134 		}
3135 	} else if (S_ISDIR(VFS_I(ip)->i_mode)) {
3136 		if (XFS_TEST_ERROR(
3137 		    ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
3138 		    ip->i_df.if_format != XFS_DINODE_FMT_BTREE &&
3139 		    ip->i_df.if_format != XFS_DINODE_FMT_LOCAL,
3140 		    mp, XFS_ERRTAG_IFLUSH_4)) {
3141 			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3142 				"%s: Bad directory inode %llu, ptr "PTR_FMT,
3143 				__func__, ip->i_ino, ip);
3144 			goto flush_out;
3145 		}
3146 	}
3147 	if (XFS_TEST_ERROR(ip->i_df.if_nextents + xfs_ifork_nextents(&ip->i_af) >
3148 				ip->i_nblocks, mp, XFS_ERRTAG_IFLUSH_5)) {
3149 		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3150 			"%s: detected corrupt incore inode %llu, "
3151 			"total extents = %llu nblocks = %lld, ptr "PTR_FMT,
3152 			__func__, ip->i_ino,
3153 			ip->i_df.if_nextents + xfs_ifork_nextents(&ip->i_af),
3154 			ip->i_nblocks, ip);
3155 		goto flush_out;
3156 	}
3157 	if (XFS_TEST_ERROR(ip->i_forkoff > mp->m_sb.sb_inodesize,
3158 				mp, XFS_ERRTAG_IFLUSH_6)) {
3159 		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3160 			"%s: bad inode %llu, forkoff 0x%x, ptr "PTR_FMT,
3161 			__func__, ip->i_ino, ip->i_forkoff, ip);
3162 		goto flush_out;
3163 	}
3164 
3165 	/*
3166 	 * Inode item log recovery for v2 inodes is dependent on the flushiter
3167 	 * count for correct sequencing.
We bump the flush iteration count so 3168 * we can detect flushes which postdate a log record during recovery. 3169 * This is redundant as we now log every change and hence this can't 3170 * happen but we need to still do it to ensure backwards compatibility 3171 * with old kernels that predate logging all inode changes. 3172 */ 3173 if (!xfs_has_v3inodes(mp)) 3174 ip->i_flushiter++; 3175 3176 /* 3177 * If there are inline format data / attr forks attached to this inode, 3178 * make sure they are not corrupt. 3179 */ 3180 if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL && 3181 xfs_ifork_verify_local_data(ip)) 3182 goto flush_out; 3183 if (xfs_inode_has_attr_fork(ip) && 3184 ip->i_af.if_format == XFS_DINODE_FMT_LOCAL && 3185 xfs_ifork_verify_local_attr(ip)) 3186 goto flush_out; 3187 3188 /* 3189 * Copy the dirty parts of the inode into the on-disk inode. We always 3190 * copy out the core of the inode, because if the inode is dirty at all 3191 * the core must be. 3192 */ 3193 xfs_inode_to_disk(ip, dip, iip->ili_item.li_lsn); 3194 3195 /* Wrap, we never let the log put out DI_MAX_FLUSH */ 3196 if (!xfs_has_v3inodes(mp)) { 3197 if (ip->i_flushiter == DI_MAX_FLUSH) 3198 ip->i_flushiter = 0; 3199 } 3200 3201 xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK); 3202 if (xfs_inode_has_attr_fork(ip)) 3203 xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK); 3204 3205 /* 3206 * We've recorded everything logged in the inode, so we'd like to clear 3207 * the ili_fields bits so we don't log and flush things unnecessarily. 3208 * However, we can't stop logging all this information until the data 3209 * we've copied into the disk buffer is written to disk. If we did we 3210 * might overwrite the copy of the inode in the log with all the data 3211 * after re-logging only part of it, and in the face of a crash we 3212 * wouldn't have all the data we need to recover. 3213 * 3214 * What we do is move the bits to the ili_last_fields field. When 3215 * logging the inode, these bits are moved back to the ili_fields field. 3216 * In the xfs_buf_inode_iodone() routine we clear ili_last_fields, since 3217 * we know that the information those bits represent is permanently on 3218 * disk. As long as the flush completes before the inode is logged 3219 * again, then both ili_fields and ili_last_fields will be cleared. 3220 */ 3221 error = 0; 3222 flush_out: 3223 spin_lock(&iip->ili_lock); 3224 iip->ili_last_fields = iip->ili_fields; 3225 iip->ili_fields = 0; 3226 iip->ili_fsync_fields = 0; 3227 spin_unlock(&iip->ili_lock); 3228 3229 /* 3230 * Store the current LSN of the inode so that we can tell whether the 3231 * item has moved in the AIL from xfs_buf_inode_iodone(). 3232 */ 3233 xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn, 3234 &iip->ili_item.li_lsn); 3235 3236 /* generate the checksum. */ 3237 xfs_dinode_calc_crc(mp, dip); 3238 return error; 3239 } 3240 3241 /* 3242 * Non-blocking flush of dirty inode metadata into the backing buffer. 3243 * 3244 * The caller must have a reference to the inode and hold the cluster buffer 3245 * locked. The function will walk across all the inodes on the cluster buffer it 3246 * can find and lock without blocking, and flush them to the cluster buffer. 3247 * 3248 * On successful flushing of at least one inode, the caller must write out the 3249 * buffer and release it. If no inodes are flushed, -EAGAIN will be returned and 3250 * the caller needs to release the buffer. 
On failure, the filesystem will be
3251  * shut down, the buffer will have been unlocked and released, and EFSCORRUPTED
3252  * will be returned.
3253  */
3254 int
3255 xfs_iflush_cluster(
3256 	struct xfs_buf		*bp)
3257 {
3258 	struct xfs_mount	*mp = bp->b_mount;
3259 	struct xfs_log_item	*lip, *n;
3260 	struct xfs_inode	*ip;
3261 	struct xfs_inode_log_item *iip;
3262 	int			clcount = 0;
3263 	int			error = 0;
3264 
3265 	/*
3266 	 * We must use the safe variant here as on shutdown xfs_iflush_abort()
3267 	 * will remove itself from the list.
3268 	 */
3269 	list_for_each_entry_safe(lip, n, &bp->b_li_list, li_bio_list) {
3270 		iip = (struct xfs_inode_log_item *)lip;
3271 		ip = iip->ili_inode;
3272 
3273 		/*
3274 		 * Quick and dirty check to avoid locks if possible.
3275 		 */
3276 		if (__xfs_iflags_test(ip, XFS_IRECLAIM | XFS_IFLUSHING))
3277 			continue;
3278 		if (xfs_ipincount(ip))
3279 			continue;
3280 
3281 		/*
3282 		 * The inode is still attached to the buffer, which means it is
3283 		 * dirty but reclaim might try to grab it. Check carefully for
3284 		 * that, and grab the ilock while still holding the i_flags_lock
3285 		 * to guarantee reclaim will not be able to reclaim this inode
3286 		 * once we drop the i_flags_lock.
3287 		 */
3288 		spin_lock(&ip->i_flags_lock);
3289 		ASSERT(!__xfs_iflags_test(ip, XFS_ISTALE));
3290 		if (__xfs_iflags_test(ip, XFS_IRECLAIM | XFS_IFLUSHING)) {
3291 			spin_unlock(&ip->i_flags_lock);
3292 			continue;
3293 		}
3294 
3295 		/*
3296 		 * ILOCK will pin the inode against reclaim and prevent
3297 		 * concurrent transactions modifying the inode while we are
3298 		 * flushing the inode. If we get the lock, set the flushing
3299 		 * state before we drop the i_flags_lock.
3300 		 */
3301 		if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
3302 			spin_unlock(&ip->i_flags_lock);
3303 			continue;
3304 		}
3305 		__xfs_iflags_set(ip, XFS_IFLUSHING);
3306 		spin_unlock(&ip->i_flags_lock);
3307 
3308 		/*
3309 		 * Abort flushing this inode if we are shut down because the
3310 		 * inode may not currently be in the AIL. This can occur when
3311 		 * log I/O failure unpins the inode without inserting into the
3312 		 * AIL, leaving a dirty/unpinned inode attached to the buffer
3313 		 * that otherwise looks like it should be flushed.
3314 		 */
3315 		if (xlog_is_shutdown(mp->m_log)) {
3316 			xfs_iunpin_wait(ip);
3317 			xfs_iflush_abort(ip);
3318 			xfs_iunlock(ip, XFS_ILOCK_SHARED);
3319 			error = -EIO;
3320 			continue;
3321 		}
3322 
3323 		/* don't block waiting on a log force to unpin dirty inodes */
3324 		if (xfs_ipincount(ip)) {
3325 			xfs_iflags_clear(ip, XFS_IFLUSHING);
3326 			xfs_iunlock(ip, XFS_ILOCK_SHARED);
3327 			continue;
3328 		}
3329 
3330 		if (!xfs_inode_clean(ip))
3331 			error = xfs_iflush(ip, bp);
3332 		else
3333 			xfs_iflags_clear(ip, XFS_IFLUSHING);
3334 		xfs_iunlock(ip, XFS_ILOCK_SHARED);
3335 		if (error)
3336 			break;
3337 		clcount++;
3338 	}
3339 
3340 	if (error) {
3341 		/*
3342 		 * Shutdown first so we kill the log before we release this
3343 		 * buffer. If it is an INODE_ALLOC buffer and pins the tail
3344 		 * of the log, failing it before the _log_ is shut down can
3345 		 * result in the log tail being moved forward in the journal
3346 		 * on disk because log writes can still be taking place. Hence
3347 		 * unpinning the tail will allow the ICREATE intent to be
3348 		 * removed from the log and recovery will fail with uninitialised
3349 		 * inode cluster buffers.
3350  */
3351 		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
3352 		bp->b_flags |= XBF_ASYNC;
3353 		xfs_buf_ioend_fail(bp);
3354 		return error;
3355 	}
3356 
3357 	if (!clcount)
3358 		return -EAGAIN;
3359 
3360 	XFS_STATS_INC(mp, xs_icluster_flushcnt);
3361 	XFS_STATS_ADD(mp, xs_icluster_flushinode, clcount);
3362 	return 0;
3363 
3364 }
3365 
3366 /* Release an inode. */
3367 void
3368 xfs_irele(
3369 	struct xfs_inode	*ip)
3370 {
3371 	trace_xfs_irele(ip, _RET_IP_);
3372 	iput(VFS_I(ip));
3373 }
3374 
3375 /*
3376  * Ensure all committed transactions touching the inode are written to the log.
3377  */
3378 int
3379 xfs_log_force_inode(
3380 	struct xfs_inode	*ip)
3381 {
3382 	xfs_csn_t		seq = 0;
3383 
3384 	xfs_ilock(ip, XFS_ILOCK_SHARED);
3385 	if (xfs_ipincount(ip))
3386 		seq = ip->i_itemp->ili_commit_seq;
3387 	xfs_iunlock(ip, XFS_ILOCK_SHARED);
3388 
3389 	if (!seq)
3390 		return 0;
3391 	return xfs_log_force_seq(ip->i_mount, seq, XFS_LOG_SYNC, NULL);
3392 }
3393 
3394 /*
3395  * Grab the exclusive iolock for a data copy from src to dest, making sure to
3396  * abide by the vfs locking order (lowest pointer value goes first) and
3397  * breaking the layout leases before proceeding. The loop is needed because we
3398  * cannot call the blocking break_layout() with the iolocks held, and therefore
3399  * have to back out both locks.
3400  */
3401 static int
3402 xfs_iolock_two_inodes_and_break_layout(
3403 	struct inode		*src,
3404 	struct inode		*dest)
3405 {
3406 	int			error;
3407 
3408 	if (src > dest)
3409 		swap(src, dest);
3410 
3411 retry:
3412 	/* Wait to break both inodes' layouts before we start locking. */
3413 	error = break_layout(src, true);
3414 	if (error)
3415 		return error;
3416 	if (src != dest) {
3417 		error = break_layout(dest, true);
3418 		if (error)
3419 			return error;
3420 	}
3421 
3422 	/* Lock one inode and make sure nobody got in and leased it. */
3423 	inode_lock(src);
3424 	error = break_layout(src, false);
3425 	if (error) {
3426 		inode_unlock(src);
3427 		if (error == -EWOULDBLOCK)
3428 			goto retry;
3429 		return error;
3430 	}
3431 
3432 	if (src == dest)
3433 		return 0;
3434 
3435 	/* Lock the other inode and make sure nobody got in and leased it. */
3436 	inode_lock_nested(dest, I_MUTEX_NONDIR2);
3437 	error = break_layout(dest, false);
3438 	if (error) {
3439 		inode_unlock(src);
3440 		inode_unlock(dest);
3441 		if (error == -EWOULDBLOCK)
3442 			goto retry;
3443 		return error;
3444 	}
3445 
3446 	return 0;
3447 }
3448 
3449 static int
3450 xfs_mmaplock_two_inodes_and_break_dax_layout(
3451 	struct xfs_inode	*ip1,
3452 	struct xfs_inode	*ip2)
3453 {
3454 	int			error;
3455 	bool			retry;
3456 	struct page		*page;
3457 
3458 	if (ip1->i_ino > ip2->i_ino)
3459 		swap(ip1, ip2);
3460 
3461 again:
3462 	retry = false;
3463 	/* Lock the first inode */
3464 	xfs_ilock(ip1, XFS_MMAPLOCK_EXCL);
3465 	error = xfs_break_dax_layouts(VFS_I(ip1), &retry);
3466 	if (error || retry) {
3467 		xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
3468 		if (error == 0 && retry)
3469 			goto again;
3470 		return error;
3471 	}
3472 
3473 	if (ip1 == ip2)
3474 		return 0;
3475 
3476 	/* Nested lock the second inode */
3477 	xfs_ilock(ip2, xfs_lock_inumorder(XFS_MMAPLOCK_EXCL, 1));
3478 	/*
3479 	 * We cannot use xfs_break_dax_layouts() directly here because it may
3480 	 * need to unlock & lock the XFS_MMAPLOCK_EXCL which is not suitable
3481 	 * for this nested lock case.
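 * Instead, open-code the busy-page check below: if the mapping still has
 * a DAX page in use, back out both MMAPLOCKs and retry from the top.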
3482 */ 3483 page = dax_layout_busy_page(VFS_I(ip2)->i_mapping); 3484 if (page && page_ref_count(page) != 1) { 3485 xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL); 3486 xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL); 3487 goto again; 3488 } 3489 3490 return 0; 3491 } 3492 3493 /* 3494 * Lock two inodes so that userspace cannot initiate I/O via file syscalls or 3495 * mmap activity. 3496 */ 3497 int 3498 xfs_ilock2_io_mmap( 3499 struct xfs_inode *ip1, 3500 struct xfs_inode *ip2) 3501 { 3502 int ret; 3503 3504 ret = xfs_iolock_two_inodes_and_break_layout(VFS_I(ip1), VFS_I(ip2)); 3505 if (ret) 3506 return ret; 3507 3508 if (IS_DAX(VFS_I(ip1)) && IS_DAX(VFS_I(ip2))) { 3509 ret = xfs_mmaplock_two_inodes_and_break_dax_layout(ip1, ip2); 3510 if (ret) { 3511 inode_unlock(VFS_I(ip2)); 3512 if (ip1 != ip2) 3513 inode_unlock(VFS_I(ip1)); 3514 return ret; 3515 } 3516 } else 3517 filemap_invalidate_lock_two(VFS_I(ip1)->i_mapping, 3518 VFS_I(ip2)->i_mapping); 3519 3520 return 0; 3521 } 3522 3523 /* Unlock both inodes to allow IO and mmap activity. */ 3524 void 3525 xfs_iunlock2_io_mmap( 3526 struct xfs_inode *ip1, 3527 struct xfs_inode *ip2) 3528 { 3529 if (IS_DAX(VFS_I(ip1)) && IS_DAX(VFS_I(ip2))) { 3530 xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL); 3531 if (ip1 != ip2) 3532 xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL); 3533 } else 3534 filemap_invalidate_unlock_two(VFS_I(ip1)->i_mapping, 3535 VFS_I(ip2)->i_mapping); 3536 3537 inode_unlock(VFS_I(ip2)); 3538 if (ip1 != ip2) 3539 inode_unlock(VFS_I(ip1)); 3540 } 3541
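
/*
 * Illustrative usage sketch (not a caller in this file): operations that
 * must invalidate the page cache across two files, such as the reflink
 * remap path, are expected to pair the two helpers above like so:
 *
 *	error = xfs_ilock2_io_mmap(ip1, ip2);
 *	if (error)
 *		return error;
 *	... manipulate extents on both inodes ...
 *	xfs_iunlock2_io_mmap(ip1, ip2);
 */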