// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include <linux/iversion.h>

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_dir2.h"
#include "xfs_attr.h"
#include "xfs_trans_space.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
#include "xfs_ialloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_filestream.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_symlink.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_bmap_btree.h"
#include "xfs_reflink.h"
#include "xfs_ag.h"

kmem_zone_t *xfs_inode_zone;

/*
 * Used in xfs_itruncate_extents().  This is the maximum number of extents
 * freed from a file in a single transaction.
 */
#define	XFS_ITRUNC_MAX_EXTENTS	2

STATIC int xfs_iunlink(struct xfs_trans *, struct xfs_inode *);
STATIC int xfs_iunlink_remove(struct xfs_trans *tp, struct xfs_perag *pag,
	struct xfs_inode *);

/*
 * Helper function to extract the extent size hint from an inode.
 */
xfs_extlen_t
xfs_get_extsz_hint(
	struct xfs_inode	*ip)
{
	/*
	 * No point in aligning allocations if we need to COW to actually
	 * write to them.
	 */
	if (xfs_is_always_cow_inode(ip))
		return 0;
	if ((ip->i_diflags & XFS_DIFLAG_EXTSIZE) && ip->i_extsize)
		return ip->i_extsize;
	if (XFS_IS_REALTIME_INODE(ip))
		return ip->i_mount->m_sb.sb_rextsize;
	return 0;
}

/*
 * Helper function to extract the CoW extent size hint from an inode.
 * Between the extent size hint and the CoW extent size hint, we
 * return the greater of the two.  If the value is zero (automatic),
 * use the default size.
 */
xfs_extlen_t
xfs_get_cowextsz_hint(
	struct xfs_inode	*ip)
{
	xfs_extlen_t		a, b;

	a = 0;
	if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
		a = ip->i_cowextsize;
	b = xfs_get_extsz_hint(ip);

	a = max(a, b);
	if (a == 0)
		return XFS_DEFAULT_COWEXTSZ_HINT;
	return a;
}
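
/*
 * Worked example (illustrative only, not a code path): for an inode with
 * an extent size hint of 8 fsblocks and a CoW extent size hint of 32
 * fsblocks, xfs_get_cowextsz_hint() returns max(32, 8) = 32; with both
 * hints at zero it falls back to XFS_DEFAULT_COWEXTSZ_HINT.
 */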

/*
 * These two are wrapper routines around the xfs_ilock() routine used to
 * centralize some grungy code.  They are used in places that wish to lock the
 * inode solely for reading the extents.  The reason these places can't just
 * call xfs_ilock(ip, XFS_ILOCK_SHARED) is that the inode lock also guards the
 * bringing in of the extents from disk for a file in b-tree format.  If the
 * inode is in b-tree format, then we need to lock the inode exclusively until
 * the extents are read in.  Locking it exclusively all the time would limit
 * our parallelism unnecessarily, though.  What we do instead is check to see
 * if the extents have been read in yet, and only lock the inode exclusively
 * if they have not.
 *
 * The functions return a value which should be given to the corresponding
 * xfs_iunlock() call.
 */
uint
xfs_ilock_data_map_shared(
	struct xfs_inode	*ip)
{
	uint			lock_mode = XFS_ILOCK_SHARED;

	if (xfs_need_iread_extents(&ip->i_df))
		lock_mode = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lock_mode);
	return lock_mode;
}

uint
xfs_ilock_attr_map_shared(
	struct xfs_inode	*ip)
{
	uint			lock_mode = XFS_ILOCK_SHARED;

	if (ip->i_afp && xfs_need_iread_extents(ip->i_afp))
		lock_mode = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lock_mode);
	return lock_mode;
}

/*
 * In addition to i_rwsem in the VFS inode, the xfs inode contains 2
 * multi-reader locks: i_mmap_lock and the i_lock.  This routine allows
 * various combinations of the locks to be obtained.
 *
 * The 3 locks should always be ordered so that the IO lock is obtained first,
 * the mmap lock second and the ilock last in order to prevent deadlock.
 *
 * Basic locking order:
 *
 * i_rwsem -> i_mmap_lock -> page_lock -> i_lock
 *
 * mmap_lock locking order:
 *
 * i_rwsem -> page lock -> mmap_lock
 * mmap_lock -> i_mmap_lock -> page_lock
 *
 * The difference in mmap_lock locking order means that we cannot hold the
 * i_mmap_lock over syscall based read(2)/write(2) based IO.  These IO paths can
 * fault in pages during copy in/out (for buffered IO) or require the mmap_lock
 * in get_user_pages() to map the user pages into the kernel address space for
 * direct IO.  Similarly the i_rwsem cannot be taken inside a page fault because
 * page faults already hold the mmap_lock.
 *
 * Hence to serialise fully against both syscall and mmap based IO, we need to
 * take both the i_rwsem and the i_mmap_lock.  These locks should *only* be both
 * taken in places where we need to invalidate the page cache in a race
 * free manner (e.g. truncate, hole punch and other extent manipulation
 * functions).
 */
void
xfs_ilock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	trace_xfs_ilock(ip, lock_flags, _RET_IP_);

	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
	       (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		down_write_nested(&VFS_I(ip)->i_rwsem,
				  XFS_IOLOCK_DEP(lock_flags));
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		down_read_nested(&VFS_I(ip)->i_rwsem,
				 XFS_IOLOCK_DEP(lock_flags));
	}

	if (lock_flags & XFS_MMAPLOCK_EXCL)
		mrupdate_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));
	else if (lock_flags & XFS_MMAPLOCK_SHARED)
		mraccess_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));

	if (lock_flags & XFS_ILOCK_EXCL)
		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
	else if (lock_flags & XFS_ILOCK_SHARED)
		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
}
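
/*
 * Usage sketch (illustrative only): an extent manipulation path such as
 * hole punch serialises against both syscall and mmap based IO by taking
 * both of the higher level locks described above:
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL);
 *	(invalidate the page cache, manipulate extents)
 *	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL);
 */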

/*
 * This is just like xfs_ilock(), except that the caller
 * is guaranteed not to sleep.  It returns 1 if it gets
 * the requested locks and 0 otherwise.  If the IO lock is
 * obtained but the inode lock cannot be, then the IO lock
 * is dropped before returning.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks to be
 *       locked.  See the comment for xfs_ilock() for a list
 *       of valid values.
 */
int
xfs_ilock_nowait(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);

	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
	       (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		if (!down_write_trylock(&VFS_I(ip)->i_rwsem))
			goto out;
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		if (!down_read_trylock(&VFS_I(ip)->i_rwsem))
			goto out;
	}

	if (lock_flags & XFS_MMAPLOCK_EXCL) {
		if (!mrtryupdate(&ip->i_mmaplock))
			goto out_undo_iolock;
	} else if (lock_flags & XFS_MMAPLOCK_SHARED) {
		if (!mrtryaccess(&ip->i_mmaplock))
			goto out_undo_iolock;
	}

	if (lock_flags & XFS_ILOCK_EXCL) {
		if (!mrtryupdate(&ip->i_lock))
			goto out_undo_mmaplock;
	} else if (lock_flags & XFS_ILOCK_SHARED) {
		if (!mrtryaccess(&ip->i_lock))
			goto out_undo_mmaplock;
	}
	return 1;

out_undo_mmaplock:
	if (lock_flags & XFS_MMAPLOCK_EXCL)
		mrunlock_excl(&ip->i_mmaplock);
	else if (lock_flags & XFS_MMAPLOCK_SHARED)
		mrunlock_shared(&ip->i_mmaplock);
out_undo_iolock:
	if (lock_flags & XFS_IOLOCK_EXCL)
		up_write(&VFS_I(ip)->i_rwsem);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		up_read(&VFS_I(ip)->i_rwsem);
out:
	return 0;
}
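
/*
 * Usage sketch (illustrative only): callers that must not block, such as
 * the final-close path in xfs_release() below, try the lock and bail out
 * rather than waiting:
 *
 *	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL))
 *		return 0;
 *	(do the optional work)
 *	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
 */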

/*
 * xfs_iunlock() is used to drop the inode locks acquired with
 * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 * that we know which locks to drop.
 *
 * ip -- the inode being unlocked
 * lock_flags -- this parameter indicates the inode's locks to be
 *       unlocked.  See the comment for xfs_ilock() for a list
 *       of valid values for this parameter.
 */
void
xfs_iunlock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
	       (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
	ASSERT(lock_flags != 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		up_write(&VFS_I(ip)->i_rwsem);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		up_read(&VFS_I(ip)->i_rwsem);

	if (lock_flags & XFS_MMAPLOCK_EXCL)
		mrunlock_excl(&ip->i_mmaplock);
	else if (lock_flags & XFS_MMAPLOCK_SHARED)
		mrunlock_shared(&ip->i_mmaplock);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrunlock_excl(&ip->i_lock);
	else if (lock_flags & XFS_ILOCK_SHARED)
		mrunlock_shared(&ip->i_lock);

	trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
}

/*
 * Give up write locks.  The I/O lock cannot be held nested
 * if it is being demoted.
 */
void
xfs_ilock_demote(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL));
	ASSERT((lock_flags &
		~(XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrdemote(&ip->i_lock);
	if (lock_flags & XFS_MMAPLOCK_EXCL)
		mrdemote(&ip->i_mmaplock);
	if (lock_flags & XFS_IOLOCK_EXCL)
		downgrade_write(&VFS_I(ip)->i_rwsem);

	trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
}

#if defined(DEBUG) || defined(XFS_WARN)
int
xfs_isilocked(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
		if (!(lock_flags & XFS_ILOCK_SHARED))
			return !!ip->i_lock.mr_writer;
		return rwsem_is_locked(&ip->i_lock.mr_lock);
	}

	if (lock_flags & (XFS_MMAPLOCK_EXCL|XFS_MMAPLOCK_SHARED)) {
		if (!(lock_flags & XFS_MMAPLOCK_SHARED))
			return !!ip->i_mmaplock.mr_writer;
		return rwsem_is_locked(&ip->i_mmaplock.mr_lock);
	}

	if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) {
		if (!(lock_flags & XFS_IOLOCK_SHARED))
			return !debug_locks ||
				lockdep_is_held_type(&VFS_I(ip)->i_rwsem, 0);
		return rwsem_is_locked(&VFS_I(ip)->i_rwsem);
	}

	ASSERT(0);
	return 0;
}
#endif

/*
 * xfs_lockdep_subclass_ok() is only used in an ASSERT, so is only called when
 * DEBUG or XFS_WARN is set.  And MAX_LOCKDEP_SUBCLASSES is then only defined
 * when CONFIG_LOCKDEP is set.  Hence the complex define below to avoid build
 * errors and warnings.
 */
#if (defined(DEBUG) || defined(XFS_WARN)) && defined(CONFIG_LOCKDEP)
static bool
xfs_lockdep_subclass_ok(
	int subclass)
{
	return subclass < MAX_LOCKDEP_SUBCLASSES;
}
#else
#define xfs_lockdep_subclass_ok(subclass)	(true)
#endif
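
/*
 * Usage sketch (illustrative only): xfs_isilocked() exists for lock state
 * assertions in DEBUG/XFS_WARN builds, e.g.:
 *
 *	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 */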

/*
 * Bump the subclass so xfs_lock_inodes() acquires each lock with a different
 * value.  This can be called for any type of inode lock combination, including
 * parent locking.  Care must be taken to ensure we don't overrun the subclass
 * storage fields in the class mask we build.
 */
static inline int
xfs_lock_inumorder(int lock_mode, int subclass)
{
	int	class = 0;

	ASSERT(!(lock_mode & (XFS_ILOCK_PARENT | XFS_ILOCK_RTBITMAP |
			      XFS_ILOCK_RTSUM)));
	ASSERT(xfs_lockdep_subclass_ok(subclass));

	if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) {
		ASSERT(subclass <= XFS_IOLOCK_MAX_SUBCLASS);
		class += subclass << XFS_IOLOCK_SHIFT;
	}

	if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) {
		ASSERT(subclass <= XFS_MMAPLOCK_MAX_SUBCLASS);
		class += subclass << XFS_MMAPLOCK_SHIFT;
	}

	if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) {
		ASSERT(subclass <= XFS_ILOCK_MAX_SUBCLASS);
		class += subclass << XFS_ILOCK_SHIFT;
	}

	return (lock_mode & ~XFS_LOCK_SUBCLASS_MASK) | class;
}
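
/*
 * Worked example (illustrative only): locking three inodes ILOCK_EXCL in
 * i_ino order, the i-th inode is locked with
 * xfs_lock_inumorder(XFS_ILOCK_EXCL, i), which encodes lockdep subclass i
 * into the ILOCK bits of the mode, i.e.
 * XFS_ILOCK_EXCL | (i << XFS_ILOCK_SHIFT).
 */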

/*
 * The following routine will lock n inodes in exclusive mode.  We assume the
 * caller calls us with the inodes in i_ino order.
 *
 * We need to detect deadlock where an inode that we lock is in the AIL and we
 * start waiting for another inode that is locked by a thread in a long running
 * transaction (such as truncate).  This can result in deadlock since the long
 * running trans might need to wait for the inode we just locked in order to
 * push the tail and free space in the log.
 *
 * xfs_lock_inodes() can only be used to lock one type of lock at a time -
 * the iolock, the mmaplock or the ilock, but not more than one at a time.  If
 * we lock more than one at a time, lockdep will report false positives saying
 * we have violated locking orders.
 */
static void
xfs_lock_inodes(
	struct xfs_inode	**ips,
	int			inodes,
	uint			lock_mode)
{
	int			attempts = 0, i, j, try_lock;
	struct xfs_log_item	*lp;

	/*
	 * Currently supports between 2 and 5 inodes with exclusive locking.  We
	 * support an arbitrary depth of locking here, but absolute limits on
	 * inodes depend on the type of locking and the limits placed by
	 * lockdep annotations in xfs_lock_inumorder.  These are all checked by
	 * the asserts.
	 */
	ASSERT(ips && inodes >= 2 && inodes <= 5);
	ASSERT(lock_mode & (XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL |
			    XFS_ILOCK_EXCL));
	ASSERT(!(lock_mode & (XFS_IOLOCK_SHARED | XFS_MMAPLOCK_SHARED |
			      XFS_ILOCK_SHARED)));
	ASSERT(!(lock_mode & XFS_MMAPLOCK_EXCL) ||
		inodes <= XFS_MMAPLOCK_MAX_SUBCLASS + 1);
	ASSERT(!(lock_mode & XFS_ILOCK_EXCL) ||
		inodes <= XFS_ILOCK_MAX_SUBCLASS + 1);

	if (lock_mode & XFS_IOLOCK_EXCL) {
		ASSERT(!(lock_mode & (XFS_MMAPLOCK_EXCL | XFS_ILOCK_EXCL)));
	} else if (lock_mode & XFS_MMAPLOCK_EXCL)
		ASSERT(!(lock_mode & XFS_ILOCK_EXCL));

	try_lock = 0;
	i = 0;
again:
	for (; i < inodes; i++) {
		ASSERT(ips[i]);

		if (i && (ips[i] == ips[i - 1]))	/* Already locked */
			continue;

		/*
		 * If try_lock is not set yet, make sure all locked inodes are
		 * not in the AIL.  If any are, set try_lock to be used later.
		 */
		if (!try_lock) {
			for (j = (i - 1); j >= 0 && !try_lock; j--) {
				lp = &ips[j]->i_itemp->ili_item;
				if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags))
					try_lock++;
			}
		}

		/*
		 * If any of the previous locks we have locked is in the AIL,
		 * we must TRY to get the second and subsequent locks.  If
		 * we can't get any, we must release all we have
		 * and try again.
		 */
		if (!try_lock) {
			xfs_ilock(ips[i], xfs_lock_inumorder(lock_mode, i));
			continue;
		}

		/* try_lock means we have an inode locked that is in the AIL. */
		ASSERT(i != 0);
		if (xfs_ilock_nowait(ips[i], xfs_lock_inumorder(lock_mode, i)))
			continue;

		/*
		 * Unlock all previously locked inodes and try again.
		 * xfs_iunlock will try to push the tail if the inode is in
		 * the AIL.
		 */
		attempts++;
		for (j = i - 1; j >= 0; j--) {
			/*
			 * Check to see if we've already unlocked this one.  Not
			 * the first one going back, and the inode ptr is the
			 * same.
			 */
			if (j != (i - 1) && ips[j] == ips[j + 1])
				continue;

			xfs_iunlock(ips[j], lock_mode);
		}

		if ((attempts % 5) == 0) {
			delay(1); /* Don't just spin the CPU */
		}
		i = 0;
		try_lock = 0;
		goto again;
	}
}
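
/*
 * Usage sketch (illustrative only): a caller such as rename, having sorted
 * up to four distinct inodes by i_ino into ips[], locks them all with:
 *
 *	xfs_lock_inodes(ips, 4, XFS_ILOCK_EXCL);
 */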

/*
 * xfs_lock_two_inodes() can only be used to lock one type of lock at a time -
 * the mmaplock or the ilock, but not more than one type at a time.  If we lock
 * more than one at a time, lockdep will report false positives saying we have
 * violated locking orders.  The iolock must be double-locked separately since
 * we use i_rwsem for that.  We now support taking one lock EXCL and the other
 * SHARED.
 */
void
xfs_lock_two_inodes(
	struct xfs_inode	*ip0,
	uint			ip0_mode,
	struct xfs_inode	*ip1,
	uint			ip1_mode)
{
	struct xfs_inode	*temp;
	uint			mode_temp;
	int			attempts = 0;
	struct xfs_log_item	*lp;

	ASSERT(hweight32(ip0_mode) == 1);
	ASSERT(hweight32(ip1_mode) == 1);
	ASSERT(!(ip0_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
	ASSERT(!(ip1_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
	ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
	       !(ip0_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
	ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
	       !(ip1_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
	ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
	       !(ip0_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
	ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
	       !(ip1_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));

	ASSERT(ip0->i_ino != ip1->i_ino);

	if (ip0->i_ino > ip1->i_ino) {
		temp = ip0;
		ip0 = ip1;
		ip1 = temp;
		mode_temp = ip0_mode;
		ip0_mode = ip1_mode;
		ip1_mode = mode_temp;
	}

again:
	xfs_ilock(ip0, xfs_lock_inumorder(ip0_mode, 0));

	/*
	 * If the first lock we have locked is in the AIL, we must TRY to get
	 * the second lock.  If we can't get it, we must release the first one
	 * and try again.
	 */
	lp = &ip0->i_itemp->ili_item;
	if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags)) {
		if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(ip1_mode, 1))) {
			xfs_iunlock(ip0, ip0_mode);
			if ((++attempts % 5) == 0)
				delay(1); /* Don't just spin the CPU */
			goto again;
		}
	} else {
		xfs_ilock(ip1, xfs_lock_inumorder(ip1_mode, 1));
	}
}

uint
xfs_ip2xflags(
	struct xfs_inode	*ip)
{
	uint			flags = 0;

	if (ip->i_diflags & XFS_DIFLAG_ANY) {
		if (ip->i_diflags & XFS_DIFLAG_REALTIME)
			flags |= FS_XFLAG_REALTIME;
		if (ip->i_diflags & XFS_DIFLAG_PREALLOC)
			flags |= FS_XFLAG_PREALLOC;
		if (ip->i_diflags & XFS_DIFLAG_IMMUTABLE)
			flags |= FS_XFLAG_IMMUTABLE;
		if (ip->i_diflags & XFS_DIFLAG_APPEND)
			flags |= FS_XFLAG_APPEND;
		if (ip->i_diflags & XFS_DIFLAG_SYNC)
			flags |= FS_XFLAG_SYNC;
		if (ip->i_diflags & XFS_DIFLAG_NOATIME)
			flags |= FS_XFLAG_NOATIME;
		if (ip->i_diflags & XFS_DIFLAG_NODUMP)
			flags |= FS_XFLAG_NODUMP;
		if (ip->i_diflags & XFS_DIFLAG_RTINHERIT)
			flags |= FS_XFLAG_RTINHERIT;
		if (ip->i_diflags & XFS_DIFLAG_PROJINHERIT)
			flags |= FS_XFLAG_PROJINHERIT;
		if (ip->i_diflags & XFS_DIFLAG_NOSYMLINKS)
			flags |= FS_XFLAG_NOSYMLINKS;
		if (ip->i_diflags & XFS_DIFLAG_EXTSIZE)
			flags |= FS_XFLAG_EXTSIZE;
		if (ip->i_diflags & XFS_DIFLAG_EXTSZINHERIT)
			flags |= FS_XFLAG_EXTSZINHERIT;
		if (ip->i_diflags & XFS_DIFLAG_NODEFRAG)
			flags |= FS_XFLAG_NODEFRAG;
		if (ip->i_diflags & XFS_DIFLAG_FILESTREAM)
			flags |= FS_XFLAG_FILESTREAM;
	}

	if (ip->i_diflags2 & XFS_DIFLAG2_ANY) {
		if (ip->i_diflags2 & XFS_DIFLAG2_DAX)
			flags |= FS_XFLAG_DAX;
		if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
			flags |= FS_XFLAG_COWEXTSIZE;
	}

	if (XFS_IFORK_Q(ip))
		flags |= FS_XFLAG_HASATTR;
	return flags;
}

/*
 * Looks up an inode from "name".  If ci_name is not NULL, then a CI match
 * is allowed, otherwise it has to be an exact match.  If a CI match is found,
 * ci_name->name will point to the actual name (caller must free) or
 * will be set to NULL if an exact match is found.
 */
int
xfs_lookup(
	xfs_inode_t		*dp,
	struct xfs_name		*name,
	xfs_inode_t		**ipp,
	struct xfs_name		*ci_name)
{
	xfs_ino_t		inum;
	int			error;

	trace_xfs_lookup(dp, name);

	if (XFS_FORCED_SHUTDOWN(dp->i_mount))
		return -EIO;

	error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
	if (error)
		goto out_unlock;

	error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp);
	if (error)
		goto out_free_name;

	return 0;

out_free_name:
	if (ci_name)
		kmem_free(ci_name->name);
out_unlock:
	*ipp = NULL;
	return error;
}
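
/*
 * Caller note (illustrative sketch): on a case-insensitive match the name
 * buffer is allocated here and ownership passes to the caller, which must
 * free it once done with it, e.g. for a struct xfs_name ci_name passed in
 * by address:
 *
 *	if (ci_name.name)
 *		kmem_free(ci_name.name);
 */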

/* Propagate di_flags from a parent inode to a child inode. */
static void
xfs_inode_inherit_flags(
	struct xfs_inode	*ip,
	const struct xfs_inode	*pip)
{
	unsigned int		di_flags = 0;
	xfs_failaddr_t		failaddr;
	umode_t			mode = VFS_I(ip)->i_mode;

	if (S_ISDIR(mode)) {
		if (pip->i_diflags & XFS_DIFLAG_RTINHERIT)
			di_flags |= XFS_DIFLAG_RTINHERIT;
		if (pip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) {
			di_flags |= XFS_DIFLAG_EXTSZINHERIT;
			ip->i_extsize = pip->i_extsize;
		}
		if (pip->i_diflags & XFS_DIFLAG_PROJINHERIT)
			di_flags |= XFS_DIFLAG_PROJINHERIT;
	} else if (S_ISREG(mode)) {
		if ((pip->i_diflags & XFS_DIFLAG_RTINHERIT) &&
		    xfs_sb_version_hasrealtime(&ip->i_mount->m_sb))
			di_flags |= XFS_DIFLAG_REALTIME;
		if (pip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) {
			di_flags |= XFS_DIFLAG_EXTSIZE;
			ip->i_extsize = pip->i_extsize;
		}
	}
	if ((pip->i_diflags & XFS_DIFLAG_NOATIME) &&
	    xfs_inherit_noatime)
		di_flags |= XFS_DIFLAG_NOATIME;
	if ((pip->i_diflags & XFS_DIFLAG_NODUMP) &&
	    xfs_inherit_nodump)
		di_flags |= XFS_DIFLAG_NODUMP;
	if ((pip->i_diflags & XFS_DIFLAG_SYNC) &&
	    xfs_inherit_sync)
		di_flags |= XFS_DIFLAG_SYNC;
	if ((pip->i_diflags & XFS_DIFLAG_NOSYMLINKS) &&
	    xfs_inherit_nosymlinks)
		di_flags |= XFS_DIFLAG_NOSYMLINKS;
	if ((pip->i_diflags & XFS_DIFLAG_NODEFRAG) &&
	    xfs_inherit_nodefrag)
		di_flags |= XFS_DIFLAG_NODEFRAG;
	if (pip->i_diflags & XFS_DIFLAG_FILESTREAM)
		di_flags |= XFS_DIFLAG_FILESTREAM;

	ip->i_diflags |= di_flags;

	/*
	 * Inode verifiers on older kernels only check that the extent size
	 * hint is an integer multiple of the rt extent size on realtime files.
	 * They did not check the hint alignment on a directory with both
	 * rtinherit and extszinherit flags set.  If the misaligned hint is
	 * propagated from a directory into a new realtime file, new file
	 * allocations will fail due to math errors in the rt allocator and/or
	 * trip the verifiers.  Validate the hint settings in the new file so
	 * that we don't let broken hints propagate.
	 */
	failaddr = xfs_inode_validate_extsize(ip->i_mount, ip->i_extsize,
			VFS_I(ip)->i_mode, ip->i_diflags);
	if (failaddr) {
		ip->i_diflags &= ~(XFS_DIFLAG_EXTSIZE |
				   XFS_DIFLAG_EXTSZINHERIT);
		ip->i_extsize = 0;
	}
}

/* Propagate di_flags2 from a parent inode to a child inode. */
static void
xfs_inode_inherit_flags2(
	struct xfs_inode	*ip,
	const struct xfs_inode	*pip)
{
	xfs_failaddr_t		failaddr;

	if (pip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE) {
		ip->i_diflags2 |= XFS_DIFLAG2_COWEXTSIZE;
		ip->i_cowextsize = pip->i_cowextsize;
	}
	if (pip->i_diflags2 & XFS_DIFLAG2_DAX)
		ip->i_diflags2 |= XFS_DIFLAG2_DAX;

	/* Don't let invalid cowextsize hints propagate. */
	failaddr = xfs_inode_validate_cowextsize(ip->i_mount, ip->i_cowextsize,
			VFS_I(ip)->i_mode, ip->i_diflags, ip->i_diflags2);
	if (failaddr) {
		ip->i_diflags2 &= ~XFS_DIFLAG2_COWEXTSIZE;
		ip->i_cowextsize = 0;
	}
}

/*
 * Initialise a newly allocated inode and return the in-core inode to the
 * caller locked exclusively.
 */
int
xfs_init_new_inode(
	struct user_namespace	*mnt_userns,
	struct xfs_trans	*tp,
	struct xfs_inode	*pip,
	xfs_ino_t		ino,
	umode_t			mode,
	xfs_nlink_t		nlink,
	dev_t			rdev,
	prid_t			prid,
	bool			init_xattrs,
	struct xfs_inode	**ipp)
{
	struct inode		*dir = pip ? VFS_I(pip) : NULL;
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_inode	*ip;
	unsigned int		flags;
	int			error;
	struct timespec64	tv;
	struct inode		*inode;

	/*
	 * Protect against obviously corrupt allocation btree records.  Later
	 * xfs_iget checks will catch re-allocation of other active in-memory
	 * and on-disk inodes.  If we don't catch reallocating the parent inode
	 * here we will deadlock in xfs_iget() so we have to do these checks
	 * first.
	 */
	if ((pip && ino == pip->i_ino) || !xfs_verify_dir_ino(mp, ino)) {
		xfs_alert(mp, "Allocated a known in-use inode 0x%llx!", ino);
		return -EFSCORRUPTED;
	}

	/*
	 * Get the in-core inode with the lock held exclusively to prevent
	 * others from looking at it until we're done.
	 */
	error = xfs_iget(mp, tp, ino, XFS_IGET_CREATE, XFS_ILOCK_EXCL, &ip);
	if (error)
		return error;

	ASSERT(ip != NULL);
	inode = VFS_I(ip);
	set_nlink(inode, nlink);
	inode->i_rdev = rdev;
	ip->i_projid = prid;

	if (dir && !(dir->i_mode & S_ISGID) &&
	    (mp->m_flags & XFS_MOUNT_GRPID)) {
		inode_fsuid_set(inode, mnt_userns);
		inode->i_gid = dir->i_gid;
		inode->i_mode = mode;
	} else {
		inode_init_owner(mnt_userns, inode, dir, mode);
	}

	/*
	 * If the group ID of the new file does not match the effective group
	 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
	 * (and only if the irix_sgid_inherit compatibility variable is set).
	 */
	if (irix_sgid_inherit &&
	    (inode->i_mode & S_ISGID) &&
	    !in_group_p(i_gid_into_mnt(mnt_userns, inode)))
		inode->i_mode &= ~S_ISGID;

	ip->i_disk_size = 0;
	ip->i_df.if_nextents = 0;
	ASSERT(ip->i_nblocks == 0);

	tv = current_time(inode);
	inode->i_mtime = tv;
	inode->i_atime = tv;
	inode->i_ctime = tv;

	ip->i_extsize = 0;
	ip->i_diflags = 0;

	if (xfs_sb_version_has_v3inode(&mp->m_sb)) {
		inode_set_iversion(inode, 1);
		ip->i_cowextsize = 0;
		ip->i_crtime = tv;
	}

	flags = XFS_ILOG_CORE;
	switch (mode & S_IFMT) {
	case S_IFIFO:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFSOCK:
		ip->i_df.if_format = XFS_DINODE_FMT_DEV;
		flags |= XFS_ILOG_DEV;
		break;
	case S_IFREG:
	case S_IFDIR:
		if (pip && (pip->i_diflags & XFS_DIFLAG_ANY))
			xfs_inode_inherit_flags(ip, pip);
		if (pip && (pip->i_diflags2 & XFS_DIFLAG2_ANY))
			xfs_inode_inherit_flags2(ip, pip);
		fallthrough;
	case S_IFLNK:
		ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
		ip->i_df.if_bytes = 0;
		ip->i_df.if_u1.if_root = NULL;
		break;
	default:
		ASSERT(0);
	}

	/*
	 * If we need to create attributes immediately after allocating the
	 * inode, initialise an empty attribute fork right now.  We use the
	 * default fork offset for attributes here as we don't know exactly what
	 * size or how many attributes we might be adding.  We can do this
	 * safely here because we know the data fork is completely empty and
	 * this saves us from needing to run a separate transaction to set the
	 * fork offset in the immediate future.
	 */
	if (init_xattrs && xfs_sb_version_hasattr(&mp->m_sb)) {
		ip->i_forkoff = xfs_default_attroffset(ip) >> 3;
		ip->i_afp = xfs_ifork_alloc(XFS_DINODE_FMT_EXTENTS, 0);
	}

	/*
	 * Log the new values stuffed into the inode.
	 */
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, flags);

	/* now that we have an i_mode we can setup the inode structure */
	xfs_setup_inode(ip);

	*ipp = ip;
	return 0;
}

/*
 * Decrement the link count on an inode & log the change.  If this causes the
 * link count to go to zero, move the inode to the AGI unlinked list so that
 * it can be freed when the last active reference goes away via xfs_inactive().
 */
static int			/* error */
xfs_droplink(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip)
{
	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);

	drop_nlink(VFS_I(ip));
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	if (VFS_I(ip)->i_nlink)
		return 0;

	return xfs_iunlink(tp, ip);
}

/*
 * Increment the link count on an inode & log the change.
 */
static void
xfs_bumplink(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip)
{
	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);

	inc_nlink(VFS_I(ip));
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
}
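
/*
 * Recap of xfs_droplink() above (illustrative only): when a directory entry
 * removal takes the link count to zero, the inode is parked on the AGI
 * unlinked list via xfs_iunlink(); the inode is actually freed later, in
 * xfs_inactive(), once the last in-core reference goes away.
 */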

int
xfs_create(
	struct user_namespace	*mnt_userns,
	xfs_inode_t		*dp,
	struct xfs_name		*name,
	umode_t			mode,
	dev_t			rdev,
	bool			init_xattrs,
	xfs_inode_t		**ipp)
{
	int			is_dir = S_ISDIR(mode);
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_inode	*ip = NULL;
	struct xfs_trans	*tp = NULL;
	int			error;
	bool			unlock_dp_on_error = false;
	prid_t			prid;
	struct xfs_dquot	*udqp = NULL;
	struct xfs_dquot	*gdqp = NULL;
	struct xfs_dquot	*pdqp = NULL;
	struct xfs_trans_res	*tres;
	uint			resblks;
	xfs_ino_t		ino;

	trace_xfs_create(dp, name);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	prid = xfs_get_initial_prid(dp);

	/*
	 * Make sure that we have allocated dquot(s) on disk.
	 */
	error = xfs_qm_vop_dqalloc(dp, mapped_fsuid(mnt_userns),
			mapped_fsgid(mnt_userns), prid,
			XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
			&udqp, &gdqp, &pdqp);
	if (error)
		return error;

	if (is_dir) {
		resblks = XFS_MKDIR_SPACE_RES(mp, name->len);
		tres = &M_RES(mp)->tr_mkdir;
	} else {
		resblks = XFS_CREATE_SPACE_RES(mp, name->len);
		tres = &M_RES(mp)->tr_create;
	}

	/*
	 * Initially assume that the file does not exist and
	 * reserve the resources for that case.  If that is not
	 * the case we'll drop the one we have and get a more
	 * appropriate transaction later.
	 */
	error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp, resblks,
			&tp);
	if (error == -ENOSPC) {
		/* flush outstanding delalloc blocks and retry */
		xfs_flush_inodes(mp);
		error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp,
				resblks, &tp);
	}
	if (error)
		goto out_release_dquots;

	xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
	unlock_dp_on_error = true;

	error = xfs_iext_count_may_overflow(dp, XFS_DATA_FORK,
			XFS_IEXT_DIR_MANIP_CNT(mp));
	if (error)
		goto out_trans_cancel;

	/*
	 * A newly created regular or special file just has one directory
	 * entry pointing to it, but a directory also has the "." entry
	 * pointing to itself.
	 */
	error = xfs_dialloc(&tp, dp->i_ino, mode, &ino);
	if (!error)
		error = xfs_init_new_inode(mnt_userns, tp, dp, ino, mode,
				is_dir ? 2 : 1, rdev, prid, init_xattrs, &ip);
	if (error)
		goto out_trans_cancel;

	/*
	 * Now we join the directory inode to the transaction.  We do not do it
	 * earlier because xfs_dialloc might commit the previous transaction
	 * (and release all the locks).  An error from here on will result in
	 * the transaction cancel unlocking dp so don't do it explicitly in the
	 * error path.
	 */
	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
	unlock_dp_on_error = false;

	error = xfs_dir_createname(tp, dp, name, ip->i_ino,
				   resblks - XFS_IALLOC_SPACE_RES(mp));
	if (error) {
		ASSERT(error != -ENOSPC);
		goto out_trans_cancel;
	}
	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);

	if (is_dir) {
		error = xfs_dir_init(tp, ip, dp);
		if (error)
			goto out_trans_cancel;

		xfs_bumplink(tp, dp);
	}

	/*
	 * If this is a synchronous mount, make sure that the
	 * create transaction goes to disk before returning to
	 * the user.
	 */
	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
		xfs_trans_set_sync(tp);

	/*
	 * Attach the dquot(s) to the inodes and modify them incore.
	 * The IDs of the inode couldn't have changed since the new
	 * inode has been locked ever since it was created.
	 */
	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);

	error = xfs_trans_commit(tp);
	if (error)
		goto out_release_inode;

	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	*ipp = ip;
	return 0;

out_trans_cancel:
	xfs_trans_cancel(tp);
out_release_inode:
	/*
	 * Wait until after the current transaction is aborted to finish the
	 * setup of the inode and release the inode.  This prevents recursive
	 * transactions and deadlocks from xfs_inactive.
	 */
	if (ip) {
		xfs_finish_inode_setup(ip);
		xfs_irele(ip);
	}
out_release_dquots:
	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	if (unlock_dp_on_error)
		xfs_iunlock(dp, XFS_ILOCK_EXCL);
	return error;
}

int
xfs_create_tmpfile(
	struct user_namespace	*mnt_userns,
	struct xfs_inode	*dp,
	umode_t			mode,
	struct xfs_inode	**ipp)
{
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_inode	*ip = NULL;
	struct xfs_trans	*tp = NULL;
	int			error;
	prid_t			prid;
	struct xfs_dquot	*udqp = NULL;
	struct xfs_dquot	*gdqp = NULL;
	struct xfs_dquot	*pdqp = NULL;
	struct xfs_trans_res	*tres;
	uint			resblks;
	xfs_ino_t		ino;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	prid = xfs_get_initial_prid(dp);

	/*
	 * Make sure that we have allocated dquot(s) on disk.
	 */
	error = xfs_qm_vop_dqalloc(dp, mapped_fsuid(mnt_userns),
			mapped_fsgid(mnt_userns), prid,
			XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
			&udqp, &gdqp, &pdqp);
	if (error)
		return error;

	resblks = XFS_IALLOC_SPACE_RES(mp);
	tres = &M_RES(mp)->tr_create_tmpfile;

	error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp, resblks,
			&tp);
	if (error)
		goto out_release_dquots;

	error = xfs_dialloc(&tp, dp->i_ino, mode, &ino);
	if (!error)
		error = xfs_init_new_inode(mnt_userns, tp, dp, ino, mode,
				0, 0, prid, false, &ip);
	if (error)
		goto out_trans_cancel;

	if (mp->m_flags & XFS_MOUNT_WSYNC)
		xfs_trans_set_sync(tp);

	/*
	 * Attach the dquot(s) to the inodes and modify them incore.
	 * The IDs of the inode couldn't have changed since the new
	 * inode has been locked ever since it was created.
	 */
	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);

	error = xfs_iunlink(tp, ip);
	if (error)
		goto out_trans_cancel;

	error = xfs_trans_commit(tp);
	if (error)
		goto out_release_inode;

	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	*ipp = ip;
	return 0;

out_trans_cancel:
	xfs_trans_cancel(tp);
out_release_inode:
	/*
	 * Wait until after the current transaction is aborted to finish the
	 * setup of the inode and release the inode.  This prevents recursive
	 * transactions and deadlocks from xfs_inactive.
	 */
	if (ip) {
		xfs_finish_inode_setup(ip);
		xfs_irele(ip);
	}
out_release_dquots:
	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	return error;
}

int
xfs_link(
	xfs_inode_t		*tdp,
	xfs_inode_t		*sip,
	struct xfs_name		*target_name)
{
	xfs_mount_t		*mp = tdp->i_mount;
	xfs_trans_t		*tp;
	int			error;
	int			resblks;

	trace_xfs_link(tdp, target_name);

	ASSERT(!S_ISDIR(VFS_I(sip)->i_mode));

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	error = xfs_qm_dqattach(sip);
	if (error)
		goto std_return;

	error = xfs_qm_dqattach(tdp);
	if (error)
		goto std_return;

	resblks = XFS_LINK_SPACE_RES(mp, target_name->len);
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link, resblks, 0, 0, &tp);
	if (error == -ENOSPC) {
		resblks = 0;
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link, 0, 0, 0, &tp);
	}
	if (error)
		goto std_return;

	xfs_lock_two_inodes(sip, XFS_ILOCK_EXCL, tdp, XFS_ILOCK_EXCL);

	xfs_trans_ijoin(tp, sip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, tdp, XFS_ILOCK_EXCL);

	error = xfs_iext_count_may_overflow(tdp, XFS_DATA_FORK,
			XFS_IEXT_DIR_MANIP_CNT(mp));
	if (error)
		goto error_return;

	/*
	 * If we are using project inheritance, we only allow hard link
	 * creation in our tree when the project IDs are the same; else
	 * the tree quota mechanism could be circumvented.
	 */
	if (unlikely((tdp->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
		     tdp->i_projid != sip->i_projid)) {
		error = -EXDEV;
		goto error_return;
	}

	if (!resblks) {
		error = xfs_dir_canenter(tp, tdp, target_name);
		if (error)
			goto error_return;
	}

	/*
	 * Handle the initial link state of an O_TMPFILE inode.
	 */
	if (VFS_I(sip)->i_nlink == 0) {
		struct xfs_perag	*pag;

		pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, sip->i_ino));
		error = xfs_iunlink_remove(tp, pag, sip);
		xfs_perag_put(pag);
		if (error)
			goto error_return;
	}

	error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino,
				   resblks);
	if (error)
		goto error_return;
	xfs_trans_ichgtime(tp, tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE);

	xfs_bumplink(tp, sip);

	/*
	 * If this is a synchronous mount, make sure that the
	 * link transaction goes to disk before returning to
	 * the user.
	 */
	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
		xfs_trans_set_sync(tp);

	return xfs_trans_commit(tp);

 error_return:
	xfs_trans_cancel(tp);
 std_return:
	return error;
}

/* Clear the reflink flag and the cowblocks tag if possible. */
static void
xfs_itruncate_clear_reflink_flags(
	struct xfs_inode	*ip)
{
	struct xfs_ifork	*dfork;
	struct xfs_ifork	*cfork;

	if (!xfs_is_reflink_inode(ip))
		return;
	dfork = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	cfork = XFS_IFORK_PTR(ip, XFS_COW_FORK);
	if (dfork->if_bytes == 0 && cfork->if_bytes == 0)
		ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
	if (cfork->if_bytes == 0)
		xfs_inode_clear_cowblocks_tag(ip);
}

/*
 * Free up the underlying blocks past new_size.  The new size must be smaller
 * than the current size.  This routine can be used both for the attribute and
 * data fork, and does not modify the inode size, which is left to the caller.
 *
 * The transaction passed to this routine must have made a permanent log
 * reservation of at least XFS_ITRUNCATE_LOG_RES.  This routine may commit the
 * given transaction and start new ones, so make sure everything involved in
 * the transaction is tidy before calling here.  A transaction will be
 * returned to the caller to be committed.  The incoming transaction must
 * already include the inode, and both inode locks must be held exclusively.
 * The inode must also be "held" within the transaction.  On return the inode
 * will be "held" within the returned transaction.  This routine does NOT
 * require any disk space to be reserved for it within the transaction.
 *
 * If we get an error, we must return with the inode locked and linked into the
 * current transaction.  This keeps things simple for the higher level code,
 * because it always knows that the inode is locked and held in the transaction
 * that returns to it whether errors occur or not.  We don't mark the inode
 * dirty on error so that transactions can be easily aborted if possible.
 */
int
xfs_itruncate_extents_flags(
	struct xfs_trans	**tpp,
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_fsize_t		new_size,
	int			flags)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp = *tpp;
	xfs_fileoff_t		first_unmap_block;
	xfs_filblks_t		unmap_len;
	int			error = 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(!atomic_read(&VFS_I(ip)->i_count) ||
	       xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(new_size <= XFS_ISIZE(ip));
	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(ip->i_itemp != NULL);
	ASSERT(ip->i_itemp->ili_lock_flags == 0);
	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));

	trace_xfs_itruncate_extents_start(ip, new_size);

	flags |= xfs_bmapi_aflag(whichfork);

	/*
	 * Since it is possible for space to become allocated beyond
	 * the end of the file (in a crash where the space is allocated
	 * but the inode size is not yet updated), simply remove any
	 * blocks which show up between the new EOF and the maximum
	 * possible file size.
	 *
	 * We have to free all the blocks to the bmbt maximum offset, even if
	 * the page cache can't scale that far.
	 */
	first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
	if (!xfs_verify_fileoff(mp, first_unmap_block)) {
		WARN_ON_ONCE(first_unmap_block > XFS_MAX_FILEOFF);
		return 0;
	}

	unmap_len = XFS_MAX_FILEOFF - first_unmap_block + 1;
	while (unmap_len > 0) {
		ASSERT(tp->t_firstblock == NULLFSBLOCK);
		error = __xfs_bunmapi(tp, ip, first_unmap_block, &unmap_len,
				flags, XFS_ITRUNC_MAX_EXTENTS);
		if (error)
			goto out;

		/* free the just unmapped extents */
		error = xfs_defer_finish(&tp);
		if (error)
			goto out;
	}

	if (whichfork == XFS_DATA_FORK) {
		/* Remove all pending CoW reservations. */
		error = xfs_reflink_cancel_cow_blocks(ip, &tp,
				first_unmap_block, XFS_MAX_FILEOFF, true);
		if (error)
			goto out;

		xfs_itruncate_clear_reflink_flags(ip);
	}

	/*
	 * Always re-log the inode so that our permanent transaction can keep
	 * on rolling it forward in the log.
	 */
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	trace_xfs_itruncate_extents_end(ip, new_size);

out:
	*tpp = tp;
	return error;
}
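
/*
 * Caller pattern (illustrative sketch): xfs_inactive_truncate() below drops
 * every data fork block past offset zero through the xfs_itruncate_extents()
 * wrapper around this routine:
 *
 *	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
 */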

int
xfs_release(
	xfs_inode_t	*ip)
{
	xfs_mount_t	*mp = ip->i_mount;
	int		error = 0;

	if (!S_ISREG(VFS_I(ip)->i_mode) || (VFS_I(ip)->i_mode == 0))
		return 0;

	/* If this is a read-only mount, don't do this (would generate I/O) */
	if (mp->m_flags & XFS_MOUNT_RDONLY)
		return 0;

	if (!XFS_FORCED_SHUTDOWN(mp)) {
		int truncated;

		/*
		 * If we previously truncated this file and removed old data
		 * in the process, we want to initiate "early" writeout on
		 * the last close.  This is an attempt to combat the notorious
		 * NULL files problem which is particularly noticeable from a
		 * truncate down, buffered (re-)write (delalloc), followed by
		 * a crash.  What we are effectively doing here is
		 * significantly reducing the time window where we'd otherwise
		 * be exposed to that problem.
		 */
		truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
		if (truncated) {
			xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE);
			if (ip->i_delayed_blks > 0) {
				error = filemap_flush(VFS_I(ip)->i_mapping);
				if (error)
					return error;
			}
		}
	}

	if (VFS_I(ip)->i_nlink == 0)
		return 0;

	/*
	 * If we can't get the iolock just skip truncating the blocks past EOF
	 * because we could deadlock with the mmap_lock otherwise.  We'll get
	 * another chance to drop them once the last reference to the inode is
	 * dropped, so we'll never leak blocks permanently.
	 */
	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL))
		return 0;

	if (xfs_can_free_eofblocks(ip, false)) {
		/*
		 * If the inode is being opened, written and closed frequently
		 * and we have delayed allocation blocks outstanding (e.g.
		 * streaming writes from the NFS server), truncating the blocks
		 * past EOF will cause fragmentation to occur.
		 *
		 * In this case don't do the truncation, but we have to be
		 * careful how we detect this case.  Blocks beyond EOF show up
		 * as i_delayed_blks even when the inode is clean, so we need
		 * to truncate them away first before checking for a dirty
		 * release.  Hence on the first dirty close we will still
		 * remove the speculative allocation, but after that we will
		 * leave it in place.
		 */
		if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE))
			goto out_unlock;

		error = xfs_free_eofblocks(ip);
		if (error)
			goto out_unlock;

		/* delalloc blocks after truncation means it really is dirty */
		if (ip->i_delayed_blks)
			xfs_iflags_set(ip, XFS_IDIRTY_RELEASE);
	}

out_unlock:
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	return error;
}

/*
 * xfs_inactive_truncate
 *
 * Called to perform a truncate when an inode becomes unlinked.
 */
STATIC int
xfs_inactive_truncate(
	struct xfs_inode *ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
	if (error) {
		ASSERT(XFS_FORCED_SHUTDOWN(mp));
		return error;
	}
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * Log the inode size first to prevent stale data exposure in the event
	 * of a system crash before the truncate completes.  See the related
	 * comment in xfs_vn_setattr_size() for details.
	 */
	ip->i_disk_size = 0;
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
	if (error)
		goto error_trans_cancel;

	ASSERT(ip->i_df.if_nextents == 0);

	error = xfs_trans_commit(tp);
	if (error)
		goto error_unlock;

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return 0;

error_trans_cancel:
	xfs_trans_cancel(tp);
error_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

/*
 * xfs_inactive_ifree()
 *
 * Perform the inode free when an inode is unlinked.
 */
STATIC int
xfs_inactive_ifree(
	struct xfs_inode *ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	/*
	 * We try to use a per-AG reservation for any block needed by the finobt
	 * tree, but as the finobt feature predates the per-AG reservation
	 * support a degraded file system might not have enough space for the
	 * reservation at mount time.  In that case try to dip into the reserved
	 * pool and pray.
	 *
	 * Send a warning if the reservation does happen to fail, as the inode
	 * now remains allocated and sits on the unlinked list until the fs is
	 * repaired.
	 */
	if (unlikely(mp->m_finobt_nores)) {
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree,
				XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE,
				&tp);
	} else {
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 0, 0, 0, &tp);
	}
	if (error) {
		if (error == -ENOSPC) {
			xfs_warn_ratelimited(mp,
			"Failed to remove inode(s) from unlinked list. "
			"Please free space, unmount and run xfs_repair.");
		} else {
			ASSERT(XFS_FORCED_SHUTDOWN(mp));
		}
		return error;
	}

	/*
	 * We do not hold the inode locked across the entire rolling transaction
	 * here.  We only need to hold it for the first transaction that
	 * xfs_ifree() builds, which may mark the inode XFS_ISTALE if the
	 * underlying cluster buffer is freed.  Relogging an XFS_ISTALE inode
	 * here breaks the relationship between cluster buffer invalidation and
	 * stale inode invalidation on cluster buffer item journal commit
	 * completion, and can result in leaving dirty stale inodes hanging
	 * around in memory.
	 *
	 * We have no need for serialising this inode operation against other
	 * operations - we freed the inode and hence reallocation is required
	 * and that will serialise on reallocating the space the deferops need
	 * to free.  Hence we can unlock the inode on the first commit of
	 * the transaction rather than roll it right through the deferops.  This
	 * avoids relogging the XFS_ISTALE inode.
	 *
	 * We check that xfs_ifree() hasn't grown an internal transaction roll
	 * by asserting that the inode is still locked when it returns.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	error = xfs_ifree(tp, ip);
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	if (error) {
		/*
		 * If we fail to free the inode, shut down.  The cancel
		 * might do that, we need to make sure.  Otherwise the
		 * inode might be lost for a long time or forever.
		 */
		if (!XFS_FORCED_SHUTDOWN(mp)) {
			xfs_notice(mp, "%s: xfs_ifree returned error %d",
				__func__, error);
			xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
		}
		xfs_trans_cancel(tp);
		return error;
	}

	/*
	 * Credit the quota account(s).  The inode is gone.
	 */
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);

	/*
	 * Just ignore errors at this point.  There is nothing we can do except
	 * to try to keep going.  Make sure it's not a silent error.
	 */
	error = xfs_trans_commit(tp);
	if (error)
		xfs_notice(mp, "%s: xfs_trans_commit returned error %d",
			__func__, error);

	return 0;
}
If the file has been unlinked, then it must 1662 * now be truncated. Also, we clear all of the read-ahead state 1663 * kept for the inode here since the file is now closed. 1664 */ 1665 void 1666 xfs_inactive( 1667 xfs_inode_t *ip) 1668 { 1669 struct xfs_mount *mp; 1670 int error; 1671 int truncate = 0; 1672 1673 /* 1674 * If the inode is already free, then there can be nothing 1675 * to clean up here. 1676 */ 1677 if (VFS_I(ip)->i_mode == 0) { 1678 ASSERT(ip->i_df.if_broot_bytes == 0); 1679 goto out; 1680 } 1681 1682 mp = ip->i_mount; 1683 ASSERT(!xfs_iflags_test(ip, XFS_IRECOVERY)); 1684 1685 /* If this is a read-only mount, don't do this (would generate I/O) */ 1686 if (mp->m_flags & XFS_MOUNT_RDONLY) 1687 goto out; 1688 1689 /* Metadata inodes require explicit resource cleanup. */ 1690 if (xfs_is_metadata_inode(ip)) 1691 goto out; 1692 1693 /* Try to clean out the cow blocks if there are any. */ 1694 if (xfs_inode_has_cow_data(ip)) 1695 xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true); 1696 1697 if (VFS_I(ip)->i_nlink != 0) { 1698 /* 1699 * force is true because we are evicting an inode from the 1700 * cache. Post-eof blocks must be freed, lest we end up with 1701 * broken free space accounting. 1702 * 1703 * Note: don't bother with iolock here since lockdep complains 1704 * about acquiring it in reclaim context. We have the only 1705 * reference to the inode at this point anyways. 1706 */ 1707 if (xfs_can_free_eofblocks(ip, true)) 1708 xfs_free_eofblocks(ip); 1709 1710 goto out; 1711 } 1712 1713 if (S_ISREG(VFS_I(ip)->i_mode) && 1714 (ip->i_disk_size != 0 || XFS_ISIZE(ip) != 0 || 1715 ip->i_df.if_nextents > 0 || ip->i_delayed_blks > 0)) 1716 truncate = 1; 1717 1718 error = xfs_qm_dqattach(ip); 1719 if (error) 1720 goto out; 1721 1722 if (S_ISLNK(VFS_I(ip)->i_mode)) 1723 error = xfs_inactive_symlink(ip); 1724 else if (truncate) 1725 error = xfs_inactive_truncate(ip); 1726 if (error) 1727 goto out; 1728 1729 /* 1730 * If there are attributes associated with the file then blow them away 1731 * now. The code calls a routine that recursively deconstructs the 1732 * attribute fork. If also blows away the in-core attribute fork. 1733 */ 1734 if (XFS_IFORK_Q(ip)) { 1735 error = xfs_attr_inactive(ip); 1736 if (error) 1737 goto out; 1738 } 1739 1740 ASSERT(!ip->i_afp); 1741 ASSERT(ip->i_forkoff == 0); 1742 1743 /* 1744 * Free the inode. 1745 */ 1746 xfs_inactive_ifree(ip); 1747 1748 out: 1749 /* 1750 * We're done making metadata updates for this inode, so we can release 1751 * the attached dquots. 1752 */ 1753 xfs_qm_dqdetach(ip); 1754 } 1755 1756 /* 1757 * In-Core Unlinked List Lookups 1758 * ============================= 1759 * 1760 * Every inode is supposed to be reachable from some other piece of metadata 1761 * with the exception of the root directory. Inodes with a connection to a 1762 * file descriptor but not linked from anywhere in the on-disk directory tree 1763 * are collectively known as unlinked inodes, though the filesystem itself 1764 * maintains links to these inodes so that on-disk metadata are consistent. 1765 * 1766 * XFS implements a per-AG on-disk hash table of unlinked inodes. The AGI 1767 * header contains a number of buckets that point to an inode, and each inode 1768 * record has a pointer to the next inode in the hash chain. This 1769 * singly-linked list causes scaling problems in the iunlink remove function 1770 * because we must walk that list to find the inode that points to the inode 1771 * being removed from the unlinked hash bucket list. 

/*
 * In-Core Unlinked List Lookups
 * =============================
 *
 * Every inode is supposed to be reachable from some other piece of metadata
 * with the exception of the root directory.  Inodes with a connection to a
 * file descriptor but not linked from anywhere in the on-disk directory tree
 * are collectively known as unlinked inodes, though the filesystem itself
 * maintains links to these inodes so that on-disk metadata are consistent.
 *
 * XFS implements a per-AG on-disk hash table of unlinked inodes.  The AGI
 * header contains a number of buckets that point to an inode, and each inode
 * record has a pointer to the next inode in the hash chain.  This
 * singly-linked list causes scaling problems in the iunlink remove function
 * because we must walk that list to find the inode that points to the inode
 * being removed from the unlinked hash bucket list.
 *
 * What if we modelled the unlinked list as a collection of records capturing
 * "X.next_unlinked = Y" relations?  If we indexed those records on Y, we'd
 * have a fast way to look up unlinked list predecessors, which avoids the
 * slow list walk.  That's exactly what we do here (in-core) with a per-AG
 * rhashtable.
 *
 * Because this is a backref cache, we ignore operational failures since the
 * iunlink code can fall back to the slow bucket walk.  The only errors that
 * should bubble out are for obviously incorrect situations.
 *
 * All users of the backref cache MUST hold the AGI buffer lock to serialize
 * access or have otherwise provided for concurrency control.
 */

/* Capture a "X.next_unlinked = Y" relationship. */
struct xfs_iunlink {
	struct rhash_head	iu_rhash_head;
	xfs_agino_t		iu_agino;		/* X */
	xfs_agino_t		iu_next_unlinked;	/* Y */
};

/* Unlinked list predecessor lookup hashtable construction */
static int
xfs_iunlink_obj_cmpfn(
	struct rhashtable_compare_arg	*arg,
	const void			*obj)
{
	const xfs_agino_t		*key = arg->key;
	const struct xfs_iunlink	*iu = obj;

	if (iu->iu_next_unlinked != *key)
		return 1;
	return 0;
}

static const struct rhashtable_params xfs_iunlink_hash_params = {
	.min_size		= XFS_AGI_UNLINKED_BUCKETS,
	.key_len		= sizeof(xfs_agino_t),
	.key_offset		= offsetof(struct xfs_iunlink,
					   iu_next_unlinked),
	.head_offset		= offsetof(struct xfs_iunlink, iu_rhash_head),
	.automatic_shrinking	= true,
	.obj_cmpfn		= xfs_iunlink_obj_cmpfn,
};

/*
 * Return X, where X.next_unlinked == @agino.  Returns NULLAGINO if no such
 * relation is found.
 */
static xfs_agino_t
xfs_iunlink_lookup_backref(
	struct xfs_perag	*pag,
	xfs_agino_t		agino)
{
	struct xfs_iunlink	*iu;

	iu = rhashtable_lookup_fast(&pag->pagi_unlinked_hash, &agino,
			xfs_iunlink_hash_params);
	return iu ? iu->iu_agino : NULLAGINO;
}

/*
 * Take ownership of an iunlink cache entry and insert it into the hash table.
 * If successful, the entry will be owned by the cache; if not, it is freed.
 * Either way, the caller does not own @iu after this call.
 */
static int
xfs_iunlink_insert_backref(
	struct xfs_perag	*pag,
	struct xfs_iunlink	*iu)
{
	int			error;

	error = rhashtable_insert_fast(&pag->pagi_unlinked_hash,
			&iu->iu_rhash_head, xfs_iunlink_hash_params);
	/*
	 * Fail loudly if there already was an entry because that's a sign of
	 * corruption of in-memory data.  Also fail loudly if we see an error
	 * code we didn't anticipate from the rhashtable code.  Currently we
	 * only anticipate ENOMEM.
	 */
	if (error) {
		WARN(error != -ENOMEM, "iunlink cache insert error %d", error);
		kmem_free(iu);
	}
	/*
	 * Absorb any runtime errors that aren't a result of corruption because
	 * this is a cache and we can always fall back to bucket list scanning.
	 */
	if (error != 0 && error != -EEXIST)
		error = 0;
	return error;
}
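
/*
 * Worked example (illustrative only): for an on-disk bucket chain
 * 3 -> 2 -> 1, the cache holds the records (X=3, Y=2) and (X=2, Y=1)
 * keyed on Y, so xfs_iunlink_lookup_backref(pag, 2) returns 3 without
 * walking the bucket list.
 */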
*/ 1868 static int 1869 xfs_iunlink_add_backref( 1870 struct xfs_perag *pag, 1871 xfs_agino_t prev_agino, 1872 xfs_agino_t this_agino) 1873 { 1874 struct xfs_iunlink *iu; 1875 1876 if (XFS_TEST_ERROR(false, pag->pag_mount, XFS_ERRTAG_IUNLINK_FALLBACK)) 1877 return 0; 1878 1879 iu = kmem_zalloc(sizeof(*iu), KM_NOFS); 1880 iu->iu_agino = prev_agino; 1881 iu->iu_next_unlinked = this_agino; 1882 1883 return xfs_iunlink_insert_backref(pag, iu); 1884 } 1885 1886 /* 1887 * Replace X.next_unlinked = @agino with X.next_unlinked = @next_unlinked. 1888 * If @next_unlinked is NULLAGINO, we drop the backref and exit. If there 1889 * wasn't any such entry then we don't bother. 1890 */ 1891 static int 1892 xfs_iunlink_change_backref( 1893 struct xfs_perag *pag, 1894 xfs_agino_t agino, 1895 xfs_agino_t next_unlinked) 1896 { 1897 struct xfs_iunlink *iu; 1898 int error; 1899 1900 /* Look up the old entry; if there wasn't one then exit. */ 1901 iu = rhashtable_lookup_fast(&pag->pagi_unlinked_hash, &agino, 1902 xfs_iunlink_hash_params); 1903 if (!iu) 1904 return 0; 1905 1906 /* 1907 * Remove the entry. This shouldn't ever return an error, but if we 1908 * couldn't remove the old entry we don't want to add it again to the 1909 * hash table, and if the entry disappeared on us then someone's 1910 * violated the locking rules and we need to fail loudly. Either way 1911 * we cannot remove the inode because internal state is or would have 1912 * been corrupt. 1913 */ 1914 error = rhashtable_remove_fast(&pag->pagi_unlinked_hash, 1915 &iu->iu_rhash_head, xfs_iunlink_hash_params); 1916 if (error) 1917 return error; 1918 1919 /* If there is no new next entry just free our item and return. */ 1920 if (next_unlinked == NULLAGINO) { 1921 kmem_free(iu); 1922 return 0; 1923 } 1924 1925 /* Update the entry and re-add it to the hash table. */ 1926 iu->iu_next_unlinked = next_unlinked; 1927 return xfs_iunlink_insert_backref(pag, iu); 1928 } 1929 1930 /* Set up the in-core predecessor structures. */ 1931 int 1932 xfs_iunlink_init( 1933 struct xfs_perag *pag) 1934 { 1935 return rhashtable_init(&pag->pagi_unlinked_hash, 1936 &xfs_iunlink_hash_params); 1937 } 1938 1939 /* Free the in-core predecessor structures. */ 1940 static void 1941 xfs_iunlink_free_item( 1942 void *ptr, 1943 void *arg) 1944 { 1945 struct xfs_iunlink *iu = ptr; 1946 bool *freed_anything = arg; 1947 1948 *freed_anything = true; 1949 kmem_free(iu); 1950 } 1951 1952 void 1953 xfs_iunlink_destroy( 1954 struct xfs_perag *pag) 1955 { 1956 bool freed_anything = false; 1957 1958 rhashtable_free_and_destroy(&pag->pagi_unlinked_hash, 1959 xfs_iunlink_free_item, &freed_anything); 1960 1961 ASSERT(freed_anything == false || XFS_FORCED_SHUTDOWN(pag->pag_mount)); 1962 } 1963 1964 /* 1965 * Point the AGI unlinked bucket at an inode and log the results. The caller 1966 * is responsible for validating the old value. 
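 * As a hedged illustration (invented numbers): when inode 103 is pushed onto
 * the front of its list by xfs_iunlink(), bucket (103 % XFS_AGI_UNLINKED_BUCKETS)
 * is repointed from the old head to 103, after the caller has verified the old
 * head and made 103's next_unlinked point at it.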
1967 */ 1968 STATIC int 1969 xfs_iunlink_update_bucket( 1970 struct xfs_trans *tp, 1971 struct xfs_perag *pag, 1972 struct xfs_buf *agibp, 1973 unsigned int bucket_index, 1974 xfs_agino_t new_agino) 1975 { 1976 struct xfs_agi *agi = agibp->b_addr; 1977 xfs_agino_t old_value; 1978 int offset; 1979 1980 ASSERT(xfs_verify_agino_or_null(tp->t_mountp, pag->pag_agno, new_agino)); 1981 1982 old_value = be32_to_cpu(agi->agi_unlinked[bucket_index]); 1983 trace_xfs_iunlink_update_bucket(tp->t_mountp, pag->pag_agno, bucket_index, 1984 old_value, new_agino); 1985 1986 /* 1987 * We should never find the head of the list already set to the value 1988 * passed in because either we're adding or removing ourselves from the 1989 * head of the list. 1990 */ 1991 if (old_value == new_agino) { 1992 xfs_buf_mark_corrupt(agibp); 1993 return -EFSCORRUPTED; 1994 } 1995 1996 agi->agi_unlinked[bucket_index] = cpu_to_be32(new_agino); 1997 offset = offsetof(struct xfs_agi, agi_unlinked) + 1998 (sizeof(xfs_agino_t) * bucket_index); 1999 xfs_trans_log_buf(tp, agibp, offset, offset + sizeof(xfs_agino_t) - 1); 2000 return 0; 2001 } 2002 2003 /* Set an on-disk inode's next_unlinked pointer. */ 2004 STATIC void 2005 xfs_iunlink_update_dinode( 2006 struct xfs_trans *tp, 2007 struct xfs_perag *pag, 2008 xfs_agino_t agino, 2009 struct xfs_buf *ibp, 2010 struct xfs_dinode *dip, 2011 struct xfs_imap *imap, 2012 xfs_agino_t next_agino) 2013 { 2014 struct xfs_mount *mp = tp->t_mountp; 2015 int offset; 2016 2017 ASSERT(xfs_verify_agino_or_null(mp, pag->pag_agno, next_agino)); 2018 2019 trace_xfs_iunlink_update_dinode(mp, pag->pag_agno, agino, 2020 be32_to_cpu(dip->di_next_unlinked), next_agino); 2021 2022 dip->di_next_unlinked = cpu_to_be32(next_agino); 2023 offset = imap->im_boffset + 2024 offsetof(struct xfs_dinode, di_next_unlinked); 2025 2026 /* need to recalc the inode CRC if appropriate */ 2027 xfs_dinode_calc_crc(mp, dip); 2028 xfs_trans_inode_buf(tp, ibp); 2029 xfs_trans_log_buf(tp, ibp, offset, offset + sizeof(xfs_agino_t) - 1); 2030 } 2031 2032 /* Set an in-core inode's unlinked pointer and return the old value. */ 2033 STATIC int 2034 xfs_iunlink_update_inode( 2035 struct xfs_trans *tp, 2036 struct xfs_inode *ip, 2037 struct xfs_perag *pag, 2038 xfs_agino_t next_agino, 2039 xfs_agino_t *old_next_agino) 2040 { 2041 struct xfs_mount *mp = tp->t_mountp; 2042 struct xfs_dinode *dip; 2043 struct xfs_buf *ibp; 2044 xfs_agino_t old_value; 2045 int error; 2046 2047 ASSERT(xfs_verify_agino_or_null(mp, pag->pag_agno, next_agino)); 2048 2049 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &ibp); 2050 if (error) 2051 return error; 2052 dip = xfs_buf_offset(ibp, ip->i_imap.im_boffset); 2053 2054 /* Make sure the old pointer isn't garbage. */ 2055 old_value = be32_to_cpu(dip->di_next_unlinked); 2056 if (!xfs_verify_agino_or_null(mp, pag->pag_agno, old_value)) { 2057 xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, dip, 2058 sizeof(*dip), __this_address); 2059 error = -EFSCORRUPTED; 2060 goto out; 2061 } 2062 2063 /* 2064 * Since we're updating a linked list, we should never find that the 2065 * current pointer is the same as the new value, unless we're 2066 * terminating the list. 2067 */ 2068 *old_next_agino = old_value; 2069 if (old_value == next_agino) { 2070 if (next_agino != NULLAGINO) { 2071 xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, 2072 dip, sizeof(*dip), __this_address); 2073 error = -EFSCORRUPTED; 2074 } 2075 goto out; 2076 } 2077 2078 /* Ok, update the new pointer. 
*/ 2079 xfs_iunlink_update_dinode(tp, pag, XFS_INO_TO_AGINO(mp, ip->i_ino), 2080 ibp, dip, &ip->i_imap, next_agino); 2081 return 0; 2082 out: 2083 xfs_trans_brelse(tp, ibp); 2084 return error; 2085 } 2086 2087 /* 2088 * This is called when the inode's link count has gone to 0 or we are creating 2089 * a tmpfile via O_TMPFILE. The inode @ip must have nlink == 0. 2090 * 2091 * We place the on-disk inode on a list in the AGI. It will be pulled from this 2092 * list when the inode is freed. 2093 */ 2094 STATIC int 2095 xfs_iunlink( 2096 struct xfs_trans *tp, 2097 struct xfs_inode *ip) 2098 { 2099 struct xfs_mount *mp = tp->t_mountp; 2100 struct xfs_perag *pag; 2101 struct xfs_agi *agi; 2102 struct xfs_buf *agibp; 2103 xfs_agino_t next_agino; 2104 xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ip->i_ino); 2105 short bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS; 2106 int error; 2107 2108 ASSERT(VFS_I(ip)->i_nlink == 0); 2109 ASSERT(VFS_I(ip)->i_mode != 0); 2110 trace_xfs_iunlink(ip); 2111 2112 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); 2113 2114 /* Get the agi buffer first. It ensures lock ordering on the list. */ 2115 error = xfs_read_agi(mp, tp, pag->pag_agno, &agibp); 2116 if (error) 2117 goto out; 2118 agi = agibp->b_addr; 2119 2120 /* 2121 * Get the index into the agi hash table for the list this inode will 2122 * go on. Make sure the pointer isn't garbage and that this inode 2123 * isn't already on the list. 2124 */ 2125 next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]); 2126 if (next_agino == agino || 2127 !xfs_verify_agino_or_null(mp, pag->pag_agno, next_agino)) { 2128 xfs_buf_mark_corrupt(agibp); 2129 error = -EFSCORRUPTED; 2130 goto out; 2131 } 2132 2133 if (next_agino != NULLAGINO) { 2134 xfs_agino_t old_agino; 2135 2136 /* 2137 * There is already another inode in the bucket, so point this 2138 * inode to the current head of the list. 2139 */ 2140 error = xfs_iunlink_update_inode(tp, ip, pag, next_agino, 2141 &old_agino); 2142 if (error) 2143 goto out; 2144 ASSERT(old_agino == NULLAGINO); 2145 2146 /* 2147 * agino has been unlinked, add a backref from the next inode 2148 * back to agino. 2149 */ 2150 error = xfs_iunlink_add_backref(pag, agino, next_agino); 2151 if (error) 2152 goto out; 2153 } 2154 2155 /* Point the head of the list to point to this inode. */ 2156 error = xfs_iunlink_update_bucket(tp, pag, agibp, bucket_index, agino); 2157 out: 2158 xfs_perag_put(pag); 2159 return error; 2160 } 2161 2162 /* Return the imap, dinode pointer, and buffer for an inode. */ 2163 STATIC int 2164 xfs_iunlink_map_ino( 2165 struct xfs_trans *tp, 2166 xfs_agnumber_t agno, 2167 xfs_agino_t agino, 2168 struct xfs_imap *imap, 2169 struct xfs_dinode **dipp, 2170 struct xfs_buf **bpp) 2171 { 2172 struct xfs_mount *mp = tp->t_mountp; 2173 int error; 2174 2175 imap->im_blkno = 0; 2176 error = xfs_imap(mp, tp, XFS_AGINO_TO_INO(mp, agno, agino), imap, 0); 2177 if (error) { 2178 xfs_warn(mp, "%s: xfs_imap returned error %d.", 2179 __func__, error); 2180 return error; 2181 } 2182 2183 error = xfs_imap_to_bp(mp, tp, imap, bpp); 2184 if (error) { 2185 xfs_warn(mp, "%s: xfs_imap_to_bp returned error %d.", 2186 __func__, error); 2187 return error; 2188 } 2189 2190 *dipp = xfs_buf_offset(*bpp, imap->im_boffset); 2191 return 0; 2192 } 2193 2194 /* 2195 * Walk the unlinked chain from @head_agino until we find the inode that 2196 * points to @target_agino. Return the inode number, map, dinode pointer, 2197 * and inode cluster buffer of that inode as @agino, @imap, @dipp, and @bpp. 
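 *
 * For example (invented numbers), walking the chain 103 -> 58 -> 37 with
 * @head_agino = 103 and @target_agino = 37 returns @agino = 58 together with
 * the mapping and cluster buffer for inode 58, since 58.next_unlinked == 37.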
2198 * 2199 * @tp, @pag, @head_agino, and @target_agino are input parameters. 2200 * @agino, @imap, @dipp, and @bpp are all output parameters. 2201 * 2202 * Do not call this function if @target_agino is the head of the list. 2203 */ 2204 STATIC int 2205 xfs_iunlink_map_prev( 2206 struct xfs_trans *tp, 2207 struct xfs_perag *pag, 2208 xfs_agino_t head_agino, 2209 xfs_agino_t target_agino, 2210 xfs_agino_t *agino, 2211 struct xfs_imap *imap, 2212 struct xfs_dinode **dipp, 2213 struct xfs_buf **bpp) 2214 { 2215 struct xfs_mount *mp = tp->t_mountp; 2216 xfs_agino_t next_agino; 2217 int error; 2218 2219 ASSERT(head_agino != target_agino); 2220 *bpp = NULL; 2221 2222 /* See if our backref cache can find it faster. */ 2223 *agino = xfs_iunlink_lookup_backref(pag, target_agino); 2224 if (*agino != NULLAGINO) { 2225 error = xfs_iunlink_map_ino(tp, pag->pag_agno, *agino, imap, 2226 dipp, bpp); 2227 if (error) 2228 return error; 2229 2230 if (be32_to_cpu((*dipp)->di_next_unlinked) == target_agino) 2231 return 0; 2232 2233 /* 2234 * If we get here the cache contents were corrupt, so drop the 2235 * buffer and fall back to walking the bucket list. 2236 */ 2237 xfs_trans_brelse(tp, *bpp); 2238 *bpp = NULL; 2239 WARN_ON_ONCE(1); 2240 } 2241 2242 trace_xfs_iunlink_map_prev_fallback(mp, pag->pag_agno); 2243 2244 /* Otherwise, walk the entire bucket until we find it. */ 2245 next_agino = head_agino; 2246 while (next_agino != target_agino) { 2247 xfs_agino_t unlinked_agino; 2248 2249 if (*bpp) 2250 xfs_trans_brelse(tp, *bpp); 2251 2252 *agino = next_agino; 2253 error = xfs_iunlink_map_ino(tp, pag->pag_agno, next_agino, imap, 2254 dipp, bpp); 2255 if (error) 2256 return error; 2257 2258 unlinked_agino = be32_to_cpu((*dipp)->di_next_unlinked); 2259 /* 2260 * Make sure this pointer is valid and doesn't point back at 2261 * the inode we just read (an obvious infinite loop). 2262 */ 2263 if (!xfs_verify_agino(mp, pag->pag_agno, unlinked_agino) || 2264 next_agino == unlinked_agino) { 2265 XFS_CORRUPTION_ERROR(__func__, 2266 XFS_ERRLEVEL_LOW, mp, 2267 *dipp, sizeof(**dipp)); 2268 error = -EFSCORRUPTED; 2269 return error; 2270 } 2271 next_agino = unlinked_agino; 2272 } 2273 2274 return 0; 2275 } 2276 2277 /* 2278 * Pull the on-disk inode from the AGI unlinked list. 2279 */ 2280 STATIC int 2281 xfs_iunlink_remove( 2282 struct xfs_trans *tp, 2283 struct xfs_perag *pag, 2284 struct xfs_inode *ip) 2285 { 2286 struct xfs_mount *mp = tp->t_mountp; 2287 struct xfs_agi *agi; 2288 struct xfs_buf *agibp; 2289 struct xfs_buf *last_ibp; 2290 struct xfs_dinode *last_dip = NULL; 2291 xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ip->i_ino); 2292 xfs_agino_t next_agino; 2293 xfs_agino_t head_agino; 2294 short bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS; 2295 int error; 2296 2297 trace_xfs_iunlink_remove(ip); 2298 2299 /* Get the agi buffer first. It ensures lock ordering on the list. */ 2300 error = xfs_read_agi(mp, tp, pag->pag_agno, &agibp); 2301 if (error) 2302 return error; 2303 agi = agibp->b_addr; 2304 2305 /* 2306 * Get the index into the agi hash table for the list this inode is 2307 * on. Make sure the head pointer isn't garbage.
2308 */ 2309 head_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]); 2310 if (!xfs_verify_agino(mp, pag->pag_agno, head_agino)) { 2311 XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, 2312 agi, sizeof(*agi)); 2313 return -EFSCORRUPTED; 2314 } 2315 2316 /* 2317 * Set our inode's next_unlinked pointer to NULL and then return 2318 * the old pointer value so that we can update whatever was previous 2319 * to us in the list to point to whatever was next in the list. 2320 */ 2321 error = xfs_iunlink_update_inode(tp, ip, pag, NULLAGINO, &next_agino); 2322 if (error) 2323 return error; 2324 2325 /* 2326 * If there was a backref pointing from the next inode back to this 2327 * one, remove it because we've removed this inode from the list. 2328 * 2329 * Later, if this inode was in the middle of the list we'll update 2330 * this inode's backref to point from the next inode. 2331 */ 2332 if (next_agino != NULLAGINO) { 2333 error = xfs_iunlink_change_backref(pag, next_agino, NULLAGINO); 2334 if (error) 2335 return error; 2336 } 2337 2338 if (head_agino != agino) { 2339 struct xfs_imap imap; 2340 xfs_agino_t prev_agino; 2341 2342 /* We need to search the list for the inode being freed. */ 2343 error = xfs_iunlink_map_prev(tp, pag, head_agino, agino, 2344 &prev_agino, &imap, &last_dip, &last_ibp); 2345 if (error) 2346 return error; 2347 2348 /* Point the previous inode on the list to the next inode. */ 2349 xfs_iunlink_update_dinode(tp, pag, prev_agino, last_ibp, 2350 last_dip, &imap, next_agino); 2351 2352 /* 2353 * Now we deal with the backref for this inode. If this inode 2354 * pointed at a real inode, change the backref that pointed to 2355 * us to point to our old next. If this inode was the end of 2356 * the list, delete the backref that pointed to us. Note that 2357 * change_backref takes care of deleting the backref if 2358 * next_agino is NULLAGINO. 2359 */ 2360 return xfs_iunlink_change_backref(agibp->b_pag, agino, 2361 next_agino); 2362 } 2363 2364 /* Point the head of the list to the next unlinked inode. */ 2365 return xfs_iunlink_update_bucket(tp, pag, agibp, bucket_index, 2366 next_agino); 2367 } 2368 2369 /* 2370 * Look up the inode number specified and if it is not already marked XFS_ISTALE 2371 * mark it stale. We should only find clean inodes in this lookup that aren't 2372 * already stale. 2373 */ 2374 static void 2375 xfs_ifree_mark_inode_stale( 2376 struct xfs_perag *pag, 2377 struct xfs_inode *free_ip, 2378 xfs_ino_t inum) 2379 { 2380 struct xfs_mount *mp = pag->pag_mount; 2381 struct xfs_inode_log_item *iip; 2382 struct xfs_inode *ip; 2383 2384 retry: 2385 rcu_read_lock(); 2386 ip = radix_tree_lookup(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, inum)); 2387 2388 /* Inode not in memory, nothing to do */ 2389 if (!ip) { 2390 rcu_read_unlock(); 2391 return; 2392 } 2393 2394 /* 2395 * because this is an RCU protected lookup, we could find a recently 2396 * freed or even reallocated inode during the lookup. We need to check 2397 * under the i_flags_lock for a valid inode here. Skip it if it is not 2398 * valid, the wrong inode or stale. 2399 */ 2400 spin_lock(&ip->i_flags_lock); 2401 if (ip->i_ino != inum || __xfs_iflags_test(ip, XFS_ISTALE)) 2402 goto out_iflags_unlock; 2403 2404 /* 2405 * Don't try to lock/unlock the current inode, but we _cannot_ skip the 2406 * other inodes that we did not find in the list attached to the buffer 2407 * and are not already marked stale. If we can't lock it, back off and 2408 * retry. 
2409 */ 2410 if (ip != free_ip) { 2411 if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) { 2412 spin_unlock(&ip->i_flags_lock); 2413 rcu_read_unlock(); 2414 delay(1); 2415 goto retry; 2416 } 2417 } 2418 ip->i_flags |= XFS_ISTALE; 2419 2420 /* 2421 * If the inode is flushing, it is already attached to the buffer. All 2422 * we needed to do here is mark the inode stale so buffer IO completion 2423 * will remove it from the AIL. 2424 */ 2425 iip = ip->i_itemp; 2426 if (__xfs_iflags_test(ip, XFS_IFLUSHING)) { 2427 ASSERT(!list_empty(&iip->ili_item.li_bio_list)); 2428 ASSERT(iip->ili_last_fields); 2429 goto out_iunlock; 2430 } 2431 2432 /* 2433 * Inodes not attached to the buffer can be released immediately. 2434 * Everything else has to go through xfs_iflush_abort() on journal 2435 * commit as the flock synchronises removal of the inode from the 2436 * cluster buffer against inode reclaim. 2437 */ 2438 if (!iip || list_empty(&iip->ili_item.li_bio_list)) 2439 goto out_iunlock; 2440 2441 __xfs_iflags_set(ip, XFS_IFLUSHING); 2442 spin_unlock(&ip->i_flags_lock); 2443 rcu_read_unlock(); 2444 2445 /* we have a dirty inode in memory that has not yet been flushed. */ 2446 spin_lock(&iip->ili_lock); 2447 iip->ili_last_fields = iip->ili_fields; 2448 iip->ili_fields = 0; 2449 iip->ili_fsync_fields = 0; 2450 spin_unlock(&iip->ili_lock); 2451 ASSERT(iip->ili_last_fields); 2452 2453 if (ip != free_ip) 2454 xfs_iunlock(ip, XFS_ILOCK_EXCL); 2455 return; 2456 2457 out_iunlock: 2458 if (ip != free_ip) 2459 xfs_iunlock(ip, XFS_ILOCK_EXCL); 2460 out_iflags_unlock: 2461 spin_unlock(&ip->i_flags_lock); 2462 rcu_read_unlock(); 2463 } 2464 2465 /* 2466 * A big issue when freeing the inode cluster is that we _cannot_ skip any 2467 * inodes that are in memory - they all must be marked stale and attached to 2468 * the cluster buffer. 2469 */ 2470 static int 2471 xfs_ifree_cluster( 2472 struct xfs_trans *tp, 2473 struct xfs_perag *pag, 2474 struct xfs_inode *free_ip, 2475 struct xfs_icluster *xic) 2476 { 2477 struct xfs_mount *mp = free_ip->i_mount; 2478 struct xfs_ino_geometry *igeo = M_IGEO(mp); 2479 struct xfs_buf *bp; 2480 xfs_daddr_t blkno; 2481 xfs_ino_t inum = xic->first_ino; 2482 int nbufs; 2483 int i, j; 2484 int ioffset; 2485 int error; 2486 2487 nbufs = igeo->ialloc_blks / igeo->blocks_per_cluster; 2488 2489 for (j = 0; j < nbufs; j++, inum += igeo->inodes_per_cluster) { 2490 /* 2491 * The allocation bitmap tells us which inodes of the chunk were 2492 * physically allocated. Skip the cluster if an inode falls into 2493 * a sparse region. 2494 */ 2495 ioffset = inum - xic->first_ino; 2496 if ((xic->alloc & XFS_INOBT_MASK(ioffset)) == 0) { 2497 ASSERT(ioffset % igeo->inodes_per_cluster == 0); 2498 continue; 2499 } 2500 2501 blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum), 2502 XFS_INO_TO_AGBNO(mp, inum)); 2503 2504 /* 2505 * We obtain and lock the backing buffer first in the process 2506 * here to ensure dirty inodes attached to the buffer remain in 2507 * the flushing state while we mark them stale. 2508 * 2509 * If we scan the in-memory inodes first, then buffer IO can 2510 * complete before we get a lock on it, and hence we may fail 2511 * to mark all the active inodes on the buffer stale. 2512 */ 2513 error = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno, 2514 mp->m_bsize * igeo->blocks_per_cluster, 2515 XBF_UNMAPPED, &bp); 2516 if (error) 2517 return error; 2518 2519 /* 2520 * This buffer may not have been correctly initialised as we 2521 * didn't read it from disk. 
That's not important because we are 2522 * only using it to mark the buffer as stale in the log, and to 2523 * attach stale cached inodes on it. That means it will never be 2524 * dispatched for IO. If it is, we want to know about it, and we 2525 * want it to fail. We can achieve this by adding a write 2526 * verifier to the buffer. 2527 */ 2528 bp->b_ops = &xfs_inode_buf_ops; 2529 2530 /* 2531 * Now we need to set all the cached clean inodes as XFS_ISTALE, 2532 * too. This requires lookups, and will skip inodes that we've 2533 * already marked XFS_ISTALE. 2534 */ 2535 for (i = 0; i < igeo->inodes_per_cluster; i++) 2536 xfs_ifree_mark_inode_stale(pag, free_ip, inum + i); 2537 2538 xfs_trans_stale_inode_buf(tp, bp); 2539 xfs_trans_binval(tp, bp); 2540 } 2541 return 0; 2542 } 2543 2544 /* 2545 * This is called to return an inode to the inode free list. 2546 * The inode should already be truncated to 0 length and have 2547 * no pages associated with it. This routine also assumes that 2548 * the inode is already a part of the transaction. 2549 * 2550 * The on-disk copy of the inode will have been added to the list 2551 * of unlinked inodes in the AGI. We need to remove the inode from 2552 * that list atomically with respect to freeing it here. 2553 */ 2554 int 2555 xfs_ifree( 2556 struct xfs_trans *tp, 2557 struct xfs_inode *ip) 2558 { 2559 struct xfs_mount *mp = ip->i_mount; 2560 struct xfs_perag *pag; 2561 struct xfs_icluster xic = { 0 }; 2562 struct xfs_inode_log_item *iip = ip->i_itemp; 2563 int error; 2564 2565 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 2566 ASSERT(VFS_I(ip)->i_nlink == 0); 2567 ASSERT(ip->i_df.if_nextents == 0); 2568 ASSERT(ip->i_disk_size == 0 || !S_ISREG(VFS_I(ip)->i_mode)); 2569 ASSERT(ip->i_nblocks == 0); 2570 2571 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); 2572 2573 /* 2574 * Pull the on-disk inode from the AGI unlinked list. 2575 */ 2576 error = xfs_iunlink_remove(tp, pag, ip); 2577 if (error) 2578 goto out; 2579 2580 error = xfs_difree(tp, pag, ip->i_ino, &xic); 2581 if (error) 2582 goto out; 2583 2584 /* 2585 * Free any local-format data sitting around before we reset the 2586 * data fork to extents format. Note that the attr fork data has 2587 * already been freed by xfs_attr_inactive. 2588 */ 2589 if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL) { 2590 kmem_free(ip->i_df.if_u1.if_data); 2591 ip->i_df.if_u1.if_data = NULL; 2592 ip->i_df.if_bytes = 0; 2593 } 2594 2595 VFS_I(ip)->i_mode = 0; /* mark incore inode as free */ 2596 ip->i_diflags = 0; 2597 ip->i_diflags2 = mp->m_ino_geo.new_diflags2; 2598 ip->i_forkoff = 0; /* mark the attr fork not in use */ 2599 ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS; 2600 if (xfs_iflags_test(ip, XFS_IPRESERVE_DM_FIELDS)) 2601 xfs_iflags_clear(ip, XFS_IPRESERVE_DM_FIELDS); 2602 2603 /* Don't attempt to replay owner changes for a deleted inode */ 2604 spin_lock(&iip->ili_lock); 2605 iip->ili_fields &= ~(XFS_ILOG_AOWNER | XFS_ILOG_DOWNER); 2606 spin_unlock(&iip->ili_lock); 2607 2608 /* 2609 * Bump the generation count so no one will be confused 2610 * by reincarnations of this inode. 2611 */ 2612 VFS_I(ip)->i_generation++; 2613 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 2614 2615 if (xic.deleted) 2616 error = xfs_ifree_cluster(tp, pag, ip, &xic); 2617 out: 2618 xfs_perag_put(pag); 2619 return error; 2620 } 2621 2622 /* 2623 * This is called to unpin an inode.
The caller must have the inode locked 2624 * in at least shared mode so that the buffer cannot be subsequently pinned 2625 * once someone is waiting for it to be unpinned. 2626 */ 2627 static void 2628 xfs_iunpin( 2629 struct xfs_inode *ip) 2630 { 2631 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); 2632 2633 trace_xfs_inode_unpin_nowait(ip, _RET_IP_); 2634 2635 /* Give the log a push to start the unpinning I/O */ 2636 xfs_log_force_seq(ip->i_mount, ip->i_itemp->ili_commit_seq, 0, NULL); 2637 2638 } 2639 2640 static void 2641 __xfs_iunpin_wait( 2642 struct xfs_inode *ip) 2643 { 2644 wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT); 2645 DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT); 2646 2647 xfs_iunpin(ip); 2648 2649 do { 2650 prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE); 2651 if (xfs_ipincount(ip)) 2652 io_schedule(); 2653 } while (xfs_ipincount(ip)); 2654 finish_wait(wq, &wait.wq_entry); 2655 } 2656 2657 void 2658 xfs_iunpin_wait( 2659 struct xfs_inode *ip) 2660 { 2661 if (xfs_ipincount(ip)) 2662 __xfs_iunpin_wait(ip); 2663 } 2664 2665 /* 2666 * Removing an inode from the namespace involves removing the directory entry 2667 * and dropping the link count on the inode. Removing the directory entry can 2668 * result in locking an AGF (directory blocks were freed) and removing a link 2669 * count can result in placing the inode on an unlinked list which results in 2670 * locking an AGI. 2671 * 2672 * The big problem here is that we have an ordering constraint on AGF and AGI 2673 * locking - inode allocation locks the AGI, then can allocate a new extent for 2674 * new inodes, locking the AGF after the AGI. Similarly, freeing the inode 2675 * removes the inode from the unlinked list, requiring that we lock the AGI 2676 * first, and then freeing the inode can result in an inode chunk being freed 2677 * and hence freeing disk space requiring that we lock an AGF. 2678 * 2679 * Hence the ordering that is imposed by other parts of the code is AGI before 2680 * AGF. This means we cannot remove the directory entry before we drop the inode 2681 * reference count and put it on the unlinked list as this results in a lock 2682 * order of AGF then AGI, and this can deadlock against inode allocation and 2683 * freeing. Therefore we must drop the link counts before we remove the 2684 * directory entry. 2685 * 2686 * This is still safe from a transactional point of view - it is not until we 2687 * get to xfs_defer_finish() that we have the possibility of multiple 2688 * transactions in this operation. Hence as long as we remove the directory 2689 * entry and drop the link count in the first transaction of the remove 2690 * operation, there are no transactional constraints on the ordering here. 2691 */ 2692 int 2693 xfs_remove( 2694 xfs_inode_t *dp, 2695 struct xfs_name *name, 2696 xfs_inode_t *ip) 2697 { 2698 xfs_mount_t *mp = dp->i_mount; 2699 xfs_trans_t *tp = NULL; 2700 int is_dir = S_ISDIR(VFS_I(ip)->i_mode); 2701 int error = 0; 2702 uint resblks; 2703 2704 trace_xfs_remove(dp, name); 2705 2706 if (XFS_FORCED_SHUTDOWN(mp)) 2707 return -EIO; 2708 2709 error = xfs_qm_dqattach(dp); 2710 if (error) 2711 goto std_return; 2712 2713 error = xfs_qm_dqattach(ip); 2714 if (error) 2715 goto std_return; 2716 2717 /* 2718 * We try to get the real space reservation first, 2719 * allowing for directory btree deletion(s) implying 2720 * possible bmap insert(s). 
If we can't get the space 2721 * reservation then we use 0 instead, and avoid the bmap 2722 * btree insert(s) in the directory code by trimming the 2723 * LAST block from the directory if a bmap insert would 2724 * otherwise be required. 2725 */ 2726 resblks = XFS_REMOVE_SPACE_RES(mp); 2727 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_remove, resblks, 0, 0, &tp); 2728 if (error == -ENOSPC) { 2729 resblks = 0; 2730 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_remove, 0, 0, 0, 2731 &tp); 2732 } 2733 if (error) { 2734 ASSERT(error != -ENOSPC); 2735 goto std_return; 2736 } 2737 2738 xfs_lock_two_inodes(dp, XFS_ILOCK_EXCL, ip, XFS_ILOCK_EXCL); 2739 2740 xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL); 2741 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); 2742 2743 /* 2744 * If we're removing a directory, perform some additional validation. 2745 */ 2746 if (is_dir) { 2747 ASSERT(VFS_I(ip)->i_nlink >= 2); 2748 if (VFS_I(ip)->i_nlink != 2) { 2749 error = -ENOTEMPTY; 2750 goto out_trans_cancel; 2751 } 2752 if (!xfs_dir_isempty(ip)) { 2753 error = -ENOTEMPTY; 2754 goto out_trans_cancel; 2755 } 2756 2757 /* Drop the link from ip's "..". */ 2758 error = xfs_droplink(tp, dp); 2759 if (error) 2760 goto out_trans_cancel; 2761 2762 /* Drop the "." link from ip to self. */ 2763 error = xfs_droplink(tp, ip); 2764 if (error) 2765 goto out_trans_cancel; 2766 } else { 2767 /* 2768 * When removing a non-directory we need to log the parent 2769 * inode here. For a directory this is done implicitly 2770 * by the xfs_droplink call for the ".." entry. 2771 */ 2772 xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE); 2773 } 2774 xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); 2775 2776 /* Drop the link from dp to ip. */ 2777 error = xfs_droplink(tp, ip); 2778 if (error) 2779 goto out_trans_cancel; 2780 2781 error = xfs_dir_removename(tp, dp, name, ip->i_ino, resblks); 2782 if (error) { 2783 ASSERT(error != -ENOENT); 2784 goto out_trans_cancel; 2785 } 2786 2787 /* 2788 * If this is a synchronous mount, make sure that the 2789 * remove transaction goes to disk before returning to 2790 * the user. 2791 */ 2792 if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) 2793 xfs_trans_set_sync(tp); 2794 2795 error = xfs_trans_commit(tp); 2796 if (error) 2797 goto std_return; 2798 2799 if (is_dir && xfs_inode_is_filestream(ip)) 2800 xfs_filestream_deassociate(ip); 2801 2802 return 0; 2803 2804 out_trans_cancel: 2805 xfs_trans_cancel(tp); 2806 std_return: 2807 return error; 2808 } 2809 2810 /* 2811 * Enter all inodes for a rename transaction into a sorted array. 2812 */ 2813 #define __XFS_SORT_INODES 5 2814 STATIC void 2815 xfs_sort_for_rename( 2816 struct xfs_inode *dp1, /* in: old (source) directory inode */ 2817 struct xfs_inode *dp2, /* in: new (target) directory inode */ 2818 struct xfs_inode *ip1, /* in: inode of old entry */ 2819 struct xfs_inode *ip2, /* in: inode of new entry */ 2820 struct xfs_inode *wip, /* in: whiteout inode */ 2821 struct xfs_inode **i_tab,/* out: sorted array of inodes */ 2822 int *num_inodes) /* in/out: inodes in array */ 2823 { 2824 int i, j; 2825 2826 ASSERT(*num_inodes == __XFS_SORT_INODES); 2827 memset(i_tab, 0, *num_inodes * sizeof(struct xfs_inode *)); 2828 2829 /* 2830 * i_tab contains a list of pointers to inodes. We initialize 2831 * the table here and we'll sort it. We will then use it to 2832 * order the acquisition of the inode locks. 2833 * 2834 * Note that the table may contain duplicates. e.g., dp1 == dp2.
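 *
 * A hedged worked example (inode numbers invented): a same-directory rename
 * over an existing target gives dp1 == dp2, so the table might start out as
 * { 17, 17, 42, 9 } and sort to { 9, 17, 17, 42 }. Sorting by inode number
 * yields a stable global lock order, and it leaves duplicates adjacent so
 * the locking code can avoid taking the same inode's lock twice.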
2835 */ 2836 i = 0; 2837 i_tab[i++] = dp1; 2838 i_tab[i++] = dp2; 2839 i_tab[i++] = ip1; 2840 if (ip2) 2841 i_tab[i++] = ip2; 2842 if (wip) 2843 i_tab[i++] = wip; 2844 *num_inodes = i; 2845 2846 /* 2847 * Sort the elements via bubble sort. (Remember, there are at 2848 * most 5 elements to sort, so this is adequate.) 2849 */ 2850 for (i = 0; i < *num_inodes; i++) { 2851 for (j = 1; j < *num_inodes; j++) { 2852 if (i_tab[j]->i_ino < i_tab[j-1]->i_ino) { 2853 struct xfs_inode *temp = i_tab[j]; 2854 i_tab[j] = i_tab[j-1]; 2855 i_tab[j-1] = temp; 2856 } 2857 } 2858 } 2859 } 2860 2861 static int 2862 xfs_finish_rename( 2863 struct xfs_trans *tp) 2864 { 2865 /* 2866 * If this is a synchronous mount, make sure that the rename transaction 2867 * goes to disk before returning to the user. 2868 */ 2869 if (tp->t_mountp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) 2870 xfs_trans_set_sync(tp); 2871 2872 return xfs_trans_commit(tp); 2873 } 2874 2875 /* 2876 * xfs_cross_rename() 2877 * 2878 * responsible for handling RENAME_EXCHANGE flag in renameat2() syscall 2879 */ 2880 STATIC int 2881 xfs_cross_rename( 2882 struct xfs_trans *tp, 2883 struct xfs_inode *dp1, 2884 struct xfs_name *name1, 2885 struct xfs_inode *ip1, 2886 struct xfs_inode *dp2, 2887 struct xfs_name *name2, 2888 struct xfs_inode *ip2, 2889 int spaceres) 2890 { 2891 int error = 0; 2892 int ip1_flags = 0; 2893 int ip2_flags = 0; 2894 int dp2_flags = 0; 2895 2896 /* Swap inode number for dirent in first parent */ 2897 error = xfs_dir_replace(tp, dp1, name1, ip2->i_ino, spaceres); 2898 if (error) 2899 goto out_trans_abort; 2900 2901 /* Swap inode number for dirent in second parent */ 2902 error = xfs_dir_replace(tp, dp2, name2, ip1->i_ino, spaceres); 2903 if (error) 2904 goto out_trans_abort; 2905 2906 /* 2907 * If we're renaming one or more directories across different parents, 2908 * update the respective ".." entries (and link counts) to match the new 2909 * parents. 2910 */ 2911 if (dp1 != dp2) { 2912 dp2_flags = XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG; 2913 2914 if (S_ISDIR(VFS_I(ip2)->i_mode)) { 2915 error = xfs_dir_replace(tp, ip2, &xfs_name_dotdot, 2916 dp1->i_ino, spaceres); 2917 if (error) 2918 goto out_trans_abort; 2919 2920 /* transfer ip2 ".." reference to dp1 */ 2921 if (!S_ISDIR(VFS_I(ip1)->i_mode)) { 2922 error = xfs_droplink(tp, dp2); 2923 if (error) 2924 goto out_trans_abort; 2925 xfs_bumplink(tp, dp1); 2926 } 2927 2928 /* 2929 * Although ip1 isn't changed here, userspace needs 2930 * to be warned about the change, so that applications 2931 * relying on it (like backup ones), will properly 2932 * notify the change 2933 */ 2934 ip1_flags |= XFS_ICHGTIME_CHG; 2935 ip2_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG; 2936 } 2937 2938 if (S_ISDIR(VFS_I(ip1)->i_mode)) { 2939 error = xfs_dir_replace(tp, ip1, &xfs_name_dotdot, 2940 dp2->i_ino, spaceres); 2941 if (error) 2942 goto out_trans_abort; 2943 2944 /* transfer ip1 ".." 
reference to dp2 */ 2945 if (!S_ISDIR(VFS_I(ip2)->i_mode)) { 2946 error = xfs_droplink(tp, dp1); 2947 if (error) 2948 goto out_trans_abort; 2949 xfs_bumplink(tp, dp2); 2950 } 2951 2952 /* 2953 * Although ip2 isn't changed here, userspace needs 2954 * to be warned about the change, so that applications 2955 * relying on it (like backup tools) will properly 2956 * notice the change. 2957 */ 2958 ip1_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG; 2959 ip2_flags |= XFS_ICHGTIME_CHG; 2960 } 2961 } 2962 2963 if (ip1_flags) { 2964 xfs_trans_ichgtime(tp, ip1, ip1_flags); 2965 xfs_trans_log_inode(tp, ip1, XFS_ILOG_CORE); 2966 } 2967 if (ip2_flags) { 2968 xfs_trans_ichgtime(tp, ip2, ip2_flags); 2969 xfs_trans_log_inode(tp, ip2, XFS_ILOG_CORE); 2970 } 2971 if (dp2_flags) { 2972 xfs_trans_ichgtime(tp, dp2, dp2_flags); 2973 xfs_trans_log_inode(tp, dp2, XFS_ILOG_CORE); 2974 } 2975 xfs_trans_ichgtime(tp, dp1, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); 2976 xfs_trans_log_inode(tp, dp1, XFS_ILOG_CORE); 2977 return xfs_finish_rename(tp); 2978 2979 out_trans_abort: 2980 xfs_trans_cancel(tp); 2981 return error; 2982 } 2983 2984 /* 2985 * xfs_rename_alloc_whiteout() 2986 * 2987 * Return a referenced, unlinked, unlocked inode that can be used as a 2988 * whiteout in a rename transaction. We use a tmpfile inode here so that if we 2989 * crash between allocating the inode and linking it into the rename 2990 * transaction, recovery will free the inode and we won't leak it. 2991 */ 2992 static int 2993 xfs_rename_alloc_whiteout( 2994 struct user_namespace *mnt_userns, 2995 struct xfs_inode *dp, 2996 struct xfs_inode **wip) 2997 { 2998 struct xfs_inode *tmpfile; 2999 int error; 3000 3001 error = xfs_create_tmpfile(mnt_userns, dp, S_IFCHR | WHITEOUT_MODE, 3002 &tmpfile); 3003 if (error) 3004 return error; 3005 3006 /* 3007 * Prepare the tmpfile inode as if it were created through the VFS. 3008 * Complete the inode setup and flag it as linkable. nlink is already 3009 * zero, so we can skip the drop_nlink. 3010 */ 3011 xfs_setup_iops(tmpfile); 3012 xfs_finish_inode_setup(tmpfile); 3013 VFS_I(tmpfile)->i_state |= I_LINKABLE; 3014 3015 *wip = tmpfile; 3016 return 0; 3017 } 3018 3019 /* 3020 * xfs_rename 3021 */ 3022 int 3023 xfs_rename( 3024 struct user_namespace *mnt_userns, 3025 struct xfs_inode *src_dp, 3026 struct xfs_name *src_name, 3027 struct xfs_inode *src_ip, 3028 struct xfs_inode *target_dp, 3029 struct xfs_name *target_name, 3030 struct xfs_inode *target_ip, 3031 unsigned int flags) 3032 { 3033 struct xfs_mount *mp = src_dp->i_mount; 3034 struct xfs_trans *tp; 3035 struct xfs_inode *wip = NULL; /* whiteout inode */ 3036 struct xfs_inode *inodes[__XFS_SORT_INODES]; 3037 int i; 3038 int num_inodes = __XFS_SORT_INODES; 3039 bool new_parent = (src_dp != target_dp); 3040 bool src_is_directory = S_ISDIR(VFS_I(src_ip)->i_mode); 3041 int spaceres; 3042 int error; 3043 3044 trace_xfs_rename(src_dp, target_dp, src_name, target_name); 3045 3046 if ((flags & RENAME_EXCHANGE) && !target_ip) 3047 return -EINVAL; 3048 3049 /* 3050 * If we are doing a whiteout operation, allocate the whiteout inode 3051 * we will be placing at the target and ensure the type is set 3052 * appropriately.
3053 */ 3054 if (flags & RENAME_WHITEOUT) { 3055 ASSERT(!(flags & (RENAME_NOREPLACE | RENAME_EXCHANGE))); 3056 error = xfs_rename_alloc_whiteout(mnt_userns, target_dp, &wip); 3057 if (error) 3058 return error; 3059 3060 /* setup target dirent info as whiteout */ 3061 src_name->type = XFS_DIR3_FT_CHRDEV; 3062 } 3063 3064 xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip, wip, 3065 inodes, &num_inodes); 3066 3067 spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len); 3068 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, spaceres, 0, 0, &tp); 3069 if (error == -ENOSPC) { 3070 spaceres = 0; 3071 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, 0, 0, 0, 3072 &tp); 3073 } 3074 if (error) 3075 goto out_release_wip; 3076 3077 /* 3078 * Attach the dquots to the inodes 3079 */ 3080 error = xfs_qm_vop_rename_dqattach(inodes); 3081 if (error) 3082 goto out_trans_cancel; 3083 3084 /* 3085 * Lock all the participating inodes. Depending upon whether 3086 * the target_name exists in the target directory, and 3087 * whether the target directory is the same as the source 3088 * directory, we can lock from 2 to 4 inodes. 3089 */ 3090 xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL); 3091 3092 /* 3093 * Join all the inodes to the transaction. From this point on, 3094 * we can rely on either trans_commit or trans_cancel to unlock 3095 * them. 3096 */ 3097 xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL); 3098 if (new_parent) 3099 xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL); 3100 xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL); 3101 if (target_ip) 3102 xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL); 3103 if (wip) 3104 xfs_trans_ijoin(tp, wip, XFS_ILOCK_EXCL); 3105 3106 /* 3107 * If we are using project inheritance, we only allow renames 3108 * into our tree when the project IDs are the same; else the 3109 * tree quota mechanism would be circumvented. 3110 */ 3111 if (unlikely((target_dp->i_diflags & XFS_DIFLAG_PROJINHERIT) && 3112 target_dp->i_projid != src_ip->i_projid)) { 3113 error = -EXDEV; 3114 goto out_trans_cancel; 3115 } 3116 3117 /* RENAME_EXCHANGE is unique from here on. */ 3118 if (flags & RENAME_EXCHANGE) 3119 return xfs_cross_rename(tp, src_dp, src_name, src_ip, 3120 target_dp, target_name, target_ip, 3121 spaceres); 3122 3123 /* 3124 * Check for expected errors before we dirty the transaction 3125 * so we can return an error without a transaction abort. 3126 * 3127 * Extent count overflow check: 3128 * 3129 * From the perspective of src_dp, a rename operation is essentially a 3130 * directory entry remove operation. Hence the only place where we check 3131 * for extent count overflow for src_dp is in 3132 * xfs_bmap_del_extent_real(). xfs_bmap_del_extent_real() returns 3133 * -ENOSPC when it detects a possible extent count overflow and in 3134 * response, the higher layers of directory handling code do the 3135 * following: 3136 * 1. Data/Free blocks: XFS lets these blocks linger until a 3137 * future remove operation removes them. 3138 * 2. Dabtree blocks: XFS swaps the blocks with the last block in the 3139 * Leaf space and unmaps the last block. 3140 * 3141 * For target_dp, there are two cases depending on whether the 3142 * destination directory entry exists or not. 3143 * 3144 * When destination directory entry does not exist (i.e. target_ip == 3145 * NULL), extent count overflow check is performed only when transaction 3146 * has a non-zero sized space reservation associated with it. 
With a 3147 * zero-sized space reservation, XFS allows a rename operation to 3148 * continue only when the directory has sufficient free space in its 3149 * data/leaf/free space blocks to hold the new entry. 3150 * 3151 * When destination directory entry exists (i.e. target_ip != NULL), all 3152 * we need to do is change the inode number associated with the already 3153 * existing entry. Hence there is no need to perform an extent count 3154 * overflow check. 3155 */ 3156 if (target_ip == NULL) { 3157 /* 3158 * If there's no space reservation, check the entry will 3159 * fit before actually inserting it. 3160 */ 3161 if (!spaceres) { 3162 error = xfs_dir_canenter(tp, target_dp, target_name); 3163 if (error) 3164 goto out_trans_cancel; 3165 } else { 3166 error = xfs_iext_count_may_overflow(target_dp, 3167 XFS_DATA_FORK, 3168 XFS_IEXT_DIR_MANIP_CNT(mp)); 3169 if (error) 3170 goto out_trans_cancel; 3171 } 3172 } else { 3173 /* 3174 * If target exists and it's a directory, check that whether 3175 * it can be destroyed. 3176 */ 3177 if (S_ISDIR(VFS_I(target_ip)->i_mode) && 3178 (!xfs_dir_isempty(target_ip) || 3179 (VFS_I(target_ip)->i_nlink > 2))) { 3180 error = -EEXIST; 3181 goto out_trans_cancel; 3182 } 3183 } 3184 3185 /* 3186 * Lock the AGI buffers we need to handle bumping the nlink of the 3187 * whiteout inode off the unlinked list and to handle dropping the 3188 * nlink of the target inode. Per locking order rules, do this in 3189 * increasing AG order and before directory block allocation tries to 3190 * grab AGFs because we grab AGIs before AGFs. 3191 * 3192 * The (vfs) caller must ensure that if src is a directory then 3193 * target_ip is either null or an empty directory. 3194 */ 3195 for (i = 0; i < num_inodes && inodes[i] != NULL; i++) { 3196 if (inodes[i] == wip || 3197 (inodes[i] == target_ip && 3198 (VFS_I(target_ip)->i_nlink == 1 || src_is_directory))) { 3199 struct xfs_buf *bp; 3200 xfs_agnumber_t agno; 3201 3202 agno = XFS_INO_TO_AGNO(mp, inodes[i]->i_ino); 3203 error = xfs_read_agi(mp, tp, agno, &bp); 3204 if (error) 3205 goto out_trans_cancel; 3206 } 3207 } 3208 3209 /* 3210 * Directory entry creation below may acquire the AGF. Remove 3211 * the whiteout from the unlinked list first to preserve correct 3212 * AGI/AGF locking order. This dirties the transaction so failures 3213 * after this point will abort and log recovery will clean up the 3214 * mess. 3215 * 3216 * For whiteouts, we need to bump the link count on the whiteout 3217 * inode. After this point, we have a real link, clear the tmpfile 3218 * state flag from the inode so it doesn't accidentally get misused 3219 * in future. 3220 */ 3221 if (wip) { 3222 struct xfs_perag *pag; 3223 3224 ASSERT(VFS_I(wip)->i_nlink == 0); 3225 3226 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, wip->i_ino)); 3227 error = xfs_iunlink_remove(tp, pag, wip); 3228 xfs_perag_put(pag); 3229 if (error) 3230 goto out_trans_cancel; 3231 3232 xfs_bumplink(tp, wip); 3233 VFS_I(wip)->i_state &= ~I_LINKABLE; 3234 } 3235 3236 /* 3237 * Set up the target. 3238 */ 3239 if (target_ip == NULL) { 3240 /* 3241 * If target does not exist and the rename crosses 3242 * directories, adjust the target directory link count 3243 * to account for the ".." reference from the new entry. 
3244 */ 3245 error = xfs_dir_createname(tp, target_dp, target_name, 3246 src_ip->i_ino, spaceres); 3247 if (error) 3248 goto out_trans_cancel; 3249 3250 xfs_trans_ichgtime(tp, target_dp, 3251 XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); 3252 3253 if (new_parent && src_is_directory) { 3254 xfs_bumplink(tp, target_dp); 3255 } 3256 } else { /* target_ip != NULL */ 3257 /* 3258 * Link the source inode under the target name. 3259 * If the source inode is a directory and we are moving 3260 * it across directories, its ".." entry will be 3261 * inconsistent until we replace that down below. 3262 * 3263 * In case there is already an entry with the same 3264 * name at the destination directory, remove it first. 3265 */ 3266 error = xfs_dir_replace(tp, target_dp, target_name, 3267 src_ip->i_ino, spaceres); 3268 if (error) 3269 goto out_trans_cancel; 3270 3271 xfs_trans_ichgtime(tp, target_dp, 3272 XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); 3273 3274 /* 3275 * Decrement the link count on the target since the target 3276 * dir no longer points to it. 3277 */ 3278 error = xfs_droplink(tp, target_ip); 3279 if (error) 3280 goto out_trans_cancel; 3281 3282 if (src_is_directory) { 3283 /* 3284 * Drop the link from the old "." entry. 3285 */ 3286 error = xfs_droplink(tp, target_ip); 3287 if (error) 3288 goto out_trans_cancel; 3289 } 3290 } /* target_ip != NULL */ 3291 3292 /* 3293 * Remove the source. 3294 */ 3295 if (new_parent && src_is_directory) { 3296 /* 3297 * Rewrite the ".." entry to point to the new 3298 * directory. 3299 */ 3300 error = xfs_dir_replace(tp, src_ip, &xfs_name_dotdot, 3301 target_dp->i_ino, spaceres); 3302 ASSERT(error != -EEXIST); 3303 if (error) 3304 goto out_trans_cancel; 3305 } 3306 3307 /* 3308 * We always want to hit the ctime on the source inode. 3309 * 3310 * This isn't strictly required by the standards since the source 3311 * inode isn't really being changed, but old unix file systems did 3312 * it and some incremental backup programs won't work without it. 3313 */ 3314 xfs_trans_ichgtime(tp, src_ip, XFS_ICHGTIME_CHG); 3315 xfs_trans_log_inode(tp, src_ip, XFS_ILOG_CORE); 3316 3317 /* 3318 * Adjust the link count on src_dp. This is necessary when 3319 * renaming a directory, either within one parent when 3320 * the target existed, or across two parent directories. 3321 */ 3322 if (src_is_directory && (new_parent || target_ip != NULL)) { 3323 3324 /* 3325 * Decrement link count on src_directory since the 3326 * entry that's moved no longer points to it. 3327 */ 3328 error = xfs_droplink(tp, src_dp); 3329 if (error) 3330 goto out_trans_cancel; 3331 } 3332 3333 /* 3334 * For whiteouts, we only need to update the source dirent with the 3335 * inode number of the whiteout inode rather than removing it 3336 * altogether. 3337 */ 3338 if (wip) { 3339 error = xfs_dir_replace(tp, src_dp, src_name, wip->i_ino, 3340 spaceres); 3341 } else { 3342 /* 3343 * NOTE: We don't need to check for extent count overflow here 3344 * because the dir remove name code will leave the dir block in 3345 * place if the extent count would overflow. 
3346 */ 3347 error = xfs_dir_removename(tp, src_dp, src_name, src_ip->i_ino, 3348 spaceres); 3349 } 3350 3351 if (error) 3352 goto out_trans_cancel; 3353 3354 xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); 3355 xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE); 3356 if (new_parent) 3357 xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE); 3358 3359 error = xfs_finish_rename(tp); 3360 if (wip) 3361 xfs_irele(wip); 3362 return error; 3363 3364 out_trans_cancel: 3365 xfs_trans_cancel(tp); 3366 out_release_wip: 3367 if (wip) 3368 xfs_irele(wip); 3369 return error; 3370 } 3371 3372 static int 3373 xfs_iflush( 3374 struct xfs_inode *ip, 3375 struct xfs_buf *bp) 3376 { 3377 struct xfs_inode_log_item *iip = ip->i_itemp; 3378 struct xfs_dinode *dip; 3379 struct xfs_mount *mp = ip->i_mount; 3380 int error; 3381 3382 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); 3383 ASSERT(xfs_iflags_test(ip, XFS_IFLUSHING)); 3384 ASSERT(ip->i_df.if_format != XFS_DINODE_FMT_BTREE || 3385 ip->i_df.if_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK)); 3386 ASSERT(iip->ili_item.li_buf == bp); 3387 3388 dip = xfs_buf_offset(bp, ip->i_imap.im_boffset); 3389 3390 /* 3391 * We don't flush the inode if any of the following checks fail, but we 3392 * do still update the log item and attach to the backing buffer as if 3393 * the flush happened. This is a formality to facilitate predictable 3394 * error handling as the caller will shutdown and fail the buffer. 3395 */ 3396 error = -EFSCORRUPTED; 3397 if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC), 3398 mp, XFS_ERRTAG_IFLUSH_1)) { 3399 xfs_alert_tag(mp, XFS_PTAG_IFLUSH, 3400 "%s: Bad inode %Lu magic number 0x%x, ptr "PTR_FMT, 3401 __func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip); 3402 goto flush_out; 3403 } 3404 if (S_ISREG(VFS_I(ip)->i_mode)) { 3405 if (XFS_TEST_ERROR( 3406 ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS && 3407 ip->i_df.if_format != XFS_DINODE_FMT_BTREE, 3408 mp, XFS_ERRTAG_IFLUSH_3)) { 3409 xfs_alert_tag(mp, XFS_PTAG_IFLUSH, 3410 "%s: Bad regular inode %Lu, ptr "PTR_FMT, 3411 __func__, ip->i_ino, ip); 3412 goto flush_out; 3413 } 3414 } else if (S_ISDIR(VFS_I(ip)->i_mode)) { 3415 if (XFS_TEST_ERROR( 3416 ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS && 3417 ip->i_df.if_format != XFS_DINODE_FMT_BTREE && 3418 ip->i_df.if_format != XFS_DINODE_FMT_LOCAL, 3419 mp, XFS_ERRTAG_IFLUSH_4)) { 3420 xfs_alert_tag(mp, XFS_PTAG_IFLUSH, 3421 "%s: Bad directory inode %Lu, ptr "PTR_FMT, 3422 __func__, ip->i_ino, ip); 3423 goto flush_out; 3424 } 3425 } 3426 if (XFS_TEST_ERROR(ip->i_df.if_nextents + xfs_ifork_nextents(ip->i_afp) > 3427 ip->i_nblocks, mp, XFS_ERRTAG_IFLUSH_5)) { 3428 xfs_alert_tag(mp, XFS_PTAG_IFLUSH, 3429 "%s: detected corrupt incore inode %Lu, " 3430 "total extents = %d, nblocks = %Ld, ptr "PTR_FMT, 3431 __func__, ip->i_ino, 3432 ip->i_df.if_nextents + xfs_ifork_nextents(ip->i_afp), 3433 ip->i_nblocks, ip); 3434 goto flush_out; 3435 } 3436 if (XFS_TEST_ERROR(ip->i_forkoff > mp->m_sb.sb_inodesize, 3437 mp, XFS_ERRTAG_IFLUSH_6)) { 3438 xfs_alert_tag(mp, XFS_PTAG_IFLUSH, 3439 "%s: bad inode %Lu, forkoff 0x%x, ptr "PTR_FMT, 3440 __func__, ip->i_ino, ip->i_forkoff, ip); 3441 goto flush_out; 3442 } 3443 3444 /* 3445 * Inode item log recovery for v2 inodes are dependent on the flushiter 3446 * count for correct sequencing. We bump the flush iteration count so 3447 * we can detect flushes which postdate a log record during recovery. 
3448 * This is redundant as we now log every change and hence this can't 3449 * happen but we need to still do it to ensure backwards compatibility 3450 * with old kernels that predate logging all inode changes. 3451 */ 3452 if (!xfs_sb_version_has_v3inode(&mp->m_sb)) 3453 ip->i_flushiter++; 3454 3455 /* 3456 * If there are inline format data / attr forks attached to this inode, 3457 * make sure they are not corrupt. 3458 */ 3459 if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL && 3460 xfs_ifork_verify_local_data(ip)) 3461 goto flush_out; 3462 if (ip->i_afp && ip->i_afp->if_format == XFS_DINODE_FMT_LOCAL && 3463 xfs_ifork_verify_local_attr(ip)) 3464 goto flush_out; 3465 3466 /* 3467 * Copy the dirty parts of the inode into the on-disk inode. We always 3468 * copy out the core of the inode, because if the inode is dirty at all 3469 * the core must be. 3470 */ 3471 xfs_inode_to_disk(ip, dip, iip->ili_item.li_lsn); 3472 3473 /* Wrap, we never let the log put out DI_MAX_FLUSH */ 3474 if (!xfs_sb_version_has_v3inode(&mp->m_sb)) { 3475 if (ip->i_flushiter == DI_MAX_FLUSH) 3476 ip->i_flushiter = 0; 3477 } 3478 3479 xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK); 3480 if (XFS_IFORK_Q(ip)) 3481 xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK); 3482 3483 /* 3484 * We've recorded everything logged in the inode, so we'd like to clear 3485 * the ili_fields bits so we don't log and flush things unnecessarily. 3486 * However, we can't stop logging all this information until the data 3487 * we've copied into the disk buffer is written to disk. If we did we 3488 * might overwrite the copy of the inode in the log with all the data 3489 * after re-logging only part of it, and in the face of a crash we 3490 * wouldn't have all the data we need to recover. 3491 * 3492 * What we do is move the bits to the ili_last_fields field. When 3493 * logging the inode, these bits are moved back to the ili_fields field. 3494 * In the xfs_buf_inode_iodone() routine we clear ili_last_fields, since 3495 * we know that the information those bits represent is permanently on 3496 * disk. As long as the flush completes before the inode is logged 3497 * again, then both ili_fields and ili_last_fields will be cleared. 3498 */ 3499 error = 0; 3500 flush_out: 3501 spin_lock(&iip->ili_lock); 3502 iip->ili_last_fields = iip->ili_fields; 3503 iip->ili_fields = 0; 3504 iip->ili_fsync_fields = 0; 3505 spin_unlock(&iip->ili_lock); 3506 3507 /* 3508 * Store the current LSN of the inode so that we can tell whether the 3509 * item has moved in the AIL from xfs_buf_inode_iodone(). 3510 */ 3511 xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn, 3512 &iip->ili_item.li_lsn); 3513 3514 /* generate the checksum. */ 3515 xfs_dinode_calc_crc(mp, dip); 3516 return error; 3517 } 3518 3519 /* 3520 * Non-blocking flush of dirty inode metadata into the backing buffer. 3521 * 3522 * The caller must have a reference to the inode and hold the cluster buffer 3523 * locked. The function will walk across all the inodes on the cluster buffer it 3524 * can find and lock without blocking, and flush them to the cluster buffer. 3525 * 3526 * On successful flushing of at least one inode, the caller must write out the 3527 * buffer and release it. If no inodes are flushed, -EAGAIN will be returned and 3528 * the caller needs to release the buffer. On failure, the filesystem will be 3529 * shut down, the buffer will have been unlocked and released, and EFSCORRUPTED 3530 * will be returned. 
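 *
 * A hedged sketch of the caller contract described above (not a quote of
 * any real caller):
 *
 *	error = xfs_iflush_cluster(bp);
 *	if (error == -EAGAIN)
 *		xfs_buf_relse(bp);	(nothing was flushed)
 *	else if (error)
 *		return error;		(fs shut down, bp already released)
 *	else
 *		submit bp for write, then release it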
3531 */ 3532 int 3533 xfs_iflush_cluster( 3534 struct xfs_buf *bp) 3535 { 3536 struct xfs_mount *mp = bp->b_mount; 3537 struct xfs_log_item *lip, *n; 3538 struct xfs_inode *ip; 3539 struct xfs_inode_log_item *iip; 3540 int clcount = 0; 3541 int error = 0; 3542 3543 /* 3544 * We must use the safe variant here as on shutdown xfs_iflush_abort() 3545 * can remove itself from the list. 3546 */ 3547 list_for_each_entry_safe(lip, n, &bp->b_li_list, li_bio_list) { 3548 iip = (struct xfs_inode_log_item *)lip; 3549 ip = iip->ili_inode; 3550 3551 /* 3552 * Quick and dirty check to avoid locks if possible. 3553 */ 3554 if (__xfs_iflags_test(ip, XFS_IRECLAIM | XFS_IFLUSHING)) 3555 continue; 3556 if (xfs_ipincount(ip)) 3557 continue; 3558 3559 /* 3560 * The inode is still attached to the buffer, which means it is 3561 * dirty but reclaim might try to grab it. Check carefully for 3562 * that, and grab the ilock while still holding the i_flags_lock 3563 * to guarantee reclaim will not be able to reclaim this inode 3564 * once we drop the i_flags_lock. 3565 */ 3566 spin_lock(&ip->i_flags_lock); 3567 ASSERT(!__xfs_iflags_test(ip, XFS_ISTALE)); 3568 if (__xfs_iflags_test(ip, XFS_IRECLAIM | XFS_IFLUSHING)) { 3569 spin_unlock(&ip->i_flags_lock); 3570 continue; 3571 } 3572 3573 /* 3574 * ILOCK will pin the inode against reclaim and prevent 3575 * concurrent transactions modifying the inode while we are 3576 * flushing the inode. If we get the lock, set the flushing 3577 * state before we drop the i_flags_lock. 3578 */ 3579 if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) { 3580 spin_unlock(&ip->i_flags_lock); 3581 continue; 3582 } 3583 __xfs_iflags_set(ip, XFS_IFLUSHING); 3584 spin_unlock(&ip->i_flags_lock); 3585 3586 /* 3587 * Abort flushing this inode if we are shut down because the 3588 * inode may not currently be in the AIL. This can occur when 3589 * log I/O failure unpins the inode without inserting into the 3590 * AIL, leaving a dirty/unpinned inode attached to the buffer 3591 * that otherwise looks like it should be flushed. 3592 */ 3593 if (XFS_FORCED_SHUTDOWN(mp)) { 3594 xfs_iunpin_wait(ip); 3595 xfs_iflush_abort(ip); 3596 xfs_iunlock(ip, XFS_ILOCK_SHARED); 3597 error = -EIO; 3598 continue; 3599 } 3600 3601 /* don't block waiting on a log force to unpin dirty inodes */ 3602 if (xfs_ipincount(ip)) { 3603 xfs_iflags_clear(ip, XFS_IFLUSHING); 3604 xfs_iunlock(ip, XFS_ILOCK_SHARED); 3605 continue; 3606 } 3607 3608 if (!xfs_inode_clean(ip)) 3609 error = xfs_iflush(ip, bp); 3610 else 3611 xfs_iflags_clear(ip, XFS_IFLUSHING); 3612 xfs_iunlock(ip, XFS_ILOCK_SHARED); 3613 if (error) 3614 break; 3615 clcount++; 3616 } 3617 3618 if (error) { 3619 bp->b_flags |= XBF_ASYNC; 3620 xfs_buf_ioend_fail(bp); 3621 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); 3622 return error; 3623 } 3624 3625 if (!clcount) 3626 return -EAGAIN; 3627 3628 XFS_STATS_INC(mp, xs_icluster_flushcnt); 3629 XFS_STATS_ADD(mp, xs_icluster_flushinode, clcount); 3630 return 0; 3631 3632 } 3633 3634 /* Release an inode. */ 3635 void 3636 xfs_irele( 3637 struct xfs_inode *ip) 3638 { 3639 trace_xfs_irele(ip, _RET_IP_); 3640 iput(VFS_I(ip)); 3641 } 3642 3643 /* 3644 * Ensure all commited transactions touching the inode are written to the log. 
3645 */ 3646 int 3647 xfs_log_force_inode( 3648 struct xfs_inode *ip) 3649 { 3650 xfs_csn_t seq = 0; 3651 3652 xfs_ilock(ip, XFS_ILOCK_SHARED); 3653 if (xfs_ipincount(ip)) 3654 seq = ip->i_itemp->ili_commit_seq; 3655 xfs_iunlock(ip, XFS_ILOCK_SHARED); 3656 3657 if (!seq) 3658 return 0; 3659 return xfs_log_force_seq(ip->i_mount, seq, XFS_LOG_SYNC, NULL); 3660 } 3661 3662 /* 3663 * Grab the exclusive iolock for a data copy from src to dest, making sure to 3664 * abide vfs locking order (lowest pointer value goes first) and breaking the 3665 * layout leases before proceeding. The loop is needed because we cannot call 3666 * the blocking break_layout() with the iolocks held, and therefore have to 3667 * back out both locks. 3668 */ 3669 static int 3670 xfs_iolock_two_inodes_and_break_layout( 3671 struct inode *src, 3672 struct inode *dest) 3673 { 3674 int error; 3675 3676 if (src > dest) 3677 swap(src, dest); 3678 3679 retry: 3680 /* Wait to break both inodes' layouts before we start locking. */ 3681 error = break_layout(src, true); 3682 if (error) 3683 return error; 3684 if (src != dest) { 3685 error = break_layout(dest, true); 3686 if (error) 3687 return error; 3688 } 3689 3690 /* Lock one inode and make sure nobody got in and leased it. */ 3691 inode_lock(src); 3692 error = break_layout(src, false); 3693 if (error) { 3694 inode_unlock(src); 3695 if (error == -EWOULDBLOCK) 3696 goto retry; 3697 return error; 3698 } 3699 3700 if (src == dest) 3701 return 0; 3702 3703 /* Lock the other inode and make sure nobody got in and leased it. */ 3704 inode_lock_nested(dest, I_MUTEX_NONDIR2); 3705 error = break_layout(dest, false); 3706 if (error) { 3707 inode_unlock(src); 3708 inode_unlock(dest); 3709 if (error == -EWOULDBLOCK) 3710 goto retry; 3711 return error; 3712 } 3713 3714 return 0; 3715 } 3716 3717 /* 3718 * Lock two inodes so that userspace cannot initiate I/O via file syscalls or 3719 * mmap activity. 3720 */ 3721 int 3722 xfs_ilock2_io_mmap( 3723 struct xfs_inode *ip1, 3724 struct xfs_inode *ip2) 3725 { 3726 int ret; 3727 3728 ret = xfs_iolock_two_inodes_and_break_layout(VFS_I(ip1), VFS_I(ip2)); 3729 if (ret) 3730 return ret; 3731 if (ip1 == ip2) 3732 xfs_ilock(ip1, XFS_MMAPLOCK_EXCL); 3733 else 3734 xfs_lock_two_inodes(ip1, XFS_MMAPLOCK_EXCL, 3735 ip2, XFS_MMAPLOCK_EXCL); 3736 return 0; 3737 } 3738 3739 /* Unlock both inodes to allow IO and mmap activity. */ 3740 void 3741 xfs_iunlock2_io_mmap( 3742 struct xfs_inode *ip1, 3743 struct xfs_inode *ip2) 3744 { 3745 bool same_inode = (ip1 == ip2); 3746 3747 xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL); 3748 if (!same_inode) 3749 xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL); 3750 inode_unlock(VFS_I(ip2)); 3751 if (!same_inode) 3752 inode_unlock(VFS_I(ip1)); 3753 } 3754