// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include <linux/iversion.h>

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_dir2.h"
#include "xfs_attr.h"
#include "xfs_trans_space.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
#include "xfs_iunlink_item.h"
#include "xfs_ialloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_filestream.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_symlink.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_bmap_btree.h"
#include "xfs_reflink.h"
#include "xfs_ag.h"
#include "xfs_log_priv.h"

struct kmem_cache *xfs_inode_cache;

/*
 * Used in xfs_itruncate_extents(). This is the maximum number of extents
 * freed from a file in a single transaction.
 */
#define	XFS_ITRUNC_MAX_EXTENTS	2

STATIC int xfs_iunlink(struct xfs_trans *, struct xfs_inode *);
STATIC int xfs_iunlink_remove(struct xfs_trans *tp, struct xfs_perag *pag,
	struct xfs_inode *);

/*
 * Helper function to extract the extent size hint from an inode.
 */
xfs_extlen_t
xfs_get_extsz_hint(
	struct xfs_inode	*ip)
{
	/*
	 * No point in aligning allocations if we need to COW to actually
	 * write to them.
	 */
	if (xfs_is_always_cow_inode(ip))
		return 0;
	if ((ip->i_diflags & XFS_DIFLAG_EXTSIZE) && ip->i_extsize)
		return ip->i_extsize;
	if (XFS_IS_REALTIME_INODE(ip))
		return ip->i_mount->m_sb.sb_rextsize;
	return 0;
}

/*
 * Helper function to extract the CoW extent size hint from an inode.
 * Between the extent size hint and the CoW extent size hint, we
 * return the greater of the two. If the value is zero (automatic),
 * use the default size.
 */
xfs_extlen_t
xfs_get_cowextsz_hint(
	struct xfs_inode	*ip)
{
	xfs_extlen_t		a, b;

	a = 0;
	if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
		a = ip->i_cowextsize;
	b = xfs_get_extsz_hint(ip);

	a = max(a, b);
	if (a == 0)
		return XFS_DEFAULT_COWEXTSZ_HINT;
	return a;
}

/*
 * These two are wrapper routines around the xfs_ilock() routine used to
 * centralize some grungy code. They are used in places that wish to lock the
 * inode solely for reading the extents. The reason these places can't just
 * call xfs_ilock(ip, XFS_ILOCK_SHARED) is that the inode lock also guards the
 * bringing in of the extents from disk for a file in b-tree format. If the
 * inode is in b-tree format, then we need to lock the inode exclusively until
 * the extents are read in. Locking it exclusively all the time would limit
 * our parallelism unnecessarily, though. What we do instead is check to see
 * if the extents have been read in yet, and only lock the inode exclusively
 * if they have not.
 *
 * The functions return a value which should be given to the corresponding
 * xfs_iunlock() call.
 */
uint
xfs_ilock_data_map_shared(
	struct xfs_inode	*ip)
{
	uint			lock_mode = XFS_ILOCK_SHARED;

	if (xfs_need_iread_extents(&ip->i_df))
		lock_mode = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lock_mode);
	return lock_mode;
}

uint
xfs_ilock_attr_map_shared(
	struct xfs_inode	*ip)
{
	uint			lock_mode = XFS_ILOCK_SHARED;

	if (xfs_inode_has_attr_fork(ip) && xfs_need_iread_extents(&ip->i_af))
		lock_mode = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lock_mode);
	return lock_mode;
}
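
/*
 * Typical usage of the two map_shared helpers above (an illustrative sketch,
 * not part of the original source): the returned lock mode must be handed
 * back to xfs_iunlock().
 *
 *	lock_mode = xfs_ilock_data_map_shared(ip);
 *	... read the extent list ...
 *	xfs_iunlock(ip, lock_mode);
 */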

/*
 * You can't set both SHARED and EXCL for the same lock,
 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_MMAPLOCK_SHARED,
 * XFS_MMAPLOCK_EXCL, XFS_ILOCK_SHARED, XFS_ILOCK_EXCL are valid values
 * to set in lock_flags.
 */
static inline void
xfs_lock_flags_assert(
	uint			lock_flags)
{
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
		(XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
		(XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
		(XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
	ASSERT(lock_flags != 0);
}

/*
 * In addition to i_rwsem in the VFS inode, the xfs inode contains 2
 * multi-reader locks: invalidate_lock and the i_lock. This routine allows
 * various combinations of the locks to be obtained.
 *
 * The 3 locks should always be ordered so that the IO lock is obtained first,
 * the mmap lock second and the ilock last in order to prevent deadlock.
 *
 * Basic locking order:
 *
 * i_rwsem -> invalidate_lock -> page_lock -> i_ilock
 *
 * mmap_lock locking order:
 *
 * i_rwsem -> page lock -> mmap_lock
 * mmap_lock -> invalidate_lock -> page_lock
 *
 * The difference in mmap_lock locking order means that we cannot hold the
 * invalidate_lock over syscall based read(2)/write(2) based IO. These IO paths
 * can fault in pages during copy in/out (for buffered IO) or require the
 * mmap_lock in get_user_pages() to map the user pages into the kernel address
 * space for direct IO. Similarly the i_rwsem cannot be taken inside a page
 * fault because page faults already hold the mmap_lock.
 *
 * Hence to serialise fully against both syscall and mmap based IO, we need to
 * take both the i_rwsem and the invalidate_lock. These locks should *only* be
 * both taken in places where we need to invalidate the page cache in a race
 * free manner (e.g. truncate, hole punch and other extent manipulation
 * functions).
 */
void
xfs_ilock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	trace_xfs_ilock(ip, lock_flags, _RET_IP_);

	xfs_lock_flags_assert(lock_flags);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		down_write_nested(&VFS_I(ip)->i_rwsem,
				  XFS_IOLOCK_DEP(lock_flags));
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		down_read_nested(&VFS_I(ip)->i_rwsem,
				 XFS_IOLOCK_DEP(lock_flags));
	}

	if (lock_flags & XFS_MMAPLOCK_EXCL) {
		down_write_nested(&VFS_I(ip)->i_mapping->invalidate_lock,
				  XFS_MMAPLOCK_DEP(lock_flags));
	} else if (lock_flags & XFS_MMAPLOCK_SHARED) {
		down_read_nested(&VFS_I(ip)->i_mapping->invalidate_lock,
				 XFS_MMAPLOCK_DEP(lock_flags));
	}

	if (lock_flags & XFS_ILOCK_EXCL)
		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
	else if (lock_flags & XFS_ILOCK_SHARED)
		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
}

/*
 * This is just like xfs_ilock(), except that the caller
 * is guaranteed not to sleep. It returns 1 if it gets
 * the requested locks and 0 otherwise. If the IO lock is
 * obtained but the inode lock cannot be, then the IO lock
 * is dropped before returning.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks to be
 *	locked. See the comment for xfs_ilock() for a list of valid values.
 */
int
xfs_ilock_nowait(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);

	xfs_lock_flags_assert(lock_flags);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		if (!down_write_trylock(&VFS_I(ip)->i_rwsem))
			goto out;
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		if (!down_read_trylock(&VFS_I(ip)->i_rwsem))
			goto out;
	}

	if (lock_flags & XFS_MMAPLOCK_EXCL) {
		if (!down_write_trylock(&VFS_I(ip)->i_mapping->invalidate_lock))
			goto out_undo_iolock;
	} else if (lock_flags & XFS_MMAPLOCK_SHARED) {
		if (!down_read_trylock(&VFS_I(ip)->i_mapping->invalidate_lock))
			goto out_undo_iolock;
	}

	if (lock_flags & XFS_ILOCK_EXCL) {
		if (!mrtryupdate(&ip->i_lock))
			goto out_undo_mmaplock;
	} else if (lock_flags & XFS_ILOCK_SHARED) {
		if (!mrtryaccess(&ip->i_lock))
			goto out_undo_mmaplock;
	}
	return 1;

out_undo_mmaplock:
	if (lock_flags & XFS_MMAPLOCK_EXCL)
		up_write(&VFS_I(ip)->i_mapping->invalidate_lock);
	else if (lock_flags & XFS_MMAPLOCK_SHARED)
		up_read(&VFS_I(ip)->i_mapping->invalidate_lock);
out_undo_iolock:
	if (lock_flags & XFS_IOLOCK_EXCL)
		up_write(&VFS_I(ip)->i_rwsem);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		up_read(&VFS_I(ip)->i_rwsem);
out:
	return 0;
}

/*
 * xfs_iunlock() is used to drop the inode locks acquired with
 * xfs_ilock() and xfs_ilock_nowait(). The caller must pass
 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 * that we know which locks to drop.
 *
 * ip -- the inode being unlocked
 * lock_flags -- this parameter indicates the inode's locks to be
 *	unlocked. See the comment for xfs_ilock() for a list of valid
 *	values for this parameter.
 */
void
xfs_iunlock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	xfs_lock_flags_assert(lock_flags);

	if (lock_flags & XFS_IOLOCK_EXCL)
		up_write(&VFS_I(ip)->i_rwsem);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		up_read(&VFS_I(ip)->i_rwsem);

	if (lock_flags & XFS_MMAPLOCK_EXCL)
		up_write(&VFS_I(ip)->i_mapping->invalidate_lock);
	else if (lock_flags & XFS_MMAPLOCK_SHARED)
		up_read(&VFS_I(ip)->i_mapping->invalidate_lock);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrunlock_excl(&ip->i_lock);
	else if (lock_flags & XFS_ILOCK_SHARED)
		mrunlock_shared(&ip->i_lock);

	trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
}

/*
 * Give up write locks. The i/o lock cannot be held nested
 * if it is being demoted.
 */
void
xfs_ilock_demote(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL));
	ASSERT((lock_flags &
		~(XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrdemote(&ip->i_lock);
	if (lock_flags & XFS_MMAPLOCK_EXCL)
		downgrade_write(&VFS_I(ip)->i_mapping->invalidate_lock);
	if (lock_flags & XFS_IOLOCK_EXCL)
		downgrade_write(&VFS_I(ip)->i_rwsem);

	trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
}

#if defined(DEBUG) || defined(XFS_WARN)
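/*
 * Return whether @rwsem is held: in any mode if @shared is set, otherwise
 * exclusively. Prefers lockdep's view of the lock when lock debugging is
 * enabled.
 */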
static inline bool
__xfs_rwsem_islocked(
	struct rw_semaphore	*rwsem,
	bool			shared)
{
	if (!debug_locks)
		return rwsem_is_locked(rwsem);

	if (!shared)
		return lockdep_is_held_type(rwsem, 0);

	/*
	 * We are checking that the lock is held at least in shared
	 * mode but don't care that it might be held exclusively
	 * (i.e. shared | excl). Hence we check if the lock is held
	 * in any mode rather than an explicit shared mode.
	 */
	return lockdep_is_held_type(rwsem, -1);
}

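/*
 * Debug-only check that the inode locks indicated by @lock_flags are held.
 * Only one lock type (iolock, mmaplock or ilock) is checked per call.
 */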
bool
xfs_isilocked(
	struct xfs_inode	*ip,
	uint			lock_flags)
{
	if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
		if (!(lock_flags & XFS_ILOCK_SHARED))
			return !!ip->i_lock.mr_writer;
		return rwsem_is_locked(&ip->i_lock.mr_lock);
	}

	if (lock_flags & (XFS_MMAPLOCK_EXCL|XFS_MMAPLOCK_SHARED)) {
		return __xfs_rwsem_islocked(&VFS_I(ip)->i_mapping->invalidate_lock,
				(lock_flags & XFS_MMAPLOCK_SHARED));
	}

	if (lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) {
		return __xfs_rwsem_islocked(&VFS_I(ip)->i_rwsem,
				(lock_flags & XFS_IOLOCK_SHARED));
	}

	ASSERT(0);
	return false;
}
#endif

/*
 * xfs_lockdep_subclass_ok() is only used in an ASSERT, so is only called when
 * DEBUG or XFS_WARN is set. And MAX_LOCKDEP_SUBCLASSES is then only defined
 * when CONFIG_LOCKDEP is set. Hence the complex define below to avoid build
 * errors and warnings.
 */
#if (defined(DEBUG) || defined(XFS_WARN)) && defined(CONFIG_LOCKDEP)
static bool
xfs_lockdep_subclass_ok(
	int			subclass)
{
	return subclass < MAX_LOCKDEP_SUBCLASSES;
}
#else
#define xfs_lockdep_subclass_ok(subclass)	(true)
#endif

/*
 * Bump the subclass so xfs_lock_inodes() acquires each lock with a different
 * value. This can be called for any type of inode lock combination, including
 * parent locking. Care must be taken to ensure we don't overrun the subclass
 * storage fields in the class mask we build.
 */
static inline uint
xfs_lock_inumorder(
	uint			lock_mode,
	uint			subclass)
{
	uint			class = 0;

	ASSERT(!(lock_mode & (XFS_ILOCK_PARENT | XFS_ILOCK_RTBITMAP |
			      XFS_ILOCK_RTSUM)));
	ASSERT(xfs_lockdep_subclass_ok(subclass));

	if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) {
		ASSERT(subclass <= XFS_IOLOCK_MAX_SUBCLASS);
		class += subclass << XFS_IOLOCK_SHIFT;
	}

	if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) {
		ASSERT(subclass <= XFS_MMAPLOCK_MAX_SUBCLASS);
		class += subclass << XFS_MMAPLOCK_SHIFT;
	}

	if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) {
		ASSERT(subclass <= XFS_ILOCK_MAX_SUBCLASS);
		class += subclass << XFS_ILOCK_SHIFT;
	}

	return (lock_mode & ~XFS_LOCK_SUBCLASS_MASK) | class;
}

/*
 * The following routine will lock n inodes in exclusive mode. We assume the
 * caller calls us with the inodes in i_ino order.
 *
 * We need to detect deadlock where an inode that we lock is in the AIL and we
 * start waiting for another inode that is locked by a thread in a long running
 * transaction (such as truncate). This can result in deadlock since the long
 * running trans might need to wait for the inode we just locked in order to
 * push the tail and free space in the log.
 *
 * xfs_lock_inodes() can only be used to lock one type of lock at a time -
 * the iolock, the mmaplock or the ilock, but not more than one at a time. If we
 * lock more than one at a time, lockdep will report false positives saying we
 * have violated locking orders.
 */
static void
xfs_lock_inodes(
	struct xfs_inode	**ips,
	int			inodes,
	uint			lock_mode)
{
	int			attempts = 0;
	uint			i;
	int			j;
	bool			try_lock;
	struct xfs_log_item	*lp;

	/*
	 * Currently supports between 2 and 5 inodes with exclusive locking. We
	 * support an arbitrary depth of locking here, but absolute limits on
	 * inodes depend on the type of locking and the limits placed by
	 * lockdep annotations in xfs_lock_inumorder. These are all checked by
	 * the asserts.
	 */
	ASSERT(ips && inodes >= 2 && inodes <= 5);
	ASSERT(lock_mode & (XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL |
			    XFS_ILOCK_EXCL));
	ASSERT(!(lock_mode & (XFS_IOLOCK_SHARED | XFS_MMAPLOCK_SHARED |
			      XFS_ILOCK_SHARED)));
	ASSERT(!(lock_mode & XFS_MMAPLOCK_EXCL) ||
		inodes <= XFS_MMAPLOCK_MAX_SUBCLASS + 1);
	ASSERT(!(lock_mode & XFS_ILOCK_EXCL) ||
		inodes <= XFS_ILOCK_MAX_SUBCLASS + 1);

	if (lock_mode & XFS_IOLOCK_EXCL) {
		ASSERT(!(lock_mode & (XFS_MMAPLOCK_EXCL | XFS_ILOCK_EXCL)));
	} else if (lock_mode & XFS_MMAPLOCK_EXCL)
		ASSERT(!(lock_mode & XFS_ILOCK_EXCL));

again:
	try_lock = false;
	i = 0;
	for (; i < inodes; i++) {
		ASSERT(ips[i]);

		if (i && (ips[i] == ips[i - 1]))	/* Already locked */
			continue;

		/*
		 * If try_lock is not set yet, make sure all locked inodes are
		 * not in the AIL. If any are, set try_lock to be used later.
		 */
		if (!try_lock) {
			for (j = (i - 1); j >= 0 && !try_lock; j--) {
				lp = &ips[j]->i_itemp->ili_item;
				if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags))
					try_lock = true;
			}
		}

		/*
		 * If any of the previous locks we have locked is in the AIL,
		 * we must TRY to get the second and subsequent locks. If
		 * we can't get any, we must release all we have
		 * and try again.
		 */
		if (!try_lock) {
			xfs_ilock(ips[i], xfs_lock_inumorder(lock_mode, i));
			continue;
		}

		/* try_lock means we have an inode locked that is in the AIL. */
		ASSERT(i != 0);
		if (xfs_ilock_nowait(ips[i], xfs_lock_inumorder(lock_mode, i)))
			continue;

		/*
		 * Unlock all previous guys and try again. xfs_iunlock will try
		 * to push the tail if the inode is in the AIL.
		 */
		attempts++;
		for (j = i - 1; j >= 0; j--) {
			/*
			 * Check to see if we've already unlocked this one. Not
			 * the first one going back, and the inode ptr is the
			 * same.
			 */
			if (j != (i - 1) && ips[j] == ips[j + 1])
				continue;

			xfs_iunlock(ips[j], lock_mode);
		}

		if ((attempts % 5) == 0) {
			delay(1); /* Don't just spin the CPU */
		}
		goto again;
	}
}

/*
 * xfs_lock_two_inodes() can only be used to lock ilock. The iolock and
 * mmaplock must be double-locked separately since we use i_rwsem and
 * invalidate_lock for that. We now support taking one lock EXCL and the
 * other SHARED.
 */
void
xfs_lock_two_inodes(
	struct xfs_inode	*ip0,
	uint			ip0_mode,
	struct xfs_inode	*ip1,
	uint			ip1_mode)
{
	int			attempts = 0;
	struct xfs_log_item	*lp;

	ASSERT(hweight32(ip0_mode) == 1);
	ASSERT(hweight32(ip1_mode) == 1);
	ASSERT(!(ip0_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
	ASSERT(!(ip1_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
	ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)));
	ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)));
	ASSERT(ip0->i_ino != ip1->i_ino);

	if (ip0->i_ino > ip1->i_ino) {
		swap(ip0, ip1);
		swap(ip0_mode, ip1_mode);
	}

again:
	xfs_ilock(ip0, xfs_lock_inumorder(ip0_mode, 0));

	/*
	 * If the first lock we have locked is in the AIL, we must TRY to get
	 * the second lock. If we can't get it, we must release the first one
	 * and try again.
	 */
	lp = &ip0->i_itemp->ili_item;
	if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags)) {
		if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(ip1_mode, 1))) {
			xfs_iunlock(ip0, ip0_mode);
			if ((++attempts % 5) == 0)
				delay(1); /* Don't just spin the CPU */
			goto again;
		}
	} else {
		xfs_ilock(ip1, xfs_lock_inumorder(ip1_mode, 1));
	}
}

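/*
 * Convert the on-disk di_flags/di_flags2 of an inode into the FS_XFLAG_*
 * values reported to userspace (e.g. via the FS_IOC_FSGETXATTR ioctl).
 */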
uint
xfs_ip2xflags(
	struct xfs_inode	*ip)
{
	uint			flags = 0;

	if (ip->i_diflags & XFS_DIFLAG_ANY) {
		if (ip->i_diflags & XFS_DIFLAG_REALTIME)
			flags |= FS_XFLAG_REALTIME;
		if (ip->i_diflags & XFS_DIFLAG_PREALLOC)
			flags |= FS_XFLAG_PREALLOC;
		if (ip->i_diflags & XFS_DIFLAG_IMMUTABLE)
			flags |= FS_XFLAG_IMMUTABLE;
		if (ip->i_diflags & XFS_DIFLAG_APPEND)
			flags |= FS_XFLAG_APPEND;
		if (ip->i_diflags & XFS_DIFLAG_SYNC)
			flags |= FS_XFLAG_SYNC;
		if (ip->i_diflags & XFS_DIFLAG_NOATIME)
			flags |= FS_XFLAG_NOATIME;
		if (ip->i_diflags & XFS_DIFLAG_NODUMP)
			flags |= FS_XFLAG_NODUMP;
		if (ip->i_diflags & XFS_DIFLAG_RTINHERIT)
			flags |= FS_XFLAG_RTINHERIT;
		if (ip->i_diflags & XFS_DIFLAG_PROJINHERIT)
			flags |= FS_XFLAG_PROJINHERIT;
		if (ip->i_diflags & XFS_DIFLAG_NOSYMLINKS)
			flags |= FS_XFLAG_NOSYMLINKS;
		if (ip->i_diflags & XFS_DIFLAG_EXTSIZE)
			flags |= FS_XFLAG_EXTSIZE;
		if (ip->i_diflags & XFS_DIFLAG_EXTSZINHERIT)
			flags |= FS_XFLAG_EXTSZINHERIT;
		if (ip->i_diflags & XFS_DIFLAG_NODEFRAG)
			flags |= FS_XFLAG_NODEFRAG;
		if (ip->i_diflags & XFS_DIFLAG_FILESTREAM)
			flags |= FS_XFLAG_FILESTREAM;
	}

	if (ip->i_diflags2 & XFS_DIFLAG2_ANY) {
		if (ip->i_diflags2 & XFS_DIFLAG2_DAX)
			flags |= FS_XFLAG_DAX;
		if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
			flags |= FS_XFLAG_COWEXTSIZE;
	}

	if (xfs_inode_has_attr_fork(ip))
		flags |= FS_XFLAG_HASATTR;
	return flags;
}

/*
 * Looks up an inode from "name". If ci_name is not NULL, then a CI match
 * is allowed, otherwise it has to be an exact match. If a CI match is found,
 * ci_name->name will point to the actual name (caller must free) or
 * will be set to NULL if an exact match is found.
 */
int
xfs_lookup(
	struct xfs_inode	*dp,
	const struct xfs_name	*name,
	struct xfs_inode	**ipp,
	struct xfs_name		*ci_name)
{
	xfs_ino_t		inum;
	int			error;

	trace_xfs_lookup(dp, name);

	if (xfs_is_shutdown(dp->i_mount))
		return -EIO;

	error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
	if (error)
		goto out_unlock;

	error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp);
	if (error)
		goto out_free_name;

	return 0;

out_free_name:
	if (ci_name)
		kmem_free(ci_name->name);
out_unlock:
	*ipp = NULL;
	return error;
}

/* Propagate di_flags from a parent inode to a child inode. */
static void
xfs_inode_inherit_flags(
	struct xfs_inode	*ip,
	const struct xfs_inode	*pip)
{
	unsigned int		di_flags = 0;
	xfs_failaddr_t		failaddr;
	umode_t			mode = VFS_I(ip)->i_mode;

	if (S_ISDIR(mode)) {
		if (pip->i_diflags & XFS_DIFLAG_RTINHERIT)
			di_flags |= XFS_DIFLAG_RTINHERIT;
		if (pip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) {
			di_flags |= XFS_DIFLAG_EXTSZINHERIT;
			ip->i_extsize = pip->i_extsize;
		}
		if (pip->i_diflags & XFS_DIFLAG_PROJINHERIT)
			di_flags |= XFS_DIFLAG_PROJINHERIT;
	} else if (S_ISREG(mode)) {
		if ((pip->i_diflags & XFS_DIFLAG_RTINHERIT) &&
		    xfs_has_realtime(ip->i_mount))
			di_flags |= XFS_DIFLAG_REALTIME;
		if (pip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) {
			di_flags |= XFS_DIFLAG_EXTSIZE;
			ip->i_extsize = pip->i_extsize;
		}
	}
	if ((pip->i_diflags & XFS_DIFLAG_NOATIME) &&
	    xfs_inherit_noatime)
		di_flags |= XFS_DIFLAG_NOATIME;
	if ((pip->i_diflags & XFS_DIFLAG_NODUMP) &&
	    xfs_inherit_nodump)
		di_flags |= XFS_DIFLAG_NODUMP;
	if ((pip->i_diflags & XFS_DIFLAG_SYNC) &&
	    xfs_inherit_sync)
		di_flags |= XFS_DIFLAG_SYNC;
	if ((pip->i_diflags & XFS_DIFLAG_NOSYMLINKS) &&
	    xfs_inherit_nosymlinks)
		di_flags |= XFS_DIFLAG_NOSYMLINKS;
	if ((pip->i_diflags & XFS_DIFLAG_NODEFRAG) &&
	    xfs_inherit_nodefrag)
		di_flags |= XFS_DIFLAG_NODEFRAG;
	if (pip->i_diflags & XFS_DIFLAG_FILESTREAM)
		di_flags |= XFS_DIFLAG_FILESTREAM;

	ip->i_diflags |= di_flags;

	/*
	 * Inode verifiers on older kernels only check that the extent size
	 * hint is an integer multiple of the rt extent size on realtime files.
	 * They did not check the hint alignment on a directory with both
	 * rtinherit and extszinherit flags set. If the misaligned hint is
	 * propagated from a directory into a new realtime file, new file
	 * allocations will fail due to math errors in the rt allocator and/or
	 * trip the verifiers. Validate the hint settings in the new file so
	 * that we don't let broken hints propagate.
	 */
	failaddr = xfs_inode_validate_extsize(ip->i_mount, ip->i_extsize,
			VFS_I(ip)->i_mode, ip->i_diflags);
	if (failaddr) {
		ip->i_diflags &= ~(XFS_DIFLAG_EXTSIZE |
				   XFS_DIFLAG_EXTSZINHERIT);
		ip->i_extsize = 0;
	}
}

/* Propagate di_flags2 from a parent inode to a child inode. */
static void
xfs_inode_inherit_flags2(
	struct xfs_inode	*ip,
	const struct xfs_inode	*pip)
{
	xfs_failaddr_t		failaddr;

	if (pip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE) {
		ip->i_diflags2 |= XFS_DIFLAG2_COWEXTSIZE;
		ip->i_cowextsize = pip->i_cowextsize;
	}
	if (pip->i_diflags2 & XFS_DIFLAG2_DAX)
		ip->i_diflags2 |= XFS_DIFLAG2_DAX;

	/* Don't let invalid cowextsize hints propagate. */
	failaddr = xfs_inode_validate_cowextsize(ip->i_mount, ip->i_cowextsize,
			VFS_I(ip)->i_mode, ip->i_diflags, ip->i_diflags2);
	if (failaddr) {
		ip->i_diflags2 &= ~XFS_DIFLAG2_COWEXTSIZE;
		ip->i_cowextsize = 0;
	}
}

/*
 * Initialise a newly allocated inode and return the in-core inode to the
 * caller locked exclusively.
 */
int
xfs_init_new_inode(
	struct mnt_idmap	*idmap,
	struct xfs_trans	*tp,
	struct xfs_inode	*pip,
	xfs_ino_t		ino,
	umode_t			mode,
	xfs_nlink_t		nlink,
	dev_t			rdev,
	prid_t			prid,
	bool			init_xattrs,
	struct xfs_inode	**ipp)
{
	struct inode		*dir = pip ? VFS_I(pip) : NULL;
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_inode	*ip;
	unsigned int		flags;
	int			error;
	struct timespec64	tv;
	struct inode		*inode;

	/*
	 * Protect against obviously corrupt allocation btree records. Later
	 * xfs_iget checks will catch re-allocation of other active in-memory
	 * and on-disk inodes. If we don't catch reallocating the parent inode
	 * here we will deadlock in xfs_iget() so we have to do these checks
	 * first.
	 */
	if ((pip && ino == pip->i_ino) || !xfs_verify_dir_ino(mp, ino)) {
		xfs_alert(mp, "Allocated a known in-use inode 0x%llx!", ino);
		return -EFSCORRUPTED;
	}

	/*
	 * Get the in-core inode with the lock held exclusively to prevent
	 * others from looking at it until we're done.
	 */
	error = xfs_iget(mp, tp, ino, XFS_IGET_CREATE, XFS_ILOCK_EXCL, &ip);
	if (error)
		return error;

	ASSERT(ip != NULL);
	inode = VFS_I(ip);
	set_nlink(inode, nlink);
	inode->i_rdev = rdev;
	ip->i_projid = prid;

	if (dir && !(dir->i_mode & S_ISGID) && xfs_has_grpid(mp)) {
		inode_fsuid_set(inode, idmap);
		inode->i_gid = dir->i_gid;
		inode->i_mode = mode;
	} else {
		inode_init_owner(idmap, inode, dir, mode);
	}

	/*
	 * If the group ID of the new file does not match the effective group
	 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
	 * (and only if the irix_sgid_inherit compatibility variable is set).
	 */
	if (irix_sgid_inherit && (inode->i_mode & S_ISGID) &&
	    !vfsgid_in_group_p(i_gid_into_vfsgid(idmap, inode)))
		inode->i_mode &= ~S_ISGID;

	ip->i_disk_size = 0;
	ip->i_df.if_nextents = 0;
	ASSERT(ip->i_nblocks == 0);

	tv = inode_set_ctime_current(inode);
	inode->i_mtime = tv;
	inode->i_atime = tv;

	ip->i_extsize = 0;
	ip->i_diflags = 0;

	if (xfs_has_v3inodes(mp)) {
		inode_set_iversion(inode, 1);
		ip->i_cowextsize = 0;
		ip->i_crtime = tv;
	}

	flags = XFS_ILOG_CORE;
	switch (mode & S_IFMT) {
	case S_IFIFO:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFSOCK:
		ip->i_df.if_format = XFS_DINODE_FMT_DEV;
		flags |= XFS_ILOG_DEV;
		break;
	case S_IFREG:
	case S_IFDIR:
		if (pip && (pip->i_diflags & XFS_DIFLAG_ANY))
			xfs_inode_inherit_flags(ip, pip);
		if (pip && (pip->i_diflags2 & XFS_DIFLAG2_ANY))
			xfs_inode_inherit_flags2(ip, pip);
		fallthrough;
	case S_IFLNK:
		ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
		ip->i_df.if_bytes = 0;
		ip->i_df.if_u1.if_root = NULL;
		break;
	default:
		ASSERT(0);
	}

	/*
	 * If we need to create attributes immediately after allocating the
	 * inode, initialise an empty attribute fork right now. We use the
	 * default fork offset for attributes here as we don't know exactly what
	 * size or how many attributes we might be adding. We can do this
	 * safely here because we know the data fork is completely empty and
	 * this saves us from needing to run a separate transaction to set the
	 * fork offset in the immediate future.
	 */
	if (init_xattrs && xfs_has_attr(mp)) {
		ip->i_forkoff = xfs_default_attroffset(ip) >> 3;
		xfs_ifork_init_attr(ip, XFS_DINODE_FMT_EXTENTS, 0);
	}

	/*
	 * Log the new values stuffed into the inode.
	 */
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, flags);

	/* now that we have an i_mode we can setup the inode structure */
	xfs_setup_inode(ip);

	*ipp = ip;
	return 0;
}

/*
 * Decrement the link count on an inode & log the change. If this causes the
 * link count to go to zero, move the inode to the AGI unlinked list so that
 * it can be freed when the last active reference goes away via xfs_inactive().
 */
static int			/* error */
xfs_droplink(
	xfs_trans_t		*tp,
	xfs_inode_t		*ip)
{
	if (VFS_I(ip)->i_nlink == 0) {
		xfs_alert(ip->i_mount,
			"%s: Attempt to drop inode (%llu) with nlink zero.",
			__func__, ip->i_ino);
		return -EFSCORRUPTED;
	}

	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);

	drop_nlink(VFS_I(ip));
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	if (VFS_I(ip)->i_nlink)
		return 0;

	return xfs_iunlink(tp, ip);
}

/*
 * Increment the link count on an inode & log the change.
 */
static void
xfs_bumplink(
	xfs_trans_t		*tp,
	xfs_inode_t		*ip)
{
	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);

	inc_nlink(VFS_I(ip));
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
}

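/*
 * Create a new inode named @name in directory @dp: allocate the on-disk
 * inode, initialise it and add the directory entry, all within a single
 * rolling transaction.
 */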
int
xfs_create(
	struct mnt_idmap	*idmap,
	xfs_inode_t		*dp,
	struct xfs_name		*name,
	umode_t			mode,
	dev_t			rdev,
	bool			init_xattrs,
	xfs_inode_t		**ipp)
{
	int			is_dir = S_ISDIR(mode);
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_inode	*ip = NULL;
	struct xfs_trans	*tp = NULL;
	int			error;
	bool			unlock_dp_on_error = false;
	prid_t			prid;
	struct xfs_dquot	*udqp = NULL;
	struct xfs_dquot	*gdqp = NULL;
	struct xfs_dquot	*pdqp = NULL;
	struct xfs_trans_res	*tres;
	uint			resblks;
	xfs_ino_t		ino;

	trace_xfs_create(dp, name);

	if (xfs_is_shutdown(mp))
		return -EIO;

	prid = xfs_get_initial_prid(dp);

	/*
	 * Make sure that we have allocated dquot(s) on disk.
	 */
	error = xfs_qm_vop_dqalloc(dp, mapped_fsuid(idmap, &init_user_ns),
			mapped_fsgid(idmap, &init_user_ns), prid,
			XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
			&udqp, &gdqp, &pdqp);
	if (error)
		return error;

	if (is_dir) {
		resblks = XFS_MKDIR_SPACE_RES(mp, name->len);
		tres = &M_RES(mp)->tr_mkdir;
	} else {
		resblks = XFS_CREATE_SPACE_RES(mp, name->len);
		tres = &M_RES(mp)->tr_create;
	}

	/*
	 * Initially assume that the file does not exist and
	 * reserve the resources for that case. If that is not
	 * the case we'll drop the one we have and get a more
	 * appropriate transaction later.
	 */
	error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp, resblks,
			&tp);
	if (error == -ENOSPC) {
		/* flush outstanding delalloc blocks and retry */
		xfs_flush_inodes(mp);
		error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp,
				resblks, &tp);
	}
	if (error)
		goto out_release_dquots;

	xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
	unlock_dp_on_error = true;

	/*
	 * A newly created regular or special file just has one directory
	 * entry pointing to it, but a directory also has the "." entry
	 * pointing to itself.
	 */
	error = xfs_dialloc(&tp, dp->i_ino, mode, &ino);
	if (!error)
		error = xfs_init_new_inode(idmap, tp, dp, ino, mode,
				is_dir ? 2 : 1, rdev, prid, init_xattrs, &ip);
	if (error)
		goto out_trans_cancel;

	/*
	 * Now we join the directory inode to the transaction. We do not do it
	 * earlier because xfs_dialloc might commit the previous transaction
	 * (and release all the locks). An error from here on will result in
	 * the transaction cancel unlocking dp so don't do it explicitly in the
	 * error path.
	 */
	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
	unlock_dp_on_error = false;

	error = xfs_dir_createname(tp, dp, name, ip->i_ino,
				   resblks - XFS_IALLOC_SPACE_RES(mp));
	if (error) {
		ASSERT(error != -ENOSPC);
		goto out_trans_cancel;
	}
	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);

	if (is_dir) {
		error = xfs_dir_init(tp, ip, dp);
		if (error)
			goto out_trans_cancel;

		xfs_bumplink(tp, dp);
	}

	/*
	 * If this is a synchronous mount, make sure that the
	 * create transaction goes to disk before returning to
	 * the user.
	 */
	if (xfs_has_wsync(mp) || xfs_has_dirsync(mp))
		xfs_trans_set_sync(tp);

	/*
	 * Attach the dquot(s) to the inodes and modify them incore.
	 * The ids of the inode couldn't have changed since the new
	 * inode has been locked ever since it was created.
	 */
	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);

	error = xfs_trans_commit(tp);
	if (error)
		goto out_release_inode;

	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	*ipp = ip;
	return 0;

out_trans_cancel:
	xfs_trans_cancel(tp);
out_release_inode:
	/*
	 * Wait until after the current transaction is aborted to finish the
	 * setup of the inode and release the inode. This prevents recursive
	 * transactions and deadlocks from xfs_inactive.
	 */
	if (ip) {
		xfs_finish_inode_setup(ip);
		xfs_irele(ip);
	}
out_release_dquots:
	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	if (unlock_dp_on_error)
		xfs_iunlock(dp, XFS_ILOCK_EXCL);
	return error;
}

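/*
 * Create an unnamed (O_TMPFILE style) inode: allocate and initialise it, then
 * place it directly on the AGI unlinked list so it is reclaimed if it is
 * never linked into the namespace.
 */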
int
xfs_create_tmpfile(
	struct mnt_idmap	*idmap,
	struct xfs_inode	*dp,
	umode_t			mode,
	struct xfs_inode	**ipp)
{
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_inode	*ip = NULL;
	struct xfs_trans	*tp = NULL;
	int			error;
	prid_t			prid;
	struct xfs_dquot	*udqp = NULL;
	struct xfs_dquot	*gdqp = NULL;
	struct xfs_dquot	*pdqp = NULL;
	struct xfs_trans_res	*tres;
	uint			resblks;
	xfs_ino_t		ino;

	if (xfs_is_shutdown(mp))
		return -EIO;

	prid = xfs_get_initial_prid(dp);

	/*
	 * Make sure that we have allocated dquot(s) on disk.
	 */
	error = xfs_qm_vop_dqalloc(dp, mapped_fsuid(idmap, &init_user_ns),
			mapped_fsgid(idmap, &init_user_ns), prid,
			XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
			&udqp, &gdqp, &pdqp);
	if (error)
		return error;

	resblks = XFS_IALLOC_SPACE_RES(mp);
	tres = &M_RES(mp)->tr_create_tmpfile;

	error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp, resblks,
			&tp);
	if (error)
		goto out_release_dquots;

	error = xfs_dialloc(&tp, dp->i_ino, mode, &ino);
	if (!error)
		error = xfs_init_new_inode(idmap, tp, dp, ino, mode,
				0, 0, prid, false, &ip);
	if (error)
		goto out_trans_cancel;

	if (xfs_has_wsync(mp))
		xfs_trans_set_sync(tp);

	/*
	 * Attach the dquot(s) to the inodes and modify them incore.
	 * The ids of the inode couldn't have changed since the new
	 * inode has been locked ever since it was created.
	 */
	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);

	error = xfs_iunlink(tp, ip);
	if (error)
		goto out_trans_cancel;

	error = xfs_trans_commit(tp);
	if (error)
		goto out_release_inode;

	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	*ipp = ip;
	return 0;

out_trans_cancel:
	xfs_trans_cancel(tp);
out_release_inode:
	/*
	 * Wait until after the current transaction is aborted to finish the
	 * setup of the inode and release the inode. This prevents recursive
	 * transactions and deadlocks from xfs_inactive.
	 */
	if (ip) {
		xfs_finish_inode_setup(ip);
		xfs_irele(ip);
	}
out_release_dquots:
	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	return error;
}

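/*
 * Create a new hard link to @sip named @target_name in directory @tdp.
 */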
int
xfs_link(
	xfs_inode_t		*tdp,
	xfs_inode_t		*sip,
	struct xfs_name		*target_name)
{
	xfs_mount_t		*mp = tdp->i_mount;
	xfs_trans_t		*tp;
	int			error, nospace_error = 0;
	int			resblks;

	trace_xfs_link(tdp, target_name);

	ASSERT(!S_ISDIR(VFS_I(sip)->i_mode));

	if (xfs_is_shutdown(mp))
		return -EIO;

	error = xfs_qm_dqattach(sip);
	if (error)
		goto std_return;

	error = xfs_qm_dqattach(tdp);
	if (error)
		goto std_return;

	resblks = XFS_LINK_SPACE_RES(mp, target_name->len);
	error = xfs_trans_alloc_dir(tdp, &M_RES(mp)->tr_link, sip, &resblks,
			&tp, &nospace_error);
	if (error)
		goto std_return;

	/*
	 * If we are using project inheritance, we only allow hard link
	 * creation in our tree when the project IDs are the same; else
	 * the tree quota mechanism could be circumvented.
	 */
	if (unlikely((tdp->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
		     tdp->i_projid != sip->i_projid)) {
		/*
		 * Project quota setup skips special files which can
		 * leave inodes in a PROJINHERIT directory without a
		 * project ID set. We need to allow links to be made
		 * to these "project-less" inodes because userspace
		 * expects them to succeed after project ID setup,
		 * but everything else should be rejected.
		 */
		if (!special_file(VFS_I(sip)->i_mode) ||
		    sip->i_projid != 0) {
			error = -EXDEV;
			goto error_return;
		}
	}

	if (!resblks) {
		error = xfs_dir_canenter(tp, tdp, target_name);
		if (error)
			goto error_return;
	}

	/*
	 * Handle initial link state of O_TMPFILE inode
	 */
	if (VFS_I(sip)->i_nlink == 0) {
		struct xfs_perag	*pag;

		pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, sip->i_ino));
		error = xfs_iunlink_remove(tp, pag, sip);
		xfs_perag_put(pag);
		if (error)
			goto error_return;
	}

	error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino,
				   resblks);
	if (error)
		goto error_return;
	xfs_trans_ichgtime(tp, tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE);

	xfs_bumplink(tp, sip);

	/*
	 * If this is a synchronous mount, make sure that the
	 * link transaction goes to disk before returning to
	 * the user.
	 */
	if (xfs_has_wsync(mp) || xfs_has_dirsync(mp))
		xfs_trans_set_sync(tp);

	return xfs_trans_commit(tp);

error_return:
	xfs_trans_cancel(tp);
std_return:
	if (error == -ENOSPC && nospace_error)
		error = nospace_error;
	return error;
}

/* Clear the reflink flag and the cowblocks tag if possible. */
static void
xfs_itruncate_clear_reflink_flags(
	struct xfs_inode	*ip)
{
	struct xfs_ifork	*dfork;
	struct xfs_ifork	*cfork;

	if (!xfs_is_reflink_inode(ip))
		return;
	dfork = xfs_ifork_ptr(ip, XFS_DATA_FORK);
	cfork = xfs_ifork_ptr(ip, XFS_COW_FORK);
	if (dfork->if_bytes == 0 && cfork->if_bytes == 0)
		ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
	if (cfork->if_bytes == 0)
		xfs_inode_clear_cowblocks_tag(ip);
}

/*
 * Free up the underlying blocks past new_size. The new size must be smaller
 * than the current size. This routine can be used both for the attribute and
 * data fork, and does not modify the inode size, which is left to the caller.
 *
 * The transaction passed to this routine must have made a permanent log
 * reservation of at least XFS_ITRUNCATE_LOG_RES. This routine may commit the
 * given transaction and start new ones, so make sure everything involved in
 * the transaction is tidy before calling here. A transaction will be
 * returned to the caller to be committed. The incoming transaction must
 * already include the inode, and both inode locks must be held exclusively.
 * The inode must also be "held" within the transaction. On return the inode
 * will be "held" within the returned transaction. This routine does NOT
 * require any disk space to be reserved for it within the transaction.
 *
 * If we get an error, we must return with the inode locked and linked into the
 * current transaction. This keeps things simple for the higher level code,
 * because it always knows that the inode is locked and held in the transaction
 * that returns to it whether errors occur or not. We don't mark the inode
 * dirty on error so that transactions can be easily aborted if possible.
 */
int
xfs_itruncate_extents_flags(
	struct xfs_trans	**tpp,
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_fsize_t		new_size,
	int			flags)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp = *tpp;
	xfs_fileoff_t		first_unmap_block;
	xfs_filblks_t		unmap_len;
	int			error = 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(!atomic_read(&VFS_I(ip)->i_count) ||
	       xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(new_size <= XFS_ISIZE(ip));
	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(ip->i_itemp != NULL);
	ASSERT(ip->i_itemp->ili_lock_flags == 0);
	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));

	trace_xfs_itruncate_extents_start(ip, new_size);

	flags |= xfs_bmapi_aflag(whichfork);

	/*
	 * Since it is possible for space to become allocated beyond
	 * the end of the file (in a crash where the space is allocated
	 * but the inode size is not yet updated), simply remove any
	 * blocks which show up between the new EOF and the maximum
	 * possible file size.
	 *
	 * We have to free all the blocks to the bmbt maximum offset, even if
	 * the page cache can't scale that far.
	 */
	first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
	if (!xfs_verify_fileoff(mp, first_unmap_block)) {
		WARN_ON_ONCE(first_unmap_block > XFS_MAX_FILEOFF);
		return 0;
	}

	unmap_len = XFS_MAX_FILEOFF - first_unmap_block + 1;
	while (unmap_len > 0) {
		ASSERT(tp->t_highest_agno == NULLAGNUMBER);
		error = __xfs_bunmapi(tp, ip, first_unmap_block, &unmap_len,
				flags, XFS_ITRUNC_MAX_EXTENTS);
		if (error)
			goto out;

		/* free the just unmapped extents */
		error = xfs_defer_finish(&tp);
		if (error)
			goto out;
	}

	if (whichfork == XFS_DATA_FORK) {
		/* Remove all pending CoW reservations. */
		error = xfs_reflink_cancel_cow_blocks(ip, &tp,
				first_unmap_block, XFS_MAX_FILEOFF, true);
		if (error)
			goto out;

		xfs_itruncate_clear_reflink_flags(ip);
	}

	/*
	 * Always re-log the inode so that our permanent transaction can keep
	 * on rolling it forward in the log.
	 */
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	trace_xfs_itruncate_extents_end(ip, new_size);

out:
	*tpp = tp;
	return error;
}

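/*
 * Called when a reference to the inode's struct file is released. Initiates
 * writeout of previously truncated files and trims speculative post-EOF
 * preallocations that are no longer needed.
 */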
1422 int
xfs_release(xfs_inode_t * ip)1423 xfs_release(
1424 xfs_inode_t *ip)
1425 {
1426 xfs_mount_t *mp = ip->i_mount;
1427 int error = 0;
1428
1429 if (!S_ISREG(VFS_I(ip)->i_mode) || (VFS_I(ip)->i_mode == 0))
1430 return 0;
1431
1432 /* If this is a read-only mount, don't do this (would generate I/O) */
1433 if (xfs_is_readonly(mp))
1434 return 0;
1435
1436 if (!xfs_is_shutdown(mp)) {
1437 int truncated;
1438
1439 /*
1440 * If we previously truncated this file and removed old data
1441 * in the process, we want to initiate "early" writeout on
1442 * the last close. This is an attempt to combat the notorious
1443 * NULL files problem which is particularly noticeable from a
1444 * truncate down, buffered (re-)write (delalloc), followed by
1445 * a crash. What we are effectively doing here is
1446 * significantly reducing the time window where we'd otherwise
1447 * be exposed to that problem.
1448 */
1449 truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
1450 if (truncated) {
1451 xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE);
1452 if (ip->i_delayed_blks > 0) {
1453 error = filemap_flush(VFS_I(ip)->i_mapping);
1454 if (error)
1455 return error;
1456 }
1457 }
1458 }
1459
1460 if (VFS_I(ip)->i_nlink == 0)
1461 return 0;
1462
1463 /*
1464 * If we can't get the iolock just skip truncating the blocks past EOF
1465 * because we could deadlock with the mmap_lock otherwise. We'll get
1466 * another chance to drop them once the last reference to the inode is
1467 * dropped, so we'll never leak blocks permanently.
1468 */
1469 if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL))
1470 return 0;
1471
1472 if (xfs_can_free_eofblocks(ip)) {
1473 /*
1474 * Check if the inode is being opened, written and closed
1475 * frequently and we have delayed allocation blocks outstanding
1476 * (e.g. streaming writes from the NFS server), truncating the
1477 * blocks past EOF will cause fragmentation to occur.
1478 *
1479 * In this case don't do the truncation, but we have to be
1480 * careful how we detect this case. Blocks beyond EOF show up as
1481 * i_delayed_blks even when the inode is clean, so we need to
1482 * truncate them away first before checking for a dirty release.
1483 * Hence on the first dirty close we will still remove the
1484 * speculative allocation, but after that we will leave it in
1485 * place.
1486 */
1487 if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE))
1488 goto out_unlock;
1489
1490 error = xfs_free_eofblocks(ip);
1491 if (error)
1492 goto out_unlock;
1493
1494 /* delalloc blocks after truncation means it really is dirty */
1495 if (ip->i_delayed_blks)
1496 xfs_iflags_set(ip, XFS_IDIRTY_RELEASE);
1497 }
1498
1499 out_unlock:
1500 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
1501 return error;
1502 }
1503
1504 /*
1505 * xfs_inactive_truncate
1506 *
1507 * Called to perform a truncate when an inode becomes unlinked.
1508 */
1509 STATIC int
xfs_inactive_truncate(struct xfs_inode * ip)1510 xfs_inactive_truncate(
1511 struct xfs_inode *ip)
1512 {
1513 struct xfs_mount *mp = ip->i_mount;
1514 struct xfs_trans *tp;
1515 int error;
1516
1517 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
1518 if (error) {
1519 ASSERT(xfs_is_shutdown(mp));
1520 return error;
1521 }
1522 xfs_ilock(ip, XFS_ILOCK_EXCL);
1523 xfs_trans_ijoin(tp, ip, 0);
1524
1525 /*
1526 * Log the inode size first to prevent stale data exposure in the event
1527 * of a system crash before the truncate completes. See the related
1528 * comment in xfs_vn_setattr_size() for details.
1529 */
1530 ip->i_disk_size = 0;
1531 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1532
1533 error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
1534 if (error)
1535 goto error_trans_cancel;
1536
1537 ASSERT(ip->i_df.if_nextents == 0);
1538
1539 error = xfs_trans_commit(tp);
1540 if (error)
1541 goto error_unlock;
1542
1543 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1544 return 0;
1545
1546 error_trans_cancel:
1547 xfs_trans_cancel(tp);
1548 error_unlock:
1549 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1550 return error;
1551 }
1552
1553 /*
1554 * xfs_inactive_ifree()
1555 *
1556 * Perform the inode free when an inode is unlinked.
1557 */
1558 STATIC int
xfs_inactive_ifree(struct xfs_inode * ip)1559 xfs_inactive_ifree(
1560 struct xfs_inode *ip)
1561 {
1562 struct xfs_mount *mp = ip->i_mount;
1563 struct xfs_trans *tp;
1564 int error;
1565
1566 /*
1567 * We try to use a per-AG reservation for any block needed by the finobt
1568 * tree, but as the finobt feature predates the per-AG reservation
1569 * support a degraded file system might not have enough space for the
1570 * reservation at mount time. In that case try to dip into the reserved
1571 * pool and pray.
1572 *
1573 * Send a warning if the reservation does happen to fail, as the inode
1574 * now remains allocated and sits on the unlinked list until the fs is
1575 * repaired.
1576 */
1577 if (unlikely(mp->m_finobt_nores)) {
1578 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree,
1579 XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE,
1580 &tp);
1581 } else {
1582 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 0, 0, 0, &tp);
1583 }
1584 if (error) {
1585 if (error == -ENOSPC) {
1586 xfs_warn_ratelimited(mp,
1587 "Failed to remove inode(s) from unlinked list. "
1588 "Please free space, unmount and run xfs_repair.");
1589 } else {
1590 ASSERT(xfs_is_shutdown(mp));
1591 }
1592 return error;
1593 }
1594
1595 /*
1596 * We do not hold the inode locked across the entire rolling transaction
1597 * here. We only need to hold it for the first transaction that
1598 * xfs_ifree() builds, which may mark the inode XFS_ISTALE if the
1599 * underlying cluster buffer is freed. Relogging an XFS_ISTALE inode
1600 * here breaks the relationship between cluster buffer invalidation and
1601 * stale inode invalidation on cluster buffer item journal commit
1602 * completion, and can result in leaving dirty stale inodes hanging
1603 * around in memory.
1604 *
1605 * We have no need for serialising this inode operation against other
1606 * operations - we freed the inode and hence reallocation is required
1607 * and that will serialise on reallocating the space the deferops need
1608 * to free. Hence we can unlock the inode on the first commit of
1609 * the transaction rather than roll it right through the deferops. This
1610 * avoids relogging the XFS_ISTALE inode.
1611 *
1612 * We check that xfs_ifree() hasn't grown an internal transaction roll
1613 * by asserting that the inode is still locked when it returns.
1614 */
1615 xfs_ilock(ip, XFS_ILOCK_EXCL);
1616 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
1617
1618 error = xfs_ifree(tp, ip);
1619 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1620 if (error) {
1621 /*
1622 * If we fail to free the inode, shut down. The cancel
1623 * might do that, we need to make sure. Otherwise the
1624 * inode might be lost for a long time or forever.
1625 */
1626 if (!xfs_is_shutdown(mp)) {
1627 xfs_notice(mp, "%s: xfs_ifree returned error %d",
1628 __func__, error);
1629 xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
1630 }
1631 xfs_trans_cancel(tp);
1632 return error;
1633 }
1634
1635 /*
1636 * Credit the quota account(s). The inode is gone.
1637 */
1638 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);
1639
1640 return xfs_trans_commit(tp);
1641 }
1642
1643 /*
1644 * Returns true if we need to update the on-disk metadata before we can free
1645 * the memory used by this inode. Updates include freeing post-eof
1646 * preallocations; freeing COW staging extents; and marking the inode free in
1647 * the inobt if it is on the unlinked list.
1648 */
1649 bool
xfs_inode_needs_inactive(struct xfs_inode * ip)1650 xfs_inode_needs_inactive(
1651 struct xfs_inode *ip)
1652 {
1653 struct xfs_mount *mp = ip->i_mount;
1654 struct xfs_ifork *cow_ifp = xfs_ifork_ptr(ip, XFS_COW_FORK);
1655
1656 /*
1657 * If the inode is already free, then there can be nothing
1658 * to clean up here.
1659 */
1660 if (VFS_I(ip)->i_mode == 0)
1661 return false;
1662
1663 /*
1664 * If this is a read-only mount, don't do this (would generate I/O)
1665 * unless we're in log recovery and cleaning the iunlinked list.
1666 */
1667 if (xfs_is_readonly(mp) && !xlog_recovery_needed(mp->m_log))
1668 return false;
1669
1670 /* If the log isn't running, push inodes straight to reclaim. */
1671 if (xfs_is_shutdown(mp) || xfs_has_norecovery(mp))
1672 return false;
1673
1674 /* Metadata inodes require explicit resource cleanup. */
1675 if (xfs_is_metadata_inode(ip))
1676 return false;
1677
1678 /* Want to clean out the cow blocks if there are any. */
1679 if (cow_ifp && cow_ifp->if_bytes > 0)
1680 return true;
1681
1682 /* Unlinked files must be freed. */
1683 if (VFS_I(ip)->i_nlink == 0)
1684 return true;
1685
1686 /*
1687 * This file isn't being freed, so check if there are post-eof blocks
1688 * to free.
1689 *
1690 * Note: don't bother with iolock here since lockdep complains about
1691 * acquiring it in reclaim context. We have the only reference to the
1692 * inode at this point anyways.
1693 */
1694 return xfs_can_free_eofblocks(ip);
1695 }
1696
1697 /*
1698 * xfs_inactive
1699 *
1700 * This is called when the vnode reference count for the vnode
1701 * goes to zero. If the file has been unlinked, then it must
1702 * now be truncated. Also, we clear all of the read-ahead state
1703 * kept for the inode here since the file is now closed.
1704 */
1705 int
xfs_inactive(xfs_inode_t * ip)1706 xfs_inactive(
1707 xfs_inode_t *ip)
1708 {
1709 struct xfs_mount *mp;
1710 int error = 0;
1711 int truncate = 0;
1712
1713 /*
1714 * If the inode is already free, then there can be nothing
1715 * to clean up here.
1716 */
1717 if (VFS_I(ip)->i_mode == 0) {
1718 ASSERT(ip->i_df.if_broot_bytes == 0);
1719 goto out;
1720 }
1721
1722 mp = ip->i_mount;
1723 ASSERT(!xfs_iflags_test(ip, XFS_IRECOVERY));
1724
1725 /*
1726 * If this is a read-only mount, don't do this (would generate I/O)
1727 * unless we're in log recovery and cleaning the iunlinked list.
1728 */
1729 if (xfs_is_readonly(mp) && !xlog_recovery_needed(mp->m_log))
1730 goto out;
1731
1732 /* Metadata inodes require explicit resource cleanup. */
1733 if (xfs_is_metadata_inode(ip))
1734 goto out;
1735
1736 /* Try to clean out the cow blocks if there are any. */
1737 if (xfs_inode_has_cow_data(ip))
1738 xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true);
1739
1740 if (VFS_I(ip)->i_nlink != 0) {
1741 /*
1742 * Note: don't bother with iolock here since lockdep complains
1743 * about acquiring it in reclaim context. We have the only
1744 * reference to the inode at this point anyways.
1745 */
1746 if (xfs_can_free_eofblocks(ip))
1747 error = xfs_free_eofblocks(ip);
1748
1749 goto out;
1750 }
1751
1752 if (S_ISREG(VFS_I(ip)->i_mode) &&
1753 (ip->i_disk_size != 0 || XFS_ISIZE(ip) != 0 ||
1754 ip->i_df.if_nextents > 0 || ip->i_delayed_blks > 0))
1755 truncate = 1;
1756
1757 if (xfs_iflags_test(ip, XFS_IQUOTAUNCHECKED)) {
1758 /*
1759 * If this inode is being inactivated during a quotacheck and
1760 * has not yet been scanned by quotacheck, we /must/ remove
1761 * the dquots from the inode before inactivation changes the
1762 * block and inode counts. Most probably this is a result of
1763 * reloading the incore iunlinked list to purge unrecovered
1764 * unlinked inodes.
1765 */
1766 xfs_qm_dqdetach(ip);
1767 } else {
1768 error = xfs_qm_dqattach(ip);
1769 if (error)
1770 goto out;
1771 }
1772
1773 if (S_ISLNK(VFS_I(ip)->i_mode))
1774 error = xfs_inactive_symlink(ip);
1775 else if (truncate)
1776 error = xfs_inactive_truncate(ip);
1777 if (error)
1778 goto out;
1779
1780 /*
1781 * If there are attributes associated with the file then blow them away
1782 * now. The code calls a routine that recursively deconstructs the
1783 * attribute fork. If also blows away the in-core attribute fork.
1784 */
1785 if (xfs_inode_has_attr_fork(ip)) {
1786 error = xfs_attr_inactive(ip);
1787 if (error)
1788 goto out;
1789 }
1790
1791 ASSERT(ip->i_forkoff == 0);
1792
1793 /*
1794 * Free the inode.
1795 */
1796 error = xfs_inactive_ifree(ip);
1797
1798 out:
1799 /*
1800 * We're done making metadata updates for this inode, so we can release
1801 * the attached dquots.
1802 */
1803 xfs_qm_dqdetach(ip);
1804 return error;
1805 }
1806
1807 /*
1808 * In-Core Unlinked List Lookups
1809 * =============================
1810 *
1811 * Every inode is supposed to be reachable from some other piece of metadata
1812 * with the exception of the root directory. Inodes with a connection to a
1813 * file descriptor but not linked from anywhere in the on-disk directory tree
1814 * are collectively known as unlinked inodes, though the filesystem itself
1815 * maintains links to these inodes so that on-disk metadata are consistent.
1816 *
1817 * XFS implements a per-AG on-disk hash table of unlinked inodes. The AGI
1818 * header contains a number of buckets that point to an inode, and each inode
1819 * record has a pointer to the next inode in the hash chain. This
1820 * singly-linked list causes scaling problems in the iunlink remove function
1821 * because we must walk that list to find the inode that points to the inode
1822 * being removed from the unlinked hash bucket list.
1823 *
1824  * Hence we keep an in-memory doubly linked list to link each inode on an
1825  * unlinked list. Because there are 64 unlinked lists per AGI, keeping
1826  * pointer-based lists would require 64 list heads in the perag, one for each
1827 * list. This is expensive in terms of memory (think millions of AGs) and cache
1828 * misses on lookups. Instead, use the fact that inodes on the unlinked list
1829 * must be referenced at the VFS level to keep them on the list and hence we
1830 * have an existence guarantee for inodes on the unlinked list.
1831 *
1832 * Given we have an existence guarantee, we can use lockless inode cache lookups
1833 * to resolve aginos to xfs inodes. This means we only need 8 bytes per inode
1834  * for the doubly linked unlinked list, and we don't need any extra locking to
1835 * keep the list safe as all manipulations are done under the AGI buffer lock.
1836 * Keeping the list up to date does not require memory allocation, just finding
1837 * the XFS inode and updating the next/prev unlinked list aginos.
1838 */
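
/*
 * Illustrative sketch (not kernel code, never compiled): a minimal userspace
 * model of the doubly linked unlinked list described above. Bucket heads live
 * in a fake AGI; each inode stores only next/prev agino values, mirroring
 * i_next_unlinked and i_prev_unlinked. All names below are hypothetical.
 */
#if 0
#define FAKE_NULLAGINO		((unsigned int)-1)
#define FAKE_UNLINKED_BUCKETS	64

struct fake_inode {
	unsigned int	agino;
	unsigned int	next_unlinked;	/* models ip->i_next_unlinked */
	unsigned int	prev_unlinked;	/* models ip->i_prev_unlinked */
};

/* All bucket heads start out as FAKE_NULLAGINO (empty lists). */
static unsigned int		fake_bucket[FAKE_UNLINKED_BUCKETS];
static struct fake_inode	*fake_icache[1024];	/* models the perag inode cache */

/* Insert at the head of a bucket list, as xfs_iunlink_insert_inode() does. */
static void fake_iunlink(struct fake_inode *ip)
{
	unsigned int	bucket = ip->agino % FAKE_UNLINKED_BUCKETS;
	unsigned int	next = fake_bucket[bucket];

	if (next != FAKE_NULLAGINO)
		fake_icache[next]->prev_unlinked = ip->agino;	/* backref */
	ip->next_unlinked = next;
	ip->prev_unlinked = FAKE_NULLAGINO;
	fake_bucket[bucket] = ip->agino;
}

/* Remove from anywhere in a bucket list without walking it. */
static void fake_iunlink_remove(struct fake_inode *ip)
{
	unsigned int	bucket = ip->agino % FAKE_UNLINKED_BUCKETS;

	if (ip->next_unlinked != FAKE_NULLAGINO)
		fake_icache[ip->next_unlinked]->prev_unlinked = ip->prev_unlinked;
	if (ip->prev_unlinked != FAKE_NULLAGINO)
		fake_icache[ip->prev_unlinked]->next_unlinked = ip->next_unlinked;
	else
		fake_bucket[bucket] = ip->next_unlinked;	/* we were the head */
	ip->next_unlinked = FAKE_NULLAGINO;
	ip->prev_unlinked = FAKE_NULLAGINO;
}
#endif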
1839
1840 /*
1841 * Find an inode on the unlinked list. This does not take references to the
1842  * inode as we have an existence guarantee: the AGI buffer lock is held and
1843 * only unlinked, referenced inodes can be on the unlinked inode list. If we
1844 * don't find the inode in cache, then let the caller handle the situation.
1845 */
1846 static struct xfs_inode *
1847 xfs_iunlink_lookup(
1848 struct xfs_perag *pag,
1849 xfs_agino_t agino)
1850 {
1851 struct xfs_inode *ip;
1852
1853 rcu_read_lock();
1854 ip = radix_tree_lookup(&pag->pag_ici_root, agino);
1855 if (!ip) {
1856 /* Caller can handle inode not being in memory. */
1857 rcu_read_unlock();
1858 return NULL;
1859 }
1860
1861 /*
1862 * Inode in RCU freeing limbo should not happen. Warn about this and
1863 * let the caller handle the failure.
1864 */
1865 if (WARN_ON_ONCE(!ip->i_ino)) {
1866 rcu_read_unlock();
1867 return NULL;
1868 }
1869 ASSERT(!xfs_iflags_test(ip, XFS_IRECLAIMABLE | XFS_IRECLAIM));
1870 rcu_read_unlock();
1871 return ip;
1872 }
1873
1874 /*
1875 * Update the prev pointer of the next agino. Returns -ENOLINK if the inode
1876 * is not in cache.
1877 */
1878 static int
1879 xfs_iunlink_update_backref(
1880 struct xfs_perag *pag,
1881 xfs_agino_t prev_agino,
1882 xfs_agino_t next_agino)
1883 {
1884 struct xfs_inode *ip;
1885
1886 /* No update necessary if we are at the end of the list. */
1887 if (next_agino == NULLAGINO)
1888 return 0;
1889
1890 ip = xfs_iunlink_lookup(pag, next_agino);
1891 if (!ip)
1892 return -ENOLINK;
1893
1894 ip->i_prev_unlinked = prev_agino;
1895 return 0;
1896 }
1897
1898 /*
1899 * Point the AGI unlinked bucket at an inode and log the results. The caller
1900 * is responsible for validating the old value.
1901 */
1902 STATIC int
1903 xfs_iunlink_update_bucket(
1904 struct xfs_trans *tp,
1905 struct xfs_perag *pag,
1906 struct xfs_buf *agibp,
1907 unsigned int bucket_index,
1908 xfs_agino_t new_agino)
1909 {
1910 struct xfs_agi *agi = agibp->b_addr;
1911 xfs_agino_t old_value;
1912 int offset;
1913
1914 ASSERT(xfs_verify_agino_or_null(pag, new_agino));
1915
1916 old_value = be32_to_cpu(agi->agi_unlinked[bucket_index]);
1917 trace_xfs_iunlink_update_bucket(tp->t_mountp, pag->pag_agno, bucket_index,
1918 old_value, new_agino);
1919
1920 /*
1921 * We should never find the head of the list already set to the value
1922 * passed in because either we're adding or removing ourselves from the
1923 * head of the list.
1924 */
1925 if (old_value == new_agino) {
1926 xfs_buf_mark_corrupt(agibp);
1927 return -EFSCORRUPTED;
1928 }
1929
1930 agi->agi_unlinked[bucket_index] = cpu_to_be32(new_agino);
1931 offset = offsetof(struct xfs_agi, agi_unlinked) +
1932 (sizeof(xfs_agino_t) * bucket_index);
1933 xfs_trans_log_buf(tp, agibp, offset, offset + sizeof(xfs_agino_t) - 1);
1934 return 0;
1935 }
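
/*
 * Worked example for the logging range above (hypothetical numbers): if
 * agi_unlinked sat at byte offset 40 in struct xfs_agi and bucket_index
 * were 3, the dirtied range would be
 *
 *	offset = 40 + 3 * sizeof(xfs_agino_t) = 52
 *	last   = 52 + sizeof(xfs_agino_t) - 1 = 55
 *
 * i.e. exactly the four bytes of that one bucket slot, so updating a bucket
 * head does not dirty the rest of the AGI buffer in the log item.
 */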
1936
1937 /*
1938 * Load the inode @next_agino into the cache and set its prev_unlinked pointer
1939 * to @prev_agino. Caller must hold the AGI to synchronize with other changes
1940 * to the unlinked list.
1941 */
1942 STATIC int
1943 xfs_iunlink_reload_next(
1944 struct xfs_trans *tp,
1945 struct xfs_buf *agibp,
1946 xfs_agino_t prev_agino,
1947 xfs_agino_t next_agino)
1948 {
1949 struct xfs_perag *pag = agibp->b_pag;
1950 struct xfs_mount *mp = pag->pag_mount;
1951 struct xfs_inode *next_ip = NULL;
1952 xfs_ino_t ino;
1953 int error;
1954
1955 ASSERT(next_agino != NULLAGINO);
1956
1957 #ifdef DEBUG
1958 rcu_read_lock();
1959 next_ip = radix_tree_lookup(&pag->pag_ici_root, next_agino);
1960 ASSERT(next_ip == NULL);
1961 rcu_read_unlock();
1962 #endif
1963
1964 xfs_info_ratelimited(mp,
1965 "Found unrecovered unlinked inode 0x%x in AG 0x%x. Initiating recovery.",
1966 next_agino, pag->pag_agno);
1967
1968 /*
1969 * Use an untrusted lookup just to be cautious in case the AGI has been
1970 * corrupted and now points at a free inode. That shouldn't happen,
1971 * but we'd rather shut down now since we're already running in a weird
1972 * situation.
1973 */
1974 ino = XFS_AGINO_TO_INO(mp, pag->pag_agno, next_agino);
1975 error = xfs_iget(mp, tp, ino, XFS_IGET_UNTRUSTED, 0, &next_ip);
1976 if (error)
1977 return error;
1978
1979 /* If this is not an unlinked inode, something is very wrong. */
1980 if (VFS_I(next_ip)->i_nlink != 0) {
1981 error = -EFSCORRUPTED;
1982 goto rele;
1983 }
1984
1985 next_ip->i_prev_unlinked = prev_agino;
1986 trace_xfs_iunlink_reload_next(next_ip);
1987 rele:
1988 ASSERT(!(VFS_I(next_ip)->i_state & I_DONTCACHE));
1989 if (xfs_is_quotacheck_running(mp) && next_ip)
1990 xfs_iflags_set(next_ip, XFS_IQUOTAUNCHECKED);
1991 xfs_irele(next_ip);
1992 return error;
1993 }
1994
1995 static int
1996 xfs_iunlink_insert_inode(
1997 struct xfs_trans *tp,
1998 struct xfs_perag *pag,
1999 struct xfs_buf *agibp,
2000 struct xfs_inode *ip)
2001 {
2002 struct xfs_mount *mp = tp->t_mountp;
2003 struct xfs_agi *agi = agibp->b_addr;
2004 xfs_agino_t next_agino;
2005 xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
2006 short bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
2007 int error;
2008
2009 /*
2010 * Get the index into the agi hash table for the list this inode will
2011 * go on. Make sure the pointer isn't garbage and that this inode
2012 * isn't already on the list.
2013 */
2014 next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
2015 if (next_agino == agino ||
2016 !xfs_verify_agino_or_null(pag, next_agino)) {
2017 xfs_buf_mark_corrupt(agibp);
2018 return -EFSCORRUPTED;
2019 }
2020
2021 /*
2022 * Update the prev pointer in the next inode to point back to this
2023 * inode.
2024 */
2025 error = xfs_iunlink_update_backref(pag, agino, next_agino);
2026 if (error == -ENOLINK)
2027 error = xfs_iunlink_reload_next(tp, agibp, agino, next_agino);
2028 if (error)
2029 return error;
2030
2031 if (next_agino != NULLAGINO) {
2032 /*
2033 * There is already another inode in the bucket, so point this
2034 * inode to the current head of the list.
2035 */
2036 error = xfs_iunlink_log_inode(tp, ip, pag, next_agino);
2037 if (error)
2038 return error;
2039 ip->i_next_unlinked = next_agino;
2040 }
2041
2042 	/* Point the head of the list at this inode. */
2043 ip->i_prev_unlinked = NULLAGINO;
2044 return xfs_iunlink_update_bucket(tp, pag, agibp, bucket_index, agino);
2045 }
2046
2047 /*
2048 * This is called when the inode's link count has gone to 0 or we are creating
2049 * a tmpfile via O_TMPFILE. The inode @ip must have nlink == 0.
2050 *
2051 * We place the on-disk inode on a list in the AGI. It will be pulled from this
2052 * list when the inode is freed.
2053 */
2054 STATIC int
2055 xfs_iunlink(
2056 struct xfs_trans *tp,
2057 struct xfs_inode *ip)
2058 {
2059 struct xfs_mount *mp = tp->t_mountp;
2060 struct xfs_perag *pag;
2061 struct xfs_buf *agibp;
2062 int error;
2063
2064 ASSERT(VFS_I(ip)->i_nlink == 0);
2065 ASSERT(VFS_I(ip)->i_mode != 0);
2066 trace_xfs_iunlink(ip);
2067
2068 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
2069
2070 /* Get the agi buffer first. It ensures lock ordering on the list. */
2071 error = xfs_read_agi(pag, tp, &agibp);
2072 if (error)
2073 goto out;
2074
2075 error = xfs_iunlink_insert_inode(tp, pag, agibp, ip);
2076 out:
2077 xfs_perag_put(pag);
2078 return error;
2079 }
2080
2081 static int
2082 xfs_iunlink_remove_inode(
2083 struct xfs_trans *tp,
2084 struct xfs_perag *pag,
2085 struct xfs_buf *agibp,
2086 struct xfs_inode *ip)
2087 {
2088 struct xfs_mount *mp = tp->t_mountp;
2089 struct xfs_agi *agi = agibp->b_addr;
2090 xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
2091 xfs_agino_t head_agino;
2092 short bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
2093 int error;
2094
2095 trace_xfs_iunlink_remove(ip);
2096
2097 /*
2098 	 * Get the index into the agi hash table for the list this inode is
2099 	 * on. Make sure the head pointer isn't garbage.
2100 */
2101 head_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
2102 if (!xfs_verify_agino(pag, head_agino)) {
2103 XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
2104 agi, sizeof(*agi));
2105 return -EFSCORRUPTED;
2106 }
2107
2108 /*
2109 * Set our inode's next_unlinked pointer to NULL and then return
2110 * the old pointer value so that we can update whatever was previous
2111 * to us in the list to point to whatever was next in the list.
2112 */
2113 error = xfs_iunlink_log_inode(tp, ip, pag, NULLAGINO);
2114 if (error)
2115 return error;
2116
2117 /*
2118 * Update the prev pointer in the next inode to point back to previous
2119 * inode in the chain.
2120 */
2121 error = xfs_iunlink_update_backref(pag, ip->i_prev_unlinked,
2122 ip->i_next_unlinked);
2123 if (error == -ENOLINK)
2124 error = xfs_iunlink_reload_next(tp, agibp, ip->i_prev_unlinked,
2125 ip->i_next_unlinked);
2126 if (error)
2127 return error;
2128
2129 if (head_agino != agino) {
2130 struct xfs_inode *prev_ip;
2131
2132 prev_ip = xfs_iunlink_lookup(pag, ip->i_prev_unlinked);
2133 if (!prev_ip)
2134 return -EFSCORRUPTED;
2135
2136 error = xfs_iunlink_log_inode(tp, prev_ip, pag,
2137 ip->i_next_unlinked);
2138 prev_ip->i_next_unlinked = ip->i_next_unlinked;
2139 } else {
2140 /* Point the head of the list to the next unlinked inode. */
2141 error = xfs_iunlink_update_bucket(tp, pag, agibp, bucket_index,
2142 ip->i_next_unlinked);
2143 }
2144
2145 ip->i_next_unlinked = NULLAGINO;
2146 ip->i_prev_unlinked = 0;
2147 return error;
2148 }
2149
2150 /*
2151 * Pull the on-disk inode from the AGI unlinked list.
2152 */
2153 STATIC int
2154 xfs_iunlink_remove(
2155 struct xfs_trans *tp,
2156 struct xfs_perag *pag,
2157 struct xfs_inode *ip)
2158 {
2159 struct xfs_buf *agibp;
2160 int error;
2161
2162 trace_xfs_iunlink_remove(ip);
2163
2164 /* Get the agi buffer first. It ensures lock ordering on the list. */
2165 error = xfs_read_agi(pag, tp, &agibp);
2166 if (error)
2167 return error;
2168
2169 return xfs_iunlink_remove_inode(tp, pag, agibp, ip);
2170 }
2171
2172 /*
2173  * Look up the specified inode number and, if it is not already marked
2174  * XFS_ISTALE, mark it stale. We should only find clean inodes in this lookup
2175  * that aren't already stale.
2176 */
2177 static void
2178 xfs_ifree_mark_inode_stale(
2179 struct xfs_perag *pag,
2180 struct xfs_inode *free_ip,
2181 xfs_ino_t inum)
2182 {
2183 struct xfs_mount *mp = pag->pag_mount;
2184 struct xfs_inode_log_item *iip;
2185 struct xfs_inode *ip;
2186
2187 retry:
2188 rcu_read_lock();
2189 ip = radix_tree_lookup(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, inum));
2190
2191 /* Inode not in memory, nothing to do */
2192 if (!ip) {
2193 rcu_read_unlock();
2194 return;
2195 }
2196
2197 /*
2198 	 * Because this is an RCU-protected lookup, we could find a recently
2199 * freed or even reallocated inode during the lookup. We need to check
2200 * under the i_flags_lock for a valid inode here. Skip it if it is not
2201 * valid, the wrong inode or stale.
2202 */
2203 spin_lock(&ip->i_flags_lock);
2204 if (ip->i_ino != inum || __xfs_iflags_test(ip, XFS_ISTALE))
2205 goto out_iflags_unlock;
2206
2207 /*
2208 	 * Don't try to lock/unlock the current inode, but we _cannot_ skip any
2209 	 * other inode that we did not find in the list attached to the buffer
2210 	 * and that is not already marked stale. If we can't lock it, back off
2211 	 * and retry.
2212 */
2213 if (ip != free_ip) {
2214 if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
2215 spin_unlock(&ip->i_flags_lock);
2216 rcu_read_unlock();
2217 delay(1);
2218 goto retry;
2219 }
2220 }
2221 ip->i_flags |= XFS_ISTALE;
2222
2223 /*
2224 * If the inode is flushing, it is already attached to the buffer. All
2225 	 * we need to do here is mark the inode stale so buffer IO completion
2226 * will remove it from the AIL.
2227 */
2228 iip = ip->i_itemp;
2229 if (__xfs_iflags_test(ip, XFS_IFLUSHING)) {
2230 ASSERT(!list_empty(&iip->ili_item.li_bio_list));
2231 ASSERT(iip->ili_last_fields);
2232 goto out_iunlock;
2233 }
2234
2235 /*
2236 * Inodes not attached to the buffer can be released immediately.
2237 * Everything else has to go through xfs_iflush_abort() on journal
2238 * commit as the flock synchronises removal of the inode from the
2239 * cluster buffer against inode reclaim.
2240 */
2241 if (!iip || list_empty(&iip->ili_item.li_bio_list))
2242 goto out_iunlock;
2243
2244 __xfs_iflags_set(ip, XFS_IFLUSHING);
2245 spin_unlock(&ip->i_flags_lock);
2246 rcu_read_unlock();
2247
2248 /* we have a dirty inode in memory that has not yet been flushed. */
2249 spin_lock(&iip->ili_lock);
2250 iip->ili_last_fields = iip->ili_fields;
2251 iip->ili_fields = 0;
2252 iip->ili_fsync_fields = 0;
2253 spin_unlock(&iip->ili_lock);
2254 ASSERT(iip->ili_last_fields);
2255
2256 if (ip != free_ip)
2257 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2258 return;
2259
2260 out_iunlock:
2261 if (ip != free_ip)
2262 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2263 out_iflags_unlock:
2264 spin_unlock(&ip->i_flags_lock);
2265 rcu_read_unlock();
2266 }
2267
2268 /*
2269 * A big issue when freeing the inode cluster is that we _cannot_ skip any
2270 * inodes that are in memory - they all must be marked stale and attached to
2271 * the cluster buffer.
2272 */
2273 static int
2274 xfs_ifree_cluster(
2275 struct xfs_trans *tp,
2276 struct xfs_perag *pag,
2277 struct xfs_inode *free_ip,
2278 struct xfs_icluster *xic)
2279 {
2280 struct xfs_mount *mp = free_ip->i_mount;
2281 struct xfs_ino_geometry *igeo = M_IGEO(mp);
2282 struct xfs_buf *bp;
2283 xfs_daddr_t blkno;
2284 xfs_ino_t inum = xic->first_ino;
2285 int nbufs;
2286 int i, j;
2287 int ioffset;
2288 int error;
2289
2290 nbufs = igeo->ialloc_blks / igeo->blocks_per_cluster;
2291
2292 for (j = 0; j < nbufs; j++, inum += igeo->inodes_per_cluster) {
2293 /*
2294 * The allocation bitmap tells us which inodes of the chunk were
2295 * physically allocated. Skip the cluster if an inode falls into
2296 * a sparse region.
2297 */
2298 ioffset = inum - xic->first_ino;
2299 if ((xic->alloc & XFS_INOBT_MASK(ioffset)) == 0) {
2300 ASSERT(ioffset % igeo->inodes_per_cluster == 0);
2301 continue;
2302 }
2303
2304 blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
2305 XFS_INO_TO_AGBNO(mp, inum));
2306
2307 /*
2308 * We obtain and lock the backing buffer first in the process
2309 * here to ensure dirty inodes attached to the buffer remain in
2310 * the flushing state while we mark them stale.
2311 *
2312 * If we scan the in-memory inodes first, then buffer IO can
2313 * complete before we get a lock on it, and hence we may fail
2314 * to mark all the active inodes on the buffer stale.
2315 */
2316 error = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
2317 mp->m_bsize * igeo->blocks_per_cluster,
2318 XBF_UNMAPPED, &bp);
2319 if (error)
2320 return error;
2321
2322 /*
2323 * This buffer may not have been correctly initialised as we
2324 * didn't read it from disk. That's not important because we are
2325 		 * only using it to mark the buffer as stale in the log, and to
2326 		 * attach stale cached inodes to it.
2327 *
2328 * For the inode that triggered the cluster freeing, this
2329 * attachment may occur in xfs_inode_item_precommit() after we
2330 * have marked this buffer stale. If this buffer was not in
2331 * memory before xfs_ifree_cluster() started, it will not be
2332 * marked XBF_DONE and this will cause problems later in
2333 * xfs_inode_item_precommit() when we trip over a (stale, !done)
2334 		 * buffer attached to the transaction.
2335 		 *
2336 		 * Hence we have to mark the buffer as XBF_DONE here. This is
2337 * safe because we are also marking the buffer as XBF_STALE and
2338 * XFS_BLI_STALE. That means it will never be dispatched for
2339 * IO and it won't be unlocked until the cluster freeing has
2340 * been committed to the journal and the buffer unpinned. If it
2341 * is written, we want to know about it, and we want it to
2342 		 * fail. We can achieve this by adding a write verifier to the
2343 * buffer.
2344 */
2345 bp->b_flags |= XBF_DONE;
2346 bp->b_ops = &xfs_inode_buf_ops;
2347
2348 /*
2349 * Now we need to set all the cached clean inodes as XFS_ISTALE,
2350 * too. This requires lookups, and will skip inodes that we've
2351 * already marked XFS_ISTALE.
2352 */
2353 for (i = 0; i < igeo->inodes_per_cluster; i++)
2354 xfs_ifree_mark_inode_stale(pag, free_ip, inum + i);
2355
2356 xfs_trans_stale_inode_buf(tp, bp);
2357 xfs_trans_binval(tp, bp);
2358 }
2359 return 0;
2360 }
2361
2362 /*
2363 * This is called to return an inode to the inode free list. The inode should
2364 * already be truncated to 0 length and have no pages associated with it. This
2365 * routine also assumes that the inode is already a part of the transaction.
2366 *
2367 * The on-disk copy of the inode will have been added to the list of unlinked
2368 * inodes in the AGI. We need to remove the inode from that list atomically with
2369 * respect to freeing it here.
2370 */
2371 int
2372 xfs_ifree(
2373 struct xfs_trans *tp,
2374 struct xfs_inode *ip)
2375 {
2376 struct xfs_mount *mp = ip->i_mount;
2377 struct xfs_perag *pag;
2378 struct xfs_icluster xic = { 0 };
2379 struct xfs_inode_log_item *iip = ip->i_itemp;
2380 int error;
2381
2382 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
2383 ASSERT(VFS_I(ip)->i_nlink == 0);
2384 ASSERT(ip->i_df.if_nextents == 0);
2385 ASSERT(ip->i_disk_size == 0 || !S_ISREG(VFS_I(ip)->i_mode));
2386 ASSERT(ip->i_nblocks == 0);
2387
2388 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
2389
2390 /*
2391 * Free the inode first so that we guarantee that the AGI lock is going
2392 * to be taken before we remove the inode from the unlinked list. This
2393 * makes the AGI lock -> unlinked list modification order the same as
2394 * used in O_TMPFILE creation.
2395 */
2396 error = xfs_difree(tp, pag, ip->i_ino, &xic);
2397 if (error)
2398 goto out;
2399
2400 error = xfs_iunlink_remove(tp, pag, ip);
2401 if (error)
2402 goto out;
2403
2404 /*
2405 * Free any local-format data sitting around before we reset the
2406 * data fork to extents format. Note that the attr fork data has
2407 * already been freed by xfs_attr_inactive.
2408 */
2409 if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL) {
2410 kmem_free(ip->i_df.if_u1.if_data);
2411 ip->i_df.if_u1.if_data = NULL;
2412 ip->i_df.if_bytes = 0;
2413 }
2414
2415 VFS_I(ip)->i_mode = 0; /* mark incore inode as free */
2416 ip->i_diflags = 0;
2417 ip->i_diflags2 = mp->m_ino_geo.new_diflags2;
2418 ip->i_forkoff = 0; /* mark the attr fork not in use */
2419 ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
2420 if (xfs_iflags_test(ip, XFS_IPRESERVE_DM_FIELDS))
2421 xfs_iflags_clear(ip, XFS_IPRESERVE_DM_FIELDS);
2422
2423 /* Don't attempt to replay owner changes for a deleted inode */
2424 spin_lock(&iip->ili_lock);
2425 iip->ili_fields &= ~(XFS_ILOG_AOWNER | XFS_ILOG_DOWNER);
2426 spin_unlock(&iip->ili_lock);
2427
2428 /*
2429 * Bump the generation count so no one will be confused
2430 * by reincarnations of this inode.
2431 */
2432 VFS_I(ip)->i_generation++;
2433 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
2434
2435 if (xic.deleted)
2436 error = xfs_ifree_cluster(tp, pag, ip, &xic);
2437 out:
2438 xfs_perag_put(pag);
2439 return error;
2440 }
2441
2442 /*
2443 * This is called to unpin an inode. The caller must have the inode locked
2444 * in at least shared mode so that the buffer cannot be subsequently pinned
2445 * once someone is waiting for it to be unpinned.
2446 */
2447 static void
2448 xfs_iunpin(
2449 struct xfs_inode *ip)
2450 {
2451 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
2452
2453 trace_xfs_inode_unpin_nowait(ip, _RET_IP_);
2454
2455 /* Give the log a push to start the unpinning I/O */
2456 xfs_log_force_seq(ip->i_mount, ip->i_itemp->ili_commit_seq, 0, NULL);
2457 }
2459
2460 static void
2461 __xfs_iunpin_wait(
2462 struct xfs_inode *ip)
2463 {
2464 wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT);
2465 DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT);
2466
2467 xfs_iunpin(ip);
2468
2469 do {
2470 prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
2471 if (xfs_ipincount(ip))
2472 io_schedule();
2473 } while (xfs_ipincount(ip));
2474 finish_wait(wq, &wait.wq_entry);
2475 }
2476
2477 void
2478 xfs_iunpin_wait(
2479 struct xfs_inode *ip)
2480 {
2481 if (xfs_ipincount(ip))
2482 __xfs_iunpin_wait(ip);
2483 }
2484
2485 /*
2486 * Removing an inode from the namespace involves removing the directory entry
2487 * and dropping the link count on the inode. Removing the directory entry can
2488 * result in locking an AGF (directory blocks were freed) and removing a link
2489 * count can result in placing the inode on an unlinked list which results in
2490 * locking an AGI.
2491 *
2492 * The big problem here is that we have an ordering constraint on AGF and AGI
2493 * locking - inode allocation locks the AGI, then can allocate a new extent for
2494 * new inodes, locking the AGF after the AGI. Similarly, freeing the inode
2495 * removes the inode from the unlinked list, requiring that we lock the AGI
2496 * first, and then freeing the inode can result in an inode chunk being freed
2497 * and hence freeing disk space requiring that we lock an AGF.
2498 *
2499 * Hence the ordering that is imposed by other parts of the code is AGI before
2500 * AGF. This means we cannot remove the directory entry before we drop the inode
2501 * reference count and put it on the unlinked list as this results in a lock
2502 * order of AGF then AGI, and this can deadlock against inode allocation and
2503 * freeing. Therefore we must drop the link counts before we remove the
2504 * directory entry.
2505 *
2506 * This is still safe from a transactional point of view - it is not until we
2507 * get to xfs_defer_finish() that we have the possibility of multiple
2508 * transactions in this operation. Hence as long as we remove the directory
2509 * entry and drop the link count in the first transaction of the remove
2510 * operation, there are no transactional constraints on the ordering here.
2511 */
2512 int
2513 xfs_remove(
2514 xfs_inode_t *dp,
2515 struct xfs_name *name,
2516 xfs_inode_t *ip)
2517 {
2518 xfs_mount_t *mp = dp->i_mount;
2519 xfs_trans_t *tp = NULL;
2520 int is_dir = S_ISDIR(VFS_I(ip)->i_mode);
2521 int dontcare;
2522 int error = 0;
2523 uint resblks;
2524
2525 trace_xfs_remove(dp, name);
2526
2527 if (xfs_is_shutdown(mp))
2528 return -EIO;
2529
2530 error = xfs_qm_dqattach(dp);
2531 if (error)
2532 goto std_return;
2533
2534 error = xfs_qm_dqattach(ip);
2535 if (error)
2536 goto std_return;
2537
2538 /*
2539 * We try to get the real space reservation first, allowing for
2540 * directory btree deletion(s) implying possible bmap insert(s). If we
2541 * can't get the space reservation then we use 0 instead, and avoid the
2542 	 * bmap btree insert(s) in the directory code: if a bmap insert tries to
2543 	 * happen, the directory code instead trims the LAST block from the directory.
2544 *
2545 * Ignore EDQUOT and ENOSPC being returned via nospace_error because
2546 * the directory code can handle a reservationless update and we don't
2547 * want to prevent a user from trying to free space by deleting things.
2548 */
2549 resblks = XFS_REMOVE_SPACE_RES(mp);
2550 error = xfs_trans_alloc_dir(dp, &M_RES(mp)->tr_remove, ip, &resblks,
2551 &tp, &dontcare);
2552 if (error) {
2553 ASSERT(error != -ENOSPC);
2554 goto std_return;
2555 }
2556
2557 /*
2558 * If we're removing a directory perform some additional validation.
2559 */
2560 if (is_dir) {
2561 ASSERT(VFS_I(ip)->i_nlink >= 2);
2562 if (VFS_I(ip)->i_nlink != 2) {
2563 error = -ENOTEMPTY;
2564 goto out_trans_cancel;
2565 }
2566 if (!xfs_dir_isempty(ip)) {
2567 error = -ENOTEMPTY;
2568 goto out_trans_cancel;
2569 }
2570
2571 /* Drop the link from ip's "..". */
2572 error = xfs_droplink(tp, dp);
2573 if (error)
2574 goto out_trans_cancel;
2575
2576 /* Drop the "." link from ip to self. */
2577 error = xfs_droplink(tp, ip);
2578 if (error)
2579 goto out_trans_cancel;
2580
2581 /*
2582 * Point the unlinked child directory's ".." entry to the root
2583 * directory to eliminate back-references to inodes that may
2584 * get freed before the child directory is closed. If the fs
2585 * gets shrunk, this can lead to dirent inode validation errors.
2586 */
2587 if (dp->i_ino != tp->t_mountp->m_sb.sb_rootino) {
2588 error = xfs_dir_replace(tp, ip, &xfs_name_dotdot,
2589 tp->t_mountp->m_sb.sb_rootino, 0);
2590 if (error)
2591 goto out_trans_cancel;
2592 }
2593 } else {
2594 /*
2595 * When removing a non-directory we need to log the parent
2596 * inode here. For a directory this is done implicitly
2597 * by the xfs_droplink call for the ".." entry.
2598 */
2599 xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
2600 }
2601 xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2602
2603 /* Drop the link from dp to ip. */
2604 error = xfs_droplink(tp, ip);
2605 if (error)
2606 goto out_trans_cancel;
2607
2608 error = xfs_dir_removename(tp, dp, name, ip->i_ino, resblks);
2609 if (error) {
2610 ASSERT(error != -ENOENT);
2611 goto out_trans_cancel;
2612 }
2613
2614 /*
2615 * If this is a synchronous mount, make sure that the
2616 * remove transaction goes to disk before returning to
2617 * the user.
2618 */
2619 if (xfs_has_wsync(mp) || xfs_has_dirsync(mp))
2620 xfs_trans_set_sync(tp);
2621
2622 error = xfs_trans_commit(tp);
2623 if (error)
2624 goto std_return;
2625
2626 if (is_dir && xfs_inode_is_filestream(ip))
2627 xfs_filestream_deassociate(ip);
2628
2629 return 0;
2630
2631 out_trans_cancel:
2632 xfs_trans_cancel(tp);
2633 std_return:
2634 return error;
2635 }
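
/*
 * Illustrative sketch (not kernel code): the AGI-before-AGF rule described
 * above reduces to "every path takes the AGI class lock before the AGF class
 * lock". A hypothetical userspace analogue with two mutexes:
 */
#if 0
#include <pthread.h>

static pthread_mutex_t agi_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t agf_lock = PTHREAD_MUTEX_INITIALIZER;

/* Unlink path: drop the link count (AGI) before freeing dir blocks (AGF). */
static void unlink_path(void)
{
	pthread_mutex_lock(&agi_lock);		/* unlinked list update */
	pthread_mutex_lock(&agf_lock);		/* directory block free */
	/* ... metadata updates ... */
	pthread_mutex_unlock(&agf_lock);
	pthread_mutex_unlock(&agi_lock);
}

/* Allocation path: same AGI -> AGF order, so the two cannot deadlock. */
static void ialloc_path(void)
{
	pthread_mutex_lock(&agi_lock);		/* inode allocation */
	pthread_mutex_lock(&agf_lock);		/* new inode chunk extent */
	/* ... metadata updates ... */
	pthread_mutex_unlock(&agf_lock);
	pthread_mutex_unlock(&agi_lock);
}
#endif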
2636
2637 /*
2638 * Enter all inodes for a rename transaction into a sorted array.
2639 */
2640 #define __XFS_SORT_INODES 5
2641 STATIC void
2642 xfs_sort_for_rename(
2643 struct xfs_inode *dp1, /* in: old (source) directory inode */
2644 struct xfs_inode *dp2, /* in: new (target) directory inode */
2645 struct xfs_inode *ip1, /* in: inode of old entry */
2646 struct xfs_inode *ip2, /* in: inode of new entry */
2647 struct xfs_inode *wip, /* in: whiteout inode */
2648 struct xfs_inode **i_tab,/* out: sorted array of inodes */
2649 int *num_inodes) /* in/out: inodes in array */
2650 {
2651 int i, j;
2652
2653 ASSERT(*num_inodes == __XFS_SORT_INODES);
2654 memset(i_tab, 0, *num_inodes * sizeof(struct xfs_inode *));
2655
2656 /*
2657 * i_tab contains a list of pointers to inodes. We initialize
2658 * the table here & we'll sort it. We will then use it to
2659 * order the acquisition of the inode locks.
2660 *
2661 * Note that the table may contain duplicates. e.g., dp1 == dp2.
2662 */
2663 i = 0;
2664 i_tab[i++] = dp1;
2665 i_tab[i++] = dp2;
2666 i_tab[i++] = ip1;
2667 if (ip2)
2668 i_tab[i++] = ip2;
2669 if (wip)
2670 i_tab[i++] = wip;
2671 *num_inodes = i;
2672
2673 /*
2674 * Sort the elements via bubble sort. (Remember, there are at
2675 * most 5 elements to sort, so this is adequate.)
2676 */
2677 for (i = 0; i < *num_inodes; i++) {
2678 for (j = 1; j < *num_inodes; j++) {
2679 if (i_tab[j]->i_ino < i_tab[j-1]->i_ino) {
2680 struct xfs_inode *temp = i_tab[j];
2681 i_tab[j] = i_tab[j-1];
2682 i_tab[j-1] = temp;
2683 }
2684 }
2685 }
2686 }
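
/*
 * Illustrative sketch (not kernel code): the sort above only needs to produce
 * a non-decreasing i_ino order, duplicates included, so that xfs_lock_inodes()
 * can take locks in a globally consistent order. A hypothetical userspace
 * check of that property:
 */
#if 0
#include <assert.h>

static void check_sorted(struct xfs_inode **i_tab, int num_inodes)
{
	int i;

	for (i = 1; i < num_inodes; i++)
		assert(i_tab[i - 1]->i_ino <= i_tab[i]->i_ino);
}
#endif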
2687
2688 static int
2689 xfs_finish_rename(
2690 struct xfs_trans *tp)
2691 {
2692 /*
2693 * If this is a synchronous mount, make sure that the rename transaction
2694 * goes to disk before returning to the user.
2695 */
2696 if (xfs_has_wsync(tp->t_mountp) || xfs_has_dirsync(tp->t_mountp))
2697 xfs_trans_set_sync(tp);
2698
2699 return xfs_trans_commit(tp);
2700 }
2701
2702 /*
2703 * xfs_cross_rename()
2704 *
2705 * responsible for handling RENAME_EXCHANGE flag in renameat2() syscall
2706 */
2707 STATIC int
2708 xfs_cross_rename(
2709 struct xfs_trans *tp,
2710 struct xfs_inode *dp1,
2711 struct xfs_name *name1,
2712 struct xfs_inode *ip1,
2713 struct xfs_inode *dp2,
2714 struct xfs_name *name2,
2715 struct xfs_inode *ip2,
2716 int spaceres)
2717 {
2718 int error = 0;
2719 int ip1_flags = 0;
2720 int ip2_flags = 0;
2721 int dp2_flags = 0;
2722
2723 /* Swap inode number for dirent in first parent */
2724 error = xfs_dir_replace(tp, dp1, name1, ip2->i_ino, spaceres);
2725 if (error)
2726 goto out_trans_abort;
2727
2728 /* Swap inode number for dirent in second parent */
2729 error = xfs_dir_replace(tp, dp2, name2, ip1->i_ino, spaceres);
2730 if (error)
2731 goto out_trans_abort;
2732
2733 /*
2734 * If we're renaming one or more directories across different parents,
2735 * update the respective ".." entries (and link counts) to match the new
2736 * parents.
2737 */
2738 if (dp1 != dp2) {
2739 dp2_flags = XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2740
2741 if (S_ISDIR(VFS_I(ip2)->i_mode)) {
2742 error = xfs_dir_replace(tp, ip2, &xfs_name_dotdot,
2743 dp1->i_ino, spaceres);
2744 if (error)
2745 goto out_trans_abort;
2746
2747 /* transfer ip2 ".." reference to dp1 */
2748 if (!S_ISDIR(VFS_I(ip1)->i_mode)) {
2749 error = xfs_droplink(tp, dp2);
2750 if (error)
2751 goto out_trans_abort;
2752 xfs_bumplink(tp, dp1);
2753 }
2754
2755 /*
2756 * Although ip1 isn't changed here, userspace needs
2757 			 * to be warned about the change, so that applications
2758 			 * relying on it (like backup ones) will properly
2759 			 * notice the change.
2760 */
2761 ip1_flags |= XFS_ICHGTIME_CHG;
2762 ip2_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2763 }
2764
2765 if (S_ISDIR(VFS_I(ip1)->i_mode)) {
2766 error = xfs_dir_replace(tp, ip1, &xfs_name_dotdot,
2767 dp2->i_ino, spaceres);
2768 if (error)
2769 goto out_trans_abort;
2770
2771 /* transfer ip1 ".." reference to dp2 */
2772 if (!S_ISDIR(VFS_I(ip2)->i_mode)) {
2773 error = xfs_droplink(tp, dp1);
2774 if (error)
2775 goto out_trans_abort;
2776 xfs_bumplink(tp, dp2);
2777 }
2778
2779 /*
2780 * Although ip2 isn't changed here, userspace needs
2781 			 * to be warned about the change, so that applications
2782 			 * relying on it (like backup ones) will properly
2783 			 * notice the change.
2784 */
2785 ip1_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2786 ip2_flags |= XFS_ICHGTIME_CHG;
2787 }
2788 }
2789
2790 if (ip1_flags) {
2791 xfs_trans_ichgtime(tp, ip1, ip1_flags);
2792 xfs_trans_log_inode(tp, ip1, XFS_ILOG_CORE);
2793 }
2794 if (ip2_flags) {
2795 xfs_trans_ichgtime(tp, ip2, ip2_flags);
2796 xfs_trans_log_inode(tp, ip2, XFS_ILOG_CORE);
2797 }
2798 if (dp2_flags) {
2799 xfs_trans_ichgtime(tp, dp2, dp2_flags);
2800 xfs_trans_log_inode(tp, dp2, XFS_ILOG_CORE);
2801 }
2802 xfs_trans_ichgtime(tp, dp1, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2803 xfs_trans_log_inode(tp, dp1, XFS_ILOG_CORE);
2804 return xfs_finish_rename(tp);
2805
2806 out_trans_abort:
2807 xfs_trans_cancel(tp);
2808 return error;
2809 }
2810
2811 /*
2812 * xfs_rename_alloc_whiteout()
2813 *
2814 * Return a referenced, unlinked, unlocked inode that can be used as a
2815 * whiteout in a rename transaction. We use a tmpfile inode here so that if we
2816 * crash between allocating the inode and linking it into the rename transaction
2817  * crash between allocating the inode and linking it into the rename
2818  * transaction, recovery will free the inode and we won't leak it.
2819 static int
2820 xfs_rename_alloc_whiteout(
2821 struct mnt_idmap *idmap,
2822 struct xfs_name *src_name,
2823 struct xfs_inode *dp,
2824 struct xfs_inode **wip)
2825 {
2826 struct xfs_inode *tmpfile;
2827 struct qstr name;
2828 int error;
2829
2830 error = xfs_create_tmpfile(idmap, dp, S_IFCHR | WHITEOUT_MODE,
2831 &tmpfile);
2832 if (error)
2833 return error;
2834
2835 name.name = src_name->name;
2836 name.len = src_name->len;
2837 error = xfs_inode_init_security(VFS_I(tmpfile), VFS_I(dp), &name);
2838 if (error) {
2839 xfs_finish_inode_setup(tmpfile);
2840 xfs_irele(tmpfile);
2841 return error;
2842 }
2843
2844 /*
2845 * Prepare the tmpfile inode as if it were created through the VFS.
2846 * Complete the inode setup and flag it as linkable. nlink is already
2847 * zero, so we can skip the drop_nlink.
2848 */
2849 xfs_setup_iops(tmpfile);
2850 xfs_finish_inode_setup(tmpfile);
2851 VFS_I(tmpfile)->i_state |= I_LINKABLE;
2852
2853 *wip = tmpfile;
2854 return 0;
2855 }
2856
2857 /*
2858 * xfs_rename
2859 */
2860 int
2861 xfs_rename(
2862 struct mnt_idmap *idmap,
2863 struct xfs_inode *src_dp,
2864 struct xfs_name *src_name,
2865 struct xfs_inode *src_ip,
2866 struct xfs_inode *target_dp,
2867 struct xfs_name *target_name,
2868 struct xfs_inode *target_ip,
2869 unsigned int flags)
2870 {
2871 struct xfs_mount *mp = src_dp->i_mount;
2872 struct xfs_trans *tp;
2873 struct xfs_inode *wip = NULL; /* whiteout inode */
2874 struct xfs_inode *inodes[__XFS_SORT_INODES];
2875 int i;
2876 int num_inodes = __XFS_SORT_INODES;
2877 bool new_parent = (src_dp != target_dp);
2878 bool src_is_directory = S_ISDIR(VFS_I(src_ip)->i_mode);
2879 int spaceres;
2880 bool retried = false;
2881 int error, nospace_error = 0;
2882
2883 trace_xfs_rename(src_dp, target_dp, src_name, target_name);
2884
2885 if ((flags & RENAME_EXCHANGE) && !target_ip)
2886 return -EINVAL;
2887
2888 /*
2889 * If we are doing a whiteout operation, allocate the whiteout inode
2890 * we will be placing at the target and ensure the type is set
2891 * appropriately.
2892 */
2893 if (flags & RENAME_WHITEOUT) {
2894 error = xfs_rename_alloc_whiteout(idmap, src_name,
2895 target_dp, &wip);
2896 if (error)
2897 return error;
2898
2899 /* setup target dirent info as whiteout */
2900 src_name->type = XFS_DIR3_FT_CHRDEV;
2901 }
2902
2903 xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip, wip,
2904 inodes, &num_inodes);
2905
2906 retry:
2907 nospace_error = 0;
2908 spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len);
2909 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, spaceres, 0, 0, &tp);
2910 if (error == -ENOSPC) {
2911 nospace_error = error;
2912 spaceres = 0;
2913 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, 0, 0, 0,
2914 &tp);
2915 }
2916 if (error)
2917 goto out_release_wip;
2918
2919 /*
2920 * Attach the dquots to the inodes
2921 */
2922 error = xfs_qm_vop_rename_dqattach(inodes);
2923 if (error)
2924 goto out_trans_cancel;
2925
2926 /*
2927 * Lock all the participating inodes. Depending upon whether
2928 * the target_name exists in the target directory, and
2929 * whether the target directory is the same as the source
2930 * directory, we can lock from 2 to 5 inodes.
2931 */
2932 xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL);
2933
2934 /*
2935 * Join all the inodes to the transaction. From this point on,
2936 * we can rely on either trans_commit or trans_cancel to unlock
2937 * them.
2938 */
2939 xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL);
2940 if (new_parent)
2941 xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL);
2942 xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL);
2943 if (target_ip)
2944 xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL);
2945 if (wip)
2946 xfs_trans_ijoin(tp, wip, XFS_ILOCK_EXCL);
2947
2948 /*
2949 * If we are using project inheritance, we only allow renames
2950 * into our tree when the project IDs are the same; else the
2951 * tree quota mechanism would be circumvented.
2952 */
2953 if (unlikely((target_dp->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
2954 target_dp->i_projid != src_ip->i_projid)) {
2955 error = -EXDEV;
2956 goto out_trans_cancel;
2957 }
2958
2959 /* RENAME_EXCHANGE is unique from here on. */
2960 if (flags & RENAME_EXCHANGE)
2961 return xfs_cross_rename(tp, src_dp, src_name, src_ip,
2962 target_dp, target_name, target_ip,
2963 spaceres);
2964
2965 /*
2966 * Try to reserve quota to handle an expansion of the target directory.
2967 * We'll allow the rename to continue in reservationless mode if we hit
2968 * a space usage constraint. If we trigger reservationless mode, save
2969 * the errno if there isn't any free space in the target directory.
2970 */
2971 if (spaceres != 0) {
2972 error = xfs_trans_reserve_quota_nblks(tp, target_dp, spaceres,
2973 0, false);
2974 if (error == -EDQUOT || error == -ENOSPC) {
2975 if (!retried) {
2976 xfs_trans_cancel(tp);
2977 xfs_blockgc_free_quota(target_dp, 0);
2978 retried = true;
2979 goto retry;
2980 }
2981
2982 nospace_error = error;
2983 spaceres = 0;
2984 error = 0;
2985 }
2986 if (error)
2987 goto out_trans_cancel;
2988 }
2989
2990 /*
2991 * Check for expected errors before we dirty the transaction
2992 * so we can return an error without a transaction abort.
2993 */
2994 if (target_ip == NULL) {
2995 /*
2996 		 * If there's no space reservation, check that the entry will
2997 * fit before actually inserting it.
2998 */
2999 if (!spaceres) {
3000 error = xfs_dir_canenter(tp, target_dp, target_name);
3001 if (error)
3002 goto out_trans_cancel;
3003 }
3004 } else {
3005 /*
3006 		 * If target exists and it's a directory, check whether
3007 * it can be destroyed.
3008 */
3009 if (S_ISDIR(VFS_I(target_ip)->i_mode) &&
3010 (!xfs_dir_isempty(target_ip) ||
3011 (VFS_I(target_ip)->i_nlink > 2))) {
3012 error = -EEXIST;
3013 goto out_trans_cancel;
3014 }
3015 }
3016
3017 /*
3018 * Lock the AGI buffers we need to handle bumping the nlink of the
3019 * whiteout inode off the unlinked list and to handle dropping the
3020 * nlink of the target inode. Per locking order rules, do this in
3021 * increasing AG order and before directory block allocation tries to
3022 * grab AGFs because we grab AGIs before AGFs.
3023 *
3024 * The (vfs) caller must ensure that if src is a directory then
3025 * target_ip is either null or an empty directory.
3026 */
3027 for (i = 0; i < num_inodes && inodes[i] != NULL; i++) {
3028 if (inodes[i] == wip ||
3029 (inodes[i] == target_ip &&
3030 (VFS_I(target_ip)->i_nlink == 1 || src_is_directory))) {
3031 struct xfs_perag *pag;
3032 struct xfs_buf *bp;
3033
3034 pag = xfs_perag_get(mp,
3035 XFS_INO_TO_AGNO(mp, inodes[i]->i_ino));
3036 error = xfs_read_agi(pag, tp, &bp);
3037 xfs_perag_put(pag);
3038 if (error)
3039 goto out_trans_cancel;
3040 }
3041 }
3042
3043 /*
3044 * Directory entry creation below may acquire the AGF. Remove
3045 * the whiteout from the unlinked list first to preserve correct
3046 * AGI/AGF locking order. This dirties the transaction so failures
3047 * after this point will abort and log recovery will clean up the
3048 * mess.
3049 *
3050 * For whiteouts, we need to bump the link count on the whiteout
3051 	 * inode. After this point we have a real link, so clear the tmpfile
3052 * state flag from the inode so it doesn't accidentally get misused
3053 * in future.
3054 */
3055 if (wip) {
3056 struct xfs_perag *pag;
3057
3058 ASSERT(VFS_I(wip)->i_nlink == 0);
3059
3060 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, wip->i_ino));
3061 error = xfs_iunlink_remove(tp, pag, wip);
3062 xfs_perag_put(pag);
3063 if (error)
3064 goto out_trans_cancel;
3065
3066 xfs_bumplink(tp, wip);
3067 VFS_I(wip)->i_state &= ~I_LINKABLE;
3068 }
3069
3070 /*
3071 * Set up the target.
3072 */
3073 if (target_ip == NULL) {
3074 /*
3075 * If target does not exist and the rename crosses
3076 * directories, adjust the target directory link count
3077 * to account for the ".." reference from the new entry.
3078 */
3079 error = xfs_dir_createname(tp, target_dp, target_name,
3080 src_ip->i_ino, spaceres);
3081 if (error)
3082 goto out_trans_cancel;
3083
3084 xfs_trans_ichgtime(tp, target_dp,
3085 XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3086
3087 if (new_parent && src_is_directory) {
3088 xfs_bumplink(tp, target_dp);
3089 }
3090 } else { /* target_ip != NULL */
3091 /*
3092 * Link the source inode under the target name.
3093 * If the source inode is a directory and we are moving
3094 * it across directories, its ".." entry will be
3095 * inconsistent until we replace that down below.
3096 *
3097 * In case there is already an entry with the same
3098 * name at the destination directory, remove it first.
3099 */
3100 error = xfs_dir_replace(tp, target_dp, target_name,
3101 src_ip->i_ino, spaceres);
3102 if (error)
3103 goto out_trans_cancel;
3104
3105 xfs_trans_ichgtime(tp, target_dp,
3106 XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3107
3108 /*
3109 * Decrement the link count on the target since the target
3110 * dir no longer points to it.
3111 */
3112 error = xfs_droplink(tp, target_ip);
3113 if (error)
3114 goto out_trans_cancel;
3115
3116 if (src_is_directory) {
3117 /*
3118 * Drop the link from the old "." entry.
3119 */
3120 error = xfs_droplink(tp, target_ip);
3121 if (error)
3122 goto out_trans_cancel;
3123 }
3124 } /* target_ip != NULL */
3125
3126 /*
3127 * Remove the source.
3128 */
3129 if (new_parent && src_is_directory) {
3130 /*
3131 * Rewrite the ".." entry to point to the new
3132 * directory.
3133 */
3134 error = xfs_dir_replace(tp, src_ip, &xfs_name_dotdot,
3135 target_dp->i_ino, spaceres);
3136 ASSERT(error != -EEXIST);
3137 if (error)
3138 goto out_trans_cancel;
3139 }
3140
3141 /*
3142 * We always want to hit the ctime on the source inode.
3143 *
3144 * This isn't strictly required by the standards since the source
3145 * inode isn't really being changed, but old unix file systems did
3146 * it and some incremental backup programs won't work without it.
3147 */
3148 xfs_trans_ichgtime(tp, src_ip, XFS_ICHGTIME_CHG);
3149 xfs_trans_log_inode(tp, src_ip, XFS_ILOG_CORE);
3150
3151 /*
3152 * Adjust the link count on src_dp. This is necessary when
3153 * renaming a directory, either within one parent when
3154 * the target existed, or across two parent directories.
3155 */
3156 if (src_is_directory && (new_parent || target_ip != NULL)) {
3157
3158 /*
3159 * Decrement link count on src_directory since the
3160 * entry that's moved no longer points to it.
3161 */
3162 error = xfs_droplink(tp, src_dp);
3163 if (error)
3164 goto out_trans_cancel;
3165 }
3166
3167 /*
3168 * For whiteouts, we only need to update the source dirent with the
3169 * inode number of the whiteout inode rather than removing it
3170 * altogether.
3171 */
3172 if (wip)
3173 error = xfs_dir_replace(tp, src_dp, src_name, wip->i_ino,
3174 spaceres);
3175 else
3176 error = xfs_dir_removename(tp, src_dp, src_name, src_ip->i_ino,
3177 spaceres);
3178
3179 if (error)
3180 goto out_trans_cancel;
3181
3182 xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3183 xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE);
3184 if (new_parent)
3185 xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE);
3186
3187 error = xfs_finish_rename(tp);
3188 if (wip)
3189 xfs_irele(wip);
3190 return error;
3191
3192 out_trans_cancel:
3193 xfs_trans_cancel(tp);
3194 out_release_wip:
3195 if (wip)
3196 xfs_irele(wip);
3197 if (error == -ENOSPC && nospace_error)
3198 error = nospace_error;
3199 return error;
3200 }
3201
3202 static int
3203 xfs_iflush(
3204 struct xfs_inode *ip,
3205 struct xfs_buf *bp)
3206 {
3207 struct xfs_inode_log_item *iip = ip->i_itemp;
3208 struct xfs_dinode *dip;
3209 struct xfs_mount *mp = ip->i_mount;
3210 int error;
3211
3212 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
3213 ASSERT(xfs_iflags_test(ip, XFS_IFLUSHING));
3214 ASSERT(ip->i_df.if_format != XFS_DINODE_FMT_BTREE ||
3215 ip->i_df.if_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
3216 ASSERT(iip->ili_item.li_buf == bp);
3217
3218 dip = xfs_buf_offset(bp, ip->i_imap.im_boffset);
3219
3220 /*
3221 * We don't flush the inode if any of the following checks fail, but we
3222 	 * do still update the log item and attach it to the backing buffer as if
3223 * the flush happened. This is a formality to facilitate predictable
3224 * error handling as the caller will shutdown and fail the buffer.
3225 */
3226 error = -EFSCORRUPTED;
3227 if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC),
3228 mp, XFS_ERRTAG_IFLUSH_1)) {
3229 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3230 "%s: Bad inode %llu magic number 0x%x, ptr "PTR_FMT,
3231 __func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
3232 goto flush_out;
3233 }
3234 if (S_ISREG(VFS_I(ip)->i_mode)) {
3235 if (XFS_TEST_ERROR(
3236 ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
3237 ip->i_df.if_format != XFS_DINODE_FMT_BTREE,
3238 mp, XFS_ERRTAG_IFLUSH_3)) {
3239 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3240 "%s: Bad regular inode %llu, ptr "PTR_FMT,
3241 __func__, ip->i_ino, ip);
3242 goto flush_out;
3243 }
3244 } else if (S_ISDIR(VFS_I(ip)->i_mode)) {
3245 if (XFS_TEST_ERROR(
3246 ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
3247 ip->i_df.if_format != XFS_DINODE_FMT_BTREE &&
3248 ip->i_df.if_format != XFS_DINODE_FMT_LOCAL,
3249 mp, XFS_ERRTAG_IFLUSH_4)) {
3250 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3251 "%s: Bad directory inode %llu, ptr "PTR_FMT,
3252 __func__, ip->i_ino, ip);
3253 goto flush_out;
3254 }
3255 }
3256 if (XFS_TEST_ERROR(ip->i_df.if_nextents + xfs_ifork_nextents(&ip->i_af) >
3257 ip->i_nblocks, mp, XFS_ERRTAG_IFLUSH_5)) {
3258 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3259 "%s: detected corrupt incore inode %llu, "
3260 "total extents = %llu nblocks = %lld, ptr "PTR_FMT,
3261 __func__, ip->i_ino,
3262 ip->i_df.if_nextents + xfs_ifork_nextents(&ip->i_af),
3263 ip->i_nblocks, ip);
3264 goto flush_out;
3265 }
3266 if (XFS_TEST_ERROR(ip->i_forkoff > mp->m_sb.sb_inodesize,
3267 mp, XFS_ERRTAG_IFLUSH_6)) {
3268 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3269 "%s: bad inode %llu, forkoff 0x%x, ptr "PTR_FMT,
3270 __func__, ip->i_ino, ip->i_forkoff, ip);
3271 goto flush_out;
3272 }
3273
3274 /*
3275 	 * Inode item log recovery for v2 inodes is dependent on the flushiter
3276 * count for correct sequencing. We bump the flush iteration count so
3277 * we can detect flushes which postdate a log record during recovery.
3278 * This is redundant as we now log every change and hence this can't
3279 * happen but we need to still do it to ensure backwards compatibility
3280 * with old kernels that predate logging all inode changes.
3281 */
3282 if (!xfs_has_v3inodes(mp))
3283 ip->i_flushiter++;
3284
3285 /*
3286 * If there are inline format data / attr forks attached to this inode,
3287 * make sure they are not corrupt.
3288 */
3289 if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL &&
3290 xfs_ifork_verify_local_data(ip))
3291 goto flush_out;
3292 if (xfs_inode_has_attr_fork(ip) &&
3293 ip->i_af.if_format == XFS_DINODE_FMT_LOCAL &&
3294 xfs_ifork_verify_local_attr(ip))
3295 goto flush_out;
3296
3297 /*
3298 * Copy the dirty parts of the inode into the on-disk inode. We always
3299 * copy out the core of the inode, because if the inode is dirty at all
3300 * the core must be.
3301 */
3302 xfs_inode_to_disk(ip, dip, iip->ili_item.li_lsn);
3303
3304 /* Wrap, we never let the log put out DI_MAX_FLUSH */
3305 if (!xfs_has_v3inodes(mp)) {
3306 if (ip->i_flushiter == DI_MAX_FLUSH)
3307 ip->i_flushiter = 0;
3308 }
3309
3310 xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK);
3311 if (xfs_inode_has_attr_fork(ip))
3312 xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK);
3313
3314 /*
3315 * We've recorded everything logged in the inode, so we'd like to clear
3316 * the ili_fields bits so we don't log and flush things unnecessarily.
3317 * However, we can't stop logging all this information until the data
3318 * we've copied into the disk buffer is written to disk. If we did we
3319 * might overwrite the copy of the inode in the log with all the data
3320 * after re-logging only part of it, and in the face of a crash we
3321 * wouldn't have all the data we need to recover.
3322 *
3323 * What we do is move the bits to the ili_last_fields field. When
3324 * logging the inode, these bits are moved back to the ili_fields field.
3325 * In the xfs_buf_inode_iodone() routine we clear ili_last_fields, since
3326 * we know that the information those bits represent is permanently on
3327 * disk. As long as the flush completes before the inode is logged
3328 * again, then both ili_fields and ili_last_fields will be cleared.
3329 */
3330 error = 0;
3331 flush_out:
3332 spin_lock(&iip->ili_lock);
3333 iip->ili_last_fields = iip->ili_fields;
3334 iip->ili_fields = 0;
3335 iip->ili_fsync_fields = 0;
3336 spin_unlock(&iip->ili_lock);
3337
3338 /*
3339 * Store the current LSN of the inode so that we can tell whether the
3340 * item has moved in the AIL from xfs_buf_inode_iodone().
3341 */
3342 xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
3343 &iip->ili_item.li_lsn);
3344
3345 /* generate the checksum. */
3346 xfs_dinode_calc_crc(mp, dip);
3347 return error;
3348 }
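
/*
 * Illustrative sketch (not kernel code): the ili_fields hand-off described
 * above, reduced to a two-stage model of dirty bits. Completion only clears
 * the bits that were captured when the flush started; all names here are
 * hypothetical.
 */
#if 0
struct fake_log_item {
	unsigned int	fields;		/* bits dirtied since the last flush */
	unsigned int	last_fields;	/* bits captured by an in-flight flush */
};

static void fake_flush_start(struct fake_log_item *iip)
{
	iip->last_fields = iip->fields;	/* snapshot what this flush covers */
	iip->fields = 0;		/* relogging accumulates afresh */
}

static void fake_flush_done(struct fake_log_item *iip)
{
	iip->last_fields = 0;		/* captured bits are now on disk */
	/* anything left in ->fields was relogged after the flush started */
}
#endif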
3349
3350 /*
3351 * Non-blocking flush of dirty inode metadata into the backing buffer.
3352 *
3353 * The caller must have a reference to the inode and hold the cluster buffer
3354  * locked. The function walks all the inodes attached to the cluster buffer
3355  * that it can find and lock without blocking, and flushes them to the buffer.
3356 *
3357 * On successful flushing of at least one inode, the caller must write out the
3358 * buffer and release it. If no inodes are flushed, -EAGAIN will be returned and
3359 * the caller needs to release the buffer. On failure, the filesystem will be
3360 * shut down, the buffer will have been unlocked and released, and EFSCORRUPTED
3361 * will be returned.
3362 */
3363 int
3364 xfs_iflush_cluster(
3365 struct xfs_buf *bp)
3366 {
3367 struct xfs_mount *mp = bp->b_mount;
3368 struct xfs_log_item *lip, *n;
3369 struct xfs_inode *ip;
3370 struct xfs_inode_log_item *iip;
3371 int clcount = 0;
3372 int error = 0;
3373
3374 /*
3375 * We must use the safe variant here as on shutdown xfs_iflush_abort()
3376 * will remove itself from the list.
3377 */
3378 list_for_each_entry_safe(lip, n, &bp->b_li_list, li_bio_list) {
3379 iip = (struct xfs_inode_log_item *)lip;
3380 ip = iip->ili_inode;
3381
3382 /*
3383 * Quick and dirty check to avoid locks if possible.
3384 */
3385 if (__xfs_iflags_test(ip, XFS_IRECLAIM | XFS_IFLUSHING))
3386 continue;
3387 if (xfs_ipincount(ip))
3388 continue;
3389
3390 /*
3391 * The inode is still attached to the buffer, which means it is
3392 * dirty but reclaim might try to grab it. Check carefully for
3393 * that, and grab the ilock while still holding the i_flags_lock
3394 * to guarantee reclaim will not be able to reclaim this inode
3395 * once we drop the i_flags_lock.
3396 */
3397 spin_lock(&ip->i_flags_lock);
3398 ASSERT(!__xfs_iflags_test(ip, XFS_ISTALE));
3399 if (__xfs_iflags_test(ip, XFS_IRECLAIM | XFS_IFLUSHING)) {
3400 spin_unlock(&ip->i_flags_lock);
3401 continue;
3402 }
3403
3404 /*
3405 * ILOCK will pin the inode against reclaim and prevent
3406 * concurrent transactions modifying the inode while we are
3407 * flushing the inode. If we get the lock, set the flushing
3408 * state before we drop the i_flags_lock.
3409 */
3410 if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
3411 spin_unlock(&ip->i_flags_lock);
3412 continue;
3413 }
3414 __xfs_iflags_set(ip, XFS_IFLUSHING);
3415 spin_unlock(&ip->i_flags_lock);
3416
3417 /*
3418 * Abort flushing this inode if we are shut down because the
3419 * inode may not currently be in the AIL. This can occur when
3420 * log I/O failure unpins the inode without inserting into the
3421 * AIL, leaving a dirty/unpinned inode attached to the buffer
3422 * that otherwise looks like it should be flushed.
3423 */
3424 if (xlog_is_shutdown(mp->m_log)) {
3425 xfs_iunpin_wait(ip);
3426 xfs_iflush_abort(ip);
3427 xfs_iunlock(ip, XFS_ILOCK_SHARED);
3428 error = -EIO;
3429 continue;
3430 }
3431
3432 /* don't block waiting on a log force to unpin dirty inodes */
3433 if (xfs_ipincount(ip)) {
3434 xfs_iflags_clear(ip, XFS_IFLUSHING);
3435 xfs_iunlock(ip, XFS_ILOCK_SHARED);
3436 continue;
3437 }
3438
3439 if (!xfs_inode_clean(ip))
3440 error = xfs_iflush(ip, bp);
3441 else
3442 xfs_iflags_clear(ip, XFS_IFLUSHING);
3443 xfs_iunlock(ip, XFS_ILOCK_SHARED);
3444 if (error)
3445 break;
3446 clcount++;
3447 }
3448
3449 if (error) {
3450 /*
3451 * Shutdown first so we kill the log before we release this
3452 * buffer. If it is an INODE_ALLOC buffer and pins the tail
3453 * of the log, failing it before the _log_ is shut down can
3454 * result in the log tail being moved forward in the journal
3455 * on disk because log writes can still be taking place. Hence
3456 * unpinning the tail will allow the ICREATE intent to be
3457 		 * removed from the log and recovery will fail with uninitialised
3458 * inode cluster buffers.
3459 */
3460 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
3461 bp->b_flags |= XBF_ASYNC;
3462 xfs_buf_ioend_fail(bp);
3463 return error;
3464 }
3465
3466 if (!clcount)
3467 return -EAGAIN;
3468
3469 XFS_STATS_INC(mp, xs_icluster_flushcnt);
3470 XFS_STATS_ADD(mp, xs_icluster_flushinode, clcount);
3471 return 0;
3472
3473 }
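
/*
 * Illustrative sketch (not kernel code): how a hypothetical caller would
 * honour the xfs_iflush_cluster() return contract described above. The
 * submit helper is a stand-in, not a real XFS function.
 */
#if 0
static void example_flush_cluster(struct xfs_buf *bp)
{
	int	error = xfs_iflush_cluster(bp);

	if (!error) {
		/* at least one inode flushed: write out and release bp */
		submit_and_release_buffer(bp);	/* hypothetical helper */
	} else if (error == -EAGAIN) {
		/* nothing flushed: just release the buffer */
		xfs_buf_relse(bp);
	}
	/* other errors: fs is shut down, bp already unlocked and released */
}
#endif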
3474
3475 /* Release an inode. */
3476 void
3477 xfs_irele(
3478 struct xfs_inode *ip)
3479 {
3480 trace_xfs_irele(ip, _RET_IP_);
3481 iput(VFS_I(ip));
3482 }
3483
3484 /*
3485  * Ensure all committed transactions touching the inode are written to the log.
3486 */
3487 int
3488 xfs_log_force_inode(
3489 struct xfs_inode *ip)
3490 {
3491 xfs_csn_t seq = 0;
3492
3493 xfs_ilock(ip, XFS_ILOCK_SHARED);
3494 if (xfs_ipincount(ip))
3495 seq = ip->i_itemp->ili_commit_seq;
3496 xfs_iunlock(ip, XFS_ILOCK_SHARED);
3497
3498 if (!seq)
3499 return 0;
3500 return xfs_log_force_seq(ip->i_mount, seq, XFS_LOG_SYNC, NULL);
3501 }
3502
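/*
 * Note on the fast path above: ili_commit_seq is sampled under ILOCK_SHARED
 * and the log is only forced while the inode is pinned, so an already-stable
 * inode returns 0 without touching the log at all. A hypothetical
 * metadata-sync caller (illustration only):
 *
 *	error = xfs_log_force_inode(ip);
 *	if (error)
 *		return error;
 *	// all committed changes to ip are now on stable storage
 */
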
/*
 * Grab the exclusive iolock for a data copy from src to dest, making sure to
 * abide by the vfs locking order (lowest pointer value goes first) and
 * breaking the layout leases before proceeding. The loop is needed because
 * we cannot call the blocking break_layout() with the iolocks held, and
 * therefore have to back out both locks.
 */
static int
xfs_iolock_two_inodes_and_break_layout(
	struct inode		*src,
	struct inode		*dest)
{
	int			error;

	if (src > dest)
		swap(src, dest);

retry:
	/* Wait to break both inodes' layouts before we start locking. */
	error = break_layout(src, true);
	if (error)
		return error;
	if (src != dest) {
		error = break_layout(dest, true);
		if (error)
			return error;
	}

	/* Lock one inode and make sure nobody got in and leased it. */
	inode_lock(src);
	error = break_layout(src, false);
	if (error) {
		inode_unlock(src);
		if (error == -EWOULDBLOCK)
			goto retry;
		return error;
	}

	if (src == dest)
		return 0;

	/* Lock the other inode and make sure nobody got in and leased it. */
	inode_lock_nested(dest, I_MUTEX_NONDIR2);
	error = break_layout(dest, false);
	if (error) {
		inode_unlock(src);
		inode_unlock(dest);
		if (error == -EWOULDBLOCK)
			goto retry;
		return error;
	}

	return 0;
}

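/*
 * The helper above is one instance of a general "wait unlocked, recheck
 * locked" shape: the blocking break_layout(..., true) waits with no locks
 * held, the non-blocking break_layout(..., false) recheck runs under the
 * lock, and -EWOULDBLOCK backs everything out to wait again. A minimal
 * sketch with hypothetical names, for illustration only:
 *
 *	retry:
 *		wait_for_condition(obj);	// may sleep, no locks held
 *		lock(obj);
 *		error = recheck_nowait(obj);	// never sleeps
 *		if (error) {
 *			unlock(obj);
 *			if (error == -EWOULDBLOCK)
 *				goto retry;
 *			return error;
 *		}
 */
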
static int
xfs_mmaplock_two_inodes_and_break_dax_layout(
	struct xfs_inode	*ip1,
	struct xfs_inode	*ip2)
{
	int			error;
	bool			retry;
	struct page		*page;

	if (ip1->i_ino > ip2->i_ino)
		swap(ip1, ip2);

again:
	retry = false;
	/* Lock the first inode */
	xfs_ilock(ip1, XFS_MMAPLOCK_EXCL);
	error = xfs_break_dax_layouts(VFS_I(ip1), &retry);
	if (error || retry) {
		xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
		if (error == 0 && retry)
			goto again;
		return error;
	}

	if (ip1 == ip2)
		return 0;

	/* Nested lock the second inode */
	xfs_ilock(ip2, xfs_lock_inumorder(XFS_MMAPLOCK_EXCL, 1));
	/*
	 * We cannot use xfs_break_dax_layouts() directly here because it may
	 * need to unlock & lock the XFS_MMAPLOCK_EXCL which is not suitable
	 * for this nested lock case.
	 */
	page = dax_layout_busy_page(VFS_I(ip2)->i_mapping);
	if (page && page_ref_count(page) != 1) {
		xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL);
		xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
		goto again;
	}

	return 0;
}

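/*
 * Design note, inferred from the code above: the second inode cannot go
 * through xfs_break_dax_layouts(), because on contention that helper drops
 * and retakes XFS_MMAPLOCK_EXCL and so cannot preserve the
 * xfs_lock_inumorder() nesting annotation. Instead, dax_layout_busy_page()
 * provides a non-blocking check: a page with an elevated reference count is
 * still pinned (e.g. by DMA or direct I/O), so both locks are backed out and
 * the whole two-inode sequence restarts from 'again'.
 */
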
/*
 * Lock two inodes so that userspace cannot initiate I/O via file syscalls or
 * mmap activity.
 */
int
xfs_ilock2_io_mmap(
	struct xfs_inode	*ip1,
	struct xfs_inode	*ip2)
{
	int			ret;

	ret = xfs_iolock_two_inodes_and_break_layout(VFS_I(ip1), VFS_I(ip2));
	if (ret)
		return ret;

	if (IS_DAX(VFS_I(ip1)) && IS_DAX(VFS_I(ip2))) {
		ret = xfs_mmaplock_two_inodes_and_break_dax_layout(ip1, ip2);
		if (ret) {
			inode_unlock(VFS_I(ip2));
			if (ip1 != ip2)
				inode_unlock(VFS_I(ip1));
			return ret;
		}
	} else
		filemap_invalidate_lock_two(VFS_I(ip1)->i_mapping,
					    VFS_I(ip2)->i_mapping);

	return 0;
}

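/*
 * Usage sketch pairing this helper with xfs_iunlock2_io_mmap() below; the
 * remap framing is an assumption for illustration (in-tree, the reflink
 * remap path uses this pairing):
 *
 *	error = xfs_ilock2_io_mmap(ip1, ip2);
 *	if (error)
 *		return error;
 *	// ... neither file can gain new I/O or page faults here ...
 *	xfs_iunlock2_io_mmap(ip1, ip2);
 */
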
/* Unlock both inodes to allow IO and mmap activity. */
void
xfs_iunlock2_io_mmap(
	struct xfs_inode	*ip1,
	struct xfs_inode	*ip2)
{
	if (IS_DAX(VFS_I(ip1)) && IS_DAX(VFS_I(ip2))) {
		xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL);
		if (ip1 != ip2)
			xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
	} else
		filemap_invalidate_unlock_two(VFS_I(ip1)->i_mapping,
					      VFS_I(ip2)->i_mapping);

	inode_unlock(VFS_I(ip2));
	if (ip1 != ip2)
		inode_unlock(VFS_I(ip1));
}

/* Drop the MMAPLOCK and the IOLOCK after a remap completes. */
void
xfs_iunlock2_remapping(
	struct xfs_inode	*ip1,
	struct xfs_inode	*ip2)
{
	xfs_iflags_clear(ip1, XFS_IREMAPPING);

	if (ip1 != ip2)
		xfs_iunlock(ip1, XFS_MMAPLOCK_SHARED);
	xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL);

	if (ip1 != ip2)
		inode_unlock_shared(VFS_I(ip1));
	inode_unlock(VFS_I(ip2));
}

/*
 * Reload the incore unlinked list for this inode. Caller should ensure that
 * the link count cannot change, either by taking ILOCK_SHARED or otherwise
 * preventing other threads from executing.
 */
int
xfs_inode_reload_unlinked_bucket(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_buf		*agibp;
	struct xfs_agi		*agi;
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
	xfs_agino_t		prev_agino, next_agino;
	unsigned int		bucket;
	bool			foundit = false;
	int			error;

	/* Grab the first inode in the list */
	pag = xfs_perag_get(mp, agno);
	error = xfs_ialloc_read_agi(pag, tp, &agibp);
	xfs_perag_put(pag);
	if (error)
		return error;

	/*
	 * We've taken ILOCK_SHARED and the AGI buffer lock to stabilize the
	 * incore unlinked list pointers for this inode. Check once more to
	 * see if we raced with anyone else to reload the unlinked list.
	 */
	if (!xfs_inode_unlinked_incomplete(ip)) {
		foundit = true;
		goto out_agibp;
	}

	bucket = agino % XFS_AGI_UNLINKED_BUCKETS;
	agi = agibp->b_addr;

	trace_xfs_inode_reload_unlinked_bucket(ip);

	xfs_info_ratelimited(mp,
		"Found unrecovered unlinked inode 0x%x in AG 0x%x. Initiating list recovery.",
			agino, agno);

	prev_agino = NULLAGINO;
	next_agino = be32_to_cpu(agi->agi_unlinked[bucket]);
	while (next_agino != NULLAGINO) {
		struct xfs_inode	*next_ip = NULL;

		/* Found this caller's inode, set its backlink. */
		if (next_agino == agino) {
			next_ip = ip;
			next_ip->i_prev_unlinked = prev_agino;
			foundit = true;
			goto next_inode;
		}

		/* Try in-memory lookup first. */
		next_ip = xfs_iunlink_lookup(pag, next_agino);
		if (next_ip)
			goto next_inode;

		/* Inode not in memory, try reloading it. */
		error = xfs_iunlink_reload_next(tp, agibp, prev_agino,
				next_agino);
		if (error)
			break;

		/* Grab the reloaded inode. */
		next_ip = xfs_iunlink_lookup(pag, next_agino);
		if (!next_ip) {
			/* No incore inode at all? We reloaded it... */
			ASSERT(next_ip != NULL);
			error = -EFSCORRUPTED;
			break;
		}

next_inode:
		prev_agino = next_agino;
		next_agino = next_ip->i_next_unlinked;
	}

out_agibp:
	xfs_trans_brelse(tp, agibp);
	/* Should have found this inode somewhere in the iunlinked bucket. */
	if (!error && !foundit)
		error = -EFSCORRUPTED;
	return error;
}

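/*
 * Illustration of the structure walked above (diagram, not code): each AGI
 * bucket heads a singly-linked list of unlinked-but-still-open inodes,
 * chained on disk through each inode's next-unlinked pointer. The loop
 * rebuilds the incore back-pointers (i_prev_unlinked) as it goes:
 *
 *	agi_unlinked[bucket] -> ino A -> ino B -> ino C -> NULLAGINO
 *	i_prev_unlinked:   NULLAGINO      A        B
 */
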
/* Decide if this inode is missing its unlinked list and reload it. */
int
xfs_inode_reload_unlinked(
	struct xfs_inode	*ip)
{
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc_empty(ip->i_mount, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_inode_unlinked_incomplete(ip))
		error = xfs_inode_reload_unlinked_bucket(tp, ip);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	xfs_trans_cancel(tp);

	return error;
}

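/*
 * Usage sketch (hypothetical caller, for illustration): code about to
 * change the link count of a possibly-unlinked inode can first make sure
 * the incore unlinked list has been reloaded:
 *
 *	if (xfs_inode_unlinked_incomplete(ip)) {
 *		error = xfs_inode_reload_unlinked(ip);
 *		if (error)
 *			return error;
 *	}
 */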