// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2020-2022, Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_ag.h"
#include "xfs_iunlink_item.h"
#include "xfs_trace.h"
#include "xfs_error.h"

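/* Slab cache used to allocate struct xfs_iunlink_item log items. */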
struct kmem_cache	*xfs_iunlink_cache;

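/* Convert a generic log item back to the iunlink item that embeds it. */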
static inline struct xfs_iunlink_item *IUL_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_iunlink_item, item);
}

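/*
 * Tear down an iunlink item: drop the per-AG reference it holds and return
 * the item to the slab cache.
 */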
static void
xfs_iunlink_item_release(
	struct xfs_log_item	*lip)
{
	struct xfs_iunlink_item	*iup = IUL_ITEM(lip);

	xfs_perag_put(iup->pag);
	kmem_cache_free(xfs_iunlink_cache, IUL_ITEM(lip));
}


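/*
 * Sort iunlink items by inode number so that the transaction precommit
 * processes them, and hence locks their inode cluster buffers, in a
 * consistent order.
 */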
static uint64_t
xfs_iunlink_item_sort(
	struct xfs_log_item	*lip)
{
	return IUL_ITEM(lip)->ip->i_ino;
}

/*
 * Look up the inode cluster buffer and log the on-disk unlinked inode change
 * we need to make.
 */
static int
xfs_iunlink_log_dinode(
	struct xfs_trans	*tp,
	struct xfs_iunlink_item	*iup)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_inode	*ip = iup->ip;
	struct xfs_dinode	*dip;
	struct xfs_buf		*ibp;
	int			offset;
	int			error;

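	/* Read the inode cluster buffer backing this inode into the transaction. */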
	error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &ibp);
	if (error)
		return error;
	/*
	 * Don't log the unlinked field on stale buffers as this may be the
	 * transaction that frees the inode cluster and relogging the buffer
	 * here will incorrectly remove the stale state.
	 */
	if (ibp->b_flags & XBF_STALE)
		goto out;

	dip = xfs_buf_offset(ibp, ip->i_imap.im_boffset);

	/* Make sure the old pointer isn't garbage. */
	if (be32_to_cpu(dip->di_next_unlinked) != iup->old_agino) {
		xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, dip,
				sizeof(*dip), __this_address);
		error = -EFSCORRUPTED;
		goto out;
	}

	trace_xfs_iunlink_update_dinode(mp, iup->pag->pag_agno,
			XFS_INO_TO_AGINO(mp, ip->i_ino),
			be32_to_cpu(dip->di_next_unlinked), iup->next_agino);

	dip->di_next_unlinked = cpu_to_be32(iup->next_agino);
	offset = ip->i_imap.im_boffset +
			offsetof(struct xfs_dinode, di_next_unlinked);

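	/*
	 * Recalculate the on-disk inode CRC, mark the buffer as an inode
	 * buffer and log only the range covering di_next_unlinked.
	 */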
	xfs_dinode_calc_crc(mp, dip);
	xfs_trans_inode_buf(tp, ibp);
	xfs_trans_log_buf(tp, ibp, offset, offset + sizeof(xfs_agino_t) - 1);
	return 0;
out:
	xfs_trans_brelse(tp, ibp);
	return error;
}

/*
 * On precommit, we grab the inode cluster buffer for the inode number we were
 * passed, then update the next unlinked field for that inode in the buffer and
 * log the buffer. This ensures that the inode cluster buffer was logged in the
 * correct order w.r.t. other inode cluster buffers. We can then remove the
 * iunlink item from the transaction and release it as it has now served its
 * purpose.
 */
static int
xfs_iunlink_item_precommit(
	struct xfs_trans	*tp,
	struct xfs_log_item	*lip)
{
	struct xfs_iunlink_item	*iup = IUL_ITEM(lip);
	int			error;

	error = xfs_iunlink_log_dinode(tp, iup);
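	/*
	 * The iunlink item is done regardless of the outcome, so remove it
	 * from the transaction's item list and free it here rather than at
	 * commit completion.
	 */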
	list_del(&lip->li_trans);
	xfs_iunlink_item_release(lip);
	return error;
}

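/*
 * The iunlink item is removed and freed in ->iop_precommit on the commit path,
 * so ->iop_release only runs if the transaction is cancelled before the
 * precommit pass reaches the item.
 */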
static const struct xfs_item_ops xfs_iunlink_item_ops = {
	.iop_release	= xfs_iunlink_item_release,
	.iop_sort	= xfs_iunlink_item_sort,
	.iop_precommit	= xfs_iunlink_item_precommit,
};


/*
 * Log the in-memory unlinked list pointer change for this inode. The on-disk
 * update is deferred to the transaction precommit, where the inode cluster
 * buffer is looked up and logged in the correct order.
 *
 * This joins the item to the transaction and marks it dirty so
 * that we don't need a separate call to do this, nor does the
 * caller need to know anything about the iunlink item.
 */
int
xfs_iunlink_log_inode(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	xfs_agino_t		next_agino)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_iunlink_item	*iup;

	ASSERT(xfs_verify_agino_or_null(pag, next_agino));
	ASSERT(xfs_verify_agino_or_null(pag, ip->i_next_unlinked));

	/*
	 * Since we're updating a linked list, we should never find that the
	 * current pointer is the same as the new value, unless we're
	 * terminating the list.
	 */
	if (ip->i_next_unlinked == next_agino) {
		if (next_agino != NULLAGINO)
			return -EFSCORRUPTED;
		return 0;
	}

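	/*
	 * Record both the old and new unlinked pointers so the precommit can
	 * verify the on-disk value before overwriting it. The allocation uses
	 * __GFP_NOFAIL because failure cannot be handled at this point.
	 */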
	iup = kmem_cache_zalloc(xfs_iunlink_cache, GFP_KERNEL | __GFP_NOFAIL);
	xfs_log_item_init(mp, &iup->item, XFS_LI_IUNLINK,
			  &xfs_iunlink_item_ops);

	iup->ip = ip;
	iup->next_agino = next_agino;
	iup->old_agino = ip->i_next_unlinked;
	iup->pag = xfs_perag_hold(pag);

	xfs_trans_add_item(tp, &iup->item);
	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &iup->item.li_flags);
	return 0;
}