10b61f8a4SDave Chinner // SPDX-License-Identifier: GPL-2.0
2abec5f2bSDave Chinner /*
3abec5f2bSDave Chinner * Copyright (c) 2000-2005 Silicon Graphics, Inc.
4abec5f2bSDave Chinner * Copyright (c) 2013 Red Hat, Inc.
5abec5f2bSDave Chinner * All Rights Reserved.
6abec5f2bSDave Chinner */
7abec5f2bSDave Chinner #include "xfs.h"
8abec5f2bSDave Chinner #include "xfs_fs.h"
95467b34bSDarrick J. Wong #include "xfs_shared.h"
10a4fbe6abSDave Chinner #include "xfs_format.h"
11239880efSDave Chinner #include "xfs_log_format.h"
12239880efSDave Chinner #include "xfs_trans_resv.h"
13abec5f2bSDave Chinner #include "xfs_mount.h"
1457062787SDave Chinner #include "xfs_da_format.h"
15abec5f2bSDave Chinner #include "xfs_inode.h"
16239880efSDave Chinner #include "xfs_trans.h"
17abec5f2bSDave Chinner #include "xfs_bmap.h"
18fd920008SAllison Henderson #include "xfs_da_btree.h"
19abec5f2bSDave Chinner #include "xfs_attr.h"
20a4fbe6abSDave Chinner #include "xfs_attr_sf.h"
21abec5f2bSDave Chinner #include "xfs_attr_leaf.h"
22abec5f2bSDave Chinner #include "xfs_error.h"
23abec5f2bSDave Chinner #include "xfs_trace.h"
244bceb18fSDave Chinner #include "xfs_dir2.h"
25abec5f2bSDave Chinner
26abec5f2bSDave Chinner STATIC int
xfs_attr_shortform_compare(const void * a,const void * b)27abec5f2bSDave Chinner xfs_attr_shortform_compare(const void *a, const void *b)
28abec5f2bSDave Chinner {
29abec5f2bSDave Chinner xfs_attr_sf_sort_t *sa, *sb;
30abec5f2bSDave Chinner
31abec5f2bSDave Chinner sa = (xfs_attr_sf_sort_t *)a;
32abec5f2bSDave Chinner sb = (xfs_attr_sf_sort_t *)b;
33abec5f2bSDave Chinner if (sa->hash < sb->hash) {
34d99831ffSEric Sandeen return -1;
35abec5f2bSDave Chinner } else if (sa->hash > sb->hash) {
36d99831ffSEric Sandeen return 1;
37abec5f2bSDave Chinner } else {
38d99831ffSEric Sandeen return sa->entno - sb->entno;
39abec5f2bSDave Chinner }
40abec5f2bSDave Chinner }
41abec5f2bSDave Chinner
/*
 * A cursor is "reset" when every field is zero, i.e. the caller is
 * starting a brand-new listing rather than resuming an earlier one.
 */
#define XFS_ISRESET_CURSOR(cursor) \
	(!((cursor)->initted) && !((cursor)->hashval) && \
	 !((cursor)->blkno) && !((cursor)->offset))
/*
 * Copy out entries of shortform attribute lists for attr_list().
 * Shortform attribute lists are not stored in hashval sorted order.
 * If the output buffer is not large enough to hold them all, then
 * we have to calculate each entry's hash value and sort them before
 * we can begin returning them to the user.
 */
520d5a75e9SEric Sandeen static int
xfs_attr_shortform_list(struct xfs_attr_list_context * context)5316c6e92cSDarrick J. Wong xfs_attr_shortform_list(
5416c6e92cSDarrick J. Wong struct xfs_attr_list_context *context)
55abec5f2bSDave Chinner {
56e3a19cdeSChristoph Hellwig struct xfs_attrlist_cursor_kern *cursor = &context->cursor;
57e3a19cdeSChristoph Hellwig struct xfs_inode *dp = context->dp;
5816c6e92cSDarrick J. Wong struct xfs_attr_sf_sort *sbuf, *sbp;
5916c6e92cSDarrick J. Wong struct xfs_attr_shortform *sf;
6016c6e92cSDarrick J. Wong struct xfs_attr_sf_entry *sfe;
61abec5f2bSDave Chinner int sbsize, nsbuf, count, i;
6216c6e92cSDarrick J. Wong int error = 0;
63abec5f2bSDave Chinner
64*2ed5b09bSDarrick J. Wong sf = (struct xfs_attr_shortform *)dp->i_af.if_u1.if_data;
65abec5f2bSDave Chinner ASSERT(sf != NULL);
66abec5f2bSDave Chinner if (!sf->hdr.count)
67d99831ffSEric Sandeen return 0;
68abec5f2bSDave Chinner
69abec5f2bSDave Chinner trace_xfs_attr_list_sf(context);
70abec5f2bSDave Chinner
71abec5f2bSDave Chinner /*
72abec5f2bSDave Chinner * If the buffer is large enough and the cursor is at the start,
73abec5f2bSDave Chinner * do not bother with sorting since we will return everything in
74abec5f2bSDave Chinner * one buffer and another call using the cursor won't need to be
75abec5f2bSDave Chinner * made.
76abec5f2bSDave Chinner * Note the generous fudge factor of 16 overhead bytes per entry.
77abec5f2bSDave Chinner * If bufsize is zero then put_listent must be a search function
78abec5f2bSDave Chinner * and can just scan through what we have.
79abec5f2bSDave Chinner */
80abec5f2bSDave Chinner if (context->bufsize == 0 ||
81abec5f2bSDave Chinner (XFS_ISRESET_CURSOR(cursor) &&
82*2ed5b09bSDarrick J. Wong (dp->i_af.if_bytes + sf->hdr.count * 16) < context->bufsize)) {
83abec5f2bSDave Chinner for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) {
84a71895c5SDarrick J. Wong if (XFS_IS_CORRUPT(context->dp->i_mount,
85a71895c5SDarrick J. Wong !xfs_attr_namecheck(sfe->nameval,
86a71895c5SDarrick J. Wong sfe->namelen)))
8716c6e92cSDarrick J. Wong return -EFSCORRUPTED;
88f7a136aeSEric Sandeen context->put_listent(context,
89abec5f2bSDave Chinner sfe->flags,
90abec5f2bSDave Chinner sfe->nameval,
91abec5f2bSDave Chinner (int)sfe->namelen,
92e5bd12bfSEric Sandeen (int)sfe->valuelen);
93abec5f2bSDave Chinner /*
94abec5f2bSDave Chinner * Either search callback finished early or
95abec5f2bSDave Chinner * didn't fit it all in the buffer after all.
96abec5f2bSDave Chinner */
97abec5f2bSDave Chinner if (context->seen_enough)
98abec5f2bSDave Chinner break;
99e01b7eedSCarlos Maiolino sfe = xfs_attr_sf_nextentry(sfe);
100abec5f2bSDave Chinner }
101abec5f2bSDave Chinner trace_xfs_attr_list_sf_all(context);
102d99831ffSEric Sandeen return 0;
103abec5f2bSDave Chinner }
104abec5f2bSDave Chinner
105abec5f2bSDave Chinner /* do no more for a search callback */
106abec5f2bSDave Chinner if (context->bufsize == 0)
107abec5f2bSDave Chinner return 0;
108abec5f2bSDave Chinner
109abec5f2bSDave Chinner /*
110abec5f2bSDave Chinner * It didn't all fit, so we have to sort everything on hashval.
111abec5f2bSDave Chinner */
112abec5f2bSDave Chinner sbsize = sf->hdr.count * sizeof(*sbuf);
113707e0ddaSTetsuo Handa sbp = sbuf = kmem_alloc(sbsize, KM_NOFS);
114abec5f2bSDave Chinner
115abec5f2bSDave Chinner /*
116abec5f2bSDave Chinner * Scan the attribute list for the rest of the entries, storing
117abec5f2bSDave Chinner * the relevant info from only those that match into a buffer.
118abec5f2bSDave Chinner */
119abec5f2bSDave Chinner nsbuf = 0;
120abec5f2bSDave Chinner for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) {
121abec5f2bSDave Chinner if (unlikely(
122abec5f2bSDave Chinner ((char *)sfe < (char *)sf) ||
123*2ed5b09bSDarrick J. Wong ((char *)sfe >= ((char *)sf + dp->i_af.if_bytes)))) {
124abec5f2bSDave Chinner XFS_CORRUPTION_ERROR("xfs_attr_shortform_list",
125abec5f2bSDave Chinner XFS_ERRLEVEL_LOW,
1262551a530SDarrick J. Wong context->dp->i_mount, sfe,
1272551a530SDarrick J. Wong sizeof(*sfe));
128abec5f2bSDave Chinner kmem_free(sbuf);
1292451337dSDave Chinner return -EFSCORRUPTED;
130abec5f2bSDave Chinner }
131abec5f2bSDave Chinner
132abec5f2bSDave Chinner sbp->entno = i;
133abec5f2bSDave Chinner sbp->hash = xfs_da_hashname(sfe->nameval, sfe->namelen);
134abec5f2bSDave Chinner sbp->name = sfe->nameval;
135abec5f2bSDave Chinner sbp->namelen = sfe->namelen;
136abec5f2bSDave Chinner /* These are bytes, and both on-disk, don't endian-flip */
137abec5f2bSDave Chinner sbp->valuelen = sfe->valuelen;
138abec5f2bSDave Chinner sbp->flags = sfe->flags;
139e01b7eedSCarlos Maiolino sfe = xfs_attr_sf_nextentry(sfe);
140abec5f2bSDave Chinner sbp++;
141abec5f2bSDave Chinner nsbuf++;
142abec5f2bSDave Chinner }
143abec5f2bSDave Chinner
144abec5f2bSDave Chinner /*
145abec5f2bSDave Chinner * Sort the entries on hash then entno.
146abec5f2bSDave Chinner */
147abec5f2bSDave Chinner xfs_sort(sbuf, nsbuf, sizeof(*sbuf), xfs_attr_shortform_compare);
148abec5f2bSDave Chinner
149abec5f2bSDave Chinner /*
150abec5f2bSDave Chinner * Re-find our place IN THE SORTED LIST.
151abec5f2bSDave Chinner */
152abec5f2bSDave Chinner count = 0;
153abec5f2bSDave Chinner cursor->initted = 1;
154abec5f2bSDave Chinner cursor->blkno = 0;
155abec5f2bSDave Chinner for (sbp = sbuf, i = 0; i < nsbuf; i++, sbp++) {
156abec5f2bSDave Chinner if (sbp->hash == cursor->hashval) {
157abec5f2bSDave Chinner if (cursor->offset == count) {
158abec5f2bSDave Chinner break;
159abec5f2bSDave Chinner }
160abec5f2bSDave Chinner count++;
161abec5f2bSDave Chinner } else if (sbp->hash > cursor->hashval) {
162abec5f2bSDave Chinner break;
163abec5f2bSDave Chinner }
164abec5f2bSDave Chinner }
16516c6e92cSDarrick J. Wong if (i == nsbuf)
16616c6e92cSDarrick J. Wong goto out;
167abec5f2bSDave Chinner
168abec5f2bSDave Chinner /*
169abec5f2bSDave Chinner * Loop putting entries into the user buffer.
170abec5f2bSDave Chinner */
171abec5f2bSDave Chinner for ( ; i < nsbuf; i++, sbp++) {
172abec5f2bSDave Chinner if (cursor->hashval != sbp->hash) {
173abec5f2bSDave Chinner cursor->hashval = sbp->hash;
174abec5f2bSDave Chinner cursor->offset = 0;
175abec5f2bSDave Chinner }
176a71895c5SDarrick J. Wong if (XFS_IS_CORRUPT(context->dp->i_mount,
177a71895c5SDarrick J. Wong !xfs_attr_namecheck(sbp->name,
178a71895c5SDarrick J. Wong sbp->namelen))) {
17916c6e92cSDarrick J. Wong error = -EFSCORRUPTED;
18016c6e92cSDarrick J. Wong goto out;
18116c6e92cSDarrick J. Wong }
182f7a136aeSEric Sandeen context->put_listent(context,
183abec5f2bSDave Chinner sbp->flags,
184abec5f2bSDave Chinner sbp->name,
185abec5f2bSDave Chinner sbp->namelen,
186e5bd12bfSEric Sandeen sbp->valuelen);
187abec5f2bSDave Chinner if (context->seen_enough)
188abec5f2bSDave Chinner break;
189abec5f2bSDave Chinner cursor->offset++;
190abec5f2bSDave Chinner }
19116c6e92cSDarrick J. Wong out:
192abec5f2bSDave Chinner kmem_free(sbuf);
19316c6e92cSDarrick J. Wong return error;
194abec5f2bSDave Chinner }
195abec5f2bSDave Chinner
196bdaac93fSDarrick J. Wong /*
197bdaac93fSDarrick J. Wong * We didn't find the block & hash mentioned in the cursor state, so
198bdaac93fSDarrick J. Wong * walk down the attr btree looking for the hash.
199bdaac93fSDarrick J. Wong */
STATIC int
xfs_attr_node_list_lookup(
	struct xfs_attr_list_context	*context,
	struct xfs_attrlist_cursor_kern	*cursor,
	struct xfs_buf			**pbp)
{
	struct xfs_da3_icnode_hdr	nodehdr;
	struct xfs_da_intnode		*node;
	struct xfs_da_node_entry	*btree;
	struct xfs_inode		*dp = context->dp;
	struct xfs_mount		*mp = dp->i_mount;
	struct xfs_trans		*tp = context->tp;
	struct xfs_buf			*bp;
	int				i;
	int				error = 0;
	unsigned int			expected_level = 0;
	uint16_t			magic;

	ASSERT(*pbp == NULL);
	cursor->blkno = 0;
	for (;;) {
		/* Read the next block on the walk; blkno 0 is the root. */
		error = xfs_da3_node_read(tp, dp, cursor->blkno, &bp,
				XFS_ATTR_FORK);
		if (error)
			return error;
		node = bp->b_addr;
		magic = be16_to_cpu(node->hdr.info.magic);
		/* Hitting a leaf block ends the downward walk. */
		if (magic == XFS_ATTR_LEAF_MAGIC ||
		    magic == XFS_ATTR3_LEAF_MAGIC)
			break;
		/* Anything other than a node or leaf magic is corruption. */
		if (magic != XFS_DA_NODE_MAGIC &&
		    magic != XFS_DA3_NODE_MAGIC) {
			XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
					node, sizeof(*node));
			goto out_corruptbuf;
		}

		xfs_da3_node_hdr_from_disk(mp, &nodehdr, node);

		/* Tree taller than we can handle; bail out! */
		if (nodehdr.level >= XFS_DA_NODE_MAXDEPTH)
			goto out_corruptbuf;

		/* Check the level from the root node. */
		if (cursor->blkno == 0)
			expected_level = nodehdr.level - 1;
		else if (expected_level != nodehdr.level)
			goto out_corruptbuf;
		else
			expected_level--;

		/* Descend into the first subtree covering the wanted hash. */
		btree = nodehdr.btree;
		for (i = 0; i < nodehdr.count; btree++, i++) {
			if (cursor->hashval <= be32_to_cpu(btree->hashval)) {
				cursor->blkno = be32_to_cpu(btree->before);
				trace_xfs_attr_list_node_descend(context,
						btree);
				break;
			}
		}
		xfs_trans_brelse(tp, bp);

		/* Hash is beyond every entry in this node: nothing to list. */
		if (i == nodehdr.count)
			return 0;

		/* We can't point back to the root. */
		if (XFS_IS_CORRUPT(mp, cursor->blkno == 0))
			return -EFSCORRUPTED;
	}

	/* The leaf must sit at the bottom of the expected depth. */
	if (expected_level != 0)
		goto out_corruptbuf;

	/* Hand the (still referenced) leaf buffer back to the caller. */
	*pbp = bp;
	return 0;

out_corruptbuf:
	xfs_buf_mark_corrupt(bp);
	xfs_trans_brelse(tp, bp);
	return -EFSCORRUPTED;
}
281bdaac93fSDarrick J. Wong
STATIC int
xfs_attr_node_list(
	struct xfs_attr_list_context	*context)
{
	struct xfs_attrlist_cursor_kern	*cursor = &context->cursor;
	struct xfs_attr3_icleaf_hdr	leafhdr;
	struct xfs_attr_leafblock	*leaf;
	struct xfs_da_intnode		*node;
	struct xfs_buf			*bp;
	struct xfs_inode		*dp = context->dp;
	struct xfs_mount		*mp = dp->i_mount;
	int				error = 0;

	trace_xfs_attr_node_list(context);

	cursor->initted = 1;

	/*
	 * Do all sorts of validation on the passed-in cursor structure.
	 * If anything is amiss, ignore the cursor and look up the hashval
	 * starting from the btree root.
	 */
	bp = NULL;
	if (cursor->blkno > 0) {
		error = xfs_da3_node_read(context->tp, dp, cursor->blkno, &bp,
				XFS_ATTR_FORK);
		/* A corrupt block just makes us re-walk from the root. */
		if ((error != 0) && (error != -EFSCORRUPTED))
			return error;
		if (bp) {
			struct xfs_attr_leaf_entry *entries;

			node = bp->b_addr;
			switch (be16_to_cpu(node->hdr.info.magic)) {
			case XFS_DA_NODE_MAGIC:
			case XFS_DA3_NODE_MAGIC:
				/* Cursor points at an interior node: bogus. */
				trace_xfs_attr_list_wrong_blk(context);
				xfs_trans_brelse(context->tp, bp);
				bp = NULL;
				break;
			case XFS_ATTR_LEAF_MAGIC:
			case XFS_ATTR3_LEAF_MAGIC:
				leaf = bp->b_addr;
				xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo,
							     &leafhdr, leaf);
				entries = xfs_attr3_leaf_entryp(leaf);
				if (cursor->hashval > be32_to_cpu(
						entries[leafhdr.count - 1].hashval)) {
					/* Wanted hash lies past this leaf. */
					trace_xfs_attr_list_wrong_blk(context);
					xfs_trans_brelse(context->tp, bp);
					bp = NULL;
				} else if (cursor->hashval <= be32_to_cpu(
						entries[0].hashval)) {
					/* Wanted hash lies before this leaf. */
					trace_xfs_attr_list_wrong_blk(context);
					xfs_trans_brelse(context->tp, bp);
					bp = NULL;
				}
				break;
			default:
				/* Unrecognized block: ignore the cursor. */
				trace_xfs_attr_list_wrong_blk(context);
				xfs_trans_brelse(context->tp, bp);
				bp = NULL;
			}
		}
	}

	/*
	 * We did not find what we expected given the cursor's contents,
	 * so we start from the top and work down based on the hash value.
	 * Note that start of node block is same as start of leaf block.
	 */
	if (bp == NULL) {
		error = xfs_attr_node_list_lookup(context, cursor, &bp);
		if (error || !bp)
			return error;
	}
	ASSERT(bp != NULL);

	/*
	 * Roll upward through the blocks, processing each leaf block in
	 * order.  As long as there is space in the result buffer, keep
	 * adding the information.
	 */
	for (;;) {
		leaf = bp->b_addr;
		error = xfs_attr3_leaf_list_int(bp, context);
		if (error)
			break;
		xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &leafhdr, leaf);
		/* forw == 0 means this was the last leaf in the chain. */
		if (context->seen_enough || leafhdr.forw == 0)
			break;
		cursor->blkno = leafhdr.forw;
		xfs_trans_brelse(context->tp, bp);
		error = xfs_attr3_leaf_read(context->tp, dp, cursor->blkno,
					    &bp);
		if (error)
			return error;
	}
	xfs_trans_brelse(context->tp, bp);
	return error;
}
382abec5f2bSDave Chinner
383abec5f2bSDave Chinner /*
384abec5f2bSDave Chinner * Copy out attribute list entries for attr_list(), for leaf attribute lists.
385abec5f2bSDave Chinner */
int
xfs_attr3_leaf_list_int(
	struct xfs_buf			*bp,
	struct xfs_attr_list_context	*context)
{
	struct xfs_attrlist_cursor_kern	*cursor = &context->cursor;
	struct xfs_attr_leafblock	*leaf;
	struct xfs_attr3_icleaf_hdr	ichdr;
	struct xfs_attr_leaf_entry	*entries;
	struct xfs_attr_leaf_entry	*entry;
	int				i;
	struct xfs_mount		*mp = context->dp->i_mount;

	trace_xfs_attr_list_leaf(context);

	leaf = bp->b_addr;
	xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr, leaf);
	entries = xfs_attr3_leaf_entryp(leaf);

	cursor->initted = 1;

	/*
	 * Re-find our place in the leaf block if this is a new syscall.
	 */
	if (context->resynch) {
		entry = &entries[0];
		for (i = 0; i < ichdr.count; entry++, i++) {
			if (be32_to_cpu(entry->hashval) == cursor->hashval) {
				/*
				 * Several entries can share one hashval;
				 * dupcnt counts how many of them were
				 * already returned on an earlier call.
				 */
				if (cursor->offset == context->dupcnt) {
					context->dupcnt = 0;
					break;
				}
				context->dupcnt++;
			} else if (be32_to_cpu(entry->hashval) >
					cursor->hashval) {
				context->dupcnt = 0;
				break;
			}
		}
		if (i == ichdr.count) {
			/* Nothing at or after the cursor in this leaf. */
			trace_xfs_attr_list_notfound(context);
			return 0;
		}
	} else {
		entry = &entries[0];
		i = 0;
	}
	context->resynch = 0;

	/*
	 * We have found our place, start copying out the new attributes.
	 */
	for (; i < ichdr.count; entry++, i++) {
		char *name;
		int namelen, valuelen;

		/* New hash value: restart the duplicate offset count. */
		if (be32_to_cpu(entry->hashval) != cursor->hashval) {
			cursor->hashval = be32_to_cpu(entry->hashval);
			cursor->offset = 0;
		}

		/* Skip half-finished entries unless the caller wants them. */
		if ((entry->flags & XFS_ATTR_INCOMPLETE) &&
		    !context->allow_incomplete)
			continue;

		if (entry->flags & XFS_ATTR_LOCAL) {
			/* Name and value stored inline in the leaf block. */
			xfs_attr_leaf_name_local_t *name_loc;

			name_loc = xfs_attr3_leaf_name_local(leaf, i);
			name = name_loc->nameval;
			namelen = name_loc->namelen;
			valuelen = be16_to_cpu(name_loc->valuelen);
		} else {
			/* Value lives in a remote block; name is here. */
			xfs_attr_leaf_name_remote_t *name_rmt;

			name_rmt = xfs_attr3_leaf_name_remote(leaf, i);
			name = name_rmt->name;
			namelen = name_rmt->namelen;
			valuelen = be32_to_cpu(name_rmt->valuelen);
		}

		if (XFS_IS_CORRUPT(context->dp->i_mount,
				   !xfs_attr_namecheck(name, namelen)))
			return -EFSCORRUPTED;
		context->put_listent(context, entry->flags,
				     name, namelen, valuelen);
		if (context->seen_enough)
			break;
		cursor->offset++;
	}
	trace_xfs_attr_list_leaf_end(context);
	return 0;
}
479abec5f2bSDave Chinner
480abec5f2bSDave Chinner /*
481abec5f2bSDave Chinner * Copy out attribute entries for attr_list(), for leaf attribute lists.
482abec5f2bSDave Chinner */
483abec5f2bSDave Chinner STATIC int
xfs_attr_leaf_list(struct xfs_attr_list_context * context)484a9c8c69bSChristoph Hellwig xfs_attr_leaf_list(
485a9c8c69bSChristoph Hellwig struct xfs_attr_list_context *context)
486abec5f2bSDave Chinner {
487abec5f2bSDave Chinner struct xfs_buf *bp;
488a9c8c69bSChristoph Hellwig int error;
489abec5f2bSDave Chinner
490abec5f2bSDave Chinner trace_xfs_attr_leaf_list(context);
491abec5f2bSDave Chinner
492e3a19cdeSChristoph Hellwig context->cursor.blkno = 0;
493dfb87594SChristoph Hellwig error = xfs_attr3_leaf_read(context->tp, context->dp, 0, &bp);
494abec5f2bSDave Chinner if (error)
495b474c7aeSEric Sandeen return error;
496abec5f2bSDave Chinner
49716c6e92cSDarrick J. Wong error = xfs_attr3_leaf_list_int(bp, context);
498ad017f65SDarrick J. Wong xfs_trans_brelse(context->tp, bp);
49916c6e92cSDarrick J. Wong return error;
500abec5f2bSDave Chinner }
501abec5f2bSDave Chinner
502abec5f2bSDave Chinner int
xfs_attr_list_ilocked(struct xfs_attr_list_context * context)50317e1dd83SChristoph Hellwig xfs_attr_list_ilocked(
504ad017f65SDarrick J. Wong struct xfs_attr_list_context *context)
505ad017f65SDarrick J. Wong {
506ad017f65SDarrick J. Wong struct xfs_inode *dp = context->dp;
507ad017f65SDarrick J. Wong
5085af7777eSChristoph Hellwig ASSERT(xfs_isilocked(dp, XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
5095af7777eSChristoph Hellwig
510ad017f65SDarrick J. Wong /*
511ad017f65SDarrick J. Wong * Decide on what work routines to call based on the inode size.
512ad017f65SDarrick J. Wong */
513ad017f65SDarrick J. Wong if (!xfs_inode_hasattr(dp))
514ad017f65SDarrick J. Wong return 0;
515*2ed5b09bSDarrick J. Wong if (dp->i_af.if_format == XFS_DINODE_FMT_LOCAL)
516ad017f65SDarrick J. Wong return xfs_attr_shortform_list(context);
5172ac131dfSChristoph Hellwig if (xfs_attr_is_leaf(dp))
518ad017f65SDarrick J. Wong return xfs_attr_leaf_list(context);
519ad017f65SDarrick J. Wong return xfs_attr_node_list(context);
520ad017f65SDarrick J. Wong }
521ad017f65SDarrick J. Wong
522ad017f65SDarrick J. Wong int
xfs_attr_list(struct xfs_attr_list_context * context)52317e1dd83SChristoph Hellwig xfs_attr_list(
524a9c8c69bSChristoph Hellwig struct xfs_attr_list_context *context)
525abec5f2bSDave Chinner {
526a9c8c69bSChristoph Hellwig struct xfs_inode *dp = context->dp;
527568d994eSChristoph Hellwig uint lock_mode;
528a9c8c69bSChristoph Hellwig int error;
529abec5f2bSDave Chinner
530ff6d6af2SBill O'Donnell XFS_STATS_INC(dp->i_mount, xs_attr_list);
531abec5f2bSDave Chinner
53275c8c50fSDave Chinner if (xfs_is_shutdown(dp->i_mount))
5332451337dSDave Chinner return -EIO;
534abec5f2bSDave Chinner
535568d994eSChristoph Hellwig lock_mode = xfs_ilock_attr_map_shared(dp);
53617e1dd83SChristoph Hellwig error = xfs_attr_list_ilocked(context);
537568d994eSChristoph Hellwig xfs_iunlock(dp, lock_mode);
538abec5f2bSDave Chinner return error;
539abec5f2bSDave Chinner }
540