xref: /openbmc/linux/fs/xfs/xfs_attr_list.c (revision 94c7b6fc)
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2013 Red Hat, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_attr.h"
#include "xfs_attr_sf.h"
#include "xfs_attr_remote.h"
#include "xfs_attr_leaf.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_buf_item.h"
#include "xfs_cksum.h"
#include "xfs_dinode.h"
#include "xfs_dir2.h"

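/*
 * Comparator for xfs_sort(): order shortform entries by hash value, and use
 * the original entry index (entno) to break ties so entries with the same
 * hash keep a stable, repeatable order across calls.
 */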
STATIC int
xfs_attr_shortform_compare(const void *a, const void *b)
{
	xfs_attr_sf_sort_t *sa, *sb;

	sa = (xfs_attr_sf_sort_t *)a;
	sb = (xfs_attr_sf_sort_t *)b;
	if (sa->hash < sb->hash) {
		return(-1);
	} else if (sa->hash > sb->hash) {
		return(1);
	} else {
		return(sa->entno - sb->entno);
	}
}

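/*
 * A cursor with every field still zero has never been used: the caller is
 * starting a brand-new listing rather than resuming an earlier one.
 */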
#define XFS_ISRESET_CURSOR(cursor) \
	(!((cursor)->initted) && !((cursor)->hashval) && \
	 !((cursor)->blkno) && !((cursor)->offset))
/*
 * Copy out entries of shortform attribute lists for attr_list().
 * Shortform attribute lists are not stored in hashval sorted order.
 * If the output buffer is not large enough to hold them all, then
 * we have to calculate each entry's hash value and sort the entries
 * before we can begin returning them to the user.
 */
int
xfs_attr_shortform_list(xfs_attr_list_context_t *context)
{
	attrlist_cursor_kern_t *cursor;
	xfs_attr_sf_sort_t *sbuf, *sbp;
	xfs_attr_shortform_t *sf;
	xfs_attr_sf_entry_t *sfe;
	xfs_inode_t *dp;
	int sbsize, nsbuf, count, i;
	int error;

	ASSERT(context != NULL);
	dp = context->dp;
	ASSERT(dp != NULL);
	ASSERT(dp->i_afp != NULL);
	sf = (xfs_attr_shortform_t *)dp->i_afp->if_u1.if_data;
	ASSERT(sf != NULL);
	if (!sf->hdr.count)
		return(0);
	cursor = context->cursor;
	ASSERT(cursor != NULL);

	trace_xfs_attr_list_sf(context);

	/*
	 * If the buffer is large enough and the cursor is at the start,
	 * do not bother with sorting since we will return everything in
	 * one buffer and another call using the cursor won't need to be
	 * made.
	 * Note the generous fudge factor of 16 overhead bytes per entry.
	 * If bufsize is zero then put_listent must be a search function
	 * and can just scan through what we have.
	 */
	if (context->bufsize == 0 ||
	    (XFS_ISRESET_CURSOR(cursor) &&
	     (dp->i_afp->if_bytes + sf->hdr.count * 16) < context->bufsize)) {
		for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) {
			error = context->put_listent(context,
					   sfe->flags,
					   sfe->nameval,
					   (int)sfe->namelen,
					   (int)sfe->valuelen,
					   &sfe->nameval[sfe->namelen]);

			/*
			 * Either the search callback finished early or
			 * the entries didn't all fit in the buffer after
			 * all.
			 */
			if (context->seen_enough)
				break;

			if (error)
				return error;
			sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
		}
		trace_xfs_attr_list_sf_all(context);
		return(0);
	}

	/* do no more for a search callback */
	if (context->bufsize == 0)
		return 0;

	/*
	 * It didn't all fit, so we have to sort everything on hashval.
	 */
	sbsize = sf->hdr.count * sizeof(*sbuf);
	sbp = sbuf = kmem_alloc(sbsize, KM_SLEEP | KM_NOFS);

	/*
	 * Scan the attribute list for the rest of the entries, storing
	 * the relevant info from only those that match into a buffer.
	 */
	nsbuf = 0;
	for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) {
		if (unlikely(
		    ((char *)sfe < (char *)sf) ||
		    ((char *)sfe >= ((char *)sf + dp->i_afp->if_bytes)))) {
			XFS_CORRUPTION_ERROR("xfs_attr_shortform_list",
					     XFS_ERRLEVEL_LOW,
					     context->dp->i_mount, sfe);
			kmem_free(sbuf);
			return XFS_ERROR(EFSCORRUPTED);
		}

		sbp->entno = i;
		sbp->hash = xfs_da_hashname(sfe->nameval, sfe->namelen);
		sbp->name = sfe->nameval;
		sbp->namelen = sfe->namelen;
		/* These are bytes, and both on-disk, don't endian-flip */
		sbp->valuelen = sfe->valuelen;
		sbp->flags = sfe->flags;
		sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
		sbp++;
		nsbuf++;
	}

	/*
	 * Sort the entries on hash then entno.
	 */
	xfs_sort(sbuf, nsbuf, sizeof(*sbuf), xfs_attr_shortform_compare);

	/*
	 * Re-find our place IN THE SORTED LIST.
	 */
	count = 0;
	cursor->initted = 1;
	cursor->blkno = 0;
	for (sbp = sbuf, i = 0; i < nsbuf; i++, sbp++) {
		if (sbp->hash == cursor->hashval) {
			if (cursor->offset == count) {
				break;
			}
			count++;
		} else if (sbp->hash > cursor->hashval) {
			break;
		}
	}
	if (i == nsbuf) {
		kmem_free(sbuf);
		return(0);
	}

	/*
	 * Loop putting entries into the user buffer.
	 */
	for ( ; i < nsbuf; i++, sbp++) {
		if (cursor->hashval != sbp->hash) {
			cursor->hashval = sbp->hash;
			cursor->offset = 0;
		}
		error = context->put_listent(context,
					sbp->flags,
					sbp->name,
					sbp->namelen,
					sbp->valuelen,
					&sbp->name[sbp->namelen]);
		if (error) {
			kmem_free(sbuf);
			return error;
		}
		if (context->seen_enough)
			break;
		cursor->offset++;
	}

	kmem_free(sbuf);
	return(0);
}

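/*
 * List attributes when the attr fork is in multi-block (node/Btree) format.
 * First try to resume at the block cached in the cursor; if that block no
 * longer looks right, descend from the dabtree root by hash value, then walk
 * the leaf blocks forward, copying entries out until the buffer fills up.
 */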
STATIC int
xfs_attr_node_list(xfs_attr_list_context_t *context)
{
	attrlist_cursor_kern_t *cursor;
	xfs_attr_leafblock_t *leaf;
	xfs_da_intnode_t *node;
	struct xfs_attr3_icleaf_hdr leafhdr;
	struct xfs_da3_icnode_hdr nodehdr;
	struct xfs_da_node_entry *btree;
	int error, i;
	struct xfs_buf *bp;
	struct xfs_inode	*dp = context->dp;

	trace_xfs_attr_node_list(context);

	cursor = context->cursor;
	cursor->initted = 1;

	/*
	 * Do all sorts of validation on the passed-in cursor structure.
	 * If anything is amiss, ignore the cursor and look up the hashval
	 * starting from the btree root.
	 */
	bp = NULL;
	if (cursor->blkno > 0) {
		error = xfs_da3_node_read(NULL, dp, cursor->blkno, -1,
					      &bp, XFS_ATTR_FORK);
		if ((error != 0) && (error != EFSCORRUPTED))
			return(error);
		if (bp) {
			struct xfs_attr_leaf_entry *entries;

			node = bp->b_addr;
			switch (be16_to_cpu(node->hdr.info.magic)) {
			case XFS_DA_NODE_MAGIC:
			case XFS_DA3_NODE_MAGIC:
				trace_xfs_attr_list_wrong_blk(context);
				xfs_trans_brelse(NULL, bp);
				bp = NULL;
				break;
			case XFS_ATTR_LEAF_MAGIC:
			case XFS_ATTR3_LEAF_MAGIC:
				leaf = bp->b_addr;
				xfs_attr3_leaf_hdr_from_disk(&leafhdr, leaf);
				entries = xfs_attr3_leaf_entryp(leaf);
				if (cursor->hashval > be32_to_cpu(
						entries[leafhdr.count - 1].hashval)) {
					trace_xfs_attr_list_wrong_blk(context);
					xfs_trans_brelse(NULL, bp);
					bp = NULL;
				} else if (cursor->hashval <= be32_to_cpu(
						entries[0].hashval)) {
					trace_xfs_attr_list_wrong_blk(context);
					xfs_trans_brelse(NULL, bp);
					bp = NULL;
				}
				break;
			default:
				trace_xfs_attr_list_wrong_blk(context);
				xfs_trans_brelse(NULL, bp);
				bp = NULL;
			}
		}
	}

	/*
	 * We did not find what we expected given the cursor's contents,
	 * so we start from the top and work down based on the hash value.
	 * Note that the start of a node block is the same as the start of
	 * a leaf block.
	 */
	if (bp == NULL) {
		cursor->blkno = 0;
		for (;;) {
			__uint16_t magic;

			error = xfs_da3_node_read(NULL, dp,
						      cursor->blkno, -1, &bp,
						      XFS_ATTR_FORK);
			if (error)
				return(error);
			node = bp->b_addr;
			magic = be16_to_cpu(node->hdr.info.magic);
			if (magic == XFS_ATTR_LEAF_MAGIC ||
			    magic == XFS_ATTR3_LEAF_MAGIC)
				break;
			if (magic != XFS_DA_NODE_MAGIC &&
			    magic != XFS_DA3_NODE_MAGIC) {
				XFS_CORRUPTION_ERROR("xfs_attr_node_list(3)",
						     XFS_ERRLEVEL_LOW,
						     context->dp->i_mount,
						     node);
				xfs_trans_brelse(NULL, bp);
				return XFS_ERROR(EFSCORRUPTED);
			}

			dp->d_ops->node_hdr_from_disk(&nodehdr, node);
			btree = dp->d_ops->node_tree_p(node);
			for (i = 0; i < nodehdr.count; btree++, i++) {
				if (cursor->hashval
						<= be32_to_cpu(btree->hashval)) {
					cursor->blkno = be32_to_cpu(btree->before);
					trace_xfs_attr_list_node_descend(context,
									 btree);
					break;
				}
			}
			if (i == nodehdr.count) {
				xfs_trans_brelse(NULL, bp);
				return 0;
			}
			xfs_trans_brelse(NULL, bp);
		}
	}
	ASSERT(bp != NULL);

	/*
	 * Roll upward through the blocks, processing each leaf block in
	 * order.  As long as there is space in the result buffer, keep
	 * adding the information.
	 */
	for (;;) {
		leaf = bp->b_addr;
		error = xfs_attr3_leaf_list_int(bp, context);
		if (error) {
			xfs_trans_brelse(NULL, bp);
			return error;
		}
		xfs_attr3_leaf_hdr_from_disk(&leafhdr, leaf);
		if (context->seen_enough || leafhdr.forw == 0)
			break;
		cursor->blkno = leafhdr.forw;
		xfs_trans_brelse(NULL, bp);
		error = xfs_attr3_leaf_read(NULL, dp, cursor->blkno, -1, &bp);
		if (error)
			return error;
	}
	xfs_trans_brelse(NULL, bp);
	return 0;
}

/*
 * Copy out attribute list entries for attr_list(), for leaf attribute lists.
 */
int
xfs_attr3_leaf_list_int(
	struct xfs_buf			*bp,
	struct xfs_attr_list_context	*context)
{
	struct attrlist_cursor_kern	*cursor;
	struct xfs_attr_leafblock	*leaf;
	struct xfs_attr3_icleaf_hdr	ichdr;
	struct xfs_attr_leaf_entry	*entries;
	struct xfs_attr_leaf_entry	*entry;
	int				retval;
	int				i;

	trace_xfs_attr_list_leaf(context);

	leaf = bp->b_addr;
	xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf);
	entries = xfs_attr3_leaf_entryp(leaf);

	cursor = context->cursor;
	cursor->initted = 1;

	/*
	 * Re-find our place in the leaf block if this is a new syscall.
	 */
	if (context->resynch) {
		entry = &entries[0];
		for (i = 0; i < ichdr.count; entry++, i++) {
			if (be32_to_cpu(entry->hashval) == cursor->hashval) {
				if (cursor->offset == context->dupcnt) {
					context->dupcnt = 0;
					break;
				}
				context->dupcnt++;
			} else if (be32_to_cpu(entry->hashval) >
					cursor->hashval) {
				context->dupcnt = 0;
				break;
			}
		}
		if (i == ichdr.count) {
			trace_xfs_attr_list_notfound(context);
			return 0;
		}
	} else {
		entry = &entries[0];
		i = 0;
	}
	context->resynch = 0;

	/*
	 * We have found our place, start copying out the new attributes.
	 */
	retval = 0;
	for (; i < ichdr.count; entry++, i++) {
		if (be32_to_cpu(entry->hashval) != cursor->hashval) {
			cursor->hashval = be32_to_cpu(entry->hashval);
			cursor->offset = 0;
		}

		if (entry->flags & XFS_ATTR_INCOMPLETE)
			continue;		/* skip incomplete entries */

		if (entry->flags & XFS_ATTR_LOCAL) {
			xfs_attr_leaf_name_local_t *name_loc =
				xfs_attr3_leaf_name_local(leaf, i);

			retval = context->put_listent(context,
						entry->flags,
						name_loc->nameval,
						(int)name_loc->namelen,
						be16_to_cpu(name_loc->valuelen),
						&name_loc->nameval[name_loc->namelen]);
			if (retval)
				return retval;
		} else {
			xfs_attr_leaf_name_remote_t *name_rmt =
				xfs_attr3_leaf_name_remote(leaf, i);

			int valuelen = be32_to_cpu(name_rmt->valuelen);

			if (context->put_value) {
				xfs_da_args_t args;

				memset((char *)&args, 0, sizeof(args));
				args.geo = context->dp->i_mount->m_attr_geo;
				args.dp = context->dp;
				args.whichfork = XFS_ATTR_FORK;
				args.valuelen = valuelen;
				args.rmtvaluelen = valuelen;
				args.value = kmem_alloc(valuelen, KM_SLEEP | KM_NOFS);
				args.rmtblkno = be32_to_cpu(name_rmt->valueblk);
				args.rmtblkcnt = xfs_attr3_rmt_blocks(
							args.dp->i_mount, valuelen);
				retval = xfs_attr_rmtval_get(&args);
				if (retval) {
					kmem_free(args.value);
					return retval;
				}
				retval = context->put_listent(context,
						entry->flags,
						name_rmt->name,
						(int)name_rmt->namelen,
						valuelen,
						args.value);
				kmem_free(args.value);
			} else {
				retval = context->put_listent(context,
						entry->flags,
						name_rmt->name,
						(int)name_rmt->namelen,
						valuelen,
						NULL);
			}
			if (retval)
				return retval;
		}
		if (context->seen_enough)
			break;
		cursor->offset++;
	}
	trace_xfs_attr_list_leaf_end(context);
	return retval;
}

/*
 * Copy out attribute entries for attr_list(), for leaf attribute lists.
 */
STATIC int
xfs_attr_leaf_list(xfs_attr_list_context_t *context)
{
	int error;
	struct xfs_buf *bp;

	trace_xfs_attr_leaf_list(context);

	context->cursor->blkno = 0;
	error = xfs_attr3_leaf_read(NULL, context->dp, 0, -1, &bp);
	if (error)
		return XFS_ERROR(error);

	error = xfs_attr3_leaf_list_int(bp, context);
	xfs_trans_brelse(NULL, bp);
	return XFS_ERROR(error);
}

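/*
 * Dispatch on the attribute fork format: shortform attributes live directly
 * in the inode fork, a single-block fork is listed as one leaf block, and
 * anything larger goes through the full dabtree walk in xfs_attr_node_list().
 */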
int
xfs_attr_list_int(
	xfs_attr_list_context_t *context)
{
	int error;
	xfs_inode_t *dp = context->dp;
	uint		lock_mode;

	XFS_STATS_INC(xs_attr_list);

	if (XFS_FORCED_SHUTDOWN(dp->i_mount))
		return EIO;

	/*
	 * Decide on what work routines to call based on the inode size.
	 */
	lock_mode = xfs_ilock_attr_map_shared(dp);
	if (!xfs_inode_hasattr(dp)) {
		error = 0;
	} else if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
		error = xfs_attr_shortform_list(context);
	} else if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) {
		error = xfs_attr_leaf_list(context);
	} else {
		error = xfs_attr_node_list(context);
	}
	xfs_iunlock(dp, lock_mode);
	return error;
}

#define	ATTR_ENTBASESIZE		/* minimum bytes used by an attr */ \
	(((struct attrlist_ent *) 0)->a_name - (char *) 0)
#define	ATTR_ENTSIZE(namelen)		/* actual bytes used by an attr */ \
	((ATTR_ENTBASESIZE + (namelen) + 1 + sizeof(u_int32_t)-1) \
	 & ~(sizeof(u_int32_t)-1))
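/*
 * ATTR_ENTBASESIZE is a hand-rolled offsetof(struct attrlist_ent, a_name),
 * i.e. the bytes used before the name string starts.  ATTR_ENTSIZE then adds
 * the name plus its NUL terminator and rounds the total up to a 4-byte
 * boundary.  For example, if a_name starts at offset 4, a 5-character name
 * needs 4 + 5 + 1 = 10 bytes, which rounds up to 12.
 */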

/*
 * Format an attribute and copy it out to the user's buffer.
 * Take care to check values and protect against them changing later;
 * we may be reading them directly out of a user buffer.
 */
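/*
 * The output buffer is filled from both ends: the attrlist header and its
 * growing al_offset[] array sit at the front, while each formatted entry is
 * packed downward from the end (firstu).  When the two regions would meet,
 * al_more is set so the caller knows to come back with the cursor.
 */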
STATIC int
xfs_attr_put_listent(
	xfs_attr_list_context_t *context,
	int		flags,
	unsigned char	*name,
	int		namelen,
	int		valuelen,
	unsigned char	*value)
{
	struct attrlist *alist = (struct attrlist *)context->alist;
	attrlist_ent_t *aep;
	int arraytop;

	ASSERT(!(context->flags & ATTR_KERNOVAL));
	ASSERT(context->count >= 0);
	ASSERT(context->count < (ATTR_MAX_VALUELEN/8));
	ASSERT(context->firstu >= sizeof(*alist));
	ASSERT(context->firstu <= context->bufsize);

	/*
	 * Only list entries in the right namespace.
	 */
	if (((context->flags & ATTR_SECURE) == 0) !=
	    ((flags & XFS_ATTR_SECURE) == 0))
		return 0;
	if (((context->flags & ATTR_ROOT) == 0) !=
	    ((flags & XFS_ATTR_ROOT) == 0))
		return 0;

	arraytop = sizeof(*alist) +
			context->count * sizeof(alist->al_offset[0]);
	context->firstu -= ATTR_ENTSIZE(namelen);
	if (context->firstu < arraytop) {
		trace_xfs_attr_list_full(context);
		alist->al_more = 1;
		context->seen_enough = 1;
		return 1;
	}

	aep = (attrlist_ent_t *)&context->alist[context->firstu];
	aep->a_valuelen = valuelen;
	memcpy(aep->a_name, name, namelen);
	aep->a_name[namelen] = 0;
	alist->al_offset[context->count++] = context->firstu;
	alist->al_count = context->count;
	trace_xfs_attr_list_add(context);
	return 0;
}

/*
 * Generate a list of extended attribute names and optionally
 * also value lengths.  A positive return value follows the XFS
 * convention of being an error; a zero or negative return code
 * is the length of the buffer returned (negated), indicating
 * success.
 */
int
xfs_attr_list(
	xfs_inode_t	*dp,
	char		*buffer,
	int		bufsize,
	int		flags,
	attrlist_cursor_kern_t *cursor)
{
	xfs_attr_list_context_t context;
	struct attrlist *alist;
	int error;

	/*
	 * Validate the cursor.
	 */
	if (cursor->pad1 || cursor->pad2)
		return(XFS_ERROR(EINVAL));
	if ((cursor->initted == 0) &&
	    (cursor->hashval || cursor->blkno || cursor->offset))
		return XFS_ERROR(EINVAL);

	/*
	 * Check for a properly aligned buffer.
	 */
	if (((long)buffer) & (sizeof(int)-1))
		return XFS_ERROR(EFAULT);
	if (flags & ATTR_KERNOVAL)
		bufsize = 0;

	/*
	 * Initialize the output buffer.
	 */
	memset(&context, 0, sizeof(context));
	context.dp = dp;
	context.cursor = cursor;
	context.resynch = 1;
	context.flags = flags;
	context.alist = buffer;
	context.bufsize = (bufsize & ~(sizeof(int)-1));  /* align */
	context.firstu = context.bufsize;
	context.put_listent = xfs_attr_put_listent;

	alist = (struct attrlist *)context.alist;
	alist->al_count = 0;
	alist->al_more = 0;
	alist->al_offset[0] = context.bufsize;

	error = xfs_attr_list_int(&context);
	ASSERT(error >= 0);
	return error;
}
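
/*
 * Illustrative use only (a sketch, not code from this file): a caller zeroes
 * the cursor once, then keeps calling xfs_attr_list() with the same cursor,
 * consuming the entries recorded in the attrlist header, until al_more comes
 * back clear.
 *
 *	struct attrlist_cursor_kern cursor = { 0 };
 *	struct attrlist *alist = (struct attrlist *)buffer;
 *	int error;
 *
 *	do {
 *		error = xfs_attr_list(dp, buffer, bufsize, flags, &cursor);
 *		if (error)
 *			break;
 *		(process entries via alist->al_count and alist->al_offset[])
 *	} while (alist->al_more);
 */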