xref: /openbmc/linux/fs/ext2/xattr.c (revision 036b9e7c)
// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext2/xattr.c
 *
 * Copyright (C) 2001-2003 Andreas Gruenbacher <agruen@suse.de>
 *
 * Fix by Harrison Xing <harrison@mountainviewdata.com>.
 * Extended attributes for symlinks and special files added per
 *  suggestion of Luka Renko <luka.renko@hermes.si>.
 * xattr consolidation Copyright (c) 2004 James Morris <jmorris@redhat.com>,
 *  Red Hat Inc.
 *
 */

/*
 * Extended attributes are stored on disk blocks allocated outside of
 * any inode. The i_file_acl field is then made to point to this allocated
 * block. If two inodes have identical sets of extended attributes, they
 * may share the same extended attribute block. Such situations are
 * automatically detected by keeping a cache of recent attribute block
 * numbers and hashes over the block's contents in memory.
 *
 *
 * Extended attribute block layout:
 *
 *   +------------------+
 *   | header           |
 *   | entry 1          | |
 *   | entry 2          | | growing downwards
 *   | entry 3          | v
 *   | four null bytes  |
 *   | . . .            |
 *   | value 1          | ^
 *   | value 3          | | growing upwards
 *   | value 2          | |
 *   +------------------+
 *
 * The block header is followed by multiple entry descriptors. These entry
 * descriptors are variable in size, and aligned to EXT2_XATTR_PAD
 * byte boundaries. The entry descriptors are sorted by attribute name,
 * so that two extended attribute blocks can be compared efficiently.
 *
 * Attribute values are aligned to the end of the block, stored in
 * no specific order. They are also padded to EXT2_XATTR_PAD byte
 * boundaries. No additional gaps are left between them.
 *
 * Locking strategy
 * ----------------
 * EXT2_I(inode)->i_file_acl is protected by EXT2_I(inode)->xattr_sem.
 * EA blocks are only changed if they are exclusive to an inode, so
 * holding xattr_sem also means that nothing but the EA block's reference
 * count will change. Multiple writers to an EA block are synchronized
 * by the bh lock. No more than a single bh lock is held at any time
 * to avoid deadlocks.
 */
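
/*
 * For reference, a sketch of the on-disk structures this file operates
 * on. The authoritative declarations live in fs/ext2/xattr.h; the field
 * names and layout below follow those declarations:
 *
 *	struct ext2_xattr_header {
 *		__le32	h_magic;	-- EXT2_XATTR_MAGIC
 *		__le32	h_refcount;	-- number of inodes sharing the block
 *		__le32	h_blocks;	-- number of blocks used, always 1 here
 *		__le32	h_hash;		-- hash over all entries and values
 *		__u32	h_reserved[4];	-- zero right now
 *	};
 *
 *	struct ext2_xattr_entry {
 *		__u8	e_name_len;	-- length of the attribute name
 *		__u8	e_name_index;	-- handler index (user, trusted, ...)
 *		__le16	e_value_offs;	-- offset of the value within the block
 *		__le32	e_value_block;	-- unused, must be zero
 *		__le32	e_value_size;	-- length of the value in bytes
 *		__le32	e_hash;		-- hash of name and value
 *		char	e_name[];	-- name, not null-terminated
 *	};
 */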

#include <linux/buffer_head.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mbcache.h>
#include <linux/quotaops.h>
#include <linux/rwsem.h>
#include <linux/security.h>
#include "ext2.h"
#include "xattr.h"
#include "acl.h"

#define HDR(bh) ((struct ext2_xattr_header *)((bh)->b_data))
#define ENTRY(ptr) ((struct ext2_xattr_entry *)(ptr))
#define FIRST_ENTRY(bh) ENTRY(HDR(bh)+1)
#define IS_LAST_ENTRY(entry) (*(__u32 *)(entry) == 0)

#ifdef EXT2_XATTR_DEBUG
# define ea_idebug(inode, f...) do { \
		printk(KERN_DEBUG "inode %s:%ld: ", \
			inode->i_sb->s_id, inode->i_ino); \
		printk(f); \
		printk("\n"); \
	} while (0)
# define ea_bdebug(bh, f...) do { \
		printk(KERN_DEBUG "block %pg:%lu: ", \
			bh->b_bdev, (unsigned long) bh->b_blocknr); \
		printk(f); \
		printk("\n"); \
	} while (0)
#else
# define ea_idebug(f...)
# define ea_bdebug(f...)
#endif

static int ext2_xattr_set2(struct inode *, struct buffer_head *,
			   struct ext2_xattr_header *);

static int ext2_xattr_cache_insert(struct mb_cache *, struct buffer_head *);
static struct buffer_head *ext2_xattr_cache_find(struct inode *,
						 struct ext2_xattr_header *);
static void ext2_xattr_rehash(struct ext2_xattr_header *,
			      struct ext2_xattr_entry *);

static const struct xattr_handler *ext2_xattr_handler_map[] = {
	[EXT2_XATTR_INDEX_USER]		     = &ext2_xattr_user_handler,
#ifdef CONFIG_EXT2_FS_POSIX_ACL
	[EXT2_XATTR_INDEX_POSIX_ACL_ACCESS]  = &posix_acl_access_xattr_handler,
	[EXT2_XATTR_INDEX_POSIX_ACL_DEFAULT] = &posix_acl_default_xattr_handler,
#endif
	[EXT2_XATTR_INDEX_TRUSTED]	     = &ext2_xattr_trusted_handler,
#ifdef CONFIG_EXT2_FS_SECURITY
	[EXT2_XATTR_INDEX_SECURITY]	     = &ext2_xattr_security_handler,
#endif
};

const struct xattr_handler *ext2_xattr_handlers[] = {
	&ext2_xattr_user_handler,
	&ext2_xattr_trusted_handler,
#ifdef CONFIG_EXT2_FS_POSIX_ACL
	&posix_acl_access_xattr_handler,
	&posix_acl_default_xattr_handler,
#endif
#ifdef CONFIG_EXT2_FS_SECURITY
	&ext2_xattr_security_handler,
#endif
	NULL
};

#define EA_BLOCK_CACHE(inode)	(EXT2_SB(inode->i_sb)->s_ea_block_cache)

static inline const struct xattr_handler *
ext2_xattr_handler(int name_index)
{
	const struct xattr_handler *handler = NULL;

	if (name_index > 0 && name_index < ARRAY_SIZE(ext2_xattr_handler_map))
		handler = ext2_xattr_handler_map[name_index];
	return handler;
}
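
/*
 * Example (indices declared in fs/ext2/xattr.h): a lookup for the
 * on-disk name index EXT2_XATTR_INDEX_USER resolves to
 * ext2_xattr_user_handler, whose "user" prefix is what userspace sees
 * in names such as "user.foo". An index outside the map, or a hole in
 * it (e.g. the ACL slots with CONFIG_EXT2_FS_POSIX_ACL disabled),
 * yields NULL and such entries are skipped by the callers below.
 */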

/*
 * ext2_xattr_get()
 *
 * Copy an extended attribute into the buffer provided, or compute
 * the buffer size required. If buffer is NULL, only the size of the
 * buffer required is computed.
 *
 * Returns a negative error number on failure, or the number of bytes
 * used / required on success.
 */
int
ext2_xattr_get(struct inode *inode, int name_index, const char *name,
	       void *buffer, size_t buffer_size)
{
	struct buffer_head *bh = NULL;
	struct ext2_xattr_entry *entry;
	size_t name_len, size;
	char *end;
	int error;
	struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);

	ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld",
		  name_index, name, buffer, (long)buffer_size);

	if (name == NULL)
		return -EINVAL;
	name_len = strlen(name);
	if (name_len > 255)
		return -ERANGE;

	down_read(&EXT2_I(inode)->xattr_sem);
	error = -ENODATA;
	if (!EXT2_I(inode)->i_file_acl)
		goto cleanup;
	ea_idebug(inode, "reading block %d", EXT2_I(inode)->i_file_acl);
	bh = sb_bread(inode->i_sb, EXT2_I(inode)->i_file_acl);
	error = -EIO;
	if (!bh)
		goto cleanup;
	ea_bdebug(bh, "b_count=%d, refcount=%d",
		atomic_read(&(bh->b_count)), le32_to_cpu(HDR(bh)->h_refcount));
	end = bh->b_data + bh->b_size;
	if (HDR(bh)->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) ||
	    HDR(bh)->h_blocks != cpu_to_le32(1)) {
bad_block:	ext2_error(inode->i_sb, "ext2_xattr_get",
			"inode %ld: bad block %d", inode->i_ino,
			EXT2_I(inode)->i_file_acl);
		error = -EIO;
		goto cleanup;
	}

	/* find named attribute */
	entry = FIRST_ENTRY(bh);
	while (!IS_LAST_ENTRY(entry)) {
		struct ext2_xattr_entry *next =
			EXT2_XATTR_NEXT(entry);
		if ((char *)next >= end)
			goto bad_block;
		if (name_index == entry->e_name_index &&
		    name_len == entry->e_name_len &&
		    memcmp(name, entry->e_name, name_len) == 0)
			goto found;
		entry = next;
	}
	if (ext2_xattr_cache_insert(ea_block_cache, bh))
		ea_idebug(inode, "cache insert failed");
	error = -ENODATA;
	goto cleanup;
found:
	/* check the buffer size */
	if (entry->e_value_block != 0)
		goto bad_block;
	size = le32_to_cpu(entry->e_value_size);
	if (size > inode->i_sb->s_blocksize ||
	    le16_to_cpu(entry->e_value_offs) + size > inode->i_sb->s_blocksize)
		goto bad_block;

	if (ext2_xattr_cache_insert(ea_block_cache, bh))
		ea_idebug(inode, "cache insert failed");
	if (buffer) {
		error = -ERANGE;
		if (size > buffer_size)
			goto cleanup;
		/* return value of attribute */
		memcpy(buffer, bh->b_data + le16_to_cpu(entry->e_value_offs),
			size);
	}
	error = size;

cleanup:
	brelse(bh);
	up_read(&EXT2_I(inode)->xattr_sem);

	return error;
}
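
/*
 * A minimal, hypothetical in-kernel caller sketch of the two-call
 * pattern the comment above describes: probe for the size first, then
 * fetch the value ("foo" and the buffer handling are illustrative):
 *
 *	int size = ext2_xattr_get(inode, EXT2_XATTR_INDEX_USER, "foo",
 *				  NULL, 0);
 *	if (size >= 0) {
 *		char *buf = kmalloc(size, GFP_KERNEL);
 *		if (buf)
 *			size = ext2_xattr_get(inode, EXT2_XATTR_INDEX_USER,
 *					      "foo", buf, size);
 *	}
 *
 * Note the attribute may change between the two calls, in which case
 * the second call can return -ERANGE and the caller must retry.
 */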

/*
 * ext2_xattr_list()
 *
 * Copy a list of attribute names into the buffer provided, or compute
 * the buffer size required. If buffer is NULL, only the size of the
 * buffer required is computed.
 *
 * Returns a negative error number on failure, or the number of bytes
 * used / required on success.
 */
static int
ext2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
{
	struct inode *inode = d_inode(dentry);
	struct buffer_head *bh = NULL;
	struct ext2_xattr_entry *entry;
	char *end;
	size_t rest = buffer_size;
	int error;
	struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);

	ea_idebug(inode, "buffer=%p, buffer_size=%ld",
		  buffer, (long)buffer_size);

	down_read(&EXT2_I(inode)->xattr_sem);
	error = 0;
	if (!EXT2_I(inode)->i_file_acl)
		goto cleanup;
	ea_idebug(inode, "reading block %d", EXT2_I(inode)->i_file_acl);
	bh = sb_bread(inode->i_sb, EXT2_I(inode)->i_file_acl);
	error = -EIO;
	if (!bh)
		goto cleanup;
	ea_bdebug(bh, "b_count=%d, refcount=%d",
		atomic_read(&(bh->b_count)), le32_to_cpu(HDR(bh)->h_refcount));
	end = bh->b_data + bh->b_size;
	if (HDR(bh)->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) ||
	    HDR(bh)->h_blocks != cpu_to_le32(1)) {
bad_block:	ext2_error(inode->i_sb, "ext2_xattr_list",
			"inode %ld: bad block %d", inode->i_ino,
			EXT2_I(inode)->i_file_acl);
		error = -EIO;
		goto cleanup;
	}

	/* check the on-disk data structure */
	entry = FIRST_ENTRY(bh);
	while (!IS_LAST_ENTRY(entry)) {
		struct ext2_xattr_entry *next = EXT2_XATTR_NEXT(entry);

		if ((char *)next >= end)
			goto bad_block;
		entry = next;
	}
	if (ext2_xattr_cache_insert(ea_block_cache, bh))
		ea_idebug(inode, "cache insert failed");

	/* list the attribute names */
	for (entry = FIRST_ENTRY(bh); !IS_LAST_ENTRY(entry);
	     entry = EXT2_XATTR_NEXT(entry)) {
		const struct xattr_handler *handler =
			ext2_xattr_handler(entry->e_name_index);

		if (handler && (!handler->list || handler->list(dentry))) {
			const char *prefix = handler->prefix ?: handler->name;
			size_t prefix_len = strlen(prefix);
			size_t size = prefix_len + entry->e_name_len + 1;

			if (buffer) {
				if (size > rest) {
					error = -ERANGE;
					goto cleanup;
				}
				memcpy(buffer, prefix, prefix_len);
				buffer += prefix_len;
				memcpy(buffer, entry->e_name, entry->e_name_len);
				buffer += entry->e_name_len;
				*buffer++ = 0;
			}
			rest -= size;
		}
	}
	error = buffer_size - rest;  /* total size */

cleanup:
	brelse(bh);
	up_read(&EXT2_I(inode)->xattr_sem);

	return error;
}
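
/*
 * The listing produced above is the standard listxattr(2) format: a
 * sequence of null-terminated names, each composed of the handler
 * prefix and the on-disk entry name, e.g. (hypothetical attributes)
 *
 *	"user.foo\0security.selinux\0"
 *
 * With a NULL buffer only the total length of that sequence is
 * returned, which callers use to size the buffer for a second call.
 */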

/*
 * Inode operation listxattr()
 *
 * d_inode(dentry)->i_mutex: don't care
 */
ssize_t
ext2_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
	return ext2_xattr_list(dentry, buffer, size);
}

/*
 * If the EXT2_FEATURE_COMPAT_EXT_ATTR feature of this file system is
 * not set, set it.
 */
static void ext2_xattr_update_super_block(struct super_block *sb)
{
	if (EXT2_HAS_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR))
		return;

	spin_lock(&EXT2_SB(sb)->s_lock);
	EXT2_SET_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR);
	spin_unlock(&EXT2_SB(sb)->s_lock);
	mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
}

/*
 * ext2_xattr_set()
 *
 * Create, replace or remove an extended attribute for this inode.  Value
 * is NULL to remove an existing extended attribute, and non-NULL to
 * either replace an existing extended attribute, or create a new extended
 * attribute. The flags XATTR_REPLACE and XATTR_CREATE specify that an
 * extended attribute must exist and must not exist prior to the call,
 * respectively.
 *
 * Returns 0, or a negative error number on failure.
 */
int
ext2_xattr_set(struct inode *inode, int name_index, const char *name,
	       const void *value, size_t value_len, int flags)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh = NULL;
	struct ext2_xattr_header *header = NULL;
	struct ext2_xattr_entry *here, *last;
	size_t name_len, free, min_offs = sb->s_blocksize;
	int not_found = 1, error;
	char *end;

	/*
	 * header -- Points either into bh, or to a temporarily
	 *           allocated buffer.
	 * here -- The named entry found, or the place for inserting, within
	 *         the block pointed to by header.
	 * last -- Points right after the last named entry within the block
	 *         pointed to by header.
	 * min_offs -- The offset of the first value (values are aligned
	 *             towards the end of the block).
	 * end -- Points right after the block pointed to by header.
	 */

	ea_idebug(inode, "name=%d.%s, value=%p, value_len=%ld",
		  name_index, name, value, (long)value_len);

	if (value == NULL)
		value_len = 0;
	if (name == NULL)
		return -EINVAL;
	name_len = strlen(name);
	if (name_len > 255 || value_len > sb->s_blocksize)
		return -ERANGE;
	down_write(&EXT2_I(inode)->xattr_sem);
	if (EXT2_I(inode)->i_file_acl) {
		/* The inode already has an extended attribute block. */
		bh = sb_bread(sb, EXT2_I(inode)->i_file_acl);
		error = -EIO;
		if (!bh)
			goto cleanup;
		ea_bdebug(bh, "b_count=%d, refcount=%d",
			atomic_read(&(bh->b_count)),
			le32_to_cpu(HDR(bh)->h_refcount));
		header = HDR(bh);
		end = bh->b_data + bh->b_size;
		if (header->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) ||
		    header->h_blocks != cpu_to_le32(1)) {
bad_block:		ext2_error(sb, "ext2_xattr_set",
				"inode %ld: bad block %d", inode->i_ino,
				   EXT2_I(inode)->i_file_acl);
			error = -EIO;
			goto cleanup;
		}
		/* Find the named attribute. */
		here = FIRST_ENTRY(bh);
		while (!IS_LAST_ENTRY(here)) {
			struct ext2_xattr_entry *next = EXT2_XATTR_NEXT(here);
			if ((char *)next >= end)
				goto bad_block;
			if (!here->e_value_block && here->e_value_size) {
				size_t offs = le16_to_cpu(here->e_value_offs);
				if (offs < min_offs)
					min_offs = offs;
			}
			not_found = name_index - here->e_name_index;
			if (!not_found)
				not_found = name_len - here->e_name_len;
			if (!not_found)
				not_found = memcmp(name, here->e_name, name_len);
			if (not_found <= 0)
				break;
			here = next;
		}
		last = here;
		/* We still need to compute min_offs and last. */
		while (!IS_LAST_ENTRY(last)) {
			struct ext2_xattr_entry *next = EXT2_XATTR_NEXT(last);
			if ((char *)next >= end)
				goto bad_block;
			if (!last->e_value_block && last->e_value_size) {
				size_t offs = le16_to_cpu(last->e_value_offs);
				if (offs < min_offs)
					min_offs = offs;
			}
			last = next;
		}

		/* Check whether we have enough space left. */
		free = min_offs - ((char*)last - (char*)header) - sizeof(__u32);
	} else {
		/* We will use a new extended attribute block. */
		free = sb->s_blocksize -
			sizeof(struct ext2_xattr_header) - sizeof(__u32);
		here = last = NULL;  /* avoid gcc uninitialized warning. */
	}

	if (not_found) {
		/* Request to remove a nonexistent attribute? */
		error = -ENODATA;
		if (flags & XATTR_REPLACE)
			goto cleanup;
		error = 0;
		if (value == NULL)
			goto cleanup;
	} else {
		/* Request to create an existing attribute? */
		error = -EEXIST;
		if (flags & XATTR_CREATE)
			goto cleanup;
		if (!here->e_value_block && here->e_value_size) {
			size_t size = le32_to_cpu(here->e_value_size);

			if (le16_to_cpu(here->e_value_offs) + size >
			    sb->s_blocksize || size > sb->s_blocksize)
				goto bad_block;
			free += EXT2_XATTR_SIZE(size);
		}
		free += EXT2_XATTR_LEN(name_len);
	}
	error = -ENOSPC;
	if (free < EXT2_XATTR_LEN(name_len) + EXT2_XATTR_SIZE(value_len))
		goto cleanup;
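
	/*
	 * A worked example of the space check, assuming a 4096-byte block
	 * and the usual definitions of EXT2_XATTR_LEN()/EXT2_XATTR_SIZE()
	 * in fs/ext2/xattr.h (descriptors and values padded to
	 * EXT2_XATTR_PAD = 4 bytes): on a fresh block, free starts out as
	 * 4096 - 32 (header) - 4 (terminating null word) = 4060 bytes.
	 * Setting "foo" (name_len = 3) with a 5-byte value then needs
	 * EXT2_XATTR_LEN(3) = 20 bytes for the descriptor plus
	 * EXT2_XATTR_SIZE(5) = 8 bytes for the padded value, so the check
	 * above requires free >= 28.
	 */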

	/* Here we know that we can set the new attribute. */

	if (header) {
		/* assert(header == HDR(bh)); */
		lock_buffer(bh);
		if (header->h_refcount == cpu_to_le32(1)) {
			__u32 hash = le32_to_cpu(header->h_hash);

			ea_bdebug(bh, "modifying in-place");
			/*
			 * This must happen under buffer lock for
			 * ext2_xattr_set2() to reliably detect modified block
			 */
			mb_cache_entry_delete(EA_BLOCK_CACHE(inode), hash,
					      bh->b_blocknr);

			/* keep the buffer locked while modifying it. */
		} else {
			int offset;

			unlock_buffer(bh);
			ea_bdebug(bh, "cloning");
			header = kmalloc(bh->b_size, GFP_KERNEL);
			error = -ENOMEM;
			if (header == NULL)
				goto cleanup;
			memcpy(header, HDR(bh), bh->b_size);
			header->h_refcount = cpu_to_le32(1);

			offset = (char *)here - bh->b_data;
			here = ENTRY((char *)header + offset);
			offset = (char *)last - bh->b_data;
			last = ENTRY((char *)header + offset);
		}
	} else {
		/* Allocate a buffer where we construct the new block. */
		header = kzalloc(sb->s_blocksize, GFP_KERNEL);
		error = -ENOMEM;
		if (header == NULL)
			goto cleanup;
		end = (char *)header + sb->s_blocksize;
		header->h_magic = cpu_to_le32(EXT2_XATTR_MAGIC);
		header->h_blocks = header->h_refcount = cpu_to_le32(1);
		last = here = ENTRY(header+1);
	}

	/* Iff we are modifying the block in-place, bh is locked here. */

	if (not_found) {
		/* Insert the new name. */
		size_t size = EXT2_XATTR_LEN(name_len);
		size_t rest = (char *)last - (char *)here;
		memmove((char *)here + size, here, rest);
		memset(here, 0, size);
		here->e_name_index = name_index;
		here->e_name_len = name_len;
		memcpy(here->e_name, name, name_len);
	} else {
		if (!here->e_value_block && here->e_value_size) {
			char *first_val = (char *)header + min_offs;
			size_t offs = le16_to_cpu(here->e_value_offs);
			char *val = (char *)header + offs;
			size_t size = EXT2_XATTR_SIZE(
				le32_to_cpu(here->e_value_size));

			if (size == EXT2_XATTR_SIZE(value_len)) {
				/* The old and the new value have the same
				   size. Just replace. */
				here->e_value_size = cpu_to_le32(value_len);
				memset(val + size - EXT2_XATTR_PAD, 0,
				       EXT2_XATTR_PAD); /* Clear pad bytes. */
				memcpy(val, value, value_len);
				goto skip_replace;
			}

			/* Remove the old value. */
			memmove(first_val + size, first_val, val - first_val);
			memset(first_val, 0, size);
			here->e_value_offs = 0;
			min_offs += size;

			/* Adjust all value offsets. */
			last = ENTRY(header+1);
			while (!IS_LAST_ENTRY(last)) {
				size_t o = le16_to_cpu(last->e_value_offs);
				if (!last->e_value_block && o < offs)
					last->e_value_offs =
						cpu_to_le16(o + size);
				last = EXT2_XATTR_NEXT(last);
			}
		}
		if (value == NULL) {
			/* Remove the old name. */
			size_t size = EXT2_XATTR_LEN(name_len);
			last = ENTRY((char *)last - size);
			memmove(here, (char*)here + size,
				(char*)last - (char*)here);
			memset(last, 0, size);
		}
	}

	if (value != NULL) {
		/* Insert the new value. */
		here->e_value_size = cpu_to_le32(value_len);
		if (value_len) {
			size_t size = EXT2_XATTR_SIZE(value_len);
			char *val = (char *)header + min_offs - size;
			here->e_value_offs =
				cpu_to_le16((char *)val - (char *)header);
			memset(val + size - EXT2_XATTR_PAD, 0,
			       EXT2_XATTR_PAD); /* Clear the pad bytes. */
			memcpy(val, value, value_len);
		}
	}

skip_replace:
	if (IS_LAST_ENTRY(ENTRY(header+1))) {
		/* This block is now empty. */
		if (bh && header == HDR(bh))
			unlock_buffer(bh);  /* we were modifying in-place. */
		error = ext2_xattr_set2(inode, bh, NULL);
	} else {
		ext2_xattr_rehash(header, here);
		if (bh && header == HDR(bh))
			unlock_buffer(bh);  /* we were modifying in-place. */
		error = ext2_xattr_set2(inode, bh, header);
	}

cleanup:
	if (!(bh && header == HDR(bh)))
		kfree(header);
	brelse(bh);
	up_write(&EXT2_I(inode)->xattr_sem);

	return error;
}
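
/*
 * Flag semantics at a glance (hypothetical in-kernel caller, the name
 * "foo" in the user namespace is illustrative):
 *
 *	ext2_xattr_set(inode, EXT2_XATTR_INDEX_USER, "foo", val, len,
 *		       XATTR_CREATE);	fails with -EEXIST if "foo" exists
 *	ext2_xattr_set(inode, EXT2_XATTR_INDEX_USER, "foo", val, len,
 *		       XATTR_REPLACE);	fails with -ENODATA if it does not
 *	ext2_xattr_set(inode, EXT2_XATTR_INDEX_USER, "foo", NULL, 0, 0);
 *					removes "foo" if present
 */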

/*
 * Second half of ext2_xattr_set(): Update the file system.
 */
static int
ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
		struct ext2_xattr_header *header)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *new_bh = NULL;
	int error;
	struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);

	if (header) {
		new_bh = ext2_xattr_cache_find(inode, header);
		if (new_bh) {
			/* We found an identical block in the cache. */
			if (new_bh == old_bh) {
				ea_bdebug(new_bh, "keeping this block");
			} else {
				/* The old block is released after updating
				   the inode. */
				ea_bdebug(new_bh, "reusing block");

				error = dquot_alloc_block(inode, 1);
				if (error) {
					unlock_buffer(new_bh);
					goto cleanup;
				}
				le32_add_cpu(&HDR(new_bh)->h_refcount, 1);
				ea_bdebug(new_bh, "refcount now=%d",
					le32_to_cpu(HDR(new_bh)->h_refcount));
			}
			unlock_buffer(new_bh);
		} else if (old_bh && header == HDR(old_bh)) {
			/* Keep this block. No need to lock the block as we
			   don't need to change the reference count. */
			new_bh = old_bh;
			get_bh(new_bh);
			ext2_xattr_cache_insert(ea_block_cache, new_bh);
		} else {
			/* We need to allocate a new block */
			ext2_fsblk_t goal = ext2_group_first_block_no(sb,
						EXT2_I(inode)->i_block_group);
			int block = ext2_new_block(inode, goal, &error);
			if (error)
				goto cleanup;
			ea_idebug(inode, "creating block %d", block);

			new_bh = sb_getblk(sb, block);
			if (unlikely(!new_bh)) {
				ext2_free_blocks(inode, block, 1);
				mark_inode_dirty(inode);
				error = -ENOMEM;
				goto cleanup;
			}
			lock_buffer(new_bh);
			memcpy(new_bh->b_data, header, new_bh->b_size);
			set_buffer_uptodate(new_bh);
			unlock_buffer(new_bh);
			ext2_xattr_cache_insert(ea_block_cache, new_bh);

			ext2_xattr_update_super_block(sb);
		}
		mark_buffer_dirty(new_bh);
		if (IS_SYNC(inode)) {
			sync_dirty_buffer(new_bh);
			error = -EIO;
			if (buffer_req(new_bh) && !buffer_uptodate(new_bh))
				goto cleanup;
		}
	}

	/* Update the inode. */
	EXT2_I(inode)->i_file_acl = new_bh ? new_bh->b_blocknr : 0;
	inode->i_ctime = current_time(inode);
	if (IS_SYNC(inode)) {
		error = sync_inode_metadata(inode, 1);
		/* In case sync failed due to ENOSPC the inode was actually
		 * written (only some dirty data were not) so we just proceed
		 * as if nothing happened and clean up the unused block */
		if (error && error != -ENOSPC) {
			if (new_bh && new_bh != old_bh) {
				dquot_free_block_nodirty(inode, 1);
				mark_inode_dirty(inode);
			}
			goto cleanup;
		}
	} else
		mark_inode_dirty(inode);

	error = 0;
	if (old_bh && old_bh != new_bh) {
		/*
		 * If there was an old block and we are no longer using it,
		 * release the old block.
		 */
		lock_buffer(old_bh);
		if (HDR(old_bh)->h_refcount == cpu_to_le32(1)) {
			__u32 hash = le32_to_cpu(HDR(old_bh)->h_hash);

			/*
			 * This must happen under buffer lock for
			 * ext2_xattr_set2() to reliably detect freed block
			 */
			mb_cache_entry_delete(ea_block_cache, hash,
					      old_bh->b_blocknr);
			/* Free the old block. */
			ea_bdebug(old_bh, "freeing");
			ext2_free_blocks(inode, old_bh->b_blocknr, 1);
			mark_inode_dirty(inode);
			/* We let our caller release old_bh, so we
			 * need to duplicate the buffer beforehand. */
			get_bh(old_bh);
			bforget(old_bh);
		} else {
			/* Decrement the refcount only. */
			le32_add_cpu(&HDR(old_bh)->h_refcount, -1);
			dquot_free_block_nodirty(inode, 1);
			mark_inode_dirty(inode);
			mark_buffer_dirty(old_bh);
			ea_bdebug(old_bh, "refcount now=%d",
				le32_to_cpu(HDR(old_bh)->h_refcount));
		}
		unlock_buffer(old_bh);
	}

cleanup:
	brelse(new_bh);

	return error;
}
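
/*
 * To summarize the three paths above: an identical block already in
 * the mbcache is shared by bumping its refcount (one more quota block
 * is charged to the inode); a block that is private to this inode and
 * was modified in place is simply kept; otherwise a fresh block is
 * allocated near the inode's block group and the constructed header is
 * copied into it. In every case i_file_acl ends up pointing at the
 * block that now holds the attributes, and an old, no longer used
 * block is freed or has its refcount dropped.
 */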

/*
 * ext2_xattr_delete_inode()
 *
 * Free extended attribute resources associated with this inode. This
 * is called immediately before an inode is freed.
 */
void
ext2_xattr_delete_inode(struct inode *inode)
{
	struct buffer_head *bh = NULL;
	struct ext2_sb_info *sbi = EXT2_SB(inode->i_sb);

	down_write(&EXT2_I(inode)->xattr_sem);
	if (!EXT2_I(inode)->i_file_acl)
		goto cleanup;

	if (!ext2_data_block_valid(sbi, EXT2_I(inode)->i_file_acl, 0)) {
		ext2_error(inode->i_sb, "ext2_xattr_delete_inode",
			"inode %ld: xattr block %d is out of data blocks range",
			inode->i_ino, EXT2_I(inode)->i_file_acl);
		goto cleanup;
	}

	bh = sb_bread(inode->i_sb, EXT2_I(inode)->i_file_acl);
	if (!bh) {
		ext2_error(inode->i_sb, "ext2_xattr_delete_inode",
			"inode %ld: block %d read error", inode->i_ino,
			EXT2_I(inode)->i_file_acl);
		goto cleanup;
	}
	ea_bdebug(bh, "b_count=%d", atomic_read(&(bh->b_count)));
	if (HDR(bh)->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) ||
	    HDR(bh)->h_blocks != cpu_to_le32(1)) {
		ext2_error(inode->i_sb, "ext2_xattr_delete_inode",
			"inode %ld: bad block %d", inode->i_ino,
			EXT2_I(inode)->i_file_acl);
		goto cleanup;
	}
	lock_buffer(bh);
	if (HDR(bh)->h_refcount == cpu_to_le32(1)) {
		__u32 hash = le32_to_cpu(HDR(bh)->h_hash);

		/*
		 * This must happen under buffer lock for ext2_xattr_set2() to
		 * reliably detect freed block
		 */
		mb_cache_entry_delete(EA_BLOCK_CACHE(inode), hash,
				      bh->b_blocknr);
		ext2_free_blocks(inode, EXT2_I(inode)->i_file_acl, 1);
		get_bh(bh);
		bforget(bh);
		unlock_buffer(bh);
	} else {
		le32_add_cpu(&HDR(bh)->h_refcount, -1);
		ea_bdebug(bh, "refcount now=%d",
			le32_to_cpu(HDR(bh)->h_refcount));
		unlock_buffer(bh);
		mark_buffer_dirty(bh);
		if (IS_SYNC(inode))
			sync_dirty_buffer(bh);
		dquot_free_block_nodirty(inode, 1);
	}
	EXT2_I(inode)->i_file_acl = 0;

cleanup:
	brelse(bh);
	up_write(&EXT2_I(inode)->xattr_sem);
}

/*
 * ext2_xattr_cache_insert()
 *
 * Create a new entry in the extended attribute cache, and insert
 * it unless such an entry is already in the cache.
 *
 * Returns 0, or a negative error number on failure.
 */
static int
ext2_xattr_cache_insert(struct mb_cache *cache, struct buffer_head *bh)
{
	__u32 hash = le32_to_cpu(HDR(bh)->h_hash);
	int error;

	error = mb_cache_entry_create(cache, GFP_NOFS, hash, bh->b_blocknr, 1);
	if (error) {
		if (error == -EBUSY) {
			ea_bdebug(bh, "already in cache");
			error = 0;
		}
	} else
		ea_bdebug(bh, "inserting [%x]", (int)hash);
	return error;
}

/*
 * ext2_xattr_cmp()
 *
 * Compare two extended attribute blocks for equality.
 *
 * Returns 0 if the blocks are equal, 1 if they differ, and
 * a negative error number on errors.
 */
static int
ext2_xattr_cmp(struct ext2_xattr_header *header1,
	       struct ext2_xattr_header *header2)
{
	struct ext2_xattr_entry *entry1, *entry2;

	entry1 = ENTRY(header1+1);
	entry2 = ENTRY(header2+1);
	while (!IS_LAST_ENTRY(entry1)) {
		if (IS_LAST_ENTRY(entry2))
			return 1;
		if (entry1->e_hash != entry2->e_hash ||
		    entry1->e_name_index != entry2->e_name_index ||
		    entry1->e_name_len != entry2->e_name_len ||
		    entry1->e_value_size != entry2->e_value_size ||
		    memcmp(entry1->e_name, entry2->e_name, entry1->e_name_len))
			return 1;
		if (entry1->e_value_block != 0 || entry2->e_value_block != 0)
			return -EIO;
		if (memcmp((char *)header1 + le16_to_cpu(entry1->e_value_offs),
			   (char *)header2 + le16_to_cpu(entry2->e_value_offs),
			   le32_to_cpu(entry1->e_value_size)))
			return 1;

		entry1 = EXT2_XATTR_NEXT(entry1);
		entry2 = EXT2_XATTR_NEXT(entry2);
	}
	if (!IS_LAST_ENTRY(entry2))
		return 1;
	return 0;
}

/*
 * ext2_xattr_cache_find()
 *
 * Find an identical extended attribute block.
 *
 * Returns a locked buffer head to the block found, or NULL if such
 * a block was not found or an error occurred.
 */
static struct buffer_head *
ext2_xattr_cache_find(struct inode *inode, struct ext2_xattr_header *header)
{
	__u32 hash = le32_to_cpu(header->h_hash);
	struct mb_cache_entry *ce;
	struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);

	if (!header->h_hash)
		return NULL;  /* never share */
	ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
again:
	ce = mb_cache_entry_find_first(ea_block_cache, hash);
	while (ce) {
		struct buffer_head *bh;

		bh = sb_bread(inode->i_sb, ce->e_value);
		if (!bh) {
			ext2_error(inode->i_sb, "ext2_xattr_cache_find",
				"inode %ld: block %ld read error",
				inode->i_ino, (unsigned long) ce->e_value);
		} else {
			lock_buffer(bh);
			/*
			 * We have to be careful about races with freeing or
			 * rehashing of xattr block. Once we hold buffer lock
			 * xattr block's state is stable so we can check
			 * whether the block got freed / rehashed or not.
			 * Since we unhash mbcache entry under buffer lock when
			 * freeing / rehashing xattr block, checking whether
			 * entry is still hashed is reliable.
			 */
			if (hlist_bl_unhashed(&ce->e_hash_list)) {
				mb_cache_entry_put(ea_block_cache, ce);
				unlock_buffer(bh);
				brelse(bh);
				goto again;
			} else if (le32_to_cpu(HDR(bh)->h_refcount) >
				   EXT2_XATTR_REFCOUNT_MAX) {
				ea_idebug(inode, "block %ld refcount %d>%d",
					  (unsigned long) ce->e_value,
					  le32_to_cpu(HDR(bh)->h_refcount),
					  EXT2_XATTR_REFCOUNT_MAX);
			} else if (!ext2_xattr_cmp(header, HDR(bh))) {
				ea_bdebug(bh, "b_count=%d",
					  atomic_read(&(bh->b_count)));
				mb_cache_entry_touch(ea_block_cache, ce);
				mb_cache_entry_put(ea_block_cache, ce);
				return bh;
			}
			unlock_buffer(bh);
			brelse(bh);
		}
		ce = mb_cache_entry_find_next(ea_block_cache, ce);
	}
	return NULL;
}

#define NAME_HASH_SHIFT 5
#define VALUE_HASH_SHIFT 16

/*
 * ext2_xattr_hash_entry()
 *
 * Compute the hash of an extended attribute.
 */
static inline void ext2_xattr_hash_entry(struct ext2_xattr_header *header,
					 struct ext2_xattr_entry *entry)
{
	__u32 hash = 0;
	char *name = entry->e_name;
	int n;

	for (n = 0; n < entry->e_name_len; n++) {
		hash = (hash << NAME_HASH_SHIFT) ^
		       (hash >> (8*sizeof(hash) - NAME_HASH_SHIFT)) ^
		       *name++;
	}

	if (entry->e_value_block == 0 && entry->e_value_size != 0) {
		__le32 *value = (__le32 *)((char *)header +
			le16_to_cpu(entry->e_value_offs));
		for (n = (le32_to_cpu(entry->e_value_size) +
		     EXT2_XATTR_ROUND) >> EXT2_XATTR_PAD_BITS; n; n--) {
			hash = (hash << VALUE_HASH_SHIFT) ^
			       (hash >> (8*sizeof(hash) - VALUE_HASH_SHIFT)) ^
			       le32_to_cpu(*value++);
		}
	}
	entry->e_hash = cpu_to_le32(hash);
}
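
/*
 * In other words, the entry hash is the rotate-and-xor recurrence
 *
 *	hash = (hash << 5) ^ (hash >> 27) ^ next name byte
 *
 * over the name, followed by
 *
 *	hash = (hash << 16) ^ (hash >> 16) ^ next value word
 *
 * over the value, read as 32-bit little-endian words including the
 * zero padding up to the next EXT2_XATTR_PAD boundary.
 */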

#undef NAME_HASH_SHIFT
#undef VALUE_HASH_SHIFT

#define BLOCK_HASH_SHIFT 16

/*
 * ext2_xattr_rehash()
 *
 * Re-compute the extended attribute hash value after an entry has changed.
 */
static void ext2_xattr_rehash(struct ext2_xattr_header *header,
			      struct ext2_xattr_entry *entry)
{
	struct ext2_xattr_entry *here;
	__u32 hash = 0;

	ext2_xattr_hash_entry(header, entry);
	here = ENTRY(header+1);
	while (!IS_LAST_ENTRY(here)) {
		if (!here->e_hash) {
			/* Block is not shared if an entry's hash value == 0 */
			hash = 0;
			break;
		}
		hash = (hash << BLOCK_HASH_SHIFT) ^
		       (hash >> (8*sizeof(hash) - BLOCK_HASH_SHIFT)) ^
		       le32_to_cpu(here->e_hash);
		here = EXT2_XATTR_NEXT(here);
	}
	header->h_hash = cpu_to_le32(hash);
}

#undef BLOCK_HASH_SHIFT

#define HASH_BUCKET_BITS 10

struct mb_cache *ext2_xattr_create_cache(void)
{
	return mb_cache_create(HASH_BUCKET_BITS);
}

void ext2_xattr_destroy_cache(struct mb_cache *cache)
{
	if (cache)
		mb_cache_destroy(cache);
}
1035