// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext2/xattr.c
 *
 * Copyright (C) 2001-2003 Andreas Gruenbacher <agruen@suse.de>
 *
 * Fix by Harrison Xing <harrison@mountainviewdata.com>.
 * Extended attributes for symlinks and special files added per
 *  suggestion of Luka Renko <luka.renko@hermes.si>.
 * xattr consolidation Copyright (c) 2004 James Morris <jmorris@redhat.com>,
 *  Red Hat Inc.
 *
 */

/*
 * Extended attributes are stored on disk blocks allocated outside of
 * any inode. The i_file_acl field is then made to point to this allocated
 * block. If all extended attributes of an inode are identical, these
 * inodes may share the same extended attribute block. Such situations
 * are automatically detected by keeping a cache of recent attribute block
 * numbers and hashes over the block's contents in memory.
 *
 *
 * Extended attribute block layout:
 *
 *   +------------------+
 *   | header           |
 *   | entry 1          | |
 *   | entry 2          | | growing downwards
 *   | entry 3          | v
 *   | four null bytes  |
 *   | . . .            |
 *   | value 1          | ^
 *   | value 3          | | growing upwards
 *   | value 2          | |
 *   +------------------+
 *
 * The block header is followed by multiple entry descriptors. These entry
 * descriptors are variable in size, and aligned to EXT2_XATTR_PAD
 * byte boundaries. The entry descriptors are sorted by attribute name,
 * so that two extended attribute blocks can be compared efficiently.
 *
 * Attribute values are aligned to the end of the block, stored in
 * no specific order. They are also padded to EXT2_XATTR_PAD byte
 * boundaries. No additional gaps are left between them.
 *
 * Locking strategy
 * ----------------
 * EXT2_I(inode)->i_file_acl is protected by EXT2_I(inode)->xattr_sem.
 * EA blocks are only changed if they are exclusive to an inode, so
 * holding xattr_sem also means that nothing but the EA block's reference
 * count will change. Multiple writers to an EA block are synchronized
 * by the bh lock. No more than a single bh lock is held at any time
 * to avoid deadlocks.
 */
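
/*
 * For reference, the two on-disk structures manipulated below are declared
 * in xattr.h.  A sketch of their layout (consult xattr.h for the
 * authoritative definitions):
 *
 *	struct ext2_xattr_header {
 *		__le32	h_magic;	// EXT2_XATTR_MAGIC (0xEA020000)
 *		__le32	h_refcount;	// number of inodes sharing this block
 *		__le32	h_blocks;	// disk blocks used; always 1 here
 *		__le32	h_hash;		// hash of all entries; 0 disables sharing
 *		__u32	h_reserved[4];
 *	};
 *
 *	struct ext2_xattr_entry {
 *		__u8	e_name_len;	// length of e_name
 *		__u8	e_name_index;	// namespace, see ext2_xattr_handler_map
 *		__le16	e_value_offs;	// offset of the value within the block
 *		__le32	e_value_block;	// unused, must be zero
 *		__le32	e_value_size;	// size of the value in bytes
 *		__le32	e_hash;		// hash of name and value
 *		char	e_name[];	// name, not null-terminated
 *	};
 */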

#include <linux/buffer_head.h>
#include <linux/init.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/mbcache.h>
#include <linux/quotaops.h>
#include <linux/rwsem.h>
#include <linux/security.h>
#include "ext2.h"
#include "xattr.h"
#include "acl.h"

#define HDR(bh) ((struct ext2_xattr_header *)((bh)->b_data))
#define ENTRY(ptr) ((struct ext2_xattr_entry *)(ptr))
#define FIRST_ENTRY(bh) ENTRY(HDR(bh)+1)
#define IS_LAST_ENTRY(entry) (*(__u32 *)(entry) == 0)

#ifdef EXT2_XATTR_DEBUG
# define ea_idebug(inode, f...) do { \
		printk(KERN_DEBUG "inode %s:%ld: ", \
			inode->i_sb->s_id, inode->i_ino); \
		printk(f); \
		printk("\n"); \
	} while (0)
# define ea_bdebug(bh, f...) do { \
		printk(KERN_DEBUG "block %pg:%lu: ", \
			bh->b_bdev, (unsigned long) bh->b_blocknr); \
		printk(f); \
		printk("\n"); \
	} while (0)
#else
# define ea_idebug(inode, f...)	no_printk(f)
# define ea_bdebug(bh, f...)	no_printk(f)
#endif

static int ext2_xattr_set2(struct inode *, struct buffer_head *,
			   struct ext2_xattr_header *);

static int ext2_xattr_cache_insert(struct mb_cache *, struct buffer_head *);
static struct buffer_head *ext2_xattr_cache_find(struct inode *,
						 struct ext2_xattr_header *);
static void ext2_xattr_rehash(struct ext2_xattr_header *,
			      struct ext2_xattr_entry *);

static const struct xattr_handler *ext2_xattr_handler_map[] = {
	[EXT2_XATTR_INDEX_USER]		     = &ext2_xattr_user_handler,
#ifdef CONFIG_EXT2_FS_POSIX_ACL
	[EXT2_XATTR_INDEX_POSIX_ACL_ACCESS]  = &posix_acl_access_xattr_handler,
	[EXT2_XATTR_INDEX_POSIX_ACL_DEFAULT] = &posix_acl_default_xattr_handler,
#endif
	[EXT2_XATTR_INDEX_TRUSTED]	     = &ext2_xattr_trusted_handler,
#ifdef CONFIG_EXT2_FS_SECURITY
	[EXT2_XATTR_INDEX_SECURITY]	     = &ext2_xattr_security_handler,
#endif
};

const struct xattr_handler *ext2_xattr_handlers[] = {
	&ext2_xattr_user_handler,
	&ext2_xattr_trusted_handler,
#ifdef CONFIG_EXT2_FS_POSIX_ACL
	&posix_acl_access_xattr_handler,
	&posix_acl_default_xattr_handler,
#endif
#ifdef CONFIG_EXT2_FS_SECURITY
	&ext2_xattr_security_handler,
#endif
	NULL
};
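
/*
 * Each handler referenced above supplies the namespace prefix ("user.",
 * "trusted.", ...) and the ->get/->set callbacks the VFS dispatches to.
 * As an illustrative sketch only (the real definitions live in
 * xattr_user.c and friends, and the exact callback signatures vary by
 * kernel version), a get callback boils down to:
 *
 *	static int example_user_get(const struct xattr_handler *handler,
 *				    struct dentry *unused, struct inode *inode,
 *				    const char *name, void *buffer, size_t size)
 *	{
 *		return ext2_xattr_get(inode, EXT2_XATTR_INDEX_USER,
 *				      name, buffer, size);
 *	}
 *
 * "example_user_get" is a hypothetical name; the handler's .prefix decides
 * which part of the attribute name the callback actually sees.
 */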

#define EA_BLOCK_CACHE(inode)	(EXT2_SB(inode->i_sb)->s_ea_block_cache)

static inline const struct xattr_handler *
ext2_xattr_handler(int name_index)
{
	const struct xattr_handler *handler = NULL;

	if (name_index > 0 && name_index < ARRAY_SIZE(ext2_xattr_handler_map))
		handler = ext2_xattr_handler_map[name_index];
	return handler;
}

static bool
ext2_xattr_header_valid(struct ext2_xattr_header *header)
{
	if (header->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) ||
	    header->h_blocks != cpu_to_le32(1))
		return false;

	return true;
}

static bool
ext2_xattr_entry_valid(struct ext2_xattr_entry *entry,
		       char *end, size_t end_offs)
{
	struct ext2_xattr_entry *next;
	size_t size;

	next = EXT2_XATTR_NEXT(entry);
	if ((char *)next >= end)
		return false;

	if (entry->e_value_block != 0)
		return false;

	size = le32_to_cpu(entry->e_value_size);
	if (size > end_offs ||
	    le16_to_cpu(entry->e_value_offs) + size > end_offs)
		return false;

	return true;
}

static int
ext2_xattr_cmp_entry(int name_index, size_t name_len, const char *name,
		     struct ext2_xattr_entry *entry)
{
	int cmp;

	cmp = name_index - entry->e_name_index;
	if (!cmp)
		cmp = name_len - entry->e_name_len;
	if (!cmp)
		cmp = memcmp(name, entry->e_name, name_len);

	return cmp;
}
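
/*
 * Note the resulting sort order: entries compare first by name index, then
 * by name length, and only then lexicographically by name. For example, a
 * "user." attribute named "ab" sorts after one named "z", because the
 * length comparison happens before the memcmp().
 */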

/*
 * ext2_xattr_get()
 *
 * Copy an extended attribute into the buffer provided, or compute
 * the buffer size required. Pass a NULL buffer to compute the
 * required size.
 *
 * Returns a negative error number on failure, or the number of bytes
 * used / required on success.
 */
int
ext2_xattr_get(struct inode *inode, int name_index, const char *name,
	       void *buffer, size_t buffer_size)
{
	struct buffer_head *bh = NULL;
	struct ext2_xattr_entry *entry;
	size_t name_len, size;
	char *end;
	int error, not_found;
	struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);

	ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld",
		  name_index, name, buffer, (long)buffer_size);

	if (name == NULL)
		return -EINVAL;
	name_len = strlen(name);
	if (name_len > 255)
		return -ERANGE;

	down_read(&EXT2_I(inode)->xattr_sem);
	error = -ENODATA;
	if (!EXT2_I(inode)->i_file_acl)
		goto cleanup;
	ea_idebug(inode, "reading block %d", EXT2_I(inode)->i_file_acl);
	bh = sb_bread(inode->i_sb, EXT2_I(inode)->i_file_acl);
	error = -EIO;
	if (!bh)
		goto cleanup;
	ea_bdebug(bh, "b_count=%d, refcount=%d",
		atomic_read(&(bh->b_count)), le32_to_cpu(HDR(bh)->h_refcount));
	end = bh->b_data + bh->b_size;
	if (!ext2_xattr_header_valid(HDR(bh))) {
bad_block:
		ext2_error(inode->i_sb, "ext2_xattr_get",
			"inode %ld: bad block %d", inode->i_ino,
			EXT2_I(inode)->i_file_acl);
		error = -EIO;
		goto cleanup;
	}

	/* find named attribute */
	entry = FIRST_ENTRY(bh);
	while (!IS_LAST_ENTRY(entry)) {
		if (!ext2_xattr_entry_valid(entry, end,
		    inode->i_sb->s_blocksize))
			goto bad_block;

		not_found = ext2_xattr_cmp_entry(name_index, name_len, name,
						 entry);
		if (!not_found)
			goto found;
		if (not_found < 0)
			break;

		entry = EXT2_XATTR_NEXT(entry);
	}
	if (ext2_xattr_cache_insert(ea_block_cache, bh))
		ea_idebug(inode, "cache insert failed");
	error = -ENODATA;
	goto cleanup;
found:
	size = le32_to_cpu(entry->e_value_size);
	if (ext2_xattr_cache_insert(ea_block_cache, bh))
		ea_idebug(inode, "cache insert failed");
	if (buffer) {
		error = -ERANGE;
		if (size > buffer_size)
			goto cleanup;
		/* return value of attribute */
		memcpy(buffer, bh->b_data + le16_to_cpu(entry->e_value_offs),
			size);
	}
	error = size;

cleanup:
	brelse(bh);
	up_read(&EXT2_I(inode)->xattr_sem);

	return error;
}
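
/*
 * Usage sketch (hypothetical caller): because a NULL buffer makes
 * ext2_xattr_get() return the value size, callers that do not know the
 * size in advance typically probe first and then fetch:
 *
 *	int size = ext2_xattr_get(inode, EXT2_XATTR_INDEX_USER,
 *				  "foo", NULL, 0);
 *	if (size >= 0) {
 *		char *buf = kmalloc(size, GFP_KERNEL);
 *		if (buf)
 *			size = ext2_xattr_get(inode, EXT2_XATTR_INDEX_USER,
 *					      "foo", buf, size);
 *	}
 *
 * The attribute may change between the two calls, so a -ERANGE result from
 * the second call still needs handling (e.g. by retrying).
 */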

/*
 * ext2_xattr_list()
 *
 * Copy a list of attribute names into the buffer provided, or compute
 * the buffer size required. Pass a NULL buffer to compute the
 * required size.
 *
 * Returns a negative error number on failure, or the number of bytes
 * used / required on success.
 */
static int
ext2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
{
	struct inode *inode = d_inode(dentry);
	struct buffer_head *bh = NULL;
	struct ext2_xattr_entry *entry;
	char *end;
	size_t rest = buffer_size;
	int error;
	struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);

	ea_idebug(inode, "buffer=%p, buffer_size=%ld",
		  buffer, (long)buffer_size);

	down_read(&EXT2_I(inode)->xattr_sem);
	error = 0;
	if (!EXT2_I(inode)->i_file_acl)
		goto cleanup;
	ea_idebug(inode, "reading block %d", EXT2_I(inode)->i_file_acl);
	bh = sb_bread(inode->i_sb, EXT2_I(inode)->i_file_acl);
	error = -EIO;
	if (!bh)
		goto cleanup;
	ea_bdebug(bh, "b_count=%d, refcount=%d",
		atomic_read(&(bh->b_count)), le32_to_cpu(HDR(bh)->h_refcount));
	end = bh->b_data + bh->b_size;
	if (!ext2_xattr_header_valid(HDR(bh))) {
bad_block:
		ext2_error(inode->i_sb, "ext2_xattr_list",
			"inode %ld: bad block %d", inode->i_ino,
			EXT2_I(inode)->i_file_acl);
		error = -EIO;
		goto cleanup;
	}

	/* check the on-disk data structure */
	entry = FIRST_ENTRY(bh);
	while (!IS_LAST_ENTRY(entry)) {
		if (!ext2_xattr_entry_valid(entry, end,
		    inode->i_sb->s_blocksize))
			goto bad_block;
		entry = EXT2_XATTR_NEXT(entry);
	}
	if (ext2_xattr_cache_insert(ea_block_cache, bh))
		ea_idebug(inode, "cache insert failed");

	/* list the attribute names */
	for (entry = FIRST_ENTRY(bh); !IS_LAST_ENTRY(entry);
	     entry = EXT2_XATTR_NEXT(entry)) {
		const struct xattr_handler *handler =
			ext2_xattr_handler(entry->e_name_index);

		if (handler && (!handler->list || handler->list(dentry))) {
			const char *prefix = handler->prefix ?: handler->name;
			size_t prefix_len = strlen(prefix);
			size_t size = prefix_len + entry->e_name_len + 1;

			if (buffer) {
				if (size > rest) {
					error = -ERANGE;
					goto cleanup;
				}
				memcpy(buffer, prefix, prefix_len);
				buffer += prefix_len;
				memcpy(buffer, entry->e_name, entry->e_name_len);
				buffer += entry->e_name_len;
				*buffer++ = 0;
			}
			rest -= size;
		}
	}
	error = buffer_size - rest;  /* total size */

cleanup:
	brelse(bh);
	up_read(&EXT2_I(inode)->xattr_sem);

	return error;
}

/*
 * Inode operation listxattr()
 *
 * d_inode(dentry)->i_mutex: don't care
 */
ssize_t
ext2_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
	return ext2_xattr_list(dentry, buffer, size);
}
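
/*
 * The buffer filled in by ext2_listxattr() holds the attribute names as a
 * sequence of null-terminated strings, one after another. A consumer would
 * walk it like this (sketch; buf/len from a successful call,
 * handle_name() is hypothetical):
 *
 *	char *name = buf;
 *	while (name < buf + len) {
 *		handle_name(name);
 *		name += strlen(name) + 1;
 *	}
 */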

/*
 * If the EXT2_FEATURE_COMPAT_EXT_ATTR feature of this file system is
 * not set, set it.
 */
static void ext2_xattr_update_super_block(struct super_block *sb)
{
	if (EXT2_HAS_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR))
		return;

	spin_lock(&EXT2_SB(sb)->s_lock);
	ext2_update_dynamic_rev(sb);
	EXT2_SET_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR);
	spin_unlock(&EXT2_SB(sb)->s_lock);
	mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
}
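
/*
 * Note that EXT2_FEATURE_COMPAT_EXT_ATTR is a COMPAT feature: by the ext2
 * feature-flag rules, implementations that do not know the flag may still
 * mount the filesystem read-write and simply ignore the attribute blocks,
 * which is why setting it lazily on first use is safe.
 */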

/*
 * ext2_xattr_set()
 *
 * Create, replace or remove an extended attribute for this inode.  Value
 * is NULL to remove an existing extended attribute, and non-NULL to
 * either replace an existing extended attribute, or create a new extended
 * attribute. The flags XATTR_REPLACE and XATTR_CREATE specify that an
 * extended attribute must exist and must not exist prior to the call,
 * respectively.
 *
 * Returns 0, or a negative error number on failure.
 */
int
ext2_xattr_set(struct inode *inode, int name_index, const char *name,
	       const void *value, size_t value_len, int flags)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh = NULL;
	struct ext2_xattr_header *header = NULL;
	struct ext2_xattr_entry *here = NULL, *last = NULL;
	size_t name_len, free, min_offs = sb->s_blocksize;
	int not_found = 1, error;
	char *end;

	/*
	 * header -- Points either into bh, or to a temporarily
	 *           allocated buffer.
	 * here -- The named entry found, or the place for inserting, within
	 *         the block pointed to by header.
	 * last -- Points right after the last named entry within the block
	 *         pointed to by header.
	 * min_offs -- The offset of the first value (values are aligned
	 *             towards the end of the block).
	 * end -- Points right after the block pointed to by header.
	 */

	ea_idebug(inode, "name=%d.%s, value=%p, value_len=%ld",
		  name_index, name, value, (long)value_len);

	if (value == NULL)
		value_len = 0;
	if (name == NULL)
		return -EINVAL;
	name_len = strlen(name);
	if (name_len > 255 || value_len > sb->s_blocksize)
		return -ERANGE;
	down_write(&EXT2_I(inode)->xattr_sem);
	if (EXT2_I(inode)->i_file_acl) {
		/* The inode already has an extended attribute block. */
		bh = sb_bread(sb, EXT2_I(inode)->i_file_acl);
		error = -EIO;
		if (!bh)
			goto cleanup;
		ea_bdebug(bh, "b_count=%d, refcount=%d",
			atomic_read(&(bh->b_count)),
			le32_to_cpu(HDR(bh)->h_refcount));
		header = HDR(bh);
		end = bh->b_data + bh->b_size;
		if (!ext2_xattr_header_valid(header)) {
bad_block:
			ext2_error(sb, "ext2_xattr_set",
				"inode %ld: bad block %d", inode->i_ino,
				EXT2_I(inode)->i_file_acl);
			error = -EIO;
			goto cleanup;
		}
		/*
		 * Find the named attribute. If not found, 'here' will point
		 * to the entry where the new attribute should be inserted to
		 * maintain sorting.
		 */
		last = FIRST_ENTRY(bh);
		while (!IS_LAST_ENTRY(last)) {
			if (!ext2_xattr_entry_valid(last, end, sb->s_blocksize))
				goto bad_block;
			if (last->e_value_size) {
				size_t offs = le16_to_cpu(last->e_value_offs);
				if (offs < min_offs)
					min_offs = offs;
			}
			if (not_found > 0) {
				not_found = ext2_xattr_cmp_entry(name_index,
								 name_len,
								 name, last);
				if (not_found <= 0)
					here = last;
			}
			last = EXT2_XATTR_NEXT(last);
		}
		if (not_found > 0)
			here = last;

		/* Check whether we have enough space left. */
		free = min_offs - ((char*)last - (char*)header) - sizeof(__u32);
	} else {
		/* We will use a new extended attribute block. */
		free = sb->s_blocksize -
			sizeof(struct ext2_xattr_header) - sizeof(__u32);
	}

	if (not_found) {
		/* Request to remove a nonexistent attribute? */
		error = -ENODATA;
		if (flags & XATTR_REPLACE)
			goto cleanup;
		error = 0;
		if (value == NULL)
			goto cleanup;
	} else {
		/* Request to create an existing attribute? */
		error = -EEXIST;
		if (flags & XATTR_CREATE)
			goto cleanup;
		free += EXT2_XATTR_SIZE(le32_to_cpu(here->e_value_size));
		free += EXT2_XATTR_LEN(name_len);
	}
	error = -ENOSPC;
	if (free < EXT2_XATTR_LEN(name_len) + EXT2_XATTR_SIZE(value_len))
		goto cleanup;

	/* Here we know that we can set the new attribute. */

	if (header) {
		/* assert(header == HDR(bh)); */
		lock_buffer(bh);
		if (header->h_refcount == cpu_to_le32(1)) {
			__u32 hash = le32_to_cpu(header->h_hash);

			ea_bdebug(bh, "modifying in-place");
			/*
			 * This must happen under buffer lock for
			 * ext2_xattr_set2() to reliably detect modified block
			 */
			mb_cache_entry_delete(EA_BLOCK_CACHE(inode), hash,
					      bh->b_blocknr);

			/* keep the buffer locked while modifying it. */
		} else {
			int offset;

			unlock_buffer(bh);
			ea_bdebug(bh, "cloning");
			header = kmemdup(HDR(bh), bh->b_size, GFP_KERNEL);
			error = -ENOMEM;
			if (header == NULL)
				goto cleanup;
			header->h_refcount = cpu_to_le32(1);

			offset = (char *)here - bh->b_data;
			here = ENTRY((char *)header + offset);
			offset = (char *)last - bh->b_data;
			last = ENTRY((char *)header + offset);
		}
	} else {
		/* Allocate a buffer where we construct the new block. */
		header = kzalloc(sb->s_blocksize, GFP_KERNEL);
		error = -ENOMEM;
		if (header == NULL)
			goto cleanup;
		end = (char *)header + sb->s_blocksize;
		header->h_magic = cpu_to_le32(EXT2_XATTR_MAGIC);
		header->h_blocks = header->h_refcount = cpu_to_le32(1);
		last = here = ENTRY(header+1);
	}

	/* Iff we are modifying the block in-place, bh is locked here. */

	if (not_found) {
		/* Insert the new name. */
		size_t size = EXT2_XATTR_LEN(name_len);
		size_t rest = (char *)last - (char *)here;
		memmove((char *)here + size, here, rest);
		memset(here, 0, size);
		here->e_name_index = name_index;
		here->e_name_len = name_len;
		memcpy(here->e_name, name, name_len);
	} else {
		if (here->e_value_size) {
			char *first_val = (char *)header + min_offs;
			size_t offs = le16_to_cpu(here->e_value_offs);
			char *val = (char *)header + offs;
			size_t size = EXT2_XATTR_SIZE(
				le32_to_cpu(here->e_value_size));

			if (size == EXT2_XATTR_SIZE(value_len)) {
				/* The old and the new value have the same
				   size. Just replace. */
				here->e_value_size = cpu_to_le32(value_len);
				memset(val + size - EXT2_XATTR_PAD, 0,
				       EXT2_XATTR_PAD); /* Clear pad bytes. */
				memcpy(val, value, value_len);
				goto skip_replace;
			}

			/* Remove the old value. */
			memmove(first_val + size, first_val, val - first_val);
			memset(first_val, 0, size);
			here->e_value_offs = 0;
			min_offs += size;

			/* Adjust all value offsets. */
			last = ENTRY(header+1);
			while (!IS_LAST_ENTRY(last)) {
				size_t o = le16_to_cpu(last->e_value_offs);
				if (o < offs)
					last->e_value_offs =
						cpu_to_le16(o + size);
				last = EXT2_XATTR_NEXT(last);
			}
		}
		if (value == NULL) {
			/* Remove the old name. */
			size_t size = EXT2_XATTR_LEN(name_len);
			last = ENTRY((char *)last - size);
			memmove(here, (char*)here + size,
				(char*)last - (char*)here);
			memset(last, 0, size);
		}
	}

	if (value != NULL) {
		/* Insert the new value. */
		here->e_value_size = cpu_to_le32(value_len);
		if (value_len) {
			size_t size = EXT2_XATTR_SIZE(value_len);
			char *val = (char *)header + min_offs - size;
			here->e_value_offs =
				cpu_to_le16((char *)val - (char *)header);
			memset(val + size - EXT2_XATTR_PAD, 0,
			       EXT2_XATTR_PAD); /* Clear the pad bytes. */
			memcpy(val, value, value_len);
		}
	}

skip_replace:
	if (IS_LAST_ENTRY(ENTRY(header+1))) {
		/* This block is now empty. */
		if (bh && header == HDR(bh))
			unlock_buffer(bh);  /* we were modifying in-place. */
		error = ext2_xattr_set2(inode, bh, NULL);
	} else {
		ext2_xattr_rehash(header, here);
		if (bh && header == HDR(bh))
			unlock_buffer(bh);  /* we were modifying in-place. */
		error = ext2_xattr_set2(inode, bh, header);
	}

cleanup:
	if (!(bh && header == HDR(bh)))
		kfree(header);
	brelse(bh);
	up_write(&EXT2_I(inode)->xattr_sem);

	return error;
}
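
/*
 * Flag semantics in practice (sketch; "test" is just an example name in
 * the "user." namespace, which is selected by the index argument):
 *
 *	// create only; fails with -EEXIST if the attribute already exists
 *	ext2_xattr_set(inode, EXT2_XATTR_INDEX_USER, "test",
 *		       value, len, XATTR_CREATE);
 *
 *	// replace only; fails with -ENODATA if it does not exist
 *	ext2_xattr_set(inode, EXT2_XATTR_INDEX_USER, "test",
 *		       value, len, XATTR_REPLACE);
 *
 *	// a NULL value removes the attribute
 *	ext2_xattr_set(inode, EXT2_XATTR_INDEX_USER, "test", NULL, 0, 0);
 */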

/*
 * Second half of ext2_xattr_set(): Update the file system.
 */
static int
ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
		struct ext2_xattr_header *header)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *new_bh = NULL;
	int error;
	struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);

	if (header) {
		new_bh = ext2_xattr_cache_find(inode, header);
		if (new_bh) {
			/* We found an identical block in the cache. */
			if (new_bh == old_bh) {
				ea_bdebug(new_bh, "keeping this block");
			} else {
				/* The old block is released after updating
				   the inode. */
				ea_bdebug(new_bh, "reusing block");

				error = dquot_alloc_block(inode, 1);
				if (error) {
					unlock_buffer(new_bh);
					goto cleanup;
				}
				le32_add_cpu(&HDR(new_bh)->h_refcount, 1);
				ea_bdebug(new_bh, "refcount now=%d",
					le32_to_cpu(HDR(new_bh)->h_refcount));
			}
			unlock_buffer(new_bh);
		} else if (old_bh && header == HDR(old_bh)) {
			/* Keep this block. No need to lock the block as we
			   don't need to change the reference count. */
			new_bh = old_bh;
			get_bh(new_bh);
			ext2_xattr_cache_insert(ea_block_cache, new_bh);
		} else {
			/* We need to allocate a new block */
			ext2_fsblk_t goal = ext2_group_first_block_no(sb,
						EXT2_I(inode)->i_block_group);
			int block = ext2_new_block(inode, goal, &error);
			if (error)
				goto cleanup;
			ea_idebug(inode, "creating block %d", block);

			new_bh = sb_getblk(sb, block);
			if (unlikely(!new_bh)) {
				ext2_free_blocks(inode, block, 1);
				mark_inode_dirty(inode);
				error = -ENOMEM;
				goto cleanup;
			}
			lock_buffer(new_bh);
			memcpy(new_bh->b_data, header, new_bh->b_size);
			set_buffer_uptodate(new_bh);
			unlock_buffer(new_bh);
			ext2_xattr_cache_insert(ea_block_cache, new_bh);

			ext2_xattr_update_super_block(sb);
		}
		mark_buffer_dirty(new_bh);
		if (IS_SYNC(inode)) {
			sync_dirty_buffer(new_bh);
			error = -EIO;
			if (buffer_req(new_bh) && !buffer_uptodate(new_bh))
				goto cleanup;
		}
	}

	/* Update the inode. */
	EXT2_I(inode)->i_file_acl = new_bh ? new_bh->b_blocknr : 0;
	inode->i_ctime = current_time(inode);
	if (IS_SYNC(inode)) {
		error = sync_inode_metadata(inode, 1);
		/* In case sync failed due to ENOSPC the inode was actually
		 * written (only some dirty data were not) so we just proceed
		 * as if nothing happened and clean up the unused block */
		if (error && error != -ENOSPC) {
			if (new_bh && new_bh != old_bh) {
				dquot_free_block_nodirty(inode, 1);
				mark_inode_dirty(inode);
			}
			goto cleanup;
		}
	} else
		mark_inode_dirty(inode);

	error = 0;
	if (old_bh && old_bh != new_bh) {
		/*
		 * If there was an old block and we are no longer using it,
		 * release the old block.
		 */
		lock_buffer(old_bh);
		if (HDR(old_bh)->h_refcount == cpu_to_le32(1)) {
			__u32 hash = le32_to_cpu(HDR(old_bh)->h_hash);

			/*
			 * This must happen under buffer lock for
			 * ext2_xattr_set2() to reliably detect freed block
			 */
			mb_cache_entry_delete(ea_block_cache, hash,
					      old_bh->b_blocknr);
			/* Free the old block. */
			ea_bdebug(old_bh, "freeing");
			ext2_free_blocks(inode, old_bh->b_blocknr, 1);
			mark_inode_dirty(inode);
			/* We let our caller release old_bh, so we
			 * need to duplicate the buffer beforehand. */
			get_bh(old_bh);
			bforget(old_bh);
		} else {
			/* Decrement the refcount only. */
			le32_add_cpu(&HDR(old_bh)->h_refcount, -1);
			dquot_free_block_nodirty(inode, 1);
			mark_inode_dirty(inode);
			mark_buffer_dirty(old_bh);
			ea_bdebug(old_bh, "refcount now=%d",
				le32_to_cpu(HDR(old_bh)->h_refcount));
		}
		unlock_buffer(old_bh);
	}

cleanup:
	brelse(new_bh);

	return error;
}

/*
 * ext2_xattr_delete_inode()
 *
 * Free extended attribute resources associated with this inode. This
 * is called immediately before an inode is freed.
 */
void
ext2_xattr_delete_inode(struct inode *inode)
{
	struct buffer_head *bh = NULL;
	struct ext2_sb_info *sbi = EXT2_SB(inode->i_sb);

	/*
	 * We are the only ones holding an inode reference, so xattr_sem had
	 * better be unlocked! We could just as well not acquire xattr_sem at
	 * all, but taking it makes the code more futureproof. OTOH we need
	 * trylock here to avoid a false-positive warning from lockdep about
	 * a reclaim circular dependency.
	 */
	if (WARN_ON_ONCE(!down_write_trylock(&EXT2_I(inode)->xattr_sem)))
		return;
	if (!EXT2_I(inode)->i_file_acl)
		goto cleanup;

	if (!ext2_data_block_valid(sbi, EXT2_I(inode)->i_file_acl, 1)) {
		ext2_error(inode->i_sb, "ext2_xattr_delete_inode",
			"inode %ld: xattr block %d is out of data blocks range",
			inode->i_ino, EXT2_I(inode)->i_file_acl);
		goto cleanup;
	}

	bh = sb_bread(inode->i_sb, EXT2_I(inode)->i_file_acl);
	if (!bh) {
		ext2_error(inode->i_sb, "ext2_xattr_delete_inode",
			"inode %ld: block %d read error", inode->i_ino,
			EXT2_I(inode)->i_file_acl);
		goto cleanup;
	}
	ea_bdebug(bh, "b_count=%d", atomic_read(&(bh->b_count)));
	if (!ext2_xattr_header_valid(HDR(bh))) {
		ext2_error(inode->i_sb, "ext2_xattr_delete_inode",
			"inode %ld: bad block %d", inode->i_ino,
			EXT2_I(inode)->i_file_acl);
		goto cleanup;
	}
	lock_buffer(bh);
	if (HDR(bh)->h_refcount == cpu_to_le32(1)) {
		__u32 hash = le32_to_cpu(HDR(bh)->h_hash);

		/*
		 * This must happen under buffer lock for ext2_xattr_set2() to
		 * reliably detect freed block
		 */
		mb_cache_entry_delete(EA_BLOCK_CACHE(inode), hash,
				      bh->b_blocknr);
		ext2_free_blocks(inode, EXT2_I(inode)->i_file_acl, 1);
		get_bh(bh);
		bforget(bh);
		unlock_buffer(bh);
	} else {
		le32_add_cpu(&HDR(bh)->h_refcount, -1);
		ea_bdebug(bh, "refcount now=%d",
			le32_to_cpu(HDR(bh)->h_refcount));
		unlock_buffer(bh);
		mark_buffer_dirty(bh);
		if (IS_SYNC(inode))
			sync_dirty_buffer(bh);
		dquot_free_block_nodirty(inode, 1);
	}
	EXT2_I(inode)->i_file_acl = 0;

cleanup:
	brelse(bh);
	up_write(&EXT2_I(inode)->xattr_sem);
}

/*
 * ext2_xattr_cache_insert()
 *
 * Create a new entry in the extended attribute cache, and insert
 * it unless such an entry is already in the cache.
 *
 * Returns 0, or a negative error number on failure.
 */
static int
ext2_xattr_cache_insert(struct mb_cache *cache, struct buffer_head *bh)
{
	__u32 hash = le32_to_cpu(HDR(bh)->h_hash);
	int error;

	error = mb_cache_entry_create(cache, GFP_NOFS, hash, bh->b_blocknr,
				      true);
	if (error) {
		if (error == -EBUSY) {
			ea_bdebug(bh, "already in cache");
			error = 0;
		}
	} else
		ea_bdebug(bh, "inserting [%x]", (int)hash);
	return error;
}
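
/*
 * The cache maps the header hash (h_hash) to a block number, so several
 * blocks with colliding hashes can coexist under one key;
 * ext2_xattr_cache_find() below re-reads each candidate block and runs a
 * full ext2_xattr_cmp() before trusting a match.
 */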

/*
 * ext2_xattr_cmp()
 *
 * Compare two extended attribute blocks for equality.
 *
 * Returns 0 if the blocks are equal, 1 if they differ, and
 * a negative error number on errors.
 */
static int
ext2_xattr_cmp(struct ext2_xattr_header *header1,
	       struct ext2_xattr_header *header2)
{
	struct ext2_xattr_entry *entry1, *entry2;

	entry1 = ENTRY(header1+1);
	entry2 = ENTRY(header2+1);
	while (!IS_LAST_ENTRY(entry1)) {
		if (IS_LAST_ENTRY(entry2))
			return 1;
		if (entry1->e_hash != entry2->e_hash ||
		    entry1->e_name_index != entry2->e_name_index ||
		    entry1->e_name_len != entry2->e_name_len ||
		    entry1->e_value_size != entry2->e_value_size ||
		    memcmp(entry1->e_name, entry2->e_name, entry1->e_name_len))
			return 1;
		if (entry1->e_value_block != 0 || entry2->e_value_block != 0)
			return -EIO;
		if (memcmp((char *)header1 + le16_to_cpu(entry1->e_value_offs),
			   (char *)header2 + le16_to_cpu(entry2->e_value_offs),
			   le32_to_cpu(entry1->e_value_size)))
			return 1;

		entry1 = EXT2_XATTR_NEXT(entry1);
		entry2 = EXT2_XATTR_NEXT(entry2);
	}
	if (!IS_LAST_ENTRY(entry2))
		return 1;
	return 0;
}

/*
 * ext2_xattr_cache_find()
 *
 * Find an identical extended attribute block.
 *
 * Returns a locked buffer head to the block found, or NULL if such
 * a block was not found or an error occurred.
 */
static struct buffer_head *
ext2_xattr_cache_find(struct inode *inode, struct ext2_xattr_header *header)
{
	__u32 hash = le32_to_cpu(header->h_hash);
	struct mb_cache_entry *ce;
	struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);

	if (!header->h_hash)
		return NULL;  /* never share */
	ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
again:
	ce = mb_cache_entry_find_first(ea_block_cache, hash);
	while (ce) {
		struct buffer_head *bh;

		bh = sb_bread(inode->i_sb, ce->e_value);
		if (!bh) {
			ext2_error(inode->i_sb, "ext2_xattr_cache_find",
				"inode %ld: block %ld read error",
				inode->i_ino, (unsigned long) ce->e_value);
		} else {
			lock_buffer(bh);
			/*
			 * We have to be careful about races with freeing or
			 * rehashing of xattr block. Once we hold buffer lock
			 * xattr block's state is stable so we can check
			 * whether the block got freed / rehashed or not.
			 * Since we unhash mbcache entry under buffer lock when
			 * freeing / rehashing xattr block, checking whether
			 * entry is still hashed is reliable.
			 */
			if (hlist_bl_unhashed(&ce->e_hash_list)) {
				mb_cache_entry_put(ea_block_cache, ce);
				unlock_buffer(bh);
				brelse(bh);
				goto again;
			} else if (le32_to_cpu(HDR(bh)->h_refcount) >
				   EXT2_XATTR_REFCOUNT_MAX) {
				ea_idebug(inode, "block %ld refcount %d>%d",
					  (unsigned long) ce->e_value,
					  le32_to_cpu(HDR(bh)->h_refcount),
					  EXT2_XATTR_REFCOUNT_MAX);
			} else if (!ext2_xattr_cmp(header, HDR(bh))) {
				ea_bdebug(bh, "b_count=%d",
					  atomic_read(&(bh->b_count)));
				mb_cache_entry_touch(ea_block_cache, ce);
				mb_cache_entry_put(ea_block_cache, ce);
				return bh;
			}
			unlock_buffer(bh);
			brelse(bh);
		}
		ce = mb_cache_entry_find_next(ea_block_cache, ce);
	}
	return NULL;
}

#define NAME_HASH_SHIFT 5
#define VALUE_HASH_SHIFT 16

/*
 * ext2_xattr_hash_entry()
 *
 * Compute the hash of an extended attribute.
 */
static inline void ext2_xattr_hash_entry(struct ext2_xattr_header *header,
					 struct ext2_xattr_entry *entry)
{
	__u32 hash = 0;
	char *name = entry->e_name;
	int n;

	for (n = 0; n < entry->e_name_len; n++) {
		hash = (hash << NAME_HASH_SHIFT) ^
		       (hash >> (8*sizeof(hash) - NAME_HASH_SHIFT)) ^
		       *name++;
	}

	if (entry->e_value_block == 0 && entry->e_value_size != 0) {
		__le32 *value = (__le32 *)((char *)header +
			le16_to_cpu(entry->e_value_offs));
		for (n = (le32_to_cpu(entry->e_value_size) +
		     EXT2_XATTR_ROUND) >> EXT2_XATTR_PAD_BITS; n; n--) {
			hash = (hash << VALUE_HASH_SHIFT) ^
			       (hash >> (8*sizeof(hash) - VALUE_HASH_SHIFT)) ^
			       le32_to_cpu(*value++);
		}
	}
	entry->e_hash = cpu_to_le32(hash);
}
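
/*
 * Since the two shifted halves above cannot have set bits in common, the
 * "(hash << N) ^ (hash >> (32 - N))" pattern is simply a 32-bit left
 * rotation. An equivalent formulation (sketch, using rol32() from
 * <linux/bitops.h>) would be:
 *
 *	hash = rol32(hash, NAME_HASH_SHIFT) ^ *name++;
 *	hash = rol32(hash, VALUE_HASH_SHIFT) ^ le32_to_cpu(*value++);
 */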

#undef NAME_HASH_SHIFT
#undef VALUE_HASH_SHIFT

#define BLOCK_HASH_SHIFT 16

/*
 * ext2_xattr_rehash()
 *
 * Re-compute the extended attribute hash value after an entry has changed.
 */
static void ext2_xattr_rehash(struct ext2_xattr_header *header,
			      struct ext2_xattr_entry *entry)
{
	struct ext2_xattr_entry *here;
	__u32 hash = 0;

	ext2_xattr_hash_entry(header, entry);
	here = ENTRY(header+1);
	while (!IS_LAST_ENTRY(here)) {
		if (!here->e_hash) {
			/* Block is not shared if an entry's hash value == 0 */
			hash = 0;
			break;
		}
		hash = (hash << BLOCK_HASH_SHIFT) ^
		       (hash >> (8*sizeof(hash) - BLOCK_HASH_SHIFT)) ^
		       le32_to_cpu(here->e_hash);
		here = EXT2_XATTR_NEXT(here);
	}
	header->h_hash = cpu_to_le32(hash);
}

#undef BLOCK_HASH_SHIFT

#define HASH_BUCKET_BITS 10

struct mb_cache *ext2_xattr_create_cache(void)
{
	return mb_cache_create(HASH_BUCKET_BITS);
}

void ext2_xattr_destroy_cache(struct mb_cache *cache)
{
	if (cache)
		mb_cache_destroy(cache);
}
1069