xref: /openbmc/linux/fs/gfs2/xattr.c (revision 5927145e)
1 /*
2  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
3  * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
4  *
5  * This copyrighted material is made available to anyone wishing to use,
6  * modify, copy, or redistribute it subject to the terms and conditions
7  * of the GNU General Public License version 2.
8  */
9 
10 #include <linux/slab.h>
11 #include <linux/spinlock.h>
12 #include <linux/completion.h>
13 #include <linux/buffer_head.h>
14 #include <linux/xattr.h>
15 #include <linux/gfs2_ondisk.h>
16 #include <linux/posix_acl_xattr.h>
17 #include <linux/uaccess.h>
18 
19 #include "gfs2.h"
20 #include "incore.h"
21 #include "acl.h"
22 #include "xattr.h"
23 #include "glock.h"
24 #include "inode.h"
25 #include "meta_io.h"
26 #include "quota.h"
27 #include "rgrp.h"
28 #include "super.h"
29 #include "trans.h"
30 #include "util.h"
31 
/**
 * ea_calc_size - returns the actual number of bytes the request will take up
 *                (not counting any unstuffed data blocks)
 * @sdp: the filesystem superblock
 * @nsize: length of the attribute name
 * @dsize: length of the attribute data
 * @size: the computed on-disk size of the EA record is returned here
 *
 * Returns: 1 if the EA should be stuffed
 */

static int ea_calc_size(struct gfs2_sbd *sdp, unsigned int nsize, size_t dsize,
			unsigned int *size)
{
	unsigned int jbsize = sdp->sd_jbsize;

	/* Stuffed: header, name and data all live in the EA block */
	*size = ALIGN(sizeof(struct gfs2_ea_header) + nsize + dsize, 8);

	if (*size <= jbsize)
		return 1;

	/* Unstuffed: data is replaced by an array of block pointers */
	*size = ALIGN(sizeof(struct gfs2_ea_header) + nsize +
		      (sizeof(__be64) * DIV_ROUND_UP(dsize, jbsize)), 8);

	return 0;
}
59 
60 static int ea_check_size(struct gfs2_sbd *sdp, unsigned int nsize, size_t dsize)
61 {
62 	unsigned int size;
63 
64 	if (dsize > GFS2_EA_MAX_DATA_LEN)
65 		return -ERANGE;
66 
67 	ea_calc_size(sdp, nsize, dsize, &size);
68 
69 	/* This can only happen with 512 byte blocks */
70 	if (size > sdp->sd_jbsize)
71 		return -ERANGE;
72 
73 	return 0;
74 }
75 
76 typedef int (*ea_call_t) (struct gfs2_inode *ip, struct buffer_head *bh,
77 			  struct gfs2_ea_header *ea,
78 			  struct gfs2_ea_header *prev, void *private);
79 
/*
 * ea_foreach_i - walk every EA header in a single EA block
 * @ip: the inode the block belongs to
 * @bh: the EA block to walk
 * @ea_call: callback invoked for each header (unused headers included)
 * @data: opaque cookie passed through to @ea_call
 *
 * Each header is sanity-checked before the callback runs: nonzero
 * record length, next header still inside the buffer, valid type.
 * A nonzero return from @ea_call stops the walk and is propagated.
 * Any inconsistency flags the inode and returns -EIO.
 */
static int ea_foreach_i(struct gfs2_inode *ip, struct buffer_head *bh,
			ea_call_t ea_call, void *data)
{
	struct gfs2_ea_header *ea, *prev = NULL;
	int error = 0;

	if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_EA))
		return -EIO;

	for (ea = GFS2_EA_BH2FIRST(bh);; prev = ea, ea = GFS2_EA2NEXT(ea)) {
		/* A zero record length would make the walk loop forever */
		if (!GFS2_EA_REC_LEN(ea))
			goto fail;
		if (!(bh->b_data <= (char *)ea && (char *)GFS2_EA2NEXT(ea) <=
						  bh->b_data + bh->b_size))
			goto fail;
		if (!GFS2_EATYPE_VALID(ea->ea_type))
			goto fail;

		error = ea_call(ip, bh, ea, prev, data);
		if (error)
			return error;

		if (GFS2_EA_IS_LAST(ea)) {
			/* The last record must end exactly at the buffer end */
			if ((char *)GFS2_EA2NEXT(ea) !=
			    bh->b_data + bh->b_size)
				goto fail;
			break;
		}
	}

	return error;

fail:
	gfs2_consist_inode(ip);
	return -EIO;
}
116 
/*
 * ea_foreach - call @ea_call on every EA header attached to the inode
 *
 * Handles both on-disk layouts: a single direct EA block, or (when
 * GFS2_DIF_EA_INDIRECT is set) an indirect block containing pointers
 * to EA blocks.  The indirect walk stops at the first zero pointer.
 *
 * Returns: errno, or the first nonzero value returned by @ea_call
 */
static int ea_foreach(struct gfs2_inode *ip, ea_call_t ea_call, void *data)
{
	struct buffer_head *bh, *eabh;
	__be64 *eablk, *end;
	int error;

	error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT, 0, &bh);
	if (error)
		return error;

	/* Direct layout: i_eattr points straight at a single EA block */
	if (!(ip->i_diskflags & GFS2_DIF_EA_INDIRECT)) {
		error = ea_foreach_i(ip, bh, ea_call, data);
		goto out;
	}

	/* Indirect layout: i_eattr points at a block of EA block pointers */
	if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_IN)) {
		error = -EIO;
		goto out;
	}

	eablk = (__be64 *)(bh->b_data + sizeof(struct gfs2_meta_header));
	end = eablk + GFS2_SB(&ip->i_inode)->sd_inptrs;

	for (; eablk < end; eablk++) {
		u64 bn;

		/* Pointers are packed from the front; first hole ends the list */
		if (!*eablk)
			break;
		bn = be64_to_cpu(*eablk);

		error = gfs2_meta_read(ip->i_gl, bn, DIO_WAIT, 0, &eabh);
		if (error)
			break;
		error = ea_foreach_i(ip, eabh, ea_call, data);
		brelse(eabh);
		if (error)
			break;
	}
out:
	brelse(bh);
	return error;
}
159 
/* Search context shared between gfs2_ea_find() and ea_find_i() */
struct ea_find {
	int type;			/* GFS2_EATYPE_... to match */
	const char *name;		/* attribute name, without prefix */
	size_t namel;			/* strlen(name) */
	struct gfs2_ea_location *ef_el;	/* filled in when a match is found */
};
166 
167 static int ea_find_i(struct gfs2_inode *ip, struct buffer_head *bh,
168 		     struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
169 		     void *private)
170 {
171 	struct ea_find *ef = private;
172 
173 	if (ea->ea_type == GFS2_EATYPE_UNUSED)
174 		return 0;
175 
176 	if (ea->ea_type == ef->type) {
177 		if (ea->ea_name_len == ef->namel &&
178 		    !memcmp(GFS2_EA2NAME(ea), ef->name, ea->ea_name_len)) {
179 			struct gfs2_ea_location *el = ef->ef_el;
180 			get_bh(bh);
181 			el->el_bh = bh;
182 			el->el_ea = ea;
183 			el->el_prev = prev;
184 			return 1;
185 		}
186 	}
187 
188 	return 0;
189 }
190 
191 static int gfs2_ea_find(struct gfs2_inode *ip, int type, const char *name,
192 			struct gfs2_ea_location *el)
193 {
194 	struct ea_find ef;
195 	int error;
196 
197 	ef.type = type;
198 	ef.name = name;
199 	ef.namel = strlen(name);
200 	ef.ef_el = el;
201 
202 	memset(el, 0, sizeof(struct gfs2_ea_location));
203 
204 	error = ea_foreach(ip, ea_find_i, &ef);
205 	if (error > 0)
206 		return 0;
207 
208 	return error;
209 }
210 
/**
 * ea_dealloc_unstuffed - free the data blocks of an unstuffed EA
 * @ip: the inode the EA belongs to
 * @bh: the buffer holding the EA header
 * @ea: the EA whose data blocks are to be freed
 * @prev: the previous EA header in the block, or NULL
 * @private: if non-NULL, leave the EA header marked in-use
 *
 * Take advantage of the fact that all unstuffed blocks are
 * allocated from the same RG.  But watch, this may not always
 * be true.
 *
 * Returns: errno
 */

static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
				struct gfs2_ea_header *ea,
				struct gfs2_ea_header *prev, void *private)
{
	int *leave = private;
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;
	struct gfs2_holder rg_gh;
	__be64 *dataptrs;
	u64 bn = 0;
	u64 bstart = 0;		/* start of the current contiguous run */
	unsigned int blen = 0;	/* length of the current contiguous run */
	unsigned int blks = 0;
	unsigned int x;
	int error;

	error = gfs2_rindex_update(sdp);
	if (error)
		return error;

	if (GFS2_EA_IS_STUFFED(ea))
		return 0;

	/* First pass: count the blocks so the transaction can be sized,
	   and remember one block number to look up the resource group */
	dataptrs = GFS2_EA2DATAPTRS(ea);
	for (x = 0; x < ea->ea_num_ptrs; x++, dataptrs++) {
		if (*dataptrs) {
			blks++;
			bn = be64_to_cpu(*dataptrs);
		}
	}
	if (!blks)
		return 0;

	rgd = gfs2_blk2rgrpd(sdp, bn, 1);
	if (!rgd) {
		gfs2_consist_inode(ip);
		return -EIO;
	}

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &rg_gh);
	if (error)
		return error;

	error = gfs2_trans_begin(sdp, rgd->rd_length + RES_DINODE +
				 RES_EATTR + RES_STATFS + RES_QUOTA, blks);
	if (error)
		goto out_gunlock;

	gfs2_trans_add_meta(ip->i_gl, bh);

	/* Second pass: free the blocks, coalescing contiguous runs so
	   gfs2_free_meta() is called as few times as possible */
	dataptrs = GFS2_EA2DATAPTRS(ea);
	for (x = 0; x < ea->ea_num_ptrs; x++, dataptrs++) {
		if (!*dataptrs)
			break;
		bn = be64_to_cpu(*dataptrs);

		if (bstart + blen == bn)
			blen++;
		else {
			if (bstart)
				gfs2_free_meta(ip, bstart, blen);
			bstart = bn;
			blen = 1;
		}

		*dataptrs = 0;
		gfs2_add_inode_blocks(&ip->i_inode, -1);
	}
	if (bstart)
		gfs2_free_meta(ip, bstart, blen);

	/* Unless asked to leave the header, reclaim its space: merge it
	   into the previous record, or mark it unused if it is first */
	if (prev && !leave) {
		u32 len;

		len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
		prev->ea_rec_len = cpu_to_be32(len);

		if (GFS2_EA_IS_LAST(ea))
			prev->ea_flags |= GFS2_EAFLAG_LAST;
	} else {
		ea->ea_type = GFS2_EATYPE_UNUSED;
		ea->ea_num_ptrs = 0;
	}

	ip->i_inode.i_ctime = current_time(&ip->i_inode);
	__mark_inode_dirty(&ip->i_inode, I_DIRTY_SYNC | I_DIRTY_DATASYNC);

	gfs2_trans_end(sdp);

out_gunlock:
	gfs2_glock_dq_uninit(&rg_gh);
	return error;
}
319 
320 static int ea_remove_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
321 			       struct gfs2_ea_header *ea,
322 			       struct gfs2_ea_header *prev, int leave)
323 {
324 	int error;
325 
326 	error = gfs2_rindex_update(GFS2_SB(&ip->i_inode));
327 	if (error)
328 		return error;
329 
330 	error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
331 	if (error)
332 		goto out_alloc;
333 
334 	error = ea_dealloc_unstuffed(ip, bh, ea, prev, (leave) ? &error : NULL);
335 
336 	gfs2_quota_unhold(ip);
337 out_alloc:
338 	return error;
339 }
340 
/* Accumulator for gfs2_listxattr()'s walk over the EA headers */
struct ea_list {
	struct gfs2_ea_request *ei_er;	/* er_data/er_data_len describe the user buffer */
	unsigned int ei_size;		/* bytes of name list produced so far */
};
345 
346 static inline unsigned int gfs2_ea_strlen(struct gfs2_ea_header *ea)
347 {
348 	switch (ea->ea_type) {
349 	case GFS2_EATYPE_USR:
350 		return 5 + ea->ea_name_len + 1;
351 	case GFS2_EATYPE_SYS:
352 		return 7 + ea->ea_name_len + 1;
353 	case GFS2_EATYPE_SECURITY:
354 		return 9 + ea->ea_name_len + 1;
355 	default:
356 		return 0;
357 	}
358 }
359 
/*
 * ea_list_i - ea_foreach callback that emits one "<prefix>name\0" entry
 * @private: struct ea_list tracking the output buffer and offset
 *
 * When the request carries no buffer (er_data_len == 0) only the total
 * size is accumulated, which is how listxattr(NULL, 0) is implemented.
 */
static int ea_list_i(struct gfs2_inode *ip, struct buffer_head *bh,
		     struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
		     void *private)
{
	struct ea_list *ei = private;
	struct gfs2_ea_request *er = ei->ei_er;
	unsigned int ea_size = gfs2_ea_strlen(ea);

	if (ea->ea_type == GFS2_EATYPE_UNUSED)
		return 0;

	if (er->er_data_len) {
		char *prefix = NULL;
		unsigned int l = 0;
		char c = 0;

		/* Caller's buffer too small for the whole list */
		if (ei->ei_size + ea_size > er->er_data_len)
			return -ERANGE;

		switch (ea->ea_type) {
		case GFS2_EATYPE_USR:
			prefix = "user.";
			l = 5;
			break;
		case GFS2_EATYPE_SYS:
			prefix = "system.";
			l = 7;
			break;
		case GFS2_EATYPE_SECURITY:
			prefix = "security.";
			l = 9;
			break;
		}

		/* ea_foreach_i already validated ea_type, so l must be set */
		BUG_ON(l == 0);

		/* prefix, then the raw name, then a NUL terminator */
		memcpy(er->er_data + ei->ei_size, prefix, l);
		memcpy(er->er_data + ei->ei_size + l, GFS2_EA2NAME(ea),
		       ea->ea_name_len);
		memcpy(er->er_data + ei->ei_size + ea_size - 1, &c, 1);
	}

	ei->ei_size += ea_size;

	return 0;
}
406 
407 /**
408  * gfs2_listxattr - List gfs2 extended attributes
409  * @dentry: The dentry whose inode we are interested in
410  * @buffer: The buffer to write the results
411  * @size: The size of the buffer
412  *
413  * Returns: actual size of data on success, -errno on error
414  */
415 
416 ssize_t gfs2_listxattr(struct dentry *dentry, char *buffer, size_t size)
417 {
418 	struct gfs2_inode *ip = GFS2_I(d_inode(dentry));
419 	struct gfs2_ea_request er;
420 	struct gfs2_holder i_gh;
421 	int error;
422 
423 	memset(&er, 0, sizeof(struct gfs2_ea_request));
424 	if (size) {
425 		er.er_data = buffer;
426 		er.er_data_len = size;
427 	}
428 
429 	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
430 	if (error)
431 		return error;
432 
433 	if (ip->i_eattr) {
434 		struct ea_list ei = { .ei_er = &er, .ei_size = 0 };
435 
436 		error = ea_foreach(ip, ea_list_i, &ei);
437 		if (!error)
438 			error = ei.ei_size;
439 	}
440 
441 	gfs2_glock_dq_uninit(&i_gh);
442 
443 	return error;
444 }
445 
/**
 * gfs2_iter_unstuffed - copies the unstuffed xattr data to/from the
 *                       request buffer
 * @ip: The GFS2 inode
 * @ea: The extended attribute header structure
 * @din: The data to be copied in
 * @dout: The data to be copied out (one of din,dout will be NULL)
 *
 * Returns: errno
 */

static int gfs2_iter_unstuffed(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
			       const char *din, char *dout)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head **bh;
	unsigned int amount = GFS2_EA_DATA_LEN(ea);
	unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize);
	__be64 *dataptrs = GFS2_EA2DATAPTRS(ea);
	unsigned int x;
	int error = 0;
	unsigned char *pos;
	unsigned cp_size;

	bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_NOFS);
	if (!bh)
		return -ENOMEM;

	/* Issue all the reads asynchronously (no DIO_WAIT) ... */
	for (x = 0; x < nptrs; x++) {
		error = gfs2_meta_read(ip->i_gl, be64_to_cpu(*dataptrs), 0, 0,
				       bh + x);
		if (error) {
			/* Release the buffers already obtained */
			while (x--)
				brelse(bh[x]);
			goto out;
		}
		dataptrs++;
	}

	/* ... then wait for and process them in order */
	for (x = 0; x < nptrs; x++) {
		error = gfs2_meta_wait(sdp, bh[x]);
		if (error) {
			/* Release this and all remaining buffers */
			for (; x < nptrs; x++)
				brelse(bh[x]);
			goto out;
		}
		if (gfs2_metatype_check(sdp, bh[x], GFS2_METATYPE_ED)) {
			for (; x < nptrs; x++)
				brelse(bh[x]);
			error = -EIO;
			goto out;
		}

		pos = bh[x]->b_data + sizeof(struct gfs2_meta_header);
		/* The final block may hold less than a full jbsize chunk */
		cp_size = (sdp->sd_jbsize > amount) ? amount : sdp->sd_jbsize;

		if (dout) {
			memcpy(dout, pos, cp_size);
			dout += sdp->sd_jbsize;
		}

		if (din) {
			/* Writing: journal the block before modifying it */
			gfs2_trans_add_meta(ip->i_gl, bh[x]);
			memcpy(pos, din, cp_size);
			din += sdp->sd_jbsize;
		}

		amount -= sdp->sd_jbsize;
		brelse(bh[x]);
	}

out:
	kfree(bh);
	return error;
}
521 
522 static int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el,
523 			    char *data, size_t size)
524 {
525 	int ret;
526 	size_t len = GFS2_EA_DATA_LEN(el->el_ea);
527 	if (len > size)
528 		return -ERANGE;
529 
530 	if (GFS2_EA_IS_STUFFED(el->el_ea)) {
531 		memcpy(data, GFS2_EA2DATA(el->el_ea), len);
532 		return len;
533 	}
534 	ret = gfs2_iter_unstuffed(ip, el->el_ea, NULL, data);
535 	if (ret < 0)
536 		return ret;
537 	return len;
538 }
539 
540 int gfs2_xattr_acl_get(struct gfs2_inode *ip, const char *name, char **ppdata)
541 {
542 	struct gfs2_ea_location el;
543 	int error;
544 	int len;
545 	char *data;
546 
547 	error = gfs2_ea_find(ip, GFS2_EATYPE_SYS, name, &el);
548 	if (error)
549 		return error;
550 	if (!el.el_ea)
551 		goto out;
552 	if (!GFS2_EA_DATA_LEN(el.el_ea))
553 		goto out;
554 
555 	len = GFS2_EA_DATA_LEN(el.el_ea);
556 	data = kmalloc(len, GFP_NOFS);
557 	error = -ENOMEM;
558 	if (data == NULL)
559 		goto out;
560 
561 	error = gfs2_ea_get_copy(ip, &el, data, len);
562 	if (error < 0)
563 		kfree(data);
564 	else
565 		*ppdata = data;
566 out:
567 	brelse(el.el_bh);
568 	return error;
569 }
570 
571 /**
572  * gfs2_xattr_get - Get a GFS2 extended attribute
573  * @inode: The inode
574  * @name: The name of the extended attribute
575  * @buffer: The buffer to write the result into
576  * @size: The size of the buffer
577  * @type: The type of extended attribute
578  *
579  * Returns: actual size of data on success, -errno on error
580  */
581 static int __gfs2_xattr_get(struct inode *inode, const char *name,
582 			    void *buffer, size_t size, int type)
583 {
584 	struct gfs2_inode *ip = GFS2_I(inode);
585 	struct gfs2_ea_location el;
586 	int error;
587 
588 	if (!ip->i_eattr)
589 		return -ENODATA;
590 	if (strlen(name) > GFS2_EA_MAX_NAME_LEN)
591 		return -EINVAL;
592 
593 	error = gfs2_ea_find(ip, type, name, &el);
594 	if (error)
595 		return error;
596 	if (!el.el_ea)
597 		return -ENODATA;
598 	if (size)
599 		error = gfs2_ea_get_copy(ip, &el, buffer, size);
600 	else
601 		error = GFS2_EA_DATA_LEN(el.el_ea);
602 	brelse(el.el_bh);
603 
604 	return error;
605 }
606 
607 static int gfs2_xattr_get(const struct xattr_handler *handler,
608 			  struct dentry *unused, struct inode *inode,
609 			  const char *name, void *buffer, size_t size)
610 {
611 	struct gfs2_inode *ip = GFS2_I(inode);
612 	struct gfs2_holder gh;
613 	int ret;
614 
615 	/* During lookup, SELinux calls this function with the glock locked. */
616 
617 	if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
618 		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &gh);
619 		if (ret)
620 			return ret;
621 	} else {
622 		gfs2_holder_mark_uninitialized(&gh);
623 	}
624 	ret = __gfs2_xattr_get(inode, name, buffer, size, handler->flags);
625 	if (gfs2_holder_initialized(&gh))
626 		gfs2_glock_dq_uninit(&gh);
627 	return ret;
628 }
629 
/**
 * ea_alloc_blk - allocates a new block for extended attributes.
 * @ip: A pointer to the inode that's getting extended attributes
 * @bhp: Pointer to pointer to a struct buffer_head
 *
 * The new block is initialized with a single unused EA header that
 * spans the whole journaled area.  Must be called inside a transaction.
 *
 * Returns: errno
 */

static int ea_alloc_blk(struct gfs2_inode *ip, struct buffer_head **bhp)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_ea_header *ea;
	unsigned int n = 1;
	u64 block;
	int error;

	error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
	if (error)
		return error;
	/* Block is freshly allocated; no pending revoke can apply to it */
	gfs2_trans_add_unrevoke(sdp, block, 1);
	*bhp = gfs2_meta_new(ip->i_gl, block);
	gfs2_trans_add_meta(ip->i_gl, *bhp);
	gfs2_metatype_set(*bhp, GFS2_METATYPE_EA, GFS2_FORMAT_EA);
	gfs2_buffer_clear_tail(*bhp, sizeof(struct gfs2_meta_header));

	/* One unused record covering the entire block */
	ea = GFS2_EA_BH2FIRST(*bhp);
	ea->ea_rec_len = cpu_to_be32(sdp->sd_jbsize);
	ea->ea_type = GFS2_EATYPE_UNUSED;
	ea->ea_flags = GFS2_EAFLAG_LAST;
	ea->ea_num_ptrs = 0;

	gfs2_add_inode_blocks(&ip->i_inode, 1);

	return 0;
}
665 
/**
 * ea_write - writes the request info to an ea, creating new blocks if
 *            necessary
 * @ip: inode that is being modified
 * @ea: the location of the new ea in a block
 * @er: the write request
 *
 * Note: does not update ea_rec_len or the GFS2_EAFLAG_LAST bit of ea_flags
 *
 * returns : errno
 */

static int ea_write(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
		    struct gfs2_ea_request *er)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	int error;

	ea->ea_data_len = cpu_to_be32(er->er_data_len);
	ea->ea_name_len = er->er_name_len;
	ea->ea_type = er->er_type;
	ea->__pad = 0;

	memcpy(GFS2_EA2NAME(ea), er->er_name, er->er_name_len);

	if (GFS2_EAREQ_SIZE_STUFFED(er) <= sdp->sd_jbsize) {
		/* Stuffed: the value fits inline after the name */
		ea->ea_num_ptrs = 0;
		memcpy(GFS2_EA2DATA(ea), er->er_data, er->er_data_len);
	} else {
		/* Unstuffed: allocate data blocks and store pointers to
		   them after the name instead of the value itself */
		__be64 *dataptr = GFS2_EA2DATAPTRS(ea);
		const char *data = er->er_data;
		unsigned int data_len = er->er_data_len;
		unsigned int copy;
		unsigned int x;

		ea->ea_num_ptrs = DIV_ROUND_UP(er->er_data_len, sdp->sd_jbsize);
		for (x = 0; x < ea->ea_num_ptrs; x++) {
			struct buffer_head *bh;
			u64 block;
			int mh_size = sizeof(struct gfs2_meta_header);
			unsigned int n = 1;

			error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
			if (error)
				return error;
			gfs2_trans_add_unrevoke(sdp, block, 1);
			bh = gfs2_meta_new(ip->i_gl, block);
			gfs2_trans_add_meta(ip->i_gl, bh);
			gfs2_metatype_set(bh, GFS2_METATYPE_ED, GFS2_FORMAT_ED);

			gfs2_add_inode_blocks(&ip->i_inode, 1);

			/* Last chunk may be shorter than a full block;
			   zero-fill the remainder for determinism */
			copy = data_len > sdp->sd_jbsize ? sdp->sd_jbsize :
							   data_len;
			memcpy(bh->b_data + mh_size, data, copy);
			if (copy < sdp->sd_jbsize)
				memset(bh->b_data + mh_size + copy, 0,
				       sdp->sd_jbsize - copy);

			*dataptr++ = cpu_to_be64(bh->b_blocknr);
			data += copy;
			data_len -= copy;

			brelse(bh);
		}

		/* All of the data must have been consumed */
		gfs2_assert_withdraw(sdp, !data_len);
	}

	return 0;
}
737 
738 typedef int (*ea_skeleton_call_t) (struct gfs2_inode *ip,
739 				   struct gfs2_ea_request *er, void *private);
740 
/*
 * ea_alloc_skeleton - common setup/teardown for EA operations that allocate
 * @ip: the inode being modified
 * @er: the set request
 * @blks: number of blocks the operation may allocate
 * @skeleton_call: the operation to run inside the transaction
 * @private: opaque cookie passed to @skeleton_call
 *
 * Acquires quota locks and a block reservation, opens a transaction,
 * runs @skeleton_call, then tears everything down in reverse order.
 *
 * Returns: errno
 */
static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
			     unsigned int blks,
			     ea_skeleton_call_t skeleton_call, void *private)
{
	struct gfs2_alloc_parms ap = { .target = blks };
	int error;

	error = gfs2_rindex_update(GFS2_SB(&ip->i_inode));
	if (error)
		return error;

	error = gfs2_quota_lock_check(ip, &ap);
	if (error)
		return error;

	error = gfs2_inplace_reserve(ip, &ap);
	if (error)
		goto out_gunlock_q;

	error = gfs2_trans_begin(GFS2_SB(&ip->i_inode),
				 blks + gfs2_rg_blocks(ip, blks) +
				 RES_DINODE + RES_STATFS + RES_QUOTA, 0);
	if (error)
		goto out_ipres;

	error = skeleton_call(ip, er, private);
	if (error)
		goto out_end_trans;

	/* Only touch the timestamp if the operation succeeded */
	ip->i_inode.i_ctime = current_time(&ip->i_inode);
	__mark_inode_dirty(&ip->i_inode, I_DIRTY_SYNC | I_DIRTY_DATASYNC);

out_end_trans:
	gfs2_trans_end(GFS2_SB(&ip->i_inode));
out_ipres:
	gfs2_inplace_release(ip);
out_gunlock_q:
	gfs2_quota_unlock(ip);
	return error;
}
781 
782 static int ea_init_i(struct gfs2_inode *ip, struct gfs2_ea_request *er,
783 		     void *private)
784 {
785 	struct buffer_head *bh;
786 	int error;
787 
788 	error = ea_alloc_blk(ip, &bh);
789 	if (error)
790 		return error;
791 
792 	ip->i_eattr = bh->b_blocknr;
793 	error = ea_write(ip, GFS2_EA_BH2FIRST(bh), er);
794 
795 	brelse(bh);
796 
797 	return error;
798 }
799 
/**
 * ea_init - initializes a new eattr block
 * @ip: the inode that is getting its first extended attribute
 * @type: the GFS2_EATYPE_... of the attribute
 * @name: the attribute name
 * @data: the attribute value
 * @size: length of @data
 *
 * Returns: errno
 */

static int ea_init(struct gfs2_inode *ip, int type, const char *name,
		   const void *data, size_t size)
{
	struct gfs2_ea_request er;
	unsigned int jbsize = GFS2_SB(&ip->i_inode)->sd_jbsize;
	unsigned int blks = 1;

	er.er_type = type;
	er.er_name = name;
	er.er_name_len = strlen(name);
	er.er_data = (void *)data;
	er.er_data_len = size;

	/* One block for the EA header, plus data blocks if unstuffed */
	if (GFS2_EAREQ_SIZE_STUFFED(&er) > jbsize)
		blks += DIV_ROUND_UP(er.er_data_len, jbsize);

	return ea_alloc_skeleton(ip, &er, blks, ea_init_i, NULL);
}
826 
/*
 * ea_split_ea - split the free tail off an EA record
 *
 * Shrinks @ea's record length to its used size and creates a new
 * (caller-initialized) record in the freed space.  The LAST flag, if
 * set on @ea, migrates to the new record (the XOR clears it on @ea).
 *
 * Returns: the new record occupying the tail
 */
static struct gfs2_ea_header *ea_split_ea(struct gfs2_ea_header *ea)
{
	u32 ea_size = GFS2_EA_SIZE(ea);
	struct gfs2_ea_header *new = (struct gfs2_ea_header *)((char *)ea +
				     ea_size);
	u32 new_size = GFS2_EA_REC_LEN(ea) - ea_size;
	int last = ea->ea_flags & GFS2_EAFLAG_LAST;

	ea->ea_rec_len = cpu_to_be32(ea_size);
	ea->ea_flags ^= last;	/* clear LAST on the old record if it was set */

	new->ea_rec_len = cpu_to_be32(new_size);
	new->ea_flags = last;	/* new record inherits the LAST flag */

	return new;
}
843 
/*
 * ea_set_remove_stuffed - remove the old stuffed EA after a replace
 *
 * Merges the old record into its predecessor when possible; otherwise
 * just marks it unused.  If the predecessor was split in the meantime
 * (el_prev is stale), step forward to the record actually preceding ea.
 */
static void ea_set_remove_stuffed(struct gfs2_inode *ip,
				  struct gfs2_ea_location *el)
{
	struct gfs2_ea_header *ea = el->el_ea;
	struct gfs2_ea_header *prev = el->el_prev;
	u32 len;

	gfs2_trans_add_meta(ip->i_gl, el->el_bh);

	if (!prev || !GFS2_EA_IS_STUFFED(ea)) {
		ea->ea_type = GFS2_EATYPE_UNUSED;
		return;
	} else if (GFS2_EA2NEXT(prev) != ea) {
		/* prev was split; its tail now precedes ea */
		prev = GFS2_EA2NEXT(prev);
		gfs2_assert_withdraw(GFS2_SB(&ip->i_inode), GFS2_EA2NEXT(prev) == ea);
	}

	len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
	prev->ea_rec_len = cpu_to_be32(len);

	if (GFS2_EA_IS_LAST(ea))
		prev->ea_flags |= GFS2_EAFLAG_LAST;
}
867 
/* State shared between ea_set_simple() and its helpers */
struct ea_set {
	int ea_split;			/* nonzero: carve new EA out of es_ea's free tail */

	struct gfs2_ea_request *es_er;	/* the set request being satisfied */
	struct gfs2_ea_location *es_el;	/* old stuffed EA to remove afterwards, or NULL */

	struct buffer_head *es_bh;	/* block holding es_ea (alloc path only) */
	struct gfs2_ea_header *es_ea;	/* the EA record chosen for (re)use */
};
877 
878 static int ea_set_simple_noalloc(struct gfs2_inode *ip, struct buffer_head *bh,
879 				 struct gfs2_ea_header *ea, struct ea_set *es)
880 {
881 	struct gfs2_ea_request *er = es->es_er;
882 	int error;
883 
884 	error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + 2 * RES_EATTR, 0);
885 	if (error)
886 		return error;
887 
888 	gfs2_trans_add_meta(ip->i_gl, bh);
889 
890 	if (es->ea_split)
891 		ea = ea_split_ea(ea);
892 
893 	ea_write(ip, ea, er);
894 
895 	if (es->es_el)
896 		ea_set_remove_stuffed(ip, es->es_el);
897 
898 	ip->i_inode.i_ctime = current_time(&ip->i_inode);
899 	__mark_inode_dirty(&ip->i_inode, I_DIRTY_SYNC | I_DIRTY_DATASYNC);
900 
901 	gfs2_trans_end(GFS2_SB(&ip->i_inode));
902 	return error;
903 }
904 
905 static int ea_set_simple_alloc(struct gfs2_inode *ip,
906 			       struct gfs2_ea_request *er, void *private)
907 {
908 	struct ea_set *es = private;
909 	struct gfs2_ea_header *ea = es->es_ea;
910 	int error;
911 
912 	gfs2_trans_add_meta(ip->i_gl, es->es_bh);
913 
914 	if (es->ea_split)
915 		ea = ea_split_ea(ea);
916 
917 	error = ea_write(ip, ea, er);
918 	if (error)
919 		return error;
920 
921 	if (es->es_el)
922 		ea_set_remove_stuffed(ip, es->es_el);
923 
924 	return 0;
925 }
926 
/*
 * ea_set_simple - ea_foreach callback that tries to reuse in-block space
 *
 * Looks for either an unused record big enough for the request, or a
 * live record with enough free tail to split.  Returns 1 (stopping the
 * walk) once the set has been performed, 0 to keep looking, or -errno.
 */
static int ea_set_simple(struct gfs2_inode *ip, struct buffer_head *bh,
			 struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
			 void *private)
{
	struct ea_set *es = private;
	unsigned int size;
	int stuffed;
	int error;

	stuffed = ea_calc_size(GFS2_SB(&ip->i_inode), es->es_er->er_name_len,
			       es->es_er->er_data_len, &size);

	if (ea->ea_type == GFS2_EATYPE_UNUSED) {
		if (GFS2_EA_REC_LEN(ea) < size)
			return 0;
		/* Reclaim any data blocks an earlier occupant left behind */
		if (!GFS2_EA_IS_STUFFED(ea)) {
			error = ea_remove_unstuffed(ip, bh, ea, prev, 1);
			if (error)
				return error;
		}
		es->ea_split = 0;
	} else if (GFS2_EA_REC_LEN(ea) - GFS2_EA_SIZE(ea) >= size)
		/* Live record with enough slack in its tail to split off */
		es->ea_split = 1;
	else
		return 0;

	if (stuffed) {
		error = ea_set_simple_noalloc(ip, bh, ea, es);
		if (error)
			return error;
	} else {
		unsigned int blks;

		es->es_bh = bh;
		es->es_ea = ea;
		blks = 2 + DIV_ROUND_UP(es->es_er->er_data_len,
					GFS2_SB(&ip->i_inode)->sd_jbsize);

		error = ea_alloc_skeleton(ip, es->es_er, blks,
					  ea_set_simple_alloc, es);
		if (error)
			return error;
	}

	return 1;
}
973 
/*
 * ea_set_block - skeleton callback that adds a brand-new EA block
 * @private: old stuffed EA location to remove on success, or NULL
 *
 * If the inode already uses an indirect EA layout, the new block's
 * address goes into the first free slot of the indirect block.
 * Otherwise the inode is converted to the indirect layout first: a new
 * indirect block is allocated whose first pointer is the old direct
 * EA block, and the new block takes the second slot.
 *
 * Returns: errno
 */
static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er,
			void *private)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *indbh, *newbh;
	__be64 *eablk;
	int error;
	int mh_size = sizeof(struct gfs2_meta_header);

	if (ip->i_diskflags & GFS2_DIF_EA_INDIRECT) {
		__be64 *end;

		error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT, 0,
				       &indbh);
		if (error)
			return error;

		if (gfs2_metatype_check(sdp, indbh, GFS2_METATYPE_IN)) {
			error = -EIO;
			goto out;
		}

		/* Find the first free pointer slot */
		eablk = (__be64 *)(indbh->b_data + mh_size);
		end = eablk + sdp->sd_inptrs;

		for (; eablk < end; eablk++)
			if (!*eablk)
				break;

		if (eablk == end) {
			error = -ENOSPC;
			goto out;
		}

		gfs2_trans_add_meta(ip->i_gl, indbh);
	} else {
		/* Convert: new indirect block, old EA block as first entry */
		u64 blk;
		unsigned int n = 1;
		error = gfs2_alloc_blocks(ip, &blk, &n, 0, NULL);
		if (error)
			return error;
		gfs2_trans_add_unrevoke(sdp, blk, 1);
		indbh = gfs2_meta_new(ip->i_gl, blk);
		gfs2_trans_add_meta(ip->i_gl, indbh);
		gfs2_metatype_set(indbh, GFS2_METATYPE_IN, GFS2_FORMAT_IN);
		gfs2_buffer_clear_tail(indbh, mh_size);

		eablk = (__be64 *)(indbh->b_data + mh_size);
		*eablk = cpu_to_be64(ip->i_eattr);
		ip->i_eattr = blk;
		ip->i_diskflags |= GFS2_DIF_EA_INDIRECT;
		gfs2_add_inode_blocks(&ip->i_inode, 1);

		/* The new EA block goes in the second slot */
		eablk++;
	}

	error = ea_alloc_blk(ip, &newbh);
	if (error)
		goto out;

	*eablk = cpu_to_be64((u64)newbh->b_blocknr);
	error = ea_write(ip, GFS2_EA_BH2FIRST(newbh), er);
	brelse(newbh);
	if (error)
		goto out;

	/* Replace case: drop the old stuffed EA now the new one exists */
	if (private)
		ea_set_remove_stuffed(ip, private);

out:
	brelse(indbh);
	return error;
}
1047 
1048 static int ea_set_i(struct gfs2_inode *ip, int type, const char *name,
1049 		    const void *value, size_t size, struct gfs2_ea_location *el)
1050 {
1051 	struct gfs2_ea_request er;
1052 	struct ea_set es;
1053 	unsigned int blks = 2;
1054 	int error;
1055 
1056 	er.er_type = type;
1057 	er.er_name = name;
1058 	er.er_data = (void *)value;
1059 	er.er_name_len = strlen(name);
1060 	er.er_data_len = size;
1061 
1062 	memset(&es, 0, sizeof(struct ea_set));
1063 	es.es_er = &er;
1064 	es.es_el = el;
1065 
1066 	error = ea_foreach(ip, ea_set_simple, &es);
1067 	if (error > 0)
1068 		return 0;
1069 	if (error)
1070 		return error;
1071 
1072 	if (!(ip->i_diskflags & GFS2_DIF_EA_INDIRECT))
1073 		blks++;
1074 	if (GFS2_EAREQ_SIZE_STUFFED(&er) > GFS2_SB(&ip->i_inode)->sd_jbsize)
1075 		blks += DIV_ROUND_UP(er.er_data_len, GFS2_SB(&ip->i_inode)->sd_jbsize);
1076 
1077 	return ea_alloc_skeleton(ip, &er, blks, ea_set_block, el);
1078 }
1079 
/*
 * ea_set_remove_unstuffed - remove the old unstuffed EA after a replace
 *
 * If the recorded predecessor was split since the location was found,
 * advance el_prev to the record that now actually precedes el_ea.
 */
static int ea_set_remove_unstuffed(struct gfs2_inode *ip,
				   struct gfs2_ea_location *el)
{
	if (el->el_prev && GFS2_EA2NEXT(el->el_prev) != el->el_ea) {
		el->el_prev = GFS2_EA2NEXT(el->el_prev);
		gfs2_assert_withdraw(GFS2_SB(&ip->i_inode),
				     GFS2_EA2NEXT(el->el_prev) == el->el_ea);
	}

	return ea_remove_unstuffed(ip, el->el_bh, el->el_ea, el->el_prev, 0);
}
1091 
/*
 * ea_remove_stuffed - remove a stuffed EA record
 *
 * Merges the record into its predecessor when one exists (propagating
 * the LAST flag), otherwise marks it unused.  No blocks are freed,
 * so only a small transaction is needed.
 *
 * Returns: errno
 */
static int ea_remove_stuffed(struct gfs2_inode *ip, struct gfs2_ea_location *el)
{
	struct gfs2_ea_header *ea = el->el_ea;
	struct gfs2_ea_header *prev = el->el_prev;
	int error;

	error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + RES_EATTR, 0);
	if (error)
		return error;

	gfs2_trans_add_meta(ip->i_gl, el->el_bh);

	if (prev) {
		u32 len;

		/* Fold this record's space into the previous one */
		len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
		prev->ea_rec_len = cpu_to_be32(len);

		if (GFS2_EA_IS_LAST(ea))
			prev->ea_flags |= GFS2_EAFLAG_LAST;
	} else {
		ea->ea_type = GFS2_EATYPE_UNUSED;
	}

	ip->i_inode.i_ctime = current_time(&ip->i_inode);
	__mark_inode_dirty(&ip->i_inode, I_DIRTY_SYNC | I_DIRTY_DATASYNC);

	gfs2_trans_end(GFS2_SB(&ip->i_inode));

	return error;
}
1123 
1124 /**
1125  * gfs2_xattr_remove - Remove a GFS2 extended attribute
1126  * @ip: The inode
1127  * @type: The type of the extended attribute
1128  * @name: The name of the extended attribute
1129  *
1130  * This is not called directly by the VFS since we use the (common)
1131  * scheme of making a "set with NULL data" mean a remove request. Note
1132  * that this is different from a set with zero length data.
1133  *
1134  * Returns: 0, or errno on failure
1135  */
1136 
1137 static int gfs2_xattr_remove(struct gfs2_inode *ip, int type, const char *name)
1138 {
1139 	struct gfs2_ea_location el;
1140 	int error;
1141 
1142 	if (!ip->i_eattr)
1143 		return -ENODATA;
1144 
1145 	error = gfs2_ea_find(ip, type, name, &el);
1146 	if (error)
1147 		return error;
1148 	if (!el.el_ea)
1149 		return -ENODATA;
1150 
1151 	if (GFS2_EA_IS_STUFFED(el.el_ea))
1152 		error = ea_remove_stuffed(ip, &el);
1153 	else
1154 		error = ea_remove_unstuffed(ip, el.el_bh, el.el_ea, el.el_prev, 0);
1155 
1156 	brelse(el.el_bh);
1157 
1158 	return error;
1159 }
1160 
/**
 * __gfs2_xattr_set - Set (or remove) a GFS2 extended attribute
 * @ip: The inode
 * @name: The name of the extended attribute
 * @value: The value of the extended attribute (NULL for remove)
 * @size: The size of the @value argument
 * @flags: Create or Replace
 * @type: The type of the extended attribute
 *
 * See gfs2_xattr_remove() for details of the removal of xattrs.
 *
 * Returns: 0 or errno on failure
 */

int __gfs2_xattr_set(struct inode *inode, const char *name,
		   const void *value, size_t size, int flags, int type)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_ea_location el;
	unsigned int namel = strlen(name);
	int error;

	if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
		return -EPERM;
	if (namel > GFS2_EA_MAX_NAME_LEN)
		return -ERANGE;

	/* NULL value means remove; removing a nonexistent xattr only
	   fails when the caller explicitly asked for a replace */
	if (value == NULL) {
		error = gfs2_xattr_remove(ip, type, name);
		if (error == -ENODATA && !(flags & XATTR_REPLACE))
			error = 0;
		return error;
	}

	if (ea_check_size(sdp, namel, size))
		return -ERANGE;

	/* No EA block yet: create is the only operation that makes sense */
	if (!ip->i_eattr) {
		if (flags & XATTR_REPLACE)
			return -ENODATA;
		return ea_init(ip, type, name, value, size);
	}

	error = gfs2_ea_find(ip, type, name, &el);
	if (error)
		return error;

	if (el.el_ea) {
		if (ip->i_diskflags & GFS2_DIF_APPENDONLY) {
			brelse(el.el_bh);
			return -EPERM;
		}

		/* Exists: XATTR_CREATE must fail, otherwise replace it.
		   If the old EA was unstuffed, its data blocks must be
		   freed after the new value is in place */
		error = -EEXIST;
		if (!(flags & XATTR_CREATE)) {
			int unstuffed = !GFS2_EA_IS_STUFFED(el.el_ea);
			error = ea_set_i(ip, type, name, value, size, &el);
			if (!error && unstuffed)
				ea_set_remove_unstuffed(ip, &el);
		}

		brelse(el.el_bh);
		return error;
	}

	/* Does not exist: XATTR_REPLACE must fail, otherwise create it */
	error = -ENODATA;
	if (!(flags & XATTR_REPLACE))
		error = ea_set_i(ip, type, name, value, size, NULL);

	return error;
}
1233 
/*
 * gfs2_xattr_set - xattr_handler ->set callback
 *
 * Takes the inode glock in EX (unless the caller already holds it) and
 * forwards to __gfs2_xattr_set() with the handler's on-disk EA type.
 */
static int gfs2_xattr_set(const struct xattr_handler *handler,
			  struct dentry *unused, struct inode *inode,
			  const char *name, const void *value,
			  size_t size, int flags)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int ret;

	/* Ensure a block/quota reservation structure is attached. */
	ret = gfs2_rsqa_alloc(ip);
	if (ret)
		return ret;

	/* May be called from gfs_setattr with the glock locked. */

	if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
		if (ret)
			return ret;
	} else {
		/* Caller's glock must already be exclusive to modify EAs. */
		if (WARN_ON_ONCE(ip->i_gl->gl_state != LM_ST_EXCLUSIVE))
			return -EIO;
		/* Mark gh unused so we know not to unlock it below. */
		gfs2_holder_mark_uninitialized(&gh);
	}
	ret = __gfs2_xattr_set(inode, name, value, size, flags, handler->flags);
	/* Only drop the glock if we acquired it ourselves. */
	if (gfs2_holder_initialized(&gh))
		gfs2_glock_dq_uninit(&gh);
	return ret;
}
1263 
/**
 * ea_dealloc_indirect - free every xattr block referenced from the
 *                       inode's indirect xattr block
 * @ip: the inode (ip->i_eattr addresses the indirect block)
 *
 * Makes two passes over the same pointer array in the indirect block:
 *
 * 1. Collect the resource groups covering the pointed-to blocks
 *    (gfs2_rlist_add) so their glocks can be acquired and a large
 *    enough transaction reserved.
 * 2. Free the blocks, batching physically contiguous runs into single
 *    gfs2_free_meta() calls, zeroing each pointer and decrementing the
 *    inode's block count.
 *
 * The indirect block itself is freed later by ea_dealloc_block().
 *
 * Returns: errno
 */
static int ea_dealloc_indirect(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrp_list rlist;
	struct buffer_head *indbh, *dibh;
	__be64 *eablk, *end;
	unsigned int rg_blocks = 0;
	u64 bstart = 0;
	unsigned int blen = 0;
	unsigned int blks = 0;
	unsigned int x;
	int error;

	error = gfs2_rindex_update(sdp);
	if (error)
		return error;

	memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));

	error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT, 0, &indbh);
	if (error)
		return error;

	if (gfs2_metatype_check(sdp, indbh, GFS2_METATYPE_IN)) {
		error = -EIO;
		goto out;
	}

	eablk = (__be64 *)(indbh->b_data + sizeof(struct gfs2_meta_header));
	end = eablk + sdp->sd_inptrs;

	/* Pass 1: gather the resource groups covering every EA block. */
	for (; eablk < end; eablk++) {
		u64 bn;

		if (!*eablk)
			break;
		bn = be64_to_cpu(*eablk);

		/* Coalesce contiguous block runs into one rlist entry. */
		if (bstart + blen == bn)
			blen++;
		else {
			if (bstart)
				gfs2_rlist_add(ip, &rlist, bstart);
			bstart = bn;
			blen = 1;
		}
		blks++;
	}
	if (bstart)
		gfs2_rlist_add(ip, &rlist, bstart);
	else
		goto out;	/* indirect block holds no pointers */

	gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);

	/* Reserve journal space for every rgrp's header/bitmap blocks. */
	for (x = 0; x < rlist.rl_rgrps; x++) {
		struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(rlist.rl_ghs[x].gh_gl);

		rg_blocks += rgd->rd_length;
	}

	error = gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs);
	if (error)
		goto out_rlist_free;

	error = gfs2_trans_begin(sdp, rg_blocks + RES_DINODE + RES_INDIRECT +
				 RES_STATFS + RES_QUOTA, blks);
	if (error)
		goto out_gunlock;

	gfs2_trans_add_meta(ip->i_gl, indbh);

	/* Pass 2: free the blocks and clear the pointers. */
	eablk = (__be64 *)(indbh->b_data + sizeof(struct gfs2_meta_header));
	bstart = 0;
	blen = 0;

	for (; eablk < end; eablk++) {
		u64 bn;

		if (!*eablk)
			break;
		bn = be64_to_cpu(*eablk);

		if (bstart + blen == bn)
			blen++;
		else {
			if (bstart)
				gfs2_free_meta(ip, bstart, blen);
			bstart = bn;
			blen = 1;
		}

		*eablk = 0;
		gfs2_add_inode_blocks(&ip->i_inode, -1);
	}
	if (bstart)
		gfs2_free_meta(ip, bstart, blen);

	ip->i_diskflags &= ~GFS2_DIF_EA_INDIRECT;

	/* Write the updated flags and block count back to the dinode. */
	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		gfs2_trans_add_meta(ip->i_gl, dibh);
		gfs2_dinode_out(ip, dibh->b_data);
		brelse(dibh);
	}

	gfs2_trans_end(sdp);

out_gunlock:
	gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs);
out_rlist_free:
	gfs2_rlist_free(&rlist);
out:
	brelse(indbh);
	return error;
}
1381 
/**
 * ea_dealloc_block - free the single block addressed by ip->i_eattr
 * @ip: the inode
 *
 * Frees the xattr block itself (or the indirect block, after
 * ea_dealloc_indirect() has emptied it), clears i_eattr, and writes
 * the dinode back within the same transaction.
 *
 * Returns: errno
 */
static int ea_dealloc_block(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;
	struct buffer_head *dibh;
	struct gfs2_holder gh;
	int error;

	error = gfs2_rindex_update(sdp);
	if (error)
		return error;

	/*
	 * Find the resource group the block lives in; its bitmap is
	 * modified under an exclusive rgrp glock below.
	 */
	rgd = gfs2_blk2rgrpd(sdp, ip->i_eattr, 1);
	if (!rgd) {
		/* Block outside any rgrp: on-disk inconsistency. */
		gfs2_consist_inode(ip);
		return -EIO;
	}

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (error)
		return error;

	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_DINODE + RES_STATFS +
				 RES_QUOTA, 1);
	if (error)
		goto out_gunlock;

	gfs2_free_meta(ip, ip->i_eattr, 1);

	ip->i_eattr = 0;
	gfs2_add_inode_blocks(&ip->i_inode, -1);

	/* Persist the cleared i_eattr and block count in the dinode. */
	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		gfs2_trans_add_meta(ip->i_gl, dibh);
		gfs2_dinode_out(ip, dibh->b_data);
		brelse(dibh);
	}

	gfs2_trans_end(sdp);

out_gunlock:
	gfs2_glock_dq_uninit(&gh);
	return error;
}
1427 
1428 /**
1429  * gfs2_ea_dealloc - deallocate the extended attribute fork
1430  * @ip: the inode
1431  *
1432  * Returns: errno
1433  */
1434 
1435 int gfs2_ea_dealloc(struct gfs2_inode *ip)
1436 {
1437 	int error;
1438 
1439 	error = gfs2_rindex_update(GFS2_SB(&ip->i_inode));
1440 	if (error)
1441 		return error;
1442 
1443 	error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
1444 	if (error)
1445 		return error;
1446 
1447 	error = ea_foreach(ip, ea_dealloc_unstuffed, NULL);
1448 	if (error)
1449 		goto out_quota;
1450 
1451 	if (ip->i_diskflags & GFS2_DIF_EA_INDIRECT) {
1452 		error = ea_dealloc_indirect(ip);
1453 		if (error)
1454 			goto out_quota;
1455 	}
1456 
1457 	error = ea_dealloc_block(ip);
1458 
1459 out_quota:
1460 	gfs2_quota_unhold(ip);
1461 	return error;
1462 }
1463 
/* "user." namespace: get/set go through the common GFS2 xattr code,
 * with GFS2_EATYPE_USR as the type passed via handler->flags. */
static const struct xattr_handler gfs2_xattr_user_handler = {
	.prefix = XATTR_USER_PREFIX,
	.flags  = GFS2_EATYPE_USR,
	.get    = gfs2_xattr_get,
	.set    = gfs2_xattr_set,
};
1470 
/* "security." namespace: same callbacks as the user handler, but with
 * GFS2_EATYPE_SECURITY as the on-disk type. */
static const struct xattr_handler gfs2_xattr_security_handler = {
	.prefix = XATTR_SECURITY_PREFIX,
	.flags  = GFS2_EATYPE_SECURITY,
	.get    = gfs2_xattr_get,
	.set    = gfs2_xattr_set,
};
1477 
/* Handler table exported to the VFS (presumably installed via
 * sb->s_xattr — confirm in super.c). NULL-terminated, as the VFS
 * requires; POSIX ACLs reuse the generic handlers. */
const struct xattr_handler *gfs2_xattr_handlers[] = {
	&gfs2_xattr_user_handler,
	&gfs2_xattr_security_handler,
	&posix_acl_access_xattr_handler,
	&posix_acl_default_xattr_handler,
	NULL,
};
1485 
1486