xref: /openbmc/linux/fs/gfs2/xattr.c (revision 74ee0477)
1 /*
2  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
3  * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
4  *
5  * This copyrighted material is made available to anyone wishing to use,
6  * modify, copy, or redistribute it subject to the terms and conditions
7  * of the GNU General Public License version 2.
8  */
9 
10 #include <linux/slab.h>
11 #include <linux/spinlock.h>
12 #include <linux/completion.h>
13 #include <linux/buffer_head.h>
14 #include <linux/xattr.h>
15 #include <linux/gfs2_ondisk.h>
16 #include <linux/posix_acl_xattr.h>
17 #include <linux/uaccess.h>
18 
19 #include "gfs2.h"
20 #include "incore.h"
21 #include "acl.h"
22 #include "xattr.h"
23 #include "glock.h"
24 #include "inode.h"
25 #include "meta_io.h"
26 #include "quota.h"
27 #include "rgrp.h"
28 #include "super.h"
29 #include "trans.h"
30 #include "util.h"
31 
/**
 * ea_calc_size - returns the actual number of bytes the request will take up
 *                (not counting any unstuffed data blocks)
 * @sdp: the filesystem superblock
 * @nsize: the length of the attribute name
 * @dsize: the length of the attribute data
 * @size: output - the on-disk size the EA record will occupy
 *
 * Returns: 1 if the EA should be stuffed
 */
41 
42 static int ea_calc_size(struct gfs2_sbd *sdp, unsigned int nsize, size_t dsize,
43 			unsigned int *size)
44 {
45 	unsigned int jbsize = sdp->sd_jbsize;
46 
47 	/* Stuffed */
48 	*size = ALIGN(sizeof(struct gfs2_ea_header) + nsize + dsize, 8);
49 
50 	if (*size <= jbsize)
51 		return 1;
52 
53 	/* Unstuffed */
54 	*size = ALIGN(sizeof(struct gfs2_ea_header) + nsize +
55 		      (sizeof(__be64) * DIV_ROUND_UP(dsize, jbsize)), 8);
56 
57 	return 0;
58 }
59 
60 static int ea_check_size(struct gfs2_sbd *sdp, unsigned int nsize, size_t dsize)
61 {
62 	unsigned int size;
63 
64 	if (dsize > GFS2_EA_MAX_DATA_LEN)
65 		return -ERANGE;
66 
67 	ea_calc_size(sdp, nsize, dsize, &size);
68 
69 	/* This can only happen with 512 byte blocks */
70 	if (size > sdp->sd_jbsize)
71 		return -ERANGE;
72 
73 	return 0;
74 }
75 
/*
 * Callback invoked by ea_foreach()/ea_foreach_i() once per EA record.
 * @prev is the preceding record in the same block (NULL for the first).
 * A non-zero return stops the iteration and is propagated to the caller;
 * by convention, callers treat a positive return as "found/handled".
 */
typedef int (*ea_call_t) (struct gfs2_inode *ip, struct buffer_head *bh,
			  struct gfs2_ea_header *ea,
			  struct gfs2_ea_header *prev, void *private);
79 
/*
 * ea_foreach_i - walk every EA record within a single EA block, calling
 * @ea_call on each.  Each record's length, bounds and type are validated
 * before use; any inconsistency marks the inode as inconsistent and
 * returns -EIO.
 */
static int ea_foreach_i(struct gfs2_inode *ip, struct buffer_head *bh,
			ea_call_t ea_call, void *data)
{
	struct gfs2_ea_header *ea, *prev = NULL;
	int error = 0;

	if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_EA))
		return -EIO;

	for (ea = GFS2_EA_BH2FIRST(bh);; prev = ea, ea = GFS2_EA2NEXT(ea)) {
		/* A zero record length would make the walk loop forever. */
		if (!GFS2_EA_REC_LEN(ea))
			goto fail;
		/* The whole record must lie inside the buffer. */
		if (!(bh->b_data <= (char *)ea && (char *)GFS2_EA2NEXT(ea) <=
						  bh->b_data + bh->b_size))
			goto fail;
		if (!GFS2_EATYPE_VALID(ea->ea_type))
			goto fail;

		error = ea_call(ip, bh, ea, prev, data);
		if (error)
			return error;

		if (GFS2_EA_IS_LAST(ea)) {
			/* The last record must end exactly at the buffer end. */
			if ((char *)GFS2_EA2NEXT(ea) !=
			    bh->b_data + bh->b_size)
				goto fail;
			break;
		}
	}

	return error;

fail:
	gfs2_consist_inode(ip);
	return -EIO;
}
116 
/*
 * ea_foreach - walk all EA records of an inode, calling @ea_call on each.
 *
 * The inode's EA tree is either a single EA block, or (with the
 * GFS2_DIF_EA_INDIRECT flag set) an indirect block of pointers to EA
 * blocks; both layouts are handled here.  Returns 0, a callback's
 * non-zero return, or an errno.
 */
static int ea_foreach(struct gfs2_inode *ip, ea_call_t ea_call, void *data)
{
	struct buffer_head *bh, *eabh;
	__be64 *eablk, *end;
	int error;

	error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT, 0, &bh);
	if (error)
		return error;

	/* Simple case: i_eattr points directly at one EA block. */
	if (!(ip->i_diskflags & GFS2_DIF_EA_INDIRECT)) {
		error = ea_foreach_i(ip, bh, ea_call, data);
		goto out;
	}

	if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_IN)) {
		error = -EIO;
		goto out;
	}

	/* Indirect case: iterate the pointer array; a zero pointer
	   terminates the used portion. */
	eablk = (__be64 *)(bh->b_data + sizeof(struct gfs2_meta_header));
	end = eablk + GFS2_SB(&ip->i_inode)->sd_inptrs;

	for (; eablk < end; eablk++) {
		u64 bn;

		if (!*eablk)
			break;
		bn = be64_to_cpu(*eablk);

		error = gfs2_meta_read(ip->i_gl, bn, DIO_WAIT, 0, &eabh);
		if (error)
			break;
		error = ea_foreach_i(ip, eabh, ea_call, data);
		brelse(eabh);
		if (error)
			break;
	}
out:
	brelse(bh);
	return error;
}
159 
/* Search parameters and result slot for gfs2_ea_find()/ea_find_i(). */
struct ea_find {
	int type;			/* GFS2_EATYPE_* to match */
	const char *name;		/* attribute name (no prefix) */
	size_t namel;			/* strlen(name) */
	struct gfs2_ea_location *ef_el;	/* filled in when a match is found */
};
166 
167 static int ea_find_i(struct gfs2_inode *ip, struct buffer_head *bh,
168 		     struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
169 		     void *private)
170 {
171 	struct ea_find *ef = private;
172 
173 	if (ea->ea_type == GFS2_EATYPE_UNUSED)
174 		return 0;
175 
176 	if (ea->ea_type == ef->type) {
177 		if (ea->ea_name_len == ef->namel &&
178 		    !memcmp(GFS2_EA2NAME(ea), ef->name, ea->ea_name_len)) {
179 			struct gfs2_ea_location *el = ef->ef_el;
180 			get_bh(bh);
181 			el->el_bh = bh;
182 			el->el_ea = ea;
183 			el->el_prev = prev;
184 			return 1;
185 		}
186 	}
187 
188 	return 0;
189 }
190 
191 static int gfs2_ea_find(struct gfs2_inode *ip, int type, const char *name,
192 			struct gfs2_ea_location *el)
193 {
194 	struct ea_find ef;
195 	int error;
196 
197 	ef.type = type;
198 	ef.name = name;
199 	ef.namel = strlen(name);
200 	ef.ef_el = el;
201 
202 	memset(el, 0, sizeof(struct gfs2_ea_location));
203 
204 	error = ea_foreach(ip, ea_find_i, &ef);
205 	if (error > 0)
206 		return 0;
207 
208 	return error;
209 }
210 
/**
 * ea_dealloc_unstuffed - free the data blocks of an unstuffed EA
 * @ip: the inode owning the EA
 * @bh: buffer containing the EA record
 * @ea: the EA record whose data blocks are to be freed
 * @prev: the previous EA record in the block (or NULL)
 * @private: if non-NULL, keep the EA record itself and only free its data
 *
 * Take advantage of the fact that all unstuffed blocks are
 * allocated from the same RG.  But watch, this may not always
 * be true.
 *
 * Returns: errno
 */
225 
static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
				struct gfs2_ea_header *ea,
				struct gfs2_ea_header *prev, void *private)
{
	/* non-NULL means: keep the record, only free its data blocks */
	int *leave = private;
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;
	struct gfs2_holder rg_gh;
	struct buffer_head *dibh;
	__be64 *dataptrs;
	u64 bn = 0;
	u64 bstart = 0;		/* start of the current contiguous run */
	unsigned int blen = 0;	/* length of the current contiguous run */
	unsigned int blks = 0;
	unsigned int x;
	int error;

	error = gfs2_rindex_update(sdp);
	if (error)
		return error;

	if (GFS2_EA_IS_STUFFED(ea))
		return 0;

	/* First pass: count the data blocks so the transaction can be
	   sized; bn ends up holding any one of them, used below to find
	   the resource group (all are assumed to be in the same RG). */
	dataptrs = GFS2_EA2DATAPTRS(ea);
	for (x = 0; x < ea->ea_num_ptrs; x++, dataptrs++) {
		if (*dataptrs) {
			blks++;
			bn = be64_to_cpu(*dataptrs);
		}
	}
	if (!blks)
		return 0;

	rgd = gfs2_blk2rgrpd(sdp, bn, 1);
	if (!rgd) {
		gfs2_consist_inode(ip);
		return -EIO;
	}

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &rg_gh);
	if (error)
		return error;

	error = gfs2_trans_begin(sdp, rgd->rd_length + RES_DINODE +
				 RES_EATTR + RES_STATFS + RES_QUOTA, blks);
	if (error)
		goto out_gunlock;

	gfs2_trans_add_meta(ip->i_gl, bh);

	/* Second pass: free the blocks, batching contiguous runs into
	   single gfs2_free_meta() calls, and clear the pointers. */
	dataptrs = GFS2_EA2DATAPTRS(ea);
	for (x = 0; x < ea->ea_num_ptrs; x++, dataptrs++) {
		if (!*dataptrs)
			break;
		bn = be64_to_cpu(*dataptrs);

		if (bstart + blen == bn)
			blen++;
		else {
			if (bstart)
				gfs2_free_meta(ip, bstart, blen);
			bstart = bn;
			blen = 1;
		}

		*dataptrs = 0;
		gfs2_add_inode_blocks(&ip->i_inode, -1);
	}
	/* Flush the final run. */
	if (bstart)
		gfs2_free_meta(ip, bstart, blen);

	/* Unless asked to leave the record, remove it: either merge it
	   into the previous record or mark it unused. */
	if (prev && !leave) {
		u32 len;

		len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
		prev->ea_rec_len = cpu_to_be32(len);

		if (GFS2_EA_IS_LAST(ea))
			prev->ea_flags |= GFS2_EAFLAG_LAST;
	} else {
		ea->ea_type = GFS2_EATYPE_UNUSED;
		ea->ea_num_ptrs = 0;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		ip->i_inode.i_ctime = current_time(&ip->i_inode);
		gfs2_trans_add_meta(ip->i_gl, dibh);
		gfs2_dinode_out(ip, dibh->b_data);
		brelse(dibh);
	}

	gfs2_trans_end(sdp);

out_gunlock:
	gfs2_glock_dq_uninit(&rg_gh);
	return error;
}
325 
326 static int ea_remove_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
327 			       struct gfs2_ea_header *ea,
328 			       struct gfs2_ea_header *prev, int leave)
329 {
330 	int error;
331 
332 	error = gfs2_rindex_update(GFS2_SB(&ip->i_inode));
333 	if (error)
334 		return error;
335 
336 	error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
337 	if (error)
338 		goto out_alloc;
339 
340 	error = ea_dealloc_unstuffed(ip, bh, ea, prev, (leave) ? &error : NULL);
341 
342 	gfs2_quota_unhold(ip);
343 out_alloc:
344 	return error;
345 }
346 
/* Accumulator state for listxattr iteration (see ea_list_i()). */
struct ea_list {
	struct gfs2_ea_request *ei_er;	/* output buffer description */
	unsigned int ei_size;		/* bytes written (or needed) so far */
};
351 
352 static inline unsigned int gfs2_ea_strlen(struct gfs2_ea_header *ea)
353 {
354 	switch (ea->ea_type) {
355 	case GFS2_EATYPE_USR:
356 		return 5 + ea->ea_name_len + 1;
357 	case GFS2_EATYPE_SYS:
358 		return 7 + ea->ea_name_len + 1;
359 	case GFS2_EATYPE_SECURITY:
360 		return 9 + ea->ea_name_len + 1;
361 	default:
362 		return 0;
363 	}
364 }
365 
/*
 * ea_list_i - ea_foreach() callback for gfs2_listxattr().
 *
 * Accumulates the total size of all names in ei->ei_size.  When the
 * caller supplied a buffer (er_data_len != 0), also writes each
 * "prefix.name\0" entry into it, returning -ERANGE on overflow.
 */
static int ea_list_i(struct gfs2_inode *ip, struct buffer_head *bh,
		     struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
		     void *private)
{
	struct ea_list *ei = private;
	struct gfs2_ea_request *er = ei->ei_er;
	unsigned int ea_size = gfs2_ea_strlen(ea);

	if (ea->ea_type == GFS2_EATYPE_UNUSED)
		return 0;

	/* er_data_len == 0 means size-probe only; skip the copy. */
	if (er->er_data_len) {
		char *prefix = NULL;
		unsigned int l = 0;
		char c = 0;

		if (ei->ei_size + ea_size > er->er_data_len)
			return -ERANGE;

		switch (ea->ea_type) {
		case GFS2_EATYPE_USR:
			prefix = "user.";
			l = 5;
			break;
		case GFS2_EATYPE_SYS:
			prefix = "system.";
			l = 7;
			break;
		case GFS2_EATYPE_SECURITY:
			prefix = "security.";
			l = 9;
			break;
		}

		/* ea_foreach_i() has already validated ea_type, so one of
		   the cases above must have matched. */
		BUG_ON(l == 0);

		/* Emit "<prefix><name>\0" at the current offset. */
		memcpy(er->er_data + ei->ei_size, prefix, l);
		memcpy(er->er_data + ei->ei_size + l, GFS2_EA2NAME(ea),
		       ea->ea_name_len);
		memcpy(er->er_data + ei->ei_size + ea_size - 1, &c, 1);
	}

	ei->ei_size += ea_size;

	return 0;
}
412 
413 /**
414  * gfs2_listxattr - List gfs2 extended attributes
415  * @dentry: The dentry whose inode we are interested in
416  * @buffer: The buffer to write the results
417  * @size: The size of the buffer
418  *
419  * Returns: actual size of data on success, -errno on error
420  */
421 
422 ssize_t gfs2_listxattr(struct dentry *dentry, char *buffer, size_t size)
423 {
424 	struct gfs2_inode *ip = GFS2_I(d_inode(dentry));
425 	struct gfs2_ea_request er;
426 	struct gfs2_holder i_gh;
427 	int error;
428 
429 	memset(&er, 0, sizeof(struct gfs2_ea_request));
430 	if (size) {
431 		er.er_data = buffer;
432 		er.er_data_len = size;
433 	}
434 
435 	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
436 	if (error)
437 		return error;
438 
439 	if (ip->i_eattr) {
440 		struct ea_list ei = { .ei_er = &er, .ei_size = 0 };
441 
442 		error = ea_foreach(ip, ea_list_i, &ei);
443 		if (!error)
444 			error = ei.ei_size;
445 	}
446 
447 	gfs2_glock_dq_uninit(&i_gh);
448 
449 	return error;
450 }
451 
/**
 * gfs2_iter_unstuffed - copies the unstuffed xattr data to/from the
 *                       request buffer
 * @ip: The GFS2 inode
 * @ea: The extended attribute header structure
 * @din: The data to be copied in
 * @dout: The data to be copied out (one of din,dout will be NULL)
 *
 * Returns: errno
 */
462 
static int gfs2_iter_unstuffed(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
			       const char *din, char *dout)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head **bh;
	unsigned int amount = GFS2_EA_DATA_LEN(ea);
	unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize);
	__be64 *dataptrs = GFS2_EA2DATAPTRS(ea);
	unsigned int x;
	int error = 0;
	unsigned char *pos;
	unsigned cp_size;

	bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_NOFS);
	if (!bh)
		return -ENOMEM;

	/* Issue all the reads asynchronously first ... */
	for (x = 0; x < nptrs; x++) {
		error = gfs2_meta_read(ip->i_gl, be64_to_cpu(*dataptrs), 0, 0,
				       bh + x);
		if (error) {
			/* Drop the buffers already submitted. */
			while (x--)
				brelse(bh[x]);
			goto out;
		}
		dataptrs++;
	}

	/* ... then wait for each in order and do the copy. */
	for (x = 0; x < nptrs; x++) {
		error = gfs2_meta_wait(sdp, bh[x]);
		if (error) {
			/* Release this and all remaining buffers. */
			for (; x < nptrs; x++)
				brelse(bh[x]);
			goto out;
		}
		if (gfs2_metatype_check(sdp, bh[x], GFS2_METATYPE_ED)) {
			for (; x < nptrs; x++)
				brelse(bh[x]);
			error = -EIO;
			goto out;
		}

		pos = bh[x]->b_data + sizeof(struct gfs2_meta_header);
		/* The final block may hold less than a full jbsize chunk. */
		cp_size = (sdp->sd_jbsize > amount) ? amount : sdp->sd_jbsize;

		if (dout) {
			memcpy(dout, pos, cp_size);
			dout += sdp->sd_jbsize;
		}

		if (din) {
			/* Writing: journal the block before modifying it. */
			gfs2_trans_add_meta(ip->i_gl, bh[x]);
			memcpy(pos, din, cp_size);
			din += sdp->sd_jbsize;
		}

		amount -= sdp->sd_jbsize;
		brelse(bh[x]);
	}

out:
	kfree(bh);
	return error;
}
527 
528 static int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el,
529 			    char *data, size_t size)
530 {
531 	int ret;
532 	size_t len = GFS2_EA_DATA_LEN(el->el_ea);
533 	if (len > size)
534 		return -ERANGE;
535 
536 	if (GFS2_EA_IS_STUFFED(el->el_ea)) {
537 		memcpy(data, GFS2_EA2DATA(el->el_ea), len);
538 		return len;
539 	}
540 	ret = gfs2_iter_unstuffed(ip, el->el_ea, NULL, data);
541 	if (ret < 0)
542 		return ret;
543 	return len;
544 }
545 
/*
 * gfs2_xattr_acl_get - read a system-namespace EA into a freshly
 * allocated buffer (used for ACLs).
 *
 * On success, *ppdata points to the kmalloc'd data (caller frees) and
 * the data length is returned.  Returns 0 without touching *ppdata if
 * the attribute does not exist or is empty.
 */
int gfs2_xattr_acl_get(struct gfs2_inode *ip, const char *name, char **ppdata)
{
	struct gfs2_ea_location el;
	int error;
	int len;
	char *data;

	error = gfs2_ea_find(ip, GFS2_EATYPE_SYS, name, &el);
	if (error)
		return error;
	/* No match or zero-length data: nothing to return (error == 0). */
	if (!el.el_ea)
		goto out;
	if (!GFS2_EA_DATA_LEN(el.el_ea))
		goto out;

	len = GFS2_EA_DATA_LEN(el.el_ea);
	data = kmalloc(len, GFP_NOFS);
	error = -ENOMEM;
	if (data == NULL)
		goto out;

	/* On success this returns len (> 0), which we pass through. */
	error = gfs2_ea_get_copy(ip, &el, data, len);
	if (error < 0)
		kfree(data);
	else
		*ppdata = data;
out:
	/* el.el_bh may be NULL when nothing matched; brelse handles that. */
	brelse(el.el_bh);
	return error;
}
576 
577 /**
578  * gfs2_xattr_get - Get a GFS2 extended attribute
579  * @inode: The inode
580  * @name: The name of the extended attribute
581  * @buffer: The buffer to write the result into
582  * @size: The size of the buffer
583  * @type: The type of extended attribute
584  *
585  * Returns: actual size of data on success, -errno on error
586  */
587 static int __gfs2_xattr_get(struct inode *inode, const char *name,
588 			    void *buffer, size_t size, int type)
589 {
590 	struct gfs2_inode *ip = GFS2_I(inode);
591 	struct gfs2_ea_location el;
592 	int error;
593 
594 	if (!ip->i_eattr)
595 		return -ENODATA;
596 	if (strlen(name) > GFS2_EA_MAX_NAME_LEN)
597 		return -EINVAL;
598 
599 	error = gfs2_ea_find(ip, type, name, &el);
600 	if (error)
601 		return error;
602 	if (!el.el_ea)
603 		return -ENODATA;
604 	if (size)
605 		error = gfs2_ea_get_copy(ip, &el, buffer, size);
606 	else
607 		error = GFS2_EA_DATA_LEN(el.el_ea);
608 	brelse(el.el_bh);
609 
610 	return error;
611 }
612 
613 static int gfs2_xattr_get(const struct xattr_handler *handler,
614 			  struct dentry *unused, struct inode *inode,
615 			  const char *name, void *buffer, size_t size)
616 {
617 	struct gfs2_inode *ip = GFS2_I(inode);
618 	struct gfs2_holder gh;
619 	bool need_unlock = false;
620 	int ret;
621 
622 	/* During lookup, SELinux calls this function with the glock locked. */
623 
624 	if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
625 		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &gh);
626 		if (ret)
627 			return ret;
628 		need_unlock = true;
629 	}
630 	ret = __gfs2_xattr_get(inode, name, buffer, size, handler->flags);
631 	if (need_unlock)
632 		gfs2_glock_dq_uninit(&gh);
633 	return ret;
634 }
635 
636 /**
637  * ea_alloc_blk - allocates a new block for extended attributes.
638  * @ip: A pointer to the inode that's getting extended attributes
639  * @bhp: Pointer to pointer to a struct buffer_head
640  *
641  * Returns: errno
642  */
643 
static int ea_alloc_blk(struct gfs2_inode *ip, struct buffer_head **bhp)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_ea_header *ea;
	unsigned int n = 1;
	u64 block;
	int error;

	error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
	if (error)
		return error;
	gfs2_trans_add_unrevoke(sdp, block, 1);
	/* NOTE(review): gfs2_meta_new()'s result is not checked here;
	   presumably it cannot fail in this context - confirm. */
	*bhp = gfs2_meta_new(ip->i_gl, block);
	gfs2_trans_add_meta(ip->i_gl, *bhp);
	gfs2_metatype_set(*bhp, GFS2_METATYPE_EA, GFS2_FORMAT_EA);
	gfs2_buffer_clear_tail(*bhp, sizeof(struct gfs2_meta_header));

	/* Initialize the block as a single unused record spanning the
	   whole journaled data area. */
	ea = GFS2_EA_BH2FIRST(*bhp);
	ea->ea_rec_len = cpu_to_be32(sdp->sd_jbsize);
	ea->ea_type = GFS2_EATYPE_UNUSED;
	ea->ea_flags = GFS2_EAFLAG_LAST;
	ea->ea_num_ptrs = 0;

	gfs2_add_inode_blocks(&ip->i_inode, 1);

	return 0;
}
671 
672 /**
673  * ea_write - writes the request info to an ea, creating new blocks if
674  *            necessary
675  * @ip: inode that is being modified
676  * @ea: the location of the new ea in a block
677  * @er: the write request
678  *
679  * Note: does not update ea_rec_len or the GFS2_EAFLAG_LAST bin of ea_flags
680  *
681  * returns : errno
682  */
683 
static int ea_write(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
		    struct gfs2_ea_request *er)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	int error;

	/* Fill in the record header and name. */
	ea->ea_data_len = cpu_to_be32(er->er_data_len);
	ea->ea_name_len = er->er_name_len;
	ea->ea_type = er->er_type;
	ea->__pad = 0;

	memcpy(GFS2_EA2NAME(ea), er->er_name, er->er_name_len);

	if (GFS2_EAREQ_SIZE_STUFFED(er) <= sdp->sd_jbsize) {
		/* Stuffed: data fits inline after the name. */
		ea->ea_num_ptrs = 0;
		memcpy(GFS2_EA2DATA(ea), er->er_data, er->er_data_len);
	} else {
		/* Unstuffed: allocate a data block per jbsize chunk and
		   store the block pointers after the name. */
		__be64 *dataptr = GFS2_EA2DATAPTRS(ea);
		const char *data = er->er_data;
		unsigned int data_len = er->er_data_len;
		unsigned int copy;
		unsigned int x;

		ea->ea_num_ptrs = DIV_ROUND_UP(er->er_data_len, sdp->sd_jbsize);
		for (x = 0; x < ea->ea_num_ptrs; x++) {
			struct buffer_head *bh;
			u64 block;
			int mh_size = sizeof(struct gfs2_meta_header);
			unsigned int n = 1;

			/* NOTE(review): on failure partway through, blocks
			   already allocated in earlier iterations are not
			   unwound here - presumably cleaned up by the
			   caller's transaction/dealloc path; confirm. */
			error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
			if (error)
				return error;
			gfs2_trans_add_unrevoke(sdp, block, 1);
			bh = gfs2_meta_new(ip->i_gl, block);
			gfs2_trans_add_meta(ip->i_gl, bh);
			gfs2_metatype_set(bh, GFS2_METATYPE_ED, GFS2_FORMAT_ED);

			gfs2_add_inode_blocks(&ip->i_inode, 1);

			copy = data_len > sdp->sd_jbsize ? sdp->sd_jbsize :
							   data_len;
			memcpy(bh->b_data + mh_size, data, copy);
			/* Zero-fill the tail of a partial final block. */
			if (copy < sdp->sd_jbsize)
				memset(bh->b_data + mh_size + copy, 0,
				       sdp->sd_jbsize - copy);

			*dataptr++ = cpu_to_be64(bh->b_blocknr);
			data += copy;
			data_len -= copy;

			brelse(bh);
		}

		/* All the data must have been consumed exactly. */
		gfs2_assert_withdraw(sdp, !data_len);
	}

	return 0;
}
743 
/* Work callback run by ea_alloc_skeleton() inside its transaction. */
typedef int (*ea_skeleton_call_t) (struct gfs2_inode *ip,
				   struct gfs2_ea_request *er, void *private);
746 
/*
 * ea_alloc_skeleton - common scaffolding for EA operations that allocate.
 *
 * Reserves @blks blocks (quota check, in-place reservation), opens a
 * transaction, runs @skeleton_call to do the actual work, updates the
 * dinode timestamp, and tears everything down again in reverse order.
 */
static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
			     unsigned int blks,
			     ea_skeleton_call_t skeleton_call, void *private)
{
	struct gfs2_alloc_parms ap = { .target = blks };
	struct buffer_head *dibh;
	int error;

	error = gfs2_rindex_update(GFS2_SB(&ip->i_inode));
	if (error)
		return error;

	error = gfs2_quota_lock_check(ip, &ap);
	if (error)
		return error;

	error = gfs2_inplace_reserve(ip, &ap);
	if (error)
		goto out_gunlock_q;

	error = gfs2_trans_begin(GFS2_SB(&ip->i_inode),
				 blks + gfs2_rg_blocks(ip, blks) +
				 RES_DINODE + RES_STATFS + RES_QUOTA, 0);
	if (error)
		goto out_ipres;

	error = skeleton_call(ip, er, private);
	if (error)
		goto out_end_trans;

	/* Work succeeded: bump ctime and write back the dinode. */
	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		ip->i_inode.i_ctime = current_time(&ip->i_inode);
		gfs2_trans_add_meta(ip->i_gl, dibh);
		gfs2_dinode_out(ip, dibh->b_data);
		brelse(dibh);
	}

out_end_trans:
	gfs2_trans_end(GFS2_SB(&ip->i_inode));
out_ipres:
	gfs2_inplace_release(ip);
out_gunlock_q:
	gfs2_quota_unlock(ip);
	return error;
}
793 
794 static int ea_init_i(struct gfs2_inode *ip, struct gfs2_ea_request *er,
795 		     void *private)
796 {
797 	struct buffer_head *bh;
798 	int error;
799 
800 	error = ea_alloc_blk(ip, &bh);
801 	if (error)
802 		return error;
803 
804 	ip->i_eattr = bh->b_blocknr;
805 	error = ea_write(ip, GFS2_EA_BH2FIRST(bh), er);
806 
807 	brelse(bh);
808 
809 	return error;
810 }
811 
/**
 * ea_init - initializes a new eattr block
 * @ip: the inode getting its first extended attribute
 * @type: GFS2_EATYPE_* of the new attribute
 * @name: the attribute name
 * @data: the attribute value
 * @size: length of @data
 *
 * Returns: errno
 */
819 
820 static int ea_init(struct gfs2_inode *ip, int type, const char *name,
821 		   const void *data, size_t size)
822 {
823 	struct gfs2_ea_request er;
824 	unsigned int jbsize = GFS2_SB(&ip->i_inode)->sd_jbsize;
825 	unsigned int blks = 1;
826 
827 	er.er_type = type;
828 	er.er_name = name;
829 	er.er_name_len = strlen(name);
830 	er.er_data = (void *)data;
831 	er.er_data_len = size;
832 
833 	if (GFS2_EAREQ_SIZE_STUFFED(&er) > jbsize)
834 		blks += DIV_ROUND_UP(er.er_data_len, jbsize);
835 
836 	return ea_alloc_skeleton(ip, &er, blks, ea_init_i, NULL);
837 }
838 
/*
 * ea_split_ea - split the free tail off an EA record.
 *
 * Shrinks @ea to its used size and creates a new unused record in the
 * leftover space, transferring the LAST flag if @ea carried it.
 * Returns the new (unused) record.
 */
static struct gfs2_ea_header *ea_split_ea(struct gfs2_ea_header *ea)
{
	u32 ea_size = GFS2_EA_SIZE(ea);
	struct gfs2_ea_header *new = (struct gfs2_ea_header *)((char *)ea +
				     ea_size);
	u32 new_size = GFS2_EA_REC_LEN(ea) - ea_size;
	int last = ea->ea_flags & GFS2_EAFLAG_LAST;

	ea->ea_rec_len = cpu_to_be32(ea_size);
	/* XOR clears the LAST bit iff it was set (last is either the
	   flag value or 0), moving it onto the new record below. */
	ea->ea_flags ^= last;

	new->ea_rec_len = cpu_to_be32(new_size);
	new->ea_flags = last;

	return new;
}
855 
/*
 * ea_set_remove_stuffed - remove the old (stuffed) record after a set
 * has written the replacement elsewhere.
 *
 * Merges the record into its predecessor when possible, otherwise just
 * marks it unused.
 */
static void ea_set_remove_stuffed(struct gfs2_inode *ip,
				  struct gfs2_ea_location *el)
{
	struct gfs2_ea_header *ea = el->el_ea;
	struct gfs2_ea_header *prev = el->el_prev;
	u32 len;

	gfs2_trans_add_meta(ip->i_gl, el->el_bh);

	if (!prev || !GFS2_EA_IS_STUFFED(ea)) {
		ea->ea_type = GFS2_EATYPE_UNUSED;
		return;
	} else if (GFS2_EA2NEXT(prev) != ea) {
		/* A record was inserted between prev and ea by the set;
		   advance prev so the merge below stays adjacent. */
		prev = GFS2_EA2NEXT(prev);
		gfs2_assert_withdraw(GFS2_SB(&ip->i_inode), GFS2_EA2NEXT(prev) == ea);
	}

	/* Fold ea's space into prev, carrying over the LAST flag. */
	len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
	prev->ea_rec_len = cpu_to_be32(len);

	if (GFS2_EA_IS_LAST(ea))
		prev->ea_flags |= GFS2_EAFLAG_LAST;
}
879 
/* State shared between the ea_set_* helpers during a set operation. */
struct ea_set {
	int ea_split;			/* split the target record first? */

	struct gfs2_ea_request *es_er;	/* the set request */
	struct gfs2_ea_location *es_el;	/* old record to remove, if any */

	struct buffer_head *es_bh;	/* block holding the target record */
	struct gfs2_ea_header *es_ea;	/* the record being (re)written */
};
889 
/*
 * ea_set_simple_noalloc - write a stuffed EA into existing free space
 * (no block allocation needed), inside its own small transaction.
 */
static int ea_set_simple_noalloc(struct gfs2_inode *ip, struct buffer_head *bh,
				 struct gfs2_ea_header *ea, struct ea_set *es)
{
	struct gfs2_ea_request *er = es->es_er;
	struct buffer_head *dibh;
	int error;

	error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + 2 * RES_EATTR, 0);
	if (error)
		return error;

	gfs2_trans_add_meta(ip->i_gl, bh);

	/* Carve off the unused tail of the record if requested. */
	if (es->ea_split)
		ea = ea_split_ea(ea);

	ea_write(ip, ea, er);

	/* Replacing an existing attribute: drop the old record. */
	if (es->es_el)
		ea_set_remove_stuffed(ip, es->es_el);

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto out;
	ip->i_inode.i_ctime = current_time(&ip->i_inode);
	gfs2_trans_add_meta(ip->i_gl, dibh);
	gfs2_dinode_out(ip, dibh->b_data);
	brelse(dibh);
out:
	gfs2_trans_end(GFS2_SB(&ip->i_inode));
	return error;
}
922 
923 static int ea_set_simple_alloc(struct gfs2_inode *ip,
924 			       struct gfs2_ea_request *er, void *private)
925 {
926 	struct ea_set *es = private;
927 	struct gfs2_ea_header *ea = es->es_ea;
928 	int error;
929 
930 	gfs2_trans_add_meta(ip->i_gl, es->es_bh);
931 
932 	if (es->ea_split)
933 		ea = ea_split_ea(ea);
934 
935 	error = ea_write(ip, ea, er);
936 	if (error)
937 		return error;
938 
939 	if (es->es_el)
940 		ea_set_remove_stuffed(ip, es->es_el);
941 
942 	return 0;
943 }
944 
/*
 * ea_set_simple - ea_foreach() callback trying to fit the new EA into
 * an existing record: either an unused record big enough, or the free
 * tail of a used one (split case).
 *
 * Returns: 1 if the EA was written here, 0 to keep searching, or errno.
 */
static int ea_set_simple(struct gfs2_inode *ip, struct buffer_head *bh,
			 struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
			 void *private)
{
	struct ea_set *es = private;
	unsigned int size;
	int stuffed;
	int error;

	stuffed = ea_calc_size(GFS2_SB(&ip->i_inode), es->es_er->er_name_len,
			       es->es_er->er_data_len, &size);

	if (ea->ea_type == GFS2_EATYPE_UNUSED) {
		if (GFS2_EA_REC_LEN(ea) < size)
			return 0;
		/* An unused record may still own data blocks from a
		   previous occupant; free them (leave=1 keeps the
		   record itself for reuse). */
		if (!GFS2_EA_IS_STUFFED(ea)) {
			error = ea_remove_unstuffed(ip, bh, ea, prev, 1);
			if (error)
				return error;
		}
		es->ea_split = 0;
	} else if (GFS2_EA_REC_LEN(ea) - GFS2_EA_SIZE(ea) >= size)
		es->ea_split = 1;
	else
		return 0;

	if (stuffed) {
		error = ea_set_simple_noalloc(ip, bh, ea, es);
		if (error)
			return error;
	} else {
		/* Unstuffed: need data blocks, so go through the full
		   allocation skeleton. */
		unsigned int blks;

		es->es_bh = bh;
		es->es_ea = ea;
		blks = 2 + DIV_ROUND_UP(es->es_er->er_data_len,
					GFS2_SB(&ip->i_inode)->sd_jbsize);

		error = ea_alloc_skeleton(ip, es->es_er, blks,
					  ea_set_simple_alloc, es);
		if (error)
			return error;
	}

	return 1;
}
991 
/*
 * ea_set_block - skeleton callback writing the EA into a brand-new EA
 * block, converting the inode to the indirect EA layout first if it
 * isn't already using one.
 *
 * @private, if non-NULL, is the location of the old record to remove.
 */
static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er,
			void *private)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *indbh, *newbh;
	__be64 *eablk;
	int error;
	int mh_size = sizeof(struct gfs2_meta_header);

	if (ip->i_diskflags & GFS2_DIF_EA_INDIRECT) {
		/* Already indirect: find the first free slot. */
		__be64 *end;

		error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT, 0,
				       &indbh);
		if (error)
			return error;

		if (gfs2_metatype_check(sdp, indbh, GFS2_METATYPE_IN)) {
			error = -EIO;
			goto out;
		}

		eablk = (__be64 *)(indbh->b_data + mh_size);
		end = eablk + sdp->sd_inptrs;

		for (; eablk < end; eablk++)
			if (!*eablk)
				break;

		if (eablk == end) {
			error = -ENOSPC;
			goto out;
		}

		gfs2_trans_add_meta(ip->i_gl, indbh);
	} else {
		/* Convert to indirect: allocate an indirect block whose
		   first pointer is the existing (direct) EA block. */
		u64 blk;
		unsigned int n = 1;
		error = gfs2_alloc_blocks(ip, &blk, &n, 0, NULL);
		if (error)
			return error;
		gfs2_trans_add_unrevoke(sdp, blk, 1);
		indbh = gfs2_meta_new(ip->i_gl, blk);
		gfs2_trans_add_meta(ip->i_gl, indbh);
		gfs2_metatype_set(indbh, GFS2_METATYPE_IN, GFS2_FORMAT_IN);
		gfs2_buffer_clear_tail(indbh, mh_size);

		eablk = (__be64 *)(indbh->b_data + mh_size);
		*eablk = cpu_to_be64(ip->i_eattr);
		ip->i_eattr = blk;
		ip->i_diskflags |= GFS2_DIF_EA_INDIRECT;
		gfs2_add_inode_blocks(&ip->i_inode, 1);

		/* The new EA block will go into the second slot. */
		eablk++;
	}

	error = ea_alloc_blk(ip, &newbh);
	if (error)
		goto out;

	*eablk = cpu_to_be64((u64)newbh->b_blocknr);
	error = ea_write(ip, GFS2_EA_BH2FIRST(newbh), er);
	brelse(newbh);
	if (error)
		goto out;

	/* Replacing an existing attribute: drop the old record. */
	if (private)
		ea_set_remove_stuffed(ip, private);

out:
	brelse(indbh);
	return error;
}
1065 
1066 static int ea_set_i(struct gfs2_inode *ip, int type, const char *name,
1067 		    const void *value, size_t size, struct gfs2_ea_location *el)
1068 {
1069 	struct gfs2_ea_request er;
1070 	struct ea_set es;
1071 	unsigned int blks = 2;
1072 	int error;
1073 
1074 	er.er_type = type;
1075 	er.er_name = name;
1076 	er.er_data = (void *)value;
1077 	er.er_name_len = strlen(name);
1078 	er.er_data_len = size;
1079 
1080 	memset(&es, 0, sizeof(struct ea_set));
1081 	es.es_er = &er;
1082 	es.es_el = el;
1083 
1084 	error = ea_foreach(ip, ea_set_simple, &es);
1085 	if (error > 0)
1086 		return 0;
1087 	if (error)
1088 		return error;
1089 
1090 	if (!(ip->i_diskflags & GFS2_DIF_EA_INDIRECT))
1091 		blks++;
1092 	if (GFS2_EAREQ_SIZE_STUFFED(&er) > GFS2_SB(&ip->i_inode)->sd_jbsize)
1093 		blks += DIV_ROUND_UP(er.er_data_len, GFS2_SB(&ip->i_inode)->sd_jbsize);
1094 
1095 	return ea_alloc_skeleton(ip, &er, blks, ea_set_block, el);
1096 }
1097 
1098 static int ea_set_remove_unstuffed(struct gfs2_inode *ip,
1099 				   struct gfs2_ea_location *el)
1100 {
1101 	if (el->el_prev && GFS2_EA2NEXT(el->el_prev) != el->el_ea) {
1102 		el->el_prev = GFS2_EA2NEXT(el->el_prev);
1103 		gfs2_assert_withdraw(GFS2_SB(&ip->i_inode),
1104 				     GFS2_EA2NEXT(el->el_prev) == el->el_ea);
1105 	}
1106 
1107 	return ea_remove_unstuffed(ip, el->el_bh, el->el_ea, el->el_prev, 0);
1108 }
1109 
/*
 * ea_remove_stuffed - remove a stuffed EA record in place, merging its
 * space into the previous record when there is one.
 */
static int ea_remove_stuffed(struct gfs2_inode *ip, struct gfs2_ea_location *el)
{
	struct gfs2_ea_header *ea = el->el_ea;
	struct gfs2_ea_header *prev = el->el_prev;
	struct buffer_head *dibh;
	int error;

	error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + RES_EATTR, 0);
	if (error)
		return error;

	gfs2_trans_add_meta(ip->i_gl, el->el_bh);

	if (prev) {
		/* Fold ea's space into prev, carrying the LAST flag. */
		u32 len;

		len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
		prev->ea_rec_len = cpu_to_be32(len);

		if (GFS2_EA_IS_LAST(ea))
			prev->ea_flags |= GFS2_EAFLAG_LAST;
	} else {
		/* First record in the block: just mark it unused. */
		ea->ea_type = GFS2_EATYPE_UNUSED;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		ip->i_inode.i_ctime = current_time(&ip->i_inode);
		gfs2_trans_add_meta(ip->i_gl, dibh);
		gfs2_dinode_out(ip, dibh->b_data);
		brelse(dibh);
	}

	gfs2_trans_end(GFS2_SB(&ip->i_inode));

	return error;
}
1147 
1148 /**
1149  * gfs2_xattr_remove - Remove a GFS2 extended attribute
1150  * @ip: The inode
1151  * @type: The type of the extended attribute
1152  * @name: The name of the extended attribute
1153  *
1154  * This is not called directly by the VFS since we use the (common)
1155  * scheme of making a "set with NULL data" mean a remove request. Note
1156  * that this is different from a set with zero length data.
1157  *
1158  * Returns: 0, or errno on failure
1159  */
1160 
1161 static int gfs2_xattr_remove(struct gfs2_inode *ip, int type, const char *name)
1162 {
1163 	struct gfs2_ea_location el;
1164 	int error;
1165 
1166 	if (!ip->i_eattr)
1167 		return -ENODATA;
1168 
1169 	error = gfs2_ea_find(ip, type, name, &el);
1170 	if (error)
1171 		return error;
1172 	if (!el.el_ea)
1173 		return -ENODATA;
1174 
1175 	if (GFS2_EA_IS_STUFFED(el.el_ea))
1176 		error = ea_remove_stuffed(ip, &el);
1177 	else
1178 		error = ea_remove_unstuffed(ip, el.el_bh, el.el_ea, el.el_prev, 0);
1179 
1180 	brelse(el.el_bh);
1181 
1182 	return error;
1183 }
1184 
1185 /**
1186  * __gfs2_xattr_set - Set (or remove) a GFS2 extended attribute
1187  * @ip: The inode
1188  * @name: The name of the extended attribute
1189  * @value: The value of the extended attribute (NULL for remove)
1190  * @size: The size of the @value argument
1191  * @flags: Create or Replace
1192  * @type: The type of the extended attribute
1193  *
1194  * See gfs2_xattr_remove() for details of the removal of xattrs.
1195  *
1196  * Returns: 0 or errno on failure
1197  */
1198 
int __gfs2_xattr_set(struct inode *inode, const char *name,
		   const void *value, size_t size, int flags, int type)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_ea_location el;
	unsigned int namel = strlen(name);
	int error;

	if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
		return -EPERM;
	if (namel > GFS2_EA_MAX_NAME_LEN)
		return -ERANGE;

	/* NULL value means remove (see gfs2_xattr_remove()).  Removing a
	   non-existent attribute only fails when REPLACE was requested. */
	if (value == NULL) {
		error = gfs2_xattr_remove(ip, type, name);
		if (error == -ENODATA && !(flags & XATTR_REPLACE))
			error = 0;
		return error;
	}

	if (ea_check_size(sdp, namel, size))
		return -ERANGE;

	/* No EA block yet: REPLACE cannot succeed, otherwise create one. */
	if (!ip->i_eattr) {
		if (flags & XATTR_REPLACE)
			return -ENODATA;
		return ea_init(ip, type, name, value, size);
	}

	error = gfs2_ea_find(ip, type, name, &el);
	if (error)
		return error;

	if (el.el_ea) {
		/* Attribute exists: CREATE must fail, otherwise replace. */
		if (ip->i_diskflags & GFS2_DIF_APPENDONLY) {
			brelse(el.el_bh);
			return -EPERM;
		}

		error = -EEXIST;
		if (!(flags & XATTR_CREATE)) {
			/* If the old record was unstuffed, its data
			   blocks must be freed after the new value is
			   written. */
			int unstuffed = !GFS2_EA_IS_STUFFED(el.el_ea);
			error = ea_set_i(ip, type, name, value, size, &el);
			if (!error && unstuffed)
				ea_set_remove_unstuffed(ip, &el);
		}

		brelse(el.el_bh);
		return error;
	}

	/* Attribute does not exist: REPLACE must fail, otherwise create. */
	error = -ENODATA;
	if (!(flags & XATTR_REPLACE))
		error = ea_set_i(ip, type, name, value, size, NULL);

	return error;
}
1257 
1258 static int gfs2_xattr_set(const struct xattr_handler *handler,
1259 			  struct dentry *unused, struct inode *inode,
1260 			  const char *name, const void *value,
1261 			  size_t size, int flags)
1262 {
1263 	struct gfs2_inode *ip = GFS2_I(inode);
1264 	struct gfs2_holder gh;
1265 	int ret;
1266 
1267 	ret = gfs2_rsqa_alloc(ip);
1268 	if (ret)
1269 		return ret;
1270 
1271 	ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
1272 	if (ret)
1273 		return ret;
1274 	ret = __gfs2_xattr_set(inode, name, value, size, flags, handler->flags);
1275 	gfs2_glock_dq_uninit(&gh);
1276 	return ret;
1277 }
1278 
/*
 * ea_dealloc_indirect - free all EA blocks referenced by an indirect block
 * @ip: the inode whose indirect EA block is being torn down
 *
 * Walks the indirect block's pointer array twice: first to collect the
 * resource groups that need locking (coalescing contiguous runs), then —
 * with all rgrp glocks held and a transaction open — to actually free the
 * blocks and clear the pointers.  Clears GFS2_DIF_EA_INDIRECT on success.
 *
 * Returns: errno
 */
static int ea_dealloc_indirect(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrp_list rlist;
	struct buffer_head *indbh, *dibh;
	__be64 *eablk, *end;
	unsigned int rg_blocks = 0;
	u64 bstart = 0;
	unsigned int blen = 0;
	unsigned int blks = 0;
	unsigned int x;
	int error;

	error = gfs2_rindex_update(sdp);
	if (error)
		return error;

	memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));

	error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT, 0, &indbh);
	if (error)
		return error;

	if (gfs2_metatype_check(sdp, indbh, GFS2_METATYPE_IN)) {
		error = -EIO;
		goto out;
	}

	eablk = (__be64 *)(indbh->b_data + sizeof(struct gfs2_meta_header));
	end = eablk + sdp->sd_inptrs;

	/*
	 * Pass 1: record which resource groups hold the EA blocks.
	 * Contiguous block runs are coalesced so each run adds only one
	 * rlist entry.
	 */
	for (; eablk < end; eablk++) {
		u64 bn;

		if (!*eablk)
			break;
		bn = be64_to_cpu(*eablk);

		if (bstart + blen == bn)
			blen++;
		else {
			if (bstart)
				gfs2_rlist_add(ip, &rlist, bstart);
			bstart = bn;
			blen = 1;
		}
		blks++;
	}
	if (bstart)
		gfs2_rlist_add(ip, &rlist, bstart);
	else
		goto out;	/* indirect block was empty; nothing to free */

	gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);

	/* Size the transaction: one bitmap region per resource group. */
	for (x = 0; x < rlist.rl_rgrps; x++) {
		struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(rlist.rl_ghs[x].gh_gl);

		rg_blocks += rgd->rd_length;
	}

	error = gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs);
	if (error)
		goto out_rlist_free;

	error = gfs2_trans_begin(sdp, rg_blocks + RES_DINODE + RES_INDIRECT +
				 RES_STATFS + RES_QUOTA, blks);
	if (error)
		goto out_gunlock;

	gfs2_trans_add_meta(ip->i_gl, indbh);

	/*
	 * Pass 2: now that all rgrp glocks are held and the transaction is
	 * open, free the blocks (again coalescing runs) and zero each
	 * pointer in the indirect block.
	 */
	eablk = (__be64 *)(indbh->b_data + sizeof(struct gfs2_meta_header));
	bstart = 0;
	blen = 0;

	for (; eablk < end; eablk++) {
		u64 bn;

		if (!*eablk)
			break;
		bn = be64_to_cpu(*eablk);

		if (bstart + blen == bn)
			blen++;
		else {
			if (bstart)
				gfs2_free_meta(ip, bstart, blen);
			bstart = bn;
			blen = 1;
		}

		*eablk = 0;
		gfs2_add_inode_blocks(&ip->i_inode, -1);
	}
	if (bstart)
		gfs2_free_meta(ip, bstart, blen);

	ip->i_diskflags &= ~GFS2_DIF_EA_INDIRECT;

	/* Write the updated dinode (flags, block count) back. */
	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		gfs2_trans_add_meta(ip->i_gl, dibh);
		gfs2_dinode_out(ip, dibh->b_data);
		brelse(dibh);
	}

	gfs2_trans_end(sdp);

out_gunlock:
	gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs);
out_rlist_free:
	gfs2_rlist_free(&rlist);
out:
	brelse(indbh);
	return error;
}
1396 
/*
 * ea_dealloc_block - free the inode's (single) EA block
 * @ip: the inode
 *
 * Locks the resource group containing the EA block, opens a transaction,
 * frees the block, clears ip->i_eattr and decrements the inode's block
 * count, then writes the dinode back.
 *
 * Returns: errno
 */
static int ea_dealloc_block(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;
	struct buffer_head *dibh;
	struct gfs2_holder gh;
	int error;

	error = gfs2_rindex_update(sdp);
	if (error)
		return error;

	/* Find the resource group that the EA block belongs to. */
	rgd = gfs2_blk2rgrpd(sdp, ip->i_eattr, 1);
	if (!rgd) {
		gfs2_consist_inode(ip);
		return -EIO;
	}

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (error)
		return error;

	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_DINODE + RES_STATFS +
				 RES_QUOTA, 1);
	if (error)
		goto out_gunlock;

	gfs2_free_meta(ip, ip->i_eattr, 1);

	ip->i_eattr = 0;
	gfs2_add_inode_blocks(&ip->i_inode, -1);

	/* Persist the cleared EA pointer and block count in the dinode. */
	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		gfs2_trans_add_meta(ip->i_gl, dibh);
		gfs2_dinode_out(ip, dibh->b_data);
		brelse(dibh);
	}

	gfs2_trans_end(sdp);

out_gunlock:
	gfs2_glock_dq_uninit(&gh);
	return error;
}
1442 
1443 /**
1444  * gfs2_ea_dealloc - deallocate the extended attribute fork
1445  * @ip: the inode
1446  *
1447  * Returns: errno
1448  */
1449 
1450 int gfs2_ea_dealloc(struct gfs2_inode *ip)
1451 {
1452 	int error;
1453 
1454 	error = gfs2_rindex_update(GFS2_SB(&ip->i_inode));
1455 	if (error)
1456 		return error;
1457 
1458 	error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
1459 	if (error)
1460 		return error;
1461 
1462 	error = ea_foreach(ip, ea_dealloc_unstuffed, NULL);
1463 	if (error)
1464 		goto out_quota;
1465 
1466 	if (ip->i_diskflags & GFS2_DIF_EA_INDIRECT) {
1467 		error = ea_dealloc_indirect(ip);
1468 		if (error)
1469 			goto out_quota;
1470 	}
1471 
1472 	error = ea_dealloc_block(ip);
1473 
1474 out_quota:
1475 	gfs2_quota_unhold(ip);
1476 	return error;
1477 }
1478 
/* Handler for "user." extended attributes. */
static const struct xattr_handler gfs2_xattr_user_handler = {
	.prefix = XATTR_USER_PREFIX,
	.flags  = GFS2_EATYPE_USR,
	.get    = gfs2_xattr_get,
	.set    = gfs2_xattr_set,
};

/* Handler for "security." extended attributes. */
static const struct xattr_handler gfs2_xattr_security_handler = {
	.prefix = XATTR_SECURITY_PREFIX,
	.flags  = GFS2_EATYPE_SECURITY,
	.get    = gfs2_xattr_get,
	.set    = gfs2_xattr_set,
};

/* Table of xattr handlers registered with the VFS (NULL-terminated). */
const struct xattr_handler *gfs2_xattr_handlers[] = {
	&gfs2_xattr_user_handler,
	&gfs2_xattr_security_handler,
	&posix_acl_access_xattr_handler,
	&posix_acl_default_xattr_handler,
	NULL,
};
1500 
1501