/* fs/gfs2/rgrp.c (revision c8dbaa22) */
1 /*
2  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
3  * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
4  *
5  * This copyrighted material is made available to anyone wishing to use,
6  * modify, copy, or redistribute it subject to the terms and conditions
7  * of the GNU General Public License version 2.
8  */
9 
10 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11 
12 #include <linux/slab.h>
13 #include <linux/spinlock.h>
14 #include <linux/completion.h>
15 #include <linux/buffer_head.h>
16 #include <linux/fs.h>
17 #include <linux/gfs2_ondisk.h>
18 #include <linux/prefetch.h>
19 #include <linux/blkdev.h>
20 #include <linux/rbtree.h>
21 #include <linux/random.h>
22 
23 #include "gfs2.h"
24 #include "incore.h"
25 #include "glock.h"
26 #include "glops.h"
27 #include "lops.h"
28 #include "meta_io.h"
29 #include "quota.h"
30 #include "rgrp.h"
31 #include "super.h"
32 #include "trans.h"
33 #include "util.h"
34 #include "log.h"
35 #include "inode.h"
36 #include "trace_gfs2.h"
37 
38 #define BFITNOENT ((u32)~0)
39 #define NO_BLOCK ((u64)~0)
40 
41 #if BITS_PER_LONG == 32
42 #define LBITMASK   (0x55555555UL)
43 #define LBITSKIP55 (0x55555555UL)
44 #define LBITSKIP00 (0x00000000UL)
45 #else
46 #define LBITMASK   (0x5555555555555555UL)
47 #define LBITSKIP55 (0x5555555555555555UL)
48 #define LBITSKIP00 (0x0000000000000000UL)
49 #endif
50 
51 /*
52  * These routines are used by the resource group routines (rgrp.c)
53  * to keep track of block allocation.  Each block is represented by two
54  * bits.  So, each byte represents GFS2_NBBY (i.e. 4) blocks.
55  *
56  * 0 = Free
57  * 1 = Used (not metadata)
58  * 2 = Unlinked (still in use) inode
59  * 3 = Used (metadata)
60  */
61 
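/*
 * For illustration, a minimal sketch (not part of the original code) of
 * how one block's 2-bit state is unpacked from a packed bitmap byte;
 * gfs2_testbit() below does exactly this on the real buffers:
 *
 *	const u8 *buffer;			// start of bitmap data
 *	u32 blk;				// block index within bitmap
 *	u8 byte = buffer[blk / GFS2_NBBY];	// 4 blocks per byte
 *	unsigned int bit = (blk % GFS2_NBBY) * GFS2_BIT_SIZE;
 *	u8 state = (byte >> bit) & GFS2_BIT_MASK; // one of the 4 states
 */
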
62 struct gfs2_extent {
63 	struct gfs2_rbm rbm;
64 	u32 len;
65 };
66 
67 static const char valid_change[16] = {
68 	        /* current */
69 	/* n */ 0, 1, 1, 1,
70 	/* e */ 1, 0, 0, 0,
71 	/* w */ 0, 0, 0, 1,
72 	        1, 0, 0, 0
73 };
74 
75 static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
76 			 const struct gfs2_inode *ip, bool nowrap);
77 
78 
79 /**
80  * gfs2_setbit - Set a bit in the bitmaps
81  * @rbm: The position of the bit to set
82  * @do_clone: Also set the clone bitmap, if it exists
83  * @new_state: the new state of the block
84  *
85  */
86 
87 static inline void gfs2_setbit(const struct gfs2_rbm *rbm, bool do_clone,
88 			       unsigned char new_state)
89 {
90 	unsigned char *byte1, *byte2, *end, cur_state;
91 	struct gfs2_bitmap *bi = rbm_bi(rbm);
92 	unsigned int buflen = bi->bi_len;
93 	const unsigned int bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE;
94 
95 	byte1 = bi->bi_bh->b_data + bi->bi_offset + (rbm->offset / GFS2_NBBY);
96 	end = bi->bi_bh->b_data + bi->bi_offset + buflen;
97 
98 	BUG_ON(byte1 >= end);
99 
100 	cur_state = (*byte1 >> bit) & GFS2_BIT_MASK;
101 
102 	if (unlikely(!valid_change[new_state * 4 + cur_state])) {
103 		pr_warn("buf_blk = 0x%x old_state=%d, new_state=%d\n",
104 			rbm->offset, cur_state, new_state);
105 		pr_warn("rgrp=0x%llx bi_start=0x%x\n",
106 			(unsigned long long)rbm->rgd->rd_addr, bi->bi_start);
107 		pr_warn("bi_offset=0x%x bi_len=0x%x\n",
108 			bi->bi_offset, bi->bi_len);
109 		dump_stack();
110 		gfs2_consist_rgrpd(rbm->rgd);
111 		return;
112 	}
113 	*byte1 ^= (cur_state ^ new_state) << bit;
114 
115 	if (do_clone && bi->bi_clone) {
116 		byte2 = bi->bi_clone + bi->bi_offset + (rbm->offset / GFS2_NBBY);
117 		cur_state = (*byte2 >> bit) & GFS2_BIT_MASK;
118 		*byte2 ^= (cur_state ^ new_state) << bit;
119 	}
120 }
121 
122 /**
123  * gfs2_testbit - test a bit in the bitmaps
124  * @rbm: The bit to test
125  *
126  * Returns: The two bit block state of the requested bit
127  */
128 
129 static inline u8 gfs2_testbit(const struct gfs2_rbm *rbm)
130 {
131 	struct gfs2_bitmap *bi = rbm_bi(rbm);
132 	const u8 *buffer = bi->bi_bh->b_data + bi->bi_offset;
133 	const u8 *byte;
134 	unsigned int bit;
135 
136 	byte = buffer + (rbm->offset / GFS2_NBBY);
137 	bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE;
138 
139 	return (*byte >> bit) & GFS2_BIT_MASK;
140 }
141 
142 /**
 * gfs2_bit_search - search a word of bitmap data for blocks in a given state
144  * @ptr: Pointer to bitmap data
145  * @mask: Mask to use (normally 0x55555.... but adjusted for search start)
146  * @state: The state we are searching for
147  *
 * We xor the bitmap data with a pattern which is the bitwise opposite
 * of what we are looking for. This gives rise to a pattern of ones
 * wherever there is a match. Since we have two bits per entry, we
 * take this pattern, shift it down by one place and then AND it with
 * the original. All the even bit positions (0,2,4, etc) then represent
 * successful matches, so we mask with 0x55555..... to remove the unwanted
 * odd bit positions.
155  *
156  * This allows searching of a whole u64 at once (32 blocks) with a
157  * single test (on 64 bit arches).
158  */
159 
160 static inline u64 gfs2_bit_search(const __le64 *ptr, u64 mask, u8 state)
161 {
162 	u64 tmp;
163 	static const u64 search[] = {
164 		[0] = 0xffffffffffffffffULL,
165 		[1] = 0xaaaaaaaaaaaaaaaaULL,
166 		[2] = 0x5555555555555555ULL,
167 		[3] = 0x0000000000000000ULL,
168 	};
169 	tmp = le64_to_cpu(*ptr) ^ search[state];
170 	tmp &= (tmp >> 1);
171 	tmp &= mask;
172 	return tmp;
173 }
174 
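/*
 * Worked example of the trick above, with made-up contents: one byte
 * holding the states 0,1,3,0 (entry 0 in the lowest two bits) is 0x34.
 * Searching it for GFS2_BLKST_FREE (state 0, search[0] = all ones):
 *
 *	tmp = 0x34 ^ 0xff;	// 0xcb = 11001011b
 *	tmp &= (tmp >> 1);	// 11001011b & 01100101b = 01000001b
 *	tmp &= 0x55;		// keep even positions: 01000001b
 *
 * Bits 0 and 6 are set, i.e. entries 0 and 3 are free, matching the
 * input; __ffs64(tmp) / 2 in gfs2_bitfit() then picks the first match.
 */
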
175 /**
176  * rs_cmp - multi-block reservation range compare
177  * @blk: absolute file system block number of the new reservation
178  * @len: number of blocks in the new reservation
179  * @rs: existing reservation to compare against
180  *
181  * returns: 1 if the block range is beyond the reach of the reservation
182  *         -1 if the block range is before the start of the reservation
183  *          0 if the block range overlaps with the reservation
184  */
185 static inline int rs_cmp(u64 blk, u32 len, struct gfs2_blkreserv *rs)
186 {
187 	u64 startblk = gfs2_rbm_to_block(&rs->rs_rbm);
188 
189 	if (blk >= startblk + rs->rs_free)
190 		return 1;
191 	if (blk + len - 1 < startblk)
192 		return -1;
193 	return 0;
194 }
195 
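/*
 * Example with hypothetical numbers: for a reservation starting at
 * block 1000 with rs_free = 16 (covering blocks 1000..1015):
 *
 *	rs_cmp(1016, 4, rs) ->  1	// 1016..1019 lies beyond it
 *	rs_cmp( 990, 8, rs) -> -1	// 990..997 ends before it
 *	rs_cmp( 998, 4, rs) ->  0	// 998..1001 overlaps it
 */
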
196 /**
197  * gfs2_bitfit - Search an rgrp's bitmap buffer to find a bit-pair representing
198  *       a block in a given allocation state.
199  * @buf: the buffer that holds the bitmaps
200  * @len: the length (in bytes) of the buffer
 * @goal: start search at this block's bit-pair (within @buf)
202  * @state: GFS2_BLKST_XXX the state of the block we're looking for.
203  *
 * Scope of @goal and returned block number is only within this bitmap buffer,
 * not the entire rgrp or filesystem.  @buf will be offset from the actual
206  * beginning of a bitmap block buffer, skipping any header structures, but
207  * headers are always a multiple of 64 bits long so that the buffer is
208  * always aligned to a 64 bit boundary.
209  *
 * The size of the buffer is in bytes, but it is assumed that it is
 * always ok to read a complete multiple of 64 bits at the end
 * of the block in case the end is not aligned to a natural boundary.
213  *
214  * Return: the block number (bitmap buffer scope) that was found
215  */
216 
217 static u32 gfs2_bitfit(const u8 *buf, const unsigned int len,
218 		       u32 goal, u8 state)
219 {
220 	u32 spoint = (goal << 1) & ((8*sizeof(u64)) - 1);
221 	const __le64 *ptr = ((__le64 *)buf) + (goal >> 5);
222 	const __le64 *end = (__le64 *)(buf + ALIGN(len, sizeof(u64)));
223 	u64 tmp;
224 	u64 mask = 0x5555555555555555ULL;
225 	u32 bit;
226 
227 	/* Mask off bits we don't care about at the start of the search */
228 	mask <<= spoint;
229 	tmp = gfs2_bit_search(ptr, mask, state);
230 	ptr++;
231 	while(tmp == 0 && ptr < end) {
232 		tmp = gfs2_bit_search(ptr, 0x5555555555555555ULL, state);
233 		ptr++;
234 	}
235 	/* Mask off any bits which are more than len bytes from the start */
236 	if (ptr == end && (len & (sizeof(u64) - 1)))
237 		tmp &= (((u64)~0) >> (64 - 8*(len & (sizeof(u64) - 1))));
238 	/* Didn't find anything, so return */
239 	if (tmp == 0)
240 		return BFITNOENT;
241 	ptr--;
242 	bit = __ffs64(tmp);
243 	bit /= 2;	/* two bits per entry in the bitmap */
244 	return (((const unsigned char *)ptr - buf) * GFS2_NBBY) + bit;
245 }
246 
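/*
 * Example of the goal-to-search-position mapping above (goal chosen
 * arbitrarily): with goal = 100, the search begins at the u64 word
 * holding bit-pair 100:
 *
 *	ptr    = buf + (100 >> 5) words	// word 3, entries 96..127
 *	spoint = (100 << 1) & 63 = 8	// bit 8 = entry 4 of that word
 *
 * The initial mask 0x55... << 8 therefore ignores entries 96..99, so
 * matches before the goal are not reported in the first word.
 */
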
247 /**
248  * gfs2_rbm_from_block - Set the rbm based upon rgd and block number
249  * @rbm: The rbm with rgd already set correctly
250  * @block: The block number (filesystem relative)
251  *
252  * This sets the bi and offset members of an rbm based on a
253  * resource group and a filesystem relative block number. The
254  * resource group must be set in the rbm on entry, the bi and
255  * offset members will be set by this function.
256  *
257  * Returns: 0 on success, or an error code
258  */
259 
260 static int gfs2_rbm_from_block(struct gfs2_rbm *rbm, u64 block)
261 {
262 	u64 rblock = block - rbm->rgd->rd_data0;
263 
264 	if (WARN_ON_ONCE(rblock > UINT_MAX))
265 		return -EINVAL;
266 	if (block >= rbm->rgd->rd_data0 + rbm->rgd->rd_data)
267 		return -E2BIG;
268 
269 	rbm->bii = 0;
270 	rbm->offset = (u32)(rblock);
	/* Check if the block is within the first bitmap block */
272 	if (rbm->offset < rbm_bi(rbm)->bi_blocks)
273 		return 0;
274 
275 	/* Adjust for the size diff between gfs2_meta_header and gfs2_rgrp */
276 	rbm->offset += (sizeof(struct gfs2_rgrp) -
277 			sizeof(struct gfs2_meta_header)) * GFS2_NBBY;
278 	rbm->bii = rbm->offset / rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
279 	rbm->offset -= rbm->bii * rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
280 	return 0;
281 }
282 
283 /**
284  * gfs2_rbm_incr - increment an rbm structure
285  * @rbm: The rbm with rgd already set correctly
286  *
287  * This function takes an existing rbm structure and increments it to the next
288  * viable block offset.
289  *
290  * Returns: If incrementing the offset would cause the rbm to go past the
291  *          end of the rgrp, true is returned, otherwise false.
292  *
293  */
294 
295 static bool gfs2_rbm_incr(struct gfs2_rbm *rbm)
296 {
297 	if (rbm->offset + 1 < rbm_bi(rbm)->bi_blocks) { /* in the same bitmap */
298 		rbm->offset++;
299 		return false;
300 	}
301 	if (rbm->bii == rbm->rgd->rd_length - 1) /* at the last bitmap */
302 		return true;
303 
304 	rbm->offset = 0;
305 	rbm->bii++;
306 	return false;
307 }
308 
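/*
 * Sketch of how an rbm is typically advanced block by block (the loop
 * body is hypothetical):
 *
 *	do {
 *		u8 state = gfs2_testbit(&rbm);
 *		// ... examine state ...
 *	} while (!gfs2_rbm_incr(&rbm));	// true means end of rgrp
 */
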
309 /**
310  * gfs2_unaligned_extlen - Look for free blocks which are not byte aligned
311  * @rbm: Position to search (value/result)
312  * @n_unaligned: Number of unaligned blocks to check
313  * @len: Decremented for each block found (terminate on zero)
314  *
315  * Returns: true if a non-free block is encountered
316  */
317 
318 static bool gfs2_unaligned_extlen(struct gfs2_rbm *rbm, u32 n_unaligned, u32 *len)
319 {
320 	u32 n;
321 	u8 res;
322 
323 	for (n = 0; n < n_unaligned; n++) {
324 		res = gfs2_testbit(rbm);
325 		if (res != GFS2_BLKST_FREE)
326 			return true;
327 		(*len)--;
328 		if (*len == 0)
329 			return true;
330 		if (gfs2_rbm_incr(rbm))
331 			return true;
332 	}
333 
334 	return false;
335 }
336 
337 /**
338  * gfs2_free_extlen - Return extent length of free blocks
339  * @rrbm: Starting position
340  * @len: Max length to check
341  *
342  * Starting at the block specified by the rbm, see how many free blocks
343  * there are, not reading more than len blocks ahead. This can be done
344  * using memchr_inv when the blocks are byte aligned, but has to be done
345  * on a block by block basis in case of unaligned blocks. Also this
346  * function can cope with bitmap boundaries (although it must stop on
347  * a resource group boundary)
348  *
349  * Returns: Number of free blocks in the extent
350  */
351 
352 static u32 gfs2_free_extlen(const struct gfs2_rbm *rrbm, u32 len)
353 {
354 	struct gfs2_rbm rbm = *rrbm;
355 	u32 n_unaligned = rbm.offset & 3;
356 	u32 size = len;
357 	u32 bytes;
358 	u32 chunk_size;
359 	u8 *ptr, *start, *end;
360 	u64 block;
361 	struct gfs2_bitmap *bi;
362 
363 	if (n_unaligned &&
364 	    gfs2_unaligned_extlen(&rbm, 4 - n_unaligned, &len))
365 		goto out;
366 
367 	n_unaligned = len & 3;
368 	/* Start is now byte aligned */
369 	while (len > 3) {
370 		bi = rbm_bi(&rbm);
371 		start = bi->bi_bh->b_data;
372 		if (bi->bi_clone)
373 			start = bi->bi_clone;
374 		end = start + bi->bi_bh->b_size;
375 		start += bi->bi_offset;
376 		BUG_ON(rbm.offset & 3);
377 		start += (rbm.offset / GFS2_NBBY);
378 		bytes = min_t(u32, len / GFS2_NBBY, (end - start));
379 		ptr = memchr_inv(start, 0, bytes);
380 		chunk_size = ((ptr == NULL) ? bytes : (ptr - start));
381 		chunk_size *= GFS2_NBBY;
382 		BUG_ON(len < chunk_size);
383 		len -= chunk_size;
384 		block = gfs2_rbm_to_block(&rbm);
385 		if (gfs2_rbm_from_block(&rbm, block + chunk_size)) {
386 			n_unaligned = 0;
387 			break;
388 		}
389 		if (ptr) {
390 			n_unaligned = 3;
391 			break;
392 		}
393 		n_unaligned = len & 3;
394 	}
395 
396 	/* Deal with any bits left over at the end */
397 	if (n_unaligned)
398 		gfs2_unaligned_extlen(&rbm, n_unaligned, &len);
399 out:
400 	return size - len;
401 }
402 
403 /**
404  * gfs2_bitcount - count the number of bits in a certain state
405  * @rgd: the resource group descriptor
406  * @buffer: the buffer that holds the bitmaps
407  * @buflen: the length (in bytes) of the buffer
408  * @state: the state of the block we're looking for
409  *
410  * Returns: The number of bits
411  */
412 
413 static u32 gfs2_bitcount(struct gfs2_rgrpd *rgd, const u8 *buffer,
414 			 unsigned int buflen, u8 state)
415 {
416 	const u8 *byte = buffer;
417 	const u8 *end = buffer + buflen;
418 	const u8 state1 = state << 2;
419 	const u8 state2 = state << 4;
420 	const u8 state3 = state << 6;
421 	u32 count = 0;
422 
423 	for (; byte < end; byte++) {
424 		if (((*byte) & 0x03) == state)
425 			count++;
426 		if (((*byte) & 0x0C) == state1)
427 			count++;
428 		if (((*byte) & 0x30) == state2)
429 			count++;
430 		if (((*byte) & 0xC0) == state3)
431 			count++;
432 	}
433 
434 	return count;
435 }
436 
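/*
 * Example with a made-up byte: for *byte = 0x34 (states 0,1,3,0) and
 * state = GFS2_BLKST_FREE (0), the four masked compares above give:
 *
 *	(0x34 & 0x03) == 0	// match, position 0
 *	(0x34 & 0x0c) == 0x04	// no match (state 1)
 *	(0x34 & 0x30) == 0x30	// no match (state 3)
 *	(0x34 & 0xc0) == 0	// match, position 3
 *
 * so this byte contributes 2 to the count.
 */
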
437 /**
438  * gfs2_rgrp_verify - Verify that a resource group is consistent
439  * @rgd: the rgrp
440  *
441  */
442 
443 void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd)
444 {
445 	struct gfs2_sbd *sdp = rgd->rd_sbd;
446 	struct gfs2_bitmap *bi = NULL;
447 	u32 length = rgd->rd_length;
448 	u32 count[4], tmp;
449 	int buf, x;
450 
451 	memset(count, 0, 4 * sizeof(u32));
452 
453 	/* Count # blocks in each of 4 possible allocation states */
454 	for (buf = 0; buf < length; buf++) {
455 		bi = rgd->rd_bits + buf;
456 		for (x = 0; x < 4; x++)
457 			count[x] += gfs2_bitcount(rgd,
458 						  bi->bi_bh->b_data +
459 						  bi->bi_offset,
460 						  bi->bi_len, x);
461 	}
462 
463 	if (count[0] != rgd->rd_free) {
464 		if (gfs2_consist_rgrpd(rgd))
465 			fs_err(sdp, "free data mismatch:  %u != %u\n",
466 			       count[0], rgd->rd_free);
467 		return;
468 	}
469 
470 	tmp = rgd->rd_data - rgd->rd_free - rgd->rd_dinodes;
471 	if (count[1] != tmp) {
472 		if (gfs2_consist_rgrpd(rgd))
473 			fs_err(sdp, "used data mismatch:  %u != %u\n",
474 			       count[1], tmp);
475 		return;
476 	}
477 
478 	if (count[2] + count[3] != rgd->rd_dinodes) {
479 		if (gfs2_consist_rgrpd(rgd))
480 			fs_err(sdp, "used metadata mismatch:  %u != %u\n",
481 			       count[2] + count[3], rgd->rd_dinodes);
482 		return;
483 	}
484 }
485 
486 /**
487  * gfs2_blk2rgrpd - Find resource group for a given data/meta block number
488  * @sdp: The GFS2 superblock
489  * @blk: The data block number
490  * @exact: True if this needs to be an exact match
491  *
492  * Returns: The resource group, or NULL if not found
493  */
494 
495 struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk, bool exact)
496 {
497 	struct rb_node *n, *next;
498 	struct gfs2_rgrpd *cur;
499 
500 	spin_lock(&sdp->sd_rindex_spin);
501 	n = sdp->sd_rindex_tree.rb_node;
502 	while (n) {
503 		cur = rb_entry(n, struct gfs2_rgrpd, rd_node);
504 		next = NULL;
505 		if (blk < cur->rd_addr)
506 			next = n->rb_left;
507 		else if (blk >= cur->rd_data0 + cur->rd_data)
508 			next = n->rb_right;
509 		if (next == NULL) {
510 			spin_unlock(&sdp->sd_rindex_spin);
511 			if (exact) {
512 				if (blk < cur->rd_addr)
513 					return NULL;
514 				if (blk >= cur->rd_data0 + cur->rd_data)
515 					return NULL;
516 			}
517 			return cur;
518 		}
519 		n = next;
520 	}
521 	spin_unlock(&sdp->sd_rindex_spin);
522 
523 	return NULL;
524 }
525 
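/*
 * Typical use (illustrative sketch; locking context omitted and
 * "block" is a hypothetical variable):
 *
 *	struct gfs2_rgrpd *rgd = gfs2_blk2rgrpd(sdp, block, true);
 *	if (rgd == NULL)
 *		return;		// block does not fall inside any rgrp
 */
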
526 /**
527  * gfs2_rgrpd_get_first - get the first Resource Group in the filesystem
528  * @sdp: The GFS2 superblock
529  *
530  * Returns: The first rgrp in the filesystem
531  */
532 
533 struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp)
534 {
535 	const struct rb_node *n;
536 	struct gfs2_rgrpd *rgd;
537 
538 	spin_lock(&sdp->sd_rindex_spin);
539 	n = rb_first(&sdp->sd_rindex_tree);
540 	rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
541 	spin_unlock(&sdp->sd_rindex_spin);
542 
543 	return rgd;
544 }
545 
546 /**
547  * gfs2_rgrpd_get_next - get the next RG
548  * @rgd: the resource group descriptor
549  *
550  * Returns: The next rgrp
551  */
552 
553 struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd)
554 {
555 	struct gfs2_sbd *sdp = rgd->rd_sbd;
556 	const struct rb_node *n;
557 
558 	spin_lock(&sdp->sd_rindex_spin);
559 	n = rb_next(&rgd->rd_node);
560 	if (n == NULL)
561 		n = rb_first(&sdp->sd_rindex_tree);
562 
563 	if (unlikely(&rgd->rd_node == n)) {
564 		spin_unlock(&sdp->sd_rindex_spin);
565 		return NULL;
566 	}
567 	rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
568 	spin_unlock(&sdp->sd_rindex_spin);
569 	return rgd;
570 }
571 
572 void check_and_update_goal(struct gfs2_inode *ip)
573 {
574 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
575 	if (!ip->i_goal || gfs2_blk2rgrpd(sdp, ip->i_goal, 1) == NULL)
576 		ip->i_goal = ip->i_no_addr;
577 }
578 
579 void gfs2_free_clones(struct gfs2_rgrpd *rgd)
580 {
581 	int x;
582 
583 	for (x = 0; x < rgd->rd_length; x++) {
584 		struct gfs2_bitmap *bi = rgd->rd_bits + x;
585 		kfree(bi->bi_clone);
586 		bi->bi_clone = NULL;
587 	}
588 }
589 
590 /**
591  * gfs2_rsqa_alloc - make sure we have a reservation assigned to the inode
 *                 plus a quota allocation data structure, if necessary
593  * @ip: the inode for this reservation
594  */
595 int gfs2_rsqa_alloc(struct gfs2_inode *ip)
596 {
597 	return gfs2_qa_alloc(ip);
598 }
599 
600 static void dump_rs(struct seq_file *seq, const struct gfs2_blkreserv *rs)
601 {
602 	gfs2_print_dbg(seq, "  B: n:%llu s:%llu b:%u f:%u\n",
603 		       (unsigned long long)rs->rs_inum,
604 		       (unsigned long long)gfs2_rbm_to_block(&rs->rs_rbm),
605 		       rs->rs_rbm.offset, rs->rs_free);
606 }
607 
608 /**
609  * __rs_deltree - remove a multi-block reservation from the rgd tree
610  * @rs: The reservation to remove
611  *
612  */
613 static void __rs_deltree(struct gfs2_blkreserv *rs)
614 {
615 	struct gfs2_rgrpd *rgd;
616 
617 	if (!gfs2_rs_active(rs))
618 		return;
619 
620 	rgd = rs->rs_rbm.rgd;
621 	trace_gfs2_rs(rs, TRACE_RS_TREEDEL);
622 	rb_erase(&rs->rs_node, &rgd->rd_rstree);
623 	RB_CLEAR_NODE(&rs->rs_node);
624 
625 	if (rs->rs_free) {
626 		struct gfs2_bitmap *bi = rbm_bi(&rs->rs_rbm);
627 
628 		/* return reserved blocks to the rgrp */
629 		BUG_ON(rs->rs_rbm.rgd->rd_reserved < rs->rs_free);
630 		rs->rs_rbm.rgd->rd_reserved -= rs->rs_free;
631 		/* The rgrp extent failure point is likely not to increase;
632 		   it will only do so if the freed blocks are somehow
633 		   contiguous with a span of free blocks that follows. Still,
634 		   it will force the number to be recalculated later. */
635 		rgd->rd_extfail_pt += rs->rs_free;
636 		rs->rs_free = 0;
637 		clear_bit(GBF_FULL, &bi->bi_flags);
638 	}
639 }
640 
641 /**
642  * gfs2_rs_deltree - remove a multi-block reservation from the rgd tree
643  * @rs: The reservation to remove
644  *
645  */
646 void gfs2_rs_deltree(struct gfs2_blkreserv *rs)
647 {
648 	struct gfs2_rgrpd *rgd;
649 
650 	rgd = rs->rs_rbm.rgd;
651 	if (rgd) {
652 		spin_lock(&rgd->rd_rsspin);
653 		__rs_deltree(rs);
654 		BUG_ON(rs->rs_free);
655 		spin_unlock(&rgd->rd_rsspin);
656 	}
657 }
658 
659 /**
660  * gfs2_rsqa_delete - delete a multi-block reservation and quota allocation
661  * @ip: The inode for this reservation
662  * @wcount: The inode's write count, or NULL
663  *
664  */
665 void gfs2_rsqa_delete(struct gfs2_inode *ip, atomic_t *wcount)
666 {
667 	down_write(&ip->i_rw_mutex);
668 	if ((wcount == NULL) || (atomic_read(wcount) <= 1))
669 		gfs2_rs_deltree(&ip->i_res);
670 	up_write(&ip->i_rw_mutex);
671 	gfs2_qa_delete(ip, wcount);
672 }
673 
674 /**
675  * return_all_reservations - return all reserved blocks back to the rgrp.
676  * @rgd: the rgrp that needs its space back
677  *
678  * We previously reserved a bunch of blocks for allocation. Now we need to
 * give them back. This leaves the reservation structures intact, but removes
680  * all of their corresponding "no-fly zones".
681  */
682 static void return_all_reservations(struct gfs2_rgrpd *rgd)
683 {
684 	struct rb_node *n;
685 	struct gfs2_blkreserv *rs;
686 
687 	spin_lock(&rgd->rd_rsspin);
688 	while ((n = rb_first(&rgd->rd_rstree))) {
689 		rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
690 		__rs_deltree(rs);
691 	}
692 	spin_unlock(&rgd->rd_rsspin);
693 }
694 
695 void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
696 {
697 	struct rb_node *n;
698 	struct gfs2_rgrpd *rgd;
699 	struct gfs2_glock *gl;
700 
701 	while ((n = rb_first(&sdp->sd_rindex_tree))) {
702 		rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
703 		gl = rgd->rd_gl;
704 
705 		rb_erase(n, &sdp->sd_rindex_tree);
706 
707 		if (gl) {
708 			glock_set_object(gl, NULL);
709 			gfs2_glock_add_to_lru(gl);
710 			gfs2_glock_put(gl);
711 		}
712 
713 		gfs2_free_clones(rgd);
714 		kfree(rgd->rd_bits);
715 		rgd->rd_bits = NULL;
716 		return_all_reservations(rgd);
717 		kmem_cache_free(gfs2_rgrpd_cachep, rgd);
718 	}
719 }
720 
721 static void gfs2_rindex_print(const struct gfs2_rgrpd *rgd)
722 {
723 	pr_info("ri_addr = %llu\n", (unsigned long long)rgd->rd_addr);
724 	pr_info("ri_length = %u\n", rgd->rd_length);
725 	pr_info("ri_data0 = %llu\n", (unsigned long long)rgd->rd_data0);
726 	pr_info("ri_data = %u\n", rgd->rd_data);
727 	pr_info("ri_bitbytes = %u\n", rgd->rd_bitbytes);
728 }
729 
730 /**
 * compute_bitstructs - Compute the bitmap sizes
732  * @rgd: The resource group descriptor
733  *
734  * Calculates bitmap descriptors, one for each block that contains bitmap data
735  *
736  * Returns: errno
737  */
738 
739 static int compute_bitstructs(struct gfs2_rgrpd *rgd)
740 {
741 	struct gfs2_sbd *sdp = rgd->rd_sbd;
742 	struct gfs2_bitmap *bi;
743 	u32 length = rgd->rd_length; /* # blocks in hdr & bitmap */
744 	u32 bytes_left, bytes;
745 	int x;
746 
747 	if (!length)
748 		return -EINVAL;
749 
750 	rgd->rd_bits = kcalloc(length, sizeof(struct gfs2_bitmap), GFP_NOFS);
751 	if (!rgd->rd_bits)
752 		return -ENOMEM;
753 
754 	bytes_left = rgd->rd_bitbytes;
755 
756 	for (x = 0; x < length; x++) {
757 		bi = rgd->rd_bits + x;
758 
759 		bi->bi_flags = 0;
760 		/* small rgrp; bitmap stored completely in header block */
761 		if (length == 1) {
762 			bytes = bytes_left;
763 			bi->bi_offset = sizeof(struct gfs2_rgrp);
764 			bi->bi_start = 0;
765 			bi->bi_len = bytes;
766 			bi->bi_blocks = bytes * GFS2_NBBY;
767 		/* header block */
768 		} else if (x == 0) {
769 			bytes = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_rgrp);
770 			bi->bi_offset = sizeof(struct gfs2_rgrp);
771 			bi->bi_start = 0;
772 			bi->bi_len = bytes;
773 			bi->bi_blocks = bytes * GFS2_NBBY;
774 		/* last block */
775 		} else if (x + 1 == length) {
776 			bytes = bytes_left;
777 			bi->bi_offset = sizeof(struct gfs2_meta_header);
778 			bi->bi_start = rgd->rd_bitbytes - bytes_left;
779 			bi->bi_len = bytes;
780 			bi->bi_blocks = bytes * GFS2_NBBY;
781 		/* other blocks */
782 		} else {
783 			bytes = sdp->sd_sb.sb_bsize -
784 				sizeof(struct gfs2_meta_header);
785 			bi->bi_offset = sizeof(struct gfs2_meta_header);
786 			bi->bi_start = rgd->rd_bitbytes - bytes_left;
787 			bi->bi_len = bytes;
788 			bi->bi_blocks = bytes * GFS2_NBBY;
789 		}
790 
791 		bytes_left -= bytes;
792 	}
793 
794 	if (bytes_left) {
795 		gfs2_consist_rgrpd(rgd);
796 		return -EIO;
797 	}
798 	bi = rgd->rd_bits + (length - 1);
799 	if ((bi->bi_start + bi->bi_len) * GFS2_NBBY != rgd->rd_data) {
800 		if (gfs2_consist_rgrpd(rgd)) {
801 			gfs2_rindex_print(rgd);
802 			fs_err(sdp, "start=%u len=%u offset=%u\n",
803 			       bi->bi_start, bi->bi_len, bi->bi_offset);
804 		}
805 		return -EIO;
806 	}
807 
808 	return 0;
809 }
810 
811 /**
812  * gfs2_ri_total - Total up the file system space, according to the rindex.
813  * @sdp: the filesystem
 *
 * Returns: the total amount of data space in the file system, in blocks
 */
816 u64 gfs2_ri_total(struct gfs2_sbd *sdp)
817 {
818 	u64 total_data = 0;
819 	struct inode *inode = sdp->sd_rindex;
820 	struct gfs2_inode *ip = GFS2_I(inode);
821 	char buf[sizeof(struct gfs2_rindex)];
822 	int error, rgrps;
823 
824 	for (rgrps = 0;; rgrps++) {
825 		loff_t pos = rgrps * sizeof(struct gfs2_rindex);
826 
827 		if (pos + sizeof(struct gfs2_rindex) > i_size_read(inode))
828 			break;
829 		error = gfs2_internal_read(ip, buf, &pos,
830 					   sizeof(struct gfs2_rindex));
831 		if (error != sizeof(struct gfs2_rindex))
832 			break;
833 		total_data += be32_to_cpu(((struct gfs2_rindex *)buf)->ri_data);
834 	}
835 	return total_data;
836 }
837 
838 static int rgd_insert(struct gfs2_rgrpd *rgd)
839 {
840 	struct gfs2_sbd *sdp = rgd->rd_sbd;
841 	struct rb_node **newn = &sdp->sd_rindex_tree.rb_node, *parent = NULL;
842 
843 	/* Figure out where to put new node */
844 	while (*newn) {
845 		struct gfs2_rgrpd *cur = rb_entry(*newn, struct gfs2_rgrpd,
846 						  rd_node);
847 
848 		parent = *newn;
849 		if (rgd->rd_addr < cur->rd_addr)
850 			newn = &((*newn)->rb_left);
851 		else if (rgd->rd_addr > cur->rd_addr)
852 			newn = &((*newn)->rb_right);
853 		else
854 			return -EEXIST;
855 	}
856 
857 	rb_link_node(&rgd->rd_node, parent, newn);
858 	rb_insert_color(&rgd->rd_node, &sdp->sd_rindex_tree);
859 	sdp->sd_rgrps++;
860 	return 0;
861 }
862 
863 /**
864  * read_rindex_entry - Pull in a new resource index entry from the disk
865  * @ip: Pointer to the rindex inode
866  *
867  * Returns: 0 on success, > 0 on EOF, error code otherwise
868  */
869 
870 static int read_rindex_entry(struct gfs2_inode *ip)
871 {
872 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
873 	const unsigned bsize = sdp->sd_sb.sb_bsize;
874 	loff_t pos = sdp->sd_rgrps * sizeof(struct gfs2_rindex);
875 	struct gfs2_rindex buf;
876 	int error;
877 	struct gfs2_rgrpd *rgd;
878 
879 	if (pos >= i_size_read(&ip->i_inode))
880 		return 1;
881 
882 	error = gfs2_internal_read(ip, (char *)&buf, &pos,
883 				   sizeof(struct gfs2_rindex));
884 
885 	if (error != sizeof(struct gfs2_rindex))
886 		return (error == 0) ? 1 : error;
887 
888 	rgd = kmem_cache_zalloc(gfs2_rgrpd_cachep, GFP_NOFS);
889 	error = -ENOMEM;
890 	if (!rgd)
891 		return error;
892 
893 	rgd->rd_sbd = sdp;
894 	rgd->rd_addr = be64_to_cpu(buf.ri_addr);
895 	rgd->rd_length = be32_to_cpu(buf.ri_length);
896 	rgd->rd_data0 = be64_to_cpu(buf.ri_data0);
897 	rgd->rd_data = be32_to_cpu(buf.ri_data);
898 	rgd->rd_bitbytes = be32_to_cpu(buf.ri_bitbytes);
899 	spin_lock_init(&rgd->rd_rsspin);
900 
901 	error = compute_bitstructs(rgd);
902 	if (error)
903 		goto fail;
904 
905 	error = gfs2_glock_get(sdp, rgd->rd_addr,
906 			       &gfs2_rgrp_glops, CREATE, &rgd->rd_gl);
907 	if (error)
908 		goto fail;
909 
910 	rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lksb.sb_lvbptr;
911 	rgd->rd_flags &= ~(GFS2_RDF_UPTODATE | GFS2_RDF_PREFERRED);
912 	if (rgd->rd_data > sdp->sd_max_rg_data)
913 		sdp->sd_max_rg_data = rgd->rd_data;
914 	spin_lock(&sdp->sd_rindex_spin);
915 	error = rgd_insert(rgd);
916 	spin_unlock(&sdp->sd_rindex_spin);
917 	if (!error) {
918 		glock_set_object(rgd->rd_gl, rgd);
919 		rgd->rd_gl->gl_vm.start = (rgd->rd_addr * bsize) & PAGE_MASK;
920 		rgd->rd_gl->gl_vm.end = PAGE_ALIGN((rgd->rd_addr +
921 						    rgd->rd_length) * bsize) - 1;
922 		return 0;
923 	}
924 
925 	error = 0; /* someone else read in the rgrp; free it and ignore it */
926 	gfs2_glock_put(rgd->rd_gl);
927 
928 fail:
929 	kfree(rgd->rd_bits);
930 	rgd->rd_bits = NULL;
931 	kmem_cache_free(gfs2_rgrpd_cachep, rgd);
932 	return error;
933 }
934 
935 /**
936  * set_rgrp_preferences - Run all the rgrps, selecting some we prefer to use
937  * @sdp: the GFS2 superblock
938  *
939  * The purpose of this function is to select a subset of the resource groups
940  * and mark them as PREFERRED. We do it in such a way that each node prefers
941  * to use a unique set of rgrps to minimize glock contention.
942  */
943 static void set_rgrp_preferences(struct gfs2_sbd *sdp)
944 {
945 	struct gfs2_rgrpd *rgd, *first;
946 	int i;
947 
948 	/* Skip an initial number of rgrps, based on this node's journal ID.
949 	   That should start each node out on its own set. */
950 	rgd = gfs2_rgrpd_get_first(sdp);
951 	for (i = 0; i < sdp->sd_lockstruct.ls_jid; i++)
952 		rgd = gfs2_rgrpd_get_next(rgd);
953 	first = rgd;
954 
955 	do {
956 		rgd->rd_flags |= GFS2_RDF_PREFERRED;
957 		for (i = 0; i < sdp->sd_journals; i++) {
958 			rgd = gfs2_rgrpd_get_next(rgd);
959 			if (!rgd || rgd == first)
960 				break;
961 		}
962 	} while (rgd && rgd != first);
963 }
964 
965 /**
966  * gfs2_ri_update - Pull in a new resource index from the disk
967  * @ip: pointer to the rindex inode
968  *
969  * Returns: 0 on successful update, error code otherwise
970  */
971 
972 static int gfs2_ri_update(struct gfs2_inode *ip)
973 {
974 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
975 	int error;
976 
977 	do {
978 		error = read_rindex_entry(ip);
979 	} while (error == 0);
980 
981 	if (error < 0)
982 		return error;
983 
984 	set_rgrp_preferences(sdp);
985 
986 	sdp->sd_rindex_uptodate = 1;
987 	return 0;
988 }
989 
990 /**
991  * gfs2_rindex_update - Update the rindex if required
992  * @sdp: The GFS2 superblock
993  *
994  * We grab a lock on the rindex inode to make sure that it doesn't
995  * change whilst we are performing an operation. We keep this lock
996  * for quite long periods of time compared to other locks. This
997  * doesn't matter, since it is shared and it is very, very rarely
998  * accessed in the exclusive mode (i.e. only when expanding the filesystem).
999  *
1000  * This makes sure that we're using the latest copy of the resource index
1001  * special file, which might have been updated if someone expanded the
1002  * filesystem (via gfs2_grow utility), which adds new resource groups.
1003  *
 * Returns: 0 on success, error code otherwise
1005  */
1006 
1007 int gfs2_rindex_update(struct gfs2_sbd *sdp)
1008 {
1009 	struct gfs2_inode *ip = GFS2_I(sdp->sd_rindex);
1010 	struct gfs2_glock *gl = ip->i_gl;
1011 	struct gfs2_holder ri_gh;
1012 	int error = 0;
1013 	int unlock_required = 0;
1014 
1015 	/* Read new copy from disk if we don't have the latest */
1016 	if (!sdp->sd_rindex_uptodate) {
1017 		if (!gfs2_glock_is_locked_by_me(gl)) {
1018 			error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &ri_gh);
1019 			if (error)
1020 				return error;
1021 			unlock_required = 1;
1022 		}
1023 		if (!sdp->sd_rindex_uptodate)
1024 			error = gfs2_ri_update(ip);
1025 		if (unlock_required)
1026 			gfs2_glock_dq_uninit(&ri_gh);
1027 	}
1028 
1029 	return error;
1030 }
1031 
1032 static void gfs2_rgrp_in(struct gfs2_rgrpd *rgd, const void *buf)
1033 {
1034 	const struct gfs2_rgrp *str = buf;
1035 	u32 rg_flags;
1036 
1037 	rg_flags = be32_to_cpu(str->rg_flags);
1038 	rg_flags &= ~GFS2_RDF_MASK;
1039 	rgd->rd_flags &= GFS2_RDF_MASK;
1040 	rgd->rd_flags |= rg_flags;
1041 	rgd->rd_free = be32_to_cpu(str->rg_free);
1042 	rgd->rd_dinodes = be32_to_cpu(str->rg_dinodes);
1043 	rgd->rd_igeneration = be64_to_cpu(str->rg_igeneration);
1044 }
1045 
1046 static void gfs2_rgrp_out(struct gfs2_rgrpd *rgd, void *buf)
1047 {
1048 	struct gfs2_rgrp *str = buf;
1049 
1050 	str->rg_flags = cpu_to_be32(rgd->rd_flags & ~GFS2_RDF_MASK);
1051 	str->rg_free = cpu_to_be32(rgd->rd_free);
1052 	str->rg_dinodes = cpu_to_be32(rgd->rd_dinodes);
1053 	str->__pad = cpu_to_be32(0);
1054 	str->rg_igeneration = cpu_to_be64(rgd->rd_igeneration);
1055 	memset(&str->rg_reserved, 0, sizeof(str->rg_reserved));
1056 }
1057 
1058 static int gfs2_rgrp_lvb_valid(struct gfs2_rgrpd *rgd)
1059 {
1060 	struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
1061 	struct gfs2_rgrp *str = (struct gfs2_rgrp *)rgd->rd_bits[0].bi_bh->b_data;
1062 
1063 	if (rgl->rl_flags != str->rg_flags || rgl->rl_free != str->rg_free ||
1064 	    rgl->rl_dinodes != str->rg_dinodes ||
1065 	    rgl->rl_igeneration != str->rg_igeneration)
1066 		return 0;
1067 	return 1;
1068 }
1069 
1070 static void gfs2_rgrp_ondisk2lvb(struct gfs2_rgrp_lvb *rgl, const void *buf)
1071 {
1072 	const struct gfs2_rgrp *str = buf;
1073 
1074 	rgl->rl_magic = cpu_to_be32(GFS2_MAGIC);
1075 	rgl->rl_flags = str->rg_flags;
1076 	rgl->rl_free = str->rg_free;
1077 	rgl->rl_dinodes = str->rg_dinodes;
1078 	rgl->rl_igeneration = str->rg_igeneration;
1079 	rgl->__pad = 0UL;
1080 }
1081 
1082 static void update_rgrp_lvb_unlinked(struct gfs2_rgrpd *rgd, u32 change)
1083 {
1084 	struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
1085 	u32 unlinked = be32_to_cpu(rgl->rl_unlinked) + change;
1086 	rgl->rl_unlinked = cpu_to_be32(unlinked);
1087 }
1088 
1089 static u32 count_unlinked(struct gfs2_rgrpd *rgd)
1090 {
1091 	struct gfs2_bitmap *bi;
1092 	const u32 length = rgd->rd_length;
1093 	const u8 *buffer = NULL;
1094 	u32 i, goal, count = 0;
1095 
1096 	for (i = 0, bi = rgd->rd_bits; i < length; i++, bi++) {
1097 		goal = 0;
1098 		buffer = bi->bi_bh->b_data + bi->bi_offset;
1099 		WARN_ON(!buffer_uptodate(bi->bi_bh));
1100 		while (goal < bi->bi_len * GFS2_NBBY) {
1101 			goal = gfs2_bitfit(buffer, bi->bi_len, goal,
1102 					   GFS2_BLKST_UNLINKED);
1103 			if (goal == BFITNOENT)
1104 				break;
1105 			count++;
1106 			goal++;
1107 		}
1108 	}
1109 
1110 	return count;
1111 }
1112 
1113 
1114 /**
1115  * gfs2_rgrp_bh_get - Read in a RG's header and bitmaps
1116  * @rgd: the struct gfs2_rgrpd describing the RG to read in
1117  *
1118  * Read in all of a Resource Group's header and bitmap blocks.
 * Caller must eventually call gfs2_rgrp_brelse() to free the bitmaps.
1120  *
1121  * Returns: errno
1122  */
1123 
1124 static int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd)
1125 {
1126 	struct gfs2_sbd *sdp = rgd->rd_sbd;
1127 	struct gfs2_glock *gl = rgd->rd_gl;
1128 	unsigned int length = rgd->rd_length;
1129 	struct gfs2_bitmap *bi;
1130 	unsigned int x, y;
1131 	int error;
1132 
1133 	if (rgd->rd_bits[0].bi_bh != NULL)
1134 		return 0;
1135 
1136 	for (x = 0; x < length; x++) {
1137 		bi = rgd->rd_bits + x;
1138 		error = gfs2_meta_read(gl, rgd->rd_addr + x, 0, 0, &bi->bi_bh);
1139 		if (error)
1140 			goto fail;
1141 	}
1142 
1143 	for (y = length; y--;) {
1144 		bi = rgd->rd_bits + y;
1145 		error = gfs2_meta_wait(sdp, bi->bi_bh);
1146 		if (error)
1147 			goto fail;
1148 		if (gfs2_metatype_check(sdp, bi->bi_bh, y ? GFS2_METATYPE_RB :
1149 					      GFS2_METATYPE_RG)) {
1150 			error = -EIO;
1151 			goto fail;
1152 		}
1153 	}
1154 
1155 	if (!(rgd->rd_flags & GFS2_RDF_UPTODATE)) {
1156 		for (x = 0; x < length; x++)
1157 			clear_bit(GBF_FULL, &rgd->rd_bits[x].bi_flags);
1158 		gfs2_rgrp_in(rgd, (rgd->rd_bits[0].bi_bh)->b_data);
1159 		rgd->rd_flags |= (GFS2_RDF_UPTODATE | GFS2_RDF_CHECK);
1160 		rgd->rd_free_clone = rgd->rd_free;
1161 		/* max out the rgrp allocation failure point */
1162 		rgd->rd_extfail_pt = rgd->rd_free;
1163 	}
1164 	if (cpu_to_be32(GFS2_MAGIC) != rgd->rd_rgl->rl_magic) {
1165 		rgd->rd_rgl->rl_unlinked = cpu_to_be32(count_unlinked(rgd));
1166 		gfs2_rgrp_ondisk2lvb(rgd->rd_rgl,
1167 				     rgd->rd_bits[0].bi_bh->b_data);
1168 	}
1169 	else if (sdp->sd_args.ar_rgrplvb) {
1170 		if (!gfs2_rgrp_lvb_valid(rgd)){
1171 			gfs2_consist_rgrpd(rgd);
1172 			error = -EIO;
1173 			goto fail;
1174 		}
1175 		if (rgd->rd_rgl->rl_unlinked == 0)
1176 			rgd->rd_flags &= ~GFS2_RDF_CHECK;
1177 	}
1178 	return 0;
1179 
1180 fail:
1181 	while (x--) {
1182 		bi = rgd->rd_bits + x;
1183 		brelse(bi->bi_bh);
1184 		bi->bi_bh = NULL;
1185 		gfs2_assert_warn(sdp, !bi->bi_clone);
1186 	}
1187 
1188 	return error;
1189 }
1190 
1191 static int update_rgrp_lvb(struct gfs2_rgrpd *rgd)
1192 {
1193 	u32 rl_flags;
1194 
1195 	if (rgd->rd_flags & GFS2_RDF_UPTODATE)
1196 		return 0;
1197 
1198 	if (cpu_to_be32(GFS2_MAGIC) != rgd->rd_rgl->rl_magic)
1199 		return gfs2_rgrp_bh_get(rgd);
1200 
1201 	rl_flags = be32_to_cpu(rgd->rd_rgl->rl_flags);
1202 	rl_flags &= ~GFS2_RDF_MASK;
1203 	rgd->rd_flags &= GFS2_RDF_MASK;
1204 	rgd->rd_flags |= (rl_flags | GFS2_RDF_UPTODATE | GFS2_RDF_CHECK);
1205 	if (rgd->rd_rgl->rl_unlinked == 0)
1206 		rgd->rd_flags &= ~GFS2_RDF_CHECK;
1207 	rgd->rd_free = be32_to_cpu(rgd->rd_rgl->rl_free);
1208 	rgd->rd_free_clone = rgd->rd_free;
1209 	rgd->rd_dinodes = be32_to_cpu(rgd->rd_rgl->rl_dinodes);
1210 	rgd->rd_igeneration = be64_to_cpu(rgd->rd_rgl->rl_igeneration);
1211 	return 0;
1212 }
1213 
1214 int gfs2_rgrp_go_lock(struct gfs2_holder *gh)
1215 {
1216 	struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
1217 	struct gfs2_sbd *sdp = rgd->rd_sbd;
1218 
1219 	if (gh->gh_flags & GL_SKIP && sdp->sd_args.ar_rgrplvb)
1220 		return 0;
1221 	return gfs2_rgrp_bh_get(rgd);
1222 }
1223 
1224 /**
1225  * gfs2_rgrp_brelse - Release RG bitmaps read in with gfs2_rgrp_bh_get()
1226  * @rgd: The resource group
1227  *
1228  */
1229 
1230 void gfs2_rgrp_brelse(struct gfs2_rgrpd *rgd)
1231 {
1232 	int x, length = rgd->rd_length;
1233 
1234 	for (x = 0; x < length; x++) {
1235 		struct gfs2_bitmap *bi = rgd->rd_bits + x;
1236 		if (bi->bi_bh) {
1237 			brelse(bi->bi_bh);
1238 			bi->bi_bh = NULL;
1239 		}
1240 	}
}
1243 
1244 /**
1245  * gfs2_rgrp_go_unlock - Unlock a rgrp glock
1246  * @gh: The glock holder for the resource group
1247  *
1248  */
1249 
1250 void gfs2_rgrp_go_unlock(struct gfs2_holder *gh)
1251 {
1252 	struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
1253 	int demote_requested = test_bit(GLF_DEMOTE, &gh->gh_gl->gl_flags) |
1254 		test_bit(GLF_PENDING_DEMOTE, &gh->gh_gl->gl_flags);
1255 
1256 	if (rgd && demote_requested)
1257 		gfs2_rgrp_brelse(rgd);
1258 }
1259 
1260 int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
1261 			     struct buffer_head *bh,
1262 			     const struct gfs2_bitmap *bi, unsigned minlen, u64 *ptrimmed)
1263 {
1264 	struct super_block *sb = sdp->sd_vfs;
1265 	u64 blk;
1266 	sector_t start = 0;
1267 	sector_t nr_blks = 0;
1268 	int rv;
1269 	unsigned int x;
1270 	u32 trimmed = 0;
1271 	u8 diff;
1272 
1273 	for (x = 0; x < bi->bi_len; x++) {
1274 		const u8 *clone = bi->bi_clone ? bi->bi_clone : bi->bi_bh->b_data;
1275 		clone += bi->bi_offset;
1276 		clone += x;
1277 		if (bh) {
1278 			const u8 *orig = bh->b_data + bi->bi_offset + x;
1279 			diff = ~(*orig | (*orig >> 1)) & (*clone | (*clone >> 1));
1280 		} else {
1281 			diff = ~(*clone | (*clone >> 1));
1282 		}
1283 		diff &= 0x55;
1284 		if (diff == 0)
1285 			continue;
1286 		blk = offset + ((bi->bi_start + x) * GFS2_NBBY);
1287 		while(diff) {
1288 			if (diff & 1) {
1289 				if (nr_blks == 0)
1290 					goto start_new_extent;
1291 				if ((start + nr_blks) != blk) {
1292 					if (nr_blks >= minlen) {
1293 						rv = sb_issue_discard(sb,
1294 							start, nr_blks,
1295 							GFP_NOFS, 0);
1296 						if (rv)
1297 							goto fail;
1298 						trimmed += nr_blks;
1299 					}
1300 					nr_blks = 0;
1301 start_new_extent:
1302 					start = blk;
1303 				}
1304 				nr_blks++;
1305 			}
1306 			diff >>= 2;
1307 			blk++;
1308 		}
1309 	}
1310 	if (nr_blks >= minlen) {
1311 		rv = sb_issue_discard(sb, start, nr_blks, GFP_NOFS, 0);
1312 		if (rv)
1313 			goto fail;
1314 		trimmed += nr_blks;
1315 	}
1316 	if (ptrimmed)
1317 		*ptrimmed = trimmed;
1318 	return 0;
1319 
1320 fail:
1321 	if (sdp->sd_args.ar_discard)
1322 		fs_warn(sdp, "error %d on discard request, turning discards off for this filesystem", rv);
1323 	sdp->sd_args.ar_discard = 0;
1324 	return -EIO;
1325 }
1326 
1327 /**
1328  * gfs2_fitrim - Generate discard requests for unused bits of the filesystem
1329  * @filp: Any file on the filesystem
1330  * @argp: Pointer to the arguments (also used to pass result)
1331  *
1332  * Returns: 0 on success, otherwise error code
1333  */
1334 
1335 int gfs2_fitrim(struct file *filp, void __user *argp)
1336 {
1337 	struct inode *inode = file_inode(filp);
1338 	struct gfs2_sbd *sdp = GFS2_SB(inode);
1339 	struct request_queue *q = bdev_get_queue(sdp->sd_vfs->s_bdev);
1340 	struct buffer_head *bh;
1341 	struct gfs2_rgrpd *rgd;
1342 	struct gfs2_rgrpd *rgd_end;
1343 	struct gfs2_holder gh;
1344 	struct fstrim_range r;
1345 	int ret = 0;
1346 	u64 amt;
1347 	u64 trimmed = 0;
1348 	u64 start, end, minlen;
1349 	unsigned int x;
1350 	unsigned bs_shift = sdp->sd_sb.sb_bsize_shift;
1351 
1352 	if (!capable(CAP_SYS_ADMIN))
1353 		return -EPERM;
1354 
1355 	if (!blk_queue_discard(q))
1356 		return -EOPNOTSUPP;
1357 
1358 	if (copy_from_user(&r, argp, sizeof(r)))
1359 		return -EFAULT;
1360 
1361 	ret = gfs2_rindex_update(sdp);
1362 	if (ret)
1363 		return ret;
1364 
1365 	start = r.start >> bs_shift;
1366 	end = start + (r.len >> bs_shift);
1367 	minlen = max_t(u64, r.minlen,
1368 		       q->limits.discard_granularity) >> bs_shift;
1369 
1370 	if (end <= start || minlen > sdp->sd_max_rg_data)
1371 		return -EINVAL;
1372 
1373 	rgd = gfs2_blk2rgrpd(sdp, start, 0);
1374 	rgd_end = gfs2_blk2rgrpd(sdp, end, 0);
1375 
1376 	if ((gfs2_rgrpd_get_first(sdp) == gfs2_rgrpd_get_next(rgd_end))
1377 	    && (start > rgd_end->rd_data0 + rgd_end->rd_data))
1378 		return -EINVAL; /* start is beyond the end of the fs */
1379 
1380 	while (1) {
1381 
1382 		ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
1383 		if (ret)
1384 			goto out;
1385 
1386 		if (!(rgd->rd_flags & GFS2_RGF_TRIMMED)) {
1387 			/* Trim each bitmap in the rgrp */
1388 			for (x = 0; x < rgd->rd_length; x++) {
1389 				struct gfs2_bitmap *bi = rgd->rd_bits + x;
1390 				ret = gfs2_rgrp_send_discards(sdp,
1391 						rgd->rd_data0, NULL, bi, minlen,
1392 						&amt);
1393 				if (ret) {
1394 					gfs2_glock_dq_uninit(&gh);
1395 					goto out;
1396 				}
1397 				trimmed += amt;
1398 			}
1399 
1400 			/* Mark rgrp as having been trimmed */
1401 			ret = gfs2_trans_begin(sdp, RES_RG_HDR, 0);
1402 			if (ret == 0) {
1403 				bh = rgd->rd_bits[0].bi_bh;
1404 				rgd->rd_flags |= GFS2_RGF_TRIMMED;
1405 				gfs2_trans_add_meta(rgd->rd_gl, bh);
1406 				gfs2_rgrp_out(rgd, bh->b_data);
1407 				gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, bh->b_data);
1408 				gfs2_trans_end(sdp);
1409 			}
1410 		}
1411 		gfs2_glock_dq_uninit(&gh);
1412 
1413 		if (rgd == rgd_end)
1414 			break;
1415 
1416 		rgd = gfs2_rgrpd_get_next(rgd);
1417 	}
1418 
1419 out:
1420 	r.len = trimmed << bs_shift;
1421 	if (copy_to_user(argp, &r, sizeof(r)))
1422 		return -EFAULT;
1423 
1424 	return ret;
1425 }
1426 
1427 /**
1428  * rs_insert - insert a new multi-block reservation into the rgrp's rb_tree
1429  * @ip: the inode structure
1430  *
1431  */
1432 static void rs_insert(struct gfs2_inode *ip)
1433 {
1434 	struct rb_node **newn, *parent = NULL;
1435 	int rc;
1436 	struct gfs2_blkreserv *rs = &ip->i_res;
1437 	struct gfs2_rgrpd *rgd = rs->rs_rbm.rgd;
1438 	u64 fsblock = gfs2_rbm_to_block(&rs->rs_rbm);
1439 
1440 	BUG_ON(gfs2_rs_active(rs));
1441 
1442 	spin_lock(&rgd->rd_rsspin);
1443 	newn = &rgd->rd_rstree.rb_node;
1444 	while (*newn) {
1445 		struct gfs2_blkreserv *cur =
1446 			rb_entry(*newn, struct gfs2_blkreserv, rs_node);
1447 
1448 		parent = *newn;
1449 		rc = rs_cmp(fsblock, rs->rs_free, cur);
1450 		if (rc > 0)
1451 			newn = &((*newn)->rb_right);
1452 		else if (rc < 0)
1453 			newn = &((*newn)->rb_left);
1454 		else {
1455 			spin_unlock(&rgd->rd_rsspin);
1456 			WARN_ON(1);
1457 			return;
1458 		}
1459 	}
1460 
1461 	rb_link_node(&rs->rs_node, parent, newn);
1462 	rb_insert_color(&rs->rs_node, &rgd->rd_rstree);
1463 
1464 	/* Do our rgrp accounting for the reservation */
1465 	rgd->rd_reserved += rs->rs_free; /* blocks reserved */
1466 	spin_unlock(&rgd->rd_rsspin);
1467 	trace_gfs2_rs(rs, TRACE_RS_INSERT);
1468 }
1469 
1470 /**
1471  * rg_mblk_search - find a group of multiple free blocks to form a reservation
1472  * @rgd: the resource group descriptor
1473  * @ip: pointer to the inode for which we're reserving blocks
1474  * @ap: the allocation parameters
1475  *
1476  */
1477 
1478 static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
1479 			   const struct gfs2_alloc_parms *ap)
1480 {
1481 	struct gfs2_rbm rbm = { .rgd = rgd, };
1482 	u64 goal;
1483 	struct gfs2_blkreserv *rs = &ip->i_res;
1484 	u32 extlen;
1485 	u32 free_blocks = rgd->rd_free_clone - rgd->rd_reserved;
1486 	int ret;
1487 	struct inode *inode = &ip->i_inode;
1488 
1489 	if (S_ISDIR(inode->i_mode))
1490 		extlen = 1;
1491 	else {
1492 		extlen = max_t(u32, atomic_read(&rs->rs_sizehint), ap->target);
1493 		extlen = clamp(extlen, RGRP_RSRV_MINBLKS, free_blocks);
1494 	}
1495 	if ((rgd->rd_free_clone < rgd->rd_reserved) || (free_blocks < extlen))
1496 		return;
1497 
1498 	/* Find bitmap block that contains bits for goal block */
1499 	if (rgrp_contains_block(rgd, ip->i_goal))
1500 		goal = ip->i_goal;
1501 	else
1502 		goal = rgd->rd_last_alloc + rgd->rd_data0;
1503 
1504 	if (WARN_ON(gfs2_rbm_from_block(&rbm, goal)))
1505 		return;
1506 
1507 	ret = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, &extlen, ip, true);
1508 	if (ret == 0) {
1509 		rs->rs_rbm = rbm;
1510 		rs->rs_free = extlen;
1511 		rs->rs_inum = ip->i_no_addr;
1512 		rs_insert(ip);
1513 	} else {
1514 		if (goal == rgd->rd_last_alloc + rgd->rd_data0)
1515 			rgd->rd_last_alloc = 0;
1516 	}
1517 }
1518 
1519 /**
1520  * gfs2_next_unreserved_block - Return next block that is not reserved
1521  * @rgd: The resource group
1522  * @block: The starting block
1523  * @length: The required length
1524  * @ip: Ignore any reservations for this inode
1525  *
1526  * If the block does not appear in any reservation, then return the
1527  * block number unchanged. If it does appear in the reservation, then
1528  * keep looking through the tree of reservations in order to find the
1529  * first block number which is not reserved.
1530  */
1531 
1532 static u64 gfs2_next_unreserved_block(struct gfs2_rgrpd *rgd, u64 block,
1533 				      u32 length,
1534 				      const struct gfs2_inode *ip)
1535 {
1536 	struct gfs2_blkreserv *rs;
1537 	struct rb_node *n;
1538 	int rc;
1539 
1540 	spin_lock(&rgd->rd_rsspin);
1541 	n = rgd->rd_rstree.rb_node;
1542 	while (n) {
1543 		rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
1544 		rc = rs_cmp(block, length, rs);
1545 		if (rc < 0)
1546 			n = n->rb_left;
1547 		else if (rc > 0)
1548 			n = n->rb_right;
1549 		else
1550 			break;
1551 	}
1552 
1553 	if (n) {
1554 		while ((rs_cmp(block, length, rs) == 0) && (&ip->i_res != rs)) {
1555 			block = gfs2_rbm_to_block(&rs->rs_rbm) + rs->rs_free;
1556 			n = n->rb_right;
1557 			if (n == NULL)
1558 				break;
1559 			rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
1560 		}
1561 	}
1562 
1563 	spin_unlock(&rgd->rd_rsspin);
1564 	return block;
1565 }
1566 
1567 /**
1568  * gfs2_reservation_check_and_update - Check for reservations during block alloc
1569  * @rbm: The current position in the resource group
1570  * @ip: The inode for which we are searching for blocks
1571  * @minext: The minimum extent length
1572  * @maxext: A pointer to the maximum extent structure
1573  *
1574  * This checks the current position in the rgrp to see whether there is
1575  * a reservation covering this block. If not then this function is a
1576  * no-op. If there is, then the position is moved to the end of the
1577  * contiguous reservation(s) so that we are pointing at the first
1578  * non-reserved block.
1579  *
1580  * Returns: 0 if no reservation, 1 if @rbm has changed, otherwise an error
1581  */
1582 
1583 static int gfs2_reservation_check_and_update(struct gfs2_rbm *rbm,
1584 					     const struct gfs2_inode *ip,
1585 					     u32 minext,
1586 					     struct gfs2_extent *maxext)
1587 {
1588 	u64 block = gfs2_rbm_to_block(rbm);
1589 	u32 extlen = 1;
1590 	u64 nblock;
1591 	int ret;
1592 
1593 	/*
1594 	 * If we have a minimum extent length, then skip over any extent
1595 	 * which is less than the min extent length in size.
1596 	 */
1597 	if (minext) {
1598 		extlen = gfs2_free_extlen(rbm, minext);
1599 		if (extlen <= maxext->len)
1600 			goto fail;
1601 	}
1602 
1603 	/*
1604 	 * Check the extent which has been found against the reservations
1605 	 * and skip if parts of it are already reserved
1606 	 */
1607 	nblock = gfs2_next_unreserved_block(rbm->rgd, block, extlen, ip);
1608 	if (nblock == block) {
1609 		if (!minext || extlen >= minext)
1610 			return 0;
1611 
1612 		if (extlen > maxext->len) {
1613 			maxext->len = extlen;
1614 			maxext->rbm = *rbm;
1615 		}
1616 fail:
1617 		nblock = block + extlen;
1618 	}
1619 	ret = gfs2_rbm_from_block(rbm, nblock);
1620 	if (ret < 0)
1621 		return ret;
1622 	return 1;
1623 }
1624 
1625 /**
1626  * gfs2_rbm_find - Look for blocks of a particular state
1627  * @rbm: Value/result starting position and final position
1628  * @state: The state which we want to find
1629  * @minext: Pointer to the requested extent length (NULL for a single block)
1630  *          This is updated to be the actual reservation size.
1631  * @ip: If set, check for reservations
1632  * @nowrap: Stop looking at the end of the rgrp, rather than wrapping
1633  *          around until we've reached the starting point.
1634  *
1635  * Side effects:
1636  * - If looking for free blocks, we set GBF_FULL on each bitmap which
1637  *   has no free blocks in it.
1638  * - If looking for free blocks, we set rd_extfail_pt on each rgrp which
1639  *   has come up short on a free block search.
1640  *
1641  * Returns: 0 on success, -ENOSPC if there is no block of the requested state
1642  */
1643 
1644 static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
1645 			 const struct gfs2_inode *ip, bool nowrap)
1646 {
1647 	struct buffer_head *bh;
1648 	int initial_bii;
1649 	u32 initial_offset;
1650 	int first_bii = rbm->bii;
1651 	u32 first_offset = rbm->offset;
1652 	u32 offset;
1653 	u8 *buffer;
1654 	int n = 0;
1655 	int iters = rbm->rgd->rd_length;
1656 	int ret;
1657 	struct gfs2_bitmap *bi;
1658 	struct gfs2_extent maxext = { .rbm.rgd = rbm->rgd, };
1659 
1660 	/* If we are not starting at the beginning of a bitmap, then we
1661 	 * need to add one to the bitmap count to ensure that we search
1662 	 * the starting bitmap twice.
1663 	 */
1664 	if (rbm->offset != 0)
1665 		iters++;
1666 
1667 	while(1) {
1668 		bi = rbm_bi(rbm);
1669 		if (test_bit(GBF_FULL, &bi->bi_flags) &&
1670 		    (state == GFS2_BLKST_FREE))
1671 			goto next_bitmap;
1672 
1673 		bh = bi->bi_bh;
1674 		buffer = bh->b_data + bi->bi_offset;
1675 		WARN_ON(!buffer_uptodate(bh));
1676 		if (state != GFS2_BLKST_UNLINKED && bi->bi_clone)
1677 			buffer = bi->bi_clone + bi->bi_offset;
1678 		initial_offset = rbm->offset;
1679 		offset = gfs2_bitfit(buffer, bi->bi_len, rbm->offset, state);
1680 		if (offset == BFITNOENT)
1681 			goto bitmap_full;
1682 		rbm->offset = offset;
1683 		if (ip == NULL)
1684 			return 0;
1685 
1686 		initial_bii = rbm->bii;
1687 		ret = gfs2_reservation_check_and_update(rbm, ip,
1688 							minext ? *minext : 0,
1689 							&maxext);
1690 		if (ret == 0)
1691 			return 0;
1692 		if (ret > 0) {
1693 			n += (rbm->bii - initial_bii);
1694 			goto next_iter;
1695 		}
1696 		if (ret == -E2BIG) {
1697 			rbm->bii = 0;
1698 			rbm->offset = 0;
1699 			n += (rbm->bii - initial_bii);
1700 			goto res_covered_end_of_rgrp;
1701 		}
1702 		return ret;
1703 
1704 bitmap_full:	/* Mark bitmap as full and fall through */
1705 		if ((state == GFS2_BLKST_FREE) && initial_offset == 0)
1706 			set_bit(GBF_FULL, &bi->bi_flags);
1707 
1708 next_bitmap:	/* Find next bitmap in the rgrp */
1709 		rbm->offset = 0;
1710 		rbm->bii++;
1711 		if (rbm->bii == rbm->rgd->rd_length)
1712 			rbm->bii = 0;
1713 res_covered_end_of_rgrp:
1714 		if ((rbm->bii == 0) && nowrap)
1715 			break;
1716 		n++;
1717 next_iter:
1718 		if (n >= iters)
1719 			break;
1720 	}
1721 
1722 	if (minext == NULL || state != GFS2_BLKST_FREE)
1723 		return -ENOSPC;
1724 
1725 	/* If the extent was too small, and it's smaller than the smallest
1726 	   to have failed before, remember for future reference that it's
1727 	   useless to search this rgrp again for this amount or more. */
1728 	if ((first_offset == 0) && (first_bii == 0) &&
1729 	    (*minext < rbm->rgd->rd_extfail_pt))
1730 		rbm->rgd->rd_extfail_pt = *minext;
1731 
1732 	/* If the maximum extent we found is big enough to fulfill the
1733 	   minimum requirements, use it anyway. */
1734 	if (maxext.len) {
1735 		*rbm = maxext.rbm;
1736 		*minext = maxext.len;
1737 		return 0;
1738 	}
1739 
1740 	return -ENOSPC;
1741 }
1742 
1743 /**
1744  * try_rgrp_unlink - Look for any unlinked, allocated, but unused inodes
1745  * @rgd: The rgrp
1746  * @last_unlinked: block address of the last dinode we unlinked
1747  * @skip: block address we should explicitly not unlink
1748  *
 * Returns: nothing.  Any unlinked inodes found are passed to the
 *          delete workqueue for disposal.
1751  */
1752 
1753 static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip)
1754 {
1755 	u64 block;
1756 	struct gfs2_sbd *sdp = rgd->rd_sbd;
1757 	struct gfs2_glock *gl;
1758 	struct gfs2_inode *ip;
1759 	int error;
1760 	int found = 0;
1761 	struct gfs2_rbm rbm = { .rgd = rgd, .bii = 0, .offset = 0 };
1762 
1763 	while (1) {
1764 		down_write(&sdp->sd_log_flush_lock);
1765 		error = gfs2_rbm_find(&rbm, GFS2_BLKST_UNLINKED, NULL, NULL,
1766 				      true);
1767 		up_write(&sdp->sd_log_flush_lock);
1768 		if (error == -ENOSPC)
1769 			break;
1770 		if (WARN_ON_ONCE(error))
1771 			break;
1772 
1773 		block = gfs2_rbm_to_block(&rbm);
1774 		if (gfs2_rbm_from_block(&rbm, block + 1))
1775 			break;
1776 		if (*last_unlinked != NO_BLOCK && block <= *last_unlinked)
1777 			continue;
1778 		if (block == skip)
1779 			continue;
1780 		*last_unlinked = block;
1781 
1782 		error = gfs2_glock_get(sdp, block, &gfs2_iopen_glops, CREATE, &gl);
1783 		if (error)
1784 			continue;
1785 
1786 		/* If the inode is already in cache, we can ignore it here
1787 		 * because the existing inode disposal code will deal with
1788 		 * it when all refs have gone away. Accessing gl_object like
1789 		 * this is not safe in general. Here it is ok because we do
1790 		 * not dereference the pointer, and we only need an approx
1791 		 * answer to whether it is NULL or not.
1792 		 */
1793 		ip = gl->gl_object;
1794 
1795 		if (ip || queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
1796 			gfs2_glock_put(gl);
1797 		else
1798 			found++;
1799 
1800 		/* Limit reclaim to sensible number of tasks */
1801 		if (found > NR_CPUS)
1802 			return;
1803 	}
1804 
1805 	rgd->rd_flags &= ~GFS2_RDF_CHECK;
1806 	return;
1807 }
1808 
1809 /**
1810  * gfs2_rgrp_congested - Use stats to figure out whether an rgrp is congested
1811  * @rgd: The rgrp in question
1812  * @loops: An indication of how picky we can be (0=very, 1=less so)
1813  *
1814  * This function uses the recently added glock statistics in order to
 * figure out whether a particular resource group is suffering from
1816  * contention from multiple nodes. This is done purely on the basis
1817  * of timings, since this is the only data we have to work with and
1818  * our aim here is to reject a resource group which is highly contended
1819  * but (very important) not to do this too often in order to ensure that
 * we do not end up introducing fragmentation by changing resource
1821  * groups when not actually required.
1822  *
1823  * The calculation is fairly simple, we want to know whether the SRTTB
1824  * (i.e. smoothed round trip time for blocking operations) to acquire
1825  * the lock for this rgrp's glock is significantly greater than the
1826  * time taken for resource groups on average. We introduce a margin in
1827  * the form of the variable @var which is computed as the sum of the two
 * respective variances, and multiplied by a factor depending on @loops
1829  * and whether we have a lot of data to base the decision on. This is
1830  * then tested against the square difference of the means in order to
1831  * decide whether the result is statistically significant or not.
1832  *
1833  * Returns: A boolean verdict on the congestion status
1834  */
1835 
1836 static bool gfs2_rgrp_congested(const struct gfs2_rgrpd *rgd, int loops)
1837 {
1838 	const struct gfs2_glock *gl = rgd->rd_gl;
1839 	const struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
1840 	struct gfs2_lkstats *st;
1841 	u64 r_dcount, l_dcount;
1842 	u64 l_srttb, a_srttb = 0;
1843 	s64 srttb_diff;
1844 	u64 sqr_diff;
1845 	u64 var;
1846 	int cpu, nonzero = 0;
1847 
1848 	preempt_disable();
1849 	for_each_present_cpu(cpu) {
1850 		st = &per_cpu_ptr(sdp->sd_lkstats, cpu)->lkstats[LM_TYPE_RGRP];
1851 		if (st->stats[GFS2_LKS_SRTTB]) {
1852 			a_srttb += st->stats[GFS2_LKS_SRTTB];
1853 			nonzero++;
1854 		}
1855 	}
1856 	st = &this_cpu_ptr(sdp->sd_lkstats)->lkstats[LM_TYPE_RGRP];
1857 	if (nonzero)
1858 		do_div(a_srttb, nonzero);
1859 	r_dcount = st->stats[GFS2_LKS_DCOUNT];
1860 	var = st->stats[GFS2_LKS_SRTTVARB] +
1861 	      gl->gl_stats.stats[GFS2_LKS_SRTTVARB];
1862 	preempt_enable();
1863 
1864 	l_srttb = gl->gl_stats.stats[GFS2_LKS_SRTTB];
1865 	l_dcount = gl->gl_stats.stats[GFS2_LKS_DCOUNT];
1866 
1867 	if ((l_dcount < 1) || (r_dcount < 1) || (a_srttb == 0))
1868 		return false;
1869 
1870 	srttb_diff = a_srttb - l_srttb;
1871 	sqr_diff = srttb_diff * srttb_diff;
1872 
1873 	var *= 2;
1874 	if (l_dcount < 8 || r_dcount < 8)
1875 		var *= 2;
1876 	if (loops == 1)
1877 		var *= 2;
1878 
1879 	return ((srttb_diff < 0) && (sqr_diff > var));
1880 }
1881 
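/*
 * A worked example of the congestion test above, reduced to plain
 * integers with hypothetical sample values; the real code takes a_srttb
 * from the per-cpu stats and l_srttb/var from this rgrp's glock stats.
 */
#if 0	/* illustration only, never compiled */
static bool congested_example(void)
{
	s64 a_srttb = 100;		/* average SRTTB across rgrps */
	s64 l_srttb = 180;		/* this rgrp's SRTTB: slower */
	u64 var = 900;			/* sum of the two variances */
	s64 diff = a_srttb - l_srttb;	/* -80 */
	u64 sqr_diff = diff * diff;	/* 6400 */

	var *= 2;			/* base margin: 1800 */
	/* few samples (dcount < 8) or loops == 1 would double var again */
	return diff < 0 && sqr_diff > var;	/* 6400 > 1800: congested */
}
#endif
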
1882 /**
1883  * gfs2_rgrp_used_recently - test the time since the rgrp glock's last use
1884  * @rs: The block reservation with the rgrp to test
1885  * @msecs: The time limit in milliseconds
1886  *
1887  * Returns: true if more than @msecs have elapsed since the glock was last used
1888  */
1889 static bool gfs2_rgrp_used_recently(const struct gfs2_blkreserv *rs,
1890 				    u64 msecs)
1891 {
1892 	u64 tdiff;
1893 
1894 	tdiff = ktime_to_ns(ktime_sub(ktime_get_real(),
1895                             rs->rs_rbm.rgd->rd_gl->gl_dstamp));
1896 
1897 	return tdiff > (msecs * 1000 * 1000);
1898 }
1899 
1900 static u32 gfs2_orlov_skip(const struct gfs2_inode *ip)
1901 {
1902 	const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1903 	u32 skip;
1904 
1905 	get_random_bytes(&skip, sizeof(skip));
1906 	return skip % sdp->sd_rgrps;
1907 }
1908 
1909 static bool gfs2_select_rgrp(struct gfs2_rgrpd **pos, const struct gfs2_rgrpd *begin)
1910 {
1911 	struct gfs2_rgrpd *rgd = *pos;
1912 	struct gfs2_sbd *sdp = rgd->rd_sbd;
1913 
1914 	rgd = gfs2_rgrpd_get_next(rgd);
1915 	if (rgd == NULL)
1916 		rgd = gfs2_rgrpd_get_first(sdp);
1917 	*pos = rgd;
1918 	if (rgd != begin) /* If we didn't wrap */
1919 		return true;
1920 	return false;
1921 }
1922 
1923 /**
1924  * fast_to_acquire - determine if a resource group will be fast to acquire
1925  * @rgd: The resource group in question
1926  * If this is one of our preferred rgrps, it should be quicker to acquire,
1927  * because we tried to set ourselves up as dlm lock master.
1928  */
1929 static inline int fast_to_acquire(struct gfs2_rgrpd *rgd)
1930 {
1931 	struct gfs2_glock *gl = rgd->rd_gl;
1932 
1933 	if (gl->gl_state != LM_ST_UNLOCKED && list_empty(&gl->gl_holders) &&
1934 	    !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
1935 	    !test_bit(GLF_DEMOTE, &gl->gl_flags))
1936 		return 1;
1937 	if (rgd->rd_flags & GFS2_RDF_PREFERRED)
1938 		return 1;
1939 	return 0;
1940 }
1941 
1942 /**
1943  * gfs2_inplace_reserve - Reserve space in the filesystem
1944  * @ip: the inode to reserve space for
1945  * @ap: the allocation parameters
1946  *
1947  * We try our best to find an rgrp that has at least ap->target blocks
1948  * available. After a couple of passes (loops == 2), the prospects of finding
1949  * such an rgrp diminish. At this stage, we return the first rgrp that has
1950  * at least ap->min_target blocks available. Either way, we set ap->allowed to
1951  * the number of blocks available in the chosen rgrp.
1952  *
1953  * Returns: 0 on success,
1954  *          -ENOSPC if a suitable rgrp can't be found
1955  *          errno otherwise
1956  */
1957 
1958 int gfs2_inplace_reserve(struct gfs2_inode *ip, struct gfs2_alloc_parms *ap)
1959 {
1960 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1961 	struct gfs2_rgrpd *begin = NULL;
1962 	struct gfs2_blkreserv *rs = &ip->i_res;
1963 	int error = 0, rg_locked, flags = 0;
1964 	u64 last_unlinked = NO_BLOCK;
1965 	int loops = 0;
1966 	u32 skip = 0;
1967 
1968 	if (sdp->sd_args.ar_rgrplvb)
1969 		flags |= GL_SKIP;
1970 	if (gfs2_assert_warn(sdp, ap->target))
1971 		return -EINVAL;
1972 	if (gfs2_rs_active(rs)) {
1973 		begin = rs->rs_rbm.rgd;
1974 	} else if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, ip->i_goal)) {
1975 		rs->rs_rbm.rgd = begin = ip->i_rgd;
1976 	} else {
1977 		check_and_update_goal(ip);
1978 		rs->rs_rbm.rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal, 1);
1979 	}
1980 	if (S_ISDIR(ip->i_inode.i_mode) && (ap->aflags & GFS2_AF_ORLOV))
1981 		skip = gfs2_orlov_skip(ip);
1982 	if (rs->rs_rbm.rgd == NULL)
1983 		return -EBADSLT;
1984 
1985 	while (loops < 3) {
1986 		rg_locked = 1;
1987 
1988 		if (!gfs2_glock_is_locked_by_me(rs->rs_rbm.rgd->rd_gl)) {
1989 			rg_locked = 0;
1990 			if (skip && skip--)
1991 				goto next_rgrp;
1992 			if (!gfs2_rs_active(rs)) {
1993 				if (loops == 0 &&
1994 				    !fast_to_acquire(rs->rs_rbm.rgd))
1995 					goto next_rgrp;
1996 				if ((loops < 2) &&
1997 				    gfs2_rgrp_used_recently(rs, 1000) &&
1998 				    gfs2_rgrp_congested(rs->rs_rbm.rgd, loops))
1999 					goto next_rgrp;
2000 			}
2001 			error = gfs2_glock_nq_init(rs->rs_rbm.rgd->rd_gl,
2002 						   LM_ST_EXCLUSIVE, flags,
2003 						   &rs->rs_rgd_gh);
2004 			if (unlikely(error))
2005 				return error;
2006 			if (!gfs2_rs_active(rs) && (loops < 2) &&
2007 			    gfs2_rgrp_congested(rs->rs_rbm.rgd, loops))
2008 				goto skip_rgrp;
2009 			if (sdp->sd_args.ar_rgrplvb) {
2010 				error = update_rgrp_lvb(rs->rs_rbm.rgd);
2011 				if (unlikely(error)) {
2012 					gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
2013 					return error;
2014 				}
2015 			}
2016 		}
2017 
2018 		/* Skip unusable resource groups */
2019 		if ((rs->rs_rbm.rgd->rd_flags & (GFS2_RGF_NOALLOC |
2020 						 GFS2_RDF_ERROR)) ||
2021 		    (loops == 0 && ap->target > rs->rs_rbm.rgd->rd_extfail_pt))
2022 			goto skip_rgrp;
2023 
2024 		if (sdp->sd_args.ar_rgrplvb)
2025 			gfs2_rgrp_bh_get(rs->rs_rbm.rgd);
2026 
2027 		/* Get a reservation if we don't already have one */
2028 		if (!gfs2_rs_active(rs))
2029 			rg_mblk_search(rs->rs_rbm.rgd, ip, ap);
2030 
2031 		/* Skip rgrps when we can't get a reservation on first pass */
2032 		if (!gfs2_rs_active(rs) && (loops < 1))
2033 			goto check_rgrp;
2034 
2035 		/* If rgrp has enough free space, use it */
2036 		if (rs->rs_rbm.rgd->rd_free_clone >= ap->target ||
2037 		    (loops == 2 && ap->min_target &&
2038 		     rs->rs_rbm.rgd->rd_free_clone >= ap->min_target)) {
2039 			ip->i_rgd = rs->rs_rbm.rgd;
2040 			ap->allowed = ip->i_rgd->rd_free_clone;
2041 			return 0;
2042 		}
2043 check_rgrp:
2044 		/* Check for unlinked inodes which can be reclaimed */
2045 		if (rs->rs_rbm.rgd->rd_flags & GFS2_RDF_CHECK)
2046 			try_rgrp_unlink(rs->rs_rbm.rgd, &last_unlinked,
2047 					ip->i_no_addr);
2048 skip_rgrp:
2049 		/* Drop reservation, if we couldn't use reserved rgrp */
2050 		if (gfs2_rs_active(rs))
2051 			gfs2_rs_deltree(rs);
2052 
2053 		/* Unlock rgrp if required */
2054 		if (!rg_locked)
2055 			gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
2056 next_rgrp:
2057 		/* Find the next rgrp, and continue looking */
2058 		if (gfs2_select_rgrp(&rs->rs_rbm.rgd, begin))
2059 			continue;
2060 		if (skip)
2061 			continue;
2062 
2063 		/* If we've scanned all the rgrps, but found no free blocks
2064 		 * then this checks for some less likely conditions before
2065 		 * trying again.
2066 		 */
2067 		loops++;
2068 		/* Check that fs hasn't grown if writing to rindex */
2069 		if (ip == GFS2_I(sdp->sd_rindex) && !sdp->sd_rindex_uptodate) {
2070 			error = gfs2_ri_update(ip);
2071 			if (error)
2072 				return error;
2073 		}
2074 		/* Flushing the log may release space */
2075 		if (loops == 2)
2076 			gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
2077 	}
2078 
2079 	return -ENOSPC;
2080 }
2081 
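/*
 * A minimal sketch of how a caller typically pairs the reservation with
 * a transaction (modelled loosely on the write path; quota handling is
 * omitted and the journal reservation is simplified):
 */
#if 0	/* illustration only, never compiled */
static int example_alloc(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc_parms ap = { .target = 8, };	/* want 8 blocks */
	unsigned int n = 8;
	u64 block;
	int error;

	error = gfs2_inplace_reserve(ip, &ap);	/* pick and lock an rgrp */
	if (error)
		return error;
	error = gfs2_trans_begin(sdp, RES_DINODE + RES_RG_BIT, 0);
	if (error)
		goto out;
	error = gfs2_alloc_blocks(ip, &block, &n, false, NULL);
	gfs2_trans_end(sdp);
out:
	gfs2_inplace_release(ip);		/* drop the rgrp glock */
	return error;
}
#endif
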
2082 /**
2083  * gfs2_inplace_release - release an inplace reservation
2084  * @ip: the inode the reservation was taken out on
2085  *
2086  * Release a reservation made by gfs2_inplace_reserve().
2087  */
2088 
2089 void gfs2_inplace_release(struct gfs2_inode *ip)
2090 {
2091 	struct gfs2_blkreserv *rs = &ip->i_res;
2092 
2093 	if (gfs2_holder_initialized(&rs->rs_rgd_gh))
2094 		gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
2095 }
2096 
2097 /**
2098  * gfs2_get_block_type - Check that a block in an rgrp is of a given type
2099  * @rgd: the resource group holding the block
2100  * @block: the block number
2101  *
2102  * Returns: The block type (GFS2_BLKST_*)
2103  */
2104 
2105 static unsigned char gfs2_get_block_type(struct gfs2_rgrpd *rgd, u64 block)
2106 {
2107 	struct gfs2_rbm rbm = { .rgd = rgd, };
2108 	int ret;
2109 
2110 	ret = gfs2_rbm_from_block(&rbm, block);
2111 	WARN_ON_ONCE(ret != 0);
2112 
2113 	return gfs2_testbit(&rbm);
2114 }
2115 
2116 
2117 /**
2118  * gfs2_alloc_extent - allocate an extent from a given bitmap
2119  * @rbm: the resource group information
2120  * @dinode: TRUE if the first block we allocate is for a dinode
2121  * @n: The extent length (value/result)
2122  *
2123  * Add the bitmap buffer to the transaction.
2124  * Set the found bits to GFS2_BLKST_DINODE or GFS2_BLKST_USED as appropriate.
2125  */
2126 static void gfs2_alloc_extent(const struct gfs2_rbm *rbm, bool dinode,
2127 			     unsigned int *n)
2128 {
2129 	struct gfs2_rbm pos = { .rgd = rbm->rgd, };
2130 	const unsigned int elen = *n;
2131 	u64 block;
2132 	int ret;
2133 
2134 	*n = 1;
2135 	block = gfs2_rbm_to_block(rbm);
2136 	gfs2_trans_add_meta(rbm->rgd->rd_gl, rbm_bi(rbm)->bi_bh);
2137 	gfs2_setbit(rbm, true, dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
2138 	block++;
2139 	while (*n < elen) {
2140 		ret = gfs2_rbm_from_block(&pos, block);
2141 		if (ret || gfs2_testbit(&pos) != GFS2_BLKST_FREE)
2142 			break;
2143 		gfs2_trans_add_meta(pos.rgd->rd_gl, rbm_bi(&pos)->bi_bh);
2144 		gfs2_setbit(&pos, true, GFS2_BLKST_USED);
2145 		(*n)++;
2146 		block++;
2147 	}
2148 }
2149 
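/*
 * Usage note for the value/result parameter @n above, as a hypothetical
 * fragment: the caller asks for an extent of a given length and reads
 * back how much was actually allocated.
 */
#if 0	/* illustration only, never compiled */
static void alloc_extent_example(const struct gfs2_rbm *rbm)
{
	unsigned int n = 4;	/* ask for up to 4 contiguous blocks */

	gfs2_alloc_extent(rbm, false, &n);
	/* n now holds the allocated length (1..4); it comes back short
	   if the run of free blocks after rbm ends early */
}
#endif
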
2150 /**
2151  * rgblk_free - Change alloc state of given block(s)
2152  * @sdp: the filesystem
2153  * @bstart: the start of a run of blocks to free
2154  * @blen: the length of the block run (all must lie within ONE RG!)
2155  * @new_state: GFS2_BLKST_XXX, the new state to set the blocks to
2156  *
2157  * Returns:  Resource group containing the block(s)
2158  */
2159 
2160 static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart,
2161 				     u32 blen, unsigned char new_state)
2162 {
2163 	struct gfs2_rbm rbm;
2164 	struct gfs2_bitmap *bi, *bi_prev = NULL;
2165 
2166 	rbm.rgd = gfs2_blk2rgrpd(sdp, bstart, 1);
2167 	if (!rbm.rgd) {
2168 		if (gfs2_consist(sdp))
2169 			fs_err(sdp, "block = %llu\n", (unsigned long long)bstart);
2170 		return NULL;
2171 	}
2172 
2173 	gfs2_rbm_from_block(&rbm, bstart);
2174 	while (blen--) {
2175 		bi = rbm_bi(&rbm);
2176 		if (bi != bi_prev) {
2177 			if (!bi->bi_clone) {
2178 				bi->bi_clone = kmalloc(bi->bi_bh->b_size,
2179 						      GFP_NOFS | __GFP_NOFAIL);
2180 				memcpy(bi->bi_clone + bi->bi_offset,
2181 				       bi->bi_bh->b_data + bi->bi_offset,
2182 				       bi->bi_len);
2183 			}
2184 			gfs2_trans_add_meta(rbm.rgd->rd_gl, bi->bi_bh);
2185 			bi_prev = bi;
2186 		}
2187 		gfs2_setbit(&rbm, false, new_state);
2188 		gfs2_rbm_incr(&rbm);
2189 	}
2190 
2191 	return rbm.rgd;
2192 }
2193 
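/*
 * Design note on the lazy bi_clone copy above: the clone snapshots the
 * bitmap before this transaction dirties it, and free-block searches
 * read the snapshot in preference to the live bitmap. Blocks freed in
 * the current log cycle therefore still look in-use to the allocator
 * until it is safe to hand them out again. A sketch of the read side,
 * mirroring the buffer selection done in gfs2_rbm_find():
 */
#if 0	/* illustration only, never compiled */
static const u8 *search_buffer(const struct gfs2_bitmap *bi, u8 state)
{
	if (state != GFS2_BLKST_UNLINKED && bi->bi_clone)
		return bi->bi_clone + bi->bi_offset;
	return bi->bi_bh->b_data + bi->bi_offset;
}
#endif
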
2194 /**
2195  * gfs2_rgrp_dump - print out an rgrp
2196  * @seq: The iterator
2197  * @gl: The glock in question
2198  *
2199  */
2200 
2201 void gfs2_rgrp_dump(struct seq_file *seq, const struct gfs2_glock *gl)
2202 {
2203 	struct gfs2_rgrpd *rgd = gl->gl_object;
2204 	struct gfs2_blkreserv *trs;
2205 	const struct rb_node *n;
2206 
2207 	if (rgd == NULL)
2208 		return;
2209 	gfs2_print_dbg(seq, " R: n:%llu f:%02x b:%u/%u i:%u r:%u e:%u\n",
2210 		       (unsigned long long)rgd->rd_addr, rgd->rd_flags,
2211 		       rgd->rd_free, rgd->rd_free_clone, rgd->rd_dinodes,
2212 		       rgd->rd_reserved, rgd->rd_extfail_pt);
2213 	spin_lock(&rgd->rd_rsspin);
2214 	for (n = rb_first(&rgd->rd_rstree); n; n = rb_next(&trs->rs_node)) {
2215 		trs = rb_entry(n, struct gfs2_blkreserv, rs_node);
2216 		dump_rs(seq, trs);
2217 	}
2218 	spin_unlock(&rgd->rd_rsspin);
2219 }
2220 
2221 static void gfs2_rgrp_error(struct gfs2_rgrpd *rgd)
2222 {
2223 	struct gfs2_sbd *sdp = rgd->rd_sbd;
2224 	fs_warn(sdp, "rgrp %llu has an error, marking it readonly until umount\n",
2225 		(unsigned long long)rgd->rd_addr);
2226 	fs_warn(sdp, "umount on all nodes and run fsck.gfs2 to fix the error\n");
2227 	gfs2_rgrp_dump(NULL, rgd->rd_gl);
2228 	rgd->rd_flags |= GFS2_RDF_ERROR;
2229 }
2230 
2231 /**
2232  * gfs2_adjust_reservation - Adjust (or remove) a reservation after allocation
2233  * @ip: The inode we have just allocated blocks for
2234  * @rbm: The start of the allocated blocks
2235  * @len: The extent length
2236  *
2237  * Adjusts a reservation after an allocation has taken place. If the
2238  * reservation does not match the allocation, or if it is now empty
2239  * then it is removed.
2240  */
2241 
2242 static void gfs2_adjust_reservation(struct gfs2_inode *ip,
2243 				    const struct gfs2_rbm *rbm, unsigned len)
2244 {
2245 	struct gfs2_blkreserv *rs = &ip->i_res;
2246 	struct gfs2_rgrpd *rgd = rbm->rgd;
2247 	unsigned rlen;
2248 	u64 block;
2249 	int ret;
2250 
2251 	spin_lock(&rgd->rd_rsspin);
2252 	if (gfs2_rs_active(rs)) {
2253 		if (gfs2_rbm_eq(&rs->rs_rbm, rbm)) {
2254 			block = gfs2_rbm_to_block(rbm);
2255 			ret = gfs2_rbm_from_block(&rs->rs_rbm, block + len);
2256 			rlen = min(rs->rs_free, len);
2257 			rs->rs_free -= rlen;
2258 			rgd->rd_reserved -= rlen;
2259 			trace_gfs2_rs(rs, TRACE_RS_CLAIM);
2260 			if (rs->rs_free && !ret)
2261 				goto out;
2262 			/* We used up our block reservation, so we should
2263 			   reserve more blocks next time. */
2264 			atomic_add(RGRP_RSRV_ADDBLKS, &rs->rs_sizehint);
2265 		}
2266 		__rs_deltree(rs);
2267 	}
2268 out:
2269 	spin_unlock(&rgd->rd_rsspin);
2270 }
2271 
2272 /**
2273  * gfs2_set_alloc_start - Set starting point for block allocation
2274  * @rbm: The rbm which will be set to the required location
2275  * @ip: The gfs2 inode
2276  * @dinode: Flag to say if allocation includes a new inode
2277  *
2278  * This sets the starting point from the reservation if one is active
2279  * otherwise it falls back to guessing a start point based on the
2280  * inode's goal block or the last allocation point in the rgrp.
2281  */
2282 
2283 static void gfs2_set_alloc_start(struct gfs2_rbm *rbm,
2284 				 const struct gfs2_inode *ip, bool dinode)
2285 {
2286 	u64 goal;
2287 
2288 	if (gfs2_rs_active(&ip->i_res)) {
2289 		*rbm = ip->i_res.rs_rbm;
2290 		return;
2291 	}
2292 
2293 	if (!dinode && rgrp_contains_block(rbm->rgd, ip->i_goal))
2294 		goal = ip->i_goal;
2295 	else
2296 		goal = rbm->rgd->rd_last_alloc + rbm->rgd->rd_data0;
2297 
2298 	gfs2_rbm_from_block(rbm, goal);
2299 }
2300 
2301 /**
2302  * gfs2_alloc_blocks - Allocate one or more blocks of data and/or a dinode
2303  * @ip: the inode to allocate the block for
2304  * @bn: Used to return the starting block number
2305  * @nblocks: requested number of blocks/extent length (value/result)
2306  * @dinode: 1 if we're allocating a dinode block, else 0
2307  * @generation: the generation number of the inode
2308  *
2309  * Returns: 0 or error
2310  */
2311 
2312 int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
2313 		      bool dinode, u64 *generation)
2314 {
2315 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
2316 	struct buffer_head *dibh;
2317 	struct gfs2_rbm rbm = { .rgd = ip->i_rgd, };
2318 	unsigned int ndata;
2319 	u64 block; /* block, within the file system scope */
2320 	int error;
2321 
2322 	gfs2_set_alloc_start(&rbm, ip, dinode);
2323 	error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, NULL, ip, false);
2324 
2325 	if (error == -ENOSPC) {
2326 		gfs2_set_alloc_start(&rbm, ip, dinode);
2327 		error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, NULL, NULL, false);
2328 	}
2329 
2330 	/* Since all blocks are reserved in advance, this shouldn't happen */
2331 	if (error) {
2332 		fs_warn(sdp, "inum=%llu error=%d, nblocks=%u, full=%d fail_pt=%d\n",
2333 			(unsigned long long)ip->i_no_addr, error, *nblocks,
2334 			test_bit(GBF_FULL, &rbm.rgd->rd_bits->bi_flags),
2335 			rbm.rgd->rd_extfail_pt);
2336 		goto rgrp_error;
2337 	}
2338 
2339 	gfs2_alloc_extent(&rbm, dinode, nblocks);
2340 	block = gfs2_rbm_to_block(&rbm);
2341 	rbm.rgd->rd_last_alloc = block - rbm.rgd->rd_data0;
2342 	if (gfs2_rs_active(&ip->i_res))
2343 		gfs2_adjust_reservation(ip, &rbm, *nblocks);
2344 	ndata = *nblocks;
2345 	if (dinode)
2346 		ndata--;
2347 
2348 	if (!dinode) {
2349 		ip->i_goal = block + ndata - 1;
2350 		error = gfs2_meta_inode_buffer(ip, &dibh);
2351 		if (error == 0) {
2352 			struct gfs2_dinode *di =
2353 				(struct gfs2_dinode *)dibh->b_data;
2354 			gfs2_trans_add_meta(ip->i_gl, dibh);
2355 			di->di_goal_meta = di->di_goal_data =
2356 				cpu_to_be64(ip->i_goal);
2357 			brelse(dibh);
2358 		}
2359 	}
2360 	if (rbm.rgd->rd_free < *nblocks) {
2361 		pr_warn("nblocks=%u\n", *nblocks);
2362 		goto rgrp_error;
2363 	}
2364 
2365 	rbm.rgd->rd_free -= *nblocks;
2366 	if (dinode) {
2367 		rbm.rgd->rd_dinodes++;
2368 		*generation = rbm.rgd->rd_igeneration++;
2369 		if (*generation == 0)
2370 			*generation = rbm.rgd->rd_igeneration++;
2371 	}
2372 
2373 	gfs2_trans_add_meta(rbm.rgd->rd_gl, rbm.rgd->rd_bits[0].bi_bh);
2374 	gfs2_rgrp_out(rbm.rgd, rbm.rgd->rd_bits[0].bi_bh->b_data);
2375 	gfs2_rgrp_ondisk2lvb(rbm.rgd->rd_rgl, rbm.rgd->rd_bits[0].bi_bh->b_data);
2376 
2377 	gfs2_statfs_change(sdp, 0, -(s64)*nblocks, dinode ? 1 : 0);
2378 	if (dinode)
2379 		gfs2_trans_add_unrevoke(sdp, block, *nblocks);
2380 
2381 	gfs2_quota_change(ip, *nblocks, ip->i_inode.i_uid, ip->i_inode.i_gid);
2382 
2383 	rbm.rgd->rd_free_clone -= *nblocks;
2384 	trace_gfs2_block_alloc(ip, rbm.rgd, block, *nblocks,
2385 			       dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
2386 	*bn = block;
2387 	return 0;
2388 
2389 rgrp_error:
2390 	gfs2_rgrp_error(rbm.rgd);
2391 	return -EIO;
2392 }
2393 
2394 /**
2395  * __gfs2_free_blocks - free a contiguous run of block(s)
2396  * @ip: the inode these blocks are being freed from
2397  * @bstart: first block of a run of contiguous blocks
2398  * @blen: the length of the block run
2399  * @meta: 1 if the blocks represent metadata
2400  *
2401  */
2402 
2403 void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta)
2404 {
2405 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
2406 	struct gfs2_rgrpd *rgd;
2407 
2408 	rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE);
2409 	if (!rgd)
2410 		return;
2411 	trace_gfs2_block_alloc(ip, rgd, bstart, blen, GFS2_BLKST_FREE);
2412 	rgd->rd_free += blen;
2413 	rgd->rd_flags &= ~GFS2_RGF_TRIMMED;
2414 	gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
2415 	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
2416 	gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
2417 
2418 	/* Directories keep their data in the metadata address space */
2419 	if (meta || ip->i_depth)
2420 		gfs2_meta_wipe(ip, bstart, blen);
2421 }
2422 
2423 /**
2424  * gfs2_free_meta - free a contiguous run of metadata block(s)
2425  * @ip: the inode these blocks are being freed from
2426  * @bstart: first block of a run of contiguous blocks
2427  * @blen: the length of the block run
2428  *
2429  */
2430 
2431 void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen)
2432 {
2433 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
2434 
2435 	__gfs2_free_blocks(ip, bstart, blen, 1);
2436 	gfs2_statfs_change(sdp, 0, +blen, 0);
2437 	gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid);
2438 }
2439 
2440 void gfs2_unlink_di(struct inode *inode)
2441 {
2442 	struct gfs2_inode *ip = GFS2_I(inode);
2443 	struct gfs2_sbd *sdp = GFS2_SB(inode);
2444 	struct gfs2_rgrpd *rgd;
2445 	u64 blkno = ip->i_no_addr;
2446 
2447 	rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_UNLINKED);
2448 	if (!rgd)
2449 		return;
2450 	trace_gfs2_block_alloc(ip, rgd, blkno, 1, GFS2_BLKST_UNLINKED);
2451 	gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
2452 	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
2453 	gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
2454 	update_rgrp_lvb_unlinked(rgd, 1);
2455 }
2456 
2457 static void gfs2_free_uninit_di(struct gfs2_rgrpd *rgd, u64 blkno)
2458 {
2459 	struct gfs2_sbd *sdp = rgd->rd_sbd;
2460 	struct gfs2_rgrpd *tmp_rgd;
2461 
2462 	tmp_rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_FREE);
2463 	if (!tmp_rgd)
2464 		return;
2465 	gfs2_assert_withdraw(sdp, rgd == tmp_rgd);
2466 
2467 	if (!rgd->rd_dinodes)
2468 		gfs2_consist_rgrpd(rgd);
2469 	rgd->rd_dinodes--;
2470 	rgd->rd_free++;
2471 
2472 	gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
2473 	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
2474 	gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
2475 	update_rgrp_lvb_unlinked(rgd, -1);
2476 
2477 	gfs2_statfs_change(sdp, 0, +1, -1);
2478 }
2479 
2480 
2481 void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
2482 {
2483 	gfs2_free_uninit_di(rgd, ip->i_no_addr);
2484 	trace_gfs2_block_alloc(ip, rgd, ip->i_no_addr, 1, GFS2_BLKST_FREE);
2485 	gfs2_quota_change(ip, -1, ip->i_inode.i_uid, ip->i_inode.i_gid);
2486 	gfs2_meta_wipe(ip, ip->i_no_addr, 1);
2487 }
2488 
2489 /**
2490  * gfs2_check_blk_type - Check the type of a block
2491  * @sdp: The superblock
2492  * @no_addr: The block number to check
2493  * @type: The block type we are looking for
2494  *
2495  * Returns: 0 if the block type matches the expected type
2496  *          -ESTALE if it doesn't match
2497  *          or -ve errno if something went wrong while checking
2498  */
2499 
2500 int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type)
2501 {
2502 	struct gfs2_rgrpd *rgd;
2503 	struct gfs2_holder rgd_gh;
2504 	int error = -EINVAL;
2505 
2506 	rgd = gfs2_blk2rgrpd(sdp, no_addr, 1);
2507 	if (!rgd)
2508 		goto fail;
2509 
2510 	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_SHARED, 0, &rgd_gh);
2511 	if (error)
2512 		goto fail;
2513 
2514 	if (gfs2_get_block_type(rgd, no_addr) != type)
2515 		error = -ESTALE;
2516 
2517 	gfs2_glock_dq_uninit(&rgd_gh);
2518 fail:
2519 	return error;
2520 }
2521 
2522 /**
2523  * gfs2_rlist_add - add a RG to a list of RGs
2524  * @ip: the inode
2525  * @rlist: the list of resource groups
2526  * @block: the block
2527  *
2528  * Figure out what RG a block belongs to and add that RG to the list
2529  *
2530  * FIXME: Don't use NOFAIL
2531  *
2532  */
2533 
2534 void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist,
2535 		    u64 block)
2536 {
2537 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
2538 	struct gfs2_rgrpd *rgd;
2539 	struct gfs2_rgrpd **tmp;
2540 	unsigned int new_space;
2541 	unsigned int x;
2542 
2543 	if (gfs2_assert_warn(sdp, !rlist->rl_ghs))
2544 		return;
2545 
2546 	if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, block))
2547 		rgd = ip->i_rgd;
2548 	else
2549 		rgd = gfs2_blk2rgrpd(sdp, block, 1);
2550 	if (!rgd) {
2551 		fs_err(sdp, "rlist_add: no rgrp for block %llu\n", (unsigned long long)block);
2552 		return;
2553 	}
2554 	ip->i_rgd = rgd;
2555 
2556 	for (x = 0; x < rlist->rl_rgrps; x++)
2557 		if (rlist->rl_rgd[x] == rgd)
2558 			return;
2559 
2560 	if (rlist->rl_rgrps == rlist->rl_space) {
2561 		new_space = rlist->rl_space + 10;
2562 
2563 		tmp = kcalloc(new_space, sizeof(struct gfs2_rgrpd *),
2564 			      GFP_NOFS | __GFP_NOFAIL);
2565 
2566 		if (rlist->rl_rgd) {
2567 			memcpy(tmp, rlist->rl_rgd,
2568 			       rlist->rl_space * sizeof(struct gfs2_rgrpd *));
2569 			kfree(rlist->rl_rgd);
2570 		}
2571 
2572 		rlist->rl_space = new_space;
2573 		rlist->rl_rgd = tmp;
2574 	}
2575 
2576 	rlist->rl_rgd[rlist->rl_rgrps++] = rgd;
2577 }
2578 
2579 /**
2580  * gfs2_rlist_alloc - all RGs have been added to the rlist, now allocate
2581  *      and initialize an array of glock holders for them
2582  * @rlist: the list of resource groups
2583  * @state: the lock state to acquire the RG lock in
2584  *
2585  * FIXME: Don't use NOFAIL
2586  *
2587  */
2588 
2589 void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state)
2590 {
2591 	unsigned int x;
2592 
2593 	rlist->rl_ghs = kmalloc_array(rlist->rl_rgrps, sizeof(struct gfs2_holder),
2594 				      GFP_NOFS | __GFP_NOFAIL);
2595 	for (x = 0; x < rlist->rl_rgrps; x++)
2596 		gfs2_holder_init(rlist->rl_rgd[x]->rd_gl,
2597 				state, 0,
2598 				&rlist->rl_ghs[x]);
2599 }
2600 
2601 /**
2602  * gfs2_rlist_free - free a resource group list
2603  * @rlist: the list of resource groups
2604  *
2605  */
2606 
2607 void gfs2_rlist_free(struct gfs2_rgrp_list *rlist)
2608 {
2609 	unsigned int x;
2610 
2611 	kfree(rlist->rl_rgd);
2612 
2613 	if (rlist->rl_ghs) {
2614 		for (x = 0; x < rlist->rl_rgrps; x++)
2615 			gfs2_holder_uninit(&rlist->rl_ghs[x]);
2616 		kfree(rlist->rl_ghs);
2617 		rlist->rl_ghs = NULL;
2618 	}
2619 }
2620 
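/*
 * A minimal sketch of the rlist life cycle (modelled on the dealloc
 * paths; error handling trimmed). gfs2_glock_nq_m/dq_m acquire and drop
 * all of the holders built by gfs2_rlist_alloc():
 */
#if 0	/* illustration only, never compiled */
static int example_dealloc(struct gfs2_inode *ip, u64 block)
{
	struct gfs2_rgrp_list rlist;
	int error;

	memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));
	gfs2_rlist_add(ip, &rlist, block);	/* once per block to free */
	gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);
	error = gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs);
	if (!error) {
		/* ... free the blocks under the rgrp glocks ... */
		gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs);
	}
	gfs2_rlist_free(&rlist);
	return error;
}
#endif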
2621