xref: /openbmc/linux/fs/gfs2/rgrp.c (revision 643d1f7f)
1 /*
2  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
3  * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
4  *
5  * This copyrighted material is made available to anyone wishing to use,
6  * modify, copy, or redistribute it subject to the terms and conditions
7  * of the GNU General Public License version 2.
8  */
9 
10 #include <linux/slab.h>
11 #include <linux/spinlock.h>
12 #include <linux/completion.h>
13 #include <linux/buffer_head.h>
14 #include <linux/fs.h>
15 #include <linux/gfs2_ondisk.h>
16 #include <linux/lm_interface.h>
17 
18 #include "gfs2.h"
19 #include "incore.h"
20 #include "glock.h"
21 #include "glops.h"
22 #include "lops.h"
23 #include "meta_io.h"
24 #include "quota.h"
25 #include "rgrp.h"
26 #include "super.h"
27 #include "trans.h"
28 #include "util.h"
29 #include "log.h"
30 #include "inode.h"
31 #include "ops_address.h"
32 
33 #define BFITNOENT ((u32)~0)
34 #define NO_BLOCK ((u64)~0)
35 
36 /*
37  * These routines are used by the resource group code (rgrp.c)
38  * to keep track of block allocation.  Each block is represented by two
39  * bits.  So, each byte represents GFS2_NBBY (i.e. 4) blocks.
40  *
41  * 0 = Free
42  * 1 = Used (not metadata)
43  * 2 = Unlinked (still in use) inode
44  * 3 = Used (metadata)
45  */
46 
47 static const char valid_change[16] = {
48 	        /* current */
49 	/* n */ 0, 1, 1, 1,
50 	/* e */ 1, 0, 0, 0,
51 	/* w */ 0, 0, 0, 1,
52 	        1, 0, 0, 0
53 };
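
/*
 * Editorial sketch (not part of the original file): how a block's two-bit
 * state is located within a bitmap byte, and how the valid_change table
 * above is indexed.  The example_* names are illustrative only; the real
 * lookups are done inline by gfs2_setbit() and gfs2_testbit() below.
 */
static inline unsigned char example_block_state(const unsigned char *bitmap,
						u32 block)
{
	unsigned int bit = (block % GFS2_NBBY) * GFS2_BIT_SIZE;

	/* Two bits per block, GFS2_NBBY (4) blocks per byte, LSB first */
	return (bitmap[block / GFS2_NBBY] >> bit) & GFS2_BIT_MASK;
}

static inline int example_change_is_valid(unsigned char cur_state,
					  unsigned char new_state)
{
	/* Rows of valid_change[] are selected by the new state, columns by
	   the current state, exactly as gfs2_setbit() uses it. */
	return valid_change[new_state * 4 + cur_state];
}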
54 
55 static u32 rgblk_search(struct gfs2_rgrpd *rgd, u32 goal,
56                         unsigned char old_state, unsigned char new_state);
57 
58 /**
59  * gfs2_setbit - Set a bit in the bitmaps
60  * @rgd: the resource group descriptor
61  * @buffer: the buffer that holds the bitmaps
62  * @buflen: the length (in bytes) of the buffer
63  * @block: the block to set
64  * @new_state: the new state of the block
65  */
66 
67 static void gfs2_setbit(struct gfs2_rgrpd *rgd, unsigned char *buffer,
68 			unsigned int buflen, u32 block,
69 			unsigned char new_state)
70 {
71 	unsigned char *byte, *end, cur_state;
72 	unsigned int bit;
73 
74 	byte = buffer + (block / GFS2_NBBY);
75 	bit = (block % GFS2_NBBY) * GFS2_BIT_SIZE;
76 	end = buffer + buflen;
77 
78 	gfs2_assert(rgd->rd_sbd, byte < end);
79 
80 	cur_state = (*byte >> bit) & GFS2_BIT_MASK;
81 
82 	if (valid_change[new_state * 4 + cur_state]) {
83 		*byte ^= cur_state << bit;
84 		*byte |= new_state << bit;
85 	} else
86 		gfs2_consist_rgrpd(rgd);
87 }
88 
89 /**
90  * gfs2_testbit - test a bit in the bitmaps
91  * @rgd: the resource group descriptor
92  * @buffer: the buffer that holds the bitmaps
93  * @buflen: the length (in bytes) of the buffer
94  * @block: the block to read
95  */
96 
97 static unsigned char gfs2_testbit(struct gfs2_rgrpd *rgd, unsigned char *buffer,
98 				  unsigned int buflen, u32 block)
99 {
100 	unsigned char *byte, *end, cur_state;
101 	unsigned int bit;
102 
103 	byte = buffer + (block / GFS2_NBBY);
104 	bit = (block % GFS2_NBBY) * GFS2_BIT_SIZE;
105 	end = buffer + buflen;
106 
107 	gfs2_assert(rgd->rd_sbd, byte < end);
108 
109 	cur_state = (*byte >> bit) & GFS2_BIT_MASK;
110 
111 	return cur_state;
112 }
113 
114 /**
115  * gfs2_bitfit - Search an rgrp's bitmap buffer to find a bit-pair representing
116  *       a block in a given allocation state.
117  * @buffer: the buffer that holds the bitmaps
118  * @buflen: the length (in bytes) of the buffer
119  * @goal: start search at this block's bit-pair (within @buffer)
120  * @old_state: GFS2_BLKST_XXX the state of the block we're looking for.
121  *
122  * Scope of @goal and returned block number is only within this bitmap buffer,
123  * not entire rgrp or filesystem.  @buffer will be offset from the actual
124  * beginning of a bitmap block buffer, skipping any header structures.
125  *
126  * Returns: the block number (bitmap buffer scope) found, or BFITNOENT if none
127  */
128 
129 static u32 gfs2_bitfit(unsigned char *buffer, unsigned int buflen, u32 goal,
130 		       unsigned char old_state)
131 {
132 	unsigned char *byte;
133 	u32 blk = goal;
134 	unsigned int bit, bitlong;
135 	unsigned long *plong, plong55;
136 
137 	byte = buffer + (goal / GFS2_NBBY);
138 	plong = (unsigned long *)(buffer + (goal / GFS2_NBBY));
139 	bit = (goal % GFS2_NBBY) * GFS2_BIT_SIZE;
140 	bitlong = bit;
141 #if BITS_PER_LONG == 32
142 	plong55 = 0x55555555;
143 #else
144 	plong55 = 0x5555555555555555;
145 #endif
146 	while (byte < buffer + buflen) {
147 
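		/* Fast path: a long word in which every two-bit entry is
		   GFS2_BLKST_USED (binary 01) has the value 0x55...55, so
		   when searching for free blocks (old_state == 0) it can
		   be skipped a whole word at a time. */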
148 		if (bitlong == 0 && old_state == 0 && *plong == plong55) {
149 			plong++;
150 			byte += sizeof(unsigned long);
151 			blk += sizeof(unsigned long) * GFS2_NBBY;
152 			continue;
153 		}
154 		if (((*byte >> bit) & GFS2_BIT_MASK) == old_state)
155 			return blk;
156 		bit += GFS2_BIT_SIZE;
157 		if (bit >= 8) {
158 			bit = 0;
159 			byte++;
160 		}
161 		bitlong += GFS2_BIT_SIZE;
162 		if (bitlong >= sizeof(unsigned long) * 8) {
163 			bitlong = 0;
164 			plong++;
165 		}
166 
167 		blk++;
168 	}
169 
170 	return BFITNOENT;
171 }
172 
173 /**
174  * gfs2_bitcount - count the number of blocks in a certain state
175  * @rgd: the resource group descriptor
176  * @buffer: the buffer that holds the bitmaps
177  * @buflen: the length (in bytes) of the buffer
178  * @state: the state of the blocks we're counting
179  * Returns: The number of blocks in the given state
180  */
181 
182 static u32 gfs2_bitcount(struct gfs2_rgrpd *rgd, unsigned char *buffer,
183 			      unsigned int buflen, unsigned char state)
184 {
185 	unsigned char *byte = buffer;
186 	unsigned char *end = buffer + buflen;
187 	unsigned char state1 = state << 2;
188 	unsigned char state2 = state << 4;
189 	unsigned char state3 = state << 6;
190 	u32 count = 0;
191 
192 	for (; byte < end; byte++) {
193 		if (((*byte) & 0x03) == state)
194 			count++;
195 		if (((*byte) & 0x0C) == state1)
196 			count++;
197 		if (((*byte) & 0x30) == state2)
198 			count++;
199 		if (((*byte) & 0xC0) == state3)
200 			count++;
201 	}
202 
203 	return count;
204 }
205 
206 /**
207  * gfs2_rgrp_verify - Verify that a resource group is consistent
208  * @rgd: the rgrp to verify
209  *
210  * Checks that the bitmap counts agree with the counters in the rgrp header.
211  */
212 
213 void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd)
214 {
215 	struct gfs2_sbd *sdp = rgd->rd_sbd;
216 	struct gfs2_bitmap *bi = NULL;
217 	u32 length = rgd->rd_length;
218 	u32 count[4], tmp;
219 	int buf, x;
220 
221 	memset(count, 0, 4 * sizeof(u32));
222 
223 	/* Count # blocks in each of 4 possible allocation states */
224 	for (buf = 0; buf < length; buf++) {
225 		bi = rgd->rd_bits + buf;
226 		for (x = 0; x < 4; x++)
227 			count[x] += gfs2_bitcount(rgd,
228 						  bi->bi_bh->b_data +
229 						  bi->bi_offset,
230 						  bi->bi_len, x);
231 	}
232 
233 	if (count[0] != rgd->rd_rg.rg_free) {
234 		if (gfs2_consist_rgrpd(rgd))
235 			fs_err(sdp, "free data mismatch:  %u != %u\n",
236 			       count[0], rgd->rd_rg.rg_free);
237 		return;
238 	}
239 
240 	tmp = rgd->rd_data -
241 		rgd->rd_rg.rg_free -
242 		rgd->rd_rg.rg_dinodes;
243 	if (count[1] + count[2] != tmp) {
244 		if (gfs2_consist_rgrpd(rgd))
245 			fs_err(sdp, "used data mismatch:  %u != %u\n",
246 			       count[1], tmp);
247 		return;
248 	}
249 
250 	if (count[3] != rgd->rd_rg.rg_dinodes) {
251 		if (gfs2_consist_rgrpd(rgd))
252 			fs_err(sdp, "used metadata mismatch:  %u != %u\n",
253 			       count[3], rgd->rd_rg.rg_dinodes);
254 		return;
255 	}
256 
257 	if (count[2] > count[3]) {
258 		if (gfs2_consist_rgrpd(rgd))
259 			fs_err(sdp, "unlinked inodes > inodes:  %u\n",
260 			       count[2]);
261 		return;
262 	}
263 
264 }
265 
266 static inline int rgrp_contains_block(struct gfs2_rgrpd *rgd, u64 block)
267 {
268 	u64 first = rgd->rd_data0;
269 	u64 last = first + rgd->rd_data;
270 	return first <= block && block < last;
271 }
272 
273 /**
274  * gfs2_blk2rgrpd - Find resource group for a given data/meta block number
275  * @sdp: The GFS2 superblock
276  * @blk: The data block number
277  *
278  * Returns: The resource group, or NULL if not found
279  */
280 
281 struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk)
282 {
283 	struct gfs2_rgrpd *rgd;
284 
285 	spin_lock(&sdp->sd_rindex_spin);
286 
287 	list_for_each_entry(rgd, &sdp->sd_rindex_mru_list, rd_list_mru) {
288 		if (rgrp_contains_block(rgd, blk)) {
289 			list_move(&rgd->rd_list_mru, &sdp->sd_rindex_mru_list);
290 			spin_unlock(&sdp->sd_rindex_spin);
291 			return rgd;
292 		}
293 	}
294 
295 	spin_unlock(&sdp->sd_rindex_spin);
296 
297 	return NULL;
298 }
299 
300 /**
301  * gfs2_rgrpd_get_first - get the first Resource Group in the filesystem
302  * @sdp: The GFS2 superblock
303  *
304  * Returns: The first rgrp in the filesystem
305  */
306 
307 struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp)
308 {
309 	gfs2_assert(sdp, !list_empty(&sdp->sd_rindex_list));
310 	return list_entry(sdp->sd_rindex_list.next, struct gfs2_rgrpd, rd_list);
311 }
312 
313 /**
314  * gfs2_rgrpd_get_next - get the next RG
315  * @rgd: A RG
316  *
317  * Returns: The next rgrp
318  */
319 
320 struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd)
321 {
322 	if (rgd->rd_list.next == &rgd->rd_sbd->sd_rindex_list)
323 		return NULL;
324 	return list_entry(rgd->rd_list.next, struct gfs2_rgrpd, rd_list);
325 }
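
/*
 * Editorial sketch (not part of the original file): walking every resource
 * group with the two helpers above, here to total the data blocks known
 * from the rindex.  Assumes the rindex has already been read in (see
 * gfs2_rindex_hold() below) so that sd_rindex_list is populated; the
 * example_* name is illustrative only.
 */
static u64 example_total_rgrp_data(struct gfs2_sbd *sdp)
{
	struct gfs2_rgrpd *rgd;
	u64 total = 0;

	for (rgd = gfs2_rgrpd_get_first(sdp); rgd;
	     rgd = gfs2_rgrpd_get_next(rgd))
		total += rgd->rd_data;

	return total;
}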
326 
327 static void clear_rgrpdi(struct gfs2_sbd *sdp)
328 {
329 	struct list_head *head;
330 	struct gfs2_rgrpd *rgd;
331 	struct gfs2_glock *gl;
332 
333 	spin_lock(&sdp->sd_rindex_spin);
334 	sdp->sd_rindex_forward = NULL;
335 	head = &sdp->sd_rindex_recent_list;
336 	while (!list_empty(head)) {
337 		rgd = list_entry(head->next, struct gfs2_rgrpd, rd_recent);
338 		list_del(&rgd->rd_recent);
339 	}
340 	spin_unlock(&sdp->sd_rindex_spin);
341 
342 	head = &sdp->sd_rindex_list;
343 	while (!list_empty(head)) {
344 		rgd = list_entry(head->next, struct gfs2_rgrpd, rd_list);
345 		gl = rgd->rd_gl;
346 
347 		list_del(&rgd->rd_list);
348 		list_del(&rgd->rd_list_mru);
349 
350 		if (gl) {
351 			gl->gl_object = NULL;
352 			gfs2_glock_put(gl);
353 		}
354 
355 		kfree(rgd->rd_bits);
356 		kfree(rgd);
357 	}
358 }
359 
360 void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
361 {
362 	mutex_lock(&sdp->sd_rindex_mutex);
363 	clear_rgrpdi(sdp);
364 	mutex_unlock(&sdp->sd_rindex_mutex);
365 }
366 
367 static void gfs2_rindex_print(const struct gfs2_rgrpd *rgd)
368 {
369 	printk(KERN_INFO "  ri_addr = %llu\n", (unsigned long long)rgd->rd_addr);
370 	printk(KERN_INFO "  ri_length = %u\n", rgd->rd_length);
371 	printk(KERN_INFO "  ri_data0 = %llu\n", (unsigned long long)rgd->rd_data0);
372 	printk(KERN_INFO "  ri_data = %u\n", rgd->rd_data);
373 	printk(KERN_INFO "  ri_bitbytes = %u\n", rgd->rd_bitbytes);
374 }
375 
376 /**
377  * compute_bitstructs - Compute the bitmap sizes
378  * @rgd: The resource group descriptor
379  *
380  * Calculates bitmap descriptors, one for each block that contains bitmap data
381  *
382  * Returns: errno
383  */
384 
385 static int compute_bitstructs(struct gfs2_rgrpd *rgd)
386 {
387 	struct gfs2_sbd *sdp = rgd->rd_sbd;
388 	struct gfs2_bitmap *bi;
389 	u32 length = rgd->rd_length; /* # blocks in hdr & bitmap */
390 	u32 bytes_left, bytes;
391 	int x;
392 
393 	if (!length)
394 		return -EINVAL;
395 
396 	rgd->rd_bits = kcalloc(length, sizeof(struct gfs2_bitmap), GFP_NOFS);
397 	if (!rgd->rd_bits)
398 		return -ENOMEM;
399 
400 	bytes_left = rgd->rd_bitbytes;
401 
402 	for (x = 0; x < length; x++) {
403 		bi = rgd->rd_bits + x;
404 
405 		/* small rgrp; bitmap stored completely in header block */
406 		if (length == 1) {
407 			bytes = bytes_left;
408 			bi->bi_offset = sizeof(struct gfs2_rgrp);
409 			bi->bi_start = 0;
410 			bi->bi_len = bytes;
411 		/* header block */
412 		} else if (x == 0) {
413 			bytes = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_rgrp);
414 			bi->bi_offset = sizeof(struct gfs2_rgrp);
415 			bi->bi_start = 0;
416 			bi->bi_len = bytes;
417 		/* last block */
418 		} else if (x + 1 == length) {
419 			bytes = bytes_left;
420 			bi->bi_offset = sizeof(struct gfs2_meta_header);
421 			bi->bi_start = rgd->rd_bitbytes - bytes_left;
422 			bi->bi_len = bytes;
423 		/* other blocks */
424 		} else {
425 			bytes = sdp->sd_sb.sb_bsize -
426 				sizeof(struct gfs2_meta_header);
427 			bi->bi_offset = sizeof(struct gfs2_meta_header);
428 			bi->bi_start = rgd->rd_bitbytes - bytes_left;
429 			bi->bi_len = bytes;
430 		}
431 
432 		bytes_left -= bytes;
433 	}
434 
435 	if (bytes_left) {
436 		gfs2_consist_rgrpd(rgd);
437 		return -EIO;
438 	}
439 	bi = rgd->rd_bits + (length - 1);
440 	if ((bi->bi_start + bi->bi_len) * GFS2_NBBY != rgd->rd_data) {
441 		if (gfs2_consist_rgrpd(rgd)) {
442 			gfs2_rindex_print(rgd);
443 			fs_err(sdp, "start=%u len=%u offset=%u\n",
444 			       bi->bi_start, bi->bi_len, bi->bi_offset);
445 		}
446 		return -EIO;
447 	}
448 
449 	return 0;
450 }
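
/*
 * Editorial sketch (not part of the original file): locating the bitmap
 * buffer that covers a given rgrp-relative block, using the bi_start and
 * bi_len values computed above.  This is the same walk that
 * gfs2_get_block_type() and rgblk_search() below perform inline; the
 * example_* name is illustrative only.
 */
static struct gfs2_bitmap *example_find_bitmap(struct gfs2_rgrpd *rgd,
					       u32 rgrp_block)
{
	u32 length = rgd->rd_length;
	u32 buf;

	for (buf = 0; buf < length; buf++) {
		struct gfs2_bitmap *bi = rgd->rd_bits + buf;

		/* Each byte of bitmap data covers GFS2_NBBY (4) blocks */
		if (rgrp_block < (bi->bi_start + bi->bi_len) * GFS2_NBBY)
			return bi;
	}

	return NULL; /* block lies outside this resource group */
}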
451 
452 /**
453  * gfs2_ri_total - Total up the file system space, according to the rindex.
454  * @sdp: the filesystem
455  */
456 u64 gfs2_ri_total(struct gfs2_sbd *sdp)
457 {
458 	u64 total_data = 0;
459 	struct inode *inode = sdp->sd_rindex;
460 	struct gfs2_inode *ip = GFS2_I(inode);
461 	char buf[sizeof(struct gfs2_rindex)];
462 	struct file_ra_state ra_state;
463 	int error, rgrps;
464 
465 	mutex_lock(&sdp->sd_rindex_mutex);
466 	file_ra_state_init(&ra_state, inode->i_mapping);
467 	for (rgrps = 0;; rgrps++) {
468 		loff_t pos = rgrps * sizeof(struct gfs2_rindex);
469 
470 		if (pos + sizeof(struct gfs2_rindex) > ip->i_di.di_size)
471 			break;
472 		error = gfs2_internal_read(ip, &ra_state, buf, &pos,
473 					   sizeof(struct gfs2_rindex));
474 		if (error != sizeof(struct gfs2_rindex))
475 			break;
476 		total_data += be32_to_cpu(((struct gfs2_rindex *)buf)->ri_data);
477 	}
478 	mutex_unlock(&sdp->sd_rindex_mutex);
479 	return total_data;
480 }
481 
482 static void gfs2_rindex_in(struct gfs2_rgrpd *rgd, const void *buf)
483 {
484 	const struct gfs2_rindex *str = buf;
485 
486 	rgd->rd_addr = be64_to_cpu(str->ri_addr);
487 	rgd->rd_length = be32_to_cpu(str->ri_length);
488 	rgd->rd_data0 = be64_to_cpu(str->ri_data0);
489 	rgd->rd_data = be32_to_cpu(str->ri_data);
490 	rgd->rd_bitbytes = be32_to_cpu(str->ri_bitbytes);
491 }
492 
493 /**
494  * read_rindex_entry - Pull in a new resource index entry from the disk
495  * @ip: pointer to the rindex inode
496  * @ra_state: readahead state for the rindex inode's address space
497  * Returns: 0 on success, error code otherwise
498  */
499 
500 static int read_rindex_entry(struct gfs2_inode *ip,
501 			     struct file_ra_state *ra_state)
502 {
503 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
504 	loff_t pos = sdp->sd_rgrps * sizeof(struct gfs2_rindex);
505 	char buf[sizeof(struct gfs2_rindex)];
506 	int error;
507 	struct gfs2_rgrpd *rgd;
508 
509 	error = gfs2_internal_read(ip, ra_state, buf, &pos,
510 				   sizeof(struct gfs2_rindex));
511 	if (!error)
512 		return 0;
513 	if (error != sizeof(struct gfs2_rindex)) {
514 		if (error > 0)
515 			error = -EIO;
516 		return error;
517 	}
518 
519 	rgd = kzalloc(sizeof(struct gfs2_rgrpd), GFP_NOFS);
520 	error = -ENOMEM;
521 	if (!rgd)
522 		return error;
523 
524 	mutex_init(&rgd->rd_mutex);
525 	lops_init_le(&rgd->rd_le, &gfs2_rg_lops);
526 	rgd->rd_sbd = sdp;
527 
528 	list_add_tail(&rgd->rd_list, &sdp->sd_rindex_list);
529 	list_add_tail(&rgd->rd_list_mru, &sdp->sd_rindex_mru_list);
530 
531 	gfs2_rindex_in(rgd, buf);
532 	error = compute_bitstructs(rgd);
533 	if (error)
534 		return error;
535 
536 	error = gfs2_glock_get(sdp, rgd->rd_addr,
537 			       &gfs2_rgrp_glops, CREATE, &rgd->rd_gl);
538 	if (error)
539 		return error;
540 
541 	rgd->rd_gl->gl_object = rgd;
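	/* Start one version behind the glock so that gfs2_rgrp_bh_get()
	   reads the rgrp header from disk the first time it sees this rgrp */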
542 	rgd->rd_rg_vn = rgd->rd_gl->gl_vn - 1;
543 	rgd->rd_flags |= GFS2_RDF_CHECK;
544 	return error;
545 }
546 
547 /**
548  * gfs2_ri_update - Pull in a new resource index from the disk
549  * @ip: pointer to the rindex inode
550  *
551  * Returns: 0 on successful update, error code otherwise
552  */
553 
554 static int gfs2_ri_update(struct gfs2_inode *ip)
555 {
556 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
557 	struct inode *inode = &ip->i_inode;
558 	struct file_ra_state ra_state;
559 	u64 rgrp_count = ip->i_di.di_size;
560 	int error;
561 
562 	if (do_div(rgrp_count, sizeof(struct gfs2_rindex))) {
563 		gfs2_consist_inode(ip);
564 		return -EIO;
565 	}
566 
567 	clear_rgrpdi(sdp);
568 
569 	file_ra_state_init(&ra_state, inode->i_mapping);
570 	for (sdp->sd_rgrps = 0; sdp->sd_rgrps < rgrp_count; sdp->sd_rgrps++) {
571 		error = read_rindex_entry(ip, &ra_state);
572 		if (error) {
573 			clear_rgrpdi(sdp);
574 			return error;
575 		}
576 	}
577 
578 	sdp->sd_rindex_vn = ip->i_gl->gl_vn;
579 	return 0;
580 }
581 
582 /**
583  * gfs2_ri_update_special - Pull in a new resource index from the disk
584  * @ip: pointer to the rindex inode
585  *
586  * This is a special version that's safe to call from gfs2_inplace_reserve_i.
587  * In this case we know that we don't have any resource groups in
588  * memory yet.
589  *
590  * Returns: 0 on successful update, error code otherwise
591  */
592 static int gfs2_ri_update_special(struct gfs2_inode *ip)
593 {
594 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
595 	struct inode *inode = &ip->i_inode;
596 	struct file_ra_state ra_state;
597 	int error;
598 
599 	file_ra_state_init(&ra_state, inode->i_mapping);
600 	for (sdp->sd_rgrps = 0;; sdp->sd_rgrps++) {
601 		/* Ignore partials */
602 		if ((sdp->sd_rgrps + 1) * sizeof(struct gfs2_rindex) >
603 		    ip->i_di.di_size)
604 			break;
605 		error = read_rindex_entry(ip, &ra_state);
606 		if (error) {
607 			clear_rgrpdi(sdp);
608 			return error;
609 		}
610 	}
611 
612 	sdp->sd_rindex_vn = ip->i_gl->gl_vn;
613 	return 0;
614 }
615 
616 /**
617  * gfs2_rindex_hold - Grab a lock on the rindex
618  * @sdp: The GFS2 superblock
619  * @ri_gh: the glock holder
620  *
621  * We grab a lock on the rindex inode to make sure that it doesn't
622  * change whilst we are performing an operation. We keep this lock
623  * for quite long periods of time compared to other locks. This
624  * doesn't matter, since it is shared and it is very, very rarely
625  * accessed in the exclusive mode (i.e. only when expanding the filesystem).
626  *
627  * This makes sure that we're using the latest copy of the resource index
628  * special file, which might have been updated if someone expanded the
629  * filesystem (via gfs2_grow utility), which adds new resource groups.
630  *
631  * Returns: 0 on success, error code otherwise
632  */
633 
634 int gfs2_rindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ri_gh)
635 {
636 	struct gfs2_inode *ip = GFS2_I(sdp->sd_rindex);
637 	struct gfs2_glock *gl = ip->i_gl;
638 	int error;
639 
640 	error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, ri_gh);
641 	if (error)
642 		return error;
643 
644 	/* Read new copy from disk if we don't have the latest */
645 	if (sdp->sd_rindex_vn != gl->gl_vn) {
646 		mutex_lock(&sdp->sd_rindex_mutex);
647 		if (sdp->sd_rindex_vn != gl->gl_vn) {
648 			error = gfs2_ri_update(ip);
649 			if (error)
650 				gfs2_glock_dq_uninit(ri_gh);
651 		}
652 		mutex_unlock(&sdp->sd_rindex_mutex);
653 	}
654 
655 	return error;
656 }
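
/*
 * Editorial sketch (not part of the original file): typical read-side use
 * of gfs2_rindex_hold() above, mapping a block to a resource group while
 * the rindex is guaranteed not to change underneath us.  The example_*
 * name is illustrative only.
 */
static int example_block_is_in_rgrp(struct gfs2_sbd *sdp, u64 block)
{
	struct gfs2_holder ri_gh;
	int found;

	if (gfs2_rindex_hold(sdp, &ri_gh))
		return 0;
	found = (gfs2_blk2rgrpd(sdp, block) != NULL);
	gfs2_glock_dq_uninit(&ri_gh);

	return found;
}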
657 
658 static void gfs2_rgrp_in(struct gfs2_rgrp_host *rg, const void *buf)
659 {
660 	const struct gfs2_rgrp *str = buf;
661 
662 	rg->rg_flags = be32_to_cpu(str->rg_flags);
663 	rg->rg_free = be32_to_cpu(str->rg_free);
664 	rg->rg_dinodes = be32_to_cpu(str->rg_dinodes);
665 	rg->rg_igeneration = be64_to_cpu(str->rg_igeneration);
666 }
667 
668 static void gfs2_rgrp_out(const struct gfs2_rgrp_host *rg, void *buf)
669 {
670 	struct gfs2_rgrp *str = buf;
671 
672 	str->rg_flags = cpu_to_be32(rg->rg_flags);
673 	str->rg_free = cpu_to_be32(rg->rg_free);
674 	str->rg_dinodes = cpu_to_be32(rg->rg_dinodes);
675 	str->__pad = cpu_to_be32(0);
676 	str->rg_igeneration = cpu_to_be64(rg->rg_igeneration);
677 	memset(&str->rg_reserved, 0, sizeof(str->rg_reserved));
678 }
679 
680 /**
681  * gfs2_rgrp_bh_get - Read in a RG's header and bitmaps
682  * @rgd: the struct gfs2_rgrpd describing the RG to read in
683  *
684  * Read in all of a Resource Group's header and bitmap blocks.
685  * Caller must eventually call gfs2_rgrp_bh_put() to release the bitmaps.
686  *
687  * Returns: errno
688  */
689 
690 int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd)
691 {
692 	struct gfs2_sbd *sdp = rgd->rd_sbd;
693 	struct gfs2_glock *gl = rgd->rd_gl;
694 	unsigned int length = rgd->rd_length;
695 	struct gfs2_bitmap *bi;
696 	unsigned int x, y;
697 	int error;
698 
699 	mutex_lock(&rgd->rd_mutex);
700 
701 	spin_lock(&sdp->sd_rindex_spin);
702 	if (rgd->rd_bh_count) {
703 		rgd->rd_bh_count++;
704 		spin_unlock(&sdp->sd_rindex_spin);
705 		mutex_unlock(&rgd->rd_mutex);
706 		return 0;
707 	}
708 	spin_unlock(&sdp->sd_rindex_spin);
709 
710 	for (x = 0; x < length; x++) {
711 		bi = rgd->rd_bits + x;
712 		error = gfs2_meta_read(gl, rgd->rd_addr + x, 0, &bi->bi_bh);
713 		if (error)
714 			goto fail;
715 	}
716 
717 	for (y = length; y--;) {
718 		bi = rgd->rd_bits + y;
719 		error = gfs2_meta_wait(sdp, bi->bi_bh);
720 		if (error)
721 			goto fail;
722 		if (gfs2_metatype_check(sdp, bi->bi_bh, y ? GFS2_METATYPE_RB :
723 					      GFS2_METATYPE_RG)) {
724 			error = -EIO;
725 			goto fail;
726 		}
727 	}
728 
729 	if (rgd->rd_rg_vn != gl->gl_vn) {
730 		gfs2_rgrp_in(&rgd->rd_rg, (rgd->rd_bits[0].bi_bh)->b_data);
731 		rgd->rd_rg_vn = gl->gl_vn;
732 	}
733 
734 	spin_lock(&sdp->sd_rindex_spin);
735 	rgd->rd_free_clone = rgd->rd_rg.rg_free;
736 	rgd->rd_bh_count++;
737 	spin_unlock(&sdp->sd_rindex_spin);
738 
739 	mutex_unlock(&rgd->rd_mutex);
740 
741 	return 0;
742 
743 fail:
744 	while (x--) {
745 		bi = rgd->rd_bits + x;
746 		brelse(bi->bi_bh);
747 		bi->bi_bh = NULL;
748 		gfs2_assert_warn(sdp, !bi->bi_clone);
749 	}
750 	mutex_unlock(&rgd->rd_mutex);
751 
752 	return error;
753 }
754 
755 void gfs2_rgrp_bh_hold(struct gfs2_rgrpd *rgd)
756 {
757 	struct gfs2_sbd *sdp = rgd->rd_sbd;
758 
759 	spin_lock(&sdp->sd_rindex_spin);
760 	gfs2_assert_warn(rgd->rd_sbd, rgd->rd_bh_count);
761 	rgd->rd_bh_count++;
762 	spin_unlock(&sdp->sd_rindex_spin);
763 }
764 
765 /**
766  * gfs2_rgrp_bh_put - Release RG bitmaps read in with gfs2_rgrp_bh_get()
767  * @rgd: the struct gfs2_rgrpd describing the RG to release
768  *
769  */
770 
771 void gfs2_rgrp_bh_put(struct gfs2_rgrpd *rgd)
772 {
773 	struct gfs2_sbd *sdp = rgd->rd_sbd;
774 	int x, length = rgd->rd_length;
775 
776 	spin_lock(&sdp->sd_rindex_spin);
777 	gfs2_assert_warn(rgd->rd_sbd, rgd->rd_bh_count);
778 	if (--rgd->rd_bh_count) {
779 		spin_unlock(&sdp->sd_rindex_spin);
780 		return;
781 	}
782 
783 	for (x = 0; x < length; x++) {
784 		struct gfs2_bitmap *bi = rgd->rd_bits + x;
785 		kfree(bi->bi_clone);
786 		bi->bi_clone = NULL;
787 		brelse(bi->bi_bh);
788 		bi->bi_bh = NULL;
789 	}
790 
791 	spin_unlock(&sdp->sd_rindex_spin);
792 }
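
/*
 * Editorial sketch (not part of the original file): the reference pattern
 * for the two functions above.  Every successful gfs2_rgrp_bh_get() (or
 * gfs2_rgrp_bh_hold()) must be balanced by a gfs2_rgrp_bh_put(); the
 * bitmap buffers are only released when the count drops to zero.
 */
static int example_with_rgrp_bitmaps(struct gfs2_rgrpd *rgd)
{
	int error = gfs2_rgrp_bh_get(rgd);

	if (error)
		return error;

	/* ... rgd->rd_bits[x].bi_bh now hold the bitmap data ... */

	gfs2_rgrp_bh_put(rgd);
	return 0;
}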
793 
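/**
 * gfs2_rgrp_repolish_clones - Resync the clone bitmaps with the real bitmaps
 * @rgd: the resource group
 *
 * Copies the current on-disk bitmap data over any clone bitmaps and resets
 * the cloned free-block count from the rgrp header.
 */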
794 void gfs2_rgrp_repolish_clones(struct gfs2_rgrpd *rgd)
795 {
796 	struct gfs2_sbd *sdp = rgd->rd_sbd;
797 	unsigned int length = rgd->rd_length;
798 	unsigned int x;
799 
800 	for (x = 0; x < length; x++) {
801 		struct gfs2_bitmap *bi = rgd->rd_bits + x;
802 		if (!bi->bi_clone)
803 			continue;
804 		memcpy(bi->bi_clone + bi->bi_offset,
805 		       bi->bi_bh->b_data + bi->bi_offset, bi->bi_len);
806 	}
807 
808 	spin_lock(&sdp->sd_rindex_spin);
809 	rgd->rd_free_clone = rgd->rd_rg.rg_free;
810 	spin_unlock(&sdp->sd_rindex_spin);
811 }
812 
813 /**
814  * gfs2_alloc_get - get the struct gfs2_alloc structure for an inode
815  * @ip: the incore GFS2 inode structure
816  *
817  * Returns: the struct gfs2_alloc, or NULL if the allocation fails
818  */
819 
820 struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip)
821 {
822 	BUG_ON(ip->i_alloc != NULL);
823 	ip->i_alloc = kzalloc(sizeof(struct gfs2_alloc), GFP_KERNEL);
824 	return ip->i_alloc;
825 }
826 
827 /**
828  * try_rgrp_fit - See if a given reservation will fit in a given RG
829  * @rgd: the RG data
830  * @al: the struct gfs2_alloc structure describing the reservation
831  *
832  * If there's room for the requested blocks to be allocated from the RG:
833  *   Sets the @al_rgd field in @al.
834  *
835  * Returns: 1 on success (it fits), 0 on failure (it doesn't fit)
836  */
837 
838 static int try_rgrp_fit(struct gfs2_rgrpd *rgd, struct gfs2_alloc *al)
839 {
840 	struct gfs2_sbd *sdp = rgd->rd_sbd;
841 	int ret = 0;
842 
843 	if (rgd->rd_rg.rg_flags & GFS2_RGF_NOALLOC)
844 		return 0;
845 
846 	spin_lock(&sdp->sd_rindex_spin);
847 	if (rgd->rd_free_clone >= al->al_requested) {
848 		al->al_rgd = rgd;
849 		ret = 1;
850 	}
851 	spin_unlock(&sdp->sd_rindex_spin);
852 
853 	return ret;
854 }
855 
856 /**
857  * try_rgrp_unlink - Look for any unlinked, allocated, but unused inodes
858  * @rgd: The rgrp
859  * @last_unlinked: skip unlinked inodes at or below this address; updated as we go
860  * Returns: The inode, if one has been found
861  */
862 
863 static struct inode *try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked)
864 {
865 	struct inode *inode;
866 	u32 goal = 0, block;
867 	u64 no_addr;
868 	struct gfs2_sbd *sdp = rgd->rd_sbd;
869 
870 	for(;;) {
871 		if (goal >= rgd->rd_data)
872 			break;
873 		down_write(&sdp->sd_log_flush_lock);
874 		block = rgblk_search(rgd, goal, GFS2_BLKST_UNLINKED,
875 				     GFS2_BLKST_UNLINKED);
876 		up_write(&sdp->sd_log_flush_lock);
877 		if (block == BFITNOENT)
878 			break;
879 		/* rgblk_search can return a block < goal, so we need to
880 		   keep it marching forward. */
881 		no_addr = block + rgd->rd_data0;
882 		goal++;
883 		if (*last_unlinked != NO_BLOCK && no_addr <= *last_unlinked)
884 			continue;
885 		*last_unlinked = no_addr;
886 		inode = gfs2_inode_lookup(rgd->rd_sbd->sd_vfs, DT_UNKNOWN,
887 					  no_addr, -1, 1);
888 		if (!IS_ERR(inode))
889 			return inode;
890 	}
891 
892 	rgd->rd_flags &= ~GFS2_RDF_CHECK;
893 	return NULL;
894 }
895 
896 /**
897  * recent_rgrp_first - get first RG from "recent" list
898  * @sdp: The GFS2 superblock
899  * @rglast: address of the rgrp used last
900  *
901  * Returns: The first rgrp in the recent list
902  */
903 
904 static struct gfs2_rgrpd *recent_rgrp_first(struct gfs2_sbd *sdp,
905 					    u64 rglast)
906 {
907 	struct gfs2_rgrpd *rgd = NULL;
908 
909 	spin_lock(&sdp->sd_rindex_spin);
910 
911 	if (list_empty(&sdp->sd_rindex_recent_list))
912 		goto out;
913 
914 	if (!rglast)
915 		goto first;
916 
917 	list_for_each_entry(rgd, &sdp->sd_rindex_recent_list, rd_recent) {
918 		if (rgd->rd_addr == rglast)
919 			goto out;
920 	}
921 
922 first:
923 	rgd = list_entry(sdp->sd_rindex_recent_list.next, struct gfs2_rgrpd,
924 			 rd_recent);
925 out:
926 	spin_unlock(&sdp->sd_rindex_spin);
927 	return rgd;
928 }
929 
930 /**
931  * recent_rgrp_next - get next RG from "recent" list
932  * @cur_rgd: current rgrp
933  * @remove: If set, remove @cur_rgd from the recent list
934  *
935  * Returns: The next rgrp in the recent list
936  */
937 
938 static struct gfs2_rgrpd *recent_rgrp_next(struct gfs2_rgrpd *cur_rgd,
939 					   int remove)
940 {
941 	struct gfs2_sbd *sdp = cur_rgd->rd_sbd;
942 	struct list_head *head;
943 	struct gfs2_rgrpd *rgd;
944 
945 	spin_lock(&sdp->sd_rindex_spin);
946 
947 	head = &sdp->sd_rindex_recent_list;
948 
949 	list_for_each_entry(rgd, head, rd_recent) {
950 		if (rgd == cur_rgd) {
951 			if (cur_rgd->rd_recent.next != head)
952 				rgd = list_entry(cur_rgd->rd_recent.next,
953 						 struct gfs2_rgrpd, rd_recent);
954 			else
955 				rgd = NULL;
956 
957 			if (remove)
958 				list_del(&cur_rgd->rd_recent);
959 
960 			goto out;
961 		}
962 	}
963 
964 	rgd = NULL;
965 	if (!list_empty(head))
966 		rgd = list_entry(head->next, struct gfs2_rgrpd, rd_recent);
967 
968 out:
969 	spin_unlock(&sdp->sd_rindex_spin);
970 	return rgd;
971 }
972 
973 /**
974  * recent_rgrp_add - add an RG to tail of "recent" list
975  * @new_rgd: The rgrp to add
976  *
977  */
978 
979 static void recent_rgrp_add(struct gfs2_rgrpd *new_rgd)
980 {
981 	struct gfs2_sbd *sdp = new_rgd->rd_sbd;
982 	struct gfs2_rgrpd *rgd;
983 	unsigned int count = 0;
984 	unsigned int max = sdp->sd_rgrps / gfs2_jindex_size(sdp);
985 
986 	spin_lock(&sdp->sd_rindex_spin);
987 
988 	list_for_each_entry(rgd, &sdp->sd_rindex_recent_list, rd_recent) {
989 		if (rgd == new_rgd)
990 			goto out;
991 
992 		if (++count >= max)
993 			goto out;
994 	}
995 	list_add_tail(&new_rgd->rd_recent, &sdp->sd_rindex_recent_list);
996 
997 out:
998 	spin_unlock(&sdp->sd_rindex_spin);
999 }
1000 
1001 /**
1002  * forward_rgrp_get - get an rgrp to try next from full list
1003  * @sdp: The GFS2 superblock
1004  *
1005  * Returns: The rgrp to try next
1006  */
1007 
1008 static struct gfs2_rgrpd *forward_rgrp_get(struct gfs2_sbd *sdp)
1009 {
1010 	struct gfs2_rgrpd *rgd;
1011 	unsigned int journals = gfs2_jindex_size(sdp);
1012 	unsigned int rg = 0, x;
1013 
1014 	spin_lock(&sdp->sd_rindex_spin);
1015 
1016 	rgd = sdp->sd_rindex_forward;
1017 	if (!rgd) {
1018 		if (sdp->sd_rgrps >= journals)
1019 			rg = sdp->sd_rgrps * sdp->sd_jdesc->jd_jid / journals;
1020 
1021 		for (x = 0, rgd = gfs2_rgrpd_get_first(sdp); x < rg;
1022 		     x++, rgd = gfs2_rgrpd_get_next(rgd))
1023 			/* Do Nothing */;
1024 
1025 		sdp->sd_rindex_forward = rgd;
1026 	}
1027 
1028 	spin_unlock(&sdp->sd_rindex_spin);
1029 
1030 	return rgd;
1031 }
1032 
1033 /**
1034  * forward_rgrp_set - set the forward rgrp pointer
1035  * @sdp: the filesystem
1036  * @rgd: The new forward rgrp
1037  *
1038  */
1039 
1040 static void forward_rgrp_set(struct gfs2_sbd *sdp, struct gfs2_rgrpd *rgd)
1041 {
1042 	spin_lock(&sdp->sd_rindex_spin);
1043 	sdp->sd_rindex_forward = rgd;
1044 	spin_unlock(&sdp->sd_rindex_spin);
1045 }
1046 
1047 /**
1048  * get_local_rgrp - Choose and lock a rgrp for allocation
1049  * @ip: the inode to reserve space for
1050  * @last_unlinked: address of the last unlinked inode tried (see try_rgrp_unlink())
1051  *
1052  * Try to acquire rgrp in way which avoids contending with others.
1053  *
1054  * Returns: NULL on success, an unlinked inode to be deallocated, or an ERR_PTR on error
1055  */
1056 
1057 static struct inode *get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked)
1058 {
1059 	struct inode *inode = NULL;
1060 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1061 	struct gfs2_rgrpd *rgd, *begin = NULL;
1062 	struct gfs2_alloc *al = ip->i_alloc;
1063 	int flags = LM_FLAG_TRY;
1064 	int skipped = 0;
1065 	int loops = 0;
1066 	int error, rg_locked;
1067 
1068 	/* Try recently successful rgrps */
1069 
1070 	rgd = recent_rgrp_first(sdp, ip->i_last_rg_alloc);
1071 
1072 	while (rgd) {
1073 		rg_locked = 0;
1074 
1075 		if (gfs2_glock_is_locked_by_me(rgd->rd_gl)) {
1076 			rg_locked = 1;
1077 			error = 0;
1078 		} else {
1079 			error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
1080 						   LM_FLAG_TRY, &al->al_rgd_gh);
1081 		}
1082 		switch (error) {
1083 		case 0:
1084 			if (try_rgrp_fit(rgd, al))
1085 				goto out;
1086 			if (rgd->rd_flags & GFS2_RDF_CHECK)
1087 				inode = try_rgrp_unlink(rgd, last_unlinked);
1088 			if (!rg_locked)
1089 				gfs2_glock_dq_uninit(&al->al_rgd_gh);
1090 			if (inode)
1091 				return inode;
1092 			rgd = recent_rgrp_next(rgd, 1);
1093 			break;
1094 
1095 		case GLR_TRYFAILED:
1096 			rgd = recent_rgrp_next(rgd, 0);
1097 			break;
1098 
1099 		default:
1100 			return ERR_PTR(error);
1101 		}
1102 	}
1103 
1104 	/* Go through full list of rgrps */
1105 
1106 	begin = rgd = forward_rgrp_get(sdp);
1107 
1108 	for (;;) {
1109 		rg_locked = 0;
1110 
1111 		if (gfs2_glock_is_locked_by_me(rgd->rd_gl)) {
1112 			rg_locked = 1;
1113 			error = 0;
1114 		} else {
1115 			error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, flags,
1116 						   &al->al_rgd_gh);
1117 		}
1118 		switch (error) {
1119 		case 0:
1120 			if (try_rgrp_fit(rgd, al))
1121 				goto out;
1122 			if (rgd->rd_flags & GFS2_RDF_CHECK)
1123 				inode = try_rgrp_unlink(rgd, last_unlinked);
1124 			if (!rg_locked)
1125 				gfs2_glock_dq_uninit(&al->al_rgd_gh);
1126 			if (inode)
1127 				return inode;
1128 			break;
1129 
1130 		case GLR_TRYFAILED:
1131 			skipped++;
1132 			break;
1133 
1134 		default:
1135 			return ERR_PTR(error);
1136 		}
1137 
1138 		rgd = gfs2_rgrpd_get_next(rgd);
1139 		if (!rgd)
1140 			rgd = gfs2_rgrpd_get_first(sdp);
1141 
1142 		if (rgd == begin) {
1143 			if (++loops >= 3)
1144 				return ERR_PTR(-ENOSPC);
1145 			if (!skipped)
1146 				loops++;
1147 			flags = 0;
1148 			if (loops == 2)
1149 				gfs2_log_flush(sdp, NULL);
1150 		}
1151 	}
1152 
1153 out:
1154 	ip->i_last_rg_alloc = rgd->rd_addr;
1155 
1156 	if (begin) {
1157 		recent_rgrp_add(rgd);
1158 		rgd = gfs2_rgrpd_get_next(rgd);
1159 		if (!rgd)
1160 			rgd = gfs2_rgrpd_get_first(sdp);
1161 		forward_rgrp_set(sdp, rgd);
1162 	}
1163 
1164 	return NULL;
1165 }
1166 
1167 /**
1168  * gfs2_inplace_reserve_i - Reserve space in the filesystem
1169  * @ip: the inode to reserve space for
1170  * @file, @line: source location of the caller, recorded for debugging
1171  * Returns: errno
1172  */
1173 
1174 int gfs2_inplace_reserve_i(struct gfs2_inode *ip, char *file, unsigned int line)
1175 {
1176 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1177 	struct gfs2_alloc *al = ip->i_alloc;
1178 	struct inode *inode;
1179 	int error = 0;
1180 	u64 last_unlinked = NO_BLOCK;
1181 
1182 	if (gfs2_assert_warn(sdp, al->al_requested))
1183 		return -EINVAL;
1184 
1185 try_again:
1186 	/* We need to hold the rindex unless the inode we're using is
1187 	   the rindex itself, in which case it's already held. */
1188 	if (ip != GFS2_I(sdp->sd_rindex))
1189 		error = gfs2_rindex_hold(sdp, &al->al_ri_gh);
1190 	else if (!sdp->sd_rgrps) /* We may not have the rindex read in, so: */
1191 		error = gfs2_ri_update_special(ip);
1192 
1193 	if (error)
1194 		return error;
1195 
1196 	inode = get_local_rgrp(ip, &last_unlinked);
1197 	if (inode) {
1198 		if (ip != GFS2_I(sdp->sd_rindex))
1199 			gfs2_glock_dq_uninit(&al->al_ri_gh);
1200 		if (IS_ERR(inode))
1201 			return PTR_ERR(inode);
1202 		iput(inode);
1203 		gfs2_log_flush(sdp, NULL);
1204 		goto try_again;
1205 	}
1206 
1207 	al->al_file = file;
1208 	al->al_line = line;
1209 
1210 	return 0;
1211 }
1212 
1213 /**
1214  * gfs2_inplace_release - release an inplace reservation
1215  * @ip: the inode the reservation was taken out on
1216  *
1217  * Release a reservation made by gfs2_inplace_reserve().
1218  */
1219 
1220 void gfs2_inplace_release(struct gfs2_inode *ip)
1221 {
1222 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1223 	struct gfs2_alloc *al = ip->i_alloc;
1224 
1225 	if (gfs2_assert_warn(sdp, al->al_alloced <= al->al_requested) == -1)
1226 		fs_warn(sdp, "al_alloced = %u, al_requested = %u "
1227 			     "al_file = %s, al_line = %u\n",
1228 		             al->al_alloced, al->al_requested, al->al_file,
1229 			     al->al_line);
1230 
1231 	al->al_rgd = NULL;
1232 	if (al->al_rgd_gh.gh_gl)
1233 		gfs2_glock_dq_uninit(&al->al_rgd_gh);
1234 	if (ip != GFS2_I(sdp->sd_rindex))
1235 		gfs2_glock_dq_uninit(&al->al_ri_gh);
1236 }
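
/*
 * Editorial sketch (not part of the original file): the reservation pattern
 * that gfs2_inplace_reserve_i() and gfs2_inplace_release() above are used
 * in.  gfs2_alloc_put() is assumed here to be the counterpart of
 * gfs2_alloc_get() declared in rgrp.h; quota and transaction handling are
 * omitted, and the example_* name is illustrative only.
 */
static int example_reserve_one_block(struct gfs2_inode *ip)
{
	struct gfs2_alloc *al = gfs2_alloc_get(ip);
	int error;

	if (!al)
		return -ENOMEM;

	al->al_requested = 1;	/* one data or metadata block */

	error = gfs2_inplace_reserve_i(ip, __FILE__, __LINE__);
	if (error)
		goto out_put;

	/* ... gfs2_alloc_data(ip) or gfs2_alloc_meta(ip) would go here ... */

	gfs2_inplace_release(ip);
out_put:
	gfs2_alloc_put(ip);
	return error;
}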
1237 
1238 /**
1239  * gfs2_get_block_type - Determine the allocation state of a block in an RG
1240  * @rgd: the resource group holding the block
1241  * @block: the block number
1242  *
1243  * Returns: The block type (GFS2_BLKST_*)
1244  */
1245 
1246 unsigned char gfs2_get_block_type(struct gfs2_rgrpd *rgd, u64 block)
1247 {
1248 	struct gfs2_bitmap *bi = NULL;
1249 	u32 length, rgrp_block, buf_block;
1250 	unsigned int buf;
1251 	unsigned char type;
1252 
1253 	length = rgd->rd_length;
1254 	rgrp_block = block - rgd->rd_data0;
1255 
1256 	for (buf = 0; buf < length; buf++) {
1257 		bi = rgd->rd_bits + buf;
1258 		if (rgrp_block < (bi->bi_start + bi->bi_len) * GFS2_NBBY)
1259 			break;
1260 	}
1261 
1262 	gfs2_assert(rgd->rd_sbd, buf < length);
1263 	buf_block = rgrp_block - bi->bi_start * GFS2_NBBY;
1264 
1265 	type = gfs2_testbit(rgd, bi->bi_bh->b_data + bi->bi_offset,
1266 			   bi->bi_len, buf_block);
1267 
1268 	return type;
1269 }
1270 
1271 /**
1272  * rgblk_search - find a block in @old_state, change allocation
1273  *           state to @new_state
1274  * @rgd: the resource group descriptor
1275  * @goal: the goal block within the RG (start here to search for avail block)
1276  * @old_state: GFS2_BLKST_XXX the before-allocation state to find
1277  * @new_state: GFS2_BLKST_XXX the after-allocation block state
1278  *
1279  * Walk rgrp's bitmap to find bits that represent a block in @old_state.
1280  * Add the found bitmap buffer to the transaction.
1281  * Set the found bits to @new_state to change block's allocation state.
1282  *
1283  * The caller normally knows (from reservation results, etc.) that a block
1284  * in @old_state is available; if none is found, BFITNOENT is returned.
1285  *
1286  * Scope of @goal and returned block is just within rgrp, not the whole
1287  * filesystem.
1288  *
1289  * Returns:  the block number allocated
1290  */
1291 
1292 static u32 rgblk_search(struct gfs2_rgrpd *rgd, u32 goal,
1293 			unsigned char old_state, unsigned char new_state)
1294 {
1295 	struct gfs2_bitmap *bi = NULL;
1296 	u32 length = rgd->rd_length;
1297 	u32 blk = 0;
1298 	unsigned int buf, x;
1299 
1300 	/* Find bitmap block that contains bits for goal block */
1301 	for (buf = 0; buf < length; buf++) {
1302 		bi = rgd->rd_bits + buf;
1303 		if (goal < (bi->bi_start + bi->bi_len) * GFS2_NBBY)
1304 			break;
1305 	}
1306 
1307 	gfs2_assert(rgd->rd_sbd, buf < length);
1308 
1309 	/* Convert scope of "goal" from rgrp-wide to within found bit block */
1310 	goal -= bi->bi_start * GFS2_NBBY;
1311 
1312 	/* Search (up to entire) bitmap in this rgrp for allocatable block.
1313 	   "x <= length", instead of "x < length", because we typically start
1314 	   the search in the middle of a bit block, but if we can't find an
1315 	   allocatable block anywhere else, we want to be able wrap around and
1316 	   search in the first part of our first-searched bit block.  */
1317 	for (x = 0; x <= length; x++) {
1318 		/* The GFS2_BLKST_UNLINKED state doesn't apply to the clone
1319 		   bitmaps, so we must search the originals for that. */
1320 		if (old_state != GFS2_BLKST_UNLINKED && bi->bi_clone)
1321 			blk = gfs2_bitfit(bi->bi_clone + bi->bi_offset,
1322 					  bi->bi_len, goal, old_state);
1323 		else
1324 			blk = gfs2_bitfit(bi->bi_bh->b_data + bi->bi_offset,
1325 					  bi->bi_len, goal, old_state);
1326 		if (blk != BFITNOENT)
1327 			break;
1328 
1329 		/* Try next bitmap block (wrap back to rgrp header if at end) */
1330 		buf = (buf + 1) % length;
1331 		bi = rgd->rd_bits + buf;
1332 		goal = 0;
1333 	}
1334 
1335 	if (blk != BFITNOENT && old_state != new_state) {
1336 		gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1);
1337 		gfs2_setbit(rgd, bi->bi_bh->b_data + bi->bi_offset,
1338 			    bi->bi_len, blk, new_state);
1339 		if (bi->bi_clone)
1340 			gfs2_setbit(rgd, bi->bi_clone + bi->bi_offset,
1341 				    bi->bi_len, blk, new_state);
1342 	}
1343 
1344 	return (blk == BFITNOENT) ? blk : (bi->bi_start * GFS2_NBBY) + blk;
1345 }
1346 
1347 /**
1348  * rgblk_free - Change alloc state of given block(s)
1349  * @sdp: the filesystem
1350  * @bstart: the start of a run of blocks to free
1351  * @blen: the length of the block run (all must lie within ONE RG!)
1352  * @new_state: GFS2_BLKST_XXX the after-allocation block state
1353  *
1354  * Returns:  Resource group containing the block(s)
1355  */
1356 
1357 static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart,
1358 				     u32 blen, unsigned char new_state)
1359 {
1360 	struct gfs2_rgrpd *rgd;
1361 	struct gfs2_bitmap *bi = NULL;
1362 	u32 length, rgrp_blk, buf_blk;
1363 	unsigned int buf;
1364 
1365 	rgd = gfs2_blk2rgrpd(sdp, bstart);
1366 	if (!rgd) {
1367 		if (gfs2_consist(sdp))
1368 			fs_err(sdp, "block = %llu\n", (unsigned long long)bstart);
1369 		return NULL;
1370 	}
1371 
1372 	length = rgd->rd_length;
1373 
1374 	rgrp_blk = bstart - rgd->rd_data0;
1375 
1376 	while (blen--) {
1377 		for (buf = 0; buf < length; buf++) {
1378 			bi = rgd->rd_bits + buf;
1379 			if (rgrp_blk < (bi->bi_start + bi->bi_len) * GFS2_NBBY)
1380 				break;
1381 		}
1382 
1383 		gfs2_assert(rgd->rd_sbd, buf < length);
1384 
1385 		buf_blk = rgrp_blk - bi->bi_start * GFS2_NBBY;
1386 		rgrp_blk++;
1387 
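		/* Make a copy of the bitmap before changing it: rgblk_search()
		   prefers bi_clone, so blocks freed here are not handed out
		   again until gfs2_rgrp_repolish_clones() resyncs the copy. */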
1388 		if (!bi->bi_clone) {
1389 			bi->bi_clone = kmalloc(bi->bi_bh->b_size,
1390 					       GFP_NOFS | __GFP_NOFAIL);
1391 			memcpy(bi->bi_clone + bi->bi_offset,
1392 			       bi->bi_bh->b_data + bi->bi_offset,
1393 			       bi->bi_len);
1394 		}
1395 		gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1);
1396 		gfs2_setbit(rgd, bi->bi_bh->b_data + bi->bi_offset,
1397 			    bi->bi_len, buf_blk, new_state);
1398 	}
1399 
1400 	return rgd;
1401 }
1402 
1403 /**
1404  * gfs2_alloc_data - Allocate a data block
1405  * @ip: the inode to allocate the data block for
1406  *
1407  * Returns: the allocated block
1408  */
1409 
1410 u64 gfs2_alloc_data(struct gfs2_inode *ip)
1411 {
1412 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1413 	struct gfs2_alloc *al = ip->i_alloc;
1414 	struct gfs2_rgrpd *rgd = al->al_rgd;
1415 	u32 goal, blk;
1416 	u64 block;
1417 
1418 	if (rgrp_contains_block(rgd, ip->i_di.di_goal_data))
1419 		goal = ip->i_di.di_goal_data - rgd->rd_data0;
1420 	else
1421 		goal = rgd->rd_last_alloc_data;
1422 
1423 	blk = rgblk_search(rgd, goal, GFS2_BLKST_FREE, GFS2_BLKST_USED);
1424 	BUG_ON(blk == BFITNOENT);
1425 	rgd->rd_last_alloc_data = blk;
1426 
1427 	block = rgd->rd_data0 + blk;
1428 	ip->i_di.di_goal_data = block;
1429 
1430 	gfs2_assert_withdraw(sdp, rgd->rd_rg.rg_free);
1431 	rgd->rd_rg.rg_free--;
1432 
1433 	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
1434 	gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);
1435 
1436 	al->al_alloced++;
1437 
1438 	gfs2_statfs_change(sdp, 0, -1, 0);
1439 	gfs2_quota_change(ip, +1, ip->i_inode.i_uid, ip->i_inode.i_gid);
1440 
1441 	spin_lock(&sdp->sd_rindex_spin);
1442 	rgd->rd_free_clone--;
1443 	spin_unlock(&sdp->sd_rindex_spin);
1444 
1445 	return block;
1446 }
1447 
1448 /**
1449  * gfs2_alloc_meta - Allocate a metadata block
1450  * @ip: the inode to allocate the metadata block for
1451  *
1452  * Returns: the allocated block
1453  */
1454 
1455 u64 gfs2_alloc_meta(struct gfs2_inode *ip)
1456 {
1457 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1458 	struct gfs2_alloc *al = ip->i_alloc;
1459 	struct gfs2_rgrpd *rgd = al->al_rgd;
1460 	u32 goal, blk;
1461 	u64 block;
1462 
1463 	if (rgrp_contains_block(rgd, ip->i_di.di_goal_meta))
1464 		goal = ip->i_di.di_goal_meta - rgd->rd_data0;
1465 	else
1466 		goal = rgd->rd_last_alloc_meta;
1467 
1468 	blk = rgblk_search(rgd, goal, GFS2_BLKST_FREE, GFS2_BLKST_USED);
1469 	BUG_ON(blk == BFITNOENT);
1470 	rgd->rd_last_alloc_meta = blk;
1471 
1472 	block = rgd->rd_data0 + blk;
1473 	ip->i_di.di_goal_meta = block;
1474 
1475 	gfs2_assert_withdraw(sdp, rgd->rd_rg.rg_free);
1476 	rgd->rd_rg.rg_free--;
1477 
1478 	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
1479 	gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);
1480 
1481 	al->al_alloced++;
1482 
1483 	gfs2_statfs_change(sdp, 0, -1, 0);
1484 	gfs2_quota_change(ip, +1, ip->i_inode.i_uid, ip->i_inode.i_gid);
1485 	gfs2_trans_add_unrevoke(sdp, block);
1486 
1487 	spin_lock(&sdp->sd_rindex_spin);
1488 	rgd->rd_free_clone--;
1489 	spin_unlock(&sdp->sd_rindex_spin);
1490 
1491 	return block;
1492 }
1493 
1494 /**
1495  * gfs2_alloc_di - Allocate a dinode
1496  * @dip: the directory that the inode is going in
1497  * @generation: set to the new dinode's generation number
1498  * Returns: the block allocated
1499  */
1500 
1501 u64 gfs2_alloc_di(struct gfs2_inode *dip, u64 *generation)
1502 {
1503 	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
1504 	struct gfs2_alloc *al = dip->i_alloc;
1505 	struct gfs2_rgrpd *rgd = al->al_rgd;
1506 	u32 blk;
1507 	u64 block;
1508 
1509 	blk = rgblk_search(rgd, rgd->rd_last_alloc_meta,
1510 			   GFS2_BLKST_FREE, GFS2_BLKST_DINODE);
1511 	BUG_ON(blk == BFITNOENT);
1512 
1513 	rgd->rd_last_alloc_meta = blk;
1514 
1515 	block = rgd->rd_data0 + blk;
1516 
1517 	gfs2_assert_withdraw(sdp, rgd->rd_rg.rg_free);
1518 	rgd->rd_rg.rg_free--;
1519 	rgd->rd_rg.rg_dinodes++;
1520 	*generation = rgd->rd_rg.rg_igeneration++;
1521 	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
1522 	gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);
1523 
1524 	al->al_alloced++;
1525 
1526 	gfs2_statfs_change(sdp, 0, -1, +1);
1527 	gfs2_trans_add_unrevoke(sdp, block);
1528 
1529 	spin_lock(&sdp->sd_rindex_spin);
1530 	rgd->rd_free_clone--;
1531 	spin_unlock(&sdp->sd_rindex_spin);
1532 
1533 	return block;
1534 }
1535 
1536 /**
1537  * gfs2_free_data - free a contiguous run of data block(s)
1538  * @ip: the inode these blocks are being freed from
1539  * @bstart: first block of a run of contiguous blocks
1540  * @blen: the length of the block run
1541  *
1542  */
1543 
1544 void gfs2_free_data(struct gfs2_inode *ip, u64 bstart, u32 blen)
1545 {
1546 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1547 	struct gfs2_rgrpd *rgd;
1548 
1549 	rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE);
1550 	if (!rgd)
1551 		return;
1552 
1553 	rgd->rd_rg.rg_free += blen;
1554 
1555 	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
1556 	gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);
1557 
1558 	gfs2_trans_add_rg(rgd);
1559 
1560 	gfs2_statfs_change(sdp, 0, +blen, 0);
1561 	gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid);
1562 }
1563 
1564 /**
1565  * gfs2_free_meta - free a contiguous run of metadata block(s)
1566  * @ip: the inode these blocks are being freed from
1567  * @bstart: first block of a run of contiguous blocks
1568  * @blen: the length of the block run
1569  *
1570  */
1571 
1572 void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen)
1573 {
1574 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1575 	struct gfs2_rgrpd *rgd;
1576 
1577 	rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE);
1578 	if (!rgd)
1579 		return;
1580 
1581 	rgd->rd_rg.rg_free += blen;
1582 
1583 	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
1584 	gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);
1585 
1586 	gfs2_trans_add_rg(rgd);
1587 
1588 	gfs2_statfs_change(sdp, 0, +blen, 0);
1589 	gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid);
1590 	gfs2_meta_wipe(ip, bstart, blen);
1591 }
1592 
1593 void gfs2_unlink_di(struct inode *inode)
1594 {
1595 	struct gfs2_inode *ip = GFS2_I(inode);
1596 	struct gfs2_sbd *sdp = GFS2_SB(inode);
1597 	struct gfs2_rgrpd *rgd;
1598 	u64 blkno = ip->i_no_addr;
1599 
1600 	rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_UNLINKED);
1601 	if (!rgd)
1602 		return;
1603 	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
1604 	gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);
1605 	gfs2_trans_add_rg(rgd);
1606 }
1607 
1608 static void gfs2_free_uninit_di(struct gfs2_rgrpd *rgd, u64 blkno)
1609 {
1610 	struct gfs2_sbd *sdp = rgd->rd_sbd;
1611 	struct gfs2_rgrpd *tmp_rgd;
1612 
1613 	tmp_rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_FREE);
1614 	if (!tmp_rgd)
1615 		return;
1616 	gfs2_assert_withdraw(sdp, rgd == tmp_rgd);
1617 
1618 	if (!rgd->rd_rg.rg_dinodes)
1619 		gfs2_consist_rgrpd(rgd);
1620 	rgd->rd_rg.rg_dinodes--;
1621 	rgd->rd_rg.rg_free++;
1622 
1623 	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
1624 	gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);
1625 
1626 	gfs2_statfs_change(sdp, 0, +1, -1);
1627 	gfs2_trans_add_rg(rgd);
1628 }
1629 
1630 
1631 void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
1632 {
1633 	gfs2_free_uninit_di(rgd, ip->i_no_addr);
1634 	gfs2_quota_change(ip, -1, ip->i_inode.i_uid, ip->i_inode.i_gid);
1635 	gfs2_meta_wipe(ip, ip->i_no_addr, 1);
1636 }
1637 
1638 /**
1639  * gfs2_rlist_add - add a RG to a list of RGs
1640  * @sdp: the filesystem
1641  * @rlist: the list of resource groups
1642  * @block: the block
1643  *
1644  * Figure out what RG a block belongs to and add that RG to the list
1645  *
1646  * FIXME: Don't use NOFAIL
1647  *
1648  */
1649 
1650 void gfs2_rlist_add(struct gfs2_sbd *sdp, struct gfs2_rgrp_list *rlist,
1651 		    u64 block)
1652 {
1653 	struct gfs2_rgrpd *rgd;
1654 	struct gfs2_rgrpd **tmp;
1655 	unsigned int new_space;
1656 	unsigned int x;
1657 
1658 	if (gfs2_assert_warn(sdp, !rlist->rl_ghs))
1659 		return;
1660 
1661 	rgd = gfs2_blk2rgrpd(sdp, block);
1662 	if (!rgd) {
1663 		if (gfs2_consist(sdp))
1664 			fs_err(sdp, "block = %llu\n", (unsigned long long)block);
1665 		return;
1666 	}
1667 
1668 	for (x = 0; x < rlist->rl_rgrps; x++)
1669 		if (rlist->rl_rgd[x] == rgd)
1670 			return;
1671 
1672 	if (rlist->rl_rgrps == rlist->rl_space) {
1673 		new_space = rlist->rl_space + 10;
1674 
1675 		tmp = kcalloc(new_space, sizeof(struct gfs2_rgrpd *),
1676 			      GFP_NOFS | __GFP_NOFAIL);
1677 
1678 		if (rlist->rl_rgd) {
1679 			memcpy(tmp, rlist->rl_rgd,
1680 			       rlist->rl_space * sizeof(struct gfs2_rgrpd *));
1681 			kfree(rlist->rl_rgd);
1682 		}
1683 
1684 		rlist->rl_space = new_space;
1685 		rlist->rl_rgd = tmp;
1686 	}
1687 
1688 	rlist->rl_rgd[rlist->rl_rgrps++] = rgd;
1689 }
1690 
1691 /**
1692  * gfs2_rlist_alloc - all RGs have been added to the rlist, now allocate
1693  *      and initialize an array of glock holders for them
1694  * @rlist: the list of resource groups
1695  * @state: the lock state to acquire the RG lock in
1696  * @flags: the modifier flags for the holder structures
1697  *
1698  * FIXME: Don't use NOFAIL
1699  *
1700  */
1701 
1702 void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state,
1703 		      int flags)
1704 {
1705 	unsigned int x;
1706 
1707 	rlist->rl_ghs = kcalloc(rlist->rl_rgrps, sizeof(struct gfs2_holder),
1708 				GFP_NOFS | __GFP_NOFAIL);
1709 	for (x = 0; x < rlist->rl_rgrps; x++)
1710 		gfs2_holder_init(rlist->rl_rgd[x]->rd_gl,
1711 				state, flags,
1712 				&rlist->rl_ghs[x]);
1713 }
1714 
1715 /**
1716  * gfs2_rlist_free - free a resource group list
1717  * @rlist: the list of resource groups
1718  *
1719  */
1720 
1721 void gfs2_rlist_free(struct gfs2_rgrp_list *rlist)
1722 {
1723 	unsigned int x;
1724 
1725 	kfree(rlist->rl_rgd);
1726 
1727 	if (rlist->rl_ghs) {
1728 		for (x = 0; x < rlist->rl_rgrps; x++)
1729 			gfs2_holder_uninit(&rlist->rl_ghs[x]);
1730 		kfree(rlist->rl_ghs);
1731 	}
1732 }
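
/*
 * Editorial sketch (not part of the original file): how the rlist helpers
 * above fit together.  The caller is assumed to enqueue the initialized
 * holders itself (for example with gfs2_glock_nq_m()) before freeing any
 * blocks, and to dequeue them afterwards; the example_* name is
 * illustrative only.
 */
static void example_build_rlist(struct gfs2_sbd *sdp, const u64 *blocks,
				unsigned int nblocks)
{
	struct gfs2_rgrp_list rlist;
	unsigned int x;

	memset(&rlist, 0, sizeof(rlist));

	for (x = 0; x < nblocks; x++)
		gfs2_rlist_add(sdp, &rlist, blocks[x]); /* dedups rgrps */

	gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE, 0);

	/* ... enqueue rlist.rl_ghs, free the blocks, dequeue ... */

	gfs2_rlist_free(&rlist);
}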
1733 
1734