xref: /openbmc/linux/fs/gfs2/bmap.c (revision 27ba4deb)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
4  * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
5  */
6 
7 #include <linux/spinlock.h>
8 #include <linux/completion.h>
9 #include <linux/buffer_head.h>
10 #include <linux/blkdev.h>
11 #include <linux/gfs2_ondisk.h>
12 #include <linux/crc32.h>
13 #include <linux/iomap.h>
14 #include <linux/ktime.h>
15 
16 #include "gfs2.h"
17 #include "incore.h"
18 #include "bmap.h"
19 #include "glock.h"
20 #include "inode.h"
21 #include "meta_io.h"
22 #include "quota.h"
23 #include "rgrp.h"
24 #include "log.h"
25 #include "super.h"
26 #include "trans.h"
27 #include "dir.h"
28 #include "util.h"
29 #include "aops.h"
30 #include "trace_gfs2.h"
31 
32 /* This doesn't need to be that large as the maximum number of 64-bit
33  * pointers in a 4k block is 512, so __u16 is fine for that.  Keeping
34  * it small saves stack space.
35  */
36 struct metapath {
37 	struct buffer_head *mp_bh[GFS2_MAX_META_HEIGHT];
38 	__u16 mp_list[GFS2_MAX_META_HEIGHT];
39 	int mp_fheight; /* find_metapath height */
40 	int mp_aheight; /* actual height (lookup height) */
41 };
42 
43 static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length);
44 
45 /**
46  * gfs2_unstuffer_page - unstuff a stuffed inode into a block cached by a page
47  * @ip: the inode
48  * @dibh: the dinode buffer
49  * @block: the block number that was allocated
50  * @page: The (optional) page. This is looked up if @page is NULL
51  *
52  * Returns: errno
53  */
54 
55 static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
56 			       u64 block, struct page *page)
57 {
58 	struct inode *inode = &ip->i_inode;
59 	struct buffer_head *bh;
60 	int release = 0;
61 
62 	if (!page || page->index) {
63 		page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS);
64 		if (!page)
65 			return -ENOMEM;
66 		release = 1;
67 	}
68 
69 	if (!PageUptodate(page)) {
70 		void *kaddr = kmap(page);
71 		u64 dsize = i_size_read(inode);
72 
73 		if (dsize > gfs2_max_stuffed_size(ip))
74 			dsize = gfs2_max_stuffed_size(ip);
75 
76 		memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
77 		memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
78 		kunmap(page);
79 
80 		SetPageUptodate(page);
81 	}
82 
83 	if (!page_has_buffers(page))
84 		create_empty_buffers(page, BIT(inode->i_blkbits),
85 				     BIT(BH_Uptodate));
86 
87 	bh = page_buffers(page);
88 
89 	if (!buffer_mapped(bh))
90 		map_bh(bh, inode->i_sb, block);
91 
92 	set_buffer_uptodate(bh);
93 	if (gfs2_is_jdata(ip))
94 		gfs2_trans_add_data(ip->i_gl, bh);
95 	else {
96 		mark_buffer_dirty(bh);
97 		gfs2_ordered_add_inode(ip);
98 	}
99 
100 	if (release) {
101 		unlock_page(page);
102 		put_page(page);
103 	}
104 
105 	return 0;
106 }
107 
108 /**
109  * gfs2_unstuff_dinode - Unstuff a dinode when the data has grown too big
110  * @ip: The GFS2 inode to unstuff
111  * @page: The (optional) page. This is looked up if @page is NULL
112  *
113  * This routine unstuffs a dinode and returns it to a "normal" state such
114  * that the height can be grown in the traditional way.
115  *
116  * Returns: errno
117  */
118 
119 int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page)
120 {
121 	struct buffer_head *bh, *dibh;
122 	struct gfs2_dinode *di;
123 	u64 block = 0;
124 	int isdir = gfs2_is_dir(ip);
125 	int error;
126 
127 	down_write(&ip->i_rw_mutex);
128 
129 	error = gfs2_meta_inode_buffer(ip, &dibh);
130 	if (error)
131 		goto out;
132 
133 	if (i_size_read(&ip->i_inode)) {
134 		/* Get a free block, fill it with the stuffed data,
135 		   and write it out to disk */
136 
137 		unsigned int n = 1;
138 		error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
139 		if (error)
140 			goto out_brelse;
141 		if (isdir) {
142 			gfs2_trans_remove_revoke(GFS2_SB(&ip->i_inode), block, 1);
143 			error = gfs2_dir_get_new_buffer(ip, block, &bh);
144 			if (error)
145 				goto out_brelse;
146 			gfs2_buffer_copy_tail(bh, sizeof(struct gfs2_meta_header),
147 					      dibh, sizeof(struct gfs2_dinode));
148 			brelse(bh);
149 		} else {
150 			error = gfs2_unstuffer_page(ip, dibh, block, page);
151 			if (error)
152 				goto out_brelse;
153 		}
154 	}
155 
156 	/*  Set up the pointer to the new block  */
157 
158 	gfs2_trans_add_meta(ip->i_gl, dibh);
159 	di = (struct gfs2_dinode *)dibh->b_data;
160 	gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
161 
162 	if (i_size_read(&ip->i_inode)) {
163 		*(__be64 *)(di + 1) = cpu_to_be64(block);
164 		gfs2_add_inode_blocks(&ip->i_inode, 1);
165 		di->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
166 	}
167 
168 	ip->i_height = 1;
169 	di->di_height = cpu_to_be16(1);
170 
171 out_brelse:
172 	brelse(dibh);
173 out:
174 	up_write(&ip->i_rw_mutex);
175 	return error;
176 }
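/*
 * For scale: an inode stays "stuffed" only while its data fits in the
 * dinode block directly behind the on-disk header.  Assuming
 * gfs2_max_stuffed_size() is simply the block size minus
 * sizeof(struct gfs2_dinode), a filesystem with 4k blocks can hold
 * roughly 3.8k of stuffed data; the first write or directory growth
 * past that limit goes through gfs2_unstuff_dinode() above.
 */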
177 
178 
179 /**
180  * find_metapath - Find path through the metadata tree
181  * @sdp: The superblock
182  * @block: The disk block to look up
183  * @mp: The metapath to return the result in
184  * @height: The pre-calculated height of the metadata tree
185  *
186  *   This routine returns a struct metapath structure that defines a path
187  *   through the metadata of inode "ip" to get to block "block".
188  *
189  *   Example:
190  *   Given:  "ip" is a height 3 file, "offset" is 101342453, and this is a
191  *   filesystem with a blocksize of 4096.
192  *
193  *   find_metapath() would return a struct metapath structure set to:
194  *   mp_fheight = 3, mp_list[0] = 0, mp_list[1] = 48, and mp_list[2] = 165.
195  *
196  *   That means that in order to get to the block containing the byte at
197  *   offset 101342453, we would load the indirect block pointed to by pointer
198  *   0 in the dinode.  We would then load the indirect block pointed to by
199  *   pointer 48 in that indirect block.  We would then load the data block
200  *   pointed to by pointer 165 in that indirect block.
201  *
202  *             ----------------------------------------
203  *             | Dinode |                             |
204  *             |        |                            4|
205  *             |        |0 1 2 3 4 5                 9|
206  *             |        |                            6|
207  *             ----------------------------------------
208  *                       |
209  *                       |
210  *                       V
211  *             ----------------------------------------
212  *             | Indirect Block                       |
213  *             |                                     5|
214  *             |            4 4 4 4 4 5 5            1|
215  *             |0           5 6 7 8 9 0 1            2|
216  *             ----------------------------------------
217  *                                |
218  *                                |
219  *                                V
220  *             ----------------------------------------
221  *             | Indirect Block                       |
222  *             |                         1 1 1 1 1   5|
223  *             |                         6 6 6 6 6   1|
224  *             |0                        3 4 5 6 7   2|
225  *             ----------------------------------------
226  *                                           |
227  *                                           |
228  *                                           V
229  *             ----------------------------------------
230  *             | Data block containing offset         |
231  *             |            101342453                 |
232  *             |                                      |
233  *             |                                      |
234  *             ----------------------------------------
235  *
236  */
237 
238 static void find_metapath(const struct gfs2_sbd *sdp, u64 block,
239 			  struct metapath *mp, unsigned int height)
240 {
241 	unsigned int i;
242 
243 	mp->mp_fheight = height;
244 	for (i = height; i--;)
245 		mp->mp_list[i] = do_div(block, sdp->sd_inptrs);
246 }
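/*
 * Working the example from the comment above (which assumes a round 512
 * pointers per indirect block; the real sd_inptrs is slightly smaller
 * because each indirect block starts with a gfs2_meta_header): byte
 * offset 101342453 with 4k blocks is logical block 101342453 >> 12 ==
 * 24741, and the do_div() loop peels it apart from the bottom up:
 *
 *	24741 % 512 == 165,  24741 / 512 == 48   =>  mp_list[2] = 165
 *	   48 % 512 ==  48,     48 / 512 ==  0   =>  mp_list[1] =  48
 *	    0 % 512 ==   0                       =>  mp_list[0] =   0
 *
 * which matches the mp_fheight = 3, {0, 48, 165} path shown above.
 */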
247 
248 static inline unsigned int metapath_branch_start(const struct metapath *mp)
249 {
250 	if (mp->mp_list[0] == 0)
251 		return 2;
252 	return 1;
253 }
254 
255 /**
256  * metaptr1 - Return the first possible metadata pointer in a metapath buffer
257  * @height: The metadata height (0 = dinode)
258  * @mp: The metapath
259  */
260 static inline __be64 *metaptr1(unsigned int height, const struct metapath *mp)
261 {
262 	struct buffer_head *bh = mp->mp_bh[height];
263 	if (height == 0)
264 		return ((__be64 *)(bh->b_data + sizeof(struct gfs2_dinode)));
265 	return ((__be64 *)(bh->b_data + sizeof(struct gfs2_meta_header)));
266 }
267 
268 /**
269  * metapointer - Return pointer to start of metadata in a buffer
270  * @height: The metadata height (0 = dinode)
271  * @mp: The metapath
272  *
273  * Return a pointer to the block number of the next height of the metadata
274  * tree given a buffer containing the pointer to the current height of the
275  * metadata tree.
276  */
277 
278 static inline __be64 *metapointer(unsigned int height, const struct metapath *mp)
279 {
280 	__be64 *p = metaptr1(height, mp);
281 	return p + mp->mp_list[height];
282 }
283 
284 static inline const __be64 *metaend(unsigned int height, const struct metapath *mp)
285 {
286 	const struct buffer_head *bh = mp->mp_bh[height];
287 	return (const __be64 *)(bh->b_data + bh->b_size);
288 }
289 
290 static void clone_metapath(struct metapath *clone, struct metapath *mp)
291 {
292 	unsigned int hgt;
293 
294 	*clone = *mp;
295 	for (hgt = 0; hgt < mp->mp_aheight; hgt++)
296 		get_bh(clone->mp_bh[hgt]);
297 }
298 
299 static void gfs2_metapath_ra(struct gfs2_glock *gl, __be64 *start, __be64 *end)
300 {
301 	const __be64 *t;
302 
303 	for (t = start; t < end; t++) {
304 		struct buffer_head *rabh;
305 
306 		if (!*t)
307 			continue;
308 
309 		rabh = gfs2_getbuf(gl, be64_to_cpu(*t), CREATE);
310 		if (trylock_buffer(rabh)) {
311 			if (!buffer_uptodate(rabh)) {
312 				rabh->b_end_io = end_buffer_read_sync;
313 				submit_bh(REQ_OP_READ,
314 					  REQ_RAHEAD | REQ_META | REQ_PRIO,
315 					  rabh);
316 				continue;
317 			}
318 			unlock_buffer(rabh);
319 		}
320 		brelse(rabh);
321 	}
322 }
323 
324 static int __fillup_metapath(struct gfs2_inode *ip, struct metapath *mp,
325 			     unsigned int x, unsigned int h)
326 {
327 	for (; x < h; x++) {
328 		__be64 *ptr = metapointer(x, mp);
329 		u64 dblock = be64_to_cpu(*ptr);
330 		int ret;
331 
332 		if (!dblock)
333 			break;
334 		ret = gfs2_meta_indirect_buffer(ip, x + 1, dblock, &mp->mp_bh[x + 1]);
335 		if (ret)
336 			return ret;
337 	}
338 	mp->mp_aheight = x + 1;
339 	return 0;
340 }
341 
342 /**
343  * lookup_metapath - Walk the metadata tree to a specific point
344  * @ip: The inode
345  * @mp: The metapath
346  *
347  * Assumes that the inode's buffer has already been looked up and
348  * hooked onto mp->mp_bh[0] and that the metapath has been initialised
349  * by find_metapath().
350  *
351  * If this function encounters part of the tree which has not been
352  * allocated, it records in mp->mp_aheight the height at which it found
353  * the unallocated block. Buffers which are found are added to the
354  * mp->mp_bh[] list.
355  *
356  * Returns: error
357  */
358 
359 static int lookup_metapath(struct gfs2_inode *ip, struct metapath *mp)
360 {
361 	return __fillup_metapath(ip, mp, 0, ip->i_height - 1);
362 }
363 
364 /**
365  * fillup_metapath - fill up buffers for the metadata path to a specific height
366  * @ip: The inode
367  * @mp: The metapath
368  * @h: The height to which it should be mapped
369  *
370  * Similar to lookup_metapath, but does lookups for a range of heights
371  *
372  * Returns: error or the number of buffers filled
373  */
374 
375 static int fillup_metapath(struct gfs2_inode *ip, struct metapath *mp, int h)
376 {
377 	unsigned int x = 0;
378 	int ret;
379 
380 	if (h) {
381 		/* find the first buffer we need to look up. */
382 		for (x = h - 1; x > 0; x--) {
383 			if (mp->mp_bh[x])
384 				break;
385 		}
386 	}
387 	ret = __fillup_metapath(ip, mp, x, h);
388 	if (ret)
389 		return ret;
390 	return mp->mp_aheight - x - 1;
391 }
392 
393 static void release_metapath(struct metapath *mp)
394 {
395 	int i;
396 
397 	for (i = 0; i < GFS2_MAX_META_HEIGHT; i++) {
398 		if (mp->mp_bh[i] == NULL)
399 			break;
400 		brelse(mp->mp_bh[i]);
401 		mp->mp_bh[i] = NULL;
402 	}
403 }
404 
405 /**
406  * gfs2_extent_length - Returns length of an extent of blocks
407  * @bh: The metadata block
408  * @ptr: Current position in @bh
409  * @limit: Max extent length to return
410  * @eob: Set to 1 if we hit "end of block"
411  *
412  * Returns: The length of the extent (minimum of one block)
413  */
414 
415 static inline unsigned int gfs2_extent_length(struct buffer_head *bh, __be64 *ptr, size_t limit, int *eob)
416 {
417 	const __be64 *end = (__be64 *)(bh->b_data + bh->b_size);
418 	const __be64 *first = ptr;
419 	u64 d = be64_to_cpu(*ptr);
420 
421 	*eob = 0;
422 	do {
423 		ptr++;
424 		if (ptr >= end)
425 			break;
426 		d++;
427 	} while(be64_to_cpu(*ptr) == d);
428 	if (ptr >= end)
429 		*eob = 1;
430 	return ptr - first;
431 }
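/*
 * For example, if @ptr points at block pointers holding 1000, 1001,
 * 1002, 1005, ... the loop above stops at 1005 and returns an extent
 * length of 3.  If the run of consecutive block numbers reaches the end
 * of the buffer, *eob is set so that callers such as gfs2_iomap_get()
 * can mark the mapping as a boundary (more metadata must be read before
 * the following blocks can be mapped).
 */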
432 
433 typedef const __be64 *(*gfs2_metadata_walker)(
434 		struct metapath *mp,
435 		const __be64 *start, const __be64 *end,
436 		u64 factor, void *data);
437 
438 #define WALK_STOP ((__be64 *)0)
439 #define WALK_NEXT ((__be64 *)1)
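/*
 * A gfs2_metadata_walker is invoked on each range of pointers the walk
 * visits.  As gfs2_walk_metadata() below interprets its return value:
 * WALK_STOP terminates the walk, WALK_NEXT means the whole range was
 * consumed and the walk should continue at the next branch, and any
 * other (non-NULL) pointer asks the walk to descend into the tree at
 * that slot, as gfs2_hole_walker() does when it finds an allocated
 * block below the full tree height.
 */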
440 
441 static int gfs2_walk_metadata(struct inode *inode, sector_t lblock,
442 		u64 len, struct metapath *mp, gfs2_metadata_walker walker,
443 		void *data)
444 {
445 	struct metapath clone;
446 	struct gfs2_inode *ip = GFS2_I(inode);
447 	struct gfs2_sbd *sdp = GFS2_SB(inode);
448 	const __be64 *start, *end, *ptr;
449 	u64 factor = 1;
450 	unsigned int hgt;
451 	int ret = 0;
452 
453 	for (hgt = ip->i_height - 1; hgt >= mp->mp_aheight; hgt--)
454 		factor *= sdp->sd_inptrs;
455 
456 	for (;;) {
457 		u64 step;
458 
459 		/* Walk indirect block. */
460 		start = metapointer(hgt, mp);
461 		end = metaend(hgt, mp);
462 
463 		step = (end - start) * factor;
464 		if (step > len)
465 			end = start + DIV_ROUND_UP_ULL(len, factor);
466 
467 		ptr = walker(mp, start, end, factor, data);
468 		if (ptr == WALK_STOP)
469 			break;
470 		if (step >= len)
471 			break;
472 		len -= step;
473 		if (ptr != WALK_NEXT) {
474 			BUG_ON(!*ptr);
475 			mp->mp_list[hgt] += ptr - start;
476 			goto fill_up_metapath;
477 		}
478 
479 lower_metapath:
480 		/* Decrease height of metapath. */
481 		if (mp != &clone) {
482 			clone_metapath(&clone, mp);
483 			mp = &clone;
484 		}
485 		brelse(mp->mp_bh[hgt]);
486 		mp->mp_bh[hgt] = NULL;
487 		if (!hgt)
488 			break;
489 		hgt--;
490 		factor *= sdp->sd_inptrs;
491 
492 		/* Advance in metadata tree. */
493 		(mp->mp_list[hgt])++;
494 		start = metapointer(hgt, mp);
495 		end = metaend(hgt, mp);
496 		if (start >= end) {
497 			mp->mp_list[hgt] = 0;
498 			if (!hgt)
499 				break;
500 			goto lower_metapath;
501 		}
502 
503 fill_up_metapath:
504 		/* Increase height of metapath. */
505 		if (mp != &clone) {
506 			clone_metapath(&clone, mp);
507 			mp = &clone;
508 		}
509 		ret = fillup_metapath(ip, mp, ip->i_height - 1);
510 		if (ret < 0)
511 			break;
512 		hgt += ret;
513 		for (; ret; ret--)
514 			do_div(factor, sdp->sd_inptrs);
515 		mp->mp_aheight = hgt + 1;
516 	}
517 	if (mp == &clone)
518 		release_metapath(mp);
519 	return ret;
520 }
521 
522 struct gfs2_hole_walker_args {
523 	u64 blocks;
524 };
525 
526 static const __be64 *gfs2_hole_walker(struct metapath *mp,
527 		const __be64 *start, const __be64 *end,
528 		u64 factor, void *data)
529 {
530 	struct gfs2_hole_walker_args *args = data;
531 	const __be64 *ptr;
532 
533 	for (ptr = start; ptr < end; ptr++) {
534 		if (*ptr) {
535 			args->blocks += (ptr - start) * factor;
536 			if (mp->mp_aheight == mp->mp_fheight)
537 				return WALK_STOP;
538 			return ptr;  /* increase height */
539 		}
540 	}
541 	args->blocks += (end - start) * factor;
542 	return WALK_NEXT;
543 }
544 
545 /**
546  * gfs2_hole_size - figure out the size of a hole
547  * @inode: The inode
548  * @lblock: The logical starting block number
549  * @len: How far to look (in blocks)
550  * @mp: The metapath at lblock
551  * @iomap: The iomap to store the hole size in
552  *
553  * This function modifies @mp.
554  *
555  * Returns: errno on error
556  */
557 static int gfs2_hole_size(struct inode *inode, sector_t lblock, u64 len,
558 			  struct metapath *mp, struct iomap *iomap)
559 {
560 	struct gfs2_hole_walker_args args = { };
561 	int ret = 0;
562 
563 	ret = gfs2_walk_metadata(inode, lblock, len, mp, gfs2_hole_walker, &args);
564 	if (!ret)
565 		iomap->length = args.blocks << inode->i_blkbits;
566 	return ret;
567 }
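/*
 * For instance, if a read maps lblock 10 and the next allocated block
 * in the file is at lblock 25, the walk above accumulates 15 blocks of
 * hole and gfs2_iomap_get() reports iomap->length as
 * 15 << inode->i_blkbits bytes (capped by the requested length).
 */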
568 
569 static inline __be64 *gfs2_indirect_init(struct metapath *mp,
570 					 struct gfs2_glock *gl, unsigned int i,
571 					 unsigned offset, u64 bn)
572 {
573 	__be64 *ptr = (__be64 *)(mp->mp_bh[i - 1]->b_data +
574 		       ((i > 1) ? sizeof(struct gfs2_meta_header) :
575 				 sizeof(struct gfs2_dinode)));
576 	BUG_ON(i < 1);
577 	BUG_ON(mp->mp_bh[i] != NULL);
578 	mp->mp_bh[i] = gfs2_meta_new(gl, bn);
579 	gfs2_trans_add_meta(gl, mp->mp_bh[i]);
580 	gfs2_metatype_set(mp->mp_bh[i], GFS2_METATYPE_IN, GFS2_FORMAT_IN);
581 	gfs2_buffer_clear_tail(mp->mp_bh[i], sizeof(struct gfs2_meta_header));
582 	ptr += offset;
583 	*ptr = cpu_to_be64(bn);
584 	return ptr;
585 }
586 
587 enum alloc_state {
588 	ALLOC_DATA = 0,
589 	ALLOC_GROW_DEPTH = 1,
590 	ALLOC_GROW_HEIGHT = 2,
591 	/* ALLOC_UNSTUFF = 3,   TBD and rather complicated */
592 };
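/*
 * gfs2_iomap_alloc() below steps through these states in decreasing
 * order: ALLOC_GROW_HEIGHT stacks new indirect blocks on top of the
 * existing tree until it is tall enough, ALLOC_GROW_DEPTH fills in the
 * missing indirect blocks beneath, and ALLOC_DATA finally points the
 * bottom indirect block at the freshly allocated data blocks.  A write
 * into a hole of a file that is already tall enough starts directly in
 * ALLOC_GROW_DEPTH, or in ALLOC_DATA if the bottom indirect block
 * already exists.
 */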
593 
594 /**
595  * gfs2_iomap_alloc - Build a metadata tree of the requested height
596  * @inode: The GFS2 inode
597  * @iomap: The iomap structure
598  * @flags: iomap flags
599  * @mp: The metapath, with proper height information calculated
600  *
601  * In this routine we may have to alloc:
602  *   i) Indirect blocks to grow the metadata tree height
603  *  ii) Indirect blocks to fill in lower part of the metadata tree
604  * iii) Data blocks
605  *
606  * This function is called after gfs2_iomap_get, which works out the
607  * total number of blocks which we need via gfs2_alloc_size.
608  *
609  * We then do the actual allocation, asking for an extent at a time (if
610  * enough contiguous free blocks are available, there will only be one
611  * allocation request per call), and use the state machine to initialise
612  * the blocks in order.
613  *
614  * Right now, this function will allocate at most one indirect block
615  * worth of data -- with a default block size of 4K, that's slightly
616  * less than 2M.  If this limitation is ever removed to allow huge
617  * allocations, we would probably still want to limit the iomap size we
618  * return to avoid stalling other tasks during huge writes; the next
619  * iomap iteration would then find the blocks already allocated.
620  *
621  * Returns: errno on error
622  */
623 
624 static int gfs2_iomap_alloc(struct inode *inode, struct iomap *iomap,
625 			    unsigned flags, struct metapath *mp)
626 {
627 	struct gfs2_inode *ip = GFS2_I(inode);
628 	struct gfs2_sbd *sdp = GFS2_SB(inode);
629 	struct buffer_head *dibh = mp->mp_bh[0];
630 	u64 bn;
631 	unsigned n, i, blks, alloced = 0, iblks = 0, branch_start = 0;
632 	size_t dblks = iomap->length >> inode->i_blkbits;
633 	const unsigned end_of_metadata = mp->mp_fheight - 1;
634 	int ret;
635 	enum alloc_state state;
636 	__be64 *ptr;
637 	__be64 zero_bn = 0;
638 
639 	BUG_ON(mp->mp_aheight < 1);
640 	BUG_ON(dibh == NULL);
641 	BUG_ON(dblks < 1);
642 
643 	gfs2_trans_add_meta(ip->i_gl, dibh);
644 
645 	down_write(&ip->i_rw_mutex);
646 
647 	if (mp->mp_fheight == mp->mp_aheight) {
648 		/* Bottom indirect block exists */
649 		state = ALLOC_DATA;
650 	} else {
651 		/* Need to allocate indirect blocks */
652 		if (mp->mp_fheight == ip->i_height) {
653 			/* Writing into existing tree, extend tree down */
654 			iblks = mp->mp_fheight - mp->mp_aheight;
655 			state = ALLOC_GROW_DEPTH;
656 		} else {
657 			/* Building up tree height */
658 			state = ALLOC_GROW_HEIGHT;
659 			iblks = mp->mp_fheight - ip->i_height;
660 			branch_start = metapath_branch_start(mp);
661 			iblks += (mp->mp_fheight - branch_start);
662 		}
663 	}
664 
665 	/* start of the second part of the function (state machine) */
666 
667 	blks = dblks + iblks;
668 	i = mp->mp_aheight;
669 	do {
670 		n = blks - alloced;
671 		ret = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL);
672 		if (ret)
673 			goto out;
674 		alloced += n;
675 		if (state != ALLOC_DATA || gfs2_is_jdata(ip))
676 			gfs2_trans_remove_revoke(sdp, bn, n);
677 		switch (state) {
678 		/* Growing height of tree */
679 		case ALLOC_GROW_HEIGHT:
680 			if (i == 1) {
681 				ptr = (__be64 *)(dibh->b_data +
682 						 sizeof(struct gfs2_dinode));
683 				zero_bn = *ptr;
684 			}
685 			for (; i - 1 < mp->mp_fheight - ip->i_height && n > 0;
686 			     i++, n--)
687 				gfs2_indirect_init(mp, ip->i_gl, i, 0, bn++);
688 			if (i - 1 == mp->mp_fheight - ip->i_height) {
689 				i--;
690 				gfs2_buffer_copy_tail(mp->mp_bh[i],
691 						sizeof(struct gfs2_meta_header),
692 						dibh, sizeof(struct gfs2_dinode));
693 				gfs2_buffer_clear_tail(dibh,
694 						sizeof(struct gfs2_dinode) +
695 						sizeof(__be64));
696 				ptr = (__be64 *)(mp->mp_bh[i]->b_data +
697 					sizeof(struct gfs2_meta_header));
698 				*ptr = zero_bn;
699 				state = ALLOC_GROW_DEPTH;
700 				for(i = branch_start; i < mp->mp_fheight; i++) {
701 					if (mp->mp_bh[i] == NULL)
702 						break;
703 					brelse(mp->mp_bh[i]);
704 					mp->mp_bh[i] = NULL;
705 				}
706 				i = branch_start;
707 			}
708 			if (n == 0)
709 				break;
710 		/* fall through - To branching from existing tree */
711 		case ALLOC_GROW_DEPTH:
712 			if (i > 1 && i < mp->mp_fheight)
713 				gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[i-1]);
714 			for (; i < mp->mp_fheight && n > 0; i++, n--)
715 				gfs2_indirect_init(mp, ip->i_gl, i,
716 						   mp->mp_list[i-1], bn++);
717 			if (i == mp->mp_fheight)
718 				state = ALLOC_DATA;
719 			if (n == 0)
720 				break;
721 		/* fall through - To tree complete, adding data blocks */
722 		case ALLOC_DATA:
723 			BUG_ON(n > dblks);
724 			BUG_ON(mp->mp_bh[end_of_metadata] == NULL);
725 			gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[end_of_metadata]);
726 			dblks = n;
727 			ptr = metapointer(end_of_metadata, mp);
728 			iomap->addr = bn << inode->i_blkbits;
729 			iomap->flags |= IOMAP_F_MERGED | IOMAP_F_NEW;
730 			while (n-- > 0)
731 				*ptr++ = cpu_to_be64(bn++);
732 			break;
733 		}
734 	} while (iomap->addr == IOMAP_NULL_ADDR);
735 
736 	iomap->type = IOMAP_MAPPED;
737 	iomap->length = (u64)dblks << inode->i_blkbits;
738 	ip->i_height = mp->mp_fheight;
739 	gfs2_add_inode_blocks(&ip->i_inode, alloced);
740 	gfs2_dinode_out(ip, dibh->b_data);
741 out:
742 	up_write(&ip->i_rw_mutex);
743 	return ret;
744 }
745 
746 #define IOMAP_F_GFS2_BOUNDARY IOMAP_F_PRIVATE
747 
748 /**
749  * gfs2_alloc_size - Compute the maximum allocation size
750  * @inode: The inode
751  * @mp: The metapath
752  * @size: Requested size in blocks
753  *
754  * Compute the maximum size of the next allocation at @mp.
755  *
756  * Returns: size in blocks
757  */
758 static u64 gfs2_alloc_size(struct inode *inode, struct metapath *mp, u64 size)
759 {
760 	struct gfs2_inode *ip = GFS2_I(inode);
761 	struct gfs2_sbd *sdp = GFS2_SB(inode);
762 	const __be64 *first, *ptr, *end;
763 
764 	/*
765 	 * For writes to stuffed files, this function is called twice via
766 	 * gfs2_iomap_get, before and after unstuffing. The size we return the
767 	 * first time needs to be large enough to get the reservation and
768 	 * allocation sizes right.  The size we return the second time must
769 	 * be exact or else gfs2_iomap_alloc won't do the right thing.
770 	 */
771 
772 	if (gfs2_is_stuffed(ip) || mp->mp_fheight != mp->mp_aheight) {
773 		unsigned int maxsize = mp->mp_fheight > 1 ?
774 			sdp->sd_inptrs : sdp->sd_diptrs;
775 		maxsize -= mp->mp_list[mp->mp_fheight - 1];
776 		if (size > maxsize)
777 			size = maxsize;
778 		return size;
779 	}
780 
781 	first = metapointer(ip->i_height - 1, mp);
782 	end = metaend(ip->i_height - 1, mp);
783 	if (end - first > size)
784 		end = first + size;
785 	for (ptr = first; ptr < end; ptr++) {
786 		if (*ptr)
787 			break;
788 	}
789 	return ptr - first;
790 }
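/*
 * As an example of the fully-allocated case above: if the write starts
 * at a slot in the bottom indirect block whose next three pointers are
 * zero but whose fourth is already in use, the function returns
 * min(size, 3), since the new allocation has to stop short of the block
 * that is already mapped.
 */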
791 
792 /**
793  * gfs2_iomap_get - Map blocks from an inode to disk blocks
794  * @inode: The inode
795  * @pos: Starting position in bytes
796  * @length: Length to map, in bytes
797  * @flags: iomap flags
798  * @iomap: The iomap structure
799  * @mp: The metapath
800  *
801  * Returns: errno
802  */
803 static int gfs2_iomap_get(struct inode *inode, loff_t pos, loff_t length,
804 			  unsigned flags, struct iomap *iomap,
805 			  struct metapath *mp)
806 {
807 	struct gfs2_inode *ip = GFS2_I(inode);
808 	struct gfs2_sbd *sdp = GFS2_SB(inode);
809 	loff_t size = i_size_read(inode);
810 	__be64 *ptr;
811 	sector_t lblock;
812 	sector_t lblock_stop;
813 	int ret;
814 	int eob;
815 	u64 len;
816 	struct buffer_head *dibh = NULL, *bh;
817 	u8 height;
818 
819 	if (!length)
820 		return -EINVAL;
821 
822 	down_read(&ip->i_rw_mutex);
823 
824 	ret = gfs2_meta_inode_buffer(ip, &dibh);
825 	if (ret)
826 		goto unlock;
827 	mp->mp_bh[0] = dibh;
828 
829 	if (gfs2_is_stuffed(ip)) {
830 		if (flags & IOMAP_WRITE) {
831 			loff_t max_size = gfs2_max_stuffed_size(ip);
832 
833 			if (pos + length > max_size)
834 				goto unstuff;
835 			iomap->length = max_size;
836 		} else {
837 			if (pos >= size) {
838 				if (flags & IOMAP_REPORT) {
839 					ret = -ENOENT;
840 					goto unlock;
841 				} else {
842 					/* report a hole */
843 					iomap->offset = pos;
844 					iomap->length = length;
845 					goto do_alloc;
846 				}
847 			}
848 			iomap->length = size;
849 		}
850 		iomap->addr = (ip->i_no_addr << inode->i_blkbits) +
851 			      sizeof(struct gfs2_dinode);
852 		iomap->type = IOMAP_INLINE;
853 		iomap->inline_data = dibh->b_data + sizeof(struct gfs2_dinode);
854 		goto out;
855 	}
856 
857 unstuff:
858 	lblock = pos >> inode->i_blkbits;
859 	iomap->offset = lblock << inode->i_blkbits;
860 	lblock_stop = (pos + length - 1) >> inode->i_blkbits;
861 	len = lblock_stop - lblock + 1;
862 	iomap->length = len << inode->i_blkbits;
863 
864 	height = ip->i_height;
865 	while ((lblock + 1) * sdp->sd_sb.sb_bsize > sdp->sd_heightsize[height])
866 		height++;
867 	find_metapath(sdp, lblock, mp, height);
868 	if (height > ip->i_height || gfs2_is_stuffed(ip))
869 		goto do_alloc;
870 
871 	ret = lookup_metapath(ip, mp);
872 	if (ret)
873 		goto unlock;
874 
875 	if (mp->mp_aheight != ip->i_height)
876 		goto do_alloc;
877 
878 	ptr = metapointer(ip->i_height - 1, mp);
879 	if (*ptr == 0)
880 		goto do_alloc;
881 
882 	bh = mp->mp_bh[ip->i_height - 1];
883 	len = gfs2_extent_length(bh, ptr, len, &eob);
884 
885 	iomap->addr = be64_to_cpu(*ptr) << inode->i_blkbits;
886 	iomap->length = len << inode->i_blkbits;
887 	iomap->type = IOMAP_MAPPED;
888 	iomap->flags |= IOMAP_F_MERGED;
889 	if (eob)
890 		iomap->flags |= IOMAP_F_GFS2_BOUNDARY;
891 
892 out:
893 	iomap->bdev = inode->i_sb->s_bdev;
894 unlock:
895 	up_read(&ip->i_rw_mutex);
896 	return ret;
897 
898 do_alloc:
899 	iomap->addr = IOMAP_NULL_ADDR;
900 	iomap->type = IOMAP_HOLE;
901 	if (flags & IOMAP_REPORT) {
902 		if (pos >= size)
903 			ret = -ENOENT;
904 		else if (height == ip->i_height)
905 			ret = gfs2_hole_size(inode, lblock, len, mp, iomap);
906 		else
907 			iomap->length = size - pos;
908 	} else if (flags & IOMAP_WRITE) {
909 		u64 alloc_size;
910 
911 		if (flags & IOMAP_DIRECT)
912 			goto out;  /* (see gfs2_file_direct_write) */
913 
914 		len = gfs2_alloc_size(inode, mp, len);
915 		alloc_size = len << inode->i_blkbits;
916 		if (alloc_size < iomap->length)
917 			iomap->length = alloc_size;
918 	} else {
919 		if (pos < size && height == ip->i_height)
920 			ret = gfs2_hole_size(inode, lblock, len, mp, iomap);
921 	}
922 	goto out;
923 }
924 
925 /**
926  * gfs2_lblk_to_dblk - convert logical block to disk block
927  * @inode: the inode of the file we're mapping
928  * @lblock: the block relative to the start of the file
929  * @dblock: the returned dblock, if no error
930  *
931  * This function maps a single block from a file logical block (relative to
932  * the start of the file) to a file system absolute block using iomap.
933  *
934  * Returns: 0 on success (the disk block is returned in @dblock), or errno
935  */
936 int gfs2_lblk_to_dblk(struct inode *inode, u32 lblock, u64 *dblock)
937 {
938 	struct iomap iomap = { };
939 	struct metapath mp = { .mp_aheight = 1, };
940 	loff_t pos = (loff_t)lblock << inode->i_blkbits;
941 	int ret;
942 
943 	ret = gfs2_iomap_get(inode, pos, i_blocksize(inode), 0, &iomap, &mp);
944 	release_metapath(&mp);
945 	if (ret == 0)
946 		*dblock = iomap.addr >> inode->i_blkbits;
947 
948 	return ret;
949 }
950 
951 static int gfs2_write_lock(struct inode *inode)
952 {
953 	struct gfs2_inode *ip = GFS2_I(inode);
954 	struct gfs2_sbd *sdp = GFS2_SB(inode);
955 	int error;
956 
957 	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
958 	error = gfs2_glock_nq(&ip->i_gh);
959 	if (error)
960 		goto out_uninit;
961 	if (&ip->i_inode == sdp->sd_rindex) {
962 		struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
963 
964 		error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE,
965 					   GL_NOCACHE, &m_ip->i_gh);
966 		if (error)
967 			goto out_unlock;
968 	}
969 	return 0;
970 
971 out_unlock:
972 	gfs2_glock_dq(&ip->i_gh);
973 out_uninit:
974 	gfs2_holder_uninit(&ip->i_gh);
975 	return error;
976 }
977 
978 static void gfs2_write_unlock(struct inode *inode)
979 {
980 	struct gfs2_inode *ip = GFS2_I(inode);
981 	struct gfs2_sbd *sdp = GFS2_SB(inode);
982 
983 	if (&ip->i_inode == sdp->sd_rindex) {
984 		struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
985 
986 		gfs2_glock_dq_uninit(&m_ip->i_gh);
987 	}
988 	gfs2_glock_dq_uninit(&ip->i_gh);
989 }
990 
991 static int gfs2_iomap_page_prepare(struct inode *inode, loff_t pos,
992 				   unsigned len, struct iomap *iomap)
993 {
994 	struct gfs2_sbd *sdp = GFS2_SB(inode);
995 
996 	return gfs2_trans_begin(sdp, RES_DINODE + (len >> inode->i_blkbits), 0);
997 }
998 
999 static void gfs2_iomap_page_done(struct inode *inode, loff_t pos,
1000 				 unsigned copied, struct page *page,
1001 				 struct iomap *iomap)
1002 {
1003 	struct gfs2_inode *ip = GFS2_I(inode);
1004 	struct gfs2_sbd *sdp = GFS2_SB(inode);
1005 
1006 	if (page && !gfs2_is_stuffed(ip))
1007 		gfs2_page_add_databufs(ip, page, offset_in_page(pos), copied);
1008 	gfs2_trans_end(sdp);
1009 }
1010 
1011 static const struct iomap_page_ops gfs2_iomap_page_ops = {
1012 	.page_prepare = gfs2_iomap_page_prepare,
1013 	.page_done = gfs2_iomap_page_done,
1014 };
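/*
 * These hooks are what keep buffered writes through iomap inside a GFS2
 * transaction: iomap calls ->page_prepare before copying data into a
 * page and ->page_done once the copy is complete, so each page-sized
 * chunk of the write is bracketed by gfs2_trans_begin()/gfs2_trans_end(),
 * and for jdata files the page's buffers are added to the transaction
 * in gfs2_iomap_page_done() via gfs2_page_add_databufs().
 */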
1015 
1016 static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
1017 				  loff_t length, unsigned flags,
1018 				  struct iomap *iomap,
1019 				  struct metapath *mp)
1020 {
1021 	struct gfs2_inode *ip = GFS2_I(inode);
1022 	struct gfs2_sbd *sdp = GFS2_SB(inode);
1023 	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
1024 	bool unstuff, alloc_required;
1025 	int ret;
1026 
1027 	ret = gfs2_write_lock(inode);
1028 	if (ret)
1029 		return ret;
1030 
1031 	unstuff = gfs2_is_stuffed(ip) &&
1032 		  pos + length > gfs2_max_stuffed_size(ip);
1033 
1034 	ret = gfs2_iomap_get(inode, pos, length, flags, iomap, mp);
1035 	if (ret)
1036 		goto out_unlock;
1037 
1038 	alloc_required = unstuff || iomap->type == IOMAP_HOLE;
1039 
1040 	if (alloc_required || gfs2_is_jdata(ip))
1041 		gfs2_write_calc_reserv(ip, iomap->length, &data_blocks,
1042 				       &ind_blocks);
1043 
1044 	if (alloc_required) {
1045 		struct gfs2_alloc_parms ap = {
1046 			.target = data_blocks + ind_blocks
1047 		};
1048 
1049 		ret = gfs2_quota_lock_check(ip, &ap);
1050 		if (ret)
1051 			goto out_unlock;
1052 
1053 		ret = gfs2_inplace_reserve(ip, &ap);
1054 		if (ret)
1055 			goto out_qunlock;
1056 	}
1057 
1058 	rblocks = RES_DINODE + ind_blocks;
1059 	if (gfs2_is_jdata(ip))
1060 		rblocks += data_blocks;
1061 	if (ind_blocks || data_blocks)
1062 		rblocks += RES_STATFS + RES_QUOTA;
1063 	if (inode == sdp->sd_rindex)
1064 		rblocks += 2 * RES_STATFS;
1065 	if (alloc_required)
1066 		rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks);
1067 
1068 	if (unstuff || iomap->type == IOMAP_HOLE) {
1069 		struct gfs2_trans *tr;
1070 
1071 		ret = gfs2_trans_begin(sdp, rblocks,
1072 				       iomap->length >> inode->i_blkbits);
1073 		if (ret)
1074 			goto out_trans_fail;
1075 
1076 		if (unstuff) {
1077 			ret = gfs2_unstuff_dinode(ip, NULL);
1078 			if (ret)
1079 				goto out_trans_end;
1080 			release_metapath(mp);
1081 			ret = gfs2_iomap_get(inode, iomap->offset,
1082 					     iomap->length, flags, iomap, mp);
1083 			if (ret)
1084 				goto out_trans_end;
1085 		}
1086 
1087 		if (iomap->type == IOMAP_HOLE) {
1088 			ret = gfs2_iomap_alloc(inode, iomap, flags, mp);
1089 			if (ret) {
1090 				gfs2_trans_end(sdp);
1091 				gfs2_inplace_release(ip);
1092 				punch_hole(ip, iomap->offset, iomap->length);
1093 				goto out_qunlock;
1094 			}
1095 		}
1096 
1097 		tr = current->journal_info;
1098 		if (tr->tr_num_buf_new)
1099 			__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
1100 		else
1101 			gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[0]);
1102 
1103 		gfs2_trans_end(sdp);
1104 	}
1105 
1106 	if (gfs2_is_stuffed(ip) || gfs2_is_jdata(ip))
1107 		iomap->page_ops = &gfs2_iomap_page_ops;
1108 	return 0;
1109 
1110 out_trans_end:
1111 	gfs2_trans_end(sdp);
1112 out_trans_fail:
1113 	if (alloc_required)
1114 		gfs2_inplace_release(ip);
1115 out_qunlock:
1116 	if (alloc_required)
1117 		gfs2_quota_unlock(ip);
1118 out_unlock:
1119 	gfs2_write_unlock(inode);
1120 	return ret;
1121 }
1122 
1123 static int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
1124 			    unsigned flags, struct iomap *iomap)
1125 {
1126 	struct gfs2_inode *ip = GFS2_I(inode);
1127 	struct metapath mp = { .mp_aheight = 1, };
1128 	int ret;
1129 
1130 	iomap->flags |= IOMAP_F_BUFFER_HEAD;
1131 
1132 	trace_gfs2_iomap_start(ip, pos, length, flags);
1133 	if ((flags & IOMAP_WRITE) && !(flags & IOMAP_DIRECT)) {
1134 		ret = gfs2_iomap_begin_write(inode, pos, length, flags, iomap, &mp);
1135 	} else {
1136 		ret = gfs2_iomap_get(inode, pos, length, flags, iomap, &mp);
1137 
1138 		/*
1139 		 * Silently fall back to buffered I/O for stuffed files or if
1140 		 * we've hit a hole (see gfs2_file_direct_write).
1141 		 */
1142 		if ((flags & IOMAP_WRITE) && (flags & IOMAP_DIRECT) &&
1143 		    iomap->type != IOMAP_MAPPED)
1144 			ret = -ENOTBLK;
1145 	}
1146 	release_metapath(&mp);
1147 	trace_gfs2_iomap_end(ip, iomap, ret);
1148 	return ret;
1149 }
1150 
1151 static int gfs2_iomap_end(struct inode *inode, loff_t pos, loff_t length,
1152 			  ssize_t written, unsigned flags, struct iomap *iomap)
1153 {
1154 	struct gfs2_inode *ip = GFS2_I(inode);
1155 	struct gfs2_sbd *sdp = GFS2_SB(inode);
1156 
1157 	if ((flags & (IOMAP_WRITE | IOMAP_DIRECT)) != IOMAP_WRITE)
1158 		goto out;
1159 
1160 	if (!gfs2_is_stuffed(ip))
1161 		gfs2_ordered_add_inode(ip);
1162 
1163 	if (inode == sdp->sd_rindex)
1164 		adjust_fs_space(inode);
1165 
1166 	gfs2_inplace_release(ip);
1167 
1168 	if (length != written && (iomap->flags & IOMAP_F_NEW)) {
1169 		/* Deallocate blocks that were just allocated. */
1170 		loff_t blockmask = i_blocksize(inode) - 1;
1171 		loff_t end = (pos + length) & ~blockmask;
1172 
1173 		pos = (pos + written + blockmask) & ~blockmask;
1174 		if (pos < end) {
1175 			truncate_pagecache_range(inode, pos, end - 1);
1176 			punch_hole(ip, pos, end - pos);
1177 		}
1178 	}
1179 
1180 	if (ip->i_qadata && ip->i_qadata->qa_qd_num)
1181 		gfs2_quota_unlock(ip);
1182 	gfs2_write_unlock(inode);
1183 
1184 out:
1185 	return 0;
1186 }
1187 
1188 const struct iomap_ops gfs2_iomap_ops = {
1189 	.iomap_begin = gfs2_iomap_begin,
1190 	.iomap_end = gfs2_iomap_end,
1191 };
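/*
 * A minimal sketch of how these ops are consumed (the actual call sites
 * live in other GFS2 files, so the exact form here is from memory):
 * the buffered write path hands them to the generic iomap code as
 *
 *	ret = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops);
 *
 * after which gfs2_iomap_begin() and gfs2_iomap_end() above are called
 * once per mapping that the write covers.
 */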
1192 
1193 /**
1194  * gfs2_block_map - Map one or more blocks of an inode to a disk block
1195  * @inode: The inode
1196  * @lblock: The logical block number
1197  * @bh_map: The bh to be mapped
1198  * @create: True if it's ok to allocate blocks to satisfy the request
1199  *
1200  * The size of the requested mapping is defined in bh_map->b_size.
1201  *
1202  * Clears buffer_mapped(bh_map) and leaves bh_map->b_size unchanged
1203  * when @lblock is not mapped.  Sets buffer_mapped(bh_map) and
1204  * bh_map->b_size to indicate the size of the mapping when @lblock and
1205  * successive blocks are mapped, up to the requested size.
1206  *
1207  * Sets buffer_boundary() if a read of metadata will be required
1208  * before the next block can be mapped. Sets buffer_new() if new
1209  * blocks were allocated.
1210  *
1211  * Returns: errno
1212  */
1213 
1214 int gfs2_block_map(struct inode *inode, sector_t lblock,
1215 		   struct buffer_head *bh_map, int create)
1216 {
1217 	struct gfs2_inode *ip = GFS2_I(inode);
1218 	loff_t pos = (loff_t)lblock << inode->i_blkbits;
1219 	loff_t length = bh_map->b_size;
1220 	struct metapath mp = { .mp_aheight = 1, };
1221 	struct iomap iomap = { };
1222 	int ret;
1223 
1224 	clear_buffer_mapped(bh_map);
1225 	clear_buffer_new(bh_map);
1226 	clear_buffer_boundary(bh_map);
1227 	trace_gfs2_bmap(ip, bh_map, lblock, create, 1);
1228 
1229 	if (create) {
1230 		ret = gfs2_iomap_get(inode, pos, length, IOMAP_WRITE, &iomap, &mp);
1231 		if (!ret && iomap.type == IOMAP_HOLE)
1232 			ret = gfs2_iomap_alloc(inode, &iomap, IOMAP_WRITE, &mp);
1233 		release_metapath(&mp);
1234 	} else {
1235 		ret = gfs2_iomap_get(inode, pos, length, 0, &iomap, &mp);
1236 		release_metapath(&mp);
1237 	}
1238 	if (ret)
1239 		goto out;
1240 
1241 	if (iomap.length > bh_map->b_size) {
1242 		iomap.length = bh_map->b_size;
1243 		iomap.flags &= ~IOMAP_F_GFS2_BOUNDARY;
1244 	}
1245 	if (iomap.addr != IOMAP_NULL_ADDR)
1246 		map_bh(bh_map, inode->i_sb, iomap.addr >> inode->i_blkbits);
1247 	bh_map->b_size = iomap.length;
1248 	if (iomap.flags & IOMAP_F_GFS2_BOUNDARY)
1249 		set_buffer_boundary(bh_map);
1250 	if (iomap.flags & IOMAP_F_NEW)
1251 		set_buffer_new(bh_map);
1252 
1253 out:
1254 	trace_gfs2_bmap(ip, bh_map, lblock, create, ret);
1255 	return ret;
1256 }
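/*
 * gfs2_extent_map() below illustrates the calling convention: it sizes
 * bh.b_size to the largest mapping it is willing to accept, calls
 * gfs2_block_map(), and then reads the resulting extent length back out
 * of bh.b_size and the starting disk block out of bh.b_blocknr.
 */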
1257 
1258 /*
1259  * Deprecated: do not use in new code
1260  */
1261 int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsigned *extlen)
1262 {
1263 	struct buffer_head bh = { .b_state = 0, .b_blocknr = 0 };
1264 	int ret;
1265 	int create = *new;
1266 
1267 	BUG_ON(!extlen);
1268 	BUG_ON(!dblock);
1269 	BUG_ON(!new);
1270 
1271 	bh.b_size = BIT(inode->i_blkbits + (create ? 0 : 5));
1272 	ret = gfs2_block_map(inode, lblock, &bh, create);
1273 	*extlen = bh.b_size >> inode->i_blkbits;
1274 	*dblock = bh.b_blocknr;
1275 	if (buffer_new(&bh))
1276 		*new = 1;
1277 	else
1278 		*new = 0;
1279 	return ret;
1280 }
1281 
1282 /**
1283  * gfs2_block_zero_range - Deal with zeroing out data
1284  *
1285  * This is partly borrowed from ext3.
1286  */
1287 static int gfs2_block_zero_range(struct inode *inode, loff_t from,
1288 				 unsigned int length)
1289 {
1290 	struct address_space *mapping = inode->i_mapping;
1291 	struct gfs2_inode *ip = GFS2_I(inode);
1292 	unsigned long index = from >> PAGE_SHIFT;
1293 	unsigned offset = from & (PAGE_SIZE-1);
1294 	unsigned blocksize, iblock, pos;
1295 	struct buffer_head *bh;
1296 	struct page *page;
1297 	int err;
1298 
1299 	page = find_or_create_page(mapping, index, GFP_NOFS);
1300 	if (!page)
1301 		return 0;
1302 
1303 	blocksize = inode->i_sb->s_blocksize;
1304 	iblock = index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits);
1305 
1306 	if (!page_has_buffers(page))
1307 		create_empty_buffers(page, blocksize, 0);
1308 
1309 	/* Find the buffer that contains "offset" */
1310 	bh = page_buffers(page);
1311 	pos = blocksize;
1312 	while (offset >= pos) {
1313 		bh = bh->b_this_page;
1314 		iblock++;
1315 		pos += blocksize;
1316 	}
1317 
1318 	err = 0;
1319 
1320 	if (!buffer_mapped(bh)) {
1321 		gfs2_block_map(inode, iblock, bh, 0);
1322 		/* unmapped? It's a hole - nothing to do */
1323 		if (!buffer_mapped(bh))
1324 			goto unlock;
1325 	}
1326 
1327 	/* Ok, it's mapped. Make sure it's up-to-date */
1328 	if (PageUptodate(page))
1329 		set_buffer_uptodate(bh);
1330 
1331 	if (!buffer_uptodate(bh)) {
1332 		err = -EIO;
1333 		ll_rw_block(REQ_OP_READ, 0, 1, &bh);
1334 		wait_on_buffer(bh);
1335 		/* Uhhuh. Read error. Complain and punt. */
1336 		if (!buffer_uptodate(bh))
1337 			goto unlock;
1338 		err = 0;
1339 	}
1340 
1341 	if (gfs2_is_jdata(ip))
1342 		gfs2_trans_add_data(ip->i_gl, bh);
1343 	else
1344 		gfs2_ordered_add_inode(ip);
1345 
1346 	zero_user(page, offset, length);
1347 	mark_buffer_dirty(bh);
1348 unlock:
1349 	unlock_page(page);
1350 	put_page(page);
1351 	return err;
1352 }
1353 
1354 #define GFS2_JTRUNC_REVOKES 8192
1355 
1356 /**
1357  * gfs2_journaled_truncate - Wrapper for truncate_pagecache for jdata files
1358  * @inode: The inode being truncated
1359  * @oldsize: The original (larger) size
1360  * @newsize: The new smaller size
1361  *
1362  * With jdata files, we have to journal a revoke for each block which is
1363  * truncated. As a result, we need to split this into separate transactions
1364  * if the number of pages being truncated gets too large.
1365  */
1366 
1367 static int gfs2_journaled_truncate(struct inode *inode, u64 oldsize, u64 newsize)
1368 {
1369 	struct gfs2_sbd *sdp = GFS2_SB(inode);
1370 	u64 max_chunk = GFS2_JTRUNC_REVOKES * sdp->sd_vfs->s_blocksize;
1371 	u64 chunk;
1372 	int error;
1373 
1374 	while (oldsize != newsize) {
1375 		struct gfs2_trans *tr;
1376 		unsigned int offs;
1377 
1378 		chunk = oldsize - newsize;
1379 		if (chunk > max_chunk)
1380 			chunk = max_chunk;
1381 
1382 		offs = oldsize & ~PAGE_MASK;
1383 		if (offs && chunk > PAGE_SIZE)
1384 			chunk = offs + ((chunk - offs) & PAGE_MASK);
1385 
1386 		truncate_pagecache(inode, oldsize - chunk);
1387 		oldsize -= chunk;
1388 
1389 		tr = current->journal_info;
1390 		if (!test_bit(TR_TOUCHED, &tr->tr_flags))
1391 			continue;
1392 
1393 		gfs2_trans_end(sdp);
1394 		error = gfs2_trans_begin(sdp, RES_DINODE, GFS2_JTRUNC_REVOKES);
1395 		if (error)
1396 			return error;
1397 	}
1398 
1399 	return 0;
1400 }
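/*
 * With the default 4k block size, GFS2_JTRUNC_REVOKES caps each chunk
 * at 8192 * 4096 bytes == 32MiB of pagecache truncated per transaction;
 * larger jdata truncates are simply split into a series of such
 * transactions by the loop above.
 */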
1401 
1402 static int trunc_start(struct inode *inode, u64 newsize)
1403 {
1404 	struct gfs2_inode *ip = GFS2_I(inode);
1405 	struct gfs2_sbd *sdp = GFS2_SB(inode);
1406 	struct buffer_head *dibh = NULL;
1407 	int journaled = gfs2_is_jdata(ip);
1408 	u64 oldsize = inode->i_size;
1409 	int error;
1410 
1411 	if (journaled)
1412 		error = gfs2_trans_begin(sdp, RES_DINODE + RES_JDATA, GFS2_JTRUNC_REVOKES);
1413 	else
1414 		error = gfs2_trans_begin(sdp, RES_DINODE, 0);
1415 	if (error)
1416 		return error;
1417 
1418 	error = gfs2_meta_inode_buffer(ip, &dibh);
1419 	if (error)
1420 		goto out;
1421 
1422 	gfs2_trans_add_meta(ip->i_gl, dibh);
1423 
1424 	if (gfs2_is_stuffed(ip)) {
1425 		gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode) + newsize);
1426 	} else {
1427 		unsigned int blocksize = i_blocksize(inode);
1428 		unsigned int offs = newsize & (blocksize - 1);
1429 		if (offs) {
1430 			error = gfs2_block_zero_range(inode, newsize,
1431 						      blocksize - offs);
1432 			if (error)
1433 				goto out;
1434 		}
1435 		ip->i_diskflags |= GFS2_DIF_TRUNC_IN_PROG;
1436 	}
1437 
1438 	i_size_write(inode, newsize);
1439 	ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
1440 	gfs2_dinode_out(ip, dibh->b_data);
1441 
1442 	if (journaled)
1443 		error = gfs2_journaled_truncate(inode, oldsize, newsize);
1444 	else
1445 		truncate_pagecache(inode, newsize);
1446 
1447 out:
1448 	brelse(dibh);
1449 	if (current->journal_info)
1450 		gfs2_trans_end(sdp);
1451 	return error;
1452 }
1453 
1454 int gfs2_iomap_get_alloc(struct inode *inode, loff_t pos, loff_t length,
1455 			 struct iomap *iomap)
1456 {
1457 	struct metapath mp = { .mp_aheight = 1, };
1458 	int ret;
1459 
1460 	ret = gfs2_iomap_get(inode, pos, length, IOMAP_WRITE, iomap, &mp);
1461 	if (!ret && iomap->type == IOMAP_HOLE)
1462 		ret = gfs2_iomap_alloc(inode, iomap, IOMAP_WRITE, &mp);
1463 	release_metapath(&mp);
1464 	return ret;
1465 }
1466 
1467 /**
1468  * sweep_bh_for_rgrps - find an rgrp in a meta buffer and free blocks therein
1469  * @ip: inode
1470  * @rd_gh: holder of the resource group glock
1471  * @bh: buffer head to sweep
1472  * @start: starting point in bh
1473  * @end: end point in bh
1474  * @meta: true if bh points to metadata (rather than data)
1475  * @btotal: place to keep count of total blocks freed
1476  *
1477  * We sweep a metadata buffer (provided by the metapath) for blocks we need to
1478  * free, and free them all. However, we do it one rgrp at a time. If this
1479  * block has references to multiple rgrps, we break it into individual
1480  * transactions. This allows other processes to use the rgrps while we're
1481  * focused on a single one, for better concurrency / performance.
1482  * At every transaction boundary, we rewrite the inode into the journal.
1483  * That way the bitmaps are kept consistent with the inode and we can recover
1484  * if we're interrupted by power outages.
1485  *
1486  * Returns: 0, or return code if an error occurred.
1487  *          *btotal has the total number of blocks freed
1488  */
1489 static int sweep_bh_for_rgrps(struct gfs2_inode *ip, struct gfs2_holder *rd_gh,
1490 			      struct buffer_head *bh, __be64 *start, __be64 *end,
1491 			      bool meta, u32 *btotal)
1492 {
1493 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1494 	struct gfs2_rgrpd *rgd;
1495 	struct gfs2_trans *tr;
1496 	__be64 *p;
1497 	int blks_outside_rgrp;
1498 	u64 bn, bstart, isize_blks;
1499 	s64 blen; /* needs to be s64 or gfs2_add_inode_blocks breaks */
1500 	int ret = 0;
1501 	bool buf_in_tr = false; /* buffer was added to transaction */
1502 
1503 more_rgrps:
1504 	rgd = NULL;
1505 	if (gfs2_holder_initialized(rd_gh)) {
1506 		rgd = gfs2_glock2rgrp(rd_gh->gh_gl);
1507 		gfs2_assert_withdraw(sdp,
1508 			     gfs2_glock_is_locked_by_me(rd_gh->gh_gl));
1509 	}
1510 	blks_outside_rgrp = 0;
1511 	bstart = 0;
1512 	blen = 0;
1513 
1514 	for (p = start; p < end; p++) {
1515 		if (!*p)
1516 			continue;
1517 		bn = be64_to_cpu(*p);
1518 
1519 		if (rgd) {
1520 			if (!rgrp_contains_block(rgd, bn)) {
1521 				blks_outside_rgrp++;
1522 				continue;
1523 			}
1524 		} else {
1525 			rgd = gfs2_blk2rgrpd(sdp, bn, true);
1526 			if (unlikely(!rgd)) {
1527 				ret = -EIO;
1528 				goto out;
1529 			}
1530 			ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
1531 						 0, rd_gh);
1532 			if (ret)
1533 				goto out;
1534 
1535 			/* Must be done with the rgrp glock held: */
1536 			if (gfs2_rs_active(&ip->i_res) &&
1537 			    rgd == ip->i_res.rs_rbm.rgd)
1538 				gfs2_rs_deltree(&ip->i_res);
1539 		}
1540 
1541 		/* The size of our transactions will be unknown until we
1542 		   actually process all the metadata blocks that relate to
1543 		   the rgrp. So we estimate. We know it can't be more than
1544 		   the dinode's i_blocks and we don't want to exceed the
1545 		   journal flush threshold, sd_log_thresh2. */
1546 		if (current->journal_info == NULL) {
1547 			unsigned int jblocks_rqsted, revokes;
1548 
1549 			jblocks_rqsted = rgd->rd_length + RES_DINODE +
1550 				RES_INDIRECT;
1551 			isize_blks = gfs2_get_inode_blocks(&ip->i_inode);
1552 			if (isize_blks > atomic_read(&sdp->sd_log_thresh2))
1553 				jblocks_rqsted +=
1554 					atomic_read(&sdp->sd_log_thresh2);
1555 			else
1556 				jblocks_rqsted += isize_blks;
1557 			revokes = jblocks_rqsted;
1558 			if (meta)
1559 				revokes += end - start;
1560 			else if (ip->i_depth)
1561 				revokes += sdp->sd_inptrs;
1562 			ret = gfs2_trans_begin(sdp, jblocks_rqsted, revokes);
1563 			if (ret)
1564 				goto out_unlock;
1565 			down_write(&ip->i_rw_mutex);
1566 		}
1567 		/* check if we will exceed the transaction blocks requested */
1568 		tr = current->journal_info;
1569 		if (tr->tr_num_buf_new + RES_STATFS +
1570 		    RES_QUOTA >= atomic_read(&sdp->sd_log_thresh2)) {
1571 			/* We set blks_outside_rgrp to ensure the loop will
1572 			   be repeated for the same rgrp, but with a new
1573 			   transaction. */
1574 			blks_outside_rgrp++;
1575 			/* This next part is tricky. If the buffer was added
1576 			   to the transaction, we've already set some block
1577 			   pointers to 0, so we better follow through and free
1578 			   them, or we will introduce corruption (so break).
1579 			   This may be impossible, or at least rare, but I
1580 			   decided to cover the case regardless.
1581 
1582 			   If the buffer was not added to the transaction
1583 			   (this call), doing so would exceed our transaction
1584 			   size, so we need to end the transaction and start a
1585 			   new one (so goto). */
1586 
1587 			if (buf_in_tr)
1588 				break;
1589 			goto out_unlock;
1590 		}
1591 
1592 		gfs2_trans_add_meta(ip->i_gl, bh);
1593 		buf_in_tr = true;
1594 		*p = 0;
1595 		if (bstart + blen == bn) {
1596 			blen++;
1597 			continue;
1598 		}
1599 		if (bstart) {
1600 			__gfs2_free_blocks(ip, rgd, bstart, (u32)blen, meta);
1601 			(*btotal) += blen;
1602 			gfs2_add_inode_blocks(&ip->i_inode, -blen);
1603 		}
1604 		bstart = bn;
1605 		blen = 1;
1606 	}
1607 	if (bstart) {
1608 		__gfs2_free_blocks(ip, rgd, bstart, (u32)blen, meta);
1609 		(*btotal) += blen;
1610 		gfs2_add_inode_blocks(&ip->i_inode, -blen);
1611 	}
1612 out_unlock:
1613 	if (!ret && blks_outside_rgrp) { /* If buffer still has non-zero blocks
1614 					    outside the rgrp we just processed,
1615 					    do it all over again. */
1616 		if (current->journal_info) {
1617 			struct buffer_head *dibh;
1618 
1619 			ret = gfs2_meta_inode_buffer(ip, &dibh);
1620 			if (ret)
1621 				goto out;
1622 
1623 			/* Every transaction boundary, we rewrite the dinode
1624 			   to keep its di_blocks current in case of failure. */
1625 			ip->i_inode.i_mtime = ip->i_inode.i_ctime =
1626 				current_time(&ip->i_inode);
1627 			gfs2_trans_add_meta(ip->i_gl, dibh);
1628 			gfs2_dinode_out(ip, dibh->b_data);
1629 			brelse(dibh);
1630 			up_write(&ip->i_rw_mutex);
1631 			gfs2_trans_end(sdp);
1632 		}
1633 		gfs2_glock_dq_uninit(rd_gh);
1634 		cond_resched();
1635 		goto more_rgrps;
1636 	}
1637 out:
1638 	return ret;
1639 }
1640 
1641 static bool mp_eq_to_hgt(struct metapath *mp, __u16 *list, unsigned int h)
1642 {
1643 	if (memcmp(mp->mp_list, list, h * sizeof(mp->mp_list[0])))
1644 		return false;
1645 	return true;
1646 }
1647 
1648 /**
1649  * find_nonnull_ptr - find a non-null pointer given a metapath and height
1650  * @mp: starting metapath
1651  * @h: desired height to search
1652  *
1653  * Assumes the metapath is valid (with buffers) out to height h.
1654  * Returns: true if a non-null pointer was found in the metapath buffer
1655  *          false if all remaining pointers are NULL in the buffer
1656  */
1657 static bool find_nonnull_ptr(struct gfs2_sbd *sdp, struct metapath *mp,
1658 			     unsigned int h,
1659 			     __u16 *end_list, unsigned int end_aligned)
1660 {
1661 	struct buffer_head *bh = mp->mp_bh[h];
1662 	__be64 *first, *ptr, *end;
1663 
1664 	first = metaptr1(h, mp);
1665 	ptr = first + mp->mp_list[h];
1666 	end = (__be64 *)(bh->b_data + bh->b_size);
1667 	if (end_list && mp_eq_to_hgt(mp, end_list, h)) {
1668 		bool keep_end = h < end_aligned;
1669 		end = first + end_list[h] + keep_end;
1670 	}
1671 
1672 	while (ptr < end) {
1673 		if (*ptr) { /* if we have a non-null pointer */
1674 			mp->mp_list[h] = ptr - first;
1675 			h++;
1676 			if (h < GFS2_MAX_META_HEIGHT)
1677 				mp->mp_list[h] = 0;
1678 			return true;
1679 		}
1680 		ptr++;
1681 	}
1682 	return false;
1683 }
1684 
1685 enum dealloc_states {
1686 	DEALLOC_MP_FULL = 0,    /* Strip a metapath with all buffers read in */
1687 	DEALLOC_MP_LOWER = 1,   /* lower the metapath strip height */
1688 	DEALLOC_FILL_MP = 2,  /* Fill in the metapath to the given height. */
1689 	DEALLOC_DONE = 3,       /* process complete */
1690 };
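/*
 * punch_hole() below drives a small state machine over these values:
 * DEALLOC_FILL_MP reads metadata buffers in down to the current strip
 * height, DEALLOC_MP_FULL sweeps the pointers at that height and frees
 * the blocks via sweep_bh_for_rgrps(), and DEALLOC_MP_LOWER either
 * advances to the next branch of the tree or, once a level has been
 * exhausted, lowers the strip height so that the now-emptied indirect
 * blocks are freed on the following pass.
 */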
1691 
1692 static inline void
1693 metapointer_range(struct metapath *mp, int height,
1694 		  __u16 *start_list, unsigned int start_aligned,
1695 		  __u16 *end_list, unsigned int end_aligned,
1696 		  __be64 **start, __be64 **end)
1697 {
1698 	struct buffer_head *bh = mp->mp_bh[height];
1699 	__be64 *first;
1700 
1701 	first = metaptr1(height, mp);
1702 	*start = first;
1703 	if (mp_eq_to_hgt(mp, start_list, height)) {
1704 		bool keep_start = height < start_aligned;
1705 		*start = first + start_list[height] + keep_start;
1706 	}
1707 	*end = (__be64 *)(bh->b_data + bh->b_size);
1708 	if (end_list && mp_eq_to_hgt(mp, end_list, height)) {
1709 		bool keep_end = height < end_aligned;
1710 		*end = first + end_list[height] + keep_end;
1711 	}
1712 }
1713 
1714 static inline bool walk_done(struct gfs2_sbd *sdp,
1715 			     struct metapath *mp, int height,
1716 			     __u16 *end_list, unsigned int end_aligned)
1717 {
1718 	__u16 end;
1719 
1720 	if (end_list) {
1721 		bool keep_end = height < end_aligned;
1722 		if (!mp_eq_to_hgt(mp, end_list, height))
1723 			return false;
1724 		end = end_list[height] + keep_end;
1725 	} else
1726 		end = (height > 0) ? sdp->sd_inptrs : sdp->sd_diptrs;
1727 	return mp->mp_list[height] >= end;
1728 }
1729 
1730 /**
1731  * punch_hole - deallocate blocks in a file
1732  * @ip: inode to truncate
1733  * @offset: the start of the hole
1734  * @length: the size of the hole (or 0 for truncate)
1735  *
1736  * Punch a hole into a file or truncate a file at a given position.  This
1737  * function operates in whole blocks (@offset and @length are rounded
1738  * accordingly); partially filled blocks must be cleared otherwise.
1739  *
1740  * This function works from the bottom up, and from the right to the left. In
1741  * other words, it strips off the highest layer (data) before stripping any of
1742  * the metadata. Doing it this way is best in case the operation is interrupted
1743  * by power failure, etc.  The dinode is rewritten in every transaction to
1744  * guarantee integrity.
1745  */
1746 static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length)
1747 {
1748 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1749 	u64 maxsize = sdp->sd_heightsize[ip->i_height];
1750 	struct metapath mp = {};
1751 	struct buffer_head *dibh, *bh;
1752 	struct gfs2_holder rd_gh;
1753 	unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift;
1754 	u64 lblock = (offset + (1 << bsize_shift) - 1) >> bsize_shift;
1755 	__u16 start_list[GFS2_MAX_META_HEIGHT];
1756 	__u16 __end_list[GFS2_MAX_META_HEIGHT], *end_list = NULL;
1757 	unsigned int start_aligned, uninitialized_var(end_aligned);
1758 	unsigned int strip_h = ip->i_height - 1;
1759 	u32 btotal = 0;
1760 	int ret, state;
1761 	int mp_h; /* metapath buffers are read in to this height */
1762 	u64 prev_bnr = 0;
1763 	__be64 *start, *end;
1764 
1765 	if (offset >= maxsize) {
1766 		/*
1767 		 * The starting point lies beyond the allocated metadata;
1768 		 * there are no blocks to deallocate.
1769 		 */
1770 		return 0;
1771 	}
1772 
1773 	/*
1774 	 * The start position of the hole is defined by lblock, start_list, and
1775 	 * start_aligned.  The end position of the hole is defined by lend,
1776 	 * end_list, and end_aligned.
1777 	 *
1778 	 * start_aligned and end_aligned define down to which height the start
1779 	 * and end positions are aligned to the metadata tree (i.e., the
1780 	 * position is a multiple of the metadata granularity at the height
1781 	 * above).  This determines at which heights additional meta pointers
1782 	 * need to be preserved for the remaining data.
1783 	 */
1784 
1785 	if (length) {
1786 		u64 end_offset = offset + length;
1787 		u64 lend;
1788 
1789 		/*
1790 		 * Clip the end at the maximum file size for the given height:
1791 		 * that's how far the metadata goes; files bigger than that
1792 		 * will have additional layers of indirection.
1793 		 */
1794 		if (end_offset > maxsize)
1795 			end_offset = maxsize;
1796 		lend = end_offset >> bsize_shift;
1797 
1798 		if (lblock >= lend)
1799 			return 0;
1800 
1801 		find_metapath(sdp, lend, &mp, ip->i_height);
1802 		end_list = __end_list;
1803 		memcpy(end_list, mp.mp_list, sizeof(mp.mp_list));
1804 
1805 		for (mp_h = ip->i_height - 1; mp_h > 0; mp_h--) {
1806 			if (end_list[mp_h])
1807 				break;
1808 		}
1809 		end_aligned = mp_h;
1810 	}
1811 
1812 	find_metapath(sdp, lblock, &mp, ip->i_height);
1813 	memcpy(start_list, mp.mp_list, sizeof(start_list));
1814 
1815 	for (mp_h = ip->i_height - 1; mp_h > 0; mp_h--) {
1816 		if (start_list[mp_h])
1817 			break;
1818 	}
1819 	start_aligned = mp_h;
1820 
1821 	ret = gfs2_meta_inode_buffer(ip, &dibh);
1822 	if (ret)
1823 		return ret;
1824 
1825 	mp.mp_bh[0] = dibh;
1826 	ret = lookup_metapath(ip, &mp);
1827 	if (ret)
1828 		goto out_metapath;
1829 
1830 	/* issue read-ahead on metadata */
1831 	for (mp_h = 0; mp_h < mp.mp_aheight - 1; mp_h++) {
1832 		metapointer_range(&mp, mp_h, start_list, start_aligned,
1833 				  end_list, end_aligned, &start, &end);
1834 		gfs2_metapath_ra(ip->i_gl, start, end);
1835 	}
1836 
1837 	if (mp.mp_aheight == ip->i_height)
1838 		state = DEALLOC_MP_FULL; /* We have a complete metapath */
1839 	else
1840 		state = DEALLOC_FILL_MP; /* deal with partial metapath */
1841 
1842 	ret = gfs2_rindex_update(sdp);
1843 	if (ret)
1844 		goto out_metapath;
1845 
1846 	ret = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
1847 	if (ret)
1848 		goto out_metapath;
1849 	gfs2_holder_mark_uninitialized(&rd_gh);
1850 
1851 	mp_h = strip_h;
1852 
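	/*
	 * In outline: DEALLOC_MP_FULL sweeps the pointers in the buffer at
	 * the current strip height, DEALLOC_MP_LOWER backs up through the
	 * metapath to find the next subtree to visit (lowering strip_h once a
	 * level is exhausted), and DEALLOC_FILL_MP reads the buffers along
	 * the current metapath back up to strip height.  The loop reaches
	 * DEALLOC_DONE once the dinode buffer itself has been swept, or on
	 * error.
	 */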
1853 	while (state != DEALLOC_DONE) {
1854 		switch (state) {
1855 		/* Truncate a full metapath at the given strip height.
1856 		 * Note that strip_h == mp_h in order to be in this state. */
1857 		case DEALLOC_MP_FULL:
1858 			bh = mp.mp_bh[mp_h];
1859 			gfs2_assert_withdraw(sdp, bh);
1860 			if (gfs2_assert_withdraw(sdp,
1861 						 prev_bnr != bh->b_blocknr)) {
1862 				printk(KERN_EMERG "GFS2: fsid=%s:inode %llu, "
1863 				       "block:%llu, i_h:%u, s_h:%u, mp_h:%u\n",
1864 				       sdp->sd_fsname,
1865 				       (unsigned long long)ip->i_no_addr,
1866 				       prev_bnr, ip->i_height, strip_h, mp_h);
1867 			}
1868 			prev_bnr = bh->b_blocknr;
1869 
1870 			if (gfs2_metatype_check(sdp, bh,
1871 						(mp_h ? GFS2_METATYPE_IN :
1872 							GFS2_METATYPE_DI))) {
1873 				ret = -EIO;
1874 				goto out;
1875 			}
1876 
1877 			/*
1878 			 * Below, passing end_aligned as 0 gives us the
1879 			 * metapointer range excluding the end point: the end
1880 			 * point is the first position we must not deallocate!
1881 			 */
1882 
1883 			metapointer_range(&mp, mp_h, start_list, start_aligned,
1884 					  end_list, 0 /* end_aligned */,
1885 					  &start, &end);
1886 			ret = sweep_bh_for_rgrps(ip, &rd_gh, mp.mp_bh[mp_h],
1887 						 start, end,
1888 						 mp_h != ip->i_height - 1,
1889 						 &btotal);
1890 
1891 			/* If we hit an error or have just swept the dinode
1892 			   buffer, exit. */
1893 			if (ret || !mp_h) {
1894 				state = DEALLOC_DONE;
1895 				break;
1896 			}
1897 			state = DEALLOC_MP_LOWER;
1898 			break;
1899 
1900 		/* lower the metapath strip height */
1901 		case DEALLOC_MP_LOWER:
1902 			/* We're done with the current buffer, so release it,
1903 			   unless it's the dinode buffer. Then back up to the
1904 			   previous pointer. */
1905 			if (mp_h) {
1906 				brelse(mp.mp_bh[mp_h]);
1907 				mp.mp_bh[mp_h] = NULL;
1908 			}
1909 			/* If we can't get any lower in height, we've stripped
1910 			   off all we can. Next step is to back up and start
1911 			   stripping the previous level of metadata. */
1912 			if (mp_h == 0) {
1913 				strip_h--;
1914 				memcpy(mp.mp_list, start_list, sizeof(start_list));
1915 				mp_h = strip_h;
1916 				state = DEALLOC_FILL_MP;
1917 				break;
1918 			}
1919 			mp.mp_list[mp_h] = 0;
1920 			mp_h--; /* search one metadata height down */
1921 			mp.mp_list[mp_h]++;
1922 			if (walk_done(sdp, &mp, mp_h, end_list, end_aligned))
1923 				break;
1924 			/* Here we've found a part of the metapath that is not
1925 			 * allocated. We need to search at that height for the
1926 			 * next non-null pointer. */
1927 			if (find_nonnull_ptr(sdp, &mp, mp_h, end_list, end_aligned)) {
1928 				state = DEALLOC_FILL_MP;
1929 				mp_h++;
1930 			}
1931 			/* No more non-null pointers at this height. Back up
1932 			   to the previous height and try again. */
1933 			break; /* loop around in the same state */
1934 
1935 		/* Fill the metapath with buffers to the given height. */
1936 		case DEALLOC_FILL_MP:
1937 			/* Fill the buffers out to the current height. */
1938 			ret = fillup_metapath(ip, &mp, mp_h);
1939 			if (ret < 0)
1940 				goto out;
1941 
1942 			/* On the first pass, issue read-ahead on metadata. */
1943 			if (mp.mp_aheight > 1 && strip_h == ip->i_height - 1) {
1944 				unsigned int height = mp.mp_aheight - 1;
1945 
1946 				/* No read-ahead for data blocks. */
1947 				if (mp.mp_aheight - 1 == strip_h)
1948 					height--;
1949 
1950 				for (; height >= mp.mp_aheight - ret; height--) {
1951 					metapointer_range(&mp, height,
1952 							  start_list, start_aligned,
1953 							  end_list, end_aligned,
1954 							  &start, &end);
1955 					gfs2_metapath_ra(ip->i_gl, start, end);
1956 				}
1957 			}
1958 
1959 			/* If buffers found for the entire strip height */
1960 			if (mp.mp_aheight - 1 == strip_h) {
1961 				state = DEALLOC_MP_FULL;
1962 				break;
1963 			}
1964 			if (mp.mp_aheight < ip->i_height) /* We have a partial height */
1965 				mp_h = mp.mp_aheight - 1;
1966 
1967 			/* If we find a non-null block pointer, crawl a bit
1968 			   higher up in the metapath and try again, otherwise
1969 			   we need to look lower for a new starting point. */
1970 			if (find_nonnull_ptr(sdp, &mp, mp_h, end_list, end_aligned))
1971 				mp_h++;
1972 			else
1973 				state = DEALLOC_MP_LOWER;
1974 			break;
1975 		}
1976 	}
1977 
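	/*
	 * btotal is the running count of blocks freed by sweep_bh_for_rgrps
	 * above.  If anything was freed, fold it into the statfs and quota
	 * accounting and write out the updated dinode in one final
	 * transaction, starting one here if the sweep left none open.
	 */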
1978 	if (btotal) {
1979 		if (current->journal_info == NULL) {
1980 			ret = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS +
1981 					       RES_QUOTA, 0);
1982 			if (ret)
1983 				goto out;
1984 			down_write(&ip->i_rw_mutex);
1985 		}
1986 		gfs2_statfs_change(sdp, 0, +btotal, 0);
1987 		gfs2_quota_change(ip, -(s64)btotal, ip->i_inode.i_uid,
1988 				  ip->i_inode.i_gid);
1989 		ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
1990 		gfs2_trans_add_meta(ip->i_gl, dibh);
1991 		gfs2_dinode_out(ip, dibh->b_data);
1992 		up_write(&ip->i_rw_mutex);
1993 		gfs2_trans_end(sdp);
1994 	}
1995 
1996 out:
1997 	if (gfs2_holder_initialized(&rd_gh))
1998 		gfs2_glock_dq_uninit(&rd_gh);
1999 	if (current->journal_info) {
2000 		up_write(&ip->i_rw_mutex);
2001 		gfs2_trans_end(sdp);
2002 		cond_resched();
2003 	}
2004 	gfs2_quota_unhold(ip);
2005 out_metapath:
2006 	release_metapath(&mp);
2007 	return ret;
2008 }
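
/*
 * punch_hole() is the common engine for the operations below: do_shrink()
 * calls it with a length of 0 to truncate from the new size onward,
 * gfs2_truncatei_resume() does the same to finish an interrupted truncate,
 * gfs2_file_dealloc() passes (0, 0) to strip the whole file, and
 * __gfs2_punch_hole() passes the byte range being punched.
 */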
2009 
2010 static int trunc_end(struct gfs2_inode *ip)
2011 {
2012 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
2013 	struct buffer_head *dibh;
2014 	int error;
2015 
2016 	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
2017 	if (error)
2018 		return error;
2019 
2020 	down_write(&ip->i_rw_mutex);
2021 
2022 	error = gfs2_meta_inode_buffer(ip, &dibh);
2023 	if (error)
2024 		goto out;
2025 
2026 	if (!i_size_read(&ip->i_inode)) {
2027 		ip->i_height = 0;
2028 		ip->i_goal = ip->i_no_addr;
2029 		gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
2030 		gfs2_ordered_del_inode(ip);
2031 	}
2032 	ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
2033 	ip->i_diskflags &= ~GFS2_DIF_TRUNC_IN_PROG;
2034 
2035 	gfs2_trans_add_meta(ip->i_gl, dibh);
2036 	gfs2_dinode_out(ip, dibh->b_data);
2037 	brelse(dibh);
2038 
2039 out:
2040 	up_write(&ip->i_rw_mutex);
2041 	gfs2_trans_end(sdp);
2042 	return error;
2043 }
2044 
2045 /**
2046  * do_shrink - make a file smaller
2047  * @inode: the inode
2048  * @newsize: the size to make the file
2049  *
2050  * Called with an exclusive lock on @inode. @newsize must
2051  * be equal to or smaller than the current inode size.
2052  *
2053  * Returns: errno
2054  */
2055 
2056 static int do_shrink(struct inode *inode, u64 newsize)
2057 {
2058 	struct gfs2_inode *ip = GFS2_I(inode);
2059 	int error;
2060 
2061 	error = trunc_start(inode, newsize);
2062 	if (error < 0)
2063 		return error;
2064 	if (gfs2_is_stuffed(ip))
2065 		return 0;
2066 
2067 	error = punch_hole(ip, newsize, 0);
2068 	if (error == 0)
2069 		error = trunc_end(ip);
2070 
2071 	return error;
2072 }
2073 
2074 void gfs2_trim_blocks(struct inode *inode)
2075 {
2076 	int ret;
2077 
2078 	ret = do_shrink(inode, inode->i_size);
2079 	WARN_ON(ret != 0);
2080 }
2081 
2082 /**
2083  * do_grow - Touch and update inode size
2084  * @inode: The inode
2085  * @size: The new size
2086  *
2087  * This function updates the timestamps on the inode and
2088  * may also increase the size of the inode. This function
2089  * must not be called with @size any smaller than the current
2090  * inode size.
2091  *
2092  * Although it is not strictly required to unstuff files here,
2093  * earlier versions of GFS2 had a bug in the stuffed file reading
2094  * code which would result in a buffer overrun if the size is larger
2095  * than the max stuffed file size. In order to prevent this from
2096  * occurring, such files are unstuffed, but in other cases we can
2097  * just update the inode size directly.
2098  *
2099  * Returns: 0 on success, or -ve on error
2100  */
2101 
2102 static int do_grow(struct inode *inode, u64 size)
2103 {
2104 	struct gfs2_inode *ip = GFS2_I(inode);
2105 	struct gfs2_sbd *sdp = GFS2_SB(inode);
2106 	struct gfs2_alloc_parms ap = { .target = 1, };
2107 	struct buffer_head *dibh;
2108 	int error;
2109 	int unstuff = 0;
2110 
2111 	if (gfs2_is_stuffed(ip) && size > gfs2_max_stuffed_size(ip)) {
2112 		error = gfs2_quota_lock_check(ip, &ap);
2113 		if (error)
2114 			return error;
2115 
2116 		error = gfs2_inplace_reserve(ip, &ap);
2117 		if (error)
2118 			goto do_grow_qunlock;
2119 		unstuff = 1;
2120 	}
2121 
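	/*
	 * Reserve journal space for the dinode, statfs and resource group
	 * bitmap updates; if the file is being unstuffed and uses journaled
	 * data, the copied-out data block needs journal space too, and quota
	 * changes are reserved unless quotas are switched off.
	 */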
2122 	error = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS + RES_RG_BIT +
2123 				 (unstuff &&
2124 				  gfs2_is_jdata(ip) ? RES_JDATA : 0) +
2125 				 (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF ?
2126 				  0 : RES_QUOTA), 0);
2127 	if (error)
2128 		goto do_grow_release;
2129 
2130 	if (unstuff) {
2131 		error = gfs2_unstuff_dinode(ip, NULL);
2132 		if (error)
2133 			goto do_end_trans;
2134 	}
2135 
2136 	error = gfs2_meta_inode_buffer(ip, &dibh);
2137 	if (error)
2138 		goto do_end_trans;
2139 
2140 	i_size_write(inode, size);
2141 	ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
2142 	gfs2_trans_add_meta(ip->i_gl, dibh);
2143 	gfs2_dinode_out(ip, dibh->b_data);
2144 	brelse(dibh);
2145 
2146 do_end_trans:
2147 	gfs2_trans_end(sdp);
2148 do_grow_release:
2149 	if (unstuff) {
2150 		gfs2_inplace_release(ip);
2151 do_grow_qunlock:
2152 		gfs2_quota_unlock(ip);
2153 	}
2154 	return error;
2155 }
2156 
2157 /**
2158  * gfs2_setattr_size - make a file a given size
2159  * @inode: the inode
2160  * @newsize: the size to make the file
2161  *
2162  * The file size can grow, shrink, or stay the same size. This
2163  * is called holding i_rwsem and an exclusive glock on the inode
2164  * in question.
2165  *
2166  * Returns: errno
2167  */
2168 
2169 int gfs2_setattr_size(struct inode *inode, u64 newsize)
2170 {
2171 	struct gfs2_inode *ip = GFS2_I(inode);
2172 	int ret;
2173 
2174 	BUG_ON(!S_ISREG(inode->i_mode));
2175 
2176 	ret = inode_newsize_ok(inode, newsize);
2177 	if (ret)
2178 		return ret;
2179 
2180 	inode_dio_wait(inode);
2181 
2182 	ret = gfs2_rsqa_alloc(ip);
2183 	if (ret)
2184 		goto out;
2185 
2186 	if (newsize >= inode->i_size) {
2187 		ret = do_grow(inode, newsize);
2188 		goto out;
2189 	}
2190 
2191 	ret = do_shrink(inode, newsize);
2192 out:
2193 	gfs2_rsqa_delete(ip, NULL);
2194 	return ret;
2195 }
2196 
2197 int gfs2_truncatei_resume(struct gfs2_inode *ip)
2198 {
2199 	int error;
2200 	error = punch_hole(ip, i_size_read(&ip->i_inode), 0);
2201 	if (!error)
2202 		error = trunc_end(ip);
2203 	return error;
2204 }
2205 
2206 int gfs2_file_dealloc(struct gfs2_inode *ip)
2207 {
2208 	return punch_hole(ip, 0, 0);
2209 }
2210 
2211 /**
2212  * gfs2_free_journal_extents - Free cached journal bmap info
2213  * @jd: The journal
2214  *
2215  */
2216 
2217 void gfs2_free_journal_extents(struct gfs2_jdesc *jd)
2218 {
2219 	struct gfs2_journal_extent *jext;
2220 
2221 	while (!list_empty(&jd->extent_list)) {
2222 		jext = list_entry(jd->extent_list.next, struct gfs2_journal_extent, list);
2223 		list_del(&jext->list);
2224 		kfree(jext);
2225 	}
2226 }
2227 
2228 /**
2229  * gfs2_add_jextent - Add or merge a new extent to extent cache
2230  * @jd: The journal descriptor
2231  * @lblock: The logical block at start of new extent
2232  * @dblock: The physical block at start of new extent
2233  * @blocks: Size of extent in fs blocks
2234  *
2235  * Returns: 0 on success or -ENOMEM
2236  */
2237 
2238 static int gfs2_add_jextent(struct gfs2_jdesc *jd, u64 lblock, u64 dblock, u64 blocks)
2239 {
2240 	struct gfs2_journal_extent *jext;
2241 
2242 	if (!list_empty(&jd->extent_list)) {
2243 		jext = list_entry(jd->extent_list.prev, struct gfs2_journal_extent, list);
2244 		if ((jext->dblock + jext->blocks) == dblock) {
2245 			jext->blocks += blocks;
2246 			return 0;
2247 		}
2248 	}
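	/*
	 * The new range does not physically continue the last cached extent
	 * (or the cache is empty), so a new extent record is needed.
	 * Illustrative example: adding (lblock 0, dblock 100, 8 blocks) and
	 * then (lblock 8, dblock 108, 8 blocks) merges above into one
	 * 16-block extent; (lblock 16, dblock 300, 4 blocks) would land here.
	 */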
2249 
2250 	jext = kzalloc(sizeof(struct gfs2_journal_extent), GFP_NOFS);
2251 	if (jext == NULL)
2252 		return -ENOMEM;
2253 	jext->dblock = dblock;
2254 	jext->lblock = lblock;
2255 	jext->blocks = blocks;
2256 	list_add_tail(&jext->list, &jd->extent_list);
2257 	jd->nr_extents++;
2258 	return 0;
2259 }
2260 
2261 /**
2262  * gfs2_map_journal_extents - Cache journal bmap info
2263  * @sdp: The super block
2264  * @jd: The journal to map
2265  *
2266  * Create a reusable "extent" mapping from all logical
2267  * blocks to all physical blocks for the given journal.  This will save
2268  * us time when writing journal blocks.  Most journals will have only one
2269  * extent that maps all their logical blocks.  That's because mkfs.gfs2
2270  * arranges the journal blocks sequentially to maximize performance.
2271  * So the extent would map the first block for the entire file length.
2272  * However, gfs2_jadd can happen while file activity is happening, so
2273  * those journals may not be sequential.  Less likely is the case where
2274  * the users created their own journals by mounting the metafs and
2275  * laying it out.  But it's still possible.  These journals might have
2276  * several extents.
2277  *
2278  * Returns: 0 on success, or error on failure
2279  */
2280 
2281 int gfs2_map_journal_extents(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd)
2282 {
2283 	u64 lblock = 0;
2284 	u64 lblock_stop;
2285 	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
2286 	struct buffer_head bh;
2287 	unsigned int shift = sdp->sd_sb.sb_bsize_shift;
2288 	u64 size;
2289 	int rc;
2290 	ktime_t start, end;
2291 
2292 	start = ktime_get();
2293 	lblock_stop = i_size_read(jd->jd_inode) >> shift;
2294 	size = (lblock_stop - lblock) << shift;
2295 	jd->nr_extents = 0;
2296 	WARN_ON(!list_empty(&jd->extent_list));
2297 
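	/*
	 * The on-stack buffer_head below is only a query structure for
	 * gfs2_block_map(): b_size is set to the remaining size to map and,
	 * on return, reflects how much of that range is mapped contiguously
	 * starting at lblock, with b_blocknr giving the starting physical
	 * block.
	 */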
2298 	do {
2299 		bh.b_state = 0;
2300 		bh.b_blocknr = 0;
2301 		bh.b_size = size;
2302 		rc = gfs2_block_map(jd->jd_inode, lblock, &bh, 0);
2303 		if (rc || !buffer_mapped(&bh))
2304 			goto fail;
2305 		rc = gfs2_add_jextent(jd, lblock, bh.b_blocknr, bh.b_size >> shift);
2306 		if (rc)
2307 			goto fail;
2308 		size -= bh.b_size;
2309 		lblock += (bh.b_size >> ip->i_inode.i_blkbits);
2310 	} while (size > 0);
2311 
2312 	end = ktime_get();
2313 	fs_info(sdp, "journal %d mapped with %u extents in %lldms\n", jd->jd_jid,
2314 		jd->nr_extents, ktime_ms_delta(end, start));
2315 	return 0;
2316 
2317 fail:
2318 	fs_warn(sdp, "error %d mapping journal %u at offset %llu (extent %u)\n",
2319 		rc, jd->jd_jid,
2320 		(unsigned long long)(i_size_read(jd->jd_inode) - size),
2321 		jd->nr_extents);
2322 	fs_warn(sdp, "bmap=%d lblock=%llu block=%llu, state=0x%08lx, size=%llu\n",
2323 		rc, (unsigned long long)lblock, (unsigned long long)bh.b_blocknr,
2324 		bh.b_state, (unsigned long long)bh.b_size);
2325 	gfs2_free_journal_extents(jd);
2326 	return rc;
2327 }
2328 
2329 /**
2330  * gfs2_write_alloc_required - figure out if a write will require an allocation
2331  * @ip: the file being written to
2332  * @offset: the offset to write to
2333  * @len: the number of bytes being written
2334  *
2335  * Returns: 1 if an alloc is required, 0 otherwise
2336  */
2337 
2338 int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
2339 			      unsigned int len)
2340 {
2341 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
2342 	struct buffer_head bh;
2343 	unsigned int shift;
2344 	u64 lblock, lblock_stop, size;
2345 	u64 end_of_file;
2346 
2347 	if (!len)
2348 		return 0;
2349 
2350 	if (gfs2_is_stuffed(ip)) {
2351 		if (offset + len > gfs2_max_stuffed_size(ip))
2352 			return 1;
2353 		return 0;
2354 	}
2355 
2356 	shift = sdp->sd_sb.sb_bsize_shift;
2357 	BUG_ON(gfs2_is_dir(ip));
2358 	end_of_file = (i_size_read(&ip->i_inode) + sdp->sd_sb.sb_bsize - 1) >> shift;
2359 	lblock = offset >> shift;
2360 	lblock_stop = (offset + len + sdp->sd_sb.sb_bsize - 1) >> shift;
2361 	if (lblock_stop > end_of_file && ip != GFS2_I(sdp->sd_rindex))
2362 		return 1;
2363 
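	/*
	 * Walk the logical range with gfs2_block_map() (create == 0): each
	 * call reports, via b_size, how much of the range is already mapped
	 * contiguously.  The first unmapped gap means a write here would
	 * require an allocation.
	 */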
2364 	size = (lblock_stop - lblock) << shift;
2365 	do {
2366 		bh.b_state = 0;
2367 		bh.b_size = size;
2368 		gfs2_block_map(&ip->i_inode, lblock, &bh, 0);
2369 		if (!buffer_mapped(&bh))
2370 			return 1;
2371 		size -= bh.b_size;
2372 		lblock += (bh.b_size >> ip->i_inode.i_blkbits);
2373 	} while (size > 0);
2374 
2375 	return 0;
2376 }
2377 
2378 static int stuffed_zero_range(struct inode *inode, loff_t offset, loff_t length)
2379 {
2380 	struct gfs2_inode *ip = GFS2_I(inode);
2381 	struct buffer_head *dibh;
2382 	int error;
2383 
2384 	if (offset >= inode->i_size)
2385 		return 0;
2386 	if (offset + length > inode->i_size)
2387 		length = inode->i_size - offset;
2388 
2389 	error = gfs2_meta_inode_buffer(ip, &dibh);
2390 	if (error)
2391 		return error;
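	/*
	 * For a stuffed inode the file data lives in the dinode block right
	 * after struct gfs2_dinode, so zeroing the range is just a memset on
	 * the (journaled) dinode buffer.
	 */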
2392 	gfs2_trans_add_meta(ip->i_gl, dibh);
2393 	memset(dibh->b_data + sizeof(struct gfs2_dinode) + offset, 0,
2394 	       length);
2395 	brelse(dibh);
2396 	return 0;
2397 }
2398 
2399 static int gfs2_journaled_truncate_range(struct inode *inode, loff_t offset,
2400 					 loff_t length)
2401 {
2402 	struct gfs2_sbd *sdp = GFS2_SB(inode);
2403 	loff_t max_chunk = GFS2_JTRUNC_REVOKES * sdp->sd_vfs->s_blocksize;
2404 	int error;
2405 
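	/*
	 * For journaled data the page cache is truncated in chunks of at
	 * most GFS2_JTRUNC_REVOKES blocks, so each transaction stays within
	 * the revoke reservation made when it is (re)started below; the
	 * transaction is ended and restarted between chunks whenever it has
	 * actually been used.
	 */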
2406 	while (length) {
2407 		struct gfs2_trans *tr;
2408 		loff_t chunk;
2409 		unsigned int offs;
2410 
2411 		chunk = length;
2412 		if (chunk > max_chunk)
2413 			chunk = max_chunk;
2414 
2415 		offs = offset & ~PAGE_MASK;
2416 		if (offs && chunk > PAGE_SIZE)
2417 			chunk = offs + ((chunk - offs) & PAGE_MASK);
2418 
2419 		truncate_pagecache_range(inode, offset, chunk);
2420 		offset += chunk;
2421 		length -= chunk;
2422 
2423 		tr = current->journal_info;
2424 		if (!test_bit(TR_TOUCHED, &tr->tr_flags))
2425 			continue;
2426 
2427 		gfs2_trans_end(sdp);
2428 		error = gfs2_trans_begin(sdp, RES_DINODE, GFS2_JTRUNC_REVOKES);
2429 		if (error)
2430 			return error;
2431 	}
2432 	return 0;
2433 }
2434 
2435 int __gfs2_punch_hole(struct file *file, loff_t offset, loff_t length)
2436 {
2437 	struct inode *inode = file_inode(file);
2438 	struct gfs2_inode *ip = GFS2_I(inode);
2439 	struct gfs2_sbd *sdp = GFS2_SB(inode);
2440 	int error;
2441 
2442 	if (gfs2_is_jdata(ip))
2443 		error = gfs2_trans_begin(sdp, RES_DINODE + 2 * RES_JDATA,
2444 					 GFS2_JTRUNC_REVOKES);
2445 	else
2446 		error = gfs2_trans_begin(sdp, RES_DINODE, 0);
2447 	if (error)
2448 		return error;
2449 
2450 	if (gfs2_is_stuffed(ip)) {
2451 		error = stuffed_zero_range(inode, offset, length);
2452 		if (error)
2453 			goto out;
2454 	} else {
2455 		unsigned int start_off, end_len, blocksize;
2456 
2457 		blocksize = i_blocksize(inode);
2458 		start_off = offset & (blocksize - 1);
2459 		end_len = (offset + length) & (blocksize - 1);
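		/*
		 * punch_hole() below only deals in whole blocks, so a partial
		 * first block (start_off bytes into it) and a partial last
		 * block (the final end_len bytes of the range) must be zeroed
		 * here explicitly.  When the whole range fits inside the
		 * first block, the tail is already covered and end_len is
		 * cleared.
		 */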
2460 		if (start_off) {
2461 			unsigned int len = length;
2462 			if (length > blocksize - start_off)
2463 				len = blocksize - start_off;
2464 			error = gfs2_block_zero_range(inode, offset, len);
2465 			if (error)
2466 				goto out;
2467 			if (start_off + length < blocksize)
2468 				end_len = 0;
2469 		}
2470 		if (end_len) {
2471 			error = gfs2_block_zero_range(inode,
2472 				offset + length - end_len, end_len);
2473 			if (error)
2474 				goto out;
2475 		}
2476 	}
2477 
2478 	if (gfs2_is_jdata(ip)) {
2479 		BUG_ON(!current->journal_info);
2480 		gfs2_journaled_truncate_range(inode, offset, length);
2481 	} else
2482 		truncate_pagecache_range(inode, offset, offset + length - 1);
2483 
2484 	file_update_time(file);
2485 	mark_inode_dirty(inode);
2486 
2487 	if (current->journal_info)
2488 		gfs2_trans_end(sdp);
2489 
2490 	if (!gfs2_is_stuffed(ip))
2491 		error = punch_hole(ip, offset, length);
2492 
2493 out:
2494 	if (current->journal_info)
2495 		gfs2_trans_end(sdp);
2496 	return error;
2497 }
2498