xref: /openbmc/linux/fs/gfs2/bmap.c (revision 03638e62)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
4  * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
5  */
6 
7 #include <linux/spinlock.h>
8 #include <linux/completion.h>
9 #include <linux/buffer_head.h>
10 #include <linux/blkdev.h>
11 #include <linux/gfs2_ondisk.h>
12 #include <linux/crc32.h>
13 #include <linux/iomap.h>
14 #include <linux/ktime.h>
15 
16 #include "gfs2.h"
17 #include "incore.h"
18 #include "bmap.h"
19 #include "glock.h"
20 #include "inode.h"
21 #include "meta_io.h"
22 #include "quota.h"
23 #include "rgrp.h"
24 #include "log.h"
25 #include "super.h"
26 #include "trans.h"
27 #include "dir.h"
28 #include "util.h"
29 #include "aops.h"
30 #include "trace_gfs2.h"
31 
32 /* This doesn't need to be that large, as the maximum number of 64-bit
33  * pointers in a 4k block is 512, so __u16 is fine for that. It saves
34  * stack space to keep it small.
35  */
36 struct metapath {
37 	struct buffer_head *mp_bh[GFS2_MAX_META_HEIGHT];
38 	__u16 mp_list[GFS2_MAX_META_HEIGHT];
39 	int mp_fheight; /* find_metapath height */
40 	int mp_aheight; /* actual height (lookup height) */
41 };
42 
43 static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length);
44 
45 /**
46  * gfs2_unstuffer_page - unstuff a stuffed inode into a block cached by a page
47  * @ip: the inode
48  * @dibh: the dinode buffer
49  * @block: the block number that was allocated
50  * @page: The (optional) page. This is looked up if @page is NULL
51  *
52  * Returns: errno
53  */
54 
55 static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
56 			       u64 block, struct page *page)
57 {
58 	struct inode *inode = &ip->i_inode;
59 	struct buffer_head *bh;
60 	int release = 0;
61 
62 	if (!page || page->index) {
63 		page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS);
64 		if (!page)
65 			return -ENOMEM;
66 		release = 1;
67 	}
68 
69 	if (!PageUptodate(page)) {
70 		void *kaddr = kmap(page);
71 		u64 dsize = i_size_read(inode);
72 
73 		if (dsize > gfs2_max_stuffed_size(ip))
74 			dsize = gfs2_max_stuffed_size(ip);
75 
76 		memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
77 		memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
78 		kunmap(page);
79 
80 		SetPageUptodate(page);
81 	}
82 
83 	if (!page_has_buffers(page))
84 		create_empty_buffers(page, BIT(inode->i_blkbits),
85 				     BIT(BH_Uptodate));
86 
87 	bh = page_buffers(page);
88 
89 	if (!buffer_mapped(bh))
90 		map_bh(bh, inode->i_sb, block);
91 
92 	set_buffer_uptodate(bh);
93 	if (gfs2_is_jdata(ip))
94 		gfs2_trans_add_data(ip->i_gl, bh);
95 	else {
96 		mark_buffer_dirty(bh);
97 		gfs2_ordered_add_inode(ip);
98 	}
99 
100 	if (release) {
101 		unlock_page(page);
102 		put_page(page);
103 	}
104 
105 	return 0;
106 }
107 
108 /**
109  * gfs2_unstuff_dinode - Unstuff a dinode when the data has grown too big
110  * @ip: The GFS2 inode to unstuff
111  * @page: The (optional) page. This is looked up if @page is NULL
112  *
113  * This routine unstuffs a dinode and returns it to a "normal" state such
114  * that the height can be grown in the traditional way.
115  *
116  * Returns: errno
117  */
118 
119 int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page)
120 {
121 	struct buffer_head *bh, *dibh;
122 	struct gfs2_dinode *di;
123 	u64 block = 0;
124 	int isdir = gfs2_is_dir(ip);
125 	int error;
126 
127 	down_write(&ip->i_rw_mutex);
128 
129 	error = gfs2_meta_inode_buffer(ip, &dibh);
130 	if (error)
131 		goto out;
132 
133 	if (i_size_read(&ip->i_inode)) {
134 		/* Get a free block, fill it with the stuffed data,
135 		   and write it out to disk */
136 
137 		unsigned int n = 1;
138 		error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
139 		if (error)
140 			goto out_brelse;
141 		if (isdir) {
142 			gfs2_trans_remove_revoke(GFS2_SB(&ip->i_inode), block, 1);
143 			error = gfs2_dir_get_new_buffer(ip, block, &bh);
144 			if (error)
145 				goto out_brelse;
146 			gfs2_buffer_copy_tail(bh, sizeof(struct gfs2_meta_header),
147 					      dibh, sizeof(struct gfs2_dinode));
148 			brelse(bh);
149 		} else {
150 			error = gfs2_unstuffer_page(ip, dibh, block, page);
151 			if (error)
152 				goto out_brelse;
153 		}
154 	}
155 
156 	/*  Set up the pointer to the new block  */
157 
158 	gfs2_trans_add_meta(ip->i_gl, dibh);
159 	di = (struct gfs2_dinode *)dibh->b_data;
160 	gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
161 
162 	if (i_size_read(&ip->i_inode)) {
163 		*(__be64 *)(di + 1) = cpu_to_be64(block);
164 		gfs2_add_inode_blocks(&ip->i_inode, 1);
165 		di->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
166 	}
167 
168 	ip->i_height = 1;
169 	di->di_height = cpu_to_be16(1);
170 
171 out_brelse:
172 	brelse(dibh);
173 out:
174 	up_write(&ip->i_rw_mutex);
175 	return error;
176 }
177 
178 
179 /**
180  * find_metapath - Find path through the metadata tree
181  * @sdp: The superblock
182  * @block: The disk block to look up
183  * @mp: The metapath to return the result in
184  * @height: The pre-calculated height of the metadata tree
185  *
186  *   This routine returns a struct metapath structure that defines a path
187  *   through the metadata of inode "ip" to get to block "block".
188  *
189  *   Example:
190  *   Given:  "ip" is a height 3 file, "offset" is 101342453, and this is a
191  *   filesystem with a blocksize of 4096.
192  *
193  *   find_metapath() would return a struct metapath structure set to:
194  *   mp_fheight = 3, mp_list[0] = 0, mp_list[1] = 48, and mp_list[2] = 165.
195  *
196  *   That means that in order to get to the block containing the byte at
197  *   offset 101342453, we would load the indirect block pointed to by pointer
198  *   0 in the dinode.  We would then load the indirect block pointed to by
199  *   pointer 48 in that indirect block.  We would then load the data block
200  *   pointed to by pointer 165 in that indirect block.
201  *
202  *             ----------------------------------------
203  *             | Dinode |                             |
204  *             |        |                            4|
205  *             |        |0 1 2 3 4 5                 9|
206  *             |        |                            6|
207  *             ----------------------------------------
208  *                       |
209  *                       |
210  *                       V
211  *             ----------------------------------------
212  *             | Indirect Block                       |
213  *             |                                     5|
214  *             |            4 4 4 4 4 5 5            1|
215  *             |0           5 6 7 8 9 0 1            2|
216  *             ----------------------------------------
217  *                                |
218  *                                |
219  *                                V
220  *             ----------------------------------------
221  *             | Indirect Block                       |
222  *             |                         1 1 1 1 1   5|
223  *             |                         6 6 6 6 6   1|
224  *             |0                        3 4 5 6 7   2|
225  *             ----------------------------------------
226  *                                           |
227  *                                           |
228  *                                           V
229  *             ----------------------------------------
230  *             | Data block containing offset         |
231  *             |            101342453                 |
232  *             |                                      |
233  *             |                                      |
234  *             ----------------------------------------
235  *
236  */
237 
238 static void find_metapath(const struct gfs2_sbd *sdp, u64 block,
239 			  struct metapath *mp, unsigned int height)
240 {
241 	unsigned int i;
242 
243 	mp->mp_fheight = height;
244 	for (i = height; i--;)
245 		mp->mp_list[i] = do_div(block, sdp->sd_inptrs);
246 }
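/*
 * Worked example for the find_metapath() comment above (a sketch that uses
 * the simplified figure of 512 pointers per 4k block from the struct
 * metapath comment, rather than the exact sd_inptrs value):
 *
 *   logical block = 101342453 >> 12        = 24741
 *   mp_list[2]    = 24741 % 512            = 165
 *   mp_list[1]    = (24741 / 512) % 512    = 48
 *   mp_list[0]    = 24741 / (512 * 512)    = 0
 *
 * which matches the mp_list values quoted in that comment.
 */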
247 
248 static inline unsigned int metapath_branch_start(const struct metapath *mp)
249 {
250 	if (mp->mp_list[0] == 0)
251 		return 2;
252 	return 1;
253 }
254 
255 /**
256  * metaptr1 - Return the first possible metadata pointer in a metapath buffer
257  * @height: The metadata height (0 = dinode)
258  * @mp: The metapath
259  */
260 static inline __be64 *metaptr1(unsigned int height, const struct metapath *mp)
261 {
262 	struct buffer_head *bh = mp->mp_bh[height];
263 	if (height == 0)
264 		return ((__be64 *)(bh->b_data + sizeof(struct gfs2_dinode)));
265 	return ((__be64 *)(bh->b_data + sizeof(struct gfs2_meta_header)));
266 }
267 
268 /**
269  * metapointer - Return pointer to start of metadata in a buffer
270  * @height: The metadata height (0 = dinode)
271  * @mp: The metapath
272  *
273  * Return a pointer to the block number of the next height of the metadata
274  * tree given a buffer containing the pointer to the current height of the
275  * metadata tree.
276  */
277 
278 static inline __be64 *metapointer(unsigned int height, const struct metapath *mp)
279 {
280 	__be64 *p = metaptr1(height, mp);
281 	return p + mp->mp_list[height];
282 }
283 
284 static inline const __be64 *metaend(unsigned int height, const struct metapath *mp)
285 {
286 	const struct buffer_head *bh = mp->mp_bh[height];
287 	return (const __be64 *)(bh->b_data + bh->b_size);
288 }
289 
290 static void clone_metapath(struct metapath *clone, struct metapath *mp)
291 {
292 	unsigned int hgt;
293 
294 	*clone = *mp;
295 	for (hgt = 0; hgt < mp->mp_aheight; hgt++)
296 		get_bh(clone->mp_bh[hgt]);
297 }
298 
299 static void gfs2_metapath_ra(struct gfs2_glock *gl, __be64 *start, __be64 *end)
300 {
301 	const __be64 *t;
302 
303 	for (t = start; t < end; t++) {
304 		struct buffer_head *rabh;
305 
306 		if (!*t)
307 			continue;
308 
309 		rabh = gfs2_getbuf(gl, be64_to_cpu(*t), CREATE);
310 		if (trylock_buffer(rabh)) {
311 			if (!buffer_uptodate(rabh)) {
312 				rabh->b_end_io = end_buffer_read_sync;
313 				submit_bh(REQ_OP_READ,
314 					  REQ_RAHEAD | REQ_META | REQ_PRIO,
315 					  rabh);
316 				continue;
317 			}
318 			unlock_buffer(rabh);
319 		}
320 		brelse(rabh);
321 	}
322 }
323 
324 static int __fillup_metapath(struct gfs2_inode *ip, struct metapath *mp,
325 			     unsigned int x, unsigned int h)
326 {
327 	for (; x < h; x++) {
328 		__be64 *ptr = metapointer(x, mp);
329 		u64 dblock = be64_to_cpu(*ptr);
330 		int ret;
331 
332 		if (!dblock)
333 			break;
334 		ret = gfs2_meta_indirect_buffer(ip, x + 1, dblock, &mp->mp_bh[x + 1]);
335 		if (ret)
336 			return ret;
337 	}
338 	mp->mp_aheight = x + 1;
339 	return 0;
340 }
341 
342 /**
343  * lookup_metapath - Walk the metadata tree to a specific point
344  * @ip: The inode
345  * @mp: The metapath
346  *
347  * Assumes that the inode's buffer has already been looked up and
348  * hooked onto mp->mp_bh[0] and that the metapath has been initialised
349  * by find_metapath().
350  *
351  * If this function encounters part of the tree which has not been
352  * allocated, it records in mp->mp_aheight the height at which it found
353  * the unallocated block. Blocks which are found are added to the
354  * mp->mp_bh[] list.
355  *
356  * Returns: error
357  */
358 
359 static int lookup_metapath(struct gfs2_inode *ip, struct metapath *mp)
360 {
361 	return __fillup_metapath(ip, mp, 0, ip->i_height - 1);
362 }
363 
364 /**
365  * fillup_metapath - fill up buffers for the metadata path to a specific height
366  * @ip: The inode
367  * @mp: The metapath
368  * @h: The height to which it should be mapped
369  *
370  * Similar to lookup_metapath, but does lookups for a range of heights
371  *
372  * Returns: error or the number of buffers filled
373  */
374 
375 static int fillup_metapath(struct gfs2_inode *ip, struct metapath *mp, int h)
376 {
377 	unsigned int x = 0;
378 	int ret;
379 
380 	if (h) {
381 		/* find the first buffer we need to look up. */
382 		for (x = h - 1; x > 0; x--) {
383 			if (mp->mp_bh[x])
384 				break;
385 		}
386 	}
387 	ret = __fillup_metapath(ip, mp, x, h);
388 	if (ret)
389 		return ret;
390 	return mp->mp_aheight - x - 1;
391 }
392 
393 static void release_metapath(struct metapath *mp)
394 {
395 	int i;
396 
397 	for (i = 0; i < GFS2_MAX_META_HEIGHT; i++) {
398 		if (mp->mp_bh[i] == NULL)
399 			break;
400 		brelse(mp->mp_bh[i]);
401 		mp->mp_bh[i] = NULL;
402 	}
403 }
404 
405 /**
406  * gfs2_extent_length - Returns length of an extent of blocks
407  * @bh: The metadata block
408  * @ptr: Current position in @bh
409  * @limit: Max extent length to return
410  * @eob: Set to 1 if we hit "end of block"
411  *
412  * Returns: The length of the extent (minimum of one block)
413  */
414 
415 static inline unsigned int gfs2_extent_length(struct buffer_head *bh, __be64 *ptr, size_t limit, int *eob)
416 {
417 	const __be64 *end = (__be64 *)(bh->b_data + bh->b_size);
418 	const __be64 *first = ptr;
419 	u64 d = be64_to_cpu(*ptr);
420 
421 	*eob = 0;
422 	do {
423 		ptr++;
424 		if (ptr >= end)
425 			break;
426 		d++;
427 	} while(be64_to_cpu(*ptr) == d);
428 	if (ptr >= end)
429 		*eob = 1;
430 	return ptr - first;
431 }
432 
433 typedef const __be64 *(*gfs2_metadata_walker)(
434 		struct metapath *mp,
435 		const __be64 *start, const __be64 *end,
436 		u64 factor, void *data);
437 
438 #define WALK_STOP ((__be64 *)0)
439 #define WALK_NEXT ((__be64 *)1)
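/*
 * Return protocol for a gfs2_metadata_walker (see gfs2_walk_metadata() below):
 * WALK_STOP ends the walk, WALK_NEXT means the whole [start, end) range was
 * handled and the walk moves on to the next range of pointers, and any other
 * (non-null) pointer within the range makes the walk descend into the
 * metadata tree at that pointer.  gfs2_hole_walker() below uses the last case
 * when it finds an allocated block part-way down the tree.
 */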
440 
441 static int gfs2_walk_metadata(struct inode *inode, sector_t lblock,
442 		u64 len, struct metapath *mp, gfs2_metadata_walker walker,
443 		void *data)
444 {
445 	struct metapath clone;
446 	struct gfs2_inode *ip = GFS2_I(inode);
447 	struct gfs2_sbd *sdp = GFS2_SB(inode);
448 	const __be64 *start, *end, *ptr;
449 	u64 factor = 1;
450 	unsigned int hgt;
451 	int ret = 0;
452 
453 	for (hgt = ip->i_height - 1; hgt >= mp->mp_aheight; hgt--)
454 		factor *= sdp->sd_inptrs;
455 
456 	for (;;) {
457 		u64 step;
458 
459 		/* Walk indirect block. */
460 		start = metapointer(hgt, mp);
461 		end = metaend(hgt, mp);
462 
463 		step = (end - start) * factor;
464 		if (step > len)
465 			end = start + DIV_ROUND_UP_ULL(len, factor);
466 
467 		ptr = walker(mp, start, end, factor, data);
468 		if (ptr == WALK_STOP)
469 			break;
470 		if (step >= len)
471 			break;
472 		len -= step;
473 		if (ptr != WALK_NEXT) {
474 			BUG_ON(!*ptr);
475 			mp->mp_list[hgt] += ptr - start;
476 			goto fill_up_metapath;
477 		}
478 
479 lower_metapath:
480 		/* Decrease height of metapath. */
481 		if (mp != &clone) {
482 			clone_metapath(&clone, mp);
483 			mp = &clone;
484 		}
485 		brelse(mp->mp_bh[hgt]);
486 		mp->mp_bh[hgt] = NULL;
487 		if (!hgt)
488 			break;
489 		hgt--;
490 		factor *= sdp->sd_inptrs;
491 
492 		/* Advance in metadata tree. */
493 		(mp->mp_list[hgt])++;
494 		start = metapointer(hgt, mp);
495 		end = metaend(hgt, mp);
496 		if (start >= end) {
497 			mp->mp_list[hgt] = 0;
498 			if (!hgt)
499 				break;
500 			goto lower_metapath;
501 		}
502 
503 fill_up_metapath:
504 		/* Increase height of metapath. */
505 		if (mp != &clone) {
506 			clone_metapath(&clone, mp);
507 			mp = &clone;
508 		}
509 		ret = fillup_metapath(ip, mp, ip->i_height - 1);
510 		if (ret < 0)
511 			break;
512 		hgt += ret;
513 		for (; ret; ret--)
514 			do_div(factor, sdp->sd_inptrs);
515 		mp->mp_aheight = hgt + 1;
516 	}
517 	if (mp == &clone)
518 		release_metapath(mp);
519 	return ret;
520 }
521 
522 struct gfs2_hole_walker_args {
523 	u64 blocks;
524 };
525 
526 static const __be64 *gfs2_hole_walker(struct metapath *mp,
527 		const __be64 *start, const __be64 *end,
528 		u64 factor, void *data)
529 {
530 	struct gfs2_hole_walker_args *args = data;
531 	const __be64 *ptr;
532 
533 	for (ptr = start; ptr < end; ptr++) {
534 		if (*ptr) {
535 			args->blocks += (ptr - start) * factor;
536 			if (mp->mp_aheight == mp->mp_fheight)
537 				return WALK_STOP;
538 			return ptr;  /* increase height */
539 		}
540 	}
541 	args->blocks += (end - start) * factor;
542 	return WALK_NEXT;
543 }
544 
545 /**
546  * gfs2_hole_size - figure out the size of a hole
547  * @inode: The inode
548  * @lblock: The logical starting block number
549  * @len: How far to look (in blocks)
550  * @mp: The metapath at lblock
551  * @iomap: The iomap to store the hole size in
552  *
553  * This function modifies @mp.
554  *
555  * Returns: errno on error
556  */
557 static int gfs2_hole_size(struct inode *inode, sector_t lblock, u64 len,
558 			  struct metapath *mp, struct iomap *iomap)
559 {
560 	struct gfs2_hole_walker_args args = { };
561 	int ret = 0;
562 
563 	ret = gfs2_walk_metadata(inode, lblock, len, mp, gfs2_hole_walker, &args);
564 	if (!ret)
565 		iomap->length = args.blocks << inode->i_blkbits;
566 	return ret;
567 }
568 
569 static inline __be64 *gfs2_indirect_init(struct metapath *mp,
570 					 struct gfs2_glock *gl, unsigned int i,
571 					 unsigned offset, u64 bn)
572 {
573 	__be64 *ptr = (__be64 *)(mp->mp_bh[i - 1]->b_data +
574 		       ((i > 1) ? sizeof(struct gfs2_meta_header) :
575 				 sizeof(struct gfs2_dinode)));
576 	BUG_ON(i < 1);
577 	BUG_ON(mp->mp_bh[i] != NULL);
578 	mp->mp_bh[i] = gfs2_meta_new(gl, bn);
579 	gfs2_trans_add_meta(gl, mp->mp_bh[i]);
580 	gfs2_metatype_set(mp->mp_bh[i], GFS2_METATYPE_IN, GFS2_FORMAT_IN);
581 	gfs2_buffer_clear_tail(mp->mp_bh[i], sizeof(struct gfs2_meta_header));
582 	ptr += offset;
583 	*ptr = cpu_to_be64(bn);
584 	return ptr;
585 }
586 
587 enum alloc_state {
588 	ALLOC_DATA = 0,
589 	ALLOC_GROW_DEPTH = 1,
590 	ALLOC_GROW_HEIGHT = 2,
591 	/* ALLOC_UNSTUFF = 3,   TBD and rather complicated */
592 };
593 
594 /**
595  * gfs2_iomap_alloc - Build a metadata tree of the requested height
596  * @inode: The GFS2 inode
597  * @iomap: The iomap structure
598  * @mp: The metapath, with proper height information calculated
599  *
600  * In this routine we may have to alloc:
601  *   i) Indirect blocks to grow the metadata tree height
602  *  ii) Indirect blocks to fill in lower part of the metadata tree
603  * iii) Data blocks
604  *
605  * This function is called after gfs2_iomap_get, which works out the
606  * total number of blocks which we need via gfs2_alloc_size.
607  *
608  * We then do the actual allocation asking for an extent at a time (if
609  * enough contiguous free blocks are available, there will only be one
610  * allocation request per call) and use the state machine to initialise
611  * the blocks in order.
612  *
613  * Right now, this function will allocate at most one indirect block
614  * worth of data -- with a default block size of 4K, that's slightly
615  * less than 2M.  If this limitation is ever removed to allow huge
616  * allocations, we would probably still want to limit the iomap size we
617  * return to avoid stalling other tasks during huge writes; the next
618  * iomap iteration would then find the blocks already allocated.
619  *
620  * Returns: errno on error
621  */
622 
623 static int gfs2_iomap_alloc(struct inode *inode, struct iomap *iomap,
624 			    struct metapath *mp)
625 {
626 	struct gfs2_inode *ip = GFS2_I(inode);
627 	struct gfs2_sbd *sdp = GFS2_SB(inode);
628 	struct buffer_head *dibh = mp->mp_bh[0];
629 	u64 bn;
630 	unsigned n, i, blks, alloced = 0, iblks = 0, branch_start = 0;
631 	size_t dblks = iomap->length >> inode->i_blkbits;
632 	const unsigned end_of_metadata = mp->mp_fheight - 1;
633 	int ret;
634 	enum alloc_state state;
635 	__be64 *ptr;
636 	__be64 zero_bn = 0;
637 
638 	BUG_ON(mp->mp_aheight < 1);
639 	BUG_ON(dibh == NULL);
640 	BUG_ON(dblks < 1);
641 
642 	gfs2_trans_add_meta(ip->i_gl, dibh);
643 
644 	down_write(&ip->i_rw_mutex);
645 
646 	if (mp->mp_fheight == mp->mp_aheight) {
647 		/* Bottom indirect block exists */
648 		state = ALLOC_DATA;
649 	} else {
650 		/* Need to allocate indirect blocks */
651 		if (mp->mp_fheight == ip->i_height) {
652 			/* Writing into existing tree, extend tree down */
653 			iblks = mp->mp_fheight - mp->mp_aheight;
654 			state = ALLOC_GROW_DEPTH;
655 		} else {
656 			/* Building up tree height */
657 			state = ALLOC_GROW_HEIGHT;
658 			iblks = mp->mp_fheight - ip->i_height;
659 			branch_start = metapath_branch_start(mp);
660 			iblks += (mp->mp_fheight - branch_start);
661 		}
662 	}
663 
664 	/* start of the second part of the function (state machine) */
665 
666 	blks = dblks + iblks;
667 	i = mp->mp_aheight;
668 	do {
669 		n = blks - alloced;
670 		ret = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL);
671 		if (ret)
672 			goto out;
673 		alloced += n;
674 		if (state != ALLOC_DATA || gfs2_is_jdata(ip))
675 			gfs2_trans_remove_revoke(sdp, bn, n);
676 		switch (state) {
677 		/* Growing height of tree */
678 		case ALLOC_GROW_HEIGHT:
679 			if (i == 1) {
680 				ptr = (__be64 *)(dibh->b_data +
681 						 sizeof(struct gfs2_dinode));
682 				zero_bn = *ptr;
683 			}
684 			for (; i - 1 < mp->mp_fheight - ip->i_height && n > 0;
685 			     i++, n--)
686 				gfs2_indirect_init(mp, ip->i_gl, i, 0, bn++);
687 			if (i - 1 == mp->mp_fheight - ip->i_height) {
688 				i--;
689 				gfs2_buffer_copy_tail(mp->mp_bh[i],
690 						sizeof(struct gfs2_meta_header),
691 						dibh, sizeof(struct gfs2_dinode));
692 				gfs2_buffer_clear_tail(dibh,
693 						sizeof(struct gfs2_dinode) +
694 						sizeof(__be64));
695 				ptr = (__be64 *)(mp->mp_bh[i]->b_data +
696 					sizeof(struct gfs2_meta_header));
697 				*ptr = zero_bn;
698 				state = ALLOC_GROW_DEPTH;
699 				for(i = branch_start; i < mp->mp_fheight; i++) {
700 					if (mp->mp_bh[i] == NULL)
701 						break;
702 					brelse(mp->mp_bh[i]);
703 					mp->mp_bh[i] = NULL;
704 				}
705 				i = branch_start;
706 			}
707 			if (n == 0)
708 				break;
709 		/* fall through - To branching from existing tree */
710 		case ALLOC_GROW_DEPTH:
711 			if (i > 1 && i < mp->mp_fheight)
712 				gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[i-1]);
713 			for (; i < mp->mp_fheight && n > 0; i++, n--)
714 				gfs2_indirect_init(mp, ip->i_gl, i,
715 						   mp->mp_list[i-1], bn++);
716 			if (i == mp->mp_fheight)
717 				state = ALLOC_DATA;
718 			if (n == 0)
719 				break;
720 		/* fall through - To tree complete, adding data blocks */
721 		case ALLOC_DATA:
722 			BUG_ON(n > dblks);
723 			BUG_ON(mp->mp_bh[end_of_metadata] == NULL);
724 			gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[end_of_metadata]);
725 			dblks = n;
726 			ptr = metapointer(end_of_metadata, mp);
727 			iomap->addr = bn << inode->i_blkbits;
728 			iomap->flags |= IOMAP_F_MERGED | IOMAP_F_NEW;
729 			while (n-- > 0)
730 				*ptr++ = cpu_to_be64(bn++);
731 			break;
732 		}
733 	} while (iomap->addr == IOMAP_NULL_ADDR);
734 
735 	iomap->type = IOMAP_MAPPED;
736 	iomap->length = (u64)dblks << inode->i_blkbits;
737 	ip->i_height = mp->mp_fheight;
738 	gfs2_add_inode_blocks(&ip->i_inode, alloced);
739 	gfs2_dinode_out(ip, dibh->b_data);
740 out:
741 	up_write(&ip->i_rw_mutex);
742 	return ret;
743 }
744 
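/*
 * IOMAP_F_GFS2_BOUNDARY marks a mapping that ends at the last pointer of an
 * indirect block, so further metadata has to be read before the following
 * block can be mapped; gfs2_block_map() translates it into buffer_boundary().
 */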
745 #define IOMAP_F_GFS2_BOUNDARY IOMAP_F_PRIVATE
746 
747 /**
748  * gfs2_alloc_size - Compute the maximum allocation size
749  * @inode: The inode
750  * @mp: The metapath
751  * @size: Requested size in blocks
752  *
753  * Compute the maximum size of the next allocation at @mp.
754  *
755  * Returns: size in blocks
756  */
757 static u64 gfs2_alloc_size(struct inode *inode, struct metapath *mp, u64 size)
758 {
759 	struct gfs2_inode *ip = GFS2_I(inode);
760 	struct gfs2_sbd *sdp = GFS2_SB(inode);
761 	const __be64 *first, *ptr, *end;
762 
763 	/*
764 	 * For writes to stuffed files, this function is called twice via
765 	 * gfs2_iomap_get, before and after unstuffing. The size we return the
766 	 * first time needs to be large enough to get the reservation and
767 	 * allocation sizes right.  The size we return the second time must
768 	 * be exact or else gfs2_iomap_alloc won't do the right thing.
769 	 */
770 
771 	if (gfs2_is_stuffed(ip) || mp->mp_fheight != mp->mp_aheight) {
772 		unsigned int maxsize = mp->mp_fheight > 1 ?
773 			sdp->sd_inptrs : sdp->sd_diptrs;
774 		maxsize -= mp->mp_list[mp->mp_fheight - 1];
775 		if (size > maxsize)
776 			size = maxsize;
777 		return size;
778 	}
779 
780 	first = metapointer(ip->i_height - 1, mp);
781 	end = metaend(ip->i_height - 1, mp);
782 	if (end - first > size)
783 		end = first + size;
784 	for (ptr = first; ptr < end; ptr++) {
785 		if (*ptr)
786 			break;
787 	}
788 	return ptr - first;
789 }
790 
791 /**
792  * gfs2_iomap_get - Map blocks from an inode to disk blocks
793  * @inode: The inode
794  * @pos: Starting position in bytes
795  * @length: Length to map, in bytes
796  * @flags: iomap flags
797  * @iomap: The iomap structure
798  * @mp: The metapath
799  *
800  * Returns: errno
801  */
802 static int gfs2_iomap_get(struct inode *inode, loff_t pos, loff_t length,
803 			  unsigned flags, struct iomap *iomap,
804 			  struct metapath *mp)
805 {
806 	struct gfs2_inode *ip = GFS2_I(inode);
807 	struct gfs2_sbd *sdp = GFS2_SB(inode);
808 	loff_t size = i_size_read(inode);
809 	__be64 *ptr;
810 	sector_t lblock;
811 	sector_t lblock_stop;
812 	int ret;
813 	int eob;
814 	u64 len;
815 	struct buffer_head *dibh = NULL, *bh;
816 	u8 height;
817 
818 	if (!length)
819 		return -EINVAL;
820 
821 	down_read(&ip->i_rw_mutex);
822 
823 	ret = gfs2_meta_inode_buffer(ip, &dibh);
824 	if (ret)
825 		goto unlock;
826 	mp->mp_bh[0] = dibh;
827 
828 	if (gfs2_is_stuffed(ip)) {
829 		if (flags & IOMAP_WRITE) {
830 			loff_t max_size = gfs2_max_stuffed_size(ip);
831 
832 			if (pos + length > max_size)
833 				goto unstuff;
834 			iomap->length = max_size;
835 		} else {
836 			if (pos >= size) {
837 				if (flags & IOMAP_REPORT) {
838 					ret = -ENOENT;
839 					goto unlock;
840 				} else {
841 					/* report a hole */
842 					iomap->offset = pos;
843 					iomap->length = length;
844 					goto do_alloc;
845 				}
846 			}
847 			iomap->length = size;
848 		}
849 		iomap->addr = (ip->i_no_addr << inode->i_blkbits) +
850 			      sizeof(struct gfs2_dinode);
851 		iomap->type = IOMAP_INLINE;
852 		iomap->inline_data = dibh->b_data + sizeof(struct gfs2_dinode);
853 		goto out;
854 	}
855 
856 unstuff:
857 	lblock = pos >> inode->i_blkbits;
858 	iomap->offset = lblock << inode->i_blkbits;
859 	lblock_stop = (pos + length - 1) >> inode->i_blkbits;
860 	len = lblock_stop - lblock + 1;
861 	iomap->length = len << inode->i_blkbits;
862 
863 	height = ip->i_height;
864 	while ((lblock + 1) * sdp->sd_sb.sb_bsize > sdp->sd_heightsize[height])
865 		height++;
866 	find_metapath(sdp, lblock, mp, height);
867 	if (height > ip->i_height || gfs2_is_stuffed(ip))
868 		goto do_alloc;
869 
870 	ret = lookup_metapath(ip, mp);
871 	if (ret)
872 		goto unlock;
873 
874 	if (mp->mp_aheight != ip->i_height)
875 		goto do_alloc;
876 
877 	ptr = metapointer(ip->i_height - 1, mp);
878 	if (*ptr == 0)
879 		goto do_alloc;
880 
881 	bh = mp->mp_bh[ip->i_height - 1];
882 	len = gfs2_extent_length(bh, ptr, len, &eob);
883 
884 	iomap->addr = be64_to_cpu(*ptr) << inode->i_blkbits;
885 	iomap->length = len << inode->i_blkbits;
886 	iomap->type = IOMAP_MAPPED;
887 	iomap->flags |= IOMAP_F_MERGED;
888 	if (eob)
889 		iomap->flags |= IOMAP_F_GFS2_BOUNDARY;
890 
891 out:
892 	iomap->bdev = inode->i_sb->s_bdev;
893 unlock:
894 	up_read(&ip->i_rw_mutex);
895 	return ret;
896 
897 do_alloc:
898 	iomap->addr = IOMAP_NULL_ADDR;
899 	iomap->type = IOMAP_HOLE;
900 	if (flags & IOMAP_REPORT) {
901 		if (pos >= size)
902 			ret = -ENOENT;
903 		else if (height == ip->i_height)
904 			ret = gfs2_hole_size(inode, lblock, len, mp, iomap);
905 		else
906 			iomap->length = size - pos;
907 	} else if (flags & IOMAP_WRITE) {
908 		u64 alloc_size;
909 
910 		if (flags & IOMAP_DIRECT)
911 			goto out;  /* (see gfs2_file_direct_write) */
912 
913 		len = gfs2_alloc_size(inode, mp, len);
914 		alloc_size = len << inode->i_blkbits;
915 		if (alloc_size < iomap->length)
916 			iomap->length = alloc_size;
917 	} else {
918 		if (pos < size && height == ip->i_height)
919 			ret = gfs2_hole_size(inode, lblock, len, mp, iomap);
920 	}
921 	goto out;
922 }
923 
924 /**
925  * gfs2_lblk_to_dblk - convert logical block to disk block
926  * @inode: the inode of the file we're mapping
927  * @lblock: the block relative to the start of the file
928  * @dblock: the returned dblock, if no error
929  *
930  * This function maps a single block from a file logical block (relative to
931  * the start of the file) to a file system absolute block using iomap.
932  *
933  * Returns: the absolute file system block, or an error
934  */
935 int gfs2_lblk_to_dblk(struct inode *inode, u32 lblock, u64 *dblock)
936 {
937 	struct iomap iomap = { };
938 	struct metapath mp = { .mp_aheight = 1, };
939 	loff_t pos = (loff_t)lblock << inode->i_blkbits;
940 	int ret;
941 
942 	ret = gfs2_iomap_get(inode, pos, i_blocksize(inode), 0, &iomap, &mp);
943 	release_metapath(&mp);
944 	if (ret == 0)
945 		*dblock = iomap.addr >> inode->i_blkbits;
946 
947 	return ret;
948 }
949 
950 static int gfs2_write_lock(struct inode *inode)
951 {
952 	struct gfs2_inode *ip = GFS2_I(inode);
953 	struct gfs2_sbd *sdp = GFS2_SB(inode);
954 	int error;
955 
956 	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
957 	error = gfs2_glock_nq(&ip->i_gh);
958 	if (error)
959 		goto out_uninit;
960 	if (&ip->i_inode == sdp->sd_rindex) {
961 		struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
962 
963 		error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE,
964 					   GL_NOCACHE, &m_ip->i_gh);
965 		if (error)
966 			goto out_unlock;
967 	}
968 	return 0;
969 
970 out_unlock:
971 	gfs2_glock_dq(&ip->i_gh);
972 out_uninit:
973 	gfs2_holder_uninit(&ip->i_gh);
974 	return error;
975 }
976 
977 static void gfs2_write_unlock(struct inode *inode)
978 {
979 	struct gfs2_inode *ip = GFS2_I(inode);
980 	struct gfs2_sbd *sdp = GFS2_SB(inode);
981 
982 	if (&ip->i_inode == sdp->sd_rindex) {
983 		struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
984 
985 		gfs2_glock_dq_uninit(&m_ip->i_gh);
986 	}
987 	gfs2_glock_dq_uninit(&ip->i_gh);
988 }
989 
990 static int gfs2_iomap_page_prepare(struct inode *inode, loff_t pos,
991 				   unsigned len, struct iomap *iomap)
992 {
993 	unsigned int blockmask = i_blocksize(inode) - 1;
994 	struct gfs2_sbd *sdp = GFS2_SB(inode);
995 	unsigned int blocks;
996 
997 	blocks = ((pos & blockmask) + len + blockmask) >> inode->i_blkbits;
998 	return gfs2_trans_begin(sdp, RES_DINODE + blocks, 0);
999 }
1000 
1001 static void gfs2_iomap_page_done(struct inode *inode, loff_t pos,
1002 				 unsigned copied, struct page *page,
1003 				 struct iomap *iomap)
1004 {
1005 	struct gfs2_inode *ip = GFS2_I(inode);
1006 	struct gfs2_sbd *sdp = GFS2_SB(inode);
1007 
1008 	if (page && !gfs2_is_stuffed(ip))
1009 		gfs2_page_add_databufs(ip, page, offset_in_page(pos), copied);
1010 	gfs2_trans_end(sdp);
1011 }
1012 
1013 static const struct iomap_page_ops gfs2_iomap_page_ops = {
1014 	.page_prepare = gfs2_iomap_page_prepare,
1015 	.page_done = gfs2_iomap_page_done,
1016 };
1017 
1018 static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
1019 				  loff_t length, unsigned flags,
1020 				  struct iomap *iomap,
1021 				  struct metapath *mp)
1022 {
1023 	struct gfs2_inode *ip = GFS2_I(inode);
1024 	struct gfs2_sbd *sdp = GFS2_SB(inode);
1025 	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
1026 	bool unstuff, alloc_required;
1027 	int ret;
1028 
1029 	ret = gfs2_write_lock(inode);
1030 	if (ret)
1031 		return ret;
1032 
1033 	unstuff = gfs2_is_stuffed(ip) &&
1034 		  pos + length > gfs2_max_stuffed_size(ip);
1035 
1036 	ret = gfs2_iomap_get(inode, pos, length, flags, iomap, mp);
1037 	if (ret)
1038 		goto out_unlock;
1039 
1040 	alloc_required = unstuff || iomap->type == IOMAP_HOLE;
1041 
1042 	if (alloc_required || gfs2_is_jdata(ip))
1043 		gfs2_write_calc_reserv(ip, iomap->length, &data_blocks,
1044 				       &ind_blocks);
1045 
1046 	if (alloc_required) {
1047 		struct gfs2_alloc_parms ap = {
1048 			.target = data_blocks + ind_blocks
1049 		};
1050 
1051 		ret = gfs2_quota_lock_check(ip, &ap);
1052 		if (ret)
1053 			goto out_unlock;
1054 
1055 		ret = gfs2_inplace_reserve(ip, &ap);
1056 		if (ret)
1057 			goto out_qunlock;
1058 	}
1059 
1060 	rblocks = RES_DINODE + ind_blocks;
1061 	if (gfs2_is_jdata(ip))
1062 		rblocks += data_blocks;
1063 	if (ind_blocks || data_blocks)
1064 		rblocks += RES_STATFS + RES_QUOTA;
1065 	if (inode == sdp->sd_rindex)
1066 		rblocks += 2 * RES_STATFS;
1067 	if (alloc_required)
1068 		rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks);
1069 
1070 	if (unstuff || iomap->type == IOMAP_HOLE) {
1071 		struct gfs2_trans *tr;
1072 
1073 		ret = gfs2_trans_begin(sdp, rblocks,
1074 				       iomap->length >> inode->i_blkbits);
1075 		if (ret)
1076 			goto out_trans_fail;
1077 
1078 		if (unstuff) {
1079 			ret = gfs2_unstuff_dinode(ip, NULL);
1080 			if (ret)
1081 				goto out_trans_end;
1082 			release_metapath(mp);
1083 			ret = gfs2_iomap_get(inode, iomap->offset,
1084 					     iomap->length, flags, iomap, mp);
1085 			if (ret)
1086 				goto out_trans_end;
1087 		}
1088 
1089 		if (iomap->type == IOMAP_HOLE) {
1090 			ret = gfs2_iomap_alloc(inode, iomap, mp);
1091 			if (ret) {
1092 				gfs2_trans_end(sdp);
1093 				gfs2_inplace_release(ip);
1094 				punch_hole(ip, iomap->offset, iomap->length);
1095 				goto out_qunlock;
1096 			}
1097 		}
1098 
1099 		tr = current->journal_info;
1100 		if (tr->tr_num_buf_new)
1101 			__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
1102 		else
1103 			gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[0]);
1104 
1105 		gfs2_trans_end(sdp);
1106 	}
1107 
1108 	if (gfs2_is_stuffed(ip) || gfs2_is_jdata(ip))
1109 		iomap->page_ops = &gfs2_iomap_page_ops;
1110 	return 0;
1111 
1112 out_trans_end:
1113 	gfs2_trans_end(sdp);
1114 out_trans_fail:
1115 	if (alloc_required)
1116 		gfs2_inplace_release(ip);
1117 out_qunlock:
1118 	if (alloc_required)
1119 		gfs2_quota_unlock(ip);
1120 out_unlock:
1121 	gfs2_write_unlock(inode);
1122 	return ret;
1123 }
1124 
1125 static int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
1126 			    unsigned flags, struct iomap *iomap)
1127 {
1128 	struct gfs2_inode *ip = GFS2_I(inode);
1129 	struct metapath mp = { .mp_aheight = 1, };
1130 	int ret;
1131 
1132 	iomap->flags |= IOMAP_F_BUFFER_HEAD;
1133 
1134 	trace_gfs2_iomap_start(ip, pos, length, flags);
1135 	if ((flags & IOMAP_WRITE) && !(flags & IOMAP_DIRECT)) {
1136 		ret = gfs2_iomap_begin_write(inode, pos, length, flags, iomap, &mp);
1137 	} else {
1138 		ret = gfs2_iomap_get(inode, pos, length, flags, iomap, &mp);
1139 
1140 		/*
1141 		 * Silently fall back to buffered I/O for stuffed files or if
1142 		 * we've hit a hole (see gfs2_file_direct_write).
1143 		 */
1144 		if ((flags & IOMAP_WRITE) && (flags & IOMAP_DIRECT) &&
1145 		    iomap->type != IOMAP_MAPPED)
1146 			ret = -ENOTBLK;
1147 	}
1148 	release_metapath(&mp);
1149 	trace_gfs2_iomap_end(ip, iomap, ret);
1150 	return ret;
1151 }
1152 
1153 static int gfs2_iomap_end(struct inode *inode, loff_t pos, loff_t length,
1154 			  ssize_t written, unsigned flags, struct iomap *iomap)
1155 {
1156 	struct gfs2_inode *ip = GFS2_I(inode);
1157 	struct gfs2_sbd *sdp = GFS2_SB(inode);
1158 
1159 	if ((flags & (IOMAP_WRITE | IOMAP_DIRECT)) != IOMAP_WRITE)
1160 		goto out;
1161 
1162 	if (!gfs2_is_stuffed(ip))
1163 		gfs2_ordered_add_inode(ip);
1164 
1165 	if (inode == sdp->sd_rindex)
1166 		adjust_fs_space(inode);
1167 
1168 	gfs2_inplace_release(ip);
1169 
1170 	if (length != written && (iomap->flags & IOMAP_F_NEW)) {
1171 		/* Deallocate blocks that were just allocated. */
1172 		loff_t blockmask = i_blocksize(inode) - 1;
1173 		loff_t end = (pos + length) & ~blockmask;
1174 
1175 		pos = (pos + written + blockmask) & ~blockmask;
1176 		if (pos < end) {
1177 			truncate_pagecache_range(inode, pos, end - 1);
1178 			punch_hole(ip, pos, end - pos);
1179 		}
1180 	}
1181 
1182 	if (ip->i_qadata && ip->i_qadata->qa_qd_num)
1183 		gfs2_quota_unlock(ip);
1184 	if (iomap->flags & IOMAP_F_SIZE_CHANGED)
1185 		mark_inode_dirty(inode);
1186 	gfs2_write_unlock(inode);
1187 
1188 out:
1189 	return 0;
1190 }
1191 
1192 const struct iomap_ops gfs2_iomap_ops = {
1193 	.iomap_begin = gfs2_iomap_begin,
1194 	.iomap_end = gfs2_iomap_end,
1195 };
1196 
1197 /**
1198  * gfs2_block_map - Map one or more blocks of an inode to a disk block
1199  * @inode: The inode
1200  * @lblock: The logical block number
1201  * @bh_map: The bh to be mapped
1202  * @create: True if it's ok to allocate blocks to satisfy the request
1203  *
1204  * The size of the requested mapping is defined in bh_map->b_size.
1205  *
1206  * Clears buffer_mapped(bh_map) and leaves bh_map->b_size unchanged
1207  * when @lblock is not mapped.  Sets buffer_mapped(bh_map) and
1208  * bh_map->b_size to indicate the size of the mapping when @lblock and
1209  * successive blocks are mapped, up to the requested size.
1210  *
1211  * Sets buffer_boundary() if a read of metadata will be required
1212  * before the next block can be mapped. Sets buffer_new() if new
1213  * blocks were allocated.
1214  *
1215  * Returns: errno
1216  */
1217 
1218 int gfs2_block_map(struct inode *inode, sector_t lblock,
1219 		   struct buffer_head *bh_map, int create)
1220 {
1221 	struct gfs2_inode *ip = GFS2_I(inode);
1222 	loff_t pos = (loff_t)lblock << inode->i_blkbits;
1223 	loff_t length = bh_map->b_size;
1224 	struct metapath mp = { .mp_aheight = 1, };
1225 	struct iomap iomap = { };
1226 	int ret;
1227 
1228 	clear_buffer_mapped(bh_map);
1229 	clear_buffer_new(bh_map);
1230 	clear_buffer_boundary(bh_map);
1231 	trace_gfs2_bmap(ip, bh_map, lblock, create, 1);
1232 
1233 	if (create) {
1234 		ret = gfs2_iomap_get(inode, pos, length, IOMAP_WRITE, &iomap, &mp);
1235 		if (!ret && iomap.type == IOMAP_HOLE)
1236 			ret = gfs2_iomap_alloc(inode, &iomap, &mp);
1237 		release_metapath(&mp);
1238 	} else {
1239 		ret = gfs2_iomap_get(inode, pos, length, 0, &iomap, &mp);
1240 		release_metapath(&mp);
1241 	}
1242 	if (ret)
1243 		goto out;
1244 
1245 	if (iomap.length > bh_map->b_size) {
1246 		iomap.length = bh_map->b_size;
1247 		iomap.flags &= ~IOMAP_F_GFS2_BOUNDARY;
1248 	}
1249 	if (iomap.addr != IOMAP_NULL_ADDR)
1250 		map_bh(bh_map, inode->i_sb, iomap.addr >> inode->i_blkbits);
1251 	bh_map->b_size = iomap.length;
1252 	if (iomap.flags & IOMAP_F_GFS2_BOUNDARY)
1253 		set_buffer_boundary(bh_map);
1254 	if (iomap.flags & IOMAP_F_NEW)
1255 		set_buffer_new(bh_map);
1256 
1257 out:
1258 	trace_gfs2_bmap(ip, bh_map, lblock, create, ret);
1259 	return ret;
1260 }
1261 
1262 /*
1263  * Deprecated: do not use in new code
1264  */
1265 int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsigned *extlen)
1266 {
1267 	struct buffer_head bh = { .b_state = 0, .b_blocknr = 0 };
1268 	int ret;
1269 	int create = *new;
1270 
1271 	BUG_ON(!extlen);
1272 	BUG_ON(!dblock);
1273 	BUG_ON(!new);
1274 
1275 	bh.b_size = BIT(inode->i_blkbits + (create ? 0 : 5));
1276 	ret = gfs2_block_map(inode, lblock, &bh, create);
1277 	*extlen = bh.b_size >> inode->i_blkbits;
1278 	*dblock = bh.b_blocknr;
1279 	if (buffer_new(&bh))
1280 		*new = 1;
1281 	else
1282 		*new = 0;
1283 	return ret;
1284 }
1285 
1286 /**
1287  * gfs2_block_zero_range - Deal with zeroing out data
1288  *
1289  * This is partly borrowed from ext3.
1290  */
1291 static int gfs2_block_zero_range(struct inode *inode, loff_t from,
1292 				 unsigned int length)
1293 {
1294 	struct address_space *mapping = inode->i_mapping;
1295 	struct gfs2_inode *ip = GFS2_I(inode);
1296 	unsigned long index = from >> PAGE_SHIFT;
1297 	unsigned offset = from & (PAGE_SIZE-1);
1298 	unsigned blocksize, iblock, pos;
1299 	struct buffer_head *bh;
1300 	struct page *page;
1301 	int err;
1302 
1303 	page = find_or_create_page(mapping, index, GFP_NOFS);
1304 	if (!page)
1305 		return 0;
1306 
1307 	blocksize = inode->i_sb->s_blocksize;
1308 	iblock = index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits);
1309 
1310 	if (!page_has_buffers(page))
1311 		create_empty_buffers(page, blocksize, 0);
1312 
1313 	/* Find the buffer that contains "offset" */
1314 	bh = page_buffers(page);
1315 	pos = blocksize;
1316 	while (offset >= pos) {
1317 		bh = bh->b_this_page;
1318 		iblock++;
1319 		pos += blocksize;
1320 	}
1321 
1322 	err = 0;
1323 
1324 	if (!buffer_mapped(bh)) {
1325 		gfs2_block_map(inode, iblock, bh, 0);
1326 		/* unmapped? It's a hole - nothing to do */
1327 		if (!buffer_mapped(bh))
1328 			goto unlock;
1329 	}
1330 
1331 	/* Ok, it's mapped. Make sure it's up-to-date */
1332 	if (PageUptodate(page))
1333 		set_buffer_uptodate(bh);
1334 
1335 	if (!buffer_uptodate(bh)) {
1336 		err = -EIO;
1337 		ll_rw_block(REQ_OP_READ, 0, 1, &bh);
1338 		wait_on_buffer(bh);
1339 		/* Uhhuh. Read error. Complain and punt. */
1340 		if (!buffer_uptodate(bh))
1341 			goto unlock;
1342 		err = 0;
1343 	}
1344 
1345 	if (gfs2_is_jdata(ip))
1346 		gfs2_trans_add_data(ip->i_gl, bh);
1347 	else
1348 		gfs2_ordered_add_inode(ip);
1349 
1350 	zero_user(page, offset, length);
1351 	mark_buffer_dirty(bh);
1352 unlock:
1353 	unlock_page(page);
1354 	put_page(page);
1355 	return err;
1356 }
1357 
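/*
 * A journaled truncate has to issue one revoke per truncated block, so each
 * transaction below is limited to GFS2_JTRUNC_REVOKES blocks; with a 4k
 * block size that is a 32MB chunk of page cache per transaction (see
 * gfs2_journaled_truncate()).
 */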
1358 #define GFS2_JTRUNC_REVOKES 8192
1359 
1360 /**
1361  * gfs2_journaled_truncate - Wrapper for truncate_pagecache for jdata files
1362  * @inode: The inode being truncated
1363  * @oldsize: The original (larger) size
1364  * @newsize: The new smaller size
1365  *
1366  * With jdata files, we have to journal a revoke for each block which is
1367  * truncated. As a result, we need to split this into separate transactions
1368  * if the number of pages being truncated gets too large.
1369  */
1370 
1371 static int gfs2_journaled_truncate(struct inode *inode, u64 oldsize, u64 newsize)
1372 {
1373 	struct gfs2_sbd *sdp = GFS2_SB(inode);
1374 	u64 max_chunk = GFS2_JTRUNC_REVOKES * sdp->sd_vfs->s_blocksize;
1375 	u64 chunk;
1376 	int error;
1377 
1378 	while (oldsize != newsize) {
1379 		struct gfs2_trans *tr;
1380 		unsigned int offs;
1381 
1382 		chunk = oldsize - newsize;
1383 		if (chunk > max_chunk)
1384 			chunk = max_chunk;
1385 
1386 		offs = oldsize & ~PAGE_MASK;
1387 		if (offs && chunk > PAGE_SIZE)
1388 			chunk = offs + ((chunk - offs) & PAGE_MASK);
1389 
1390 		truncate_pagecache(inode, oldsize - chunk);
1391 		oldsize -= chunk;
1392 
1393 		tr = current->journal_info;
1394 		if (!test_bit(TR_TOUCHED, &tr->tr_flags))
1395 			continue;
1396 
1397 		gfs2_trans_end(sdp);
1398 		error = gfs2_trans_begin(sdp, RES_DINODE, GFS2_JTRUNC_REVOKES);
1399 		if (error)
1400 			return error;
1401 	}
1402 
1403 	return 0;
1404 }
1405 
1406 static int trunc_start(struct inode *inode, u64 newsize)
1407 {
1408 	struct gfs2_inode *ip = GFS2_I(inode);
1409 	struct gfs2_sbd *sdp = GFS2_SB(inode);
1410 	struct buffer_head *dibh = NULL;
1411 	int journaled = gfs2_is_jdata(ip);
1412 	u64 oldsize = inode->i_size;
1413 	int error;
1414 
1415 	if (journaled)
1416 		error = gfs2_trans_begin(sdp, RES_DINODE + RES_JDATA, GFS2_JTRUNC_REVOKES);
1417 	else
1418 		error = gfs2_trans_begin(sdp, RES_DINODE, 0);
1419 	if (error)
1420 		return error;
1421 
1422 	error = gfs2_meta_inode_buffer(ip, &dibh);
1423 	if (error)
1424 		goto out;
1425 
1426 	gfs2_trans_add_meta(ip->i_gl, dibh);
1427 
1428 	if (gfs2_is_stuffed(ip)) {
1429 		gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode) + newsize);
1430 	} else {
1431 		unsigned int blocksize = i_blocksize(inode);
1432 		unsigned int offs = newsize & (blocksize - 1);
1433 		if (offs) {
1434 			error = gfs2_block_zero_range(inode, newsize,
1435 						      blocksize - offs);
1436 			if (error)
1437 				goto out;
1438 		}
1439 		ip->i_diskflags |= GFS2_DIF_TRUNC_IN_PROG;
1440 	}
1441 
1442 	i_size_write(inode, newsize);
1443 	ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
1444 	gfs2_dinode_out(ip, dibh->b_data);
1445 
1446 	if (journaled)
1447 		error = gfs2_journaled_truncate(inode, oldsize, newsize);
1448 	else
1449 		truncate_pagecache(inode, newsize);
1450 
1451 out:
1452 	brelse(dibh);
1453 	if (current->journal_info)
1454 		gfs2_trans_end(sdp);
1455 	return error;
1456 }
1457 
1458 int gfs2_iomap_get_alloc(struct inode *inode, loff_t pos, loff_t length,
1459 			 struct iomap *iomap)
1460 {
1461 	struct metapath mp = { .mp_aheight = 1, };
1462 	int ret;
1463 
1464 	ret = gfs2_iomap_get(inode, pos, length, IOMAP_WRITE, iomap, &mp);
1465 	if (!ret && iomap->type == IOMAP_HOLE)
1466 		ret = gfs2_iomap_alloc(inode, iomap, &mp);
1467 	release_metapath(&mp);
1468 	return ret;
1469 }
1470 
1471 /**
1472  * sweep_bh_for_rgrps - find an rgrp in a meta buffer and free blocks therein
1473  * @ip: inode
1474  * @rd_gh: holder of resource group glock
1475  * @bh: buffer head to sweep
1476  * @start: starting point in bh
1477  * @end: end point in bh
1478  * @meta: true if bh points to metadata (rather than data)
1479  * @btotal: place to keep count of total blocks freed
1480  *
1481  * We sweep a metadata buffer (provided by the metapath) for blocks we need to
1482  * free, and free them all. However, we do it one rgrp at a time. If this
1483  * block has references to multiple rgrps, we break it into individual
1484  * transactions. This allows other processes to use the rgrps while we're
1485  * focused on a single one, for better concurrency / performance.
1486  * At every transaction boundary, we rewrite the inode into the journal.
1487  * That way the bitmaps are kept consistent with the inode and we can recover
1488  * if we're interrupted by power-outages.
1489  *
1490  * Returns: 0, or return code if an error occurred.
1491  *          *btotal has the total number of blocks freed
1492  */
1493 static int sweep_bh_for_rgrps(struct gfs2_inode *ip, struct gfs2_holder *rd_gh,
1494 			      struct buffer_head *bh, __be64 *start, __be64 *end,
1495 			      bool meta, u32 *btotal)
1496 {
1497 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1498 	struct gfs2_rgrpd *rgd;
1499 	struct gfs2_trans *tr;
1500 	__be64 *p;
1501 	int blks_outside_rgrp;
1502 	u64 bn, bstart, isize_blks;
1503 	s64 blen; /* needs to be s64 or gfs2_add_inode_blocks breaks */
1504 	int ret = 0;
1505 	bool buf_in_tr = false; /* buffer was added to transaction */
1506 
1507 more_rgrps:
1508 	rgd = NULL;
1509 	if (gfs2_holder_initialized(rd_gh)) {
1510 		rgd = gfs2_glock2rgrp(rd_gh->gh_gl);
1511 		gfs2_assert_withdraw(sdp,
1512 			     gfs2_glock_is_locked_by_me(rd_gh->gh_gl));
1513 	}
1514 	blks_outside_rgrp = 0;
1515 	bstart = 0;
1516 	blen = 0;
1517 
1518 	for (p = start; p < end; p++) {
1519 		if (!*p)
1520 			continue;
1521 		bn = be64_to_cpu(*p);
1522 
1523 		if (rgd) {
1524 			if (!rgrp_contains_block(rgd, bn)) {
1525 				blks_outside_rgrp++;
1526 				continue;
1527 			}
1528 		} else {
1529 			rgd = gfs2_blk2rgrpd(sdp, bn, true);
1530 			if (unlikely(!rgd)) {
1531 				ret = -EIO;
1532 				goto out;
1533 			}
1534 			ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
1535 						 0, rd_gh);
1536 			if (ret)
1537 				goto out;
1538 
1539 			/* Must be done with the rgrp glock held: */
1540 			if (gfs2_rs_active(&ip->i_res) &&
1541 			    rgd == ip->i_res.rs_rbm.rgd)
1542 				gfs2_rs_deltree(&ip->i_res);
1543 		}
1544 
1545 		/* The size of our transactions will be unknown until we
1546 		   actually process all the metadata blocks that relate to
1547 		   the rgrp. So we estimate. We know it can't be more than
1548 		   the dinode's i_blocks and we don't want to exceed the
1549 		   journal flush threshold, sd_log_thresh2. */
1550 		if (current->journal_info == NULL) {
1551 			unsigned int jblocks_rqsted, revokes;
1552 
1553 			jblocks_rqsted = rgd->rd_length + RES_DINODE +
1554 				RES_INDIRECT;
1555 			isize_blks = gfs2_get_inode_blocks(&ip->i_inode);
1556 			if (isize_blks > atomic_read(&sdp->sd_log_thresh2))
1557 				jblocks_rqsted +=
1558 					atomic_read(&sdp->sd_log_thresh2);
1559 			else
1560 				jblocks_rqsted += isize_blks;
1561 			revokes = jblocks_rqsted;
1562 			if (meta)
1563 				revokes += end - start;
1564 			else if (ip->i_depth)
1565 				revokes += sdp->sd_inptrs;
1566 			ret = gfs2_trans_begin(sdp, jblocks_rqsted, revokes);
1567 			if (ret)
1568 				goto out_unlock;
1569 			down_write(&ip->i_rw_mutex);
1570 		}
1571 		/* check if we will exceed the transaction blocks requested */
1572 		tr = current->journal_info;
1573 		if (tr->tr_num_buf_new + RES_STATFS +
1574 		    RES_QUOTA >= atomic_read(&sdp->sd_log_thresh2)) {
1575 			/* We set blks_outside_rgrp to ensure the loop will
1576 			   be repeated for the same rgrp, but with a new
1577 			   transaction. */
1578 			blks_outside_rgrp++;
1579 			/* This next part is tricky. If the buffer was added
1580 			   to the transaction, we've already set some block
1581 			   pointers to 0, so we better follow through and free
1582 			   them, or we will introduce corruption (so break).
1583 			   This may be impossible, or at least rare, but I
1584 			   decided to cover the case regardless.
1585 
1586 			   If the buffer was not added to the transaction
1587 			   (this call), doing so would exceed our transaction
1588 			   size, so we need to end the transaction and start a
1589 			   new one (so goto). */
1590 
1591 			if (buf_in_tr)
1592 				break;
1593 			goto out_unlock;
1594 		}
1595 
1596 		gfs2_trans_add_meta(ip->i_gl, bh);
1597 		buf_in_tr = true;
1598 		*p = 0;
1599 		if (bstart + blen == bn) {
1600 			blen++;
1601 			continue;
1602 		}
1603 		if (bstart) {
1604 			__gfs2_free_blocks(ip, rgd, bstart, (u32)blen, meta);
1605 			(*btotal) += blen;
1606 			gfs2_add_inode_blocks(&ip->i_inode, -blen);
1607 		}
1608 		bstart = bn;
1609 		blen = 1;
1610 	}
1611 	if (bstart) {
1612 		__gfs2_free_blocks(ip, rgd, bstart, (u32)blen, meta);
1613 		(*btotal) += blen;
1614 		gfs2_add_inode_blocks(&ip->i_inode, -blen);
1615 	}
1616 out_unlock:
1617 	if (!ret && blks_outside_rgrp) { /* If buffer still has non-zero blocks
1618 					    outside the rgrp we just processed,
1619 					    do it all over again. */
1620 		if (current->journal_info) {
1621 			struct buffer_head *dibh;
1622 
1623 			ret = gfs2_meta_inode_buffer(ip, &dibh);
1624 			if (ret)
1625 				goto out;
1626 
1627 			/* Every transaction boundary, we rewrite the dinode
1628 			   to keep its di_blocks current in case of failure. */
1629 			ip->i_inode.i_mtime = ip->i_inode.i_ctime =
1630 				current_time(&ip->i_inode);
1631 			gfs2_trans_add_meta(ip->i_gl, dibh);
1632 			gfs2_dinode_out(ip, dibh->b_data);
1633 			brelse(dibh);
1634 			up_write(&ip->i_rw_mutex);
1635 			gfs2_trans_end(sdp);
1636 		}
1637 		gfs2_glock_dq_uninit(rd_gh);
1638 		cond_resched();
1639 		goto more_rgrps;
1640 	}
1641 out:
1642 	return ret;
1643 }
1644 
1645 static bool mp_eq_to_hgt(struct metapath *mp, __u16 *list, unsigned int h)
1646 {
1647 	if (memcmp(mp->mp_list, list, h * sizeof(mp->mp_list[0])))
1648 		return false;
1649 	return true;
1650 }
1651 
1652 /**
1653  * find_nonnull_ptr - find a non-null pointer given a metapath and height
1654  * @mp: starting metapath
1655  * @h: desired height to search
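 * @sdp: The superblock
 * @end_list: mp_list describing the end of the region to search, or NULL
 * @end_aligned: height to which the end position is aligned (see punch_hole())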
1656  *
1657  * Assumes the metapath is valid (with buffers) out to height h.
1658  * Returns: true if a non-null pointer was found in the metapath buffer
1659  *          false if all remaining pointers are NULL in the buffer
1660  */
1661 static bool find_nonnull_ptr(struct gfs2_sbd *sdp, struct metapath *mp,
1662 			     unsigned int h,
1663 			     __u16 *end_list, unsigned int end_aligned)
1664 {
1665 	struct buffer_head *bh = mp->mp_bh[h];
1666 	__be64 *first, *ptr, *end;
1667 
1668 	first = metaptr1(h, mp);
1669 	ptr = first + mp->mp_list[h];
1670 	end = (__be64 *)(bh->b_data + bh->b_size);
1671 	if (end_list && mp_eq_to_hgt(mp, end_list, h)) {
1672 		bool keep_end = h < end_aligned;
1673 		end = first + end_list[h] + keep_end;
1674 	}
1675 
1676 	while (ptr < end) {
1677 		if (*ptr) { /* if we have a non-null pointer */
1678 			mp->mp_list[h] = ptr - first;
1679 			h++;
1680 			if (h < GFS2_MAX_META_HEIGHT)
1681 				mp->mp_list[h] = 0;
1682 			return true;
1683 		}
1684 		ptr++;
1685 	}
1686 	return false;
1687 }
1688 
1689 enum dealloc_states {
1690 	DEALLOC_MP_FULL = 0,    /* Strip a metapath with all buffers read in */
1691 	DEALLOC_MP_LOWER = 1,   /* lower the metapath strip height */
1692 	DEALLOC_FILL_MP = 2,  /* Fill in the metapath to the given height. */
1693 	DEALLOC_DONE = 3,       /* process complete */
1694 };
1695 
1696 static inline void
1697 metapointer_range(struct metapath *mp, int height,
1698 		  __u16 *start_list, unsigned int start_aligned,
1699 		  __u16 *end_list, unsigned int end_aligned,
1700 		  __be64 **start, __be64 **end)
1701 {
1702 	struct buffer_head *bh = mp->mp_bh[height];
1703 	__be64 *first;
1704 
1705 	first = metaptr1(height, mp);
1706 	*start = first;
1707 	if (mp_eq_to_hgt(mp, start_list, height)) {
1708 		bool keep_start = height < start_aligned;
1709 		*start = first + start_list[height] + keep_start;
1710 	}
1711 	*end = (__be64 *)(bh->b_data + bh->b_size);
1712 	if (end_list && mp_eq_to_hgt(mp, end_list, height)) {
1713 		bool keep_end = height < end_aligned;
1714 		*end = first + end_list[height] + keep_end;
1715 	}
1716 }
1717 
1718 static inline bool walk_done(struct gfs2_sbd *sdp,
1719 			     struct metapath *mp, int height,
1720 			     __u16 *end_list, unsigned int end_aligned)
1721 {
1722 	__u16 end;
1723 
1724 	if (end_list) {
1725 		bool keep_end = height < end_aligned;
1726 		if (!mp_eq_to_hgt(mp, end_list, height))
1727 			return false;
1728 		end = end_list[height] + keep_end;
1729 	} else
1730 		end = (height > 0) ? sdp->sd_inptrs : sdp->sd_diptrs;
1731 	return mp->mp_list[height] >= end;
1732 }
1733 
1734 /**
1735  * punch_hole - deallocate blocks in a file
1736  * @ip: inode to truncate
1737  * @offset: the start of the hole
1738  * @length: the size of the hole (or 0 for truncate)
1739  *
1740  * Punch a hole into a file or truncate a file at a given position.  This
1741  * function operates in whole blocks (@offset and @length are rounded
1742  * accordingly); partially filled blocks must be cleared otherwise.
1743  *
1744  * This function works from the bottom up, and from the right to the left. In
1745  * other words, it strips off the highest layer (data) before stripping any of
1746  * the metadata. Doing it this way is best in case the operation is interrupted
1747  * by power failure, etc.  The dinode is rewritten in every transaction to
1748  * guarantee integrity.
1749  */
1750 static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length)
1751 {
1752 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1753 	u64 maxsize = sdp->sd_heightsize[ip->i_height];
1754 	struct metapath mp = {};
1755 	struct buffer_head *dibh, *bh;
1756 	struct gfs2_holder rd_gh;
1757 	unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift;
1758 	u64 lblock = (offset + (1 << bsize_shift) - 1) >> bsize_shift;
1759 	__u16 start_list[GFS2_MAX_META_HEIGHT];
1760 	__u16 __end_list[GFS2_MAX_META_HEIGHT], *end_list = NULL;
1761 	unsigned int start_aligned, uninitialized_var(end_aligned);
1762 	unsigned int strip_h = ip->i_height - 1;
1763 	u32 btotal = 0;
1764 	int ret, state;
1765 	int mp_h; /* metapath buffers are read in to this height */
1766 	u64 prev_bnr = 0;
1767 	__be64 *start, *end;
1768 
1769 	if (offset >= maxsize) {
1770 		/*
1771 		 * The starting point lies beyond the allocated meta-data;
1772 		 * there are no blocks to deallocate.
1773 		 */
1774 		return 0;
1775 	}
1776 
1777 	/*
1778 	 * The start position of the hole is defined by lblock, start_list, and
1779 	 * start_aligned.  The end position of the hole is defined by lend,
1780 	 * end_list, and end_aligned.
1781 	 *
1782 	 * start_aligned and end_aligned define down to which height the start
1783 	 * and end positions are aligned to the metadata tree (i.e., the
1784 	 * position is a multiple of the metadata granularity at the height
1785 	 * above).  This determines at which heights additional meta pointers
1786 	 * need to be preserved for the remaining data.
1787 	 */
1788 
1789 	if (length) {
1790 		u64 end_offset = offset + length;
1791 		u64 lend;
1792 
1793 		/*
1794 		 * Clip the end at the maximum file size for the given height:
1795 		 * that's how far the metadata goes; files bigger than that
1796 		 * will have additional layers of indirection.
1797 		 */
1798 		if (end_offset > maxsize)
1799 			end_offset = maxsize;
1800 		lend = end_offset >> bsize_shift;
1801 
1802 		if (lblock >= lend)
1803 			return 0;
1804 
1805 		find_metapath(sdp, lend, &mp, ip->i_height);
1806 		end_list = __end_list;
1807 		memcpy(end_list, mp.mp_list, sizeof(mp.mp_list));
1808 
1809 		for (mp_h = ip->i_height - 1; mp_h > 0; mp_h--) {
1810 			if (end_list[mp_h])
1811 				break;
1812 		}
1813 		end_aligned = mp_h;
1814 	}
1815 
1816 	find_metapath(sdp, lblock, &mp, ip->i_height);
1817 	memcpy(start_list, mp.mp_list, sizeof(start_list));
1818 
1819 	for (mp_h = ip->i_height - 1; mp_h > 0; mp_h--) {
1820 		if (start_list[mp_h])
1821 			break;
1822 	}
1823 	start_aligned = mp_h;
1824 
1825 	ret = gfs2_meta_inode_buffer(ip, &dibh);
1826 	if (ret)
1827 		return ret;
1828 
1829 	mp.mp_bh[0] = dibh;
1830 	ret = lookup_metapath(ip, &mp);
1831 	if (ret)
1832 		goto out_metapath;
1833 
1834 	/* issue read-ahead on metadata */
1835 	for (mp_h = 0; mp_h < mp.mp_aheight - 1; mp_h++) {
1836 		metapointer_range(&mp, mp_h, start_list, start_aligned,
1837 				  end_list, end_aligned, &start, &end);
1838 		gfs2_metapath_ra(ip->i_gl, start, end);
1839 	}
1840 
1841 	if (mp.mp_aheight == ip->i_height)
1842 		state = DEALLOC_MP_FULL; /* We have a complete metapath */
1843 	else
1844 		state = DEALLOC_FILL_MP; /* deal with partial metapath */
1845 
1846 	ret = gfs2_rindex_update(sdp);
1847 	if (ret)
1848 		goto out_metapath;
1849 
1850 	ret = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
1851 	if (ret)
1852 		goto out_metapath;
1853 	gfs2_holder_mark_uninitialized(&rd_gh);
1854 
1855 	mp_h = strip_h;
1856 
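	/*
	 * The loop below is a small state machine: DEALLOC_MP_FULL sweeps the
	 * pointers in the buffer at the current strip height, DEALLOC_MP_LOWER
	 * backs up towards the dinode looking for the next non-null pointer
	 * (and lowers strip_h once it has walked all the way back), and
	 * DEALLOC_FILL_MP reads metadata buffers back in down to the current
	 * height.  The loop ends in DEALLOC_DONE after the dinode buffer
	 * itself has been swept, or on error.
	 */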
1857 	while (state != DEALLOC_DONE) {
1858 		switch (state) {
1859 		/* Truncate a full metapath at the given strip height.
1860 		 * Note that strip_h == mp_h in order to be in this state. */
1861 		case DEALLOC_MP_FULL:
1862 			bh = mp.mp_bh[mp_h];
1863 			gfs2_assert_withdraw(sdp, bh);
1864 			if (gfs2_assert_withdraw(sdp,
1865 						 prev_bnr != bh->b_blocknr)) {
1866 				fs_emerg(sdp, "inode %llu, block:%llu, i_h:%u, "
1867 					 "s_h:%u, mp_h:%u\n",
1868 				       (unsigned long long)ip->i_no_addr,
1869 				       prev_bnr, ip->i_height, strip_h, mp_h);
1870 			}
1871 			prev_bnr = bh->b_blocknr;
1872 
1873 			if (gfs2_metatype_check(sdp, bh,
1874 						(mp_h ? GFS2_METATYPE_IN :
1875 							GFS2_METATYPE_DI))) {
1876 				ret = -EIO;
1877 				goto out;
1878 			}
1879 
1880 			/*
1881 			 * Below, passing end_aligned as 0 gives us the
1882 			 * metapointer range excluding the end point: the end
1883 			 * point is the first metapath we must not deallocate!
1884 			 */
1885 
1886 			metapointer_range(&mp, mp_h, start_list, start_aligned,
1887 					  end_list, 0 /* end_aligned */,
1888 					  &start, &end);
1889 			ret = sweep_bh_for_rgrps(ip, &rd_gh, mp.mp_bh[mp_h],
1890 						 start, end,
1891 						 mp_h != ip->i_height - 1,
1892 						 &btotal);
1893 
1894 			/* If we hit an error or have just swept the dinode
1895 			   buffer, exit. */
1896 			if (ret || !mp_h) {
1897 				state = DEALLOC_DONE;
1898 				break;
1899 			}
1900 			state = DEALLOC_MP_LOWER;
1901 			break;
1902 
1903 		/* lower the metapath strip height */
1904 		case DEALLOC_MP_LOWER:
1905 			/* We're done with the current buffer, so release it,
1906 			   unless it's the dinode buffer. Then back up to the
1907 			   previous pointer. */
1908 			if (mp_h) {
1909 				brelse(mp.mp_bh[mp_h]);
1910 				mp.mp_bh[mp_h] = NULL;
1911 			}
1912 			/* If we can't get any lower in height, we've stripped
1913 			   off all we can. Next step is to back up and start
1914 			   stripping the previous level of metadata. */
1915 			if (mp_h == 0) {
1916 				strip_h--;
1917 				memcpy(mp.mp_list, start_list, sizeof(start_list));
1918 				mp_h = strip_h;
1919 				state = DEALLOC_FILL_MP;
1920 				break;
1921 			}
1922 			mp.mp_list[mp_h] = 0;
1923 			mp_h--; /* search one metadata height down */
1924 			mp.mp_list[mp_h]++;
1925 			if (walk_done(sdp, &mp, mp_h, end_list, end_aligned))
1926 				break;
1927 			/* Here we've found a part of the metapath that is not
1928 			 * allocated. We need to search at that height for the
1929 			 * next non-null pointer. */
1930 			if (find_nonnull_ptr(sdp, &mp, mp_h, end_list, end_aligned)) {
1931 				state = DEALLOC_FILL_MP;
1932 				mp_h++;
1933 			}
1934 			/* No more non-null pointers at this height. Back up
1935 			   to the previous height and try again. */
1936 			break; /* loop around in the same state */
1937 
1938 		/* Fill the metapath with buffers to the given height. */
1939 		case DEALLOC_FILL_MP:
1940 			/* Fill the buffers out to the current height. */
1941 			ret = fillup_metapath(ip, &mp, mp_h);
1942 			if (ret < 0)
1943 				goto out;
1944 
1945 			/* On the first pass, issue read-ahead on metadata. */
1946 			if (mp.mp_aheight > 1 && strip_h == ip->i_height - 1) {
1947 				unsigned int height = mp.mp_aheight - 1;
1948 
1949 				/* No read-ahead for data blocks. */
1950 				if (mp.mp_aheight - 1 == strip_h)
1951 					height--;
1952 
1953 				for (; height >= mp.mp_aheight - ret; height--) {
1954 					metapointer_range(&mp, height,
1955 							  start_list, start_aligned,
1956 							  end_list, end_aligned,
1957 							  &start, &end);
1958 					gfs2_metapath_ra(ip->i_gl, start, end);
1959 				}
1960 			}
1961 
1962 			/* If buffers found for the entire strip height */
1963 			if (mp.mp_aheight - 1 == strip_h) {
1964 				state = DEALLOC_MP_FULL;
1965 				break;
1966 			}
1967 			if (mp.mp_aheight < ip->i_height) /* We have a partial height */
1968 				mp_h = mp.mp_aheight - 1;
1969 
1970 			/* If we find a non-null block pointer, crawl a bit
1971 			   higher up in the metapath and try again, otherwise
1972 			   we need to look lower for a new starting point. */
1973 			if (find_nonnull_ptr(sdp, &mp, mp_h, end_list, end_aligned))
1974 				mp_h++;
1975 			else
1976 				state = DEALLOC_MP_LOWER;
1977 			break;
1978 		}
1979 	}
1980 
1981 	if (btotal) {
1982 		if (current->journal_info == NULL) {
1983 			ret = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS +
1984 					       RES_QUOTA, 0);
1985 			if (ret)
1986 				goto out;
1987 			down_write(&ip->i_rw_mutex);
1988 		}
1989 		gfs2_statfs_change(sdp, 0, +btotal, 0);
1990 		gfs2_quota_change(ip, -(s64)btotal, ip->i_inode.i_uid,
1991 				  ip->i_inode.i_gid);
1992 		ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
1993 		gfs2_trans_add_meta(ip->i_gl, dibh);
1994 		gfs2_dinode_out(ip, dibh->b_data);
1995 		up_write(&ip->i_rw_mutex);
1996 		gfs2_trans_end(sdp);
1997 	}
1998 
1999 out:
2000 	if (gfs2_holder_initialized(&rd_gh))
2001 		gfs2_glock_dq_uninit(&rd_gh);
2002 	if (current->journal_info) {
2003 		up_write(&ip->i_rw_mutex);
2004 		gfs2_trans_end(sdp);
2005 		cond_resched();
2006 	}
2007 	gfs2_quota_unhold(ip);
2008 out_metapath:
2009 	release_metapath(&mp);
2010 	return ret;
2011 }
2012 
2013 static int trunc_end(struct gfs2_inode *ip)
2014 {
2015 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
2016 	struct buffer_head *dibh;
2017 	int error;
2018 
2019 	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
2020 	if (error)
2021 		return error;
2022 
2023 	down_write(&ip->i_rw_mutex);
2024 
2025 	error = gfs2_meta_inode_buffer(ip, &dibh);
2026 	if (error)
2027 		goto out;
2028 
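	/* A file truncated to zero length reverts to an empty stuffed
	   inode (height 0). */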
2029 	if (!i_size_read(&ip->i_inode)) {
2030 		ip->i_height = 0;
2031 		ip->i_goal = ip->i_no_addr;
2032 		gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
2033 		gfs2_ordered_del_inode(ip);
2034 	}
2035 	ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
2036 	ip->i_diskflags &= ~GFS2_DIF_TRUNC_IN_PROG;
2037 
2038 	gfs2_trans_add_meta(ip->i_gl, dibh);
2039 	gfs2_dinode_out(ip, dibh->b_data);
2040 	brelse(dibh);
2041 
2042 out:
2043 	up_write(&ip->i_rw_mutex);
2044 	gfs2_trans_end(sdp);
2045 	return error;
2046 }
2047 
2048 /**
2049  * do_shrink - make a file smaller
2050  * @inode: the inode
2051  * @newsize: the size to make the file
2052  *
2053  * Called with an exclusive lock on @inode. @newsize must
2054  * be equal to or smaller than the current inode size.
2055  *
2056  * Returns: errno
2057  */
2058 
2059 static int do_shrink(struct inode *inode, u64 newsize)
2060 {
2061 	struct gfs2_inode *ip = GFS2_I(inode);
2062 	int error;
2063 
2064 	error = trunc_start(inode, newsize);
2065 	if (error < 0)
2066 		return error;
2067 	if (gfs2_is_stuffed(ip))
2068 		return 0;
2069 
2070 	error = punch_hole(ip, newsize, 0);
2071 	if (error == 0)
2072 		error = trunc_end(ip);
2073 
2074 	return error;
2075 }
2076 
2077 void gfs2_trim_blocks(struct inode *inode)
2078 {
2079 	int ret;
2080 
2081 	ret = do_shrink(inode, inode->i_size);
2082 	WARN_ON(ret != 0);
2083 }
2084 
2085 /**
2086  * do_grow - Touch and update inode size
2087  * @inode: The inode
2088  * @size: The new size
2089  *
2090  * This function updates the timestamps on the inode and
2091  * may also increase the size of the inode. This function
2092  * must not be called with @size any smaller than the current
2093  * inode size.
2094  *
2095  * Although it is not strictly required to unstuff files here,
2096  * earlier versions of GFS2 had a bug in the stuffed file reading
2097  * code which would result in a buffer overrun if the size is larger
2098  * than the max stuffed file size. In order to prevent this from
2099  * occurring, such files are unstuffed, but in other cases we can
2100  * just update the inode size directly.
2101  *
2102  * Returns: 0 on success, or -ve on error
2103  */
2104 
2105 static int do_grow(struct inode *inode, u64 size)
2106 {
2107 	struct gfs2_inode *ip = GFS2_I(inode);
2108 	struct gfs2_sbd *sdp = GFS2_SB(inode);
2109 	struct gfs2_alloc_parms ap = { .target = 1, };
2110 	struct buffer_head *dibh;
2111 	int error;
2112 	int unstuff = 0;
2113 
2114 	if (gfs2_is_stuffed(ip) && size > gfs2_max_stuffed_size(ip)) {
2115 		error = gfs2_quota_lock_check(ip, &ap);
2116 		if (error)
2117 			return error;
2118 
2119 		error = gfs2_inplace_reserve(ip, &ap);
2120 		if (error)
2121 			goto do_grow_qunlock;
2122 		unstuff = 1;
2123 	}
2124 
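	/*
	 * Reserve journal space for the dinode plus the statfs and resource
	 * group changes; RES_JDATA is only needed when unstuffing a
	 * journaled-data inode, and the quota block is skipped when quotas
	 * are turned off.
	 */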
2125 	error = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS + RES_RG_BIT +
2126 				 (unstuff &&
2127 				  gfs2_is_jdata(ip) ? RES_JDATA : 0) +
2128 				 (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF ?
2129 				  0 : RES_QUOTA), 0);
2130 	if (error)
2131 		goto do_grow_release;
2132 
2133 	if (unstuff) {
2134 		error = gfs2_unstuff_dinode(ip, NULL);
2135 		if (error)
2136 			goto do_end_trans;
2137 	}
2138 
2139 	error = gfs2_meta_inode_buffer(ip, &dibh);
2140 	if (error)
2141 		goto do_end_trans;
2142 
2143 	i_size_write(inode, size);
2144 	ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
2145 	gfs2_trans_add_meta(ip->i_gl, dibh);
2146 	gfs2_dinode_out(ip, dibh->b_data);
2147 	brelse(dibh);
2148 
2149 do_end_trans:
2150 	gfs2_trans_end(sdp);
2151 do_grow_release:
2152 	if (unstuff) {
2153 		gfs2_inplace_release(ip);
2154 do_grow_qunlock:
2155 		gfs2_quota_unlock(ip);
2156 	}
2157 	return error;
2158 }
2159 
2160 /**
2161  * gfs2_setattr_size - make a file a given size
2162  * @inode: the inode
2163  * @newsize: the size to make the file
2164  *
2165  * The file size can grow, shrink, or stay the same size. This
2166  * is called holding i_rwsem and an exclusive glock on the inode
2167  * in question.
2168  *
2169  * Returns: errno
2170  */
2171 
2172 int gfs2_setattr_size(struct inode *inode, u64 newsize)
2173 {
2174 	struct gfs2_inode *ip = GFS2_I(inode);
2175 	int ret;
2176 
2177 	BUG_ON(!S_ISREG(inode->i_mode));
2178 
2179 	ret = inode_newsize_ok(inode, newsize);
2180 	if (ret)
2181 		return ret;
2182 
2183 	inode_dio_wait(inode);
2184 
2185 	ret = gfs2_rsqa_alloc(ip);
2186 	if (ret)
2187 		goto out;
2188 
2189 	if (newsize >= inode->i_size) {
2190 		ret = do_grow(inode, newsize);
2191 		goto out;
2192 	}
2193 
2194 	ret = do_shrink(inode, newsize);
2195 out:
2196 	gfs2_rsqa_delete(ip, NULL);
2197 	return ret;
2198 }
2199 
2200 int gfs2_truncatei_resume(struct gfs2_inode *ip)
2201 {
2202 	int error;
2203 	error = punch_hole(ip, i_size_read(&ip->i_inode), 0);
2204 	if (!error)
2205 		error = trunc_end(ip);
2206 	return error;
2207 }
2208 
2209 int gfs2_file_dealloc(struct gfs2_inode *ip)
2210 {
2211 	return punch_hole(ip, 0, 0);
2212 }
2213 
2214 /**
2215  * gfs2_free_journal_extents - Free cached journal bmap info
2216  * @jd: The journal
2217  *
2218  */
2219 
2220 void gfs2_free_journal_extents(struct gfs2_jdesc *jd)
2221 {
2222 	struct gfs2_journal_extent *jext;
2223 
2224 	while (!list_empty(&jd->extent_list)) {
2225 		jext = list_entry(jd->extent_list.next, struct gfs2_journal_extent, list);
2226 		list_del(&jext->list);
2227 		kfree(jext);
2228 	}
2229 }
2230 
2231 /**
2232  * gfs2_add_jextent - Add or merge a new extent to extent cache
2233  * @jd: The journal descriptor
2234  * @lblock: The logical block at start of new extent
2235  * @dblock: The physical block at start of new extent
2236  * @blocks: Size of extent in fs blocks
2237  *
2238  * Returns: 0 on success or -ENOMEM
2239  */
2240 
2241 static int gfs2_add_jextent(struct gfs2_jdesc *jd, u64 lblock, u64 dblock, u64 blocks)
2242 {
2243 	struct gfs2_journal_extent *jext;
2244 
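	/*
	 * If the new mapping starts exactly where the last cached extent ends
	 * on disk, extend that extent instead of adding a new one.
	 */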
2245 	if (!list_empty(&jd->extent_list)) {
2246 		jext = list_entry(jd->extent_list.prev, struct gfs2_journal_extent, list);
2247 		if ((jext->dblock + jext->blocks) == dblock) {
2248 			jext->blocks += blocks;
2249 			return 0;
2250 		}
2251 	}
2252 
2253 	jext = kzalloc(sizeof(struct gfs2_journal_extent), GFP_NOFS);
2254 	if (jext == NULL)
2255 		return -ENOMEM;
2256 	jext->dblock = dblock;
2257 	jext->lblock = lblock;
2258 	jext->blocks = blocks;
2259 	list_add_tail(&jext->list, &jd->extent_list);
2260 	jd->nr_extents++;
2261 	return 0;
2262 }
2263 
2264 /**
2265  * gfs2_map_journal_extents - Cache journal bmap info
2266  * @sdp: The super block
2267  * @jd: The journal to map
2268  *
2269  * Create a reusable "extent" mapping from all logical
2270  * blocks to all physical blocks for the given journal.  This will save
2271  * us time when writing journal blocks.  Most journals will have only one
2272  * extent that maps all their logical blocks.  That's because mkfs.gfs2
2273  * arranges the journal blocks sequentially to maximize performance.
2274  * So the extent would map the first block for the entire file length.
2275  * However, gfs2_jadd can run while other file activity is going on, so
2276  * those journals may not be sequential.  Less likely is the case where
2277  * the users created their own journals by mounting the metafs and
2278  * laying it out.  But it's still possible.  These journals might have
2279  * several extents.
2280  *
2281  * Returns: 0 on success, or error on failure
2282  */
2283 
2284 int gfs2_map_journal_extents(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd)
2285 {
2286 	u64 lblock = 0;
2287 	u64 lblock_stop;
2288 	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
2289 	struct buffer_head bh;
2290 	unsigned int shift = sdp->sd_sb.sb_bsize_shift;
2291 	u64 size;
2292 	int rc;
2293 	ktime_t start, end;
2294 
2295 	start = ktime_get();
2296 	lblock_stop = i_size_read(jd->jd_inode) >> shift;
2297 	size = (lblock_stop - lblock) << shift;
2298 	jd->nr_extents = 0;
2299 	WARN_ON(!list_empty(&jd->extent_list));
2300 
2301 	do {
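	/*
	 * Each gfs2_block_map() call below maps as large a physically
	 * contiguous extent as bh.b_size allows, so a freshly created journal
	 * is normally covered in a single pass.
	 */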
2302 		bh.b_state = 0;
2303 		bh.b_blocknr = 0;
2304 		bh.b_size = size;
2305 		rc = gfs2_block_map(jd->jd_inode, lblock, &bh, 0);
2306 		if (rc || !buffer_mapped(&bh))
2307 			goto fail;
2308 		rc = gfs2_add_jextent(jd, lblock, bh.b_blocknr, bh.b_size >> shift);
2309 		if (rc)
2310 			goto fail;
2311 		size -= bh.b_size;
2312 		lblock += (bh.b_size >> ip->i_inode.i_blkbits);
2313 	} while (size > 0);
2314 
2315 	end = ktime_get();
2316 	fs_info(sdp, "journal %u mapped with %u extents in %lldms\n", jd->jd_jid,
2317 		jd->nr_extents, ktime_ms_delta(end, start));
2318 	return 0;
2319 
2320 fail:
2321 	fs_warn(sdp, "error %d mapping journal %u at offset %llu (extent %u)\n",
2322 		rc, jd->jd_jid,
2323 		(unsigned long long)(i_size_read(jd->jd_inode) - size),
2324 		jd->nr_extents);
2325 	fs_warn(sdp, "bmap=%d lblock=%llu block=%llu, state=0x%08lx, size=%llu\n",
2326 		rc, (unsigned long long)lblock, (unsigned long long)bh.b_blocknr,
2327 		bh.b_state, (unsigned long long)bh.b_size);
2328 	gfs2_free_journal_extents(jd);
2329 	return rc;
2330 }
2331 
2332 /**
2333  * gfs2_write_alloc_required - figure out if a write will require an allocation
2334  * @ip: the file being written to
2335  * @offset: the offset to write to
2336  * @len: the number of bytes being written
2337  *
2338  * Returns: 1 if an alloc is required, 0 otherwise
2339  */
2340 
2341 int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
2342 			      unsigned int len)
2343 {
2344 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
2345 	struct buffer_head bh;
2346 	unsigned int shift;
2347 	u64 lblock, lblock_stop, size;
2348 	u64 end_of_file;
2349 
2350 	if (!len)
2351 		return 0;
2352 
2353 	if (gfs2_is_stuffed(ip)) {
2354 		if (offset + len > gfs2_max_stuffed_size(ip))
2355 			return 1;
2356 		return 0;
2357 	}
2358 
2359 	shift = sdp->sd_sb.sb_bsize_shift;
2360 	BUG_ON(gfs2_is_dir(ip));
2361 	end_of_file = (i_size_read(&ip->i_inode) + sdp->sd_sb.sb_bsize - 1) >> shift;
2362 	lblock = offset >> shift;
2363 	lblock_stop = (offset + len + sdp->sd_sb.sb_bsize - 1) >> shift;
2364 	if (lblock_stop > end_of_file && ip != GFS2_I(sdp->sd_rindex))
2365 		return 1;
2366 
2367 	size = (lblock_stop - lblock) << shift;
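	/*
	 * Probe the existing mapping one extent at a time (create == 0); the
	 * first unmapped range inside the write means an allocation will be
	 * required.
	 */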
2368 	do {
2369 		bh.b_state = 0;
2370 		bh.b_size = size;
2371 		gfs2_block_map(&ip->i_inode, lblock, &bh, 0);
2372 		if (!buffer_mapped(&bh))
2373 			return 1;
2374 		size -= bh.b_size;
2375 		lblock += (bh.b_size >> ip->i_inode.i_blkbits);
2376 	} while (size > 0);
2377 
2378 	return 0;
2379 }
2380 
2381 static int stuffed_zero_range(struct inode *inode, loff_t offset, loff_t length)
2382 {
2383 	struct gfs2_inode *ip = GFS2_I(inode);
2384 	struct buffer_head *dibh;
2385 	int error;
2386 
2387 	if (offset >= inode->i_size)
2388 		return 0;
2389 	if (offset + length > inode->i_size)
2390 		length = inode->i_size - offset;
2391 
2392 	error = gfs2_meta_inode_buffer(ip, &dibh);
2393 	if (error)
2394 		return error;
2395 	gfs2_trans_add_meta(ip->i_gl, dibh);
2396 	memset(dibh->b_data + sizeof(struct gfs2_dinode) + offset, 0,
2397 	       length);
2398 	brelse(dibh);
2399 	return 0;
2400 }
2401 
2402 static int gfs2_journaled_truncate_range(struct inode *inode, loff_t offset,
2403 					 loff_t length)
2404 {
2405 	struct gfs2_sbd *sdp = GFS2_SB(inode);
2406 	loff_t max_chunk = GFS2_JTRUNC_REVOKES * sdp->sd_vfs->s_blocksize;
2407 	int error;
2408 
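	/*
	 * Truncate the page cache in chunks of at most GFS2_JTRUNC_REVOKES
	 * blocks, ending the running transaction and starting a fresh one
	 * after each chunk that touched it, so that a single transaction
	 * never accumulates too many revokes.
	 */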
2409 	while (length) {
2410 		struct gfs2_trans *tr;
2411 		loff_t chunk;
2412 		unsigned int offs;
2413 
2414 		chunk = length;
2415 		if (chunk > max_chunk)
2416 			chunk = max_chunk;
2417 
2418 		offs = offset & ~PAGE_MASK;
2419 		if (offs && chunk > PAGE_SIZE)
2420 			chunk = offs + ((chunk - offs) & PAGE_MASK);
2421 
2422 		truncate_pagecache_range(inode, offset, chunk);
2423 		offset += chunk;
2424 		length -= chunk;
2425 
2426 		tr = current->journal_info;
2427 		if (!test_bit(TR_TOUCHED, &tr->tr_flags))
2428 			continue;
2429 
2430 		gfs2_trans_end(sdp);
2431 		error = gfs2_trans_begin(sdp, RES_DINODE, GFS2_JTRUNC_REVOKES);
2432 		if (error)
2433 			return error;
2434 	}
2435 	return 0;
2436 }
2437 
2438 int __gfs2_punch_hole(struct file *file, loff_t offset, loff_t length)
2439 {
2440 	struct inode *inode = file_inode(file);
2441 	struct gfs2_inode *ip = GFS2_I(inode);
2442 	struct gfs2_sbd *sdp = GFS2_SB(inode);
2443 	int error;
2444 
2445 	if (gfs2_is_jdata(ip))
2446 		error = gfs2_trans_begin(sdp, RES_DINODE + 2 * RES_JDATA,
2447 					 GFS2_JTRUNC_REVOKES);
2448 	else
2449 		error = gfs2_trans_begin(sdp, RES_DINODE, 0);
2450 	if (error)
2451 		return error;
2452 
2453 	if (gfs2_is_stuffed(ip)) {
2454 		error = stuffed_zero_range(inode, offset, length);
2455 		if (error)
2456 			goto out;
2457 	} else {
2458 		unsigned int start_off, end_len, blocksize;
2459 
2460 		blocksize = i_blocksize(inode);
2461 		start_off = offset & (blocksize - 1);
2462 		end_len = (offset + length) & (blocksize - 1);
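		/*
		 * Worked example (illustrative, assuming 4KiB blocks):
		 * offset = 1000, length = 10000 gives start_off = 1000 and
		 * end_len = 2808, so bytes 1000..4095 of the first block and
		 * bytes 0..2807 of the block at offset 8192 are zeroed here;
		 * the fully covered block in between is left to punch_hole()
		 * below.
		 */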
2463 		if (start_off) {
2464 			unsigned int len = length;
2465 			if (length > blocksize - start_off)
2466 				len = blocksize - start_off;
2467 			error = gfs2_block_zero_range(inode, offset, len);
2468 			if (error)
2469 				goto out;
2470 			if (start_off + length < blocksize)
2471 				end_len = 0;
2472 		}
2473 		if (end_len) {
2474 			error = gfs2_block_zero_range(inode,
2475 				offset + length - end_len, end_len);
2476 			if (error)
2477 				goto out;
2478 		}
2479 	}
2480 
2481 	if (gfs2_is_jdata(ip)) {
2482 		BUG_ON(!current->journal_info);
2483 		error = gfs2_journaled_truncate_range(inode, offset, length);
		if (error)
			goto out;
2484 	} else
2485 		truncate_pagecache_range(inode, offset, offset + length - 1);
2486 
2487 	file_update_time(file);
2488 	mark_inode_dirty(inode);
2489 
2490 	if (current->journal_info)
2491 		gfs2_trans_end(sdp);
2492 
2493 	if (!gfs2_is_stuffed(ip))
2494 		error = punch_hole(ip, offset, length);
2495 
2496 out:
2497 	if (current->journal_info)
2498 		gfs2_trans_end(sdp);
2499 	return error;
2500 }
2501