xref: /openbmc/linux/fs/gfs2/aops.c (revision 350a9b0a)
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/backing-dev.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"


static void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
				   unsigned int from, unsigned int to)
{
	struct buffer_head *head = page_buffers(page);
	unsigned int bsize = head->b_size;
	struct buffer_head *bh;
	unsigned int start, end;

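	/*
	 * Buffers on a page form a circular list; the "bh != head || !start"
	 * condition lets the loop visit the head buffer on the first pass
	 * and terminate once it wraps back around to it.
	 */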
	for (bh = head, start = 0; bh != head || !start;
	     bh = bh->b_this_page, start = end) {
		end = start + bsize;
		if (end <= from || start >= to)
			continue;
		if (gfs2_is_jdata(ip))
			set_buffer_uptodate(bh);
		gfs2_trans_add_data(ip->i_gl, bh);
	}
}

/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
				  struct buffer_head *bh_result, int create)
{
	int error;

	error = gfs2_block_map(inode, lblock, bh_result, 0);
	if (error)
		return error;
	if (!buffer_mapped(bh_result))
		return -EIO;
	return 0;
}

static int gfs2_get_block_direct(struct inode *inode, sector_t lblock,
				 struct buffer_head *bh_result, int create)
{
	return gfs2_block_map(inode, lblock, bh_result, 0);
}

/**
 * gfs2_writepage_common - Common bits of writepage
 * @page: The page to be written
 * @wbc: The writeback control
 *
 * Returns: 1 if the page should be written, or 0 if it was dealt with
 * here (redirtied or invalidated) and has already been unlocked.
 */

static int gfs2_writepage_common(struct page *page,
				 struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	unsigned offset;

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
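	/*
	 * We are already inside a transaction on this task, so we must not
	 * recurse into the journal here; redirty the page and let it be
	 * written back later.
	 */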
	if (current->journal_info)
		goto redirty;
	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_CACHE_SIZE-1);
	if (page->index > end_index || (page->index == end_index && !offset)) {
		page->mapping->a_ops->invalidatepage(page, 0);
		goto out;
	}
	return 1;
redirty:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}

/**
 * gfs2_writeback_writepage - Write page for writeback mappings
 * @page: The page
 * @wbc: The writeback control
 *
 * Returns: errno
 */

static int gfs2_writeback_writepage(struct page *page,
				    struct writeback_control *wbc)
{
	int ret;

	ret = gfs2_writepage_common(page, wbc);
	if (ret <= 0)
		return ret;

	return nobh_writepage(page, gfs2_get_block_noalloc, wbc);
}

/**
 * gfs2_ordered_writepage - Write page for ordered data files
 * @page: The page to write
 * @wbc: The writeback control
 *
 * Returns: errno
 */

static int gfs2_ordered_writepage(struct page *page,
				  struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	int ret;

	ret = gfs2_writepage_common(page, wbc);
	if (ret <= 0)
		return ret;

	if (!page_has_buffers(page)) {
		create_empty_buffers(page, inode->i_sb->s_blocksize,
				     (1 << BH_Dirty)|(1 << BH_Uptodate));
	}
	gfs2_page_add_databufs(ip, page, 0, inode->i_sb->s_blocksize-1);
	return block_write_full_page(page, gfs2_get_block_noalloc, wbc);
}

/**
 * __gfs2_jdata_writepage - The core of jdata writepage
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is shared between writepage and writepages and implements the
 * core of the writepage operation. If a transaction is required then
 * PageChecked will have been set and the transaction will have
 * already been started before this is called.
 */

static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (PageChecked(page)) {
		ClearPageChecked(page);
		if (!page_has_buffers(page)) {
			create_empty_buffers(page, inode->i_sb->s_blocksize,
					     (1 << BH_Dirty)|(1 << BH_Uptodate));
		}
		gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize-1);
	}
	return block_write_full_page(page, gfs2_get_block_noalloc, wbc);
}

/**
 * gfs2_jdata_writepage - Write complete page
 * @page: Page to write
 * @wbc: The writeback control
 *
 * Returns: errno
 */

static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int ret;
	int done_trans = 0;
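
	/*
	 * PageChecked means the page must go through a transaction. That
	 * is only worth doing for data integrity syncs; otherwise redirty
	 * the page and let a later writeback pass pick it up.
	 */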
	if (PageChecked(page)) {
		if (wbc->sync_mode != WB_SYNC_ALL)
			goto out_ignore;
		ret = gfs2_trans_begin(sdp, RES_DINODE + 1, 0);
		if (ret)
			goto out_ignore;
		done_trans = 1;
	}
	ret = gfs2_writepage_common(page, wbc);
	if (ret > 0)
		ret = __gfs2_jdata_writepage(page, wbc);
	if (done_trans)
		gfs2_trans_end(sdp);
	return ret;

out_ignore:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}

/**
 * gfs2_writeback_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * For the data=writeback case we can already ignore buffer heads
 * and write whole extents at once. This greatly reduces the number
 * of I/O requests we send and the number of bmap calls we make.
 */
static int gfs2_writeback_writepages(struct address_space *mapping,
				     struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, gfs2_get_block_noalloc);
}

/**
 * gfs2_write_jdata_pagevec - Write back a pagevec's worth of pages
 * @mapping: The mapping
 * @wbc: The writeback control
 * @pvec: The vector of pages
 * @nr_pages: The number of pages to write
 * @end: The last page index in the range
 *
 * Returns: non-zero if loop should terminate, zero otherwise
 */

static int gfs2_write_jdata_pagevec(struct address_space *mapping,
				    struct writeback_control *wbc,
				    struct pagevec *pvec,
				    int nr_pages, pgoff_t end)
{
	struct inode *inode = mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	unsigned offset = i_size & (PAGE_CACHE_SIZE-1);
	unsigned nrblocks = nr_pages * (PAGE_CACHE_SIZE/inode->i_sb->s_blocksize);
	int i;
	int ret;

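	/*
	 * Reserve journal space for every block in the pagevec before any
	 * page locks are taken; see gfs2_write_cache_jdata() below for why
	 * this ordering matters.
	 */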
	ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
	if (ret < 0)
		return ret;

	for (i = 0; i < nr_pages; i++) {
		struct page *page = pvec->pages[i];

		lock_page(page);

		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			continue;
		}

		if (!wbc->range_cyclic && page->index > end) {
			ret = 1;
			unlock_page(page);
			continue;
		}

		if (wbc->sync_mode != WB_SYNC_NONE)
			wait_on_page_writeback(page);

		if (PageWriteback(page) ||
		    !clear_page_dirty_for_io(page)) {
			unlock_page(page);
			continue;
		}

		/* Is the page fully outside i_size? (truncate in progress) */
		if (page->index > end_index || (page->index == end_index && !offset)) {
			page->mapping->a_ops->invalidatepage(page, 0);
			unlock_page(page);
			continue;
		}

		ret = __gfs2_jdata_writepage(page, wbc);

		if (ret || (--(wbc->nr_to_write) <= 0))
			ret = 1;
	}
	gfs2_trans_end(sdp);
	return ret;
}

/**
 * gfs2_write_cache_jdata - Like write_cache_pages but different
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */

static int gfs2_write_cache_jdata(struct address_space *mapping,
				  struct writeback_control *wbc)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t index;
	pgoff_t end;
	int scanned = 0;
	int range_whole = 0;

	pagevec_init(&pvec, 0);
	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		scanned = 1;
	}

retry:
	while (!done && (index <= end) &&
	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
					      PAGECACHE_TAG_DIRTY,
					      min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
		scanned = 1;
		ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, end);
		if (ret)
			done = 1;
		if (ret > 0)
			ret = 0;

		pagevec_release(&pvec);
		cond_resched();
	}

	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = 1;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;
	return ret;
}


/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * Returns: errno
 */

static int gfs2_jdata_writepages(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	int ret;

	ret = gfs2_write_cache_jdata(mapping, wbc);
	if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
		gfs2_log_flush(sdp, ip->i_gl);
		ret = gfs2_write_cache_jdata(mapping, wbc);
	}
	return ret;
}

/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Returns: errno
 */

static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *dibh;
	u64 dsize = i_size_read(&ip->i_inode);
	void *kaddr;
	int error;

	/*
	 * Due to the order of unstuffing files and ->fault(), we can be
	 * asked for a zero page in the case of a stuffed file being extended,
	 * so we need to supply one here. It doesn't happen often.
	 */
	if (unlikely(page->index)) {
		zero_user(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
		return 0;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	kaddr = kmap_atomic(page);
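	/*
	 * Stuffed data lives directly after the on-disk dinode header, so
	 * clamp the copy to what actually fits in the inode block.
	 */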
	if (dsize > (dibh->b_size - sizeof(struct gfs2_dinode)))
		dsize = (dibh->b_size - sizeof(struct gfs2_dinode));
	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
	memset(kaddr + dsize, 0, PAGE_CACHE_SIZE - dsize);
	kunmap_atomic(kaddr);
	flush_dcache_page(page);
	brelse(dibh);
	SetPageUptodate(page);

	return 0;
}


/**
 * __gfs2_readpage - readpage
 * @file: The file to read a page for
 * @page: The page to read
 *
 * This is the core of gfs2's readpage. It's used by the internal file
 * reading code as in that case we already hold the glock. It's also
 * called by gfs2_readpage() once the required lock has been granted.
 */

static int __gfs2_readpage(void *file, struct page *page)
{
	struct gfs2_inode *ip = GFS2_I(page->mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	int error;

	if (gfs2_is_stuffed(ip)) {
		error = stuffed_readpage(ip, page);
		unlock_page(page);
	} else {
		error = mpage_readpage(page, gfs2_block_map);
	}

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	return error;
}

/**
 * gfs2_readpage - read a page of a file
 * @file: The file to read
 * @page: The page of the file
 *
 * This deals with the locking required. We have to unlock and
 * relock the page in order to get the locking in the right
 * order.
 */

static int gfs2_readpage(struct file *file, struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder gh;
	int error;

	unlock_page(page);
	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	error = gfs2_glock_nq(&gh);
	if (unlikely(error))
		goto out;
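	/*
	 * The page was unlocked while we waited for the glock, so it may
	 * have been truncated or brought uptodate by another thread; in
	 * either case AOP_TRUNCATED_PAGE asks the caller to retry.
	 */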
	error = AOP_TRUNCATED_PAGE;
	lock_page(page);
	if (page->mapping == mapping && !PageUptodate(page))
		error = __gfs2_readpage(file, page);
	else
		unlock_page(page);
	gfs2_glock_dq(&gh);
out:
	gfs2_holder_uninit(&gh);
	if (error && error != AOP_TRUNCATED_PAGE)
		lock_page(page);
	return error;
}

/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 *
 * Returns: The number of bytes read, or errno on failure
 */

int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
                       unsigned size)
{
	struct address_space *mapping = ip->i_inode.i_mapping;
	unsigned long index = *pos / PAGE_CACHE_SIZE;
	unsigned offset = *pos & (PAGE_CACHE_SIZE - 1);
	unsigned copied = 0;
	unsigned amt;
	struct page *page;
	void *p;

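	/*
	 * Copy at most one page per iteration; only the first page can
	 * start at a non-zero offset.
	 */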
	do {
		amt = size - copied;
		if (offset + size > PAGE_CACHE_SIZE)
			amt = PAGE_CACHE_SIZE - offset;
		page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
		if (IS_ERR(page))
			return PTR_ERR(page);
		p = kmap_atomic(page);
		memcpy(buf + copied, p + offset, amt);
		kunmap_atomic(p);
		mark_page_accessed(page);
		page_cache_release(page);
		copied += amt;
		index++;
		offset = 0;
	} while (copied < size);
	(*pos) += size;
	return size;
}

/**
 * gfs2_readpages - Read a bunch of pages at once
 * @file: The file being read
 * @mapping: The address space being read from
 * @pages: The list of pages to read
 * @nr_pages: The number of pages
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore any things
 *    which are slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readpages() does most of the heavy lifting in the common case.
 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 */

static int gfs2_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_holder gh;
	int ret;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (unlikely(ret))
		goto out_uninit;
	if (!gfs2_is_stuffed(ip))
		ret = mpage_readpages(mapping, pages, nr_pages, gfs2_block_map);
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		ret = -EIO;
	return ret;
}

/**
 * gfs2_write_begin - Begin to write to a file
 * @file: The file to write to
 * @mapping: The mapping in which to write
 * @pos: The file offset at which to start writing
 * @len: Length of the write
 * @flags: Various flags
 * @pagep: Pointer to return the page
 * @fsdata: Pointer to return fs data (unused by GFS2)
 *
 * Returns: errno
 */

static int gfs2_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
	unsigned requested = 0;
	int alloc_required;
	int error = 0;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
	struct page *page;

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
	error = gfs2_glock_nq(&ip->i_gh);
	if (unlikely(error))
		goto out_uninit;
	if (&ip->i_inode == sdp->sd_rindex) {
		error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, &m_ip->i_gh);
		if (unlikely(error)) {
			gfs2_glock_dq(&ip->i_gh);
			goto out_uninit;
		}
	}

	alloc_required = gfs2_write_alloc_required(ip, pos, len);

	if (alloc_required || gfs2_is_jdata(ip))
		gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks);

	if (alloc_required) {
		error = gfs2_quota_lock_check(ip);
		if (error)
			goto out_unlock;

		requested = data_blocks + ind_blocks;
		error = gfs2_inplace_reserve(ip, requested, 0);
		if (error)
			goto out_qunlock;
	}

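	/*
	 * Estimate how many journal blocks the transaction may touch:
	 * the dinode, any indirect blocks, the data blocks themselves
	 * (jdata only), statfs/quota updates and resource group headers.
	 */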
	rblocks = RES_DINODE + ind_blocks;
	if (gfs2_is_jdata(ip))
		rblocks += data_blocks ? data_blocks : 1;
	if (ind_blocks || data_blocks)
		rblocks += RES_STATFS + RES_QUOTA;
	if (&ip->i_inode == sdp->sd_rindex)
		rblocks += 2 * RES_STATFS;
	if (alloc_required)
		rblocks += gfs2_rg_blocks(ip, requested);

	error = gfs2_trans_begin(sdp, rblocks,
				 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
	if (error)
		goto out_trans_fail;

	error = -ENOMEM;
	flags |= AOP_FLAG_NOFS;
	page = grab_cache_page_write_begin(mapping, index, flags);
	*pagep = page;
	if (unlikely(!page))
		goto out_endtrans;

	if (gfs2_is_stuffed(ip)) {
		error = 0;
		if (pos + len > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) {
			error = gfs2_unstuff_dinode(ip, page);
			if (error == 0)
				goto prepare_write;
		} else if (!PageUptodate(page)) {
			error = stuffed_readpage(ip, page);
		}
		goto out;
	}

prepare_write:
	error = __block_write_begin(page, from, len, gfs2_block_map);
out:
	if (error == 0)
		return 0;

	unlock_page(page);
	page_cache_release(page);

	gfs2_trans_end(sdp);
	if (pos + len > ip->i_inode.i_size)
		gfs2_trim_blocks(&ip->i_inode);
	goto out_trans_fail;

out_endtrans:
	gfs2_trans_end(sdp);
out_trans_fail:
	if (alloc_required) {
		gfs2_inplace_release(ip);
out_qunlock:
		gfs2_quota_unlock(ip);
	}
out_unlock:
	if (&ip->i_inode == sdp->sd_rindex) {
		gfs2_glock_dq(&m_ip->i_gh);
		gfs2_holder_uninit(&m_ip->i_gh);
	}
	gfs2_glock_dq(&ip->i_gh);
out_uninit:
	gfs2_holder_uninit(&ip->i_gh);
	return error;
}

/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
static void adjust_fs_space(struct inode *inode)
{
	struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh, *l_bh;
	u64 fs_total, new_free;

	/* Total up the file system space, according to the latest rindex. */
	fs_total = gfs2_ri_total(sdp);
	if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
		return;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (fs_total > (m_sc->sc_total + l_sc->sc_total))
		new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
	else
		new_free = 0;
	spin_unlock(&sdp->sd_statfs_spin);
	fs_warn(sdp, "File system extended by %llu blocks.\n",
		(unsigned long long)new_free);
	gfs2_statfs_change(sdp, new_free, new_free, 0);

	if (gfs2_meta_inode_buffer(l_ip, &l_bh) != 0)
		goto out;
	update_statfs(sdp, m_bh, l_bh);
	brelse(l_bh);
out:
	brelse(m_bh);
}

/**
 * gfs2_stuffed_write_end - Write end for stuffed files
 * @inode: The inode
 * @dibh: The buffer_head containing the on-disk inode
 * @pos: The file position
 * @len: The length of the write
 * @copied: How much was actually copied by the VFS
 * @page: The page
 *
 * This copies the data from the page into the inode block after
 * the inode data structure itself.
 *
 * Returns: The number of bytes copied
 */
static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	u64 to = pos + copied;
	void *kaddr;
	unsigned char *buf = dibh->b_data + sizeof(struct gfs2_dinode);

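	/* The write must fit in the inode block after the dinode header */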
	BUG_ON((pos + len) > (dibh->b_size - sizeof(struct gfs2_dinode)));
	kaddr = kmap_atomic(page);
	memcpy(buf + pos, kaddr + pos, copied);
	memset(kaddr + pos + copied, 0, len - copied);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);

	if (!PageUptodate(page))
		SetPageUptodate(page);
	unlock_page(page);
	page_cache_release(page);

	if (copied) {
		if (inode->i_size < to)
			i_size_write(inode, to);
		mark_inode_dirty(inode);
	}

	if (inode == sdp->sd_rindex) {
		adjust_fs_space(inode);
		sdp->sd_rindex_uptodate = 0;
	}

	brelse(dibh);
	gfs2_trans_end(sdp);
	if (inode == sdp->sd_rindex) {
		gfs2_glock_dq(&m_ip->i_gh);
		gfs2_holder_uninit(&m_ip->i_gh);
	}
	gfs2_glock_dq(&ip->i_gh);
	gfs2_holder_uninit(&ip->i_gh);
	return copied;
}

/**
 * gfs2_write_end
 * @file: The file to write to
 * @mapping: The address space to write to
 * @pos: The file position
 * @len: The length of the data
 * @copied: How much was actually copied by the VFS
 * @page: The page that has been written
 * @fsdata: The fsdata (unused in GFS2)
 *
 * The main write_end function for GFS2. We have a separate one for
 * stuffed files as they are slightly different, otherwise we just
 * put our locking around the VFS provided functions.
 *
 * Returns: The number of bytes copied, or errno on failure
 */

static int gfs2_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct buffer_head *dibh;
	unsigned int from = pos & (PAGE_CACHE_SIZE - 1);
	unsigned int to = from + len;
	int ret;

	BUG_ON(gfs2_glock_is_locked_by_me(ip->i_gl) == NULL);

	ret = gfs2_meta_inode_buffer(ip, &dibh);
	if (unlikely(ret)) {
		unlock_page(page);
		page_cache_release(page);
		goto failed;
	}

	gfs2_trans_add_meta(ip->i_gl, dibh);

	if (gfs2_is_stuffed(ip))
		return gfs2_stuffed_write_end(inode, dibh, pos, len, copied, page);

	if (!gfs2_is_writeback(ip))
		gfs2_page_add_databufs(ip, page, from, to);

	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);

	if (inode == sdp->sd_rindex) {
		adjust_fs_space(inode);
		sdp->sd_rindex_uptodate = 0;
	}

	brelse(dibh);
failed:
	gfs2_trans_end(sdp);
	gfs2_inplace_release(ip);
	if (ip->i_res->rs_qa_qd_num)
		gfs2_quota_unlock(ip);
	if (inode == sdp->sd_rindex) {
		gfs2_glock_dq(&m_ip->i_gh);
		gfs2_holder_uninit(&m_ip->i_gh);
	}
	gfs2_glock_dq(&ip->i_gh);
	gfs2_holder_uninit(&ip->i_gh);
	return ret;
}

/**
 * gfs2_set_page_dirty - Page dirtying function
 * @page: The page to dirty
 *
 * Returns: 1 if it dirtied the page, or 0 otherwise
 */

static int gfs2_set_page_dirty(struct page *page)
{
	SetPageChecked(page);
	return __set_page_dirty_buffers(page);
}

/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder i_gh;
	sector_t dblock = 0;
	int error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return 0;

	if (!gfs2_is_stuffed(ip))
		dblock = generic_block_bmap(mapping, lblock, gfs2_block_map);

	gfs2_glock_dq_uninit(&i_gh);

	return dblock;
}

static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	lock_buffer(bh);
	gfs2_log_lock(sdp);
	clear_buffer_dirty(bh);
	bd = bh->b_private;
	if (bd) {
		if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
			list_del_init(&bd->bd_list);
		else
			gfs2_remove_from_journal(bh, current->journal_info, 0);
	}
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	gfs2_log_unlock(sdp);
	unlock_buffer(bh);
}

static void gfs2_invalidatepage(struct page *page, unsigned long offset)
{
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	struct buffer_head *bh, *head;
	unsigned long pos = 0;

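	/*
	 * For a partial invalidate (offset != 0), only buffers that start
	 * at or beyond @offset are discarded; earlier buffers are kept.
	 */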
	BUG_ON(!PageLocked(page));
	if (offset == 0)
		ClearPageChecked(page);
	if (!page_has_buffers(page))
		goto out;

	bh = head = page_buffers(page);
	do {
		if (offset <= pos)
			gfs2_discard(sdp, bh);
		pos += bh->b_size;
		bh = bh->b_this_page;
	} while (bh != head);
out:
	if (offset == 0)
		try_to_release_page(page, 0);
}

/**
 * gfs2_ok_for_dio - check that dio is valid on this file
 * @ip: The inode
 * @rw: READ or WRITE
 * @offset: The offset at which we are reading or writing
 *
 * Returns: 0 (to ignore the i/o request and thus fall back to buffered i/o)
 *          1 (to accept the i/o request)
 */
static int gfs2_ok_for_dio(struct gfs2_inode *ip, int rw, loff_t offset)
{
	/*
	 * Should we return an error here? I can't see that O_DIRECT for
	 * a stuffed file makes any sense. For now we'll silently fall
	 * back to buffered I/O
	 */
	if (gfs2_is_stuffed(ip))
		return 0;

	if (offset >= i_size_read(&ip->i_inode))
		return 0;
	return 1;
}


static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
			      const struct iovec *iov, loff_t offset,
			      unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int rv;

	/*
	 * Deferred lock, even if it's a write, since we do no allocation on
	 * this path. All we need to change is atime, and this lock mode
	 * ensures that other nodes have flushed their buffered read caches
	 * (i.e. their page cache entries for this inode). We do not,
	 * unfortunately, have the option of only flushing a range like
	 * the VFS does.
	 */
	gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, &gh);
	rv = gfs2_glock_nq(&gh);
	if (rv)
		return rv;
	rv = gfs2_ok_for_dio(ip, rw, offset);
	if (rv != 1)
		goto out; /* dio not valid, fall back to buffered i/o */

	rv = __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
				  offset, nr_segs, gfs2_get_block_direct,
				  NULL, NULL, 0);
out:
	gfs2_glock_dq(&gh);
	gfs2_holder_uninit(&gh);
	return rv;
}

/**
 * gfs2_releasepage - free the metadata associated with a page
 * @page: the page that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Call try_to_free_buffers() if the buffers in this page can be
 * released.
 *
 * Returns: 0 if the buffers cannot be released, otherwise the
 *          result of try_to_free_buffers()
 */

int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
{
	struct address_space *mapping = page->mapping;
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;

	if (!page_has_buffers(page))
		return 0;

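	/*
	 * First pass: with the log and AIL locks held, check that no
	 * buffer on the page is still referenced, pinned, dirty or on
	 * the AIL.
	 */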
	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	head = bh = page_buffers(page);
	do {
		if (atomic_read(&bh->b_count))
			goto cannot_release;
		bd = bh->b_private;
		if (bd && bd->bd_ail)
			goto cannot_release;
		if (buffer_pinned(bh) || buffer_dirty(bh))
			goto not_possible;
		bh = bh->b_this_page;
	} while (bh != head);
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);

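	/* Second pass: detach and free the gfs2_bufdata from each buffer */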
	head = bh = page_buffers(page);
	do {
		gfs2_log_lock(sdp);
		bd = bh->b_private;
		if (bd) {
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			if (!list_empty(&bd->bd_list)) {
				if (!buffer_pinned(bh))
					list_del_init(&bd->bd_list);
				else
					bd = NULL;
			}
			if (bd)
				bd->bd_bh = NULL;
			bh->b_private = NULL;
		}
		gfs2_log_unlock(sdp);
		if (bd)
			kmem_cache_free(gfs2_bufdata_cachep, bd);

		bh = bh->b_this_page;
	} while (bh != head);

	return try_to_free_buffers(page);

not_possible: /* Should never happen */
	WARN_ON(buffer_dirty(bh));
	WARN_ON(buffer_pinned(bh));
cannot_release:
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
	return 0;
}

static const struct address_space_operations gfs2_writeback_aops = {
	.writepage = gfs2_writeback_writepage,
	.writepages = gfs2_writeback_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.write_begin = gfs2_write_begin,
	.write_end = gfs2_write_end,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.direct_IO = gfs2_direct_IO,
	.migratepage = buffer_migrate_page,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations gfs2_ordered_aops = {
	.writepage = gfs2_ordered_writepage,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.write_begin = gfs2_write_begin,
	.write_end = gfs2_write_end,
	.set_page_dirty = gfs2_set_page_dirty,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.direct_IO = gfs2_direct_IO,
	.migratepage = buffer_migrate_page,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations gfs2_jdata_aops = {
	.writepage = gfs2_jdata_writepage,
	.writepages = gfs2_jdata_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.write_begin = gfs2_write_begin,
	.write_end = gfs2_write_end,
	.set_page_dirty = gfs2_set_page_dirty,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

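/**
 * gfs2_set_aops - Select the address space operations for an inode
 * @inode: The inode
 *
 * Picks the aops table matching the inode's data journaling mode
 * (writeback, ordered or jdata).
 */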
void gfs2_set_aops(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);

	if (gfs2_is_writeback(ip))
		inode->i_mapping->a_ops = &gfs2_writeback_aops;
	else if (gfs2_is_ordered(ip))
		inode->i_mapping->a_ops = &gfs2_ordered_aops;
	else if (gfs2_is_jdata(ip))
		inode->i_mapping->a_ops = &gfs2_jdata_aops;
	else
		BUG();
}