/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <trace/events/writeback.h>
#include <linux/sched/signal.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"
#include "aops.h"

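/**
 * gfs2_page_add_databufs - add a page's buffers to the current transaction
 * @ip: The inode
 * @page: The (locked) page, which already has buffer heads attached
 * @from: Offset of the first byte of interest within the page
 * @len: Number of bytes of interest
 *
 * Walks the page's buffer list and, for each buffer overlapping the byte
 * range [@from, @from + @len), marks it uptodate and adds it to the
 * current transaction via gfs2_trans_add_data().  For example, on a 4KiB
 * page with 512-byte blocks, @from = 0 and @len = PAGE_SIZE touches all
 * eight buffers, while @from = 512 and @len = 1024 touches only the
 * second and third.
 */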
void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
			    unsigned int from, unsigned int len)
{
	struct buffer_head *head = page_buffers(page);
	unsigned int bsize = head->b_size;
	struct buffer_head *bh;
	unsigned int to = from + len;
	unsigned int start, end;

	for (bh = head, start = 0; bh != head || !start;
	     bh = bh->b_this_page, start = end) {
		end = start + bsize;
		if (end <= from)
			continue;
		if (start >= to)
			break;
		set_buffer_uptodate(bh);
		gfs2_trans_add_data(ip->i_gl, bh);
	}
}

/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */
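/*
 * Note: this is the get_block_t callback handed to nobh_writepage(),
 * mpage_writepages() and gfs2_write_full_page() below.  Those writeback
 * paths only ever see data whose blocks were already allocated, so
 * @create is ignored and an unmapped result is treated as an error.
 */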

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
				  struct buffer_head *bh_result, int create)
{
	int error;

	error = gfs2_block_map(inode, lblock, bh_result, 0);
	if (error)
		return error;
	if (!buffer_mapped(bh_result))
		return -EIO;
	return 0;
}

/**
 * gfs2_writepage_common - Common bits of writepage
 * @page: The page to be written
 * @wbc: The writeback control
 *
 * Returns: 1 if the page is ready to be written, or 0 if it has been
 * dealt with here (redirtied or invalidated) and already unlocked.
 */

static int gfs2_writepage_common(struct page *page,
				 struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_SHIFT;
	unsigned offset;

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (current->journal_info)
		goto redirty;
	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_SIZE-1);
	if (page->index > end_index || (page->index == end_index && !offset)) {
		page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
		goto out;
	}
	return 1;
redirty:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}

/**
 * gfs2_writepage - Write page for writeback mappings
 * @page: The page
 * @wbc: The writeback control
 *
 * Returns: errno
 */

static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	ret = gfs2_writepage_common(page, wbc);
	if (ret <= 0)
		return ret;

	return nobh_writepage(page, gfs2_get_block_noalloc, wbc);
}

/* This is the same as calling block_write_full_page, but it also
 * writes pages outside of i_size
 */
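/*
 * Example of the straddling-page case handled below: with PAGE_SIZE of
 * 4096 and i_size of 10000, end_index is 2 and offset is 1808, so when
 * page index 2 is written, bytes 1808..4095 of that page are zeroed
 * before the page goes to disk.
 */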
static int gfs2_write_full_page(struct page *page, get_block_t *get_block,
				struct writeback_control *wbc)
{
	struct inode * const inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = i_size >> PAGE_SHIFT;
	unsigned offset;

	/*
	 * The page straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the page size.  For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	offset = i_size & (PAGE_SIZE-1);
	if (page->index == end_index && offset)
		zero_user_segment(page, offset, PAGE_SIZE);

	return __block_write_full_page(inode, page, get_block, wbc,
				       end_buffer_async_write);
}

/**
 * __gfs2_jdata_writepage - The core of jdata writepage
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is shared between writepage and writepages and implements the
 * core of the writepage operation. If a transaction is required then
 * PageChecked will have been set and the transaction will have
 * already been started before this is called.
 */
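/*
 * Note: PageChecked is set by jdata_set_page_dirty() further down in
 * this file whenever a jdata page is dirtied, so a set flag here means
 * the page's buffers still need to be added to the journal before the
 * page is written out.
 */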

static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (PageChecked(page)) {
		ClearPageChecked(page);
		if (!page_has_buffers(page)) {
			create_empty_buffers(page, inode->i_sb->s_blocksize,
					     BIT(BH_Dirty)|BIT(BH_Uptodate));
		}
		gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize);
	}
	return gfs2_write_full_page(page, gfs2_get_block_noalloc, wbc);
}

/**
 * gfs2_jdata_writepage - Write complete page
 * @page: Page to write
 * @wbc: The writeback control
 *
 * Returns: errno
 */

static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int ret;

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (PageChecked(page) || current->journal_info)
		goto out_ignore;
	ret = __gfs2_jdata_writepage(page, wbc);
	return ret;

out_ignore:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}

/**
 * gfs2_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * Used for both ordered and writeback modes.
 */
static int gfs2_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	int ret = mpage_writepages(mapping, wbc, gfs2_get_block_noalloc);

	/*
	 * Even if we didn't write any pages here, we might still be holding
	 * dirty pages in the ail. We forcibly flush the ail because we don't
	 * want balance_dirty_pages() to loop indefinitely trying to write out
	 * pages held in the ail that it can't find.
	 */
	if (ret == 0)
		set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);

	return ret;
}

/**
 * gfs2_write_jdata_pagevec - Write back a pagevec's worth of pages
 * @mapping: The mapping
 * @wbc: The writeback control
 * @pvec: The vector of pages
 * @nr_pages: The number of pages to write
 * @done_index: Updated to the index of the last page processed
 *
 * Returns: non-zero if loop should terminate, zero otherwise
 */
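/*
 * The transaction below is sized on the assumption that every block of
 * every page may need journaling: nrblocks is nr_pages times the number
 * of filesystem blocks per page.  For example, with 4KiB pages, a 1KiB
 * block size and a 16-page pagevec, 64 blocks are reserved.
 */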

static int gfs2_write_jdata_pagevec(struct address_space *mapping,
				    struct writeback_control *wbc,
				    struct pagevec *pvec,
				    int nr_pages,
				    pgoff_t *done_index)
{
	struct inode *inode = mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned nrblocks = nr_pages * (PAGE_SIZE/inode->i_sb->s_blocksize);
	int i;
	int ret;

	ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
	if (ret < 0)
		return ret;

	for (i = 0; i < nr_pages; i++) {
		struct page *page = pvec->pages[i];

		*done_index = page->index;

		lock_page(page);

		if (unlikely(page->mapping != mapping)) {
continue_unlock:
			unlock_page(page);
			continue;
		}

		if (!PageDirty(page)) {
			/* someone wrote it for us */
			goto continue_unlock;
		}

		if (PageWriteback(page)) {
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			else
				goto continue_unlock;
		}

		BUG_ON(PageWriteback(page));
		if (!clear_page_dirty_for_io(page))
			goto continue_unlock;

		trace_wbc_writepage(wbc, inode_to_bdi(inode));

		ret = __gfs2_jdata_writepage(page, wbc);
		if (unlikely(ret)) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				unlock_page(page);
				ret = 0;
			} else {

				/*
				 * done_index is set past this page,
				 * so media errors will not choke
				 * background writeout for the entire
				 * file. This has consequences for
				 * range_cyclic semantics (ie. it may
				 * not be suitable for data integrity
				 * writeout).
				 */
				*done_index = page->index + 1;
				ret = 1;
				break;
			}
		}

		/*
		 * We stop writing back only if we are not doing
		 * integrity sync. In case of integrity sync we have to
		 * keep going until we have written all the pages
		 * we tagged for writeback prior to entering this loop.
		 */
		if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) {
			ret = 1;
			break;
		}

	}
	gfs2_trans_end(sdp);
	return ret;
}

/**
 * gfs2_write_cache_jdata - Like write_cache_pages but different
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */
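/*
 * The range_cyclic handling below mirrors write_cache_pages(): writeback
 * starts from mapping->writeback_index; if the end of the file is
 * reached with work left undone, the scan wraps once ("cycled") back to
 * index 0 and runs up to where it started.
 */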

static int gfs2_write_cache_jdata(struct address_space *mapping,
				  struct writeback_control *wbc)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	xa_mark_t tag;

	pagevec_init(&pvec);
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;

retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
				tag);
		if (nr_pages == 0)
			break;

		ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, &done_index);
		if (ret)
			done = 1;
		if (ret > 0)
			ret = 0;
		pagevec_release(&pvec);
		cond_resched();
	}

	if (!cycled && !done) {
		/*
		 * range_cyclic:
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}

/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * Returns: errno
 */

static int gfs2_jdata_writepages(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	int ret;

	ret = gfs2_write_cache_jdata(mapping, wbc);
	if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
		gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_JDATA_WPAGES);
		ret = gfs2_write_cache_jdata(mapping, wbc);
	}
	return ret;
}

/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Returns: errno
 */
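/*
 * A "stuffed" file is one small enough to live entirely in the inode's
 * own disk block, immediately after struct gfs2_dinode; the largest such
 * file is gfs2_max_stuffed_size(ip) bytes.  Reading one is therefore a
 * single memcpy() out of the dinode buffer plus zero-filling the rest of
 * the page.
 */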

int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *dibh;
	u64 dsize = i_size_read(&ip->i_inode);
	void *kaddr;
	int error;

	/*
	 * Due to the order of unstuffing files and ->fault(), we can be
	 * asked for a zero page in the case of a stuffed file being extended,
	 * so we need to supply one here. It doesn't happen often.
	 */
	if (unlikely(page->index)) {
		zero_user(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		return 0;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	kaddr = kmap_atomic(page);
	if (dsize > gfs2_max_stuffed_size(ip))
		dsize = gfs2_max_stuffed_size(ip);
	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
	memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
	kunmap_atomic(kaddr);
	flush_dcache_page(page);
	brelse(dibh);
	SetPageUptodate(page);

	return 0;
}

/**
 * __gfs2_readpage - readpage
 * @file: The file to read a page for
 * @page: The page to read
 *
 * This is the core of gfs2's readpage. It's used by the internal file
 * reading code as in that case we already hold the glock. Also it's
 * called by gfs2_readpage() once the required lock has been granted.
 */

static int __gfs2_readpage(void *file, struct page *page)
{
	struct gfs2_inode *ip = GFS2_I(page->mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	int error;

	if (i_blocksize(page->mapping->host) == PAGE_SIZE &&
	    !page_has_buffers(page)) {
		error = iomap_readpage(page, &gfs2_iomap_ops);
	} else if (gfs2_is_stuffed(ip)) {
		error = stuffed_readpage(ip, page);
		unlock_page(page);
	} else {
		error = mpage_readpage(page, gfs2_block_map);
	}

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	return error;
}

/**
 * gfs2_readpage - read a page of a file
 * @file: The file to read
 * @page: The page of the file
 *
 * This deals with the locking required. We have to unlock and
 * relock the page in order to get the locking in the right
 * order.
 */

static int gfs2_readpage(struct file *file, struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder gh;
	int error;

	unlock_page(page);
	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	error = gfs2_glock_nq(&gh);
	if (unlikely(error))
		goto out;
	error = AOP_TRUNCATED_PAGE;
	lock_page(page);
	if (page->mapping == mapping && !PageUptodate(page))
		error = __gfs2_readpage(file, page);
	else
		unlock_page(page);
	gfs2_glock_dq(&gh);
out:
	gfs2_holder_uninit(&gh);
	if (error && error != AOP_TRUNCATED_PAGE)
		lock_page(page);
	return error;
}

/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 */
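/*
 * Example of the copy loop below: reading 100 bytes starting at position
 * 4090 with 4KiB pages copies 6 bytes from the end of page 0 (offset
 * 4090, amt = 4096 - 4090) and the remaining 94 bytes from the start of
 * page 1.
 */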

int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
                       unsigned size)
{
	struct address_space *mapping = ip->i_inode.i_mapping;
	unsigned long index = *pos / PAGE_SIZE;
	unsigned offset = *pos & (PAGE_SIZE - 1);
	unsigned copied = 0;
	unsigned amt;
	struct page *page;
	void *p;

	do {
		amt = size - copied;
		if (offset + size > PAGE_SIZE)
			amt = PAGE_SIZE - offset;
		page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
		if (IS_ERR(page))
			return PTR_ERR(page);
		p = kmap_atomic(page);
		memcpy(buf + copied, p + offset, amt);
		kunmap_atomic(p);
		put_page(page);
		copied += amt;
		index++;
		offset = 0;
	} while (copied < size);
	(*pos) += size;
	return size;
}

/**
 * gfs2_readpages - Read a bunch of pages at once
 * @file: The file to read from
 * @mapping: Address space info
 * @pages: List of pages to read
 * @nr_pages: Number of pages to read
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore any things
 *    which are slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readpages() does most of the heavy lifting in the common case.
 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 */

static int gfs2_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_holder gh;
	int ret;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (unlikely(ret))
		goto out_uninit;
	if (!gfs2_is_stuffed(ip))
		ret = mpage_readpages(mapping, pages, nr_pages, gfs2_block_map);
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		ret = -EIO;
	return ret;
}

/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
void adjust_fs_space(struct inode *inode)
{
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh, *l_bh;
	u64 fs_total, new_free;

	if (gfs2_trans_begin(sdp, 2 * RES_STATFS, 0) != 0)
		return;

	/* Total up the file system space, according to the latest rindex. */
	fs_total = gfs2_ri_total(sdp);
	if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
		goto out;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (fs_total > (m_sc->sc_total + l_sc->sc_total))
		new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
	else
		new_free = 0;
	spin_unlock(&sdp->sd_statfs_spin);
	fs_warn(sdp, "File system extended by %llu blocks.\n",
		(unsigned long long)new_free);
	gfs2_statfs_change(sdp, new_free, new_free, 0);

	if (gfs2_meta_inode_buffer(l_ip, &l_bh) != 0)
		goto out2;
	update_statfs(sdp, m_bh, l_bh);
	brelse(l_bh);
out2:
	brelse(m_bh);
out:
	sdp->sd_rindex_uptodate = 0;
	gfs2_trans_end(sdp);
}

/**
 * gfs2_stuffed_write_end - Write end for stuffed files
 * @inode: The inode
 * @dibh: The buffer_head containing the on-disk inode
 * @pos: The file position
 * @copied: How much was actually copied by the VFS
 * @page: The page
 *
 * This copies the data from the page into the inode block after
 * the inode data structure itself.
 *
 * Returns: copied bytes or errno
 */
int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
			   loff_t pos, unsigned copied,
			   struct page *page)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	u64 to = pos + copied;
	void *kaddr;
	unsigned char *buf = dibh->b_data + sizeof(struct gfs2_dinode);

	BUG_ON(pos + copied > gfs2_max_stuffed_size(ip));

	kaddr = kmap_atomic(page);
	memcpy(buf + pos, kaddr + pos, copied);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);

	WARN_ON(!PageUptodate(page));
	unlock_page(page);
	put_page(page);

	if (copied) {
		if (inode->i_size < to)
			i_size_write(inode, to);
		mark_inode_dirty(inode);
	}
	return copied;
}

/**
 * jdata_set_page_dirty - Page dirtying function
 * @page: The page to dirty
 *
 * Returns: 1 if it dirtied the page, or 0 otherwise
 */

static int jdata_set_page_dirty(struct page *page)
{
	SetPageChecked(page);
	return __set_page_dirty_buffers(page);
}

/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder i_gh;
	sector_t dblock = 0;
	int error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return 0;

	if (!gfs2_is_stuffed(ip))
		dblock = generic_block_bmap(mapping, lblock, gfs2_block_map);

	gfs2_glock_dq_uninit(&i_gh);

	return dblock;
}

static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	lock_buffer(bh);
	gfs2_log_lock(sdp);
	clear_buffer_dirty(bh);
	bd = bh->b_private;
	if (bd) {
		if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
			list_del_init(&bd->bd_list);
		else
			gfs2_remove_from_journal(bh, REMOVE_JDATA);
	}
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	gfs2_log_unlock(sdp);
	unlock_buffer(bh);
}

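/*
 * gfs2_invalidatepage - invalidate part or all of a page
 *
 * Discards (via gfs2_discard() above) every buffer that lies entirely
 * within the byte range [@offset, @offset + @length) of the page, and
 * returns early at the first buffer that extends past the end of that
 * range.  PageChecked is only cleared, and the page's buffers only
 * released, when the whole page is being invalidated; a partially
 * invalidated page may still carry journaled buffers.
 */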
static void gfs2_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length)
{
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	unsigned int stop = offset + length;
	int partial_page = (offset || length < PAGE_SIZE);
	struct buffer_head *bh, *head;
	unsigned long pos = 0;

	BUG_ON(!PageLocked(page));
	if (!partial_page)
		ClearPageChecked(page);
	if (!page_has_buffers(page))
		goto out;

	bh = head = page_buffers(page);
	do {
		if (pos + bh->b_size > stop)
			return;

		if (offset <= pos)
			gfs2_discard(sdp, bh);
		pos += bh->b_size;
		bh = bh->b_this_page;
	} while (bh != head);
out:
	if (!partial_page)
		try_to_release_page(page, 0);
}

/**
 * gfs2_releasepage - free the metadata associated with a page
 * @page: the page that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Calls try_to_free_buffers() to free the buffers and put the page if the
 * buffers can be released.
 *
 * Returns: 1 if the page was put or else 0
 */

int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
{
	struct address_space *mapping = page->mapping;
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;

	if (!page_has_buffers(page))
		return 0;

	/*
	 * From xfs_vm_releasepage: mm accommodates an old ext3 case where
	 * clean pages might not have had the dirty bit cleared.  Thus, it can
	 * send actual dirty pages to ->releasepage() via shrink_active_list().
	 *
	 * As a workaround, we skip pages that contain dirty buffers below.
	 * Once ->releasepage isn't called on dirty pages anymore, we can warn
	 * on dirty buffers like we used to here again.
	 */

	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	head = bh = page_buffers(page);
	do {
		if (atomic_read(&bh->b_count))
			goto cannot_release;
		bd = bh->b_private;
		if (bd && bd->bd_tr)
			goto cannot_release;
		if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh)))
			goto cannot_release;
		bh = bh->b_this_page;
	} while (bh != head);
	spin_unlock(&sdp->sd_ail_lock);

	head = bh = page_buffers(page);
	do {
		bd = bh->b_private;
		if (bd) {
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			if (!list_empty(&bd->bd_list))
				list_del_init(&bd->bd_list);
			bd->bd_bh = NULL;
			bh->b_private = NULL;
			kmem_cache_free(gfs2_bufdata_cachep, bd);
		}

		bh = bh->b_this_page;
	} while (bh != head);
	gfs2_log_unlock(sdp);

	return try_to_free_buffers(page);

cannot_release:
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
	return 0;
}

static const struct address_space_operations gfs2_writeback_aops = {
	.writepage = gfs2_writepage,
	.writepages = gfs2_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.direct_IO = noop_direct_IO,
	.migratepage = buffer_migrate_page,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations gfs2_ordered_aops = {
	.writepage = gfs2_writepage,
	.writepages = gfs2_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.set_page_dirty = __set_page_dirty_buffers,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.direct_IO = noop_direct_IO,
	.migratepage = buffer_migrate_page,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations gfs2_jdata_aops = {
	.writepage = gfs2_jdata_writepage,
	.writepages = gfs2_jdata_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.set_page_dirty = jdata_set_page_dirty,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};
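
/*
 * Note how the three tables differ: the jdata table routes writes
 * through the journaling writepage paths and uses jdata_set_page_dirty()
 * so that PageChecked gets set, and it deliberately wires up no
 * .direct_IO or .migratepage, presumably because journaled data cannot
 * bypass the journal the way ordered/writeback data can.
 */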

void gfs2_set_aops(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (gfs2_is_jdata(ip))
		inode->i_mapping->a_ops = &gfs2_jdata_aops;
	else if (gfs2_is_writeback(sdp))
		inode->i_mapping->a_ops = &gfs2_writeback_aops;
	else if (gfs2_is_ordered(sdp))
		inode->i_mapping->a_ops = &gfs2_ordered_aops;
	else
		BUG();
}