xref: /openbmc/linux/fs/xfs/xfs_aops.c (revision 293d5b43)
1 /*
2  * Copyright (c) 2000-2005 Silicon Graphics, Inc.
3  * All Rights Reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it would be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write the Free Software Foundation,
16  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
17  */
18 #include "xfs.h"
19 #include "xfs_shared.h"
20 #include "xfs_format.h"
21 #include "xfs_log_format.h"
22 #include "xfs_trans_resv.h"
23 #include "xfs_mount.h"
24 #include "xfs_inode.h"
25 #include "xfs_trans.h"
26 #include "xfs_inode_item.h"
27 #include "xfs_alloc.h"
28 #include "xfs_error.h"
29 #include "xfs_iomap.h"
30 #include "xfs_trace.h"
31 #include "xfs_bmap.h"
32 #include "xfs_bmap_util.h"
33 #include "xfs_bmap_btree.h"
34 #include <linux/gfp.h>
35 #include <linux/mpage.h>
36 #include <linux/pagevec.h>
37 #include <linux/writeback.h>
38 
39 /* flags for direct write completions */
40 #define XFS_DIO_FLAG_UNWRITTEN	(1 << 0)
41 #define XFS_DIO_FLAG_APPEND	(1 << 1)
42 
43 /*
44  * Structure owned by writepages, passed to individual writepage calls.
45  */
46 struct xfs_writepage_ctx {
47 	struct xfs_bmbt_irec    imap;
48 	bool			imap_valid;
49 	unsigned int		io_type;
50 	struct xfs_ioend	*ioend;
51 	sector_t		last_block;
52 };
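/*
 * Rough field usage, as inferred from the code below: ->imap caches the most
 * recent block mapping and ->imap_valid records whether it still covers the
 * offset being processed; ->io_type is the current extent type (delalloc,
 * unwritten or overwrite); ->ioend is the ioend currently being built; and
 * ->last_block is the last block added to it, used by xfs_add_to_ioend() to
 * detect non-contiguous buffers.
 */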
53 
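/*
 * Note whether the page has any delalloc or unwritten buffers attached.
 * Despite the name this does not count buffers; *delalloc and *unwritten are
 * simply set to 0 or 1.
 */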
54 void
55 xfs_count_page_state(
56 	struct page		*page,
57 	int			*delalloc,
58 	int			*unwritten)
59 {
60 	struct buffer_head	*bh, *head;
61 
62 	*delalloc = *unwritten = 0;
63 
64 	bh = head = page_buffers(page);
65 	do {
66 		if (buffer_unwritten(bh))
67 			(*unwritten) = 1;
68 		else if (buffer_delay(bh))
69 			(*delalloc) = 1;
70 	} while ((bh = bh->b_this_page) != head);
71 }
72 
73 struct block_device *
74 xfs_find_bdev_for_inode(
75 	struct inode		*inode)
76 {
77 	struct xfs_inode	*ip = XFS_I(inode);
78 	struct xfs_mount	*mp = ip->i_mount;
79 
80 	if (XFS_IS_REALTIME_INODE(ip))
81 		return mp->m_rtdev_targp->bt_bdev;
82 	else
83 		return mp->m_ddev_targp->bt_bdev;
84 }
85 
86 /*
87  * We're now finished for good with this page.  Update the page state via the
88  * associated buffer_heads, paying attention to the start and end offsets that
89  * we need to process on the page.
90  *
91  * Landmine Warning: bh->b_end_io() will call end_page_writeback() on the last
92  * buffer in the IO. Once it does this, it is unsafe to access the bufferhead or
93  * the page at all, because we may be racing with memory reclaim, which can
94  * free both the bufferhead chain and the page once it sees the page as clean
95  * and unused.
96  */
97 static void
98 xfs_finish_page_writeback(
99 	struct inode		*inode,
100 	struct bio_vec		*bvec,
101 	int			error)
102 {
103 	unsigned int		end = bvec->bv_offset + bvec->bv_len - 1;
104 	struct buffer_head	*head, *bh, *next;
105 	unsigned int		off = 0;
106 	unsigned int		bsize;
107 
108 	ASSERT(bvec->bv_offset < PAGE_SIZE);
109 	ASSERT((bvec->bv_offset & ((1 << inode->i_blkbits) - 1)) == 0);
110 	ASSERT(end < PAGE_SIZE);
111 	ASSERT((bvec->bv_len & ((1 << inode->i_blkbits) - 1)) == 0);
112 
113 	bh = head = page_buffers(bvec->bv_page);
114 
115 	bsize = bh->b_size;
116 	do {
117 		next = bh->b_this_page;
118 		if (off < bvec->bv_offset)
119 			goto next_bh;
120 		if (off > end)
121 			break;
122 		bh->b_end_io(bh, !error);
123 next_bh:
124 		off += bsize;
125 	} while ((bh = next) != head);
126 }
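/*
 * Illustrative example of the walk above: with 4k pages and 1k blocks, a bvec
 * with bv_offset = 1024 and bv_len = 2048 covers the buffers at page offsets
 * 1024 and 2048.  The loop skips the buffer at offset 0 (off < bv_offset),
 * calls b_end_io() on the two covered buffers, and stops once off passes
 * end (3071).
 */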
127 
128 /*
129  * We're now finished for good with this ioend structure.  Update the page
130  * state, release holds on bios, and finally free up memory.  Do not use the
131  * ioend after this.
132  */
133 STATIC void
134 xfs_destroy_ioend(
135 	struct xfs_ioend	*ioend,
136 	int			error)
137 {
138 	struct inode		*inode = ioend->io_inode;
139 	struct bio		*last = ioend->io_bio;
140 	struct bio		*bio, *next;
141 
142 	for (bio = &ioend->io_inline_bio; bio; bio = next) {
143 		struct bio_vec	*bvec;
144 		int		i;
145 
146 		/*
147 		 * For the last bio, bi_private points to the ioend, so we
148 		 * need to explicitly end the iteration here.
149 		 */
150 		if (bio == last)
151 			next = NULL;
152 		else
153 			next = bio->bi_private;
154 
155 		/* walk each page on bio, ending page IO on them */
156 		bio_for_each_segment_all(bvec, bio, i)
157 			xfs_finish_page_writeback(inode, bvec, error);
158 
159 		bio_put(bio);
160 	}
161 }
162 
163 /*
164  * Fast and loose check if this write could update the on-disk inode size.
165  */
166 static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
167 {
168 	return ioend->io_offset + ioend->io_size >
169 		XFS_I(ioend->io_inode)->i_d.di_size;
170 }
171 
172 STATIC int
173 xfs_setfilesize_trans_alloc(
174 	struct xfs_ioend	*ioend)
175 {
176 	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
177 	struct xfs_trans	*tp;
178 	int			error;
179 
180 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
181 	if (error)
182 		return error;
183 
184 	ioend->io_append_trans = tp;
185 
186 	/*
187 	 * We may pass freeze protection with a transaction.  So tell lockdep
188 	 * we released it.
189 	 */
190 	__sb_writers_release(ioend->io_inode->i_sb, SB_FREEZE_FS);
191 	/*
192 	 * We hand off the transaction to the completion thread now, so
193 	 * clear the flag here.
194 	 */
195 	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
196 	return 0;
197 }
198 
199 /*
200  * Update on-disk file size now that data has been written to disk.
201  */
202 STATIC int
203 xfs_setfilesize(
204 	struct xfs_inode	*ip,
205 	struct xfs_trans	*tp,
206 	xfs_off_t		offset,
207 	size_t			size)
208 {
209 	xfs_fsize_t		isize;
210 
211 	xfs_ilock(ip, XFS_ILOCK_EXCL);
212 	isize = xfs_new_eof(ip, offset + size);
213 	if (!isize) {
214 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
215 		xfs_trans_cancel(tp);
216 		return 0;
217 	}
218 
219 	trace_xfs_setfilesize(ip, offset, size);
220 
221 	ip->i_d.di_size = isize;
222 	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
223 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
224 
225 	return xfs_trans_commit(tp);
226 }
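/*
 * Worked example (assuming xfs_new_eof() only returns a new size when it moves
 * the on-disk size forward, clamped to the in-core i_size): with di_size at
 * 512k and i_size at 1M, a 64k I/O completing at offset 960k logs di_size as
 * 1M, while the same I/O completing at offset 256k gets 0 back from
 * xfs_new_eof() and the transaction is cancelled.
 */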
227 
228 STATIC int
229 xfs_setfilesize_ioend(
230 	struct xfs_ioend	*ioend,
231 	int			error)
232 {
233 	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
234 	struct xfs_trans	*tp = ioend->io_append_trans;
235 
236 	/*
237 	 * The transaction may have been allocated in the I/O submission thread,
238 	 * thus we need to mark ourselves as being in a transaction manually.
239 	 * Similarly for freeze protection.
240 	 */
241 	current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);
242 	__sb_writers_acquired(VFS_I(ip)->i_sb, SB_FREEZE_FS);
243 
244 	/* we abort the update if there was an IO error */
245 	if (error) {
246 		xfs_trans_cancel(tp);
247 		return error;
248 	}
249 
250 	return xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);
251 }
252 
253 /*
254  * IO write completion.
255  */
256 STATIC void
257 xfs_end_io(
258 	struct work_struct *work)
259 {
260 	struct xfs_ioend	*ioend =
261 		container_of(work, struct xfs_ioend, io_work);
262 	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
263 	int			error = ioend->io_bio->bi_error;
264 
265 	/*
266 	 * Set an error if the mount has shut down and proceed with end I/O
267 	 * processing so it can perform whatever cleanups are necessary.
268 	 */
269 	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
270 		error = -EIO;
271 
272 	/*
273 	 * For unwritten extents we need to issue transactions to convert a
274 	 * range to normal written extents after the data I/O has finished.
275 	 * Detecting and handling completion IO errors is done individually
276 	 * for each case as different cleanup operations need to be performed
277 	 * on error.
278 	 */
279 	if (ioend->io_type == XFS_IO_UNWRITTEN) {
280 		if (error)
281 			goto done;
282 		error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
283 						  ioend->io_size);
284 	} else if (ioend->io_append_trans) {
285 		error = xfs_setfilesize_ioend(ioend, error);
286 	} else {
287 		ASSERT(!xfs_ioend_is_append(ioend));
288 	}
289 
290 done:
291 	xfs_destroy_ioend(ioend, error);
292 }
293 
294 STATIC void
295 xfs_end_bio(
296 	struct bio		*bio)
297 {
298 	struct xfs_ioend	*ioend = bio->bi_private;
299 	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
300 
301 	if (ioend->io_type == XFS_IO_UNWRITTEN)
302 		queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
303 	else if (ioend->io_append_trans)
304 		queue_work(mp->m_data_workqueue, &ioend->io_work);
305 	else
306 		xfs_destroy_ioend(ioend, bio->bi_error);
307 }
308 
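/*
 * Look up the extent backing the block at @offset for writeback.  A rough
 * summary of what follows: the mapping is read under the shared ILOCK, and if
 * the range is delalloc with no real blocks allocated yet,
 * xfs_iomap_write_allocate() is called to convert the delayed allocation into
 * real extents.
 */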
309 STATIC int
310 xfs_map_blocks(
311 	struct inode		*inode,
312 	loff_t			offset,
313 	struct xfs_bmbt_irec	*imap,
314 	int			type)
315 {
316 	struct xfs_inode	*ip = XFS_I(inode);
317 	struct xfs_mount	*mp = ip->i_mount;
318 	ssize_t			count = 1 << inode->i_blkbits;
319 	xfs_fileoff_t		offset_fsb, end_fsb;
320 	int			error = 0;
321 	int			bmapi_flags = XFS_BMAPI_ENTIRE;
322 	int			nimaps = 1;
323 
324 	if (XFS_FORCED_SHUTDOWN(mp))
325 		return -EIO;
326 
327 	if (type == XFS_IO_UNWRITTEN)
328 		bmapi_flags |= XFS_BMAPI_IGSTATE;
329 
330 	xfs_ilock(ip, XFS_ILOCK_SHARED);
331 	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
332 	       (ip->i_df.if_flags & XFS_IFEXTENTS));
333 	ASSERT(offset <= mp->m_super->s_maxbytes);
334 
335 	if (offset + count > mp->m_super->s_maxbytes)
336 		count = mp->m_super->s_maxbytes - offset;
337 	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
338 	offset_fsb = XFS_B_TO_FSBT(mp, offset);
339 	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
340 				imap, &nimaps, bmapi_flags);
341 	xfs_iunlock(ip, XFS_ILOCK_SHARED);
342 
343 	if (error)
344 		return error;
345 
346 	if (type == XFS_IO_DELALLOC &&
347 	    (!nimaps || isnullstartblock(imap->br_startblock))) {
348 		error = xfs_iomap_write_allocate(ip, offset, imap);
349 		if (!error)
350 			trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
351 		return error;
352 	}
353 
354 #ifdef DEBUG
355 	if (type == XFS_IO_UNWRITTEN) {
356 		ASSERT(nimaps);
357 		ASSERT(imap->br_startblock != HOLESTARTBLOCK);
358 		ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
359 	}
360 #endif
361 	if (nimaps)
362 		trace_xfs_map_blocks_found(ip, offset, count, type, imap);
363 	return 0;
364 }
365 
366 STATIC bool
367 xfs_imap_valid(
368 	struct inode		*inode,
369 	struct xfs_bmbt_irec	*imap,
370 	xfs_off_t		offset)
371 {
372 	offset >>= inode->i_blkbits;
373 
374 	return offset >= imap->br_startoff &&
375 		offset < imap->br_startoff + imap->br_blockcount;
376 }
377 
378 STATIC void
379 xfs_start_buffer_writeback(
380 	struct buffer_head	*bh)
381 {
382 	ASSERT(buffer_mapped(bh));
383 	ASSERT(buffer_locked(bh));
384 	ASSERT(!buffer_delay(bh));
385 	ASSERT(!buffer_unwritten(bh));
386 
387 	mark_buffer_async_write(bh);
388 	set_buffer_uptodate(bh);
389 	clear_buffer_dirty(bh);
390 }
391 
392 STATIC void
393 xfs_start_page_writeback(
394 	struct page		*page,
395 	int			clear_dirty)
396 {
397 	ASSERT(PageLocked(page));
398 	ASSERT(!PageWriteback(page));
399 
400 	/*
401 	 * if the page was not fully cleaned, we need to ensure that the higher
402 	 * layers come back to it correctly. That means we need to keep the page
403 	 * dirty, and for WB_SYNC_ALL writeback we need to ensure the
404 	 * PAGECACHE_TAG_TOWRITE index mark is not removed so another attempt to
405 	 * write this page in this writeback sweep will be made.
406 	 */
407 	if (clear_dirty) {
408 		clear_page_dirty_for_io(page);
409 		set_page_writeback(page);
410 	} else
411 		set_page_writeback_keepwrite(page);
412 
413 	unlock_page(page);
414 }
415 
416 static inline int xfs_bio_add_buffer(struct bio *bio, struct buffer_head *bh)
417 {
418 	return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
419 }
420 
421 /*
422  * Submit the bio for an ioend. We are passed an ioend with a bio attached to
423  * it, and we submit that bio. The ioend may be used for multiple bio
424  * submissions, so we only want to allocate an append transaction for the ioend
425  * once. In the case of multiple bio submission, each bio will take an IO
426  * reference to the ioend to ensure that the ioend completion is only done once
427  * all bios have been submitted and the ioend is really done.
428  *
429  * If @status is non-zero, it means that some part of the submission process
430  * has failed after we have marked pages for writeback and unlocked them. In
431  * this situation, we need to fail the bio and ioend
432  * rather than submit it to IO. This typically only happens on a filesystem
433  * shutdown.
434  */
435 STATIC int
436 xfs_submit_ioend(
437 	struct writeback_control *wbc,
438 	struct xfs_ioend	*ioend,
439 	int			status)
440 {
441 	/* Reserve log space if we might write beyond the on-disk inode size. */
442 	if (!status &&
443 	    ioend->io_type != XFS_IO_UNWRITTEN &&
444 	    xfs_ioend_is_append(ioend) &&
445 	    !ioend->io_append_trans)
446 		status = xfs_setfilesize_trans_alloc(ioend);
447 
448 	ioend->io_bio->bi_private = ioend;
449 	ioend->io_bio->bi_end_io = xfs_end_bio;
450 	bio_set_op_attrs(ioend->io_bio, REQ_OP_WRITE,
451 			 (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : 0);
452 	/*
453 	 * If we are failing the IO now, just mark the ioend with an
454 	 * error and finish it. This will run IO completion immediately
455 	 * as there is only one reference to the ioend at this point in
456 	 * time.
457 	 */
458 	if (status) {
459 		ioend->io_bio->bi_error = status;
460 		bio_endio(ioend->io_bio);
461 		return status;
462 	}
463 
464 	submit_bio(ioend->io_bio);
465 	return 0;
466 }
467 
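/*
 * Point a freshly allocated bio at the same device and starting sector as the
 * given buffer_head.  b_blocknr is in units of b_size, so multiplying by
 * (b_size >> 9) converts it to the 512-byte sectors bi_sector expects.
 */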
468 static void
469 xfs_init_bio_from_bh(
470 	struct bio		*bio,
471 	struct buffer_head	*bh)
472 {
473 	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
474 	bio->bi_bdev = bh->b_bdev;
475 }
476 
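/*
 * Allocate a new ioend for writeback starting at @offset.  The bio comes from
 * xfs_ioend_bioset, which (as set up elsewhere) reserves room for the
 * containing struct xfs_ioend in front of the bio, so the container_of() on
 * io_inline_bio below recovers the ioend from the bio allocation.
 */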
477 static struct xfs_ioend *
478 xfs_alloc_ioend(
479 	struct inode		*inode,
480 	unsigned int		type,
481 	xfs_off_t		offset,
482 	struct buffer_head	*bh)
483 {
484 	struct xfs_ioend	*ioend;
485 	struct bio		*bio;
486 
487 	bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, xfs_ioend_bioset);
488 	xfs_init_bio_from_bh(bio, bh);
489 
490 	ioend = container_of(bio, struct xfs_ioend, io_inline_bio);
491 	INIT_LIST_HEAD(&ioend->io_list);
492 	ioend->io_type = type;
493 	ioend->io_inode = inode;
494 	ioend->io_size = 0;
495 	ioend->io_offset = offset;
496 	INIT_WORK(&ioend->io_work, xfs_end_io);
497 	ioend->io_append_trans = NULL;
498 	ioend->io_bio = bio;
499 	return ioend;
500 }
501 
502 /*
503  * Allocate a new bio, and chain the old bio to the new one.
504  *
505  * Note that we have to perform the chaining in this unintuitive order
506  * so that the bi_private linkage is set up in the right direction for the
507  * traversal in xfs_destroy_ioend().
508  */
509 static void
510 xfs_chain_bio(
511 	struct xfs_ioend	*ioend,
512 	struct writeback_control *wbc,
513 	struct buffer_head	*bh)
514 {
515 	struct bio *new;
516 
517 	new = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
518 	xfs_init_bio_from_bh(new, bh);
519 
520 	bio_chain(ioend->io_bio, new);
521 	bio_get(ioend->io_bio);		/* for xfs_destroy_ioend */
522 	bio_set_op_attrs(ioend->io_bio, REQ_OP_WRITE,
523 			  (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : 0);
524 	submit_bio(ioend->io_bio);
525 	ioend->io_bio = new;
526 }
527 
528 /*
529  * Test to see if we've been building up a completion structure for
530  * earlier buffers -- if so, we try to append to this ioend if we
531  * can, otherwise we finish off any current ioend and start another.
532  * Any ioend we finish off is added to @iolist so that the caller can submit
533  * it once it has finished processing the dirty page.
534  */
535 STATIC void
536 xfs_add_to_ioend(
537 	struct inode		*inode,
538 	struct buffer_head	*bh,
539 	xfs_off_t		offset,
540 	struct xfs_writepage_ctx *wpc,
541 	struct writeback_control *wbc,
542 	struct list_head	*iolist)
543 {
544 	if (!wpc->ioend || wpc->io_type != wpc->ioend->io_type ||
545 	    bh->b_blocknr != wpc->last_block + 1 ||
546 	    offset != wpc->ioend->io_offset + wpc->ioend->io_size) {
547 		if (wpc->ioend)
548 			list_add(&wpc->ioend->io_list, iolist);
549 		wpc->ioend = xfs_alloc_ioend(inode, wpc->io_type, offset, bh);
550 	}
551 
552 	/*
553 	 * If the buffer doesn't fit into the bio we need to allocate a new
554 	 * one.  This shouldn't happen more than once for a given buffer.
555 	 */
556 	while (xfs_bio_add_buffer(wpc->ioend->io_bio, bh) != bh->b_size)
557 		xfs_chain_bio(wpc->ioend, wbc, bh);
558 
559 	wpc->ioend->io_size += bh->b_size;
560 	wpc->last_block = bh->b_blocknr;
561 	xfs_start_buffer_writeback(bh);
562 }
563 
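/*
 * Fill in bh->b_blocknr from the extent mapping.  iomap_bn is a disk address
 * in 512-byte basic blocks, so shifting right by (i_blkbits - BBSHIFT)
 * converts it to filesystem blocks, and the second term adds the block offset
 * of @offset within the extent.  For example, with 4k blocks an extent
 * starting at daddr 8192 maps to block 1024, and an offset 16k into that
 * extent yields b_blocknr 1028.
 */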
564 STATIC void
565 xfs_map_buffer(
566 	struct inode		*inode,
567 	struct buffer_head	*bh,
568 	struct xfs_bmbt_irec	*imap,
569 	xfs_off_t		offset)
570 {
571 	sector_t		bn;
572 	struct xfs_mount	*m = XFS_I(inode)->i_mount;
573 	xfs_off_t		iomap_offset = XFS_FSB_TO_B(m, imap->br_startoff);
574 	xfs_daddr_t		iomap_bn = xfs_fsb_to_db(XFS_I(inode), imap->br_startblock);
575 
576 	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
577 	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
578 
579 	bn = (iomap_bn >> (inode->i_blkbits - BBSHIFT)) +
580 	      ((offset - iomap_offset) >> inode->i_blkbits);
581 
582 	ASSERT(bn || XFS_IS_REALTIME_INODE(XFS_I(inode)));
583 
584 	bh->b_blocknr = bn;
585 	set_buffer_mapped(bh);
586 }
587 
588 STATIC void
589 xfs_map_at_offset(
590 	struct inode		*inode,
591 	struct buffer_head	*bh,
592 	struct xfs_bmbt_irec	*imap,
593 	xfs_off_t		offset)
594 {
595 	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
596 	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
597 
598 	xfs_map_buffer(inode, bh, imap, offset);
599 	set_buffer_mapped(bh);
600 	clear_buffer_delay(bh);
601 	clear_buffer_unwritten(bh);
602 }
603 
604 /*
605  * Test if a given page contains at least one buffer of a given @type.
606  * If @check_all_buffers is true, then we walk all the buffers in the page to
607  * try to find one of the type passed in. If it is not set, then the caller only
608  * try to find one of the type passed in. If it is not set, then we only check
609  * the first buffer on the page for a match.
610 STATIC bool
611 xfs_check_page_type(
612 	struct page		*page,
613 	unsigned int		type,
614 	bool			check_all_buffers)
615 {
616 	struct buffer_head	*bh;
617 	struct buffer_head	*head;
618 
619 	if (PageWriteback(page))
620 		return false;
621 	if (!page->mapping)
622 		return false;
623 	if (!page_has_buffers(page))
624 		return false;
625 
626 	bh = head = page_buffers(page);
627 	do {
628 		if (buffer_unwritten(bh)) {
629 			if (type == XFS_IO_UNWRITTEN)
630 				return true;
631 		} else if (buffer_delay(bh)) {
632 			if (type == XFS_IO_DELALLOC)
633 				return true;
634 		} else if (buffer_dirty(bh) && buffer_mapped(bh)) {
635 			if (type == XFS_IO_OVERWRITE)
636 				return true;
637 		}
638 
639 		/* If we are only checking the first buffer, we are done now. */
640 		if (!check_all_buffers)
641 			break;
642 	} while ((bh = bh->b_this_page) != head);
643 
644 	return false;
645 }
646 
647 STATIC void
648 xfs_vm_invalidatepage(
649 	struct page		*page,
650 	unsigned int		offset,
651 	unsigned int		length)
652 {
653 	trace_xfs_invalidatepage(page->mapping->host, page, offset,
654 				 length);
655 	block_invalidatepage(page, offset, length);
656 }
657 
658 /*
659  * If the page has delalloc buffers on it, we need to punch them out before we
660  * invalidate the page. If we don't, we leave a stale delalloc mapping on the
661  * inode that can trip a BUG() in xfs_get_blocks() later on if a direct IO read
662  * is done on that same region - the delalloc extent is returned when none is
663  * supposed to be there.
664  *
665  * We prevent this by truncating away the delalloc regions on the page before
666  * invalidating it. Because they are delalloc, we can do this without needing a
667  * transaction. Indeed - if we get ENOSPC errors, we have to be able to do this
668  * truncation without a transaction as there is no space left for block
669  * reservation (typically why we see an ENOSPC in writeback).
670  *
671  * This is not a performance critical path, so for now just do the punching a
672  * buffer head at a time.
673  */
674 STATIC void
675 xfs_aops_discard_page(
676 	struct page		*page)
677 {
678 	struct inode		*inode = page->mapping->host;
679 	struct xfs_inode	*ip = XFS_I(inode);
680 	struct buffer_head	*bh, *head;
681 	loff_t			offset = page_offset(page);
682 
683 	if (!xfs_check_page_type(page, XFS_IO_DELALLOC, true))
684 		goto out_invalidate;
685 
686 	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
687 		goto out_invalidate;
688 
689 	xfs_alert(ip->i_mount,
690 		"page discard on page %p, inode 0x%llx, offset %llu.",
691 			page, ip->i_ino, offset);
692 
693 	xfs_ilock(ip, XFS_ILOCK_EXCL);
694 	bh = head = page_buffers(page);
695 	do {
696 		int		error;
697 		xfs_fileoff_t	start_fsb;
698 
699 		if (!buffer_delay(bh))
700 			goto next_buffer;
701 
702 		start_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
703 		error = xfs_bmap_punch_delalloc_range(ip, start_fsb, 1);
704 		if (error) {
705 			/* something screwed, just bail */
706 			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
707 				xfs_alert(ip->i_mount,
708 			"page discard unable to remove delalloc mapping.");
709 			}
710 			break;
711 		}
712 next_buffer:
713 		offset += 1 << inode->i_blkbits;
714 
715 	} while ((bh = bh->b_this_page) != head);
716 
717 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
718 out_invalidate:
719 	xfs_vm_invalidatepage(page, 0, PAGE_SIZE);
720 	return;
721 }
722 
723 /*
724  * We implement an immediate ioend submission policy here to avoid needing to
725  * chain multiple ioends and hence nest mempool allocations which can violate
726  * forward progress guarantees we need to provide. The current ioend we are
727  * adding buffers to is cached on the writepage context, and if the new buffer
728  * does not append to the cached ioend it will create a new ioend and cache that
729  * instead.
730  *
731  * If a new ioend is created and cached, the old ioend is returned and queued
732  * locally for submission once the entire page is processed or an error has been
733  * detected.  While ioends are submitted immediately after they are completed,
734  * batching optimisations are provided by higher level block plugging.
735  *
736  * At the end of a writeback pass, there will be a cached ioend remaining on the
737  * writepage context that the caller will need to submit.
738  */
739 static int
740 xfs_writepage_map(
741 	struct xfs_writepage_ctx *wpc,
742 	struct writeback_control *wbc,
743 	struct inode		*inode,
744 	struct page		*page,
745 	loff_t			offset,
746 	__uint64_t              end_offset)
747 {
748 	LIST_HEAD(submit_list);
749 	struct xfs_ioend	*ioend, *next;
750 	struct buffer_head	*bh, *head;
751 	ssize_t			len = 1 << inode->i_blkbits;
752 	int			error = 0;
753 	int			count = 0;
754 	int			uptodate = 1;
755 
756 	bh = head = page_buffers(page);
757 	offset = page_offset(page);
758 	do {
759 		if (offset >= end_offset)
760 			break;
761 		if (!buffer_uptodate(bh))
762 			uptodate = 0;
763 
764 		/*
765 		 * set_page_dirty dirties all buffers in a page, independent
766 		 * of their state.  The dirty state however is entirely
767 		 * meaningless for holes (!mapped && uptodate), so skip
768 		 * buffers covering holes here.
769 		 */
770 		if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
771 			wpc->imap_valid = false;
772 			continue;
773 		}
774 
775 		if (buffer_unwritten(bh)) {
776 			if (wpc->io_type != XFS_IO_UNWRITTEN) {
777 				wpc->io_type = XFS_IO_UNWRITTEN;
778 				wpc->imap_valid = false;
779 			}
780 		} else if (buffer_delay(bh)) {
781 			if (wpc->io_type != XFS_IO_DELALLOC) {
782 				wpc->io_type = XFS_IO_DELALLOC;
783 				wpc->imap_valid = false;
784 			}
785 		} else if (buffer_uptodate(bh)) {
786 			if (wpc->io_type != XFS_IO_OVERWRITE) {
787 				wpc->io_type = XFS_IO_OVERWRITE;
788 				wpc->imap_valid = false;
789 			}
790 		} else {
791 			if (PageUptodate(page))
792 				ASSERT(buffer_mapped(bh));
793 			/*
794 			 * This buffer is not uptodate and will not be
795 			 * written to disk.  Ensure that we will put any
796 			 * subsequent writeable buffers into a new
797 			 * ioend.
798 			 */
799 			wpc->imap_valid = false;
800 			continue;
801 		}
802 
803 		if (wpc->imap_valid)
804 			wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap,
805 							 offset);
806 		if (!wpc->imap_valid) {
807 			error = xfs_map_blocks(inode, offset, &wpc->imap,
808 					     wpc->io_type);
809 			if (error)
810 				goto out;
811 			wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap,
812 							 offset);
813 		}
814 		if (wpc->imap_valid) {
815 			lock_buffer(bh);
816 			if (wpc->io_type != XFS_IO_OVERWRITE)
817 				xfs_map_at_offset(inode, bh, &wpc->imap, offset);
818 			xfs_add_to_ioend(inode, bh, offset, wpc, wbc, &submit_list);
819 			count++;
820 		}
821 
822 	} while (offset += len, ((bh = bh->b_this_page) != head));
823 
824 	if (uptodate && bh == head)
825 		SetPageUptodate(page);
826 
827 	ASSERT(wpc->ioend || list_empty(&submit_list));
828 
829 out:
830 	/*
831 	 * On error, we have to fail the ioend here because we have locked
832 	 * buffers in the ioend. If we don't do this, we'll deadlock
833 	 * invalidating the page as that tries to lock the buffers on the page.
834 	 * Also, because we may have set pages under writeback, we have to make
835 	 * sure we run IO completion to mark the error state of the IO
836 	 * appropriately, so we can't cancel the ioend directly here. That means
837 	 * we have to mark this page as under writeback if we included any
838 	 * buffers from it in the ioend chain so that completion treats it
839 	 * correctly.
840 	 *
841 	 * If we didn't include the page in the ioend, then on error we can
842 	 * simply discard and unlock it as there are no other users of the page
843 	 * or its buffers right now. The caller will still need to trigger
844 	 * submission of outstanding ioends on the writepage context so they are
845 	 * treated correctly on error.
846 	 */
847 	if (count) {
848 		xfs_start_page_writeback(page, !error);
849 
850 		/*
851 		 * Preserve the original error if there was one, otherwise catch
852 		 * submission errors here and propagate into subsequent ioend
853 		 * submissions.
854 		 */
855 		list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
856 			int error2;
857 
858 			list_del_init(&ioend->io_list);
859 			error2 = xfs_submit_ioend(wbc, ioend, error);
860 			if (error2 && !error)
861 				error = error2;
862 		}
863 	} else if (error) {
864 		xfs_aops_discard_page(page);
865 		ClearPageUptodate(page);
866 		unlock_page(page);
867 	} else {
868 		/*
869 		 * We can end up here with no error and nothing to write if we
870 		 * race with a partial page truncate on a sub-page block sized
871 		 * filesystem. In that case we need to mark the page clean.
872 		 */
873 		xfs_start_page_writeback(page, 1);
874 		end_page_writeback(page);
875 	}
876 
877 	mapping_set_error(page->mapping, error);
878 	return error;
879 }
880 
881 /*
882  * Write out a dirty page.
883  *
884  * For delalloc space on the page we need to allocate space and flush it.
885  * For unwritten space on the page we need to start the conversion to
886  * regular allocated space.
887  * For any other dirty buffer heads on the page we should flush them.
888  */
889 STATIC int
890 xfs_do_writepage(
891 	struct page		*page,
892 	struct writeback_control *wbc,
893 	void			*data)
894 {
895 	struct xfs_writepage_ctx *wpc = data;
896 	struct inode		*inode = page->mapping->host;
897 	loff_t			offset;
898 	__uint64_t              end_offset;
899 	pgoff_t                 end_index;
900 
901 	trace_xfs_writepage(inode, page, 0, 0);
902 
903 	ASSERT(page_has_buffers(page));
904 
905 	/*
906 	 * Refuse to write the page out if we are called from reclaim context.
907 	 *
908 	 * This avoids stack overflows when called from deeply used stacks in
909 	 * random callers for direct reclaim or memcg reclaim.  We explicitly
910 	 * allow reclaim from kswapd as the stack usage there is relatively low.
911 	 *
912 	 * This should never happen except in the case of a VM regression so
913 	 * warn about it.
914 	 */
915 	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
916 			PF_MEMALLOC))
917 		goto redirty;
918 
919 	/*
920 	 * Given that we do not allow direct reclaim to call us, we should
921 	 * never be called while in a filesystem transaction.
922 	 */
923 	if (WARN_ON_ONCE(current->flags & PF_FSTRANS))
924 		goto redirty;
925 
926 	/*
927 	 * Is this page beyond the end of the file?
928 	 *
929 	 * The page index is less than the end_index, adjust the end_offset
930 	 * to the highest offset that this page should represent.
931 	 * -----------------------------------------------------
932 	 * |			file mapping	       | <EOF> |
933 	 * -----------------------------------------------------
934 	 * | Page ... | Page N-2 | Page N-1 |  Page N  |       |
935 	 * ^--------------------------------^----------|--------
936 	 * |     desired writeback range    |      see else    |
937 	 * ---------------------------------^------------------|
938 	 */
939 	offset = i_size_read(inode);
940 	end_index = offset >> PAGE_SHIFT;
941 	if (page->index < end_index)
942 		end_offset = (xfs_off_t)(page->index + 1) << PAGE_SHIFT;
943 	else {
944 		/*
945 		 * Check whether the page to write out is beyond or straddles
946 		 * i_size.
947 		 * -------------------------------------------------------
948 		 * |		file mapping		        | <EOF>  |
949 		 * -------------------------------------------------------
950 		 * | Page ... | Page N-2 | Page N-1 |  Page N   | Beyond |
951 		 * ^--------------------------------^-----------|---------
952 		 * |				    |      Straddles     |
953 		 * ---------------------------------^-----------|--------|
954 		 */
955 		unsigned offset_into_page = offset & (PAGE_SIZE - 1);
956 
957 		/*
958 		 * Skip the page if it is fully outside i_size, e.g. due to a
959 		 * truncate operation that is in progress. We must redirty the
960 		 * page so that reclaim stops reclaiming it. Otherwise
961 		 * xfs_vm_releasepage() is called on it and gets confused.
962 		 *
963 		 * Note that end_index is an unsigned long: it would overflow if
964 		 * the given offset is greater than 16TB on a 32-bit system, and
965 		 * if we checked whether the page is fully outside i_size via
966 		 * "if (page->index >= end_index + 1)", then "end_index + 1"
967 		 * would evaluate to 0.  Hence this page would be redirtied and
968 		 * written out repeatedly, which would result in an infinite loop
969 		 * and the user program performing this operation would hang.
970 		 * Instead, we verify this situation by checking whether the page
971 		 * to write is totally beyond i_size or whether its offset is
972 		 * just equal to the EOF.
973 		 */
974 		if (page->index > end_index ||
975 		    (page->index == end_index && offset_into_page == 0))
976 			goto redirty;
977 
978 		/*
979 		 * The page straddles i_size.  It must be zeroed out on each
980 		 * and every writepage invocation because it may be mmapped.
981 		 * "A file is mapped in multiples of the page size.  For a file
982 		 * that is not a multiple of the page size, the remaining
983 		 * memory is zeroed when mapped, and writes to that region are
984 		 * not written out to the file."
985 		 */
986 		zero_user_segment(page, offset_into_page, PAGE_SIZE);
987 
988 		/* Adjust the end_offset to the end of file */
989 		end_offset = offset;
990 	}
991 
992 	return xfs_writepage_map(wpc, wbc, inode, page, offset, end_offset);
993 
994 redirty:
995 	redirty_page_for_writepage(wbc, page);
996 	unlock_page(page);
997 	return 0;
998 }
999 
1000 STATIC int
1001 xfs_vm_writepage(
1002 	struct page		*page,
1003 	struct writeback_control *wbc)
1004 {
1005 	struct xfs_writepage_ctx wpc = {
1006 		.io_type = XFS_IO_INVALID,
1007 	};
1008 	int			ret;
1009 
1010 	ret = xfs_do_writepage(page, wbc, &wpc);
1011 	if (wpc.ioend)
1012 		ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
1013 	return ret;
1014 }
1015 
1016 STATIC int
1017 xfs_vm_writepages(
1018 	struct address_space	*mapping,
1019 	struct writeback_control *wbc)
1020 {
1021 	struct xfs_writepage_ctx wpc = {
1022 		.io_type = XFS_IO_INVALID,
1023 	};
1024 	int			ret;
1025 
1026 	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
1027 	if (dax_mapping(mapping))
1028 		return dax_writeback_mapping_range(mapping,
1029 				xfs_find_bdev_for_inode(mapping->host), wbc);
1030 
1031 	ret = write_cache_pages(mapping, wbc, xfs_do_writepage, &wpc);
1032 	if (wpc.ioend)
1033 		ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
1034 	return ret;
1035 }
1036 
1037 /*
1038  * Called to move a page into cleanable state - and from there
1039  * to be released. The page should already be clean. We always
1040  * have buffer heads in this call.
1041  *
1042  * Returns 1 if the page is ok to release, 0 otherwise.
1043  */
1044 STATIC int
1045 xfs_vm_releasepage(
1046 	struct page		*page,
1047 	gfp_t			gfp_mask)
1048 {
1049 	int			delalloc, unwritten;
1050 
1051 	trace_xfs_releasepage(page->mapping->host, page, 0, 0);
1052 
1053 	/*
1054 	 * mm accommodates an old ext3 case where clean pages might not have had
1055 	 * the dirty bit cleared. Thus, it can send actual dirty pages to
1056 	 * ->releasepage() via shrink_active_list(). Conversely,
1057 	 * block_invalidatepage() can send pages that are still marked dirty
1058 	 * but otherwise have invalidated buffers.
1059 	 *
1060 	 * We've historically freed buffers on the latter. Instead, quietly
1061 	 * filter out all dirty pages to avoid spurious buffer state warnings.
1062 	 * This can likely be removed once shrink_active_list() is fixed.
1063 	 */
1064 	if (PageDirty(page))
1065 		return 0;
1066 
1067 	xfs_count_page_state(page, &delalloc, &unwritten);
1068 
1069 	if (WARN_ON_ONCE(delalloc))
1070 		return 0;
1071 	if (WARN_ON_ONCE(unwritten))
1072 		return 0;
1073 
1074 	return try_to_free_buffers(page);
1075 }
1076 
1077 /*
1078  * When we map a DIO buffer, we may need to pass flags to
1079  * xfs_end_io_direct_write to tell it what kind of write IO we are doing.
1080  *
1081  * Note that for DIO, an IO to the highest supported file block offset (i.e.
1082  * 2^63 - 1FSB bytes) will result in the offset + count overflowing a signed 64
1083  * bit variable. Hence if we see this overflow, we have to assume that the IO is
1084  * extending the file size. We won't know for sure until IO completion is run
1085  * and the actual max write offset is communicated to the IO completion
1086  * routine.
1087  */
1088 static void
1089 xfs_map_direct(
1090 	struct inode		*inode,
1091 	struct buffer_head	*bh_result,
1092 	struct xfs_bmbt_irec	*imap,
1093 	xfs_off_t		offset)
1094 {
1095 	uintptr_t		*flags = (uintptr_t *)&bh_result->b_private;
1096 	xfs_off_t		size = bh_result->b_size;
1097 
1098 	trace_xfs_get_blocks_map_direct(XFS_I(inode), offset, size,
1099 		ISUNWRITTEN(imap) ? XFS_IO_UNWRITTEN : XFS_IO_OVERWRITE, imap);
1100 
1101 	if (ISUNWRITTEN(imap)) {
1102 		*flags |= XFS_DIO_FLAG_UNWRITTEN;
1103 		set_buffer_defer_completion(bh_result);
1104 	} else if (offset + size > i_size_read(inode) || offset + size < 0) {
1105 		*flags |= XFS_DIO_FLAG_APPEND;
1106 		set_buffer_defer_completion(bh_result);
1107 	}
1108 }
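/*
 * The flags set above travel in bh_result->b_private as a uintptr_t bitmask
 * and come back as the 'private' argument to xfs_end_io_direct_write().
 * set_buffer_defer_completion() asks the generic direct I/O code to run that
 * completion from process context (a workqueue) rather than interrupt context,
 * since unwritten conversion and size updates need a transaction.
 */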
1109 
1110 /*
1111  * If this is O_DIRECT or the mpage code calling, tell them how large the mapping
1112  * is, so that we can avoid repeated get_blocks calls.
1113  *
1114  * If the mapping spans EOF, then we have to break the mapping up as the mapping
1115  * for blocks beyond EOF must be marked new so that sub block regions can be
1116  * correctly zeroed. We can't do this for mappings within EOF unless the mapping
1117  * was just allocated or is unwritten, otherwise the callers would overwrite
1118  * existing data with zeros. Hence we have to split the mapping into a range up
1119  * to and including EOF, and a second mapping for beyond EOF.
1120  */
1121 static void
1122 xfs_map_trim_size(
1123 	struct inode		*inode,
1124 	sector_t		iblock,
1125 	struct buffer_head	*bh_result,
1126 	struct xfs_bmbt_irec	*imap,
1127 	xfs_off_t		offset,
1128 	ssize_t			size)
1129 {
1130 	xfs_off_t		mapping_size;
1131 
1132 	mapping_size = imap->br_startoff + imap->br_blockcount - iblock;
1133 	mapping_size <<= inode->i_blkbits;
1134 
1135 	ASSERT(mapping_size > 0);
1136 	if (mapping_size > size)
1137 		mapping_size = size;
1138 	if (offset < i_size_read(inode) &&
1139 	    offset + mapping_size >= i_size_read(inode)) {
1140 		/* limit mapping to block that spans EOF */
1141 		mapping_size = roundup_64(i_size_read(inode) - offset,
1142 					  1 << inode->i_blkbits);
1143 	}
1144 	if (mapping_size > LONG_MAX)
1145 		mapping_size = LONG_MAX;
1146 
1147 	bh_result->b_size = mapping_size;
1148 }
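/*
 * Illustrative example of the trimming above: with 4k blocks, i_size = 10k and
 * a mapping covering blocks 0-3 queried at iblock 2 (offset 8k) with an 8k
 * b_size, the mapping initially spans 8k, but because it crosses EOF it is
 * trimmed to roundup(10k - 8k, 4k) = 4k, i.e. just the block that straddles
 * EOF.
 */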
1149 
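/*
 * Common implementation behind xfs_get_blocks(), xfs_get_blocks_direct() and
 * xfs_get_blocks_dax_fault().  Roughly: map @iblock to an extent under a
 * shared ilock; for a direct write into a hole or delalloc range (or a DAX
 * write over an unwritten extent), allocate or convert blocks via
 * xfs_iomap_write_direct(); otherwise report the existing mapping, trimmed by
 * xfs_map_trim_size().
 */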
1150 STATIC int
1151 __xfs_get_blocks(
1152 	struct inode		*inode,
1153 	sector_t		iblock,
1154 	struct buffer_head	*bh_result,
1155 	int			create,
1156 	bool			direct,
1157 	bool			dax_fault)
1158 {
1159 	struct xfs_inode	*ip = XFS_I(inode);
1160 	struct xfs_mount	*mp = ip->i_mount;
1161 	xfs_fileoff_t		offset_fsb, end_fsb;
1162 	int			error = 0;
1163 	int			lockmode = 0;
1164 	struct xfs_bmbt_irec	imap;
1165 	int			nimaps = 1;
1166 	xfs_off_t		offset;
1167 	ssize_t			size;
1168 	int			new = 0;
1169 
1170 	BUG_ON(create && !direct);
1171 
1172 	if (XFS_FORCED_SHUTDOWN(mp))
1173 		return -EIO;
1174 
1175 	offset = (xfs_off_t)iblock << inode->i_blkbits;
1176 	ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
1177 	size = bh_result->b_size;
1178 
1179 	if (!create && offset >= i_size_read(inode))
1180 		return 0;
1181 
1182 	/*
1183 	 * Direct I/O is usually done on preallocated files, so try getting
1184 	 * a block mapping without an exclusive lock first.
1185 	 */
1186 	lockmode = xfs_ilock_data_map_shared(ip);
1187 
1188 	ASSERT(offset <= mp->m_super->s_maxbytes);
1189 	if (offset + size > mp->m_super->s_maxbytes)
1190 		size = mp->m_super->s_maxbytes - offset;
1191 	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
1192 	offset_fsb = XFS_B_TO_FSBT(mp, offset);
1193 
1194 	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
1195 				&imap, &nimaps, XFS_BMAPI_ENTIRE);
1196 	if (error)
1197 		goto out_unlock;
1198 
1199 	/* for DAX, we convert unwritten extents directly */
1200 	if (create &&
1201 	    (!nimaps ||
1202 	     (imap.br_startblock == HOLESTARTBLOCK ||
1203 	      imap.br_startblock == DELAYSTARTBLOCK) ||
1204 	     (IS_DAX(inode) && ISUNWRITTEN(&imap)))) {
1205 		/*
1206 		 * xfs_iomap_write_direct() expects the shared lock. It
1207 		 * is unlocked on return.
1208 		 */
1209 		if (lockmode == XFS_ILOCK_EXCL)
1210 			xfs_ilock_demote(ip, lockmode);
1211 
1212 		error = xfs_iomap_write_direct(ip, offset, size,
1213 					       &imap, nimaps);
1214 		if (error)
1215 			return error;
1216 		new = 1;
1217 
1218 		trace_xfs_get_blocks_alloc(ip, offset, size,
1219 				ISUNWRITTEN(&imap) ? XFS_IO_UNWRITTEN
1220 						   : XFS_IO_DELALLOC, &imap);
1221 	} else if (nimaps) {
1222 		trace_xfs_get_blocks_found(ip, offset, size,
1223 				ISUNWRITTEN(&imap) ? XFS_IO_UNWRITTEN
1224 						   : XFS_IO_OVERWRITE, &imap);
1225 		xfs_iunlock(ip, lockmode);
1226 	} else {
1227 		trace_xfs_get_blocks_notfound(ip, offset, size);
1228 		goto out_unlock;
1229 	}
1230 
1231 	if (IS_DAX(inode) && create) {
1232 		ASSERT(!ISUNWRITTEN(&imap));
1233 		/* zeroing is not needed at a higher layer */
1234 		new = 0;
1235 	}
1236 
1237 	/* trim mapping down to size requested */
1238 	xfs_map_trim_size(inode, iblock, bh_result, &imap, offset, size);
1239 
1240 	/*
1241 	 * For unwritten extents do not report a disk address in the buffered
1242 	 * read case (treat as if we're reading into a hole).
1243 	 */
1244 	if (imap.br_startblock != HOLESTARTBLOCK &&
1245 	    imap.br_startblock != DELAYSTARTBLOCK &&
1246 	    (create || !ISUNWRITTEN(&imap))) {
1247 		xfs_map_buffer(inode, bh_result, &imap, offset);
1248 		if (ISUNWRITTEN(&imap))
1249 			set_buffer_unwritten(bh_result);
1250 		/* direct IO needs special help */
1251 		if (create) {
1252 			if (dax_fault)
1253 				ASSERT(!ISUNWRITTEN(&imap));
1254 			else
1255 				xfs_map_direct(inode, bh_result, &imap, offset);
1256 		}
1257 	}
1258 
1259 	/*
1260 	 * If this is a realtime file, data may be on a different device
1261 	 * to that pointed to by the buffer_head's b_bdev currently.
1262 	 */
1263 	bh_result->b_bdev = xfs_find_bdev_for_inode(inode);
1264 
1265 	/*
1266 	 * If we previously allocated a block out beyond eof and we are now
1267 	 * coming back to use it then we will need to flag it as new even if it
1268 	 * has a disk address.
1269 	 *
1270 	 * With sub-block writes into unwritten extents we also need to mark
1271 	 * the buffer as new so that the unwritten parts of the buffer get
1272 	 * correctly zeroed.
1273 	 */
1274 	if (create &&
1275 	    ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
1276 	     (offset >= i_size_read(inode)) ||
1277 	     (new || ISUNWRITTEN(&imap))))
1278 		set_buffer_new(bh_result);
1279 
1280 	BUG_ON(direct && imap.br_startblock == DELAYSTARTBLOCK);
1281 
1282 	return 0;
1283 
1284 out_unlock:
1285 	xfs_iunlock(ip, lockmode);
1286 	return error;
1287 }
1288 
1289 int
1290 xfs_get_blocks(
1291 	struct inode		*inode,
1292 	sector_t		iblock,
1293 	struct buffer_head	*bh_result,
1294 	int			create)
1295 {
1296 	return __xfs_get_blocks(inode, iblock, bh_result, create, false, false);
1297 }
1298 
1299 int
1300 xfs_get_blocks_direct(
1301 	struct inode		*inode,
1302 	sector_t		iblock,
1303 	struct buffer_head	*bh_result,
1304 	int			create)
1305 {
1306 	return __xfs_get_blocks(inode, iblock, bh_result, create, true, false);
1307 }
1308 
1309 int
1310 xfs_get_blocks_dax_fault(
1311 	struct inode		*inode,
1312 	sector_t		iblock,
1313 	struct buffer_head	*bh_result,
1314 	int			create)
1315 {
1316 	return __xfs_get_blocks(inode, iblock, bh_result, create, true, true);
1317 }
1318 
1319 /*
1320  * Complete a direct I/O write request.
1321  *
1322  * xfs_map_direct passes us some flags in the private data to tell us what to
1323  * do.  If no flags are set, then the write IO is an overwrite wholly within
1324  * the existing allocated file size and so there is nothing for us to do.
1325  *
1326  * Note that in this case the completion can be called in interrupt context,
1327  * whereas if we have flags set we will always be called in task context
1328  * (i.e. from a workqueue).
1329  */
1330 int
1331 xfs_end_io_direct_write(
1332 	struct kiocb		*iocb,
1333 	loff_t			offset,
1334 	ssize_t			size,
1335 	void			*private)
1336 {
1337 	struct inode		*inode = file_inode(iocb->ki_filp);
1338 	struct xfs_inode	*ip = XFS_I(inode);
1339 	struct xfs_mount	*mp = ip->i_mount;
1340 	uintptr_t		flags = (uintptr_t)private;
1341 	int			error = 0;
1342 
1343 	trace_xfs_end_io_direct_write(ip, offset, size);
1344 
1345 	if (XFS_FORCED_SHUTDOWN(mp))
1346 		return -EIO;
1347 
1348 	if (size <= 0)
1349 		return size;
1350 
1351 	/*
1352 	 * The flags tell us whether we are doing unwritten extent conversions
1353 	 * or an append transaction that updates the on-disk file size. These
1354 	 * are the only cases where we *potentially* need to update the VFS
1355 	 * inode size.
1356 	 */
1357 	if (flags == 0) {
1358 		ASSERT(offset + size <= i_size_read(inode));
1359 		return 0;
1360 	}
1361 
1362 	/*
1363 	 * We need to update the in-core inode size here so that we don't end up
1364 	 * with the on-disk inode size being outside the in-core inode size. We
1365 	 * have no other method of updating EOF for AIO, so always do it here
1366 	 * if necessary.
1367 	 *
1368 	 * We need to lock the test/set EOF update as we can be racing with
1369 	 * other IO completions here to update the EOF. Failing to serialise
1370 	 * here can result in EOF moving backwards and Bad Things Happen when
1371 	 * that occurs.
1372 	 */
1373 	spin_lock(&ip->i_flags_lock);
1374 	if (offset + size > i_size_read(inode))
1375 		i_size_write(inode, offset + size);
1376 	spin_unlock(&ip->i_flags_lock);
1377 
1378 	if (flags & XFS_DIO_FLAG_UNWRITTEN) {
1379 		trace_xfs_end_io_direct_write_unwritten(ip, offset, size);
1380 
1381 		error = xfs_iomap_write_unwritten(ip, offset, size);
1382 	} else if (flags & XFS_DIO_FLAG_APPEND) {
1383 		struct xfs_trans *tp;
1384 
1385 		trace_xfs_end_io_direct_write_append(ip, offset, size);
1386 
1387 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0,
1388 				&tp);
1389 		if (!error)
1390 			error = xfs_setfilesize(ip, tp, offset, size);
1391 	}
1392 
1393 	return error;
1394 }
1395 
1396 STATIC ssize_t
1397 xfs_vm_direct_IO(
1398 	struct kiocb		*iocb,
1399 	struct iov_iter		*iter)
1400 {
1401 	/*
1402 	 * We just need the method present so that open/fcntl allow direct I/O.
1403 	 */
1404 	return -EINVAL;
1405 }
1406 
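/*
 * ->bmap is used by callers such as the FIBMAP ioctl to map a file block to a
 * disk block.  Flush dirty data first, under the shared iolock, so that the
 * generic bmap helper sees mappings that match what is on disk.
 */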
1407 STATIC sector_t
1408 xfs_vm_bmap(
1409 	struct address_space	*mapping,
1410 	sector_t		block)
1411 {
1412 	struct inode		*inode = (struct inode *)mapping->host;
1413 	struct xfs_inode	*ip = XFS_I(inode);
1414 
1415 	trace_xfs_vm_bmap(XFS_I(inode));
1416 	xfs_ilock(ip, XFS_IOLOCK_SHARED);
1417 	filemap_write_and_wait(mapping);
1418 	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
1419 	return generic_block_bmap(mapping, block, xfs_get_blocks);
1420 }
1421 
1422 STATIC int
1423 xfs_vm_readpage(
1424 	struct file		*unused,
1425 	struct page		*page)
1426 {
1427 	trace_xfs_vm_readpage(page->mapping->host, 1);
1428 	return mpage_readpage(page, xfs_get_blocks);
1429 }
1430 
1431 STATIC int
1432 xfs_vm_readpages(
1433 	struct file		*unused,
1434 	struct address_space	*mapping,
1435 	struct list_head	*pages,
1436 	unsigned		nr_pages)
1437 {
1438 	trace_xfs_vm_readpages(mapping->host, nr_pages);
1439 	return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
1440 }
1441 
1442 /*
1443  * This is basically a copy of __set_page_dirty_buffers() with one
1444  * small tweak: buffers beyond EOF do not get marked dirty. If we mark them
1445  * dirty, we'll never be able to clean them because we don't write buffers
1446  * beyond EOF, and that means we can't invalidate pages that span EOF
1447  * that have been marked dirty. Further, the dirty state can leak into
1448  * the file interior if the file is extended, resulting in all sorts of
1449  * bad things happening as the state does not match the underlying data.
1450  *
1451  * XXX: this really indicates that bufferheads in XFS need to die. Warts like
1452  * this only exist because of bufferheads and how the generic code manages them.
1453  */
1454 STATIC int
1455 xfs_vm_set_page_dirty(
1456 	struct page		*page)
1457 {
1458 	struct address_space	*mapping = page->mapping;
1459 	struct inode		*inode = mapping->host;
1460 	loff_t			end_offset;
1461 	loff_t			offset;
1462 	int			newly_dirty;
1463 
1464 	if (unlikely(!mapping))
1465 		return !TestSetPageDirty(page);
1466 
1467 	end_offset = i_size_read(inode);
1468 	offset = page_offset(page);
1469 
1470 	spin_lock(&mapping->private_lock);
1471 	if (page_has_buffers(page)) {
1472 		struct buffer_head *head = page_buffers(page);
1473 		struct buffer_head *bh = head;
1474 
1475 		do {
1476 			if (offset < end_offset)
1477 				set_buffer_dirty(bh);
1478 			bh = bh->b_this_page;
1479 			offset += 1 << inode->i_blkbits;
1480 		} while (bh != head);
1481 	}
1482 	/*
1483 	 * Lock out page->mem_cgroup migration to keep PageDirty
1484 	 * synchronized with per-memcg dirty page counters.
1485 	 */
1486 	lock_page_memcg(page);
1487 	newly_dirty = !TestSetPageDirty(page);
1488 	spin_unlock(&mapping->private_lock);
1489 
1490 	if (newly_dirty) {
1491 		/* sigh - __set_page_dirty() is static, so copy it here, too */
1492 		unsigned long flags;
1493 
1494 		spin_lock_irqsave(&mapping->tree_lock, flags);
1495 		if (page->mapping) {	/* Race with truncate? */
1496 			WARN_ON_ONCE(!PageUptodate(page));
1497 			account_page_dirtied(page, mapping);
1498 			radix_tree_tag_set(&mapping->page_tree,
1499 					page_index(page), PAGECACHE_TAG_DIRTY);
1500 		}
1501 		spin_unlock_irqrestore(&mapping->tree_lock, flags);
1502 	}
1503 	unlock_page_memcg(page);
1504 	if (newly_dirty)
1505 		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
1506 	return newly_dirty;
1507 }
1508 
1509 const struct address_space_operations xfs_address_space_operations = {
1510 	.readpage		= xfs_vm_readpage,
1511 	.readpages		= xfs_vm_readpages,
1512 	.writepage		= xfs_vm_writepage,
1513 	.writepages		= xfs_vm_writepages,
1514 	.set_page_dirty		= xfs_vm_set_page_dirty,
1515 	.releasepage		= xfs_vm_releasepage,
1516 	.invalidatepage		= xfs_vm_invalidatepage,
1517 	.bmap			= xfs_vm_bmap,
1518 	.direct_IO		= xfs_vm_direct_IO,
1519 	.migratepage		= buffer_migrate_page,
1520 	.is_partially_uptodate  = block_is_partially_uptodate,
1521 	.error_remove_page	= generic_error_remove_page,
1522 };
1523