xref: /openbmc/linux/fs/xfs/xfs_aops.c (revision ba61bb17)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_reflink.h"
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>

/*
 * structure owned by writepages passed to individual writepage calls
 */
struct xfs_writepage_ctx {
	struct xfs_bmbt_irec    imap;
	bool			imap_valid;
	unsigned int		io_type;
	struct xfs_ioend	*ioend;
	sector_t		last_block;
};

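/*
 * Lifecycle sketch (mirroring xfs_vm_writepages() below): the context is
 * stack allocated by the writepage entry points and threaded through
 * write_cache_pages():
 *
 *	struct xfs_writepage_ctx wpc = { .io_type = XFS_IO_INVALID };
 *	int ret;
 *
 *	ret = write_cache_pages(mapping, wbc, xfs_do_writepage, &wpc);
 *	if (wpc.ioend)
 *		ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
 *
 * Caching the imap and the current ioend here lets consecutive pages that
 * fall in the same extent share one ioend instead of allocating one per
 * page.
 */
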
void
xfs_count_page_state(
	struct page		*page,
	int			*delalloc,
	int			*unwritten)
{
	struct buffer_head	*bh, *head;

	*delalloc = *unwritten = 0;

	bh = head = page_buffers(page);
	do {
		if (buffer_unwritten(bh))
			(*unwritten) = 1;
		else if (buffer_delay(bh))
			(*delalloc) = 1;
	} while ((bh = bh->b_this_page) != head);
}

struct block_device *
xfs_find_bdev_for_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_bdev;
	else
		return mp->m_ddev_targp->bt_bdev;
}

struct dax_device *
xfs_find_daxdev_for_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_daxdev;
	else
		return mp->m_ddev_targp->bt_daxdev;
}

/*
 * We're now finished for good with this page.  Update the page state via the
 * associated buffer_heads, paying attention to the start and end offsets that
 * we need to process on the page.
 *
 * Note that we open code the action in end_buffer_async_write here so that we
 * only have to iterate over the buffers attached to the page once.  This is not
 * only more efficient, but also ensures that we only call end_page_writeback
 * at the end of the iteration, and thus avoids the pitfall of having the page
 * and buffers potentially freed after every call to end_buffer_async_write.
 */
static void
xfs_finish_page_writeback(
	struct inode		*inode,
	struct bio_vec		*bvec,
	int			error)
{
	struct buffer_head	*head = page_buffers(bvec->bv_page), *bh = head;
	bool			busy = false;
	unsigned int		off = 0;
	unsigned long		flags;

	ASSERT(bvec->bv_offset < PAGE_SIZE);
	ASSERT((bvec->bv_offset & (i_blocksize(inode) - 1)) == 0);
	ASSERT(bvec->bv_offset + bvec->bv_len <= PAGE_SIZE);
	ASSERT((bvec->bv_len & (i_blocksize(inode) - 1)) == 0);

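	/*
	 * BH_Uptodate_Lock serialises buffer state updates here against any
	 * other async write completion that may be running against buffers
	 * on the same page, just as end_buffer_async_write() does.
	 */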
	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &head->b_state);
	do {
		if (off >= bvec->bv_offset &&
		    off < bvec->bv_offset + bvec->bv_len) {
			ASSERT(buffer_async_write(bh));
			ASSERT(bh->b_end_io == NULL);

			if (error) {
				mark_buffer_write_io_error(bh);
				clear_buffer_uptodate(bh);
				SetPageError(bvec->bv_page);
			} else {
				set_buffer_uptodate(bh);
			}
			clear_buffer_async_write(bh);
			unlock_buffer(bh);
		} else if (buffer_async_write(bh)) {
			ASSERT(buffer_locked(bh));
			busy = true;
		}
		off += bh->b_size;
	} while ((bh = bh->b_this_page) != head);
	bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
	local_irq_restore(flags);

	if (!busy)
		end_page_writeback(bvec->bv_page);
}

/*
 * We're now finished for good with this ioend structure.  Update the page
 * state, release holds on bios, and finally free up memory.  Do not use the
 * ioend after this.
 */
STATIC void
xfs_destroy_ioend(
	struct xfs_ioend	*ioend,
	int			error)
{
	struct inode		*inode = ioend->io_inode;
	struct bio		*bio = &ioend->io_inline_bio;
	struct bio		*last = ioend->io_bio, *next;
	u64			start = bio->bi_iter.bi_sector;
	bool			quiet = bio_flagged(bio, BIO_QUIET);

	for (bio = &ioend->io_inline_bio; bio; bio = next) {
		struct bio_vec	*bvec;
		int		i;

		/*
		 * For the last bio, bi_private points to the ioend, so we
		 * need to explicitly end the iteration here.
		 */
		if (bio == last)
			next = NULL;
		else
			next = bio->bi_private;

		/* walk each page on bio, ending page IO on them */
		bio_for_each_segment_all(bvec, bio, i)
			xfs_finish_page_writeback(inode, bvec, error);

		bio_put(bio);
	}

	if (unlikely(error && !quiet)) {
		xfs_err_ratelimited(XFS_I(inode)->i_mount,
			"writeback error on sector %llu", start);
	}
}

/*
 * Fast and loose check if this write could update the on-disk inode size.
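 *
 * For example, with an on-disk size (i_d.di_size) of 6000 bytes, an ioend
 * covering bytes [4096, 8192) counts as an append (8192 > 6000) and needs a
 * file size update at completion, while one covering [0, 4096) does not.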
 */
static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
{
	return ioend->io_offset + ioend->io_size >
		XFS_I(ioend->io_inode)->i_d.di_size;
}

STATIC int
xfs_setfilesize_trans_alloc(
	struct xfs_ioend	*ioend)
{
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0,
				XFS_TRANS_NOFS, &tp);
	if (error)
		return error;

	ioend->io_append_trans = tp;

	/*
	 * We may pass freeze protection with a transaction.  So tell lockdep
	 * we released it.
	 */
	__sb_writers_release(ioend->io_inode->i_sb, SB_FREEZE_FS);
	/*
	 * We hand off the transaction to the completion thread now, so
	 * clear the flag here.
	 */
	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
	return 0;
}

/*
 * Update on-disk file size now that data has been written to disk.
 */
STATIC int
__xfs_setfilesize(
	struct xfs_inode	*ip,
	struct xfs_trans	*tp,
	xfs_off_t		offset,
	size_t			size)
{
	xfs_fsize_t		isize;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	isize = xfs_new_eof(ip, offset + size);
	if (!isize) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_trans_cancel(tp);
		return 0;
	}

	trace_xfs_setfilesize(ip, offset, size);

	ip->i_d.di_size = isize;
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	return xfs_trans_commit(tp);
}

int
xfs_setfilesize(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	size_t			size)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
	if (error)
		return error;

	return __xfs_setfilesize(ip, tp, offset, size);
}

STATIC int
xfs_setfilesize_ioend(
	struct xfs_ioend	*ioend,
	int			error)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	struct xfs_trans	*tp = ioend->io_append_trans;

	/*
	 * The transaction may have been allocated in the I/O submission thread,
	 * thus we need to mark ourselves as being in a transaction manually.
	 * Similarly for freeze protection.
	 */
	current_set_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
	__sb_writers_acquired(VFS_I(ip)->i_sb, SB_FREEZE_FS);

	/* we abort the update if there was an IO error */
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	return __xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);
}

/*
 * IO write completion.
 */
STATIC void
xfs_end_io(
	struct work_struct *work)
{
	struct xfs_ioend	*ioend =
		container_of(work, struct xfs_ioend, io_work);
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	xfs_off_t		offset = ioend->io_offset;
	size_t			size = ioend->io_size;
	int			error;

	/*
	 * Just clean up the in-memory structures if the fs has been shut down.
	 */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		error = -EIO;
		goto done;
	}

	/*
	 * Clean up any COW blocks on an I/O error.
	 */
	error = blk_status_to_errno(ioend->io_bio->bi_status);
	if (unlikely(error)) {
		switch (ioend->io_type) {
		case XFS_IO_COW:
			xfs_reflink_cancel_cow_range(ip, offset, size, true);
			break;
		}

		goto done;
	}

	/*
	 * Success:  commit the COW or unwritten blocks if needed.
	 */
	switch (ioend->io_type) {
	case XFS_IO_COW:
		error = xfs_reflink_end_cow(ip, offset, size);
		break;
	case XFS_IO_UNWRITTEN:
		/* writeback should never update isize */
		error = xfs_iomap_write_unwritten(ip, offset, size, false);
		break;
	default:
		ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_append_trans);
		break;
	}

done:
	if (ioend->io_append_trans)
		error = xfs_setfilesize_ioend(ioend, error);
	xfs_destroy_ioend(ioend, error);
}

STATIC void
xfs_end_bio(
	struct bio		*bio)
{
	struct xfs_ioend	*ioend = bio->bi_private;
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;

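	/*
	 * Unwritten extent conversion and COW remapping require transactions
	 * and so cannot run in bio completion context; punt them to a
	 * workqueue.  The same goes for completions that must update the
	 * on-disk file size via the preallocated append transaction.
	 * Everything else can be finished off right here.
	 */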
	if (ioend->io_type == XFS_IO_UNWRITTEN || ioend->io_type == XFS_IO_COW)
		queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
	else if (ioend->io_append_trans)
		queue_work(mp->m_data_workqueue, &ioend->io_work);
	else
		xfs_destroy_ioend(ioend, blk_status_to_errno(bio->bi_status));
}

STATIC int
xfs_map_blocks(
	struct inode		*inode,
	loff_t			offset,
	struct xfs_bmbt_irec	*imap,
	int			type)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			count = i_blocksize(inode);
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			error = 0;
	int			bmapi_flags = XFS_BMAPI_ENTIRE;
	int			nimaps = 1;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	/*
	 * Truncate can race with writeback since writeback doesn't take the
	 * iolock and truncate decreases the file size before it starts
	 * truncating the pages between new_size and old_size.  Therefore, we
	 * can end up in the situation where writeback gets a CoW fork mapping
	 * but the truncate makes the mapping invalid and we end up in here
	 * trying to get a new mapping.  Bail out here so that we simply never
	 * get a valid mapping and so we drop the write altogether.  The page
	 * truncation will kill the contents anyway.
	 */
	if (type == XFS_IO_COW && offset > i_size_read(inode))
		return 0;

	ASSERT(type != XFS_IO_COW);
	if (type == XFS_IO_UNWRITTEN)
		bmapi_flags |= XFS_BMAPI_IGSTATE;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       (ip->i_df.if_flags & XFS_IFEXTENTS));
	ASSERT(offset <= mp->m_super->s_maxbytes);

	if (offset > mp->m_super->s_maxbytes - count)
		count = mp->m_super->s_maxbytes - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
				imap, &nimaps, bmapi_flags);
	/*
	 * Truncate an overwrite extent if there's a pending CoW
	 * reservation before the end of this extent.  This forces us
	 * to come back to writepage to take care of the CoW.
	 */
	if (nimaps && type == XFS_IO_OVERWRITE)
		xfs_reflink_trim_irec_to_next_cow(ip, offset_fsb, imap);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (error)
		return error;

	if (type == XFS_IO_DELALLOC &&
	    (!nimaps || isnullstartblock(imap->br_startblock))) {
		error = xfs_iomap_write_allocate(ip, XFS_DATA_FORK, offset,
				imap);
		if (!error)
			trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
		return error;
	}

#ifdef DEBUG
	if (type == XFS_IO_UNWRITTEN) {
		ASSERT(nimaps);
		ASSERT(imap->br_startblock != HOLESTARTBLOCK);
		ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
	}
#endif
	if (nimaps)
		trace_xfs_map_blocks_found(ip, offset, count, type, imap);
	return 0;
}

STATIC bool
xfs_imap_valid(
	struct inode		*inode,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
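	/* convert the byte offset into units of filesystem blocks */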
	offset >>= inode->i_blkbits;

	/*
	 * We have to make sure the cached mapping is within EOF to protect
	 * against eofblocks trimming on file release leaving us with a stale
	 * mapping. Otherwise, a page for a subsequent file extending buffered
	 * write could get picked up by this writeback cycle and written to the
	 * wrong blocks.
	 *
	 * Note that what we really want here is a generic mapping invalidation
	 * mechanism to protect us from arbitrary extent modifying contexts, not
	 * just eofblocks.
	 */
	xfs_trim_extent_eof(imap, XFS_I(inode));

	return offset >= imap->br_startoff &&
		offset < imap->br_startoff + imap->br_blockcount;
}

STATIC void
xfs_start_buffer_writeback(
	struct buffer_head	*bh)
{
	ASSERT(buffer_mapped(bh));
	ASSERT(buffer_locked(bh));
	ASSERT(!buffer_delay(bh));
	ASSERT(!buffer_unwritten(bh));

	bh->b_end_io = NULL;
	set_buffer_async_write(bh);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
}

STATIC void
xfs_start_page_writeback(
	struct page		*page,
	int			clear_dirty)
{
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));

	/*
	 * if the page was not fully cleaned, we need to ensure that the higher
	 * layers come back to it correctly. That means we need to keep the page
	 * dirty, and for WB_SYNC_ALL writeback we need to ensure the
	 * PAGECACHE_TAG_TOWRITE index mark is not removed so another attempt to
	 * write this page in this writeback sweep will be made.
	 */
	if (clear_dirty) {
		clear_page_dirty_for_io(page);
		set_page_writeback(page);
	} else
		set_page_writeback_keepwrite(page);

	unlock_page(page);
}

static inline int xfs_bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
	return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}

/*
 * Submit the bio for an ioend. We are passed an ioend with a bio attached to
 * it, and we submit that bio. The ioend may be used for multiple bio
 * submissions, so we only want to allocate an append transaction for the ioend
 * once. In the case of multiple bio submission, each bio will take an IO
 * reference to the ioend to ensure that the ioend completion is only done once
 * all bios have been submitted and the ioend is really done.
 *
 * If @status is non-zero, it means that we have a situation where some part of
 * the submission process has failed after we have marked pages for writeback
 * and unlocked them. In this situation, we need to fail the bio and ioend
 * rather than submit it to IO. This typically only happens on a filesystem
 * shutdown.
 */
STATIC int
xfs_submit_ioend(
	struct writeback_control *wbc,
	struct xfs_ioend	*ioend,
	int			status)
{
	/* Convert CoW extents to regular */
	if (!status && ioend->io_type == XFS_IO_COW) {
		/*
		 * Yuk. This can do memory allocation, but is not a
		 * transactional operation so everything is done in GFP_KERNEL
		 * context. That can deadlock, because we hold pages in
		 * writeback state and GFP_KERNEL allocations can block on them.
		 * Hence we must operate in nofs conditions here.
		 */
		unsigned nofs_flag;

		nofs_flag = memalloc_nofs_save();
		status = xfs_reflink_convert_cow(XFS_I(ioend->io_inode),
				ioend->io_offset, ioend->io_size);
		memalloc_nofs_restore(nofs_flag);
	}

	/* Reserve log space if we might write beyond the on-disk inode size. */
	if (!status &&
	    ioend->io_type != XFS_IO_UNWRITTEN &&
	    xfs_ioend_is_append(ioend) &&
	    !ioend->io_append_trans)
		status = xfs_setfilesize_trans_alloc(ioend);

	ioend->io_bio->bi_private = ioend;
	ioend->io_bio->bi_end_io = xfs_end_bio;
	ioend->io_bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);

	/*
	 * If we are failing the IO now, just mark the ioend with an
	 * error and finish it. This will run IO completion immediately
	 * as there is only one reference to the ioend at this point in
	 * time.
	 */
	if (status) {
		ioend->io_bio->bi_status = errno_to_blk_status(status);
		bio_endio(ioend->io_bio);
		return status;
	}

	ioend->io_bio->bi_write_hint = ioend->io_inode->i_write_hint;
	submit_bio(ioend->io_bio);
	return 0;
}

static void
xfs_init_bio_from_bh(
	struct bio		*bio,
	struct buffer_head	*bh)
{
	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio_set_dev(bio, bh->b_bdev);
}

static struct xfs_ioend *
xfs_alloc_ioend(
	struct inode		*inode,
	unsigned int		type,
	xfs_off_t		offset,
	struct buffer_head	*bh)
{
	struct xfs_ioend	*ioend;
	struct bio		*bio;

	bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, &xfs_ioend_bioset);
	xfs_init_bio_from_bh(bio, bh);

	ioend = container_of(bio, struct xfs_ioend, io_inline_bio);
	INIT_LIST_HEAD(&ioend->io_list);
	ioend->io_type = type;
	ioend->io_inode = inode;
	ioend->io_size = 0;
	ioend->io_offset = offset;
	INIT_WORK(&ioend->io_work, xfs_end_io);
	ioend->io_append_trans = NULL;
	ioend->io_bio = bio;
	return ioend;
}

/*
 * Allocate a new bio, and chain the old bio to the new one.
 *
 * Note that we have to perform the chaining in this unintuitive order
 * so that the bi_private linkage is set up in the right direction for the
 * traversal in xfs_destroy_ioend().
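 *
 * After bio_chain(ioend->io_bio, new), the old bio's bi_private points at
 * the new bio.  Starting from ioend->io_inline_bio and following bi_private
 * therefore visits the bios in submission order, terminating at
 * ioend->io_bio, whose bi_private is the ioend itself.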
 */
static void
xfs_chain_bio(
	struct xfs_ioend	*ioend,
	struct writeback_control *wbc,
	struct buffer_head	*bh)
{
	struct bio *new;

	new = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
	xfs_init_bio_from_bh(new, bh);

	bio_chain(ioend->io_bio, new);
	bio_get(ioend->io_bio);		/* for xfs_destroy_ioend */
	ioend->io_bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
	ioend->io_bio->bi_write_hint = ioend->io_inode->i_write_hint;
	submit_bio(ioend->io_bio);
	ioend->io_bio = new;
}

/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 * Any ioend we finish off is queued on @iolist so that the caller can
 * submit it once it has finished processing the dirty page.
 */
STATIC void
xfs_add_to_ioend(
	struct inode		*inode,
	struct buffer_head	*bh,
	xfs_off_t		offset,
	struct xfs_writepage_ctx *wpc,
	struct writeback_control *wbc,
	struct list_head	*iolist)
{
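	/*
	 * A buffer can only join the cached ioend if it has the same IO type
	 * and is both physically (b_blocknr) and logically (offset)
	 * contiguous with it; otherwise, queue the old ioend on @iolist and
	 * start a new one.
	 */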
	if (!wpc->ioend || wpc->io_type != wpc->ioend->io_type ||
	    bh->b_blocknr != wpc->last_block + 1 ||
	    offset != wpc->ioend->io_offset + wpc->ioend->io_size) {
		if (wpc->ioend)
			list_add(&wpc->ioend->io_list, iolist);
		wpc->ioend = xfs_alloc_ioend(inode, wpc->io_type, offset, bh);
	}

	/*
	 * If the buffer doesn't fit into the bio we need to allocate a new
	 * one.  This shouldn't happen more than once for a given buffer.
	 */
	while (xfs_bio_add_buffer(wpc->ioend->io_bio, bh) != bh->b_size)
		xfs_chain_bio(wpc->ioend, wbc, bh);

	wpc->ioend->io_size += bh->b_size;
	wpc->last_block = bh->b_blocknr;
	xfs_start_buffer_writeback(bh);
}

STATIC void
xfs_map_buffer(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	sector_t		bn;
	struct xfs_mount	*m = XFS_I(inode)->i_mount;
	xfs_off_t		iomap_offset = XFS_FSB_TO_B(m, imap->br_startoff);
	xfs_daddr_t		iomap_bn = xfs_fsb_to_db(XFS_I(inode), imap->br_startblock);

	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

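	/*
	 * iomap_bn is in 512-byte basic blocks; shift it into units of the
	 * inode's block size, then add the block offset of @offset within
	 * the mapping to get the block number for the buffer_head.
	 */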
	bn = (iomap_bn >> (inode->i_blkbits - BBSHIFT)) +
	      ((offset - iomap_offset) >> inode->i_blkbits);

	ASSERT(bn || XFS_IS_REALTIME_INODE(XFS_I(inode)));

	bh->b_blocknr = bn;
	set_buffer_mapped(bh);
}

STATIC void
xfs_map_at_offset(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	xfs_map_buffer(inode, bh, imap, offset);
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
}

/*
 * Test if a given page contains at least one buffer of a given @type.
 * If @check_all_buffers is true, then we walk all the buffers in the page to
 * try to find one of the type passed in. If it is not set, then the caller only
 * needs to check the first buffer on the page for a match.
 */
STATIC bool
xfs_check_page_type(
	struct page		*page,
	unsigned int		type,
	bool			check_all_buffers)
{
	struct buffer_head	*bh;
	struct buffer_head	*head;

	if (PageWriteback(page))
		return false;
	if (!page->mapping)
		return false;
	if (!page_has_buffers(page))
		return false;

	bh = head = page_buffers(page);
	do {
		if (buffer_unwritten(bh)) {
			if (type == XFS_IO_UNWRITTEN)
				return true;
		} else if (buffer_delay(bh)) {
			if (type == XFS_IO_DELALLOC)
				return true;
		} else if (buffer_dirty(bh) && buffer_mapped(bh)) {
			if (type == XFS_IO_OVERWRITE)
				return true;
		}

		/* If we are only checking the first buffer, we are done now. */
		if (!check_all_buffers)
			break;
	} while ((bh = bh->b_this_page) != head);

	return false;
}

STATIC void
xfs_vm_invalidatepage(
	struct page		*page,
	unsigned int		offset,
	unsigned int		length)
{
	trace_xfs_invalidatepage(page->mapping->host, page, offset,
				 length);

	/*
	 * If we are invalidating the entire page, clear the dirty state from it
	 * so that we can check for attempts to release dirty cached pages in
	 * xfs_vm_releasepage().
	 */
	if (offset == 0 && length >= PAGE_SIZE)
		cancel_dirty_page(page);
	block_invalidatepage(page, offset, length);
}

/*
 * If the page has delalloc buffers on it, we need to punch them out before we
 * invalidate the page. If we don't, we leave a stale delalloc mapping on the
 * inode that can trip a BUG() in xfs_get_blocks() later on if a direct IO read
 * is done on that same region - the delalloc extent is returned when none is
 * supposed to be there.
 *
 * We prevent this by truncating away the delalloc regions on the page before
 * invalidating it. Because they are delalloc, we can do this without needing a
 * transaction. Indeed - if we get ENOSPC errors, we have to be able to do this
 * truncation without a transaction as there is no space left for block
 * reservation (typically why we see an ENOSPC in writeback).
 *
 * This is not a performance critical path, so for now just do the punching a
 * buffer head at a time.
 */
STATIC void
xfs_aops_discard_page(
	struct page		*page)
{
	struct inode		*inode = page->mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct buffer_head	*bh, *head;
	loff_t			offset = page_offset(page);

	if (!xfs_check_page_type(page, XFS_IO_DELALLOC, true))
		goto out_invalidate;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		goto out_invalidate;

	xfs_alert(ip->i_mount,
		"page discard on page "PTR_FMT", inode 0x%llx, offset %llu.",
			page, ip->i_ino, offset);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	bh = head = page_buffers(page);
	do {
		int		error;
		xfs_fileoff_t	start_fsb;

		if (!buffer_delay(bh))
			goto next_buffer;

		start_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
		error = xfs_bmap_punch_delalloc_range(ip, start_fsb, 1);
		if (error) {
			/* something screwed, just bail */
			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
				xfs_alert(ip->i_mount,
			"page discard unable to remove delalloc mapping.");
			}
			break;
		}
next_buffer:
		offset += i_blocksize(inode);

	} while ((bh = bh->b_this_page) != head);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out_invalidate:
	xfs_vm_invalidatepage(page, 0, PAGE_SIZE);
	return;
}

static int
xfs_map_cow(
	struct xfs_writepage_ctx *wpc,
	struct inode		*inode,
	loff_t			offset,
	unsigned int		*new_type)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_bmbt_irec	imap;
	bool			is_cow = false;
	int			error;

	/*
	 * If we already have a valid COW mapping, keep using it.
	 */
	if (wpc->io_type == XFS_IO_COW) {
		wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap, offset);
		if (wpc->imap_valid) {
			*new_type = XFS_IO_COW;
			return 0;
		}
	}

	/*
	 * Else we need to check if there is a COW mapping at this offset.
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	is_cow = xfs_reflink_find_cow_mapping(ip, offset, &imap);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (!is_cow)
		return 0;

	/*
	 * And if the COW mapping has a delayed extent here we need to
	 * allocate real space for it now.
	 */
	if (isnullstartblock(imap.br_startblock)) {
		error = xfs_iomap_write_allocate(ip, XFS_COW_FORK, offset,
				&imap);
		if (error)
			return error;
	}

	wpc->io_type = *new_type = XFS_IO_COW;
	wpc->imap_valid = true;
	wpc->imap = imap;
	return 0;
}

/*
 * We implement an immediate ioend submission policy here to avoid needing to
 * chain multiple ioends and hence nest mempool allocations which can violate
 * forward progress guarantees we need to provide. The current ioend we are
 * adding buffers to is cached on the writepage context, and if the new buffer
 * does not append to the cached ioend, it will create a new ioend and cache that
 * instead.
 *
 * If a new ioend is created and cached, the old ioend is returned and queued
 * locally for submission once the entire page is processed or an error has been
 * detected.  While ioends are submitted immediately after they are completed,
 * batching optimisations are provided by higher level block plugging.
 *
 * At the end of a writeback pass, there will be a cached ioend remaining on the
 * writepage context that the caller will need to submit.
 */
static int
xfs_writepage_map(
	struct xfs_writepage_ctx *wpc,
	struct writeback_control *wbc,
	struct inode		*inode,
	struct page		*page,
	uint64_t		end_offset)
{
	LIST_HEAD(submit_list);
	struct xfs_ioend	*ioend, *next;
	struct buffer_head	*bh, *head;
	ssize_t			len = i_blocksize(inode);
	uint64_t		offset;
	int			error = 0;
	int			count = 0;
	int			uptodate = 1;
	unsigned int		new_type;

	bh = head = page_buffers(page);
	offset = page_offset(page);
	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;

		/*
		 * set_page_dirty dirties all buffers in a page, independent
		 * of their state.  The dirty state however is entirely
		 * meaningless for holes (!mapped && uptodate), so skip
		 * buffers covering holes here.
		 */
		if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
			wpc->imap_valid = false;
			continue;
		}

		if (buffer_unwritten(bh))
			new_type = XFS_IO_UNWRITTEN;
		else if (buffer_delay(bh))
			new_type = XFS_IO_DELALLOC;
		else if (buffer_uptodate(bh))
			new_type = XFS_IO_OVERWRITE;
		else {
			if (PageUptodate(page))
				ASSERT(buffer_mapped(bh));
			/*
			 * This buffer is not uptodate and will not be
			 * written to disk.  Ensure that we will put any
			 * subsequent writeable buffers into a new
			 * ioend.
			 */
			wpc->imap_valid = false;
			continue;
		}

		if (xfs_is_reflink_inode(XFS_I(inode))) {
			error = xfs_map_cow(wpc, inode, offset, &new_type);
			if (error)
				goto out;
		}

		if (wpc->io_type != new_type) {
			wpc->io_type = new_type;
			wpc->imap_valid = false;
		}

		if (wpc->imap_valid)
			wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap,
							 offset);
		if (!wpc->imap_valid) {
			error = xfs_map_blocks(inode, offset, &wpc->imap,
					     wpc->io_type);
			if (error)
				goto out;
			wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap,
							 offset);
		}
		if (wpc->imap_valid) {
			lock_buffer(bh);
			if (wpc->io_type != XFS_IO_OVERWRITE)
				xfs_map_at_offset(inode, bh, &wpc->imap, offset);
			xfs_add_to_ioend(inode, bh, offset, wpc, wbc, &submit_list);
			count++;
		}

	} while (offset += len, ((bh = bh->b_this_page) != head));

	if (uptodate && bh == head)
		SetPageUptodate(page);

	ASSERT(wpc->ioend || list_empty(&submit_list));

out:
	/*
	 * On error, we have to fail the ioend here because we have locked
	 * buffers in the ioend. If we don't do this, we'll deadlock
	 * invalidating the page as that tries to lock the buffers on the page.
	 * Also, because we may have set pages under writeback, we have to make
	 * sure we run IO completion to mark the error state of the IO
	 * appropriately, so we can't cancel the ioend directly here. That means
	 * we have to mark this page as under writeback if we included any
	 * buffers from it in the ioend chain so that completion treats it
	 * correctly.
	 *
	 * If we didn't include the page in the ioend, then on error we can
	 * simply discard and unlock it as there are no other users of the page
	 * or its buffers right now. The caller will still need to trigger
	 * submission of outstanding ioends on the writepage context so they are
	 * treated correctly on error.
	 */
	if (count) {
		xfs_start_page_writeback(page, !error);

		/*
		 * Preserve the original error if there was one, otherwise catch
		 * submission errors here and propagate into subsequent ioend
		 * submissions.
		 */
		list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
			int error2;

			list_del_init(&ioend->io_list);
			error2 = xfs_submit_ioend(wbc, ioend, error);
			if (error2 && !error)
				error = error2;
		}
	} else if (error) {
		xfs_aops_discard_page(page);
		ClearPageUptodate(page);
		unlock_page(page);
	} else {
		/*
		 * We can end up here with no error and nothing to write if we
		 * race with a partial page truncate on a sub-page block sized
		 * filesystem. In that case we need to mark the page clean.
		 */
		xfs_start_page_writeback(page, 1);
		end_page_writeback(page);
	}

	mapping_set_error(page->mapping, error);
	return error;
}

/*
 * Write out a dirty page.
 *
 * For delalloc space on the page we need to allocate space and flush it.
 * For unwritten space on the page we need to start the conversion to
 * regular allocated space.
 * For any other dirty buffer heads on the page we should flush them.
 */
STATIC int
xfs_do_writepage(
	struct page		*page,
	struct writeback_control *wbc,
	void			*data)
{
	struct xfs_writepage_ctx *wpc = data;
	struct inode		*inode = page->mapping->host;
	loff_t			offset;
	uint64_t		end_offset;
	pgoff_t			end_index;

	trace_xfs_writepage(inode, page, 0, 0);

	ASSERT(page_has_buffers(page));

	/*
	 * Refuse to write the page out if we are called from reclaim context.
	 *
	 * This avoids stack overflows when called from deeply used stacks in
	 * random callers for direct reclaim or memcg reclaim.  We explicitly
	 * allow reclaim from kswapd as the stack usage there is relatively low.
	 *
	 * This should never happen except in the case of a VM regression so
	 * warn about it.
	 */
	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
			PF_MEMALLOC))
		goto redirty;

	/*
	 * Given that we do not allow direct reclaim to call us, we should
	 * never be called while in a filesystem transaction.
	 */
	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC_NOFS))
		goto redirty;

	/*
	 * Is this page beyond the end of the file?
	 *
	 * If the page index is less than end_index, adjust end_offset to the
	 * highest offset that this page should represent.
	 * -----------------------------------------------------
	 * |			file mapping	       | <EOF> |
	 * -----------------------------------------------------
	 * | Page ... | Page N-2 | Page N-1 |  Page N  |       |
	 * ^--------------------------------^----------|--------
	 * |     desired writeback range    |      see else    |
	 * ---------------------------------^------------------|
	 */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_SHIFT;
	if (page->index < end_index)
		end_offset = (xfs_off_t)(page->index + 1) << PAGE_SHIFT;
	else {
		/*
		 * Check whether the page to write out is beyond or straddles
		 * i_size or not.
		 * -------------------------------------------------------
		 * |		file mapping		        | <EOF>  |
		 * -------------------------------------------------------
		 * | Page ... | Page N-2 | Page N-1 |  Page N   | Beyond |
		 * ^--------------------------------^-----------|---------
		 * |				    |      Straddles     |
		 * ---------------------------------^-----------|--------|
		 */
		unsigned offset_into_page = offset & (PAGE_SIZE - 1);

		/*
		 * Skip the page if it is fully outside i_size, e.g. due to a
		 * truncate operation that is in progress. We must redirty the
		 * page so that reclaim stops reclaiming it. Otherwise
		 * xfs_vm_releasepage() is called on it and gets confused.
		 *
		 * Note that end_index is an unsigned long.  If the given
		 * offset is greater than 16TB on a 32-bit system, then a
		 * check of the form "if (page->index >= end_index + 1)"
		 * would see "end_index + 1" overflow and evaluate to 0.
		 * The page would then be redirtied and written out
		 * repeatedly, resulting in an infinite loop; the user
		 * program performing the operation would hang.  Instead,
		 * we can verify this situation by checking if the page to
		 * write is totally beyond i_size or if its offset is just
		 * equal to the EOF.
		 */
		if (page->index > end_index ||
		    (page->index == end_index && offset_into_page == 0))
			goto redirty;

		/*
		 * The page straddles i_size.  It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size.  For a file
		 * that is not a multiple of the page size, the remaining
		 * memory is zeroed when mapped, and writes to that region are
		 * not written out to the file."
		 */
		zero_user_segment(page, offset_into_page, PAGE_SIZE);

		/* Adjust the end_offset to the end of file */
		end_offset = offset;
	}

	return xfs_writepage_map(wpc, wbc, inode, page, end_offset);

redirty:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}

STATIC int
xfs_vm_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	struct xfs_writepage_ctx wpc = {
		.io_type = XFS_IO_INVALID,
	};
	int			ret;

	ret = xfs_do_writepage(page, wbc, &wpc);
	if (wpc.ioend)
		ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
	return ret;
}

STATIC int
xfs_vm_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	struct xfs_writepage_ctx wpc = {
		.io_type = XFS_IO_INVALID,
	};
	int			ret;

	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	ret = write_cache_pages(mapping, wbc, xfs_do_writepage, &wpc);
	if (wpc.ioend)
		ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
	return ret;
}

STATIC int
xfs_dax_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	return dax_writeback_mapping_range(mapping,
			xfs_find_bdev_for_inode(mapping->host), wbc);
}

/*
 * Called to move a page into cleanable state - and from there
 * to be released. The page should already be clean. We always
 * have buffer heads in this call.
 *
 * Returns 1 if the page is ok to release, 0 otherwise.
 */
STATIC int
xfs_vm_releasepage(
	struct page		*page,
	gfp_t			gfp_mask)
{
	int			delalloc, unwritten;

	trace_xfs_releasepage(page->mapping->host, page, 0, 0);

	/*
	 * mm accommodates an old ext3 case where clean pages might not have had
	 * the dirty bit cleared. Thus, it can send actual dirty pages to
	 * ->releasepage() via shrink_active_list(). Conversely,
	 * block_invalidatepage() can send pages that are still marked dirty but
	 * otherwise have invalidated buffers.
	 *
	 * We want to release the latter to avoid unnecessary buildup of the
	 * LRU, so xfs_vm_invalidatepage() clears the page dirty flag on pages
	 * that are entirely invalidated and need to be released.  Hence the
	 * only time we should get dirty pages here is through
	 * shrink_active_list() and so we can simply skip those now.
	 *
	 * warn if we've left any lingering delalloc/unwritten buffers on clean
	 * or invalidated pages we are about to release.
	 */
	if (PageDirty(page))
		return 0;

	xfs_count_page_state(page, &delalloc, &unwritten);

	if (WARN_ON_ONCE(delalloc))
		return 0;
	if (WARN_ON_ONCE(unwritten))
		return 0;

	return try_to_free_buffers(page);
}

/*
 * If this is O_DIRECT or the mpage code calling, tell them how large the
 * mapping is so that we can avoid repeated get_blocks calls.
 *
 * If the mapping spans EOF, then we have to break the mapping up as the mapping
 * for blocks beyond EOF must be marked new so that sub block regions can be
 * correctly zeroed. We can't do this for mappings within EOF unless the mapping
 * was just allocated or is unwritten, otherwise the callers would overwrite
 * existing data with zeros. Hence we have to split the mapping into a range up
 * to and including EOF, and a second mapping for beyond EOF.
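 *
 * For example, assuming 4k blocks, i_size = 10000, and a mapping starting at
 * byte offset 8192: the mapping is trimmed to the block that spans EOF,
 * i.e. roundup(10000 - 8192, 4096) = 4096 bytes.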
 */
static void
xfs_map_trim_size(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset,
	ssize_t			size)
{
	xfs_off_t		mapping_size;

	mapping_size = imap->br_startoff + imap->br_blockcount - iblock;
	mapping_size <<= inode->i_blkbits;

	ASSERT(mapping_size > 0);
	if (mapping_size > size)
		mapping_size = size;
	if (offset < i_size_read(inode) &&
	    (xfs_ufsize_t)offset + mapping_size >= i_size_read(inode)) {
		/* limit mapping to block that spans EOF */
		mapping_size = roundup_64(i_size_read(inode) - offset,
					  i_blocksize(inode));
	}
	if (mapping_size > LONG_MAX)
		mapping_size = LONG_MAX;

	bh_result->b_size = mapping_size;
}

static int
xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			error = 0;
	int			lockmode = 0;
	struct xfs_bmbt_irec	imap;
	int			nimaps = 1;
	xfs_off_t		offset;
	ssize_t			size;

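	/*
	 * This helper is only wired up to the buffered read path
	 * (xfs_vm_readpage/xfs_vm_readpages below), so we are never asked
	 * to allocate blocks here.
	 */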
	BUG_ON(create);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	offset = (xfs_off_t)iblock << inode->i_blkbits;
	ASSERT(bh_result->b_size >= i_blocksize(inode));
	size = bh_result->b_size;

	if (offset >= i_size_read(inode))
		return 0;

	/*
	 * Direct I/O is usually done on preallocated files, so try getting
	 * a block mapping without an exclusive lock first.
	 */
	lockmode = xfs_ilock_data_map_shared(ip);

	ASSERT(offset <= mp->m_super->s_maxbytes);
	if (offset > mp->m_super->s_maxbytes - size)
		size = mp->m_super->s_maxbytes - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);

	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
			&nimaps, 0);
	if (error)
		goto out_unlock;
	if (!nimaps) {
		trace_xfs_get_blocks_notfound(ip, offset, size);
		goto out_unlock;
	}

	trace_xfs_get_blocks_found(ip, offset, size,
		imap.br_state == XFS_EXT_UNWRITTEN ?
			XFS_IO_UNWRITTEN : XFS_IO_OVERWRITE, &imap);
	xfs_iunlock(ip, lockmode);

	/* trim mapping down to size requested */
	xfs_map_trim_size(inode, iblock, bh_result, &imap, offset, size);

	/*
	 * For unwritten extents do not report a disk address in the buffered
	 * read case (treat as if we're reading into a hole).
	 */
	if (xfs_bmap_is_real_extent(&imap))
		xfs_map_buffer(inode, bh_result, &imap, offset);

	/*
	 * If this is a realtime file, data may be on a different device
	 * to that pointed to by the buffer_head's b_bdev currently.
	 */
	bh_result->b_bdev = xfs_find_bdev_for_inode(inode);
	return 0;

out_unlock:
	xfs_iunlock(ip, lockmode);
	return error;
}

STATIC sector_t
xfs_vm_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct xfs_inode	*ip = XFS_I(mapping->host);

	trace_xfs_vm_bmap(ip);

	/*
	 * The swap code (ab-)uses ->bmap to get a block mapping and then
	 * bypasses the file system for actual I/O.  We really can't allow
	 * that on reflink inodes, so we have to skip out here.  And yes,
	 * 0 is the magic code for a bmap error.
	 *
	 * Since we don't pass back blockdev info, we can't return bmap
	 * information for rt files either.
	 */
	if (xfs_is_reflink_inode(ip) || XFS_IS_REALTIME_INODE(ip))
		return 0;
	return iomap_bmap(mapping, block, &xfs_iomap_ops);
}

STATIC int
xfs_vm_readpage(
	struct file		*unused,
	struct page		*page)
{
	trace_xfs_vm_readpage(page->mapping->host, 1);
	return mpage_readpage(page, xfs_get_blocks);
}

STATIC int
xfs_vm_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	trace_xfs_vm_readpages(mapping->host, nr_pages);
	return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
}

/*
 * This is basically a copy of __set_page_dirty_buffers() with one
 * small tweak: buffers beyond EOF do not get marked dirty. If we mark them
 * dirty, we'll never be able to clean them because we don't write buffers
 * beyond EOF, and that means we can't invalidate pages that span EOF
 * that have been marked dirty. Further, the dirty state can leak into
 * the file interior if the file is extended, resulting in all sorts of
 * bad things happening as the state does not match the underlying data.
 *
 * XXX: this really indicates that bufferheads in XFS need to die. Warts like
 * this only exist because of bufferheads and how the generic code manages them.
 */
STATIC int
xfs_vm_set_page_dirty(
	struct page		*page)
{
	struct address_space	*mapping = page->mapping;
	struct inode		*inode = mapping->host;
	loff_t			end_offset;
	loff_t			offset;
	int			newly_dirty;

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	end_offset = i_size_read(inode);
	offset = page_offset(page);

	spin_lock(&mapping->private_lock);
	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

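		/* only mark buffers that lie within i_size dirty; see above */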
		do {
			if (offset < end_offset)
				set_buffer_dirty(bh);
			bh = bh->b_this_page;
			offset += i_blocksize(inode);
		} while (bh != head);
	}
	/*
	 * Lock out page->mem_cgroup migration to keep PageDirty
	 * synchronized with per-memcg dirty page counters.
	 */
	lock_page_memcg(page);
	newly_dirty = !TestSetPageDirty(page);
	spin_unlock(&mapping->private_lock);

	if (newly_dirty)
		__set_page_dirty(page, mapping, 1);
	unlock_page_memcg(page);
	if (newly_dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return newly_dirty;
}

static int
xfs_iomap_swapfile_activate(
	struct swap_info_struct		*sis,
	struct file			*swap_file,
	sector_t			*span)
{
	sis->bdev = xfs_find_bdev_for_inode(file_inode(swap_file));
	return iomap_swapfile_activate(sis, swap_file, span, &xfs_iomap_ops);
}

const struct address_space_operations xfs_address_space_operations = {
	.readpage		= xfs_vm_readpage,
	.readpages		= xfs_vm_readpages,
	.writepage		= xfs_vm_writepage,
	.writepages		= xfs_vm_writepages,
	.set_page_dirty		= xfs_vm_set_page_dirty,
	.releasepage		= xfs_vm_releasepage,
	.invalidatepage		= xfs_vm_invalidatepage,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= noop_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate  = block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
	.swap_activate		= xfs_iomap_swapfile_activate,
};

const struct address_space_operations xfs_dax_aops = {
	.writepages		= xfs_dax_writepages,
	.direct_IO		= noop_direct_IO,
	.set_page_dirty		= noop_set_page_dirty,
	.invalidatepage		= noop_invalidatepage,
	.swap_activate		= xfs_iomap_swapfile_activate,
};
1507