xref: /openbmc/linux/fs/xfs/xfs_aops.c (revision afb46f79)
1 /*
2  * Copyright (c) 2000-2005 Silicon Graphics, Inc.
3  * All Rights Reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it would be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write the Free Software Foundation,
16  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
17  */
18 #include "xfs.h"
19 #include "xfs_shared.h"
20 #include "xfs_format.h"
21 #include "xfs_log_format.h"
22 #include "xfs_trans_resv.h"
23 #include "xfs_sb.h"
24 #include "xfs_ag.h"
25 #include "xfs_mount.h"
26 #include "xfs_inode.h"
27 #include "xfs_trans.h"
28 #include "xfs_inode_item.h"
29 #include "xfs_alloc.h"
30 #include "xfs_error.h"
31 #include "xfs_iomap.h"
32 #include "xfs_trace.h"
33 #include "xfs_bmap.h"
34 #include "xfs_bmap_util.h"
35 #include "xfs_bmap_btree.h"
36 #include "xfs_dinode.h"
37 #include <linux/aio.h>
38 #include <linux/gfp.h>
39 #include <linux/mpage.h>
40 #include <linux/pagevec.h>
41 #include <linux/writeback.h>
42 
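/*
 * Walk the buffers attached to a page and report whether any of them
 * are in delalloc or unwritten state.
 */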
43 void
44 xfs_count_page_state(
45 	struct page		*page,
46 	int			*delalloc,
47 	int			*unwritten)
48 {
49 	struct buffer_head	*bh, *head;
50 
51 	*delalloc = *unwritten = 0;
52 
53 	bh = head = page_buffers(page);
54 	do {
55 		if (buffer_unwritten(bh))
56 			(*unwritten) = 1;
57 		else if (buffer_delay(bh))
58 			(*delalloc) = 1;
59 	} while ((bh = bh->b_this_page) != head);
60 }
61 
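/*
 * Return the block device that backs this inode's data: the realtime
 * device for realtime inodes, otherwise the data device.
 */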
62 STATIC struct block_device *
63 xfs_find_bdev_for_inode(
64 	struct inode		*inode)
65 {
66 	struct xfs_inode	*ip = XFS_I(inode);
67 	struct xfs_mount	*mp = ip->i_mount;
68 
69 	if (XFS_IS_REALTIME_INODE(ip))
70 		return mp->m_rtdev_targp->bt_bdev;
71 	else
72 		return mp->m_ddev_targp->bt_bdev;
73 }
74 
75 /*
76  * We're now finished for good with this ioend structure.
77  * Update the page state via the associated buffer_heads,
78  * release holds on the inode and bio, and finally free
79  * up memory.  Do not use the ioend after this.
80  */
81 STATIC void
82 xfs_destroy_ioend(
83 	xfs_ioend_t		*ioend)
84 {
85 	struct buffer_head	*bh, *next;
86 
87 	for (bh = ioend->io_buffer_head; bh; bh = next) {
88 		next = bh->b_private;
89 		bh->b_end_io(bh, !ioend->io_error);
90 	}
91 
92 	mempool_free(ioend, xfs_ioend_pool);
93 }
94 
95 /*
96  * Fast and loose check if this write could update the on-disk inode size.
97  */
98 static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
99 {
100 	return ioend->io_offset + ioend->io_size >
101 		XFS_I(ioend->io_inode)->i_d.di_size;
102 }
103 
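/*
 * Allocate and reserve a transaction up front so that the on-disk inode
 * size can be updated from I/O completion context, handing the freeze
 * protection and transaction state over to the completion thread.
 */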
104 STATIC int
105 xfs_setfilesize_trans_alloc(
106 	struct xfs_ioend	*ioend)
107 {
108 	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
109 	struct xfs_trans	*tp;
110 	int			error;
111 
112 	tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
113 
114 	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_fsyncts, 0, 0);
115 	if (error) {
116 		xfs_trans_cancel(tp, 0);
117 		return error;
118 	}
119 
120 	ioend->io_append_trans = tp;
121 
122 	/*
123 	 * We may pass freeze protection with a transaction.  So tell lockdep
124 	 * we released it.
125 	 */
126 	rwsem_release(&ioend->io_inode->i_sb->s_writers.lock_map[SB_FREEZE_FS-1],
127 		      1, _THIS_IP_);
128 	/*
129 	 * We hand off the transaction to the completion thread now, so
130 	 * clear the flag here.
131 	 */
132 	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
133 	return 0;
134 }
135 
136 /*
137  * Update on-disk file size now that data has been written to disk.
138  */
139 STATIC int
140 xfs_setfilesize(
141 	struct xfs_ioend	*ioend)
142 {
143 	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
144 	struct xfs_trans	*tp = ioend->io_append_trans;
145 	xfs_fsize_t		isize;
146 
147 	/*
148 	 * The transaction may have been allocated in the I/O submission thread,
149 	 * thus we need to mark ourselves as being in a transaction manually.
150 	 * Similarly for freeze protection.
151 	 */
152 	current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);
153 	rwsem_acquire_read(&VFS_I(ip)->i_sb->s_writers.lock_map[SB_FREEZE_FS-1],
154 			   0, 1, _THIS_IP_);
155 
156 	xfs_ilock(ip, XFS_ILOCK_EXCL);
157 	isize = xfs_new_eof(ip, ioend->io_offset + ioend->io_size);
158 	if (!isize) {
159 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
160 		xfs_trans_cancel(tp, 0);
161 		return 0;
162 	}
163 
164 	trace_xfs_setfilesize(ip, ioend->io_offset, ioend->io_size);
165 
166 	ip->i_d.di_size = isize;
167 	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
168 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
169 
170 	return xfs_trans_commit(tp, 0);
171 }
172 
173 /*
174  * Schedule IO completion handling on the final put of an ioend.
175  *
176  * If there is no work to do we might as well call it a day and free the
177  * ioend right now.
178  */
179 STATIC void
180 xfs_finish_ioend(
181 	struct xfs_ioend	*ioend)
182 {
183 	if (atomic_dec_and_test(&ioend->io_remaining)) {
184 		struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
185 
186 		if (ioend->io_type == XFS_IO_UNWRITTEN)
187 			queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
188 		else if (ioend->io_append_trans ||
189 			 (ioend->io_isdirect && xfs_ioend_is_append(ioend)))
190 			queue_work(mp->m_data_workqueue, &ioend->io_work);
191 		else
192 			xfs_destroy_ioend(ioend);
193 	}
194 }
195 
196 /*
197  * IO write completion.
198  */
199 STATIC void
200 xfs_end_io(
201 	struct work_struct *work)
202 {
203 	xfs_ioend_t	*ioend = container_of(work, xfs_ioend_t, io_work);
204 	struct xfs_inode *ip = XFS_I(ioend->io_inode);
205 	int		error = 0;
206 
207 	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
208 		ioend->io_error = -EIO;
209 		goto done;
210 	}
211 	if (ioend->io_error)
212 		goto done;
213 
214 	/*
215 	 * For unwritten extents we need to issue transactions to convert a
216 	 * range to normal written extents after the data I/O has finished.
217 	 */
218 	if (ioend->io_type == XFS_IO_UNWRITTEN) {
219 		error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
220 						  ioend->io_size);
221 	} else if (ioend->io_isdirect && xfs_ioend_is_append(ioend)) {
222 		/*
223 		 * For direct I/O we do not know if we need to allocate blocks
224 		 * or not so we can't preallocate an append transaction as that
225 		 * results in nested reservations and log space deadlocks. Hence
226 		 * allocate the transaction here. While this is sub-optimal and
227 		 * can block IO completion for some time, we're stuck with doing
228 		 * it this way until we can pass the ioend to the direct IO
229 		 * allocation callbacks and avoid nesting that way.
230 		 */
231 		error = xfs_setfilesize_trans_alloc(ioend);
232 		if (error)
233 			goto done;
234 		error = xfs_setfilesize(ioend);
235 	} else if (ioend->io_append_trans) {
236 		error = xfs_setfilesize(ioend);
237 	} else {
238 		ASSERT(!xfs_ioend_is_append(ioend));
239 	}
240 
241 done:
242 	if (error)
243 		ioend->io_error = -error;
244 	xfs_destroy_ioend(ioend);
245 }
246 
247 /*
248  * Call IO completion handling in caller context on the final put of an ioend.
249  */
250 STATIC void
251 xfs_finish_ioend_sync(
252 	struct xfs_ioend	*ioend)
253 {
254 	if (atomic_dec_and_test(&ioend->io_remaining))
255 		xfs_end_io(&ioend->io_work);
256 }
257 
258 /*
259  * Allocate and initialise an IO completion structure.
260  * We need to track unwritten extent write completion here initially.
261  * We'll need to extend this for updating the ondisk inode size later
262  * (vs. incore size).
263  */
264 STATIC xfs_ioend_t *
265 xfs_alloc_ioend(
266 	struct inode		*inode,
267 	unsigned int		type)
268 {
269 	xfs_ioend_t		*ioend;
270 
271 	ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);
272 
273 	/*
274 	 * Set the count to 1 initially, which will prevent an I/O
275 	 * completion callback that happens before we have started
276 	 * all the I/O from calling the completion routine too early.
277 	 */
278 	atomic_set(&ioend->io_remaining, 1);
279 	ioend->io_isdirect = 0;
280 	ioend->io_error = 0;
281 	ioend->io_list = NULL;
282 	ioend->io_type = type;
283 	ioend->io_inode = inode;
284 	ioend->io_buffer_head = NULL;
285 	ioend->io_buffer_tail = NULL;
286 	ioend->io_offset = 0;
287 	ioend->io_size = 0;
288 	ioend->io_append_trans = NULL;
289 
290 	INIT_WORK(&ioend->io_work, xfs_end_io);
291 	return ioend;
292 }
293 
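/*
 * Look up the extent mapping covering @offset.  For delalloc writeback
 * this may allocate real blocks via xfs_iomap_write_allocate().
 */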
294 STATIC int
295 xfs_map_blocks(
296 	struct inode		*inode,
297 	loff_t			offset,
298 	struct xfs_bmbt_irec	*imap,
299 	int			type,
300 	int			nonblocking)
301 {
302 	struct xfs_inode	*ip = XFS_I(inode);
303 	struct xfs_mount	*mp = ip->i_mount;
304 	ssize_t			count = 1 << inode->i_blkbits;
305 	xfs_fileoff_t		offset_fsb, end_fsb;
306 	int			error = 0;
307 	int			bmapi_flags = XFS_BMAPI_ENTIRE;
308 	int			nimaps = 1;
309 
310 	if (XFS_FORCED_SHUTDOWN(mp))
311 		return -XFS_ERROR(EIO);
312 
313 	if (type == XFS_IO_UNWRITTEN)
314 		bmapi_flags |= XFS_BMAPI_IGSTATE;
315 
316 	if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
317 		if (nonblocking)
318 			return -XFS_ERROR(EAGAIN);
319 		xfs_ilock(ip, XFS_ILOCK_SHARED);
320 	}
321 
322 	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
323 	       (ip->i_df.if_flags & XFS_IFEXTENTS));
324 	ASSERT(offset <= mp->m_super->s_maxbytes);
325 
326 	if (offset + count > mp->m_super->s_maxbytes)
327 		count = mp->m_super->s_maxbytes - offset;
328 	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
329 	offset_fsb = XFS_B_TO_FSBT(mp, offset);
330 	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
331 				imap, &nimaps, bmapi_flags);
332 	xfs_iunlock(ip, XFS_ILOCK_SHARED);
333 
334 	if (error)
335 		return -XFS_ERROR(error);
336 
337 	if (type == XFS_IO_DELALLOC &&
338 	    (!nimaps || isnullstartblock(imap->br_startblock))) {
339 		error = xfs_iomap_write_allocate(ip, offset, imap);
340 		if (!error)
341 			trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
342 		return -XFS_ERROR(error);
343 	}
344 
345 #ifdef DEBUG
346 	if (type == XFS_IO_UNWRITTEN) {
347 		ASSERT(nimaps);
348 		ASSERT(imap->br_startblock != HOLESTARTBLOCK);
349 		ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
350 	}
351 #endif
352 	if (nimaps)
353 		trace_xfs_map_blocks_found(ip, offset, count, type, imap);
354 	return 0;
355 }
356 
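/*
 * Return non-zero if @offset lies within the extent described by @imap.
 */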
357 STATIC int
358 xfs_imap_valid(
359 	struct inode		*inode,
360 	struct xfs_bmbt_irec	*imap,
361 	xfs_off_t		offset)
362 {
363 	offset >>= inode->i_blkbits;
364 
365 	return offset >= imap->br_startoff &&
366 		offset < imap->br_startoff + imap->br_blockcount;
367 }
368 
369 /*
370  * BIO completion handler for buffered IO.
371  */
372 STATIC void
373 xfs_end_bio(
374 	struct bio		*bio,
375 	int			error)
376 {
377 	xfs_ioend_t		*ioend = bio->bi_private;
378 
379 	ASSERT(atomic_read(&bio->bi_cnt) >= 1);
380 	ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;
381 
382 	/* Toss bio and pass work off to the per-mount completion workqueues */
383 	bio->bi_private = NULL;
384 	bio->bi_end_io = NULL;
385 	bio_put(bio);
386 
387 	xfs_finish_ioend(ioend);
388 }
389 
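/*
 * Take a reference on the ioend for the bio being submitted and pass the
 * bio to the block layer, using WRITE_SYNC for data integrity writeback.
 */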
390 STATIC void
391 xfs_submit_ioend_bio(
392 	struct writeback_control *wbc,
393 	xfs_ioend_t		*ioend,
394 	struct bio		*bio)
395 {
396 	atomic_inc(&ioend->io_remaining);
397 	bio->bi_private = ioend;
398 	bio->bi_end_io = xfs_end_bio;
399 	submit_bio(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE, bio);
400 }
401 
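/*
 * Allocate a bio sized for the underlying device and point it at the
 * disk address of the given buffer_head.
 */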
402 STATIC struct bio *
403 xfs_alloc_ioend_bio(
404 	struct buffer_head	*bh)
405 {
406 	int			nvecs = bio_get_nr_vecs(bh->b_bdev);
407 	struct bio		*bio = bio_alloc(GFP_NOIO, nvecs);
408 
409 	ASSERT(bio->bi_private == NULL);
410 	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
411 	bio->bi_bdev = bh->b_bdev;
412 	return bio;
413 }
414 
415 STATIC void
416 xfs_start_buffer_writeback(
417 	struct buffer_head	*bh)
418 {
419 	ASSERT(buffer_mapped(bh));
420 	ASSERT(buffer_locked(bh));
421 	ASSERT(!buffer_delay(bh));
422 	ASSERT(!buffer_unwritten(bh));
423 
424 	mark_buffer_async_write(bh);
425 	set_buffer_uptodate(bh);
426 	clear_buffer_dirty(bh);
427 }
428 
429 STATIC void
430 xfs_start_page_writeback(
431 	struct page		*page,
432 	int			clear_dirty,
433 	int			buffers)
434 {
435 	ASSERT(PageLocked(page));
436 	ASSERT(!PageWriteback(page));
437 	if (clear_dirty)
438 		clear_page_dirty_for_io(page);
439 	set_page_writeback(page);
440 	unlock_page(page);
441 	/* If no buffers on the page are to be written, finish it here */
442 	if (!buffers)
443 		end_page_writeback(page);
444 }
445 
446 static inline int xfs_bio_add_buffer(struct bio *bio, struct buffer_head *bh)
447 {
448 	return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
449 }
450 
451 /*
452  * Submit all of the bios for all of the ioends we have saved up, covering the
453  * initial writepage page and also any probed pages.
454  *
455  * Because we may have multiple ioends spanning a page, we need to start
456  * writeback on all the buffers before we submit them for I/O. If we mark the
457  * buffers as we go, then we can end up with a page that only has buffers
458  * marked async write, and I/O completion can occur on those before we mark the
459  * other buffers async write.
460  *
461  * The end result of this is that we trip a bug in end_page_writeback() because
462  * we call it twice for the one page as the code in end_buffer_async_write()
463  * assumes that all buffers on the page are started at the same time.
464  *
465  * The fix is two passes across the ioend list - one to start writeback on the
466  * buffer_heads, and then submit them for I/O on the second pass.
467  *
468  * If @fail is non-zero, it means that we have a situation where some part of
469  * the submission process has failed after we have marked pages for writeback
470  * and unlocked them. In this situation, we need to fail the ioend chain rather
471  * than submit it to IO. This typically only happens on a filesystem shutdown.
472  */
473 STATIC void
474 xfs_submit_ioend(
475 	struct writeback_control *wbc,
476 	xfs_ioend_t		*ioend,
477 	int			fail)
478 {
479 	xfs_ioend_t		*head = ioend;
480 	xfs_ioend_t		*next;
481 	struct buffer_head	*bh;
482 	struct bio		*bio;
483 	sector_t		lastblock = 0;
484 
485 	/* Pass 1 - start writeback */
486 	do {
487 		next = ioend->io_list;
488 		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private)
489 			xfs_start_buffer_writeback(bh);
490 	} while ((ioend = next) != NULL);
491 
492 	/* Pass 2 - submit I/O */
493 	ioend = head;
494 	do {
495 		next = ioend->io_list;
496 		bio = NULL;
497 
498 		/*
499 		 * If we are failing the IO now, just mark the ioend with an
500 		 * error and finish it. This will run IO completion immediately
501 		 * as there is only one reference to the ioend at this point in
502 		 * time.
503 		 */
504 		if (fail) {
505 			ioend->io_error = -fail;
506 			xfs_finish_ioend(ioend);
507 			continue;
508 		}
509 
510 		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
511 
512 			if (!bio) {
513  retry:
514 				bio = xfs_alloc_ioend_bio(bh);
515 			} else if (bh->b_blocknr != lastblock + 1) {
516 				xfs_submit_ioend_bio(wbc, ioend, bio);
517 				goto retry;
518 			}
519 
520 			if (xfs_bio_add_buffer(bio, bh) != bh->b_size) {
521 				xfs_submit_ioend_bio(wbc, ioend, bio);
522 				goto retry;
523 			}
524 
525 			lastblock = bh->b_blocknr;
526 		}
527 		if (bio)
528 			xfs_submit_ioend_bio(wbc, ioend, bio);
529 		xfs_finish_ioend(ioend);
530 	} while ((ioend = next) != NULL);
531 }
532 
533 /*
534  * Cancel submission of all buffer_heads so far in this ioend.
535  * Toss the ioend too.  Only ever called for the initial page
536  * in a writepage request, so only ever one page.
537  */
538 STATIC void
539 xfs_cancel_ioend(
540 	xfs_ioend_t		*ioend)
541 {
542 	xfs_ioend_t		*next;
543 	struct buffer_head	*bh, *next_bh;
544 
545 	do {
546 		next = ioend->io_list;
547 		bh = ioend->io_buffer_head;
548 		do {
549 			next_bh = bh->b_private;
550 			clear_buffer_async_write(bh);
551 			unlock_buffer(bh);
552 		} while ((bh = next_bh) != NULL);
553 
554 		mempool_free(ioend, xfs_ioend_pool);
555 	} while ((ioend = next) != NULL);
556 }
557 
558 /*
559  * Test to see if we've been building up a completion structure for
560  * earlier buffers -- if so, we try to append to this ioend if we
561  * can, otherwise we finish off any current ioend and start another.
562  * The ioend being built is returned via *result.
563  */
564 STATIC void
565 xfs_add_to_ioend(
566 	struct inode		*inode,
567 	struct buffer_head	*bh,
568 	xfs_off_t		offset,
569 	unsigned int		type,
570 	xfs_ioend_t		**result,
571 	int			need_ioend)
572 {
573 	xfs_ioend_t		*ioend = *result;
574 
575 	if (!ioend || need_ioend || type != ioend->io_type) {
576 		xfs_ioend_t	*previous = *result;
577 
578 		ioend = xfs_alloc_ioend(inode, type);
579 		ioend->io_offset = offset;
580 		ioend->io_buffer_head = bh;
581 		ioend->io_buffer_tail = bh;
582 		if (previous)
583 			previous->io_list = ioend;
584 		*result = ioend;
585 	} else {
586 		ioend->io_buffer_tail->b_private = bh;
587 		ioend->io_buffer_tail = bh;
588 	}
589 
590 	bh->b_private = NULL;
591 	ioend->io_size += bh->b_size;
592 }
593 
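/*
 * Translate the file offset into a disk block number using the extent
 * mapping and store it in the buffer_head.
 */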
594 STATIC void
595 xfs_map_buffer(
596 	struct inode		*inode,
597 	struct buffer_head	*bh,
598 	struct xfs_bmbt_irec	*imap,
599 	xfs_off_t		offset)
600 {
601 	sector_t		bn;
602 	struct xfs_mount	*m = XFS_I(inode)->i_mount;
603 	xfs_off_t		iomap_offset = XFS_FSB_TO_B(m, imap->br_startoff);
604 	xfs_daddr_t		iomap_bn = xfs_fsb_to_db(XFS_I(inode), imap->br_startblock);
605 
606 	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
607 	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
608 
609 	bn = (iomap_bn >> (inode->i_blkbits - BBSHIFT)) +
610 	      ((offset - iomap_offset) >> inode->i_blkbits);
611 
612 	ASSERT(bn || XFS_IS_REALTIME_INODE(XFS_I(inode)));
613 
614 	bh->b_blocknr = bn;
615 	set_buffer_mapped(bh);
616 }
617 
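/*
 * Map the buffer at @offset and clear any stale delalloc or unwritten
 * state now that real blocks back it.
 */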
618 STATIC void
619 xfs_map_at_offset(
620 	struct inode		*inode,
621 	struct buffer_head	*bh,
622 	struct xfs_bmbt_irec	*imap,
623 	xfs_off_t		offset)
624 {
625 	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
626 	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
627 
628 	xfs_map_buffer(inode, bh, imap, offset);
629 	set_buffer_mapped(bh);
630 	clear_buffer_delay(bh);
631 	clear_buffer_unwritten(bh);
632 }
633 
634 /*
635  * Test if a given page contains at least one buffer of a given @type.
636  * If @check_all_buffers is true, then we walk all the buffers in the page to
637  * try to find one of the type passed in. If it is not set, then we only
638  * check the first buffer on the page for a match.
639  */
640 STATIC bool
641 xfs_check_page_type(
642 	struct page		*page,
643 	unsigned int		type,
644 	bool			check_all_buffers)
645 {
646 	struct buffer_head	*bh;
647 	struct buffer_head	*head;
648 
649 	if (PageWriteback(page))
650 		return false;
651 	if (!page->mapping)
652 		return false;
653 	if (!page_has_buffers(page))
654 		return false;
655 
656 	bh = head = page_buffers(page);
657 	do {
658 		if (buffer_unwritten(bh)) {
659 			if (type == XFS_IO_UNWRITTEN)
660 				return true;
661 		} else if (buffer_delay(bh)) {
662 			if (type == XFS_IO_DELALLOC)
663 				return true;
664 		} else if (buffer_dirty(bh) && buffer_mapped(bh)) {
665 			if (type == XFS_IO_OVERWRITE)
666 				return true;
667 		}
668 
669 		/* If we are only checking the first buffer, we are done now. */
670 		if (!check_all_buffers)
671 			break;
672 	} while ((bh = bh->b_this_page) != head);
673 
674 	return false;
675 }
676 
677 /*
678  * Allocate & map buffers for a page given the extent map, then write it out.
679  * Except for the original page of a writepage call, this is called on
680  * delalloc/unwritten pages only; for the original page it is possible
681  * that the page has no mapping at all.
682  */
683 STATIC int
684 xfs_convert_page(
685 	struct inode		*inode,
686 	struct page		*page,
687 	loff_t			tindex,
688 	struct xfs_bmbt_irec	*imap,
689 	xfs_ioend_t		**ioendp,
690 	struct writeback_control *wbc)
691 {
692 	struct buffer_head	*bh, *head;
693 	xfs_off_t		end_offset;
694 	unsigned long		p_offset;
695 	unsigned int		type;
696 	int			len, page_dirty;
697 	int			count = 0, done = 0, uptodate = 1;
698 	xfs_off_t		offset = page_offset(page);
699 
700 	if (page->index != tindex)
701 		goto fail;
702 	if (!trylock_page(page))
703 		goto fail;
704 	if (PageWriteback(page))
705 		goto fail_unlock_page;
706 	if (page->mapping != inode->i_mapping)
707 		goto fail_unlock_page;
708 	if (!xfs_check_page_type(page, (*ioendp)->io_type, false))
709 		goto fail_unlock_page;
710 
711 	/*
712 	 * page_dirty is initially a count of buffers on the page before
713 	 * EOF and is decremented as we move each into a cleanable state.
714 	 *
715 	 * Derivation:
716 	 *
717 	 * End offset is the highest offset that this page should represent.
718 	 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
719 	 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
720 	 * hence give us the correct page_dirty count. On any other page,
721 	 * it will be zero and in that case we need page_dirty to be the
722 	 * count of buffers on the page.
723 	 */
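	/*
	 * Worked example (illustrative numbers): with 4k pages and 1k
	 * blocks, a file of 9728 bytes ends on page index 2.  There
	 * end_offset is 9728, end_offset & (PAGE_CACHE_SIZE - 1) is 1536,
	 * which rounds up to 2048 and hence page_dirty starts at 2, i.e.
	 * the two buffers covering data before EOF.
	 */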
724 	end_offset = min_t(unsigned long long,
725 			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
726 			i_size_read(inode));
727 
728 	/*
729 	 * If the current map does not span the entire page we are about to try
730 	 * to write, then give up. The only way we can write a page that spans
731 	 * multiple mappings in a single writeback iteration is via the
732 	 * xfs_vm_writepage() function. Data integrity writeback requires the
733 	 * entire page to be written in a single attempt, otherwise the part of
734 	 * the page we don't write here doesn't get written as part of the data
735 	 * integrity sync.
736 	 *
737 	 * For normal writeback, we also don't attempt to write partial pages
738 	 * here as it simply means that write_cache_pages() will see it under
739 	 * writeback and ignore the page until some point in the future, at
740 	 * which time this will be the only page in the file that needs
741 	 * writeback.  Hence for more optimal IO patterns, we should always
742 	 * avoid partial page writeback due to multiple mappings on a page here.
743 	 */
744 	if (!xfs_imap_valid(inode, imap, end_offset))
745 		goto fail_unlock_page;
746 
747 	len = 1 << inode->i_blkbits;
748 	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
749 					PAGE_CACHE_SIZE);
750 	p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
751 	page_dirty = p_offset / len;
752 
753 	/*
754 	 * The moment we find a buffer that doesn't match our current type
755 	 * specification or can't be written, abort the loop and start
756 	 * writeback. As per the above xfs_imap_valid() check, only
757 	 * xfs_vm_writepage() can handle partial page writeback fully - we are
758 	 * limited here to the buffers that are contiguous with the current
759 	 * ioend, and hence a buffer we can't write breaks that contiguity and
760 	 * we have to defer the rest of the IO to xfs_vm_writepage().
761 	 */
762 	bh = head = page_buffers(page);
763 	do {
764 		if (offset >= end_offset)
765 			break;
766 		if (!buffer_uptodate(bh))
767 			uptodate = 0;
768 		if (!(PageUptodate(page) || buffer_uptodate(bh))) {
769 			done = 1;
770 			break;
771 		}
772 
773 		if (buffer_unwritten(bh) || buffer_delay(bh) ||
774 		    buffer_mapped(bh)) {
775 			if (buffer_unwritten(bh))
776 				type = XFS_IO_UNWRITTEN;
777 			else if (buffer_delay(bh))
778 				type = XFS_IO_DELALLOC;
779 			else
780 				type = XFS_IO_OVERWRITE;
781 
782 			/*
783 			 * imap should always be valid because of the above
784 			 * partial page end_offset check on the imap.
785 			 */
786 			ASSERT(xfs_imap_valid(inode, imap, offset));
787 
788 			lock_buffer(bh);
789 			if (type != XFS_IO_OVERWRITE)
790 				xfs_map_at_offset(inode, bh, imap, offset);
791 			xfs_add_to_ioend(inode, bh, offset, type,
792 					 ioendp, done);
793 
794 			page_dirty--;
795 			count++;
796 		} else {
797 			done = 1;
798 			break;
799 		}
800 	} while (offset += len, (bh = bh->b_this_page) != head);
801 
802 	if (uptodate && bh == head)
803 		SetPageUptodate(page);
804 
805 	if (count) {
806 		if (--wbc->nr_to_write <= 0 &&
807 		    wbc->sync_mode == WB_SYNC_NONE)
808 			done = 1;
809 	}
810 	xfs_start_page_writeback(page, !page_dirty, count);
811 
812 	return done;
813  fail_unlock_page:
814 	unlock_page(page);
815  fail:
816 	return 1;
817 }
818 
819 /*
820  * Convert & write out a cluster of pages in the same extent as defined
821  * by mp and following the start page.
822  */
823 STATIC void
824 xfs_cluster_write(
825 	struct inode		*inode,
826 	pgoff_t			tindex,
827 	struct xfs_bmbt_irec	*imap,
828 	xfs_ioend_t		**ioendp,
829 	struct writeback_control *wbc,
830 	pgoff_t			tlast)
831 {
832 	struct pagevec		pvec;
833 	int			done = 0, i;
834 
835 	pagevec_init(&pvec, 0);
836 	while (!done && tindex <= tlast) {
837 		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);
838 
839 		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
840 			break;
841 
842 		for (i = 0; i < pagevec_count(&pvec); i++) {
843 			done = xfs_convert_page(inode, pvec.pages[i], tindex++,
844 					imap, ioendp, wbc);
845 			if (done)
846 				break;
847 		}
848 
849 		pagevec_release(&pvec);
850 		cond_resched();
851 	}
852 }
853 
854 STATIC void
855 xfs_vm_invalidatepage(
856 	struct page		*page,
857 	unsigned int		offset,
858 	unsigned int		length)
859 {
860 	trace_xfs_invalidatepage(page->mapping->host, page, offset,
861 				 length);
862 	block_invalidatepage(page, offset, length);
863 }
864 
865 /*
866  * If the page has delalloc buffers on it, we need to punch them out before we
867  * invalidate the page. If we don't, we leave a stale delalloc mapping on the
868  * inode that can trip a BUG() in xfs_get_blocks() later on if a direct IO read
869  * is done on that same region - the delalloc extent is returned when none is
870  * supposed to be there.
871  *
872  * We prevent this by truncating away the delalloc regions on the page before
873  * invalidating it. Because they are delalloc, we can do this without needing a
874  * transaction. Indeed - if we get ENOSPC errors, we have to be able to do this
875  * truncation without a transaction as there is no space left for block
876  * reservation (typically why we see an ENOSPC in writeback).
877  *
878  * This is not a performance critical path, so for now just do the punching a
879  * buffer head at a time.
880  */
881 STATIC void
882 xfs_aops_discard_page(
883 	struct page		*page)
884 {
885 	struct inode		*inode = page->mapping->host;
886 	struct xfs_inode	*ip = XFS_I(inode);
887 	struct buffer_head	*bh, *head;
888 	loff_t			offset = page_offset(page);
889 
890 	if (!xfs_check_page_type(page, XFS_IO_DELALLOC, true))
891 		goto out_invalidate;
892 
893 	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
894 		goto out_invalidate;
895 
896 	xfs_alert(ip->i_mount,
897 		"page discard on page %p, inode 0x%llx, offset %llu.",
898 			page, ip->i_ino, offset);
899 
900 	xfs_ilock(ip, XFS_ILOCK_EXCL);
901 	bh = head = page_buffers(page);
902 	do {
903 		int		error;
904 		xfs_fileoff_t	start_fsb;
905 
906 		if (!buffer_delay(bh))
907 			goto next_buffer;
908 
909 		start_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
910 		error = xfs_bmap_punch_delalloc_range(ip, start_fsb, 1);
911 		if (error) {
912 			/* something screwed, just bail */
913 			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
914 				xfs_alert(ip->i_mount,
915 			"page discard unable to remove delalloc mapping.");
916 			}
917 			break;
918 		}
919 next_buffer:
920 		offset += 1 << inode->i_blkbits;
921 
922 	} while ((bh = bh->b_this_page) != head);
923 
924 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
925 out_invalidate:
926 	xfs_vm_invalidatepage(page, 0, PAGE_CACHE_SIZE);
927 	return;
928 }
929 
930 /*
931  * Write out a dirty page.
932  *
933  * For delalloc space on the page we need to allocate space and flush it.
934  * For unwritten space on the page we need to start the conversion to
935  * regular allocated space.
936  * For any other dirty buffer heads on the page we should flush them.
937  */
938 STATIC int
939 xfs_vm_writepage(
940 	struct page		*page,
941 	struct writeback_control *wbc)
942 {
943 	struct inode		*inode = page->mapping->host;
944 	struct buffer_head	*bh, *head;
945 	struct xfs_bmbt_irec	imap;
946 	xfs_ioend_t		*ioend = NULL, *iohead = NULL;
947 	loff_t			offset;
948 	unsigned int		type;
949 	__uint64_t              end_offset;
950 	pgoff_t                 end_index, last_index;
951 	ssize_t			len;
952 	int			err, imap_valid = 0, uptodate = 1;
953 	int			count = 0;
954 	int			nonblocking = 0;
955 
956 	trace_xfs_writepage(inode, page, 0, 0);
957 
958 	ASSERT(page_has_buffers(page));
959 
960 	/*
961 	 * Refuse to write the page out if we are called from reclaim context.
962 	 *
963 	 * This avoids stack overflows when called from deeply used stacks in
964 	 * random callers for direct reclaim or memcg reclaim.  We explicitly
965 	 * allow reclaim from kswapd as the stack usage there is relatively low.
966 	 *
967 	 * This should never happen except in the case of a VM regression so
968 	 * warn about it.
969 	 */
970 	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
971 			PF_MEMALLOC))
972 		goto redirty;
973 
974 	/*
975 	 * Given that we do not allow direct reclaim to call us, we should
976 	 * never be called while in a filesystem transaction.
977 	 */
978 	if (WARN_ON(current->flags & PF_FSTRANS))
979 		goto redirty;
980 
981 	/* Is this page beyond the end of the file? */
982 	offset = i_size_read(inode);
983 	end_index = offset >> PAGE_CACHE_SHIFT;
984 	last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
985 	if (page->index >= end_index) {
986 		unsigned offset_into_page = offset & (PAGE_CACHE_SIZE - 1);
987 
988 		/*
989 		 * Skip the page if it is fully outside i_size, e.g. due to a
990 		 * truncate operation that is in progress. We must redirty the
991 		 * page so that reclaim stops reclaiming it. Otherwise
992 		 * xfs_vm_releasepage() is called on it and gets confused.
993 		 */
994 		if (page->index >= end_index + 1 || offset_into_page == 0)
995 			goto redirty;
996 
997 		/*
998 		 * The page straddles i_size.  It must be zeroed out on each
999 		 * and every writepage invocation because it may be mmapped.
1000 		 * "A file is mapped in multiples of the page size.  For a file
1001 		 * that is not a multiple of the  page size, the remaining
1002 		 * memory is zeroed when mapped, and writes to that region are
1003 		 * not written out to the file."
1004 		 */
1005 		zero_user_segment(page, offset_into_page, PAGE_CACHE_SIZE);
1006 	}
1007 
1008 	end_offset = min_t(unsigned long long,
1009 			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
1010 			offset);
1011 	len = 1 << inode->i_blkbits;
1012 
1013 	bh = head = page_buffers(page);
1014 	offset = page_offset(page);
1015 	type = XFS_IO_OVERWRITE;
1016 
1017 	if (wbc->sync_mode == WB_SYNC_NONE)
1018 		nonblocking = 1;
1019 
1020 	do {
1021 		int new_ioend = 0;
1022 
1023 		if (offset >= end_offset)
1024 			break;
1025 		if (!buffer_uptodate(bh))
1026 			uptodate = 0;
1027 
1028 		/*
1029 		 * set_page_dirty dirties all buffers in a page, independent
1030 		 * of their state.  The dirty state however is entirely
1031 		 * meaningless for holes (!mapped && uptodate), so skip
1032 		 * buffers covering holes here.
1033 		 */
1034 		if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
1035 			imap_valid = 0;
1036 			continue;
1037 		}
1038 
1039 		if (buffer_unwritten(bh)) {
1040 			if (type != XFS_IO_UNWRITTEN) {
1041 				type = XFS_IO_UNWRITTEN;
1042 				imap_valid = 0;
1043 			}
1044 		} else if (buffer_delay(bh)) {
1045 			if (type != XFS_IO_DELALLOC) {
1046 				type = XFS_IO_DELALLOC;
1047 				imap_valid = 0;
1048 			}
1049 		} else if (buffer_uptodate(bh)) {
1050 			if (type != XFS_IO_OVERWRITE) {
1051 				type = XFS_IO_OVERWRITE;
1052 				imap_valid = 0;
1053 			}
1054 		} else {
1055 			if (PageUptodate(page))
1056 				ASSERT(buffer_mapped(bh));
1057 			/*
1058 			 * This buffer is not uptodate and will not be
1059 			 * written to disk.  Ensure that we will put any
1060 			 * subsequent writeable buffers into a new
1061 			 * ioend.
1062 			 */
1063 			imap_valid = 0;
1064 			continue;
1065 		}
1066 
1067 		if (imap_valid)
1068 			imap_valid = xfs_imap_valid(inode, &imap, offset);
1069 		if (!imap_valid) {
1070 			/*
1071 			 * If we didn't have a valid mapping then we need to
1072 			 * put the new mapping into a separate ioend structure.
1073 			 * This ensures non-contiguous extents always have
1074 			 * separate ioends, which is particularly important
1075 			 * for unwritten extent conversion at I/O completion
1076 			 * time.
1077 			 */
1078 			new_ioend = 1;
1079 			err = xfs_map_blocks(inode, offset, &imap, type,
1080 					     nonblocking);
1081 			if (err)
1082 				goto error;
1083 			imap_valid = xfs_imap_valid(inode, &imap, offset);
1084 		}
1085 		if (imap_valid) {
1086 			lock_buffer(bh);
1087 			if (type != XFS_IO_OVERWRITE)
1088 				xfs_map_at_offset(inode, bh, &imap, offset);
1089 			xfs_add_to_ioend(inode, bh, offset, type, &ioend,
1090 					 new_ioend);
1091 			count++;
1092 		}
1093 
1094 		if (!iohead)
1095 			iohead = ioend;
1096 
1097 	} while (offset += len, ((bh = bh->b_this_page) != head));
1098 
1099 	if (uptodate && bh == head)
1100 		SetPageUptodate(page);
1101 
1102 	xfs_start_page_writeback(page, 1, count);
1103 
1104 	/* if there is no IO to be submitted for this page, we are done */
1105 	if (!ioend)
1106 		return 0;
1107 
1108 	ASSERT(iohead);
1109 
1110 	/*
1111 	 * Any errors from this point onwards need to be reported through the IO
1112 	 * completion path as we have marked the initial page as under writeback
1113 	 * and unlocked it.
1114 	 */
1115 	if (imap_valid) {
1116 		xfs_off_t		end_index;
1117 
1118 		end_index = imap.br_startoff + imap.br_blockcount;
1119 
1120 		/* to bytes */
1121 		end_index <<= inode->i_blkbits;
1122 
1123 		/* to pages */
1124 		end_index = (end_index - 1) >> PAGE_CACHE_SHIFT;
1125 
1126 		/* check against file size */
1127 		if (end_index > last_index)
1128 			end_index = last_index;
1129 
1130 		xfs_cluster_write(inode, page->index + 1, &imap, &ioend,
1131 				  wbc, end_index);
1132 	}
1133 
1134 
1135 	/*
1136 	 * Reserve log space if we might write beyond the on-disk inode size.
1137 	 */
1138 	err = 0;
1139 	if (ioend->io_type != XFS_IO_UNWRITTEN && xfs_ioend_is_append(ioend))
1140 		err = xfs_setfilesize_trans_alloc(ioend);
1141 
1142 	xfs_submit_ioend(wbc, iohead, err);
1143 
1144 	return 0;
1145 
1146 error:
1147 	if (iohead)
1148 		xfs_cancel_ioend(iohead);
1149 
1150 	if (err == -EAGAIN)
1151 		goto redirty;
1152 
1153 	xfs_aops_discard_page(page);
1154 	ClearPageUptodate(page);
1155 	unlock_page(page);
1156 	return err;
1157 
1158 redirty:
1159 	redirty_page_for_writepage(wbc, page);
1160 	unlock_page(page);
1161 	return 0;
1162 }
1163 
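/*
 * Clear the truncated flag on the inode and let the generic code walk the
 * dirty pages, calling back into ->writepage for each one.
 */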
1164 STATIC int
1165 xfs_vm_writepages(
1166 	struct address_space	*mapping,
1167 	struct writeback_control *wbc)
1168 {
1169 	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
1170 	return generic_writepages(mapping, wbc);
1171 }
1172 
1173 /*
1174  * Called to move a page into cleanable state - and from there
1175  * to be released. The page should already be clean. We always
1176  * have buffer heads in this call.
1177  *
1178  * Returns 1 if the page is ok to release, 0 otherwise.
1179  */
1180 STATIC int
1181 xfs_vm_releasepage(
1182 	struct page		*page,
1183 	gfp_t			gfp_mask)
1184 {
1185 	int			delalloc, unwritten;
1186 
1187 	trace_xfs_releasepage(page->mapping->host, page, 0, 0);
1188 
1189 	xfs_count_page_state(page, &delalloc, &unwritten);
1190 
1191 	if (WARN_ON(delalloc))
1192 		return 0;
1193 	if (WARN_ON(unwritten))
1194 		return 0;
1195 
1196 	return try_to_free_buffers(page);
1197 }
1198 
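/*
 * Map a logical file block to its on-disk location for the buffered and
 * direct I/O paths, allocating blocks when @create is set.
 */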
1199 STATIC int
1200 __xfs_get_blocks(
1201 	struct inode		*inode,
1202 	sector_t		iblock,
1203 	struct buffer_head	*bh_result,
1204 	int			create,
1205 	int			direct)
1206 {
1207 	struct xfs_inode	*ip = XFS_I(inode);
1208 	struct xfs_mount	*mp = ip->i_mount;
1209 	xfs_fileoff_t		offset_fsb, end_fsb;
1210 	int			error = 0;
1211 	int			lockmode = 0;
1212 	struct xfs_bmbt_irec	imap;
1213 	int			nimaps = 1;
1214 	xfs_off_t		offset;
1215 	ssize_t			size;
1216 	int			new = 0;
1217 
1218 	if (XFS_FORCED_SHUTDOWN(mp))
1219 		return -XFS_ERROR(EIO);
1220 
1221 	offset = (xfs_off_t)iblock << inode->i_blkbits;
1222 	ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
1223 	size = bh_result->b_size;
1224 
1225 	if (!create && direct && offset >= i_size_read(inode))
1226 		return 0;
1227 
1228 	/*
1229 	 * Direct I/O is usually done on preallocated files, so try getting
1230 	 * a block mapping without an exclusive lock first.  For buffered
1231 	 * writes we already have the exclusive iolock anyway, so avoiding
1232 	 * a lock roundtrip here by taking the ilock exclusive from the
1233 	 * beginning is a useful micro optimization.
1234 	 */
1235 	if (create && !direct) {
1236 		lockmode = XFS_ILOCK_EXCL;
1237 		xfs_ilock(ip, lockmode);
1238 	} else {
1239 		lockmode = xfs_ilock_data_map_shared(ip);
1240 	}
1241 
1242 	ASSERT(offset <= mp->m_super->s_maxbytes);
1243 	if (offset + size > mp->m_super->s_maxbytes)
1244 		size = mp->m_super->s_maxbytes - offset;
1245 	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
1246 	offset_fsb = XFS_B_TO_FSBT(mp, offset);
1247 
1248 	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
1249 				&imap, &nimaps, XFS_BMAPI_ENTIRE);
1250 	if (error)
1251 		goto out_unlock;
1252 
1253 	if (create &&
1254 	    (!nimaps ||
1255 	     (imap.br_startblock == HOLESTARTBLOCK ||
1256 	      imap.br_startblock == DELAYSTARTBLOCK))) {
1257 		if (direct || xfs_get_extsz_hint(ip)) {
1258 			/*
1259 			 * Drop the ilock in preparation for starting the block
1260 			 * allocation transaction.  It will be retaken
1261 			 * exclusively inside xfs_iomap_write_direct for the
1262 			 * actual allocation.
1263 			 */
1264 			xfs_iunlock(ip, lockmode);
1265 			error = xfs_iomap_write_direct(ip, offset, size,
1266 						       &imap, nimaps);
1267 			if (error)
1268 				return -error;
1269 			new = 1;
1270 		} else {
1271 			/*
1272 			 * Delalloc reservations do not require a transaction,
1273 			 * so we can go on without dropping the lock here. If we
1274 			 * are allocating a new delalloc block, make sure that
1275 			 * we set the new flag so the buffer is marked new and
1276 			 * we know that it is newly allocated if the write
1277 			 * fails.
1278 			 */
1279 			if (nimaps && imap.br_startblock == HOLESTARTBLOCK)
1280 				new = 1;
1281 			error = xfs_iomap_write_delay(ip, offset, size, &imap);
1282 			if (error)
1283 				goto out_unlock;
1284 
1285 			xfs_iunlock(ip, lockmode);
1286 		}
1287 
1288 		trace_xfs_get_blocks_alloc(ip, offset, size, 0, &imap);
1289 	} else if (nimaps) {
1290 		trace_xfs_get_blocks_found(ip, offset, size, 0, &imap);
1291 		xfs_iunlock(ip, lockmode);
1292 	} else {
1293 		trace_xfs_get_blocks_notfound(ip, offset, size);
1294 		goto out_unlock;
1295 	}
1296 
1297 	if (imap.br_startblock != HOLESTARTBLOCK &&
1298 	    imap.br_startblock != DELAYSTARTBLOCK) {
1299 		/*
1300 		 * For unwritten extents do not report a disk address on
1301 		 * the read case (treat as if we're reading into a hole).
1302 		 */
1303 		if (create || !ISUNWRITTEN(&imap))
1304 			xfs_map_buffer(inode, bh_result, &imap, offset);
1305 		if (create && ISUNWRITTEN(&imap)) {
1306 			if (direct) {
1307 				bh_result->b_private = inode;
1308 				set_buffer_defer_completion(bh_result);
1309 			}
1310 			set_buffer_unwritten(bh_result);
1311 		}
1312 	}
1313 
1314 	/*
1315 	 * If this is a realtime file, data may be on a different device
1316 	 * to that pointed to from the buffer_head b_bdev currently.
1317 	 */
1318 	bh_result->b_bdev = xfs_find_bdev_for_inode(inode);
1319 
1320 	/*
1321 	 * If we previously allocated a block out beyond eof and we are now
1322 	 * coming back to use it then we will need to flag it as new even if it
1323 	 * has a disk address.
1324 	 *
1325 	 * With sub-block writes into unwritten extents we also need to mark
1326 	 * the buffer as new so that the unwritten parts of the buffer get
1327 	 * correctly zeroed.
1328 	 */
1329 	if (create &&
1330 	    ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
1331 	     (offset >= i_size_read(inode)) ||
1332 	     (new || ISUNWRITTEN(&imap))))
1333 		set_buffer_new(bh_result);
1334 
1335 	if (imap.br_startblock == DELAYSTARTBLOCK) {
1336 		BUG_ON(direct);
1337 		if (create) {
1338 			set_buffer_uptodate(bh_result);
1339 			set_buffer_mapped(bh_result);
1340 			set_buffer_delay(bh_result);
1341 		}
1342 	}
1343 
1344 	/*
1345 	 * If this is O_DIRECT or the mpage code calling, tell them how large
1346 	 * the mapping is, so that we can avoid repeated get_blocks calls.
1347 	 *
1348 	 * If the mapping spans EOF, then we have to break the mapping up as the
1349 	 * mapping for blocks beyond EOF must be marked new so that sub block
1350 	 * regions can be correctly zeroed. We can't do this for mappings within
1351 	 * EOF unless the mapping was just allocated or is unwritten, otherwise
1352 	 * the callers would overwrite existing data with zeros. Hence we have
1353 	 * to split the mapping into a range up to and including EOF, and a
1354 	 * second mapping for beyond EOF.
1355 	 */
1356 	if (direct || size > (1 << inode->i_blkbits)) {
1357 		xfs_off_t		mapping_size;
1358 
1359 		mapping_size = imap.br_startoff + imap.br_blockcount - iblock;
1360 		mapping_size <<= inode->i_blkbits;
1361 
1362 		ASSERT(mapping_size > 0);
1363 		if (mapping_size > size)
1364 			mapping_size = size;
1365 		if (offset < i_size_read(inode) &&
1366 		    offset + mapping_size >= i_size_read(inode)) {
1367 			/* limit mapping to block that spans EOF */
1368 			mapping_size = roundup_64(i_size_read(inode) - offset,
1369 						  1 << inode->i_blkbits);
1370 		}
1371 		if (mapping_size > LONG_MAX)
1372 			mapping_size = LONG_MAX;
1373 
1374 		bh_result->b_size = mapping_size;
1375 	}
1376 
1377 	return 0;
1378 
1379 out_unlock:
1380 	xfs_iunlock(ip, lockmode);
1381 	return -error;
1382 }
1383 
1384 int
1385 xfs_get_blocks(
1386 	struct inode		*inode,
1387 	sector_t		iblock,
1388 	struct buffer_head	*bh_result,
1389 	int			create)
1390 {
1391 	return __xfs_get_blocks(inode, iblock, bh_result, create, 0);
1392 }
1393 
1394 STATIC int
1395 xfs_get_blocks_direct(
1396 	struct inode		*inode,
1397 	sector_t		iblock,
1398 	struct buffer_head	*bh_result,
1399 	int			create)
1400 {
1401 	return __xfs_get_blocks(inode, iblock, bh_result, create, 1);
1402 }
1403 
1404 /*
1405  * Complete a direct I/O write request.
1406  *
1407  * If the private argument is non-NULL, __xfs_get_blocks signals us that we
1408  * need to issue a transaction to convert the range from unwritten to written
1409  * extents.  In case this is regular synchronous I/O we just call xfs_end_io
1410  * to do this and we are done.  But in case this was a successful AIO
1411  * request this handler is called from interrupt context, from which we
1412  * can't start transactions.  In that case offload the I/O completion to
1413  * the workqueues we also use for buffered I/O completion.
1414  */
1415 STATIC void
1416 xfs_end_io_direct_write(
1417 	struct kiocb		*iocb,
1418 	loff_t			offset,
1419 	ssize_t			size,
1420 	void			*private)
1421 {
1422 	struct xfs_ioend	*ioend = iocb->private;
1423 
1424 	/*
1425 	 * While the generic direct I/O code updates the inode size, it does
1426 	 * so only after the end_io handler is called, which means our
1427 	 * end_io handler thinks the on-disk size is outside the in-core
1428 	 * size.  To prevent this just update it a little bit earlier here.
1429 	 */
1430 	if (offset + size > i_size_read(ioend->io_inode))
1431 		i_size_write(ioend->io_inode, offset + size);
1432 
1433 	/*
1434 	 * blockdev_direct_IO can return an error even after the I/O
1435 	 * completion handler was called.  Thus we need to protect
1436 	 * against double-freeing.
1437 	 */
1438 	iocb->private = NULL;
1439 
1440 	ioend->io_offset = offset;
1441 	ioend->io_size = size;
1442 	if (private && size > 0)
1443 		ioend->io_type = XFS_IO_UNWRITTEN;
1444 
1445 	xfs_finish_ioend_sync(ioend);
1446 }
1447 
1448 STATIC ssize_t
1449 xfs_vm_direct_IO(
1450 	int			rw,
1451 	struct kiocb		*iocb,
1452 	const struct iovec	*iov,
1453 	loff_t			offset,
1454 	unsigned long		nr_segs)
1455 {
1456 	struct inode		*inode = iocb->ki_filp->f_mapping->host;
1457 	struct block_device	*bdev = xfs_find_bdev_for_inode(inode);
1458 	struct xfs_ioend	*ioend = NULL;
1459 	ssize_t			ret;
1460 
1461 	if (rw & WRITE) {
1462 		size_t size = iov_length(iov, nr_segs);
1463 
1464 		/*
1465 		 * We cannot preallocate a size update transaction here as we
1466 		 * don't know whether allocation is necessary or not. Hence we
1467 		 * can only tell IO completion that one is necessary if we are
1468 		 * not doing unwritten extent conversion.
1469 		 */
1470 		iocb->private = ioend = xfs_alloc_ioend(inode, XFS_IO_DIRECT);
1471 		if (offset + size > XFS_I(inode)->i_d.di_size)
1472 			ioend->io_isdirect = 1;
1473 
1474 		ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
1475 					    offset, nr_segs,
1476 					    xfs_get_blocks_direct,
1477 					    xfs_end_io_direct_write, NULL,
1478 					    DIO_ASYNC_EXTEND);
1479 		if (ret != -EIOCBQUEUED && iocb->private)
1480 			goto out_destroy_ioend;
1481 	} else {
1482 		ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
1483 					    offset, nr_segs,
1484 					    xfs_get_blocks_direct,
1485 					    NULL, NULL, 0);
1486 	}
1487 
1488 	return ret;
1489 
1490 out_destroy_ioend:
1491 	xfs_destroy_ioend(ioend);
1492 	return ret;
1493 }
1494 
1495 /*
1496  * Punch out the delalloc blocks we have already allocated.
1497  *
1498  * Don't bother with xfs_setattr given that nothing can have made it to disk yet
1499  * as the page is still locked at this point.
1500  */
1501 STATIC void
1502 xfs_vm_kill_delalloc_range(
1503 	struct inode		*inode,
1504 	loff_t			start,
1505 	loff_t			end)
1506 {
1507 	struct xfs_inode	*ip = XFS_I(inode);
1508 	xfs_fileoff_t		start_fsb;
1509 	xfs_fileoff_t		end_fsb;
1510 	int			error;
1511 
1512 	start_fsb = XFS_B_TO_FSB(ip->i_mount, start);
1513 	end_fsb = XFS_B_TO_FSB(ip->i_mount, end);
1514 	if (end_fsb <= start_fsb)
1515 		return;
1516 
1517 	xfs_ilock(ip, XFS_ILOCK_EXCL);
1518 	error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
1519 						end_fsb - start_fsb);
1520 	if (error) {
1521 		/* something screwed, just bail */
1522 		if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
1523 			xfs_alert(ip->i_mount,
1524 		"xfs_vm_write_failed: unable to clean up ino %lld",
1525 					ip->i_ino);
1526 		}
1527 	}
1528 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1529 }
1530 
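/*
 * Walk the buffers over the range of a failed write and punch out the
 * delalloc blocks reserved for it, clearing the buffer state so that
 * stale data is not left exposed.
 */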
1531 STATIC void
1532 xfs_vm_write_failed(
1533 	struct inode		*inode,
1534 	struct page		*page,
1535 	loff_t			pos,
1536 	unsigned		len)
1537 {
1538 	loff_t			block_offset;
1539 	loff_t			block_start;
1540 	loff_t			block_end;
1541 	loff_t			from = pos & (PAGE_CACHE_SIZE - 1);
1542 	loff_t			to = from + len;
1543 	struct buffer_head	*bh, *head;
1544 
1545 	/*
1546 	 * The request pos offset might be 32 or 64 bit; this is all fine
1547 	 * on a 64-bit platform.  However, for a 64-bit pos request on a
1548 	 * 32-bit platform, the high 32 bits will be masked off if we evaluate
1549 	 * the block_offset via (pos & PAGE_MASK) because PAGE_MASK is
1550 	 * 0xfffff000 as an unsigned long, hence the result is incorrect
1551 	 * and could cause the following ASSERT to fail in most cases.
1552 	 * In order to avoid this mismatch problem, we evaluate the
1553 	 * block_offset of the start of the page by using shifts rather
1554 	 * than masks.
1555 	 */
1556 	block_offset = (pos >> PAGE_CACHE_SHIFT) << PAGE_CACHE_SHIFT;
1557 
1558 	ASSERT(block_offset + from == pos);
1559 
1560 	head = page_buffers(page);
1561 	block_start = 0;
1562 	for (bh = head; bh != head || !block_start;
1563 	     bh = bh->b_this_page, block_start = block_end,
1564 				   block_offset += bh->b_size) {
1565 		block_end = block_start + bh->b_size;
1566 
1567 		/* skip buffers before the write */
1568 		if (block_end <= from)
1569 			continue;
1570 
1571 		/* if the buffer is after the write, we're done */
1572 		if (block_start >= to)
1573 			break;
1574 
1575 		if (!buffer_delay(bh))
1576 			continue;
1577 
1578 		if (!buffer_new(bh) && block_offset < i_size_read(inode))
1579 			continue;
1580 
1581 		xfs_vm_kill_delalloc_range(inode, block_offset,
1582 					   block_offset + bh->b_size);
1583 
1584 		/*
1585 		 * This buffer does not contain data anymore. Make sure anyone
1586 		 * who finds it knows that for certain.
1587 		 */
1588 		clear_buffer_delay(bh);
1589 		clear_buffer_uptodate(bh);
1590 		clear_buffer_mapped(bh);
1591 		clear_buffer_new(bh);
1592 		clear_buffer_dirty(bh);
1593 	}
1594 
1595 }
1596 
1597 /*
1598  * This used to call block_write_begin(), but it unlocks and releases the page
1599  * on error, and we need that page to be able to punch stale delalloc blocks out
1600  * on failure; hence we copy-n-waste it here and call xfs_vm_write_failed() at
1601  * the appropriate point.
1602  */
1603 STATIC int
1604 xfs_vm_write_begin(
1605 	struct file		*file,
1606 	struct address_space	*mapping,
1607 	loff_t			pos,
1608 	unsigned		len,
1609 	unsigned		flags,
1610 	struct page		**pagep,
1611 	void			**fsdata)
1612 {
1613 	pgoff_t			index = pos >> PAGE_CACHE_SHIFT;
1614 	struct page		*page;
1615 	int			status;
1616 
1617 	ASSERT(len <= PAGE_CACHE_SIZE);
1618 
1619 	page = grab_cache_page_write_begin(mapping, index, flags);
1620 	if (!page)
1621 		return -ENOMEM;
1622 
1623 	status = __block_write_begin(page, pos, len, xfs_get_blocks);
1624 	if (unlikely(status)) {
1625 		struct inode	*inode = mapping->host;
1626 		size_t		isize = i_size_read(inode);
1627 
1628 		xfs_vm_write_failed(inode, page, pos, len);
1629 		unlock_page(page);
1630 
1631 		/*
1632 		 * If the write is beyond EOF, we only want to kill blocks
1633 		 * allocated in this write, not blocks that were previously
1634 		 * written successfully.
1635 		 */
1636 		if (pos + len > isize) {
1637 			ssize_t start = max_t(ssize_t, pos, isize);
1638 
1639 			truncate_pagecache_range(inode, start, pos + len);
1640 		}
1641 
1642 		page_cache_release(page);
1643 		page = NULL;
1644 	}
1645 
1646 	*pagep = page;
1647 	return status;
1648 }
1649 
1650 /*
1651  * On failure, we only need to kill delalloc blocks beyond EOF in the range of
1652  * this specific write because they will never be written. Previous writes
1653  * beyond EOF where block allocation succeeded do not need to be trashed, so
1654  * only new blocks from this write should be trashed. For blocks within
1655  * EOF, generic_write_end() zeros them so they are safe to leave alone and be
1656  * written with all the other valid data.
1657  */
1658 STATIC int
1659 xfs_vm_write_end(
1660 	struct file		*file,
1661 	struct address_space	*mapping,
1662 	loff_t			pos,
1663 	unsigned		len,
1664 	unsigned		copied,
1665 	struct page		*page,
1666 	void			*fsdata)
1667 {
1668 	int			ret;
1669 
1670 	ASSERT(len <= PAGE_CACHE_SIZE);
1671 
1672 	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
1673 	if (unlikely(ret < len)) {
1674 		struct inode	*inode = mapping->host;
1675 		size_t		isize = i_size_read(inode);
1676 		loff_t		to = pos + len;
1677 
1678 		if (to > isize) {
1679 			/* only kill blocks in this write beyond EOF */
1680 			if (pos > isize)
1681 				isize = pos;
1682 			xfs_vm_kill_delalloc_range(inode, isize, to);
1683 			truncate_pagecache_range(inode, isize, to);
1684 		}
1685 	}
1686 	return ret;
1687 }
1688 
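/*
 * Flush dirty data under the iolock before asking the generic code for
 * the block mapping so that the result reflects the on-disk layout.
 */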
1689 STATIC sector_t
1690 xfs_vm_bmap(
1691 	struct address_space	*mapping,
1692 	sector_t		block)
1693 {
1694 	struct inode		*inode = (struct inode *)mapping->host;
1695 	struct xfs_inode	*ip = XFS_I(inode);
1696 
1697 	trace_xfs_vm_bmap(XFS_I(inode));
1698 	xfs_ilock(ip, XFS_IOLOCK_SHARED);
1699 	filemap_write_and_wait(mapping);
1700 	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
1701 	return generic_block_bmap(mapping, block, xfs_get_blocks);
1702 }
1703 
1704 STATIC int
1705 xfs_vm_readpage(
1706 	struct file		*unused,
1707 	struct page		*page)
1708 {
1709 	return mpage_readpage(page, xfs_get_blocks);
1710 }
1711 
1712 STATIC int
1713 xfs_vm_readpages(
1714 	struct file		*unused,
1715 	struct address_space	*mapping,
1716 	struct list_head	*pages,
1717 	unsigned		nr_pages)
1718 {
1719 	return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
1720 }
1721 
1722 const struct address_space_operations xfs_address_space_operations = {
1723 	.readpage		= xfs_vm_readpage,
1724 	.readpages		= xfs_vm_readpages,
1725 	.writepage		= xfs_vm_writepage,
1726 	.writepages		= xfs_vm_writepages,
1727 	.releasepage		= xfs_vm_releasepage,
1728 	.invalidatepage		= xfs_vm_invalidatepage,
1729 	.write_begin		= xfs_vm_write_begin,
1730 	.write_end		= xfs_vm_write_end,
1731 	.bmap			= xfs_vm_bmap,
1732 	.direct_IO		= xfs_vm_direct_IO,
1733 	.migratepage		= buffer_migrate_page,
1734 	.is_partially_uptodate  = block_is_partially_uptodate,
1735 	.error_remove_page	= generic_error_remove_page,
1736 };
1737