xref: /openbmc/linux/fs/xfs/xfs_aops.c (revision f35e839a)
1 /*
2  * Copyright (c) 2000-2005 Silicon Graphics, Inc.
3  * All Rights Reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it would be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software Foundation,
16  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
17  */
18 #include "xfs.h"
19 #include "xfs_log.h"
20 #include "xfs_sb.h"
21 #include "xfs_ag.h"
22 #include "xfs_trans.h"
23 #include "xfs_mount.h"
24 #include "xfs_bmap_btree.h"
25 #include "xfs_dinode.h"
26 #include "xfs_inode.h"
27 #include "xfs_inode_item.h"
28 #include "xfs_alloc.h"
29 #include "xfs_error.h"
30 #include "xfs_iomap.h"
31 #include "xfs_vnodeops.h"
32 #include "xfs_trace.h"
33 #include "xfs_bmap.h"
34 #include <linux/aio.h>
35 #include <linux/gfp.h>
36 #include <linux/mpage.h>
37 #include <linux/pagevec.h>
38 #include <linux/writeback.h>
39 
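/*
 * Walk the buffer_heads attached to a page and report whether any of them
 * are in delayed allocation or unwritten state.
 */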
40 void
41 xfs_count_page_state(
42 	struct page		*page,
43 	int			*delalloc,
44 	int			*unwritten)
45 {
46 	struct buffer_head	*bh, *head;
47 
48 	*delalloc = *unwritten = 0;
49 
50 	bh = head = page_buffers(page);
51 	do {
52 		if (buffer_unwritten(bh))
53 			(*unwritten) = 1;
54 		else if (buffer_delay(bh))
55 			(*delalloc) = 1;
56 	} while ((bh = bh->b_this_page) != head);
57 }
58 
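/*
 * Return the block device backing this inode's data: the realtime device
 * for realtime inodes, otherwise the main data device.
 */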
59 STATIC struct block_device *
60 xfs_find_bdev_for_inode(
61 	struct inode		*inode)
62 {
63 	struct xfs_inode	*ip = XFS_I(inode);
64 	struct xfs_mount	*mp = ip->i_mount;
65 
66 	if (XFS_IS_REALTIME_INODE(ip))
67 		return mp->m_rtdev_targp->bt_bdev;
68 	else
69 		return mp->m_ddev_targp->bt_bdev;
70 }
71 
72 /*
73  * We're now finished for good with this ioend structure.
74  * Update the page state via the associated buffer_heads,
75  * release holds on the inode and bio, and finally free
76  * up memory.  Do not use the ioend after this.
77  */
78 STATIC void
79 xfs_destroy_ioend(
80 	xfs_ioend_t		*ioend)
81 {
82 	struct buffer_head	*bh, *next;
83 
84 	for (bh = ioend->io_buffer_head; bh; bh = next) {
85 		next = bh->b_private;
86 		bh->b_end_io(bh, !ioend->io_error);
87 	}
88 
89 	if (ioend->io_iocb) {
90 		inode_dio_done(ioend->io_inode);
91 		if (ioend->io_isasync) {
92 			aio_complete(ioend->io_iocb, ioend->io_error ?
93 					ioend->io_error : ioend->io_result, 0);
94 		}
95 	}
96 
97 	mempool_free(ioend, xfs_ioend_pool);
98 }
99 
100 /*
101  * Fast and loose check if this write could update the on-disk inode size.
102  */
103 static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
104 {
105 	return ioend->io_offset + ioend->io_size >
106 		XFS_I(ioend->io_inode)->i_d.di_size;
107 }
108 
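/*
 * Allocate and reserve a transaction up front for the on-disk file size
 * update that may be needed at I/O completion, and attach it to the ioend.
 */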
109 STATIC int
110 xfs_setfilesize_trans_alloc(
111 	struct xfs_ioend	*ioend)
112 {
113 	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
114 	struct xfs_trans	*tp;
115 	int			error;
116 
117 	tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
118 
119 	error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
120 	if (error) {
121 		xfs_trans_cancel(tp, 0);
122 		return error;
123 	}
124 
125 	ioend->io_append_trans = tp;
126 
127 	/*
128 	 * We may pass freeze protection with a transaction.  So tell lockdep
129 	 * we released it.
130 	 */
131 	rwsem_release(&ioend->io_inode->i_sb->s_writers.lock_map[SB_FREEZE_FS-1],
132 		      1, _THIS_IP_);
133 	/*
134 	 * We hand off the transaction to the completion thread now, so
135 	 * clear the flag here.
136 	 */
137 	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
138 	return 0;
139 }
140 
141 /*
142  * Update on-disk file size now that data has been written to disk.
143  */
144 STATIC int
145 xfs_setfilesize(
146 	struct xfs_ioend	*ioend)
147 {
148 	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
149 	struct xfs_trans	*tp = ioend->io_append_trans;
150 	xfs_fsize_t		isize;
151 
152 	/*
153 	 * The transaction may have been allocated in the I/O submission thread,
154 	 * thus we need to mark ourselves as being in a transaction manually.
155 	 * Similarly for freeze protection.
156 	 */
157 	current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);
158 	rwsem_acquire_read(&VFS_I(ip)->i_sb->s_writers.lock_map[SB_FREEZE_FS-1],
159 			   0, 1, _THIS_IP_);
160 
161 	xfs_ilock(ip, XFS_ILOCK_EXCL);
162 	isize = xfs_new_eof(ip, ioend->io_offset + ioend->io_size);
163 	if (!isize) {
164 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
165 		xfs_trans_cancel(tp, 0);
166 		return 0;
167 	}
168 
169 	trace_xfs_setfilesize(ip, ioend->io_offset, ioend->io_size);
170 
171 	ip->i_d.di_size = isize;
172 	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
173 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
174 
175 	return xfs_trans_commit(tp, 0);
176 }
177 
178 /*
179  * Schedule IO completion handling on the final put of an ioend.
180  *
181  * If there is no work to do we might as well call it a day and free the
182  * ioend right now.
183  */
184 STATIC void
185 xfs_finish_ioend(
186 	struct xfs_ioend	*ioend)
187 {
188 	if (atomic_dec_and_test(&ioend->io_remaining)) {
189 		struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
190 
191 		if (ioend->io_type == XFS_IO_UNWRITTEN)
192 			queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
193 		else if (ioend->io_append_trans ||
194 			 (ioend->io_isdirect && xfs_ioend_is_append(ioend)))
195 			queue_work(mp->m_data_workqueue, &ioend->io_work);
196 		else
197 			xfs_destroy_ioend(ioend);
198 	}
199 }
200 
201 /*
202  * IO write completion.
203  */
204 STATIC void
205 xfs_end_io(
206 	struct work_struct *work)
207 {
208 	xfs_ioend_t	*ioend = container_of(work, xfs_ioend_t, io_work);
209 	struct xfs_inode *ip = XFS_I(ioend->io_inode);
210 	int		error = 0;
211 
212 	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
213 		ioend->io_error = -EIO;
214 		goto done;
215 	}
216 	if (ioend->io_error)
217 		goto done;
218 
219 	/*
220 	 * For unwritten extents we need to issue transactions to convert a
221 	 * range to normal written extents after the data I/O has finished.
222 	 */
223 	if (ioend->io_type == XFS_IO_UNWRITTEN) {
224 		error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
225 						  ioend->io_size);
226 	} else if (ioend->io_isdirect && xfs_ioend_is_append(ioend)) {
227 		/*
228 		 * For direct I/O we do not know if we need to allocate blocks
229 		 * or not so we can't preallocate an append transaction as that
230 		 * results in nested reservations and log space deadlocks. Hence
231 		 * allocate the transaction here. While this is sub-optimal and
232 		 * can block IO completion for some time, we're stuck with doing
233 		 * it this way until we can pass the ioend to the direct IO
234 		 * allocation callbacks and avoid nesting that way.
235 		 */
236 		error = xfs_setfilesize_trans_alloc(ioend);
237 		if (error)
238 			goto done;
239 		error = xfs_setfilesize(ioend);
240 	} else if (ioend->io_append_trans) {
241 		error = xfs_setfilesize(ioend);
242 	} else {
243 		ASSERT(!xfs_ioend_is_append(ioend));
244 	}
245 
246 done:
247 	if (error)
248 		ioend->io_error = -error;
249 	xfs_destroy_ioend(ioend);
250 }
251 
252 /*
253  * Call IO completion handling in caller context on the final put of an ioend.
254  */
255 STATIC void
256 xfs_finish_ioend_sync(
257 	struct xfs_ioend	*ioend)
258 {
259 	if (atomic_dec_and_test(&ioend->io_remaining))
260 		xfs_end_io(&ioend->io_work);
261 }
262 
263 /*
264  * Allocate and initialise an IO completion structure.
265  * We need to track unwritten extent write completion here initially.
266  * We'll need to extend this for updating the ondisk inode size later
267  * (vs. incore size).
268  */
269 STATIC xfs_ioend_t *
270 xfs_alloc_ioend(
271 	struct inode		*inode,
272 	unsigned int		type)
273 {
274 	xfs_ioend_t		*ioend;
275 
276 	ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);
277 
278 	/*
279 	 * Set the count to 1 initially, so that an I/O completion
280 	 * callback arriving before we have finished submitting all the
281 	 * I/O cannot call the completion routine too early.
282 	 */
283 	atomic_set(&ioend->io_remaining, 1);
284 	ioend->io_isasync = 0;
285 	ioend->io_isdirect = 0;
286 	ioend->io_error = 0;
287 	ioend->io_list = NULL;
288 	ioend->io_type = type;
289 	ioend->io_inode = inode;
290 	ioend->io_buffer_head = NULL;
291 	ioend->io_buffer_tail = NULL;
292 	ioend->io_offset = 0;
293 	ioend->io_size = 0;
294 	ioend->io_iocb = NULL;
295 	ioend->io_result = 0;
296 	ioend->io_append_trans = NULL;
297 
298 	INIT_WORK(&ioend->io_work, xfs_end_io);
299 	return ioend;
300 }
301 
302 STATIC int
303 xfs_map_blocks(
304 	struct inode		*inode,
305 	loff_t			offset,
306 	struct xfs_bmbt_irec	*imap,
307 	int			type,
308 	int			nonblocking)
309 {
310 	struct xfs_inode	*ip = XFS_I(inode);
311 	struct xfs_mount	*mp = ip->i_mount;
312 	ssize_t			count = 1 << inode->i_blkbits;
313 	xfs_fileoff_t		offset_fsb, end_fsb;
314 	int			error = 0;
315 	int			bmapi_flags = XFS_BMAPI_ENTIRE;
316 	int			nimaps = 1;
317 
318 	if (XFS_FORCED_SHUTDOWN(mp))
319 		return -XFS_ERROR(EIO);
320 
321 	if (type == XFS_IO_UNWRITTEN)
322 		bmapi_flags |= XFS_BMAPI_IGSTATE;
323 
324 	if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
325 		if (nonblocking)
326 			return -XFS_ERROR(EAGAIN);
327 		xfs_ilock(ip, XFS_ILOCK_SHARED);
328 	}
329 
330 	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
331 	       (ip->i_df.if_flags & XFS_IFEXTENTS));
332 	ASSERT(offset <= mp->m_super->s_maxbytes);
333 
334 	if (offset + count > mp->m_super->s_maxbytes)
335 		count = mp->m_super->s_maxbytes - offset;
336 	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
337 	offset_fsb = XFS_B_TO_FSBT(mp, offset);
338 	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
339 				imap, &nimaps, bmapi_flags);
340 	xfs_iunlock(ip, XFS_ILOCK_SHARED);
341 
342 	if (error)
343 		return -XFS_ERROR(error);
344 
345 	if (type == XFS_IO_DELALLOC &&
346 	    (!nimaps || isnullstartblock(imap->br_startblock))) {
347 		error = xfs_iomap_write_allocate(ip, offset, count, imap);
348 		if (!error)
349 			trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
350 		return -XFS_ERROR(error);
351 	}
352 
353 #ifdef DEBUG
354 	if (type == XFS_IO_UNWRITTEN) {
355 		ASSERT(nimaps);
356 		ASSERT(imap->br_startblock != HOLESTARTBLOCK);
357 		ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
358 	}
359 #endif
360 	if (nimaps)
361 		trace_xfs_map_blocks_found(ip, offset, count, type, imap);
362 	return 0;
363 }
364 
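/*
 * Check whether the given file offset falls within the cached extent
 * mapping, i.e. whether the mapping can be reused for this offset.
 */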
365 STATIC int
366 xfs_imap_valid(
367 	struct inode		*inode,
368 	struct xfs_bmbt_irec	*imap,
369 	xfs_off_t		offset)
370 {
371 	offset >>= inode->i_blkbits;
372 
373 	return offset >= imap->br_startoff &&
374 		offset < imap->br_startoff + imap->br_blockcount;
375 }
376 
377 /*
378  * BIO completion handler for buffered IO.
379  */
380 STATIC void
381 xfs_end_bio(
382 	struct bio		*bio,
383 	int			error)
384 {
385 	xfs_ioend_t		*ioend = bio->bi_private;
386 
387 	ASSERT(atomic_read(&bio->bi_cnt) >= 1);
388 	ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;
389 
390 	/* Toss the bio and hand the remaining completion work to xfs_finish_ioend() */
391 	bio->bi_private = NULL;
392 	bio->bi_end_io = NULL;
393 	bio_put(bio);
394 
395 	xfs_finish_ioend(ioend);
396 }
397 
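/*
 * Take an extra reference on the ioend for the duration of the bio and
 * submit it, using WRITE_SYNC for data integrity (WB_SYNC_ALL) writeback.
 */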
398 STATIC void
399 xfs_submit_ioend_bio(
400 	struct writeback_control *wbc,
401 	xfs_ioend_t		*ioend,
402 	struct bio		*bio)
403 {
404 	atomic_inc(&ioend->io_remaining);
405 	bio->bi_private = ioend;
406 	bio->bi_end_io = xfs_end_bio;
407 	submit_bio(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE, bio);
408 }
409 
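/*
 * Allocate a bio sized to the device's limits and point it at the sector
 * backing the given buffer_head.
 */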
410 STATIC struct bio *
411 xfs_alloc_ioend_bio(
412 	struct buffer_head	*bh)
413 {
414 	int			nvecs = bio_get_nr_vecs(bh->b_bdev);
415 	struct bio		*bio = bio_alloc(GFP_NOIO, nvecs);
416 
417 	ASSERT(bio->bi_private == NULL);
418 	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
419 	bio->bi_bdev = bh->b_bdev;
420 	return bio;
421 }
422 
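/*
 * Mark a locked, mapped buffer up to date and under asynchronous write
 * before it is added to a bio for submission.
 */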
423 STATIC void
424 xfs_start_buffer_writeback(
425 	struct buffer_head	*bh)
426 {
427 	ASSERT(buffer_mapped(bh));
428 	ASSERT(buffer_locked(bh));
429 	ASSERT(!buffer_delay(bh));
430 	ASSERT(!buffer_unwritten(bh));
431 
432 	mark_buffer_async_write(bh);
433 	set_buffer_uptodate(bh);
434 	clear_buffer_dirty(bh);
435 }
436 
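/*
 * Transition a page into writeback state and unlock it.  If none of the
 * buffers on the page are going to be written, complete the writeback
 * immediately.
 */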
437 STATIC void
438 xfs_start_page_writeback(
439 	struct page		*page,
440 	int			clear_dirty,
441 	int			buffers)
442 {
443 	ASSERT(PageLocked(page));
444 	ASSERT(!PageWriteback(page));
445 	if (clear_dirty)
446 		clear_page_dirty_for_io(page);
447 	set_page_writeback(page);
448 	unlock_page(page);
449 	/* If no buffers on the page are to be written, finish it here */
450 	if (!buffers)
451 		end_page_writeback(page);
452 }
453 
454 static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
455 {
456 	return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
457 }
458 
459 /*
460  * Submit all of the bios for all of the ioends we have saved up, covering the
461  * initial writepage page and also any probed pages.
462  *
463  * Because we may have multiple ioends spanning a page, we need to start
464  * writeback on all the buffers before we submit them for I/O. If we mark the
465  * buffers as we got, then we can end up with a page that only has buffers
466  * marked async write and I/O complete on can occur before we mark the other
467  * buffers async write.
468  *
469  * The end result of this is that we trip a bug in end_page_writeback() because
470  * we call it twice for the one page as the code in end_buffer_async_write()
471  * assumes that all buffers on the page are started at the same time.
472  *
473  * The fix is two passes across the ioend list - one to start writeback on the
474  * buffer_heads, and then submit them for I/O on the second pass.
475  *
476  * If @fail is non-zero, it means that we have a situation where some part of
477  * the submission process has failed after we have marked pages for writeback
478  * and unlocked them. In this situation, we need to fail the ioend chain rather
479  * than submit it to IO. This typically only happens on a filesystem shutdown.
480  */
481 STATIC void
482 xfs_submit_ioend(
483 	struct writeback_control *wbc,
484 	xfs_ioend_t		*ioend,
485 	int			fail)
486 {
487 	xfs_ioend_t		*head = ioend;
488 	xfs_ioend_t		*next;
489 	struct buffer_head	*bh;
490 	struct bio		*bio;
491 	sector_t		lastblock = 0;
492 
493 	/* Pass 1 - start writeback */
494 	do {
495 		next = ioend->io_list;
496 		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private)
497 			xfs_start_buffer_writeback(bh);
498 	} while ((ioend = next) != NULL);
499 
500 	/* Pass 2 - submit I/O */
501 	ioend = head;
502 	do {
503 		next = ioend->io_list;
504 		bio = NULL;
505 
506 		/*
507 		 * If we are failing the IO now, just mark the ioend with an
508 		 * error and finish it. This will run IO completion immediately
509 		 * as there is only one reference to the ioend at this point in
510 		 * time.
511 		 */
512 		if (fail) {
513 			ioend->io_error = -fail;
514 			xfs_finish_ioend(ioend);
515 			continue;
516 		}
517 
518 		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
519 
520 			if (!bio) {
521  retry:
522 				bio = xfs_alloc_ioend_bio(bh);
523 			} else if (bh->b_blocknr != lastblock + 1) {
524 				xfs_submit_ioend_bio(wbc, ioend, bio);
525 				goto retry;
526 			}
527 
528 			if (bio_add_buffer(bio, bh) != bh->b_size) {
529 				xfs_submit_ioend_bio(wbc, ioend, bio);
530 				goto retry;
531 			}
532 
533 			lastblock = bh->b_blocknr;
534 		}
535 		if (bio)
536 			xfs_submit_ioend_bio(wbc, ioend, bio);
537 		xfs_finish_ioend(ioend);
538 	} while ((ioend = next) != NULL);
539 }
540 
541 /*
542  * Cancel submission of all buffer_heads so far in this ioend.
543  * Toss the ioend too.  Only ever called for the initial page
544  * in a writepage request, so only ever one page.
545  */
546 STATIC void
547 xfs_cancel_ioend(
548 	xfs_ioend_t		*ioend)
549 {
550 	xfs_ioend_t		*next;
551 	struct buffer_head	*bh, *next_bh;
552 
553 	do {
554 		next = ioend->io_list;
555 		bh = ioend->io_buffer_head;
556 		do {
557 			next_bh = bh->b_private;
558 			clear_buffer_async_write(bh);
559 			unlock_buffer(bh);
560 		} while ((bh = next_bh) != NULL);
561 
562 		mempool_free(ioend, xfs_ioend_pool);
563 	} while ((ioend = next) != NULL);
564 }
565 
566 /*
567  * Test to see if we've been building up a completion structure for
568  * earlier buffers -- if so, we try to append to this ioend if we
569  * can, otherwise we finish off any current ioend and start another.
570  * On return, *result points at the ioend the buffer was added to.
571  */
572 STATIC void
573 xfs_add_to_ioend(
574 	struct inode		*inode,
575 	struct buffer_head	*bh,
576 	xfs_off_t		offset,
577 	unsigned int		type,
578 	xfs_ioend_t		**result,
579 	int			need_ioend)
580 {
581 	xfs_ioend_t		*ioend = *result;
582 
583 	if (!ioend || need_ioend || type != ioend->io_type) {
584 		xfs_ioend_t	*previous = *result;
585 
586 		ioend = xfs_alloc_ioend(inode, type);
587 		ioend->io_offset = offset;
588 		ioend->io_buffer_head = bh;
589 		ioend->io_buffer_tail = bh;
590 		if (previous)
591 			previous->io_list = ioend;
592 		*result = ioend;
593 	} else {
594 		ioend->io_buffer_tail->b_private = bh;
595 		ioend->io_buffer_tail = bh;
596 	}
597 
598 	bh->b_private = NULL;
599 	ioend->io_size += bh->b_size;
600 }
601 
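/*
 * Translate the file offset into the on-disk block number described by the
 * extent mapping, store it in the buffer_head and mark the buffer mapped.
 */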
602 STATIC void
603 xfs_map_buffer(
604 	struct inode		*inode,
605 	struct buffer_head	*bh,
606 	struct xfs_bmbt_irec	*imap,
607 	xfs_off_t		offset)
608 {
609 	sector_t		bn;
610 	struct xfs_mount	*m = XFS_I(inode)->i_mount;
611 	xfs_off_t		iomap_offset = XFS_FSB_TO_B(m, imap->br_startoff);
612 	xfs_daddr_t		iomap_bn = xfs_fsb_to_db(XFS_I(inode), imap->br_startblock);
613 
614 	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
615 	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
616 
617 	bn = (iomap_bn >> (inode->i_blkbits - BBSHIFT)) +
618 	      ((offset - iomap_offset) >> inode->i_blkbits);
619 
620 	ASSERT(bn || XFS_IS_REALTIME_INODE(XFS_I(inode)));
621 
622 	bh->b_blocknr = bn;
623 	set_buffer_mapped(bh);
624 }
625 
626 STATIC void
627 xfs_map_at_offset(
628 	struct inode		*inode,
629 	struct buffer_head	*bh,
630 	struct xfs_bmbt_irec	*imap,
631 	xfs_off_t		offset)
632 {
633 	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
634 	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
635 
636 	xfs_map_buffer(inode, bh, imap, offset);
637 	set_buffer_mapped(bh);
638 	clear_buffer_delay(bh);
639 	clear_buffer_unwritten(bh);
640 }
641 
642 /*
643  * Test if a given page is suitable for writing as part of an unwritten
644  * or delayed allocate extent.
645  */
646 STATIC int
647 xfs_check_page_type(
648 	struct page		*page,
649 	unsigned int		type)
650 {
651 	if (PageWriteback(page))
652 		return 0;
653 
654 	if (page->mapping && page_has_buffers(page)) {
655 		struct buffer_head	*bh, *head;
656 		int			acceptable = 0;
657 
658 		bh = head = page_buffers(page);
659 		do {
660 			if (buffer_unwritten(bh))
661 				acceptable += (type == XFS_IO_UNWRITTEN);
662 			else if (buffer_delay(bh))
663 				acceptable += (type == XFS_IO_DELALLOC);
664 			else if (buffer_dirty(bh) && buffer_mapped(bh))
665 				acceptable += (type == XFS_IO_OVERWRITE);
666 			else
667 				break;
668 		} while ((bh = bh->b_this_page) != head);
669 
670 		if (acceptable)
671 			return 1;
672 	}
673 
674 	return 0;
675 }
676 
677 /*
678  * Allocate & map buffers for page given the extent map. Write it out.
679  * Except for the original page of a writepage, this is called on
680  * delalloc/unwritten pages only; for the original page it is possible
681  * that the page has no mapping at all.
682  */
683 STATIC int
684 xfs_convert_page(
685 	struct inode		*inode,
686 	struct page		*page,
687 	loff_t			tindex,
688 	struct xfs_bmbt_irec	*imap,
689 	xfs_ioend_t		**ioendp,
690 	struct writeback_control *wbc)
691 {
692 	struct buffer_head	*bh, *head;
693 	xfs_off_t		end_offset;
694 	unsigned long		p_offset;
695 	unsigned int		type;
696 	int			len, page_dirty;
697 	int			count = 0, done = 0, uptodate = 1;
698  	xfs_off_t		offset = page_offset(page);
699 
700 	if (page->index != tindex)
701 		goto fail;
702 	if (!trylock_page(page))
703 		goto fail;
704 	if (PageWriteback(page))
705 		goto fail_unlock_page;
706 	if (page->mapping != inode->i_mapping)
707 		goto fail_unlock_page;
708 	if (!xfs_check_page_type(page, (*ioendp)->io_type))
709 		goto fail_unlock_page;
710 
711 	/*
712 	 * page_dirty is initially a count of buffers on the page before
713 	 * EOF and is decremented as we move each into a cleanable state.
714 	 *
715 	 * Derivation:
716 	 *
717 	 * End offset is the highest offset that this page should represent.
718 	 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
719 	 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
720 	 * hence give us the correct page_dirty count. On any other page,
721 	 * it will be zero and in that case we need page_dirty to be the
722 	 * count of buffers on the page.
723 	 */
724 	end_offset = min_t(unsigned long long,
725 			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
726 			i_size_read(inode));
727 
728 	len = 1 << inode->i_blkbits;
729 	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
730 					PAGE_CACHE_SIZE);
731 	p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
732 	page_dirty = p_offset / len;
733 
734 	bh = head = page_buffers(page);
735 	do {
736 		if (offset >= end_offset)
737 			break;
738 		if (!buffer_uptodate(bh))
739 			uptodate = 0;
740 		if (!(PageUptodate(page) || buffer_uptodate(bh))) {
741 			done = 1;
742 			continue;
743 		}
744 
745 		if (buffer_unwritten(bh) || buffer_delay(bh) ||
746 		    buffer_mapped(bh)) {
747 			if (buffer_unwritten(bh))
748 				type = XFS_IO_UNWRITTEN;
749 			else if (buffer_delay(bh))
750 				type = XFS_IO_DELALLOC;
751 			else
752 				type = XFS_IO_OVERWRITE;
753 
754 			if (!xfs_imap_valid(inode, imap, offset)) {
755 				done = 1;
756 				continue;
757 			}
758 
759 			lock_buffer(bh);
760 			if (type != XFS_IO_OVERWRITE)
761 				xfs_map_at_offset(inode, bh, imap, offset);
762 			xfs_add_to_ioend(inode, bh, offset, type,
763 					 ioendp, done);
764 
765 			page_dirty--;
766 			count++;
767 		} else {
768 			done = 1;
769 		}
770 	} while (offset += len, (bh = bh->b_this_page) != head);
771 
772 	if (uptodate && bh == head)
773 		SetPageUptodate(page);
774 
775 	if (count) {
776 		if (--wbc->nr_to_write <= 0 &&
777 		    wbc->sync_mode == WB_SYNC_NONE)
778 			done = 1;
779 	}
780 	xfs_start_page_writeback(page, !page_dirty, count);
781 
782 	return done;
783  fail_unlock_page:
784 	unlock_page(page);
785  fail:
786 	return 1;
787 }
788 
789 /*
790  * Convert & write out a cluster of pages in the same extent as defined
791  * by mp and following the start page.
792  */
793 STATIC void
794 xfs_cluster_write(
795 	struct inode		*inode,
796 	pgoff_t			tindex,
797 	struct xfs_bmbt_irec	*imap,
798 	xfs_ioend_t		**ioendp,
799 	struct writeback_control *wbc,
800 	pgoff_t			tlast)
801 {
802 	struct pagevec		pvec;
803 	int			done = 0, i;
804 
805 	pagevec_init(&pvec, 0);
806 	while (!done && tindex <= tlast) {
807 		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);
808 
809 		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
810 			break;
811 
812 		for (i = 0; i < pagevec_count(&pvec); i++) {
813 			done = xfs_convert_page(inode, pvec.pages[i], tindex++,
814 					imap, ioendp, wbc);
815 			if (done)
816 				break;
817 		}
818 
819 		pagevec_release(&pvec);
820 		cond_resched();
821 	}
822 }
823 
824 STATIC void
825 xfs_vm_invalidatepage(
826 	struct page		*page,
827 	unsigned long		offset)
828 {
829 	trace_xfs_invalidatepage(page->mapping->host, page, offset);
830 	block_invalidatepage(page, offset);
831 }
832 
833 /*
834  * If the page has delalloc buffers on it, we need to punch them out before we
835  * invalidate the page. If we don't, we leave a stale delalloc mapping on the
836  * inode that can trip a BUG() in xfs_get_blocks() later on if a direct IO read
837  * is done on that same region - the delalloc extent is returned when none is
838  * supposed to be there.
839  *
840  * We prevent this by truncating away the delalloc regions on the page before
841  * invalidating it. Because they are delalloc, we can do this without needing a
842  * transaction. Indeed - if we get ENOSPC errors, we have to be able to do this
843  * truncation without a transaction as there is no space left for block
844  * reservation (typically why we see an ENOSPC in writeback).
845  *
846  * This is not a performance critical path, so for now just do the punching a
847  * buffer head at a time.
848  */
849 STATIC void
850 xfs_aops_discard_page(
851 	struct page		*page)
852 {
853 	struct inode		*inode = page->mapping->host;
854 	struct xfs_inode	*ip = XFS_I(inode);
855 	struct buffer_head	*bh, *head;
856 	loff_t			offset = page_offset(page);
857 
858 	if (!xfs_check_page_type(page, XFS_IO_DELALLOC))
859 		goto out_invalidate;
860 
861 	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
862 		goto out_invalidate;
863 
864 	xfs_alert(ip->i_mount,
865 		"page discard on page %p, inode 0x%llx, offset %llu.",
866 			page, ip->i_ino, offset);
867 
868 	xfs_ilock(ip, XFS_ILOCK_EXCL);
869 	bh = head = page_buffers(page);
870 	do {
871 		int		error;
872 		xfs_fileoff_t	start_fsb;
873 
874 		if (!buffer_delay(bh))
875 			goto next_buffer;
876 
877 		start_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
878 		error = xfs_bmap_punch_delalloc_range(ip, start_fsb, 1);
879 		if (error) {
880 			/* something screwed, just bail */
881 			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
882 				xfs_alert(ip->i_mount,
883 			"page discard unable to remove delalloc mapping.");
884 			}
885 			break;
886 		}
887 next_buffer:
888 		offset += 1 << inode->i_blkbits;
889 
890 	} while ((bh = bh->b_this_page) != head);
891 
892 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
893 out_invalidate:
894 	xfs_vm_invalidatepage(page, 0);
895 	return;
896 }
897 
898 /*
899  * Write out a dirty page.
900  *
901  * For delalloc space on the page we need to allocate space and flush it.
902  * For unwritten space on the page we need to start the conversion to
903  * regular allocated space.
904  * For any other dirty buffer heads on the page we should flush them.
905  */
906 STATIC int
907 xfs_vm_writepage(
908 	struct page		*page,
909 	struct writeback_control *wbc)
910 {
911 	struct inode		*inode = page->mapping->host;
912 	struct buffer_head	*bh, *head;
913 	struct xfs_bmbt_irec	imap;
914 	xfs_ioend_t		*ioend = NULL, *iohead = NULL;
915 	loff_t			offset;
916 	unsigned int		type;
917 	__uint64_t              end_offset;
918 	pgoff_t                 end_index, last_index;
919 	ssize_t			len;
920 	int			err, imap_valid = 0, uptodate = 1;
921 	int			count = 0;
922 	int			nonblocking = 0;
923 
924 	trace_xfs_writepage(inode, page, 0);
925 
926 	ASSERT(page_has_buffers(page));
927 
928 	/*
929 	 * Refuse to write the page out if we are called from reclaim context.
930 	 *
931 	 * This avoids stack overflows when called from deeply used stacks in
932 	 * This avoids stack overflows when called from already-deep stacks in
933 	 * allow reclaim from kswapd as the stack usage there is relatively low.
934 	 *
935 	 * This should never happen except in the case of a VM regression so
936 	 * warn about it.
937 	 */
938 	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
939 			PF_MEMALLOC))
940 		goto redirty;
941 
942 	/*
943 	 * Given that we do not allow direct reclaim to call us, we should
944 	 * never be called while in a filesystem transaction.
945 	 */
946 	if (WARN_ON(current->flags & PF_FSTRANS))
947 		goto redirty;
948 
949 	/* Is this page beyond the end of the file? */
950 	offset = i_size_read(inode);
951 	end_index = offset >> PAGE_CACHE_SHIFT;
952 	last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
953 	if (page->index >= end_index) {
954 		unsigned offset_into_page = offset & (PAGE_CACHE_SIZE - 1);
955 
956 		/*
957 		 * Skip the page if it is fully outside i_size, e.g. due to a
958 		 * truncate operation that is in progress. We must redirty the
959 		 * page so that reclaim stops reclaiming it. Otherwise
960 		 * xfs_vm_releasepage() is called on it and gets confused.
961 		 */
962 		if (page->index >= end_index + 1 || offset_into_page == 0)
963 			goto redirty;
964 
965 		/*
966 		 * The page straddles i_size.  It must be zeroed out on each
967 		 * and every writepage invocation because it may be mmapped.
968 		 * "A file is mapped in multiples of the page size.  For a file
969 		 * that is not a multiple of the  page size, the remaining
970 		 * memory is zeroed when mapped, and writes to that region are
971 		 * not written out to the file."
972 		 */
973 		zero_user_segment(page, offset_into_page, PAGE_CACHE_SIZE);
974 	}
975 
976 	end_offset = min_t(unsigned long long,
977 			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
978 			offset);
979 	len = 1 << inode->i_blkbits;
980 
981 	bh = head = page_buffers(page);
982 	offset = page_offset(page);
983 	type = XFS_IO_OVERWRITE;
984 
985 	if (wbc->sync_mode == WB_SYNC_NONE)
986 		nonblocking = 1;
987 
988 	do {
989 		int new_ioend = 0;
990 
991 		if (offset >= end_offset)
992 			break;
993 		if (!buffer_uptodate(bh))
994 			uptodate = 0;
995 
996 		/*
997 		 * set_page_dirty dirties all buffers in a page, independent
998 		 * of their state.  The dirty state however is entirely
999 		 * meaningless for holes (!mapped && uptodate), so skip
1000 		 * buffers covering holes here.
1001 		 */
1002 		if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
1003 			imap_valid = 0;
1004 			continue;
1005 		}
1006 
1007 		if (buffer_unwritten(bh)) {
1008 			if (type != XFS_IO_UNWRITTEN) {
1009 				type = XFS_IO_UNWRITTEN;
1010 				imap_valid = 0;
1011 			}
1012 		} else if (buffer_delay(bh)) {
1013 			if (type != XFS_IO_DELALLOC) {
1014 				type = XFS_IO_DELALLOC;
1015 				imap_valid = 0;
1016 			}
1017 		} else if (buffer_uptodate(bh)) {
1018 			if (type != XFS_IO_OVERWRITE) {
1019 				type = XFS_IO_OVERWRITE;
1020 				imap_valid = 0;
1021 			}
1022 		} else {
1023 			if (PageUptodate(page))
1024 				ASSERT(buffer_mapped(bh));
1025 			/*
1026 			 * This buffer is not uptodate and will not be
1027 			 * written to disk.  Ensure that we will put any
1028 			 * subsequent writeable buffers into a new
1029 			 * ioend.
1030 			 */
1031 			imap_valid = 0;
1032 			continue;
1033 		}
1034 
1035 		if (imap_valid)
1036 			imap_valid = xfs_imap_valid(inode, &imap, offset);
1037 		if (!imap_valid) {
1038 			/*
1039 			 * If we didn't have a valid mapping then we need to
1040 			 * put the new mapping into a separate ioend structure.
1041 			 * This ensures non-contiguous extents always have
1042 			 * separate ioends, which is particularly important
1043 			 * for unwritten extent conversion at I/O completion
1044 			 * time.
1045 			 */
1046 			new_ioend = 1;
1047 			err = xfs_map_blocks(inode, offset, &imap, type,
1048 					     nonblocking);
1049 			if (err)
1050 				goto error;
1051 			imap_valid = xfs_imap_valid(inode, &imap, offset);
1052 		}
1053 		if (imap_valid) {
1054 			lock_buffer(bh);
1055 			if (type != XFS_IO_OVERWRITE)
1056 				xfs_map_at_offset(inode, bh, &imap, offset);
1057 			xfs_add_to_ioend(inode, bh, offset, type, &ioend,
1058 					 new_ioend);
1059 			count++;
1060 		}
1061 
1062 		if (!iohead)
1063 			iohead = ioend;
1064 
1065 	} while (offset += len, ((bh = bh->b_this_page) != head));
1066 
1067 	if (uptodate && bh == head)
1068 		SetPageUptodate(page);
1069 
1070 	xfs_start_page_writeback(page, 1, count);
1071 
1072 	/* if there is no IO to be submitted for this page, we are done */
1073 	if (!ioend)
1074 		return 0;
1075 
1076 	ASSERT(iohead);
1077 
1078 	/*
1079 	 * Any errors from this point onwards need to be reported through the IO
1080 	 * completion path as we have marked the initial page as under writeback
1081 	 * and unlocked it.
1082 	 */
1083 	if (imap_valid) {
1084 		xfs_off_t		end_index;
1085 
1086 		end_index = imap.br_startoff + imap.br_blockcount;
1087 
1088 		/* to bytes */
1089 		end_index <<= inode->i_blkbits;
1090 
1091 		/* to pages */
1092 		end_index = (end_index - 1) >> PAGE_CACHE_SHIFT;
1093 
1094 		/* check against file size */
1095 		if (end_index > last_index)
1096 			end_index = last_index;
1097 
1098 		xfs_cluster_write(inode, page->index + 1, &imap, &ioend,
1099 				  wbc, end_index);
1100 	}
1101 
1102 
1103 	/*
1104 	 * Reserve log space if we might write beyond the on-disk inode size.
1105 	 */
1106 	err = 0;
1107 	if (ioend->io_type != XFS_IO_UNWRITTEN && xfs_ioend_is_append(ioend))
1108 		err = xfs_setfilesize_trans_alloc(ioend);
1109 
1110 	xfs_submit_ioend(wbc, iohead, err);
1111 
1112 	return 0;
1113 
1114 error:
1115 	if (iohead)
1116 		xfs_cancel_ioend(iohead);
1117 
1118 	if (err == -EAGAIN)
1119 		goto redirty;
1120 
1121 	xfs_aops_discard_page(page);
1122 	ClearPageUptodate(page);
1123 	unlock_page(page);
1124 	return err;
1125 
1126 redirty:
1127 	redirty_page_for_writepage(wbc, page);
1128 	unlock_page(page);
1129 	return 0;
1130 }
1131 
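/*
 * Clear the truncated flag and let the generic writeback code walk the
 * dirty pages, calling xfs_vm_writepage() for each of them.
 */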
1132 STATIC int
1133 xfs_vm_writepages(
1134 	struct address_space	*mapping,
1135 	struct writeback_control *wbc)
1136 {
1137 	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
1138 	return generic_writepages(mapping, wbc);
1139 }
1140 
1141 /*
1142  * Called to move a page into cleanable state - and from there
1143  * to be released. The page should already be clean. We always
1144  * have buffer heads in this call.
1145  *
1146  * Returns 1 if the page is ok to release, 0 otherwise.
1147  */
1148 STATIC int
1149 xfs_vm_releasepage(
1150 	struct page		*page,
1151 	gfp_t			gfp_mask)
1152 {
1153 	int			delalloc, unwritten;
1154 
1155 	trace_xfs_releasepage(page->mapping->host, page, 0);
1156 
1157 	xfs_count_page_state(page, &delalloc, &unwritten);
1158 
1159 	if (WARN_ON(delalloc))
1160 		return 0;
1161 	if (WARN_ON(unwritten))
1162 		return 0;
1163 
1164 	return try_to_free_buffers(page);
1165 }
1166 
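/*
 * Map a logical file block to an on-disk block for buffered or direct I/O.
 * For writes into holes or delalloc blocks this allocates space: a real
 * allocation for direct I/O or extent size hints, otherwise a delayed
 * allocation reservation.
 */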
1167 STATIC int
1168 __xfs_get_blocks(
1169 	struct inode		*inode,
1170 	sector_t		iblock,
1171 	struct buffer_head	*bh_result,
1172 	int			create,
1173 	int			direct)
1174 {
1175 	struct xfs_inode	*ip = XFS_I(inode);
1176 	struct xfs_mount	*mp = ip->i_mount;
1177 	xfs_fileoff_t		offset_fsb, end_fsb;
1178 	int			error = 0;
1179 	int			lockmode = 0;
1180 	struct xfs_bmbt_irec	imap;
1181 	int			nimaps = 1;
1182 	xfs_off_t		offset;
1183 	ssize_t			size;
1184 	int			new = 0;
1185 
1186 	if (XFS_FORCED_SHUTDOWN(mp))
1187 		return -XFS_ERROR(EIO);
1188 
1189 	offset = (xfs_off_t)iblock << inode->i_blkbits;
1190 	ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
1191 	size = bh_result->b_size;
1192 
1193 	if (!create && direct && offset >= i_size_read(inode))
1194 		return 0;
1195 
1196 	/*
1197 	 * Direct I/O is usually done on preallocated files, so try getting
1198 	 * a block mapping without an exclusive lock first.  For buffered
1199 	 * writes we already have the exclusive iolock anyway, so avoiding
1200 	 * a lock roundtrip here by taking the ilock exclusive from the
1201 	 * beginning is a useful micro optimization.
1202 	 */
1203 	if (create && !direct) {
1204 		lockmode = XFS_ILOCK_EXCL;
1205 		xfs_ilock(ip, lockmode);
1206 	} else {
1207 		lockmode = xfs_ilock_map_shared(ip);
1208 	}
1209 
1210 	ASSERT(offset <= mp->m_super->s_maxbytes);
1211 	if (offset + size > mp->m_super->s_maxbytes)
1212 		size = mp->m_super->s_maxbytes - offset;
1213 	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
1214 	offset_fsb = XFS_B_TO_FSBT(mp, offset);
1215 
1216 	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
1217 				&imap, &nimaps, XFS_BMAPI_ENTIRE);
1218 	if (error)
1219 		goto out_unlock;
1220 
1221 	if (create &&
1222 	    (!nimaps ||
1223 	     (imap.br_startblock == HOLESTARTBLOCK ||
1224 	      imap.br_startblock == DELAYSTARTBLOCK))) {
1225 		if (direct || xfs_get_extsz_hint(ip)) {
1226 			/*
1227 			 * Drop the ilock in preparation for starting the block
1228 			 * allocation transaction.  It will be retaken
1229 			 * exclusively inside xfs_iomap_write_direct for the
1230 			 * actual allocation.
1231 			 */
1232 			xfs_iunlock(ip, lockmode);
1233 			error = xfs_iomap_write_direct(ip, offset, size,
1234 						       &imap, nimaps);
1235 			if (error)
1236 				return -error;
1237 			new = 1;
1238 		} else {
1239 			/*
1240 			 * Delalloc reservations do not require a transaction,
1241 			 * we can go on without dropping the lock here. If we
1242 			 * are allocating a new delalloc block, make sure that
1243 			 * we set the new flag so the buffer is marked new and
1244 			 * we know that it is newly allocated if the write
1245 			 * fails.
1246 			 */
1247 			if (nimaps && imap.br_startblock == HOLESTARTBLOCK)
1248 				new = 1;
1249 			error = xfs_iomap_write_delay(ip, offset, size, &imap);
1250 			if (error)
1251 				goto out_unlock;
1252 
1253 			xfs_iunlock(ip, lockmode);
1254 		}
1255 
1256 		trace_xfs_get_blocks_alloc(ip, offset, size, 0, &imap);
1257 	} else if (nimaps) {
1258 		trace_xfs_get_blocks_found(ip, offset, size, 0, &imap);
1259 		xfs_iunlock(ip, lockmode);
1260 	} else {
1261 		trace_xfs_get_blocks_notfound(ip, offset, size);
1262 		goto out_unlock;
1263 	}
1264 
1265 	if (imap.br_startblock != HOLESTARTBLOCK &&
1266 	    imap.br_startblock != DELAYSTARTBLOCK) {
1267 		/*
1268 		 * For unwritten extents do not report a disk address on
1269 		 * the read case (treat as if we're reading into a hole).
1270 		 */
1271 		if (create || !ISUNWRITTEN(&imap))
1272 			xfs_map_buffer(inode, bh_result, &imap, offset);
1273 		if (create && ISUNWRITTEN(&imap)) {
1274 			if (direct)
1275 				bh_result->b_private = inode;
1276 			set_buffer_unwritten(bh_result);
1277 		}
1278 	}
1279 
1280 	/*
1281 	 * If this is a realtime file, data may be on a different device
1282 	 * to that pointed to from the buffer_head b_bdev currently.
1283 	 */
1284 	bh_result->b_bdev = xfs_find_bdev_for_inode(inode);
1285 
1286 	/*
1287 	 * If we previously allocated a block out beyond eof and we are now
1288 	 * coming back to use it then we will need to flag it as new even if it
1289 	 * has a disk address.
1290 	 *
1291 	 * With sub-block writes into unwritten extents we also need to mark
1292 	 * the buffer as new so that the unwritten parts of the buffer get
1293 	 * correctly zeroed.
1294 	 */
1295 	if (create &&
1296 	    ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
1297 	     (offset >= i_size_read(inode)) ||
1298 	     (new || ISUNWRITTEN(&imap))))
1299 		set_buffer_new(bh_result);
1300 
1301 	if (imap.br_startblock == DELAYSTARTBLOCK) {
1302 		BUG_ON(direct);
1303 		if (create) {
1304 			set_buffer_uptodate(bh_result);
1305 			set_buffer_mapped(bh_result);
1306 			set_buffer_delay(bh_result);
1307 		}
1308 	}
1309 
1310 	/*
1311 	 * If this is O_DIRECT or the mpage code calling, tell them how large
1312 	 * the mapping is, so that we can avoid repeated get_blocks calls.
1313 	 */
1314 	if (direct || size > (1 << inode->i_blkbits)) {
1315 		xfs_off_t		mapping_size;
1316 
1317 		mapping_size = imap.br_startoff + imap.br_blockcount - iblock;
1318 		mapping_size <<= inode->i_blkbits;
1319 
1320 		ASSERT(mapping_size > 0);
1321 		if (mapping_size > size)
1322 			mapping_size = size;
1323 		if (mapping_size > LONG_MAX)
1324 			mapping_size = LONG_MAX;
1325 
1326 		bh_result->b_size = mapping_size;
1327 	}
1328 
1329 	return 0;
1330 
1331 out_unlock:
1332 	xfs_iunlock(ip, lockmode);
1333 	return -error;
1334 }
1335 
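/* get_blocks callback used for the buffered I/O path */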
1336 int
1337 xfs_get_blocks(
1338 	struct inode		*inode,
1339 	sector_t		iblock,
1340 	struct buffer_head	*bh_result,
1341 	int			create)
1342 {
1343 	return __xfs_get_blocks(inode, iblock, bh_result, create, 0);
1344 }
1345 
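/* get_blocks callback used for the direct I/O path */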
1346 STATIC int
1347 xfs_get_blocks_direct(
1348 	struct inode		*inode,
1349 	sector_t		iblock,
1350 	struct buffer_head	*bh_result,
1351 	int			create)
1352 {
1353 	return __xfs_get_blocks(inode, iblock, bh_result, create, 1);
1354 }
1355 
1356 /*
1357  * Complete a direct I/O write request.
1358  *
1359  * If the private argument is non-NULL __xfs_get_blocks signals us that we
1360  * need to issue a transaction to convert the range from unwritten to written
1361  * extents.  In case this is regular synchronous I/O we just call xfs_end_io
1362  * to do this and we are done.  But in case this was a successful AIO
1363  * request this handler is called from interrupt context, from which we
1364  * can't start transactions.  In that case offload the I/O completion to
1365  * the workqueues we also use for buffered I/O completion.
1366  */
1367 STATIC void
1368 xfs_end_io_direct_write(
1369 	struct kiocb		*iocb,
1370 	loff_t			offset,
1371 	ssize_t			size,
1372 	void			*private,
1373 	int			ret,
1374 	bool			is_async)
1375 {
1376 	struct xfs_ioend	*ioend = iocb->private;
1377 
1378 	/*
1379 	 * While the generic direct I/O code updates the inode size, it does
1380 	 * so only after the end_io handler is called, which means our
1381 	 * end_io handler thinks the on-disk size is outside the in-core
1382 	 * size.  To prevent this just update it a little bit earlier here.
1383 	 */
1384 	if (offset + size > i_size_read(ioend->io_inode))
1385 		i_size_write(ioend->io_inode, offset + size);
1386 
1387 	/*
1388 	 * blockdev_direct_IO can return an error even after the I/O
1389 	 * completion handler was called.  Thus we need to protect
1390 	 * against double-freeing.
1391 	 */
1392 	iocb->private = NULL;
1393 
1394 	ioend->io_offset = offset;
1395 	ioend->io_size = size;
1396 	ioend->io_iocb = iocb;
1397 	ioend->io_result = ret;
1398 	if (private && size > 0)
1399 		ioend->io_type = XFS_IO_UNWRITTEN;
1400 
1401 	if (is_async) {
1402 		ioend->io_isasync = 1;
1403 		xfs_finish_ioend(ioend);
1404 	} else {
1405 		xfs_finish_ioend_sync(ioend);
1406 	}
1407 }
1408 
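/*
 * Issue a direct I/O request.  For writes, set up an ioend so that the
 * completion handler can perform size updates and unwritten extent
 * conversion, then hand the request off to __blockdev_direct_IO().
 */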
1409 STATIC ssize_t
1410 xfs_vm_direct_IO(
1411 	int			rw,
1412 	struct kiocb		*iocb,
1413 	const struct iovec	*iov,
1414 	loff_t			offset,
1415 	unsigned long		nr_segs)
1416 {
1417 	struct inode		*inode = iocb->ki_filp->f_mapping->host;
1418 	struct block_device	*bdev = xfs_find_bdev_for_inode(inode);
1419 	struct xfs_ioend	*ioend = NULL;
1420 	ssize_t			ret;
1421 
1422 	if (rw & WRITE) {
1423 		size_t size = iov_length(iov, nr_segs);
1424 
1425 		/*
1426 		 * We cannot preallocate a size update transaction here as we
1427 		 * don't know whether allocation is necessary or not. Hence we
1428 		 * can only tell IO completion that one is necessary if we are
1429 		 * not doing unwritten extent conversion.
1430 		 */
1431 		iocb->private = ioend = xfs_alloc_ioend(inode, XFS_IO_DIRECT);
1432 		if (offset + size > XFS_I(inode)->i_d.di_size)
1433 			ioend->io_isdirect = 1;
1434 
1435 		ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
1436 					    offset, nr_segs,
1437 					    xfs_get_blocks_direct,
1438 					    xfs_end_io_direct_write, NULL, 0);
1439 		if (ret != -EIOCBQUEUED && iocb->private)
1440 			goto out_destroy_ioend;
1441 	} else {
1442 		ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
1443 					    offset, nr_segs,
1444 					    xfs_get_blocks_direct,
1445 					    NULL, NULL, 0);
1446 	}
1447 
1448 	return ret;
1449 
1450 out_destroy_ioend:
1451 	xfs_destroy_ioend(ioend);
1452 	return ret;
1453 }
1454 
1455 /*
1456  * Punch out the delalloc blocks we have already allocated.
1457  *
1458  * Don't bother with xfs_setattr given that nothing can have made it to disk yet
1459  * as the page is still locked at this point.
1460  */
1461 STATIC void
1462 xfs_vm_kill_delalloc_range(
1463 	struct inode		*inode,
1464 	loff_t			start,
1465 	loff_t			end)
1466 {
1467 	struct xfs_inode	*ip = XFS_I(inode);
1468 	xfs_fileoff_t		start_fsb;
1469 	xfs_fileoff_t		end_fsb;
1470 	int			error;
1471 
1472 	start_fsb = XFS_B_TO_FSB(ip->i_mount, start);
1473 	end_fsb = XFS_B_TO_FSB(ip->i_mount, end);
1474 	if (end_fsb <= start_fsb)
1475 		return;
1476 
1477 	xfs_ilock(ip, XFS_ILOCK_EXCL);
1478 	error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
1479 						end_fsb - start_fsb);
1480 	if (error) {
1481 		/* something screwed, just bail */
1482 		if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
1483 			xfs_alert(ip->i_mount,
1484 		"xfs_vm_write_failed: unable to clean up ino %lld",
1485 					ip->i_ino);
1486 		}
1487 	}
1488 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1489 }
1490 
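/*
 * Walk the buffers covering a failed write and punch out the delalloc
 * blocks reserved for it, so that no stale delalloc mappings are left
 * behind on the inode.
 */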
1491 STATIC void
1492 xfs_vm_write_failed(
1493 	struct inode		*inode,
1494 	struct page		*page,
1495 	loff_t			pos,
1496 	unsigned		len)
1497 {
1498 	loff_t			block_offset = pos & PAGE_MASK;
1499 	loff_t			block_start;
1500 	loff_t			block_end;
1501 	loff_t			from = pos & (PAGE_CACHE_SIZE - 1);
1502 	loff_t			to = from + len;
1503 	struct buffer_head	*bh, *head;
1504 
1505 	ASSERT(block_offset + from == pos);
1506 
1507 	head = page_buffers(page);
1508 	block_start = 0;
1509 	for (bh = head; bh != head || !block_start;
1510 	     bh = bh->b_this_page, block_start = block_end,
1511 				   block_offset += bh->b_size) {
1512 		block_end = block_start + bh->b_size;
1513 
1514 		/* skip buffers before the write */
1515 		if (block_end <= from)
1516 			continue;
1517 
1518 		/* if the buffer is after the write, we're done */
1519 		if (block_start >= to)
1520 			break;
1521 
1522 		if (!buffer_delay(bh))
1523 			continue;
1524 
1525 		if (!buffer_new(bh) && block_offset < i_size_read(inode))
1526 			continue;
1527 
1528 		xfs_vm_kill_delalloc_range(inode, block_offset,
1529 					   block_offset + bh->b_size);
1530 	}
1531 
1532 }
1533 
1534 /*
1535  * This used to call block_write_begin(), but it unlocks and releases the page
1536  * on error, and we need that page to be able to punch stale delalloc blocks out
1537  * on failure. Hence we copy-n-waste it here and call xfs_vm_write_failed() at
1538  * the appropriate point.
1539  */
1540 STATIC int
1541 xfs_vm_write_begin(
1542 	struct file		*file,
1543 	struct address_space	*mapping,
1544 	loff_t			pos,
1545 	unsigned		len,
1546 	unsigned		flags,
1547 	struct page		**pagep,
1548 	void			**fsdata)
1549 {
1550 	pgoff_t			index = pos >> PAGE_CACHE_SHIFT;
1551 	struct page		*page;
1552 	int			status;
1553 
1554 	ASSERT(len <= PAGE_CACHE_SIZE);
1555 
1556 	page = grab_cache_page_write_begin(mapping, index,
1557 					   flags | AOP_FLAG_NOFS);
1558 	if (!page)
1559 		return -ENOMEM;
1560 
1561 	status = __block_write_begin(page, pos, len, xfs_get_blocks);
1562 	if (unlikely(status)) {
1563 		struct inode	*inode = mapping->host;
1564 
1565 		xfs_vm_write_failed(inode, page, pos, len);
1566 		unlock_page(page);
1567 
1568 		if (pos + len > i_size_read(inode))
1569 			truncate_pagecache(inode, pos + len, i_size_read(inode));
1570 
1571 		page_cache_release(page);
1572 		page = NULL;
1573 	}
1574 
1575 	*pagep = page;
1576 	return status;
1577 }
1578 
1579 /*
1580  * On failure, we only need to kill delalloc blocks beyond EOF because they
1581  * will never be written. For blocks within EOF, generic_write_end() zeros them
1582  * so they are safe to leave alone and be written with all the other valid data.
1583  */
1584 STATIC int
1585 xfs_vm_write_end(
1586 	struct file		*file,
1587 	struct address_space	*mapping,
1588 	loff_t			pos,
1589 	unsigned		len,
1590 	unsigned		copied,
1591 	struct page		*page,
1592 	void			*fsdata)
1593 {
1594 	int			ret;
1595 
1596 	ASSERT(len <= PAGE_CACHE_SIZE);
1597 
1598 	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
1599 	if (unlikely(ret < len)) {
1600 		struct inode	*inode = mapping->host;
1601 		size_t		isize = i_size_read(inode);
1602 		loff_t		to = pos + len;
1603 
1604 		if (to > isize) {
1605 			truncate_pagecache(inode, to, isize);
1606 			xfs_vm_kill_delalloc_range(inode, isize, to);
1607 		}
1608 	}
1609 	return ret;
1610 }
1611 
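/*
 * Flush dirty data under the shared iolock so the on-disk extent map is up
 * to date before the generic block mapping lookup is performed.
 */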
1612 STATIC sector_t
1613 xfs_vm_bmap(
1614 	struct address_space	*mapping,
1615 	sector_t		block)
1616 {
1617 	struct inode		*inode = (struct inode *)mapping->host;
1618 	struct xfs_inode	*ip = XFS_I(inode);
1619 
1620 	trace_xfs_vm_bmap(XFS_I(inode));
1621 	xfs_ilock(ip, XFS_IOLOCK_SHARED);
1622 	filemap_write_and_wait(mapping);
1623 	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
1624 	return generic_block_bmap(mapping, block, xfs_get_blocks);
1625 }
1626 
1627 STATIC int
1628 xfs_vm_readpage(
1629 	struct file		*unused,
1630 	struct page		*page)
1631 {
1632 	return mpage_readpage(page, xfs_get_blocks);
1633 }
1634 
1635 STATIC int
1636 xfs_vm_readpages(
1637 	struct file		*unused,
1638 	struct address_space	*mapping,
1639 	struct list_head	*pages,
1640 	unsigned		nr_pages)
1641 {
1642 	return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
1643 }
1644 
1645 const struct address_space_operations xfs_address_space_operations = {
1646 	.readpage		= xfs_vm_readpage,
1647 	.readpages		= xfs_vm_readpages,
1648 	.writepage		= xfs_vm_writepage,
1649 	.writepages		= xfs_vm_writepages,
1650 	.releasepage		= xfs_vm_releasepage,
1651 	.invalidatepage		= xfs_vm_invalidatepage,
1652 	.write_begin		= xfs_vm_write_begin,
1653 	.write_end		= xfs_vm_write_end,
1654 	.bmap			= xfs_vm_bmap,
1655 	.direct_IO		= xfs_vm_direct_IO,
1656 	.migratepage		= buffer_migrate_page,
1657 	.is_partially_uptodate  = block_is_partially_uptodate,
1658 	.error_remove_page	= generic_error_remove_page,
1659 };
1660