// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2016-2018 Christoph Hellwig.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_reflink.h"
#include "xfs_errortag.h"
#include "xfs_error.h"

struct xfs_writepage_ctx {
	struct iomap_writepage_ctx ctx;
	unsigned int		data_seq;
	unsigned int		cow_seq;
};

static inline struct xfs_writepage_ctx *
XFS_WPC(struct iomap_writepage_ctx *ctx)
{
	return container_of(ctx, struct xfs_writepage_ctx, ctx);
}

/*
 * Fast and loose check if this write could update the on-disk inode size.
 */
static inline bool xfs_ioend_is_append(struct iomap_ioend *ioend)
{
	return ioend->io_offset + ioend->io_size >
		XFS_I(ioend->io_inode)->i_disk_size;
}

/*
 * Update on-disk file size now that data has been written to disk.
 */
int
xfs_setfilesize(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	size_t			size)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	xfs_fsize_t		isize;
	int			error;

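	/*
	 * Only the inode core is logged here, so an empty transaction with
	 * the tr_fsyncts log reservation and no block reservation is
	 * sufficient for the on-disk size update.
	 */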
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	isize = xfs_new_eof(ip, offset + size);
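	/*
	 * xfs_new_eof() returns the new on-disk size if this I/O extends the
	 * file past the current on-disk size, and 0 otherwise, in which case
	 * there is nothing to log.
	 */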
	if (!isize) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_trans_cancel(tp);
		return 0;
	}

	trace_xfs_setfilesize(ip, offset, size);

	ip->i_disk_size = isize;
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	return xfs_trans_commit(tp);
}

/*
 * IO write completion.
 */
STATIC void
xfs_end_ioend(
	struct iomap_ioend	*ioend)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_off_t		offset = ioend->io_offset;
	size_t			size = ioend->io_size;
	unsigned int		nofs_flag;
	int			error;

	/*
	 * We can allocate memory here while doing writeback on behalf of
	 * memory reclaim. To avoid memory allocation deadlocks set the
	 * task-wide nofs context for the following operations.
	 */
	nofs_flag = memalloc_nofs_save();

	/*
	 * Just clean up the in-memory structures if the fs has been shut down.
	 */
	if (xfs_is_shutdown(mp)) {
		error = -EIO;
		goto done;
	}

	/*
	 * Clean up all COW blocks and underlying data fork delalloc blocks on
	 * I/O error. The delalloc punch is required because this ioend was
	 * mapped to blocks in the COW fork and the associated pages are no
	 * longer dirty. If we don't remove delalloc blocks here, they become
	 * stale and can corrupt free space accounting on unmount.
	 */
	error = blk_status_to_errno(ioend->io_bio->bi_status);
	if (unlikely(error)) {
		if (ioend->io_flags & IOMAP_F_SHARED) {
			xfs_reflink_cancel_cow_range(ip, offset, size, true);
			xfs_bmap_punch_delalloc_range(ip, offset,
					offset + size);
		}
		goto done;
	}

	/*
	 * Success: commit the COW or unwritten blocks if needed.
	 */
	if (ioend->io_flags & IOMAP_F_SHARED)
		error = xfs_reflink_end_cow(ip, offset, size);
	else if (ioend->io_type == IOMAP_UNWRITTEN)
		error = xfs_iomap_write_unwritten(ip, offset, size, false);

	if (!error && xfs_ioend_is_append(ioend))
		error = xfs_setfilesize(ip, ioend->io_offset, ioend->io_size);
done:
	iomap_finish_ioends(ioend, error);
	memalloc_nofs_restore(nofs_flag);
}

/*
 * Finish all pending IO completions that require transactional modifications.
 *
 * We try to merge physically and logically contiguous ioends before completion
 * to minimise the number of transactions we need to perform during IO
 * completion. Both unwritten extent conversion and COW remapping need to
 * iterate and modify one physical extent at a time, so we gain nothing by
 * merging physically discontiguous extents here.
 *
 * The ioend chain we process here is largely unbound in length and we may have
 * to perform significant amounts of work on each ioend to complete it. Hence
 * we have to be careful about holding the CPU for too long in this loop.
 */
void
xfs_end_io(
	struct work_struct	*work)
{
	struct xfs_inode	*ip =
		container_of(work, struct xfs_inode, i_ioend_work);
	struct iomap_ioend	*ioend;
	struct list_head	tmp;
	unsigned long		flags;

	spin_lock_irqsave(&ip->i_ioend_lock, flags);
	list_replace_init(&ip->i_ioend_list, &tmp);
	spin_unlock_irqrestore(&ip->i_ioend_lock, flags);

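	/*
	 * Sort by file offset so that iomap_ioend_try_merge() below sees
	 * mergeable neighbours adjacent in the list.
	 */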
	iomap_sort_ioends(&tmp);
	while ((ioend = list_first_entry_or_null(&tmp, struct iomap_ioend,
			io_list))) {
		list_del_init(&ioend->io_list);
		iomap_ioend_try_merge(ioend, &tmp);
		xfs_end_ioend(ioend);
		cond_resched();
	}
}

STATIC void
xfs_end_bio(
	struct bio		*bio)
{
	struct iomap_ioend	*ioend = bio->bi_private;
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	unsigned long		flags;

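	/*
	 * Completions are queued per inode and processed by xfs_end_io() on
	 * the unwritten workqueue. Only schedule the work item when the list
	 * transitions from empty to non-empty; an already queued work item
	 * will pick up any ioends added while it is pending.
	 */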
	spin_lock_irqsave(&ip->i_ioend_lock, flags);
	if (list_empty(&ip->i_ioend_list))
		WARN_ON_ONCE(!queue_work(ip->i_mount->m_unwritten_workqueue,
					 &ip->i_ioend_work));
	list_add_tail(&ioend->io_list, &ip->i_ioend_list);
	spin_unlock_irqrestore(&ip->i_ioend_lock, flags);
}

/*
 * Fast revalidation of the cached writeback mapping. Return true if the
 * current mapping is valid, false otherwise.
 */
static bool
xfs_imap_valid(
	struct iomap_writepage_ctx	*wpc,
	struct xfs_inode		*ip,
	loff_t				offset)
{
	if (offset < wpc->iomap.offset ||
	    offset >= wpc->iomap.offset + wpc->iomap.length)
		return false;
	/*
	 * If this is a COW mapping, it is sufficient to check that the mapping
	 * covers the offset. Be careful to check this first because the caller
	 * can revalidate a COW mapping without updating the data seqno.
	 */
	if (wpc->iomap.flags & IOMAP_F_SHARED)
		return true;

	/*
	 * This is not a COW mapping. Check the sequence number of the data
	 * fork because concurrent changes could have invalidated the extent.
	 * Check the COW fork because concurrent changes since the last time we
	 * checked (and found nothing at this offset) could have added
	 * overlapping blocks.
	 */
	if (XFS_WPC(wpc)->data_seq != READ_ONCE(ip->i_df.if_seq)) {
		trace_xfs_wb_data_iomap_invalid(ip, &wpc->iomap,
				XFS_WPC(wpc)->data_seq, XFS_DATA_FORK);
		return false;
	}
	if (xfs_inode_has_cow_data(ip) &&
	    XFS_WPC(wpc)->cow_seq != READ_ONCE(ip->i_cowfp->if_seq)) {
		trace_xfs_wb_cow_iomap_invalid(ip, &wpc->iomap,
				XFS_WPC(wpc)->cow_seq, XFS_COW_FORK);
		return false;
	}
	return true;
}

/*
 * Pass in a delalloc extent and convert it to real extents, returning the real
 * extent that maps offset_fsb in wpc->iomap.
 *
 * The current page is held locked so nothing could have removed the block
 * backing offset_fsb, although it could have moved from the COW to the data
 * fork by another thread.
 */
static int
xfs_convert_blocks(
	struct iomap_writepage_ctx *wpc,
	struct xfs_inode	*ip,
	int			whichfork,
	loff_t			offset)
{
	int			error;
	unsigned		*seq;

	if (whichfork == XFS_COW_FORK)
		seq = &XFS_WPC(wpc)->cow_seq;
	else
		seq = &XFS_WPC(wpc)->data_seq;

	/*
	 * Attempt to allocate whatever delalloc extent currently backs offset
	 * and put the result into wpc->iomap. Allocate in a loop because it
	 * may take several attempts to allocate real blocks for a contiguous
	 * delalloc extent if free space is sufficiently fragmented.
	 */
	do {
		error = xfs_bmapi_convert_delalloc(ip, whichfork, offset,
				&wpc->iomap, seq);
		if (error)
			return error;
	} while (wpc->iomap.offset + wpc->iomap.length <= offset);

	return 0;
}

static int
xfs_map_blocks(
	struct iomap_writepage_ctx *wpc,
	struct inode		*inode,
	loff_t			offset)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			count = i_blocksize(inode);
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + count);
	xfs_fileoff_t		cow_fsb;
	int			whichfork;
	struct xfs_bmbt_irec	imap;
	struct xfs_iext_cursor	icur;
	int			retries = 0;
	int			error = 0;

	if (xfs_is_shutdown(mp))
		return -EIO;

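	/*
	 * Error injection point: tests can delay the writeback mapping lookup
	 * here to widen race windows against concurrent extent map changes.
	 */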
	XFS_ERRORTAG_DELAY(mp, XFS_ERRTAG_WB_DELAY_MS);

	/*
	 * COW fork blocks can overlap data fork blocks even if the blocks
	 * aren't shared. COW I/O always takes precedence, so we must always
	 * check for overlap on reflink inodes unless the mapping is already a
	 * COW one, or the COW fork hasn't changed from the last time we looked
	 * at it.
	 *
	 * It's safe to check the COW fork if_seq here without the ILOCK because
	 * we've indirectly protected against concurrent updates: writeback has
	 * the page locked, which prevents concurrent invalidations by reflink
	 * and directio and prevents concurrent buffered writes to the same
	 * page. Changes to if_seq always happen under i_lock, which protects
	 * against concurrent updates and provides a memory barrier on the way
	 * out that ensures that we always see the current value.
	 */
	if (xfs_imap_valid(wpc, ip, offset))
		return 0;

	/*
	 * If we don't have a valid map, now it's time to get a new one for this
	 * offset. This will convert delayed allocations (including COW ones)
	 * into real extents. If we return without a valid map, it means we
	 * landed in a hole and we skip the block.
	 */
retry:
	cow_fsb = NULLFILEOFF;
	whichfork = XFS_DATA_FORK;
	xfs_ilock(ip, XFS_ILOCK_SHARED);
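	/*
	 * Writeback only runs against pages that were dirtied through write()
	 * or a shared mmap fault, and those paths have already read the
	 * extent list into memory, so we never need to do extent tree I/O
	 * here.
	 */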
	ASSERT(!xfs_need_iread_extents(&ip->i_df));

	/*
	 * Check if this offset is covered by a COW extent, and if so use it
	 * directly instead of looking up anything in the data fork.
	 */
	if (xfs_inode_has_cow_data(ip) &&
	    xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &imap))
		cow_fsb = imap.br_startoff;
	if (cow_fsb != NULLFILEOFF && cow_fsb <= offset_fsb) {
		XFS_WPC(wpc)->cow_seq = READ_ONCE(ip->i_cowfp->if_seq);
		xfs_iunlock(ip, XFS_ILOCK_SHARED);

		whichfork = XFS_COW_FORK;
		goto allocate_blocks;
	}

	/*
	 * No COW extent overlap. Revalidate now that we may have updated
	 * ->cow_seq. If the data mapping is still valid, we're done.
	 */
	if (xfs_imap_valid(wpc, ip, offset)) {
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
		return 0;
	}

	/*
	 * If we don't have a valid map, now it's time to get a new one for this
	 * offset. This will convert delayed allocations (including COW ones)
	 * into real extents.
	 */
	if (!xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap))
		imap.br_startoff = end_fsb;	/* fake a hole past EOF */
	XFS_WPC(wpc)->data_seq = READ_ONCE(ip->i_df.if_seq);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	/* landed in a hole or beyond EOF? */
	if (imap.br_startoff > offset_fsb) {
		imap.br_blockcount = imap.br_startoff - offset_fsb;
		imap.br_startoff = offset_fsb;
		imap.br_startblock = HOLESTARTBLOCK;
		imap.br_state = XFS_EXT_NORM;
	}

	/*
	 * Truncate to the next COW extent if there is one. This is the only
	 * opportunity to do this because we can skip COW fork lookups for the
	 * subsequent blocks in the mapping; however, the requirement to treat
	 * the COW range separately remains.
	 */
	if (cow_fsb != NULLFILEOFF &&
	    cow_fsb < imap.br_startoff + imap.br_blockcount)
		imap.br_blockcount = cow_fsb - imap.br_startoff;

	/* got a delalloc extent? */
	if (imap.br_startblock != HOLESTARTBLOCK &&
	    isnullstartblock(imap.br_startblock))
		goto allocate_blocks;

	xfs_bmbt_to_iomap(ip, &wpc->iomap, &imap, 0, 0, XFS_WPC(wpc)->data_seq);
	trace_xfs_map_blocks_found(ip, offset, count, whichfork, &imap);
	return 0;
allocate_blocks:
	error = xfs_convert_blocks(wpc, ip, whichfork, offset);
	if (error) {
		/*
		 * If we failed to find the extent in the COW fork we might have
		 * raced with a COW to data fork conversion or truncate.
		 * Restart the lookup to catch the extent in the data fork for
		 * the former case, but prevent additional retries to avoid
		 * looping forever for the latter case.
		 */
		if (error == -EAGAIN && whichfork == XFS_COW_FORK && !retries++)
			goto retry;
		ASSERT(error != -EAGAIN);
		return error;
	}

	/*
	 * Due to merging the returned real extent might be larger than the
	 * original delalloc one. Trim the returned extent to the next COW
	 * boundary again to force a re-lookup.
	 */
	if (whichfork != XFS_COW_FORK && cow_fsb != NULLFILEOFF) {
		loff_t		cow_offset = XFS_FSB_TO_B(mp, cow_fsb);

		if (cow_offset < wpc->iomap.offset + wpc->iomap.length)
			wpc->iomap.length = cow_offset - wpc->iomap.offset;
	}

	ASSERT(wpc->iomap.offset <= offset);
	ASSERT(wpc->iomap.offset + wpc->iomap.length > offset);
	trace_xfs_map_blocks_alloc(ip, offset, count, whichfork, &imap);
	return 0;
}

static int
xfs_prepare_ioend(
	struct iomap_ioend	*ioend,
	int			status)
{
	unsigned int		nofs_flag;

	/*
	 * We can allocate memory here while doing writeback on behalf of
	 * memory reclaim. To avoid memory allocation deadlocks set the
	 * task-wide nofs context for the following operations.
	 */
	nofs_flag = memalloc_nofs_save();

	/* Convert CoW extents to regular */
	if (!status && (ioend->io_flags & IOMAP_F_SHARED)) {
		status = xfs_reflink_convert_cow(XFS_I(ioend->io_inode),
				ioend->io_offset, ioend->io_size);
	}

	memalloc_nofs_restore(nofs_flag);

	/* send ioends that might require a transaction to the completion wq */
	if (xfs_ioend_is_append(ioend) || ioend->io_type == IOMAP_UNWRITTEN ||
	    (ioend->io_flags & IOMAP_F_SHARED))
		ioend->io_bio->bi_end_io = xfs_end_bio;
	return status;
}

/*
 * If the folio has delalloc blocks on it, the caller is asking us to punch them
 * out. If we don't, we can leave a stale delalloc mapping covered by a clean
 * page that needs to be dirtied again before the delalloc mapping can be
 * converted. This stale delalloc mapping can trip up a later direct I/O read
 * operation on the same region.
 *
 * We prevent this by truncating away the delalloc regions on the folio. Because
 * they are delalloc, we can do this without needing a transaction. Indeed, if
 * we get ENOSPC errors, we have to be able to do this truncation without a
 * transaction as there is no space left for block reservation (typically why
 * we see ENOSPC in writeback).
 */
static void
xfs_discard_folio(
	struct folio		*folio,
	loff_t			pos)
{
	struct xfs_inode	*ip = XFS_I(folio->mapping->host);
	struct xfs_mount	*mp = ip->i_mount;
	int			error;

	if (xfs_is_shutdown(mp))
		return;

	xfs_alert_ratelimited(mp,
		"page discard on page "PTR_FMT", inode 0x%llx, pos %llu.",
			folio, ip->i_ino, pos);

	/*
	 * The end of the punch range is always the offset of the first byte of
	 * the next folio. Hence the end offset is only dependent on the folio
	 * itself and not the start offset that is passed in.
	 */
	error = xfs_bmap_punch_delalloc_range(ip, pos,
				folio_pos(folio) + folio_size(folio));

	if (error && !xfs_is_shutdown(mp))
		xfs_alert(mp, "page discard unable to remove delalloc mapping.");
}

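/*
 * Hooks into the generic iomap writeback path: ->map_blocks provides (and for
 * delalloc ranges allocates) the mapping for each block of a dirty folio,
 * ->prepare_ioend runs before bio submission to convert COW extents and
 * install the completion handler, and ->discard_folio is called when mapping
 * a folio fails so stale delalloc blocks can be punched out.
 */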
static const struct iomap_writeback_ops xfs_writeback_ops = {
	.map_blocks		= xfs_map_blocks,
	.prepare_ioend		= xfs_prepare_ioend,
	.discard_folio		= xfs_discard_folio,
};

STATIC int
xfs_vm_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	struct xfs_writepage_ctx wpc = { };

	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	return iomap_writepages(mapping, wbc, &wpc.ctx, &xfs_writeback_ops);
}

STATIC int
xfs_dax_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	struct xfs_inode	*ip = XFS_I(mapping->host);

	xfs_iflags_clear(ip, XFS_ITRUNCATED);
	return dax_writeback_mapping_range(mapping,
			xfs_inode_buftarg(ip)->bt_daxdev, wbc);
}

STATIC sector_t
xfs_vm_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct xfs_inode	*ip = XFS_I(mapping->host);

	trace_xfs_vm_bmap(ip);

	/*
	 * The swap code (ab-)uses ->bmap to get a block mapping and then
	 * bypasses the file system for actual I/O. We really can't allow
	 * that on reflink inodes, so we have to skip out here. And yes,
	 * 0 is the magic code for a bmap error.
	 *
	 * Since we don't pass back blockdev info, we can't return bmap
	 * information for rt files either.
	 */
	if (xfs_is_cow_inode(ip) || XFS_IS_REALTIME_INODE(ip))
		return 0;
	return iomap_bmap(mapping, block, &xfs_read_iomap_ops);
}

STATIC int
xfs_vm_read_folio(
	struct file		*unused,
	struct folio		*folio)
{
	return iomap_read_folio(folio, &xfs_read_iomap_ops);
}

STATIC void
xfs_vm_readahead(
	struct readahead_control	*rac)
{
	iomap_readahead(rac, &xfs_read_iomap_ops);
}

static int
xfs_iomap_swapfile_activate(
	struct swap_info_struct		*sis,
	struct file			*swap_file,
	sector_t			*span)
{
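	/*
	 * The swap code does I/O directly to the block device, bypassing the
	 * filesystem, so point it at the data device backing this inode
	 * before iomap walks the extent map to build the swap extents.
	 */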
	sis->bdev = xfs_inode_buftarg(XFS_I(file_inode(swap_file)))->bt_bdev;
	return iomap_swapfile_activate(sis, swap_file, span,
			&xfs_read_iomap_ops);
}

const struct address_space_operations xfs_address_space_operations = {
	.read_folio		= xfs_vm_read_folio,
	.readahead		= xfs_vm_readahead,
	.writepages		= xfs_vm_writepages,
	.dirty_folio		= iomap_dirty_folio,
	.release_folio		= iomap_release_folio,
	.invalidate_folio	= iomap_invalidate_folio,
	.bmap			= xfs_vm_bmap,
	.migrate_folio		= filemap_migrate_folio,
	.is_partially_uptodate	= iomap_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
	.swap_activate		= xfs_iomap_swapfile_activate,
};

const struct address_space_operations xfs_dax_aops = {
	.writepages		= xfs_dax_writepages,
	.dirty_folio		= noop_dirty_folio,
	.swap_activate		= xfs_iomap_swapfile_activate,
};