// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2016-2018 Christoph Hellwig.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_reflink.h"
#include "xfs_errortag.h"
#include "xfs_error.h"

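/*
 * Per-writeback context. Wraps the generic iomap writeback context and
 * caches the data and COW fork sequence numbers sampled when the current
 * mapping was set up, so that xfs_imap_valid() can cheaply detect mappings
 * invalidated by concurrent extent changes.
 */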
struct xfs_writepage_ctx {
        struct iomap_writepage_ctx ctx;
        unsigned int            data_seq;
        unsigned int            cow_seq;
};

static inline struct xfs_writepage_ctx *
XFS_WPC(struct iomap_writepage_ctx *ctx)
{
        return container_of(ctx, struct xfs_writepage_ctx, ctx);
}

/*
 * Fast and loose check if this write could update the on-disk inode size.
 */
static inline bool xfs_ioend_is_append(struct iomap_ioend *ioend)
{
        return ioend->io_offset + ioend->io_size >
                XFS_I(ioend->io_inode)->i_disk_size;
}

/*
 * Update on-disk file size now that data has been written to disk.
 */
int
xfs_setfilesize(
        struct xfs_inode        *ip,
        xfs_off_t               offset,
        size_t                  size)
{
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_trans        *tp;
        xfs_fsize_t             isize;
        int                     error;

        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
        if (error)
                return error;

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        isize = xfs_new_eof(ip, offset + size);
        if (!isize) {
                xfs_iunlock(ip, XFS_ILOCK_EXCL);
                xfs_trans_cancel(tp);
                return 0;
        }

        trace_xfs_setfilesize(ip, offset, size);

        ip->i_disk_size = isize;
        xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
        xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

        return xfs_trans_commit(tp);
}

/*
 * IO write completion.
 */
STATIC void
xfs_end_ioend(
        struct iomap_ioend      *ioend)
{
        struct xfs_inode        *ip = XFS_I(ioend->io_inode);
        struct xfs_mount        *mp = ip->i_mount;
        xfs_off_t               offset = ioend->io_offset;
        size_t                  size = ioend->io_size;
        unsigned int            nofs_flag;
        int                     error;

        /*
         * We can allocate memory here while doing writeback on behalf of
         * memory reclaim. To avoid memory allocation deadlocks set the
         * task-wide nofs context for the following operations.
         */
        nofs_flag = memalloc_nofs_save();

        /*
         * Just clean up the in-memory structures if the fs has been shut down.
         */
        if (xfs_is_shutdown(mp)) {
                error = -EIO;
                goto done;
        }

        /*
         * Clean up all COW blocks and underlying data fork delalloc blocks on
         * I/O error. The delalloc punch is required because this ioend was
         * mapped to blocks in the COW fork and the associated pages are no
         * longer dirty. If we don't remove delalloc blocks here, they become
         * stale and can corrupt free space accounting on unmount.
         */
        error = blk_status_to_errno(ioend->io_bio->bi_status);
        if (unlikely(error)) {
                if (ioend->io_flags & IOMAP_F_SHARED) {
                        xfs_reflink_cancel_cow_range(ip, offset, size, true);
                        xfs_bmap_punch_delalloc_range(ip, offset,
                                        offset + size);
                }
                goto done;
        }

        /*
         * Success: commit the COW or unwritten blocks if needed.
         */
        if (ioend->io_flags & IOMAP_F_SHARED)
                error = xfs_reflink_end_cow(ip, offset, size);
        else if (ioend->io_type == IOMAP_UNWRITTEN)
                error = xfs_iomap_write_unwritten(ip, offset, size, false);

        if (!error && xfs_ioend_is_append(ioend))
                error = xfs_setfilesize(ip, ioend->io_offset, ioend->io_size);
done:
        iomap_finish_ioends(ioend, error);
        memalloc_nofs_restore(nofs_flag);
}

/*
 * Finish all pending IO completions that require transactional modifications.
 *
 * We try to merge physically and logically contiguous ioends before completion
 * to minimise the number of transactions we need to perform during IO
 * completion. Both unwritten extent conversion and COW remapping need to
 * iterate and modify one physical extent at a time, so we gain nothing by
 * merging physically discontiguous extents here.
 *
 * The ioend chain we process here is largely unbounded in length, and we may
 * have to perform significant amounts of work on each ioend to complete it.
 * Hence we have to be careful about holding the CPU for too long in this loop.
 */
void
xfs_end_io(
        struct work_struct      *work)
{
        struct xfs_inode        *ip =
                container_of(work, struct xfs_inode, i_ioend_work);
        struct iomap_ioend      *ioend;
        struct list_head        tmp;
        unsigned long           flags;

        spin_lock_irqsave(&ip->i_ioend_lock, flags);
        list_replace_init(&ip->i_ioend_list, &tmp);
        spin_unlock_irqrestore(&ip->i_ioend_lock, flags);

        iomap_sort_ioends(&tmp);
        while ((ioend = list_first_entry_or_null(&tmp, struct iomap_ioend,
                        io_list))) {
                list_del_init(&ioend->io_list);
                iomap_ioend_try_merge(ioend, &tmp);
                xfs_end_ioend(ioend);
                cond_resched();
        }
}

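/*
 * Bio completion handler for ioends that need transactional completion
 * work. This runs in interrupt context, so it only queues the ioend on the
 * per-inode list and kicks the completion workqueue if the list was
 * previously empty; xfs_end_io() does the real work later.
 */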
STATIC void
xfs_end_bio(
        struct bio              *bio)
{
        struct iomap_ioend      *ioend = bio->bi_private;
        struct xfs_inode        *ip = XFS_I(ioend->io_inode);
        unsigned long           flags;

        spin_lock_irqsave(&ip->i_ioend_lock, flags);
        if (list_empty(&ip->i_ioend_list))
                WARN_ON_ONCE(!queue_work(ip->i_mount->m_unwritten_workqueue,
                                         &ip->i_ioend_work));
        list_add_tail(&ioend->io_list, &ip->i_ioend_list);
        spin_unlock_irqrestore(&ip->i_ioend_lock, flags);
}

/*
 * Fast revalidation of the cached writeback mapping. Return true if the
 * current mapping is valid, false otherwise.
 */
static bool
xfs_imap_valid(
        struct iomap_writepage_ctx      *wpc,
        struct xfs_inode                *ip,
        loff_t                          offset)
{
        if (offset < wpc->iomap.offset ||
            offset >= wpc->iomap.offset + wpc->iomap.length)
                return false;
        /*
         * If this is a COW mapping, it is sufficient to check that the mapping
         * covers the offset. Be careful to check this first because the caller
         * can revalidate a COW mapping without updating the data seqno.
         */
        if (wpc->iomap.flags & IOMAP_F_SHARED)
                return true;

        /*
         * This is not a COW mapping. Check the sequence number of the data fork
         * because concurrent changes could have invalidated the extent. Check
         * the COW fork because concurrent changes since the last time we
         * checked (and found nothing at this offset) could have added
         * overlapping blocks.
         */
        if (XFS_WPC(wpc)->data_seq != READ_ONCE(ip->i_df.if_seq)) {
                trace_xfs_wb_data_iomap_invalid(ip, &wpc->iomap,
                                XFS_WPC(wpc)->data_seq, XFS_DATA_FORK);
                return false;
        }
        if (xfs_inode_has_cow_data(ip) &&
            XFS_WPC(wpc)->cow_seq != READ_ONCE(ip->i_cowfp->if_seq)) {
                trace_xfs_wb_cow_iomap_invalid(ip, &wpc->iomap,
                                XFS_WPC(wpc)->cow_seq, XFS_COW_FORK);
                return false;
        }
        return true;
}

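/*
 * Find or allocate the extent backing the block at @offset and cache it in
 * wpc->iomap for the generic writeback code. COW fork extents take
 * precedence over the data fork, and delalloc extents are converted to
 * real allocations here.
 */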
static int
xfs_map_blocks(
        struct iomap_writepage_ctx *wpc,
        struct inode            *inode,
        loff_t                  offset)
{
        struct xfs_inode        *ip = XFS_I(inode);
        struct xfs_mount        *mp = ip->i_mount;
        ssize_t                 count = i_blocksize(inode);
        xfs_fileoff_t           offset_fsb = XFS_B_TO_FSBT(mp, offset);
        xfs_fileoff_t           end_fsb = XFS_B_TO_FSB(mp, offset + count);
        xfs_fileoff_t           cow_fsb;
        int                     whichfork;
        struct xfs_bmbt_irec    imap;
        struct xfs_iext_cursor  icur;
        int                     retries = 0;
        int                     error = 0;
        unsigned int            *seq;

        if (xfs_is_shutdown(mp))
                return -EIO;

        XFS_ERRORTAG_DELAY(mp, XFS_ERRTAG_WB_DELAY_MS);

        /*
         * COW fork blocks can overlap data fork blocks even if the blocks
         * aren't shared. COW I/O always takes precedence, so we must always
         * check for overlap on reflink inodes unless the mapping is already a
         * COW one, or the COW fork hasn't changed from the last time we looked
         * at it.
         *
         * It's safe to check the COW fork if_seq here without the ILOCK because
         * we've indirectly protected against concurrent updates: writeback has
         * the page locked, which prevents concurrent invalidations by reflink
         * and directio and prevents concurrent buffered writes to the same
         * page. Changes to if_seq always happen under i_lock, which protects
         * against concurrent updates and provides a memory barrier on the way
         * out that ensures that we always see the current value.
         */
        if (xfs_imap_valid(wpc, ip, offset))
                return 0;

        /*
         * If we don't have a valid map, now it's time to get a new one for this
         * offset. This will convert delayed allocations (including COW ones)
         * into real extents. If we return without a valid map, it means we
         * landed in a hole and we skip the block.
         */
retry:
        cow_fsb = NULLFILEOFF;
        whichfork = XFS_DATA_FORK;
        xfs_ilock(ip, XFS_ILOCK_SHARED);
        ASSERT(!xfs_need_iread_extents(&ip->i_df));

        /*
         * Check if this offset is covered by a COW extent; if so, use it
         * directly instead of looking up anything in the data fork.
         */
        if (xfs_inode_has_cow_data(ip) &&
            xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &imap))
                cow_fsb = imap.br_startoff;
        if (cow_fsb != NULLFILEOFF && cow_fsb <= offset_fsb) {
                XFS_WPC(wpc)->cow_seq = READ_ONCE(ip->i_cowfp->if_seq);
                xfs_iunlock(ip, XFS_ILOCK_SHARED);

                whichfork = XFS_COW_FORK;
                goto allocate_blocks;
        }

        /*
         * No COW extent overlap. Revalidate now that we may have updated
         * ->cow_seq. If the data mapping is still valid, we're done.
         */
        if (xfs_imap_valid(wpc, ip, offset)) {
                xfs_iunlock(ip, XFS_ILOCK_SHARED);
                return 0;
        }

        /*
         * If we don't have a valid map, now it's time to get a new one for this
         * offset. This will convert delayed allocations (including COW ones)
         * into real extents.
         */
        if (!xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap))
                imap.br_startoff = end_fsb;     /* fake a hole past EOF */
        XFS_WPC(wpc)->data_seq = READ_ONCE(ip->i_df.if_seq);
        xfs_iunlock(ip, XFS_ILOCK_SHARED);

        /* landed in a hole or beyond EOF? */
        if (imap.br_startoff > offset_fsb) {
                imap.br_blockcount = imap.br_startoff - offset_fsb;
                imap.br_startoff = offset_fsb;
                imap.br_startblock = HOLESTARTBLOCK;
                imap.br_state = XFS_EXT_NORM;
        }

        /*
         * Truncate to the next COW extent if there is one. This is the only
         * opportunity to do this because we can skip COW fork lookups for the
         * subsequent blocks in the mapping; however, the requirement to treat
         * the COW range separately remains.
         */
        if (cow_fsb != NULLFILEOFF &&
            cow_fsb < imap.br_startoff + imap.br_blockcount)
                imap.br_blockcount = cow_fsb - imap.br_startoff;

        /* got a delalloc extent? */
        if (imap.br_startblock != HOLESTARTBLOCK &&
            isnullstartblock(imap.br_startblock))
                goto allocate_blocks;

        xfs_bmbt_to_iomap(ip, &wpc->iomap, &imap, 0, 0, XFS_WPC(wpc)->data_seq);
        trace_xfs_map_blocks_found(ip, offset, count, whichfork, &imap);
        return 0;
allocate_blocks:
        /*
         * Convert a delalloc extent to a real one. The current page is held
         * locked so nothing could have removed the block backing offset_fsb,
         * although it could have moved from the COW to the data fork by another
         * thread.
         */
        if (whichfork == XFS_COW_FORK)
                seq = &XFS_WPC(wpc)->cow_seq;
        else
                seq = &XFS_WPC(wpc)->data_seq;

        error = xfs_bmapi_convert_delalloc(ip, whichfork, offset,
                        &wpc->iomap, seq);
        if (error) {
                /*
                 * If we failed to find the extent in the COW fork we might have
                 * raced with a COW to data fork conversion or truncate.
                 * Restart the lookup to catch the extent in the data fork for
                 * the former case, but prevent additional retries to avoid
                 * looping forever for the latter case.
                 */
                if (error == -EAGAIN && whichfork == XFS_COW_FORK && !retries++)
                        goto retry;
                ASSERT(error != -EAGAIN);
                return error;
        }

        /*
         * Due to merging the returned real extent might be larger than the
         * original delalloc one. Trim the returned extent to the next COW
         * boundary again to force a re-lookup.
         */
        if (whichfork != XFS_COW_FORK && cow_fsb != NULLFILEOFF) {
                loff_t          cow_offset = XFS_FSB_TO_B(mp, cow_fsb);

                if (cow_offset < wpc->iomap.offset + wpc->iomap.length)
                        wpc->iomap.length = cow_offset - wpc->iomap.offset;
        }

        ASSERT(wpc->iomap.offset <= offset);
        ASSERT(wpc->iomap.offset + wpc->iomap.length > offset);
        trace_xfs_map_blocks_alloc(ip, offset, count, whichfork, &imap);
        return 0;
}

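/*
 * Called before an ioend's bio is submitted: convert any unwritten COW
 * extents covered by the ioend to real ones, and route ioends that will
 * need transactional completion work through xfs_end_bio.
 */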
static int
xfs_prepare_ioend(
        struct iomap_ioend      *ioend,
        int                     status)
{
        unsigned int            nofs_flag;

        /*
         * We can allocate memory here while doing writeback on behalf of
         * memory reclaim. To avoid memory allocation deadlocks set the
         * task-wide nofs context for the following operations.
         */
        nofs_flag = memalloc_nofs_save();

        /* Convert CoW extents to regular */
        if (!status && (ioend->io_flags & IOMAP_F_SHARED)) {
                status = xfs_reflink_convert_cow(XFS_I(ioend->io_inode),
                                ioend->io_offset, ioend->io_size);
        }

        memalloc_nofs_restore(nofs_flag);

        /* send ioends that might require a transaction to the completion wq */
        if (xfs_ioend_is_append(ioend) || ioend->io_type == IOMAP_UNWRITTEN ||
            (ioend->io_flags & IOMAP_F_SHARED))
                ioend->io_bio->bi_end_io = xfs_end_bio;
        return status;
}

/*
 * If the folio has delalloc blocks on it, the caller is asking us to punch them
 * out. If we don't, we can leave a stale delalloc mapping covered by a clean
 * page that needs to be dirtied again before the delalloc mapping can be
 * converted. This stale delalloc mapping can trip up a later direct I/O read
 * operation on the same region.
 *
 * We prevent this by truncating away the delalloc regions on the folio. Because
 * they are delalloc, we can do this without needing a transaction. Indeed - if
 * we get ENOSPC errors, we have to be able to do this truncation without a
 * transaction as there is no space left for block reservation (typically why
 * we see an ENOSPC in writeback).
 */
static void
xfs_discard_folio(
        struct folio            *folio,
        loff_t                  pos)
{
        struct xfs_inode        *ip = XFS_I(folio->mapping->host);
        struct xfs_mount        *mp = ip->i_mount;
        int                     error;

        if (xfs_is_shutdown(mp))
                return;

        xfs_alert_ratelimited(mp,
                "page discard on page "PTR_FMT", inode 0x%llx, pos %llu.",
                        folio, ip->i_ino, pos);

        /*
         * The end of the punch range is always the offset of the first
         * byte of the next folio. Hence the end offset is only dependent on the
         * folio itself and not the start offset that is passed in.
         */
        error = xfs_bmap_punch_delalloc_range(ip, pos,
                                folio_pos(folio) + folio_size(folio));

        if (error && !xfs_is_shutdown(mp))
                xfs_alert(mp, "page discard unable to remove delalloc mapping.");
}

static const struct iomap_writeback_ops xfs_writeback_ops = {
        .map_blocks             = xfs_map_blocks,
        .prepare_ioend          = xfs_prepare_ioend,
        .discard_folio          = xfs_discard_folio,
};

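/*
 * ->writepages for regular files: hand everything off to the generic iomap
 * writeback code with a fresh writepage context.
 */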
STATIC int
xfs_vm_writepages(
        struct address_space    *mapping,
        struct writeback_control *wbc)
{
        struct xfs_writepage_ctx wpc = { };

        xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
        return iomap_writepages(mapping, wbc, &wpc.ctx, &xfs_writeback_ops);
}

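/*
 * ->writepages for DAX files: no pages to write back, just flush dirty DAX
 * entries for the range covered by the writeback request.
 */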
STATIC int
xfs_dax_writepages(
        struct address_space    *mapping,
        struct writeback_control *wbc)
{
        struct xfs_inode        *ip = XFS_I(mapping->host);

        xfs_iflags_clear(ip, XFS_ITRUNCATED);
        return dax_writeback_mapping_range(mapping,
                        xfs_inode_buftarg(ip)->bt_daxdev, wbc);
}

STATIC sector_t
xfs_vm_bmap(
        struct address_space    *mapping,
        sector_t                block)
{
        struct xfs_inode        *ip = XFS_I(mapping->host);

        trace_xfs_vm_bmap(ip);

        /*
         * The swap code (ab-)uses ->bmap to get a block mapping and then
         * bypasses the file system for actual I/O. We really can't allow
         * that on reflink inodes, so we have to skip out here. And yes,
         * 0 is the magic code for a bmap error.
         *
         * Since we don't pass back blockdev info, we can't return bmap
         * information for rt files either.
         */
        if (xfs_is_cow_inode(ip) || XFS_IS_REALTIME_INODE(ip))
                return 0;
        return iomap_bmap(mapping, block, &xfs_read_iomap_ops);
}

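/* Read a single folio through the generic iomap read path. */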
STATIC int
xfs_vm_read_folio(
        struct file             *unused,
        struct folio            *folio)
{
        return iomap_read_folio(folio, &xfs_read_iomap_ops);
}

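/* Fill a batch of readahead folios through the generic iomap read path. */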
STATIC void
xfs_vm_readahead(
        struct readahead_control *rac)
{
        iomap_readahead(rac, &xfs_read_iomap_ops);
}

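/*
 * ->swap_activate: point the swap code at the right block device and let
 * the generic iomap helper walk and validate the file's extent map.
 */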
static int
xfs_iomap_swapfile_activate(
        struct swap_info_struct *sis,
        struct file             *swap_file,
        sector_t                *span)
{
        sis->bdev = xfs_inode_buftarg(XFS_I(file_inode(swap_file)))->bt_bdev;
        return iomap_swapfile_activate(sis, swap_file, span,
                        &xfs_read_iomap_ops);
}

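/*
 * Address space operations for regular, page-cache backed files. DAX
 * inodes use the reduced xfs_dax_aops table below instead.
 */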
const struct address_space_operations xfs_address_space_operations = {
        .read_folio             = xfs_vm_read_folio,
        .readahead              = xfs_vm_readahead,
        .writepages             = xfs_vm_writepages,
        .dirty_folio            = iomap_dirty_folio,
        .release_folio          = iomap_release_folio,
        .invalidate_folio       = iomap_invalidate_folio,
        .bmap                   = xfs_vm_bmap,
        .migrate_folio          = filemap_migrate_folio,
        .is_partially_uptodate  = iomap_is_partially_uptodate,
        .error_remove_page      = generic_error_remove_page,
        .swap_activate          = xfs_iomap_swapfile_activate,
};

const struct address_space_operations xfs_dax_aops = {
        .writepages             = xfs_dax_writepages,
        .dirty_folio            = noop_dirty_folio,
        .swap_activate          = xfs_iomap_swapfile_activate,
};