// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002	Andrew Morton
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/backing-dev.h>
#include <linux/dax.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/shmem_fs.h>
#include <linux/rmap.h>
#include "internal.h"

/*
 * Regular page slots are stabilized by the page lock even without the tree
 * itself locked.  These unlocked entries need verification under the tree
 * lock.
 */
static inline void __clear_shadow_entry(struct address_space *mapping,
                                        pgoff_t index, void *entry)
{
        XA_STATE(xas, &mapping->i_pages, index);

        xas_set_update(&xas, workingset_update_node);
        if (xas_load(&xas) != entry)
                return;
        xas_store(&xas, NULL);
}

static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
                               void *entry)
{
        spin_lock(&mapping->host->i_lock);
        xa_lock_irq(&mapping->i_pages);
        __clear_shadow_entry(mapping, index, entry);
        xa_unlock_irq(&mapping->i_pages);
        if (mapping_shrinkable(mapping))
                inode_add_lru(mapping->host);
        spin_unlock(&mapping->host->i_lock);
}

/*
 * Unconditionally remove exceptional entries. Usually called from truncate
 * path. Note that the folio_batch may be altered by this function by removing
 * exceptional entries similar to what folio_batch_remove_exceptionals() does.
 */
static void truncate_folio_batch_exceptionals(struct address_space *mapping,
                                struct folio_batch *fbatch, pgoff_t *indices)
{
        int i, j;
        bool dax;

        /* Handled by shmem itself */
        if (shmem_mapping(mapping))
                return;

        for (j = 0; j < folio_batch_count(fbatch); j++)
                if (xa_is_value(fbatch->folios[j]))
                        break;

        if (j == folio_batch_count(fbatch))
                return;

        dax = dax_mapping(mapping);
        if (!dax) {
                spin_lock(&mapping->host->i_lock);
                xa_lock_irq(&mapping->i_pages);
        }

        for (i = j; i < folio_batch_count(fbatch); i++) {
                struct folio *folio = fbatch->folios[i];
                pgoff_t index = indices[i];

                if (!xa_is_value(folio)) {
                        fbatch->folios[j++] = folio;
                        continue;
                }

                if (unlikely(dax)) {
                        dax_delete_mapping_entry(mapping, index);
                        continue;
                }

                __clear_shadow_entry(mapping, index, folio);
        }

        if (!dax) {
                xa_unlock_irq(&mapping->i_pages);
                if (mapping_shrinkable(mapping))
                        inode_add_lru(mapping->host);
                spin_unlock(&mapping->host->i_lock);
        }
        fbatch->nr = j;
}

/*
 * Invalidate exceptional entry if easily possible. This handles exceptional
 * entries for invalidate_mapping_pages().
 */
static int invalidate_exceptional_entry(struct address_space *mapping,
                                        pgoff_t index, void *entry)
{
        /* Handled by shmem itself, or for DAX we do nothing. */
        if (shmem_mapping(mapping) || dax_mapping(mapping))
                return 1;
        clear_shadow_entry(mapping, index, entry);
        return 1;
}

/*
 * Invalidate exceptional entry if clean. This handles exceptional entries for
 * invalidate_inode_pages2() so for DAX it evicts only clean entries.
 */
static int invalidate_exceptional_entry2(struct address_space *mapping,
                                         pgoff_t index, void *entry)
{
        /* Handled by shmem itself */
        if (shmem_mapping(mapping))
                return 1;
        if (dax_mapping(mapping))
                return dax_invalidate_mapping_entry_sync(mapping, index);
        clear_shadow_entry(mapping, index, entry);
        return 1;
}
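
/*
 * Illustrative note, not part of the original file: an "exceptional" entry
 * is an xarray value entry rather than a folio pointer - either a shadow
 * entry left behind by reclaim or a DAX entry.  Code walking the page
 * cache distinguishes the two cases with xa_is_value(), as the loops in
 * this file do, e.g.:
 *
 *	if (xa_is_value(folio))
 *		continue;	(shadow or DAX entry, not a real folio)
 */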

/**
 * folio_invalidate - Invalidate part or all of a folio.
 * @folio: The folio which is affected.
 * @offset: start of the range to invalidate
 * @length: length of the range to invalidate
 *
 * folio_invalidate() is called when all or part of the folio has become
 * invalidated by a truncate operation.
 *
 * folio_invalidate() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point, because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void folio_invalidate(struct folio *folio, size_t offset, size_t length)
{
        const struct address_space_operations *aops = folio->mapping->a_ops;

        if (aops->invalidate_folio)
                aops->invalidate_folio(folio, offset, length);
}
EXPORT_SYMBOL_GPL(folio_invalidate);
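
/*
 * Illustrative sketch, not part of the original file: a buffer-head based
 * filesystem typically routes this through its address_space_operations,
 * e.g. by pointing ->invalidate_folio at block_invalidate_folio()
 * ("myfs_aops" is a hypothetical name):
 *
 *	const struct address_space_operations myfs_aops = {
 *		.invalidate_folio	= block_invalidate_folio,
 *		...
 *	};
 */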

/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes orphaned.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_fault().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping.  This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_mapping_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static void truncate_cleanup_folio(struct folio *folio)
{
        if (folio_mapped(folio))
                unmap_mapping_folio(folio);

        if (folio_needs_release(folio))
                folio_invalidate(folio, 0, folio_size(folio));

        /*
         * Some filesystems seem to re-dirty the page even after
         * the VM has canceled the dirty bit (eg ext3 journaling).
         * Hence dirty accounting check is placed after invalidation.
         */
        folio_cancel_dirty(folio);
        folio_clear_mappedtodisk(folio);
}

int truncate_inode_folio(struct address_space *mapping, struct folio *folio)
{
        if (folio->mapping != mapping)
                return -EIO;

        truncate_cleanup_folio(folio);
        filemap_remove_folio(folio);
        return 0;
}

/*
 * Handle partial folios.  The folio may be entirely within the
 * range if a split has raced with us.  If not, we zero the part of the
 * folio that's within the [start, end] range, and then split the folio if
 * it's large.  split_folio() will discard pages which now lie beyond
 * i_size, and we rely on the caller to discard pages which lie within a
 * newly created hole.
 *
 * Returns false if splitting failed so the caller can avoid
 * discarding the entire folio which is stubbornly unsplit.
 */
bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end)
{
        loff_t pos = folio_pos(folio);
        unsigned int offset, length;

        if (pos < start)
                offset = start - pos;
        else
                offset = 0;
        length = folio_size(folio);
        if (pos + length <= (u64)end)
                length = length - offset;
        else
                length = end + 1 - pos - offset;

        folio_wait_writeback(folio);
        if (length == folio_size(folio)) {
                truncate_inode_folio(folio->mapping, folio);
                return true;
        }

        /*
         * We may be zeroing pages we're about to discard, but it avoids
         * doing a complex calculation here, and then doing the zeroing
         * anyway if the page split fails.
         */
        folio_zero_range(folio, offset, length);

        if (folio_needs_release(folio))
                folio_invalidate(folio, offset, length);
        if (!folio_test_large(folio))
                return true;
        if (split_folio(folio) == 0)
                return true;
        if (folio_test_dirty(folio))
                return false;
        truncate_inode_folio(folio->mapping, folio);
        return true;
}
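
/*
 * Worked example (illustrative, not part of the original file): with
 * PAGE_SIZE == 4096, a 16KiB folio at pos 0, start == 6000 and end == -1,
 * we get offset == 6000 and length == 16384 - 6000 == 10384, so bytes
 * 6000..16383 of the folio are zeroed before the split of the large folio
 * is attempted.
 */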

/*
 * Used to get rid of pages on hardware memory corruption.
 */
int generic_error_remove_page(struct address_space *mapping, struct page *page)
{
        VM_BUG_ON_PAGE(PageTail(page), page);

        if (!mapping)
                return -EINVAL;
        /*
         * Only punch for normal data pages for now.
         * Handling other types like directories would need more auditing.
         */
        if (!S_ISREG(mapping->host->i_mode))
                return -EIO;
        return truncate_inode_folio(mapping, page_folio(page));
}
EXPORT_SYMBOL(generic_error_remove_page);

static long mapping_evict_folio(struct address_space *mapping,
                struct folio *folio)
{
        if (folio_test_dirty(folio) || folio_test_writeback(folio))
                return 0;
        /* The refcount will be elevated if any page in the folio is mapped */
        if (folio_ref_count(folio) >
                        folio_nr_pages(folio) + folio_has_private(folio) + 1)
                return 0;
        if (!filemap_release_folio(folio, 0))
                return 0;

        return remove_mapping(mapping, folio);
}

/**
 * invalidate_inode_page() - Remove an unused page from the pagecache.
 * @page: The page to remove.
 *
 * Safely invalidate one page from its pagecache mapping.
 * It only drops clean, unused pages.
 *
 * Context: Page must be locked.
 * Return: The number of pages successfully removed.
 */
long invalidate_inode_page(struct page *page)
{
        struct folio *folio = page_folio(page);
        struct address_space *mapping = folio_mapping(folio);

        /* The page may have been truncated before it was locked */
        if (!mapping)
                return 0;
        return mapping_evict_folio(mapping, folio);
}

/**
 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate (inclusive)
 *
 * Truncate the page cache, removing the pages that are between
 * specified offsets (and zeroing out partial pages
 * if lstart or lend + 1 is not page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 *
 * Note that since ->invalidate_folio() accepts a range to invalidate,
 * truncate_inode_pages_range() is able to properly handle cases where
 * lend + 1 is not page aligned.
 */
void truncate_inode_pages_range(struct address_space *mapping,
                                loff_t lstart, loff_t lend)
{
        pgoff_t start;          /* inclusive */
        pgoff_t end;            /* exclusive */
        struct folio_batch fbatch;
        pgoff_t indices[PAGEVEC_SIZE];
        pgoff_t index;
        int i;
        struct folio *folio;
        bool same_folio;

        if (mapping_empty(mapping))
                return;

        /*
         * 'start' and 'end' always cover the range of pages to be fully
         * truncated. Partial folios at the start and end of the range are
         * handled separately by truncate_inode_partial_folio() below.
         * Note that 'end' is exclusive while 'lend' is inclusive.
         */
        start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (lend == -1)
                /*
                 * lend == -1 indicates end-of-file so we have to set 'end'
                 * to the highest possible pgoff_t and since the type is
                 * unsigned we're using -1.
                 */
                end = -1;
        else
                end = (lend + 1) >> PAGE_SHIFT;

        folio_batch_init(&fbatch);
        index = start;
        while (index < end && find_lock_entries(mapping, &index, end - 1,
                        &fbatch, indices)) {
                truncate_folio_batch_exceptionals(mapping, &fbatch, indices);
                for (i = 0; i < folio_batch_count(&fbatch); i++)
                        truncate_cleanup_folio(fbatch.folios[i]);
                delete_from_page_cache_batch(mapping, &fbatch);
                for (i = 0; i < folio_batch_count(&fbatch); i++)
                        folio_unlock(fbatch.folios[i]);
                folio_batch_release(&fbatch);
                cond_resched();
        }

        same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT);
        folio = __filemap_get_folio(mapping, lstart >> PAGE_SHIFT, FGP_LOCK, 0);
        if (!IS_ERR(folio)) {
                same_folio = lend < folio_pos(folio) + folio_size(folio);
                if (!truncate_inode_partial_folio(folio, lstart, lend)) {
                        start = folio_next_index(folio);
                        if (same_folio)
                                end = folio->index;
                }
                folio_unlock(folio);
                folio_put(folio);
                folio = NULL;
        }

        if (!same_folio) {
                folio = __filemap_get_folio(mapping, lend >> PAGE_SHIFT,
                                                FGP_LOCK, 0);
                if (!IS_ERR(folio)) {
                        if (!truncate_inode_partial_folio(folio, lstart, lend))
                                end = folio->index;
                        folio_unlock(folio);
                        folio_put(folio);
                }
        }

        index = start;
        while (index < end) {
                cond_resched();
                if (!find_get_entries(mapping, &index, end - 1, &fbatch,
                                indices)) {
                        /* If all gone from start onwards, we're done */
                        if (index == start)
                                break;
                        /* Otherwise restart to make sure all gone */
                        index = start;
                        continue;
                }

                for (i = 0; i < folio_batch_count(&fbatch); i++) {
                        struct folio *folio = fbatch.folios[i];

                        /* We rely upon deletion not changing folio->index */

                        if (xa_is_value(folio))
                                continue;

                        folio_lock(folio);
                        VM_BUG_ON_FOLIO(!folio_contains(folio, indices[i]), folio);
                        folio_wait_writeback(folio);
                        truncate_inode_folio(mapping, folio);
                        folio_unlock(folio);
                }
                truncate_folio_batch_exceptionals(mapping, &fbatch, indices);
                folio_batch_release(&fbatch);
        }
}
EXPORT_SYMBOL(truncate_inode_pages_range);
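
/*
 * Worked example (illustrative, not part of the original file): with
 * PAGE_SIZE == 4096, lstart == 1536 and lend == 10239 give
 * start == (1536 + 4095) >> 12 == 1 and end == 10240 >> 12 == 2, so only
 * page 1 is fully truncated; the partially covered first and last pages
 * are zeroed via truncate_inode_partial_folio() instead.
 */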

/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_rwsem and
 * mapping->invalidate_lock.
 *
 * Note: When this function returns, there can be a page in the process of
 * deletion (inside __filemap_remove_folio()) in the specified range.  Thus
 * mapping->nrpages can be non-zero when this function returns even after
 * truncation of the whole mapping.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
        truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);

/**
 * truncate_inode_pages_final - truncate *all* pages before inode dies
 * @mapping: mapping to truncate
 *
 * Called under (and serialized by) inode->i_rwsem.
 *
 * Filesystems have to use this in the .evict_inode path to inform the
 * VM that this is the final truncate and the inode is going away.
 */
void truncate_inode_pages_final(struct address_space *mapping)
{
        /*
         * Page reclaim can not participate in regular inode lifetime
         * management (can't call iput()) and thus can race with the
         * inode teardown.  Tell it when the address space is exiting,
         * so that it does not install eviction information after the
         * final truncate has begun.
         */
        mapping_set_exiting(mapping);

        if (!mapping_empty(mapping)) {
                /*
                 * As truncation uses a lockless tree lookup, cycle
                 * the tree lock to make sure any ongoing tree
                 * modification that does not see AS_EXITING is
                 * completed before starting the final truncate.
                 */
                xa_lock_irq(&mapping->i_pages);
                xa_unlock_irq(&mapping->i_pages);
        }

        truncate_inode_pages(mapping, 0);
}
EXPORT_SYMBOL(truncate_inode_pages_final);
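
/*
 * Illustrative sketch, not part of the original file: a filesystem's
 * ->evict_inode() typically calls truncate_inode_pages_final() before
 * freeing on-disk resources ("myfs_evict_inode" and
 * "myfs_free_inode_blocks" are hypothetical names):
 *
 *	static void myfs_evict_inode(struct inode *inode)
 *	{
 *		truncate_inode_pages_final(&inode->i_data);
 *		if (!inode->i_nlink)
 *			myfs_free_inode_blocks(inode);
 *		clear_inode(inode);
 *	}
 */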

/**
 * mapping_try_invalidate - Invalidate all the evictable folios of one inode
 * @mapping: the address_space which holds the folios to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 * @nr_failed: How many folio invalidations failed
 *
 * This function is similar to invalidate_mapping_pages(), except that it
 * returns the number of folios which could not be evicted in @nr_failed.
 */
unsigned long mapping_try_invalidate(struct address_space *mapping,
                pgoff_t start, pgoff_t end, unsigned long *nr_failed)
{
        pgoff_t indices[PAGEVEC_SIZE];
        struct folio_batch fbatch;
        pgoff_t index = start;
        unsigned long ret;
        unsigned long count = 0;
        int i;

        folio_batch_init(&fbatch);
        while (find_lock_entries(mapping, &index, end, &fbatch, indices)) {
                for (i = 0; i < folio_batch_count(&fbatch); i++) {
                        struct folio *folio = fbatch.folios[i];

                        /* We rely upon deletion not changing folio->index */

                        if (xa_is_value(folio)) {
                                count += invalidate_exceptional_entry(mapping,
                                                        indices[i], folio);
                                continue;
                        }

                        ret = mapping_evict_folio(mapping, folio);
                        folio_unlock(folio);
                        /*
                         * Invalidation is a hint that the folio is no longer
                         * of interest, so we try to speed up its reclaim.
                         */
                        if (!ret) {
                                deactivate_file_folio(folio);
                                /* Likely in the lru cache of a remote CPU */
                                if (nr_failed)
                                        (*nr_failed)++;
                        }
                        count += ret;
                }
                folio_batch_remove_exceptionals(&fbatch);
                folio_batch_release(&fbatch);
                cond_resched();
        }
        return count;
}

/**
 * invalidate_mapping_pages - Invalidate all clean, unlocked cache of one inode
 * @mapping: the address_space which holds the cache to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function removes pages that are clean, unmapped and unlocked,
 * as well as shadow entries. It will not block on IO activity.
 *
 * If you want to remove all the pages of one inode, regardless of
 * their use and writeback state, use truncate_inode_pages().
 *
 * Return: The number of indices that had their contents invalidated
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
                pgoff_t start, pgoff_t end)
{
        return mapping_try_invalidate(mapping, start, end, NULL);
}
EXPORT_SYMBOL(invalidate_mapping_pages);
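
/*
 * Illustrative sketch, not part of the original file: dropping the clean,
 * unused cache over a byte range looks roughly like this (the fadvise
 * POSIX_FADV_DONTNEED path uses mapping_try_invalidate() above instead,
 * so that it can count failed invalidations):
 *
 *	invalidate_mapping_pages(mapping, offset >> PAGE_SHIFT,
 *				 (offset + len - 1) >> PAGE_SHIFT);
 */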

/*
 * This is like invalidate_inode_page(), except it ignores the page's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_page_list() has a temp ref on them, or because they're transiently
 * sitting in the folio_add_lru() caches.
 */
static int invalidate_complete_folio2(struct address_space *mapping,
                                        struct folio *folio)
{
        if (folio->mapping != mapping)
                return 0;

        if (!filemap_release_folio(folio, GFP_KERNEL))
                return 0;

        spin_lock(&mapping->host->i_lock);
        xa_lock_irq(&mapping->i_pages);
        if (folio_test_dirty(folio))
                goto failed;

        BUG_ON(folio_has_private(folio));
        __filemap_remove_folio(folio, NULL);
        xa_unlock_irq(&mapping->i_pages);
        if (mapping_shrinkable(mapping))
                inode_add_lru(mapping->host);
        spin_unlock(&mapping->host->i_lock);

        filemap_free_folio(mapping, folio);
        return 1;
failed:
        xa_unlock_irq(&mapping->i_pages);
        spin_unlock(&mapping->host->i_lock);
        return 0;
}

static int folio_launder(struct address_space *mapping, struct folio *folio)
{
        if (!folio_test_dirty(folio))
                return 0;
        if (folio->mapping != mapping || mapping->a_ops->launder_folio == NULL)
                return 0;
        return mapping->a_ops->launder_folio(folio);
}

/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Return: -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
                                  pgoff_t start, pgoff_t end)
{
        pgoff_t indices[PAGEVEC_SIZE];
        struct folio_batch fbatch;
        pgoff_t index;
        int i;
        int ret = 0;
        int ret2 = 0;
        int did_range_unmap = 0;

        if (mapping_empty(mapping))
                return 0;

        folio_batch_init(&fbatch);
        index = start;
        while (find_get_entries(mapping, &index, end, &fbatch, indices)) {
                for (i = 0; i < folio_batch_count(&fbatch); i++) {
                        struct folio *folio = fbatch.folios[i];

                        /* We rely upon deletion not changing folio->index */

                        if (xa_is_value(folio)) {
                                if (!invalidate_exceptional_entry2(mapping,
                                                indices[i], folio))
                                        ret = -EBUSY;
                                continue;
                        }

                        if (!did_range_unmap && folio_mapped(folio)) {
                                /*
                                 * If folio is mapped, before taking its lock,
                                 * zap the rest of the file in one hit.
                                 */
                                unmap_mapping_pages(mapping, indices[i],
                                                (1 + end - indices[i]), false);
                                did_range_unmap = 1;
                        }

                        folio_lock(folio);
                        if (unlikely(folio->mapping != mapping)) {
                                folio_unlock(folio);
                                continue;
                        }
                        VM_BUG_ON_FOLIO(!folio_contains(folio, indices[i]), folio);
                        folio_wait_writeback(folio);

                        if (folio_mapped(folio))
                                unmap_mapping_folio(folio);
                        BUG_ON(folio_mapped(folio));

                        ret2 = folio_launder(mapping, folio);
                        if (ret2 == 0) {
                                if (!invalidate_complete_folio2(mapping, folio))
                                        ret2 = -EBUSY;
                        }
                        if (ret2 < 0)
                                ret = ret2;
                        folio_unlock(folio);
                }
                folio_batch_remove_exceptionals(&fbatch);
                folio_batch_release(&fbatch);
                cond_resched();
        }
        /*
         * For DAX we invalidate page tables after invalidating page cache.  We
         * could invalidate page tables while invalidating each entry, however
         * that would be expensive.  And doing range unmapping beforehand
         * doesn't work as we have no cheap way to check that a page cache
         * entry didn't get remapped later.
         */
        if (dax_mapping(mapping)) {
                unmap_mapping_pages(mapping, start, end - start + 1, false);
        }
        return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);
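
/*
 * Illustrative sketch, not part of the original file: direct I/O write
 * paths use this to shoot down any cached pages over the range just
 * written, roughly:
 *
 *	err = invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT,
 *				(pos + count - 1) >> PAGE_SHIFT);
 *
 * A -EBUSY return means some page could not be invalidated.
 */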

/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Return: -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
        return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);

/**
 * truncate_pagecache - unmap and remove pagecache that has been truncated
 * @inode: inode
 * @newsize: new file size
 *
 * inode's new i_size must already be written before truncate_pagecache
 * is called.
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache(struct inode *inode, loff_t newsize)
{
        struct address_space *mapping = inode->i_mapping;
        loff_t holebegin = round_up(newsize, PAGE_SIZE);

        /*
         * unmap_mapping_range is called twice, first simply for
         * efficiency so that truncate_inode_pages does fewer
         * single-page unmaps.  However after this first call, and
         * before truncate_inode_pages finishes, it is possible for
         * private pages to be COWed, which remain after
         * truncate_inode_pages finishes, hence the second
         * unmap_mapping_range call must be made for correctness.
         */
        unmap_mapping_range(mapping, holebegin, 0, 1);
        truncate_inode_pages(mapping, newsize);
        unmap_mapping_range(mapping, holebegin, 0, 1);
}
EXPORT_SYMBOL(truncate_pagecache);

/**
 * truncate_setsize - update inode and pagecache for a new file size
 * @inode: inode
 * @newsize: new file size
 *
 * truncate_setsize updates i_size and performs pagecache truncation (if
 * necessary) to @newsize.  It will typically be called from the filesystem's
 * setattr function when ATTR_SIZE is passed in.
 *
 * Must be called with a lock serializing truncates and writes (generally
 * i_rwsem but e.g. xfs uses a different lock) and before all filesystem
 * specific block truncation has been performed.
 */
void truncate_setsize(struct inode *inode, loff_t newsize)
{
        loff_t oldsize = inode->i_size;

        i_size_write(inode, newsize);
        if (newsize > oldsize)
                pagecache_isize_extended(inode, oldsize, newsize);
        truncate_pagecache(inode, newsize);
}
EXPORT_SYMBOL(truncate_setsize);
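
/*
 * Illustrative sketch, not part of the original file: wired into a
 * filesystem's ->setattr() ("myfs_setattr" and "myfs_truncate_blocks" are
 * hypothetical names):
 *
 *	static int myfs_setattr(struct mnt_idmap *idmap,
 *				struct dentry *dentry, struct iattr *attr)
 *	{
 *		struct inode *inode = d_inode(dentry);
 *
 *		if (attr->ia_valid & ATTR_SIZE) {
 *			truncate_setsize(inode, attr->ia_size);
 *			myfs_truncate_blocks(inode, attr->ia_size);
 *		}
 *		...
 *	}
 */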

/**
 * pagecache_isize_extended - update pagecache after extension of i_size
 * @inode: inode for which i_size was extended
 * @from: original inode size
 * @to: new inode size
 *
 * Handle extension of inode size either caused by extending truncate or by
 * write starting after current i_size.  We mark the page straddling current
 * i_size RO so that page_mkwrite() is called on the nearest write access to
 * the page.  This way filesystem can be sure that page_mkwrite() is called on
 * the page before user writes to the page via mmap after the i_size has been
 * changed.
 *
 * The function must be called after i_size is updated so that page fault
 * coming after we unlock the page will already see the new i_size.
 * The function must be called while we still hold i_rwsem - this not only
 * makes sure i_size is stable but also that userspace cannot observe new
 * i_size value before we are prepared to store mmap writes at new inode size.
 */
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
{
        int bsize = i_blocksize(inode);
        loff_t rounded_from;
        struct page *page;
        pgoff_t index;

        WARN_ON(to > inode->i_size);

        if (from >= to || bsize == PAGE_SIZE)
                return;
        /* Page straddling @from will not have any hole block created? */
        rounded_from = round_up(from, bsize);
        if (to <= rounded_from || !(rounded_from & (PAGE_SIZE - 1)))
                return;

        index = from >> PAGE_SHIFT;
        page = find_lock_page(inode->i_mapping, index);
        /* Page not cached? Nothing to do */
        if (!page)
                return;
        /*
         * See clear_page_dirty_for_io() for details why set_page_dirty()
         * is needed.
         */
        if (page_mkclean(page))
                set_page_dirty(page);
        unlock_page(page);
        put_page(page);
}
EXPORT_SYMBOL(pagecache_isize_extended);
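
/*
 * Worked example (illustrative, not part of the original file): with 1KiB
 * blocks and 4KiB pages, extending i_size from 2048 to 5000 leaves bytes
 * 2048..4095 of page 0 without allocated blocks.  rounded_from == 2048 is
 * below @to and not page aligned, so page 0 is write-protected; the next
 * mmap store triggers page_mkwrite() and the filesystem can allocate the
 * missing blocks before the write is allowed to proceed.
 */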

/**
 * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
 * @inode: inode
 * @lstart: offset of beginning of hole
 * @lend: offset of last byte of hole
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache_range(struct inode *inode, loff_t lstart, loff_t lend)
{
        struct address_space *mapping = inode->i_mapping;
        loff_t unmap_start = round_up(lstart, PAGE_SIZE);
        loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;
        /*
         * This rounding is currently just for example: unmap_mapping_range
         * expands its hole outwards, whereas we want it to contract the hole
         * inwards.  However, existing callers of truncate_pagecache_range are
         * doing their own page rounding first.  Note that unmap_mapping_range
         * allows holelen 0 for all, and we allow lend -1 for end of file.
         */

        /*
         * Unlike in truncate_pagecache, unmap_mapping_range is called only
         * once (before truncating pagecache), and without "even_cows" flag:
         * hole-punching should not remove private COWed pages from the hole.
         */
        if ((u64)unmap_end > (u64)unmap_start)
                unmap_mapping_range(mapping, unmap_start,
                                    1 + unmap_end - unmap_start, 0);
        truncate_inode_pages_range(mapping, lstart, lend);
}
EXPORT_SYMBOL(truncate_pagecache_range);
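
/*
 * Illustrative sketch, not part of the original file: a fallocate()
 * PUNCH_HOLE implementation would typically remove the cached pages first
 * and then free the blocks, per the ordering the kernel-doc above
 * recommends ("myfs_free_blocks" is a hypothetical name):
 *
 *	truncate_pagecache_range(inode, offset, offset + len - 1);
 *	myfs_free_blocks(inode, offset, len);
 */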