/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002	Andrew Morton
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/backing-dev.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h>	/* grr. try_to_release_page,
				   do_invalidatepage */
#include <linux/cleancache.h>
#include "internal.h"

static void clear_exceptional_entry(struct address_space *mapping,
				    pgoff_t index, void *entry)
{
	struct radix_tree_node *node;
	void **slot;

	/* Handled by shmem itself */
	if (shmem_mapping(mapping))
		return;

	spin_lock_irq(&mapping->tree_lock);
	/*
	 * Regular page slots are stabilized by the page lock even
	 * without the tree itself locked.  These unlocked entries
	 * need verification under the tree lock.
	 */
	if (!__radix_tree_lookup(&mapping->page_tree, index, &node, &slot))
		goto unlock;
	if (*slot != entry)
		goto unlock;
	radix_tree_replace_slot(slot, NULL);
	mapping->nrshadows--;
	if (!node)
		goto unlock;
	workingset_node_shadows_dec(node);
	/*
	 * Don't track nodes without shadow entries.
	 *
	 * Avoid acquiring the list_lru lock if already untracked.
	 * The list_empty() test is safe as node->private_list is
	 * protected by mapping->tree_lock.
	 */
	if (!workingset_node_shadows(node) &&
	    !list_empty(&node->private_list))
		list_lru_del(&workingset_shadow_nodes, &node->private_list);
	__radix_tree_delete_node(&mapping->page_tree, node);
unlock:
	spin_unlock_irq(&mapping->tree_lock);
}

/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: start of the range to invalidate
 * @length: length of the range to invalidate
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point, because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void do_invalidatepage(struct page *page, unsigned int offset,
		       unsigned int length)
{
	void (*invalidatepage)(struct page *, unsigned int, unsigned int);

	invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
	if (!invalidatepage)
		invalidatepage = block_invalidatepage;
#endif
	if (invalidatepage)
		(*invalidatepage)(page, offset, length);
}

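/*
 * Example (illustrative only, not part of this file): a filesystem
 * with no private per-page state can leave ->invalidatepage unset and
 * rely on the block_invalidatepage() fallback above; one that does
 * track such state hooks in roughly like this hypothetical sketch.
 */
static void myfs_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length)
{
	/* drop myfs-private bookkeeping attached to the range ... */

	/* ... then let the buffer layer discard buffers in the range */
	block_invalidatepage(page, offset, length);
}

static const struct address_space_operations myfs_aops = {
	.invalidatepage	= myfs_invalidatepage,
	/* ... remaining methods elided ... */
};
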
/*
 * This cancels just the dirty bit on the kernel page itself; it
 * does NOT actually remove dirty bits on any mmap's that may be
 * around. It also leaves the page tagged dirty, so any sync
 * activity will still find it on the dirty lists, and in particular,
 * clear_page_dirty_for_io() will still look at the dirty bits in
 * the VM.
 *
 * Doing this should *normally* only ever be done when a page
 * is truncated, and is not actually mapped anywhere at all. However,
 * fs/buffer.c does this when it notices that somebody has cleaned
 * out all the buffers on a page without actually doing it through
 * the VM. Can you say "ext3 is horribly ugly"? Thought you could.
 */
void cancel_dirty_page(struct page *page, unsigned int account_size)
{
	if (TestClearPageDirty(page)) {
		struct address_space *mapping = page->mapping;
		if (mapping && mapping_cap_account_dirty(mapping)) {
			dec_zone_page_state(page, NR_FILE_DIRTY);
			dec_bdi_stat(mapping->backing_dev_info,
					BDI_RECLAIMABLE);
			if (account_size)
				task_io_account_cancelled_write(account_size);
		}
	}
}
EXPORT_SYMBOL(cancel_dirty_page);

/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes orphaned.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_fault().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping.  This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_mapping_pages got there first,
 * and c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static int
truncate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return -EIO;

	if (page_has_private(page))
		do_invalidatepage(page, 0, PAGE_CACHE_SIZE);

	cancel_dirty_page(page, PAGE_CACHE_SIZE);

	ClearPageMappedToDisk(page);
	delete_from_page_cache(page);
	return 0;
}

/*
 * This is for invalidate_mapping_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	int ret;

	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, 0))
		return 0;

	ret = remove_mapping(mapping, page);

	return ret;
}

int truncate_inode_page(struct address_space *mapping, struct page *page)
{
	if (page_mapped(page)) {
		unmap_mapping_range(mapping,
				   (loff_t)page->index << PAGE_CACHE_SHIFT,
				   PAGE_CACHE_SIZE, 0);
	}
	return truncate_complete_page(mapping, page);
}

/*
 * Used to get rid of pages on hardware memory corruption.
 */
int generic_error_remove_page(struct address_space *mapping, struct page *page)
{
	if (!mapping)
		return -EINVAL;
	/*
	 * Only punch for normal data pages for now.
	 * Handling other types like directories would need more auditing.
	 */
	if (!S_ISREG(mapping->host->i_mode))
		return -EIO;
	return truncate_inode_page(mapping, page);
}
EXPORT_SYMBOL(generic_error_remove_page);

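/*
 * Example (illustrative): the memory-failure code reaches this helper
 * through the ->error_remove_page address_space operation; a filesystem
 * typically just points that method at the generic implementation, as
 * in this hypothetical sketch:
 */
static const struct address_space_operations examplefs_aops = {
	/* ... other methods elided ... */
	.error_remove_page	= generic_error_remove_page,
};
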
/*
 * Safely invalidate one page from its pagecache mapping.
 * It only drops clean, unused pages. The page must be locked.
 *
 * Returns 1 if the page is successfully invalidated, otherwise 0.
 */
int invalidate_inode_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	if (!mapping)
		return 0;
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	if (page_mapped(page))
		return 0;
	return invalidate_complete_page(mapping, page);
}

/**
 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate (inclusive)
 *
 * Truncate the page cache, removing the pages that are between
 * the specified offsets (and zeroing out partial pages
 * if lstart or lend + 1 is not page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 *
 * Note that since ->invalidatepage() accepts a range to invalidate,
 * truncate_inode_pages_range is able to handle cases where lend + 1 is not
 * page aligned.
 */
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	pgoff_t		start;		/* inclusive */
	pgoff_t		end;		/* exclusive */
	unsigned int	partial_start;	/* inclusive */
	unsigned int	partial_end;	/* exclusive */
	struct pagevec	pvec;
	pgoff_t		indices[PAGEVEC_SIZE];
	pgoff_t		index;
	int		i;

	cleancache_invalidate_inode(mapping);
	if (mapping->nrpages == 0 && mapping->nrshadows == 0)
		return;

	/* Offsets within partial pages */
	partial_start = lstart & (PAGE_CACHE_SIZE - 1);
	partial_end = (lend + 1) & (PAGE_CACHE_SIZE - 1);

	/*
	 * 'start' and 'end' always cover the range of pages to be fully
	 * truncated. Partial pages are covered with 'partial_start' at the
	 * start of the range and 'partial_end' at the end of the range.
	 * Note that 'end' is exclusive while 'lend' is inclusive.
	 */
	start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (lend == -1)
		/*
		 * lend == -1 indicates end-of-file, so we have to set 'end'
		 * to the highest possible pgoff_t; since the type is
		 * unsigned we're using -1.
		 */
		end = -1;
	else
		end = (lend + 1) >> PAGE_CACHE_SHIFT;

	pagevec_init(&pvec, 0);
	index = start;
	while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE),
			indices)) {
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index >= end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				clear_exceptional_entry(mapping, index, page);
				continue;
			}

			if (!trylock_page(page))
				continue;
			WARN_ON(page->index != index);
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
		index++;
	}

	if (partial_start) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			unsigned int top = PAGE_CACHE_SIZE;
			if (start > end) {
				/* Truncation within a single page */
				top = partial_end;
				partial_end = 0;
			}
			wait_on_page_writeback(page);
			zero_user_segment(page, partial_start, top);
			cleancache_invalidate_page(mapping, page);
			if (page_has_private(page))
				do_invalidatepage(page, partial_start,
						  top - partial_start);
			unlock_page(page);
			page_cache_release(page);
		}
	}
	if (partial_end) {
		struct page *page = find_lock_page(mapping, end);
		if (page) {
			wait_on_page_writeback(page);
			zero_user_segment(page, 0, partial_end);
			cleancache_invalidate_page(mapping, page);
			if (page_has_private(page))
				do_invalidatepage(page, 0,
						  partial_end);
			unlock_page(page);
			page_cache_release(page);
		}
	}
	/*
	 * If the truncation happened within a single page no pages
	 * will be released, just zeroed, so we can bail out now.
	 */
	if (start >= end)
		return;

	index = start;
	for ( ; ; ) {
		cond_resched();
		if (!pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE),
			indices)) {
			if (index == start)
				break;
			index = start;
			continue;
		}
		if (index == start && indices[0] >= end) {
			pagevec_remove_exceptionals(&pvec);
			pagevec_release(&pvec);
			break;
		}
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index >= end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				clear_exceptional_entry(mapping, index, page);
				continue;
			}

			lock_page(page);
			WARN_ON(page->index != index);
			wait_on_page_writeback(page);
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		index++;
	}
	cleancache_invalidate_inode(mapping);
}
EXPORT_SYMBOL(truncate_inode_pages_range);

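/*
 * Worked example of the offset arithmetic above (illustrative,
 * assuming PAGE_CACHE_SIZE == 4096 and PAGE_CACHE_SHIFT == 12):
 * truncating lstart = 1536, lend = 10239 yields
 *
 *	partial_start = 1536 & 4095          = 1536
 *	partial_end   = 10240 & 4095         = 2048
 *	start         = (1536 + 4095) >> 12  = 1
 *	end           = 10240 >> 12          = 2
 *
 * so page 1 is removed outright, page 0 is zeroed from byte 1536 to
 * the end of the page, and page 2 is zeroed from byte 0 up to 2048.
 */
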
/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 *
 * Note: When this function returns, there can be a page in the process of
 * deletion (inside __delete_from_page_cache()) in the specified range.  Thus
 * mapping->nrpages can be non-zero when this function returns even after
 * truncation of the whole mapping.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);

/**
 * truncate_inode_pages_final - truncate *all* pages before inode dies
 * @mapping: mapping to truncate
 *
 * Called under (and serialized by) inode->i_mutex.
 *
 * Filesystems have to use this in the .evict_inode path to inform the
 * VM that this is the final truncate and the inode is going away.
 */
void truncate_inode_pages_final(struct address_space *mapping)
{
	unsigned long nrshadows;
	unsigned long nrpages;

	/*
	 * Page reclaim can not participate in regular inode lifetime
	 * management (can't call iput()) and thus can race with the
	 * inode teardown.  Tell it when the address space is exiting,
	 * so that it does not install eviction information after the
	 * final truncate has begun.
	 */
	mapping_set_exiting(mapping);

	/*
	 * When reclaim installs eviction entries, it increases
	 * nrshadows first, then decreases nrpages.  Make sure we see
	 * this in the right order or we might miss an entry.
	 */
	nrpages = mapping->nrpages;
	smp_rmb();
	nrshadows = mapping->nrshadows;

	if (nrpages || nrshadows) {
		/*
		 * As truncation uses a lockless tree lookup, cycle
		 * the tree lock to make sure any ongoing tree
		 * modification that does not see AS_EXITING is
		 * completed before starting the final truncate.
		 */
		spin_lock_irq(&mapping->tree_lock);
		spin_unlock_irq(&mapping->tree_lock);

		truncate_inode_pages(mapping, 0);
	}
}
EXPORT_SYMBOL(truncate_inode_pages_final);

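/*
 * Example (illustrative): a filesystem's ->evict_inode implementation
 * calls this before the inode is destroyed, along the lines of this
 * hypothetical sketch:
 */
static void myfs_evict_inode(struct inode *inode)
{
	truncate_inode_pages_final(&inode->i_data);
	/* ... release filesystem-private resources here ... */
	clear_inode(inode);
}
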
/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity. It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t end)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	pgoff_t index = start;
	unsigned long ret;
	unsigned long count = 0;
	int i;

	/*
	 * Note: this function may get called on a shmem/tmpfs mapping:
	 * pagevec_lookup() might then return 0 prematurely (because it
	 * got a gangful of swap entries); but it's hardly worth worrying
	 * about - it can rarely have anything to free from such a mapping
	 * (most pages are dirty), and already skips over any difficulties.
	 */

	pagevec_init(&pvec, 0);
	while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
			indices)) {
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index > end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				clear_exceptional_entry(mapping, index, page);
				continue;
			}

			if (!trylock_page(page))
				continue;
			WARN_ON(page->index != index);
			ret = invalidate_inode_page(page);
			unlock_page(page);
			/*
			 * Invalidation is a hint that the page is no longer
			 * of interest, so try to speed up its reclaim.
			 */
			if (!ret)
				deactivate_page(page);
			count += ret;
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
		index++;
	}
	return count;
}
EXPORT_SYMBOL(invalidate_mapping_pages);

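/*
 * Example (illustrative): this is the kind of helper behind best-effort
 * cache dropping such as POSIX_FADV_DONTNEED; a caller converts a byte
 * range to page offsets (rounding partial pages inward) and accepts
 * that busy pages simply stay behind. The function name is hypothetical.
 */
static unsigned long example_drop_cached_range(struct address_space *mapping,
					       loff_t offset, loff_t endbyte)
{
	pgoff_t start_index = (offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	pgoff_t end_index = endbyte >> PAGE_CACHE_SHIFT;

	if (end_index < start_index)
		return 0;
	/* returns the number of pages actually invalidated */
	return invalidate_mapping_pages(mapping, start_index, end_index);
}
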
/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_page_list() has a temp ref on them, or because they're transiently
 * sitting in the lru_cache_add() pagevecs.
 */
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;

	spin_lock_irq(&mapping->tree_lock);
	if (PageDirty(page))
		goto failed;

	BUG_ON(page_has_private(page));
	__delete_from_page_cache(page, NULL);
	spin_unlock_irq(&mapping->tree_lock);
	mem_cgroup_uncharge_cache_page(page);

	if (mapping->a_ops->freepage)
		mapping->a_ops->freepage(page);

	page_cache_release(page);	/* pagecache ref */
	return 1;
failed:
	spin_unlock_irq(&mapping->tree_lock);
	return 0;
}

static int do_launder_page(struct address_space *mapping, struct page *page)
{
	if (!PageDirty(page))
		return 0;
	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
		return 0;
	return mapping->a_ops->launder_page(page);
}

/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	pgoff_t index;
	int i;
	int ret = 0;
	int ret2 = 0;
	int did_range_unmap = 0;

	cleancache_invalidate_inode(mapping);
	pagevec_init(&pvec, 0);
	index = start;
	while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
			indices)) {
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index > end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				clear_exceptional_entry(mapping, index, page);
				continue;
			}

			lock_page(page);
			WARN_ON(page->index != index);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			wait_on_page_writeback(page);
			if (page_mapped(page)) {
				if (!did_range_unmap) {
					/*
					 * Zap the rest of the file in one hit.
					 */
					unmap_mapping_range(mapping,
					   (loff_t)index << PAGE_CACHE_SHIFT,
					   (loff_t)(1 + end - index)
							 << PAGE_CACHE_SHIFT,
					    0);
					did_range_unmap = 1;
				} else {
					/*
					 * Just zap this page
					 */
					unmap_mapping_range(mapping,
					   (loff_t)index << PAGE_CACHE_SHIFT,
					   PAGE_CACHE_SIZE, 0);
				}
			}
			BUG_ON(page_mapped(page));
			ret2 = do_launder_page(mapping, page);
			if (ret2 == 0) {
				if (!invalidate_complete_page2(mapping, page))
					ret2 = -EBUSY;
			}
			if (ret2 < 0)
				ret = ret2;
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
		index++;
	}
	cleancache_invalidate_inode(mapping);
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);

/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);

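/*
 * Example (illustrative): direct I/O is a typical caller with such
 * strict requirements -- after a write that bypasses the page cache,
 * any stale cached pages covering the written range must be removed.
 * The function name below is hypothetical.
 */
static int example_invalidate_after_dio_write(struct address_space *mapping,
					      loff_t pos, size_t count)
{
	pgoff_t first = pos >> PAGE_CACHE_SHIFT;
	pgoff_t last = (pos + count - 1) >> PAGE_CACHE_SHIFT;

	/* -EBUSY here means some page could not be invalidated */
	return invalidate_inode_pages2_range(mapping, first, last);
}
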
/**
 * truncate_pagecache - unmap and remove pagecache that has been truncated
 * @inode: inode
 * @newsize: new file size
 *
 * The inode's new i_size must already be written before truncate_pagecache
 * is called.
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache(struct inode *inode, loff_t newsize)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t holebegin = round_up(newsize, PAGE_SIZE);

	/*
	 * unmap_mapping_range is called twice, first simply for
	 * efficiency so that truncate_inode_pages does fewer
	 * single-page unmaps.  However after this first call, and
	 * before truncate_inode_pages finishes, it is possible for
	 * private pages to be COWed, which remain after
	 * truncate_inode_pages finishes; hence the second
	 * unmap_mapping_range call must be made for correctness.
	 */
	unmap_mapping_range(mapping, holebegin, 0, 1);
	truncate_inode_pages(mapping, newsize);
	unmap_mapping_range(mapping, holebegin, 0, 1);
}
EXPORT_SYMBOL(truncate_pagecache);

/**
 * truncate_setsize - update inode and pagecache for a new file size
 * @inode: inode
 * @newsize: new file size
 *
 * truncate_setsize updates i_size and performs pagecache truncation (if
 * necessary) to @newsize. It will typically be called from the filesystem's
 * setattr function when ATTR_SIZE is passed in.
 *
 * Must be called with inode->i_mutex held and before all filesystem specific
 * block truncation has been performed.
 */
void truncate_setsize(struct inode *inode, loff_t newsize)
{
	i_size_write(inode, newsize);
	truncate_pagecache(inode, newsize);
}
EXPORT_SYMBOL(truncate_setsize);

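/*
 * Example (illustrative): the typical shape of a ->setattr method that
 * uses truncate_setsize, with hypothetical myfs_* names.
 */
static int myfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size != inode->i_size)
		truncate_setsize(inode, attr->ia_size);

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}
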
/**
 * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
 * @inode: inode
 * @lstart: offset of beginning of hole
 * @lend: offset of last byte of hole
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t unmap_start = round_up(lstart, PAGE_SIZE);
	loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;
	/*
	 * This rounding is currently just for example: unmap_mapping_range
	 * expands its hole outwards, whereas we want it to contract the hole
	 * inwards.  However, existing callers of truncate_pagecache_range are
	 * doing their own page rounding first.  Note that unmap_mapping_range
	 * allows holelen 0 for all, and we allow lend -1 for end of file.
	 */

	/*
	 * Unlike in truncate_pagecache, unmap_mapping_range is called only
	 * once (before truncating pagecache), and without "even_cows" flag:
	 * hole-punching should not remove private COWed pages from the hole.
	 */
	if ((u64)unmap_end > (u64)unmap_start)
		unmap_mapping_range(mapping, unmap_start,
				    1 + unmap_end - unmap_start, 0);
	truncate_inode_pages_range(mapping, lstart, lend);
}
EXPORT_SYMBOL(truncate_pagecache_range);
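
/*
 * Example (illustrative): a hole-punching fallocate implementation
 * would drop the affected pagecache with this helper before freeing
 * the on-disk blocks. Names below are hypothetical.
 */
static long examplefs_punch_hole(struct inode *inode, loff_t offset,
				 loff_t len)
{
	/* dirty data in the range is assumed already written back */
	truncate_pagecache_range(inode, offset, offset + len - 1);
	/* ... now free the on-disk blocks backing the hole ... */
	return 0;
}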
776