xref: /openbmc/linux/mm/truncate.c (revision 8b036556)
/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002	Andrew Morton
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/backing-dev.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h>	/* grr. try_to_release_page,
				   do_invalidatepage */
#include <linux/cleancache.h>
#include <linux/rmap.h>
#include "internal.h"

static void clear_exceptional_entry(struct address_space *mapping,
				    pgoff_t index, void *entry)
{
	struct radix_tree_node *node;
	void **slot;

	/* Handled by shmem itself */
	if (shmem_mapping(mapping))
		return;

	spin_lock_irq(&mapping->tree_lock);
	/*
	 * Regular page slots are stabilized by the page lock even
	 * without the tree itself locked.  These unlocked entries
	 * need verification under the tree lock.
	 */
	if (!__radix_tree_lookup(&mapping->page_tree, index, &node, &slot))
		goto unlock;
	if (*slot != entry)
		goto unlock;
	radix_tree_replace_slot(slot, NULL);
	mapping->nrshadows--;
	if (!node)
		goto unlock;
	workingset_node_shadows_dec(node);
	/*
	 * Don't track nodes without shadow entries.
	 *
	 * Avoid acquiring the list_lru lock if already untracked.
	 * The list_empty() test is safe as node->private_list is
	 * protected by mapping->tree_lock.
	 */
	if (!workingset_node_shadows(node) &&
	    !list_empty(&node->private_list))
		list_lru_del(&workingset_shadow_nodes, &node->private_list);
	__radix_tree_delete_node(&mapping->page_tree, node);
unlock:
	spin_unlock_irq(&mapping->tree_lock);
}

/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: start of the range to invalidate
 * @length: length of the range to invalidate
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point, because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void do_invalidatepage(struct page *page, unsigned int offset,
		       unsigned int length)
{
	void (*invalidatepage)(struct page *, unsigned int, unsigned int);

	invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
	if (!invalidatepage)
		invalidatepage = block_invalidatepage;
#endif
	if (invalidatepage)
		(*invalidatepage)(page, offset, length);
}
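
/*
 * Illustrative sketch (not part of the original file): a filesystem that
 * keeps private per-page state would supply its own ->invalidatepage to
 * honour the contract above.  The "myfs" names are hypothetical.
 */
#if 0	/* example only */
static void myfs_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length)
{
	/* Full-page invalidation: drop our private state entirely. */
	if (offset == 0 && length == PAGE_CACHE_SIZE)
		myfs_forget_page(page);	/* hypothetical helper */

	/* No dirty buffers or I/O may remain beyond the truncation point. */
	block_invalidatepage(page, offset, length);
}

static const struct address_space_operations myfs_aops = {
	.invalidatepage	= myfs_invalidatepage,
	/* ... */
};
#endif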

/*
 * This cancels just the dirty bit on the kernel page itself; it
 * does NOT actually remove dirty bits on any mmap's that may be
 * around. It also leaves the page tagged dirty, so any sync
 * activity will still find it on the dirty lists, and in particular,
 * clear_page_dirty_for_io() will still look at the dirty bits in
 * the VM.
 *
 * Doing this should *normally* only ever be done when a page
 * is truncated, and is not actually mapped anywhere at all. However,
 * fs/buffer.c does this when it notices that somebody has cleaned
 * out all the buffers on a page without actually doing it through
 * the VM. Can you say "ext3 is horribly ugly"? Thought you could.
 */
void cancel_dirty_page(struct page *page, unsigned int account_size)
{
	if (TestClearPageDirty(page)) {
		struct address_space *mapping = page->mapping;
		if (mapping && mapping_cap_account_dirty(mapping)) {
			dec_zone_page_state(page, NR_FILE_DIRTY);
			dec_bdi_stat(inode_to_bdi(mapping->host),
					BDI_RECLAIMABLE);
			if (account_size)
				task_io_account_cancelled_write(account_size);
		}
	}
}
EXPORT_SYMBOL(cancel_dirty_page);

/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes orphaned.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_fault().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping.  This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_mapping_pages got there first,
 * and c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static int
truncate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return -EIO;

	if (page_has_private(page))
		do_invalidatepage(page, 0, PAGE_CACHE_SIZE);

	cancel_dirty_page(page, PAGE_CACHE_SIZE);

	ClearPageMappedToDisk(page);
	delete_from_page_cache(page);
	return 0;
}

/*
 * This is for invalidate_mapping_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	int ret;

	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, 0))
		return 0;

	ret = remove_mapping(mapping, page);

	return ret;
}

int truncate_inode_page(struct address_space *mapping, struct page *page)
{
	if (page_mapped(page)) {
		unmap_mapping_range(mapping,
				   (loff_t)page->index << PAGE_CACHE_SHIFT,
				   PAGE_CACHE_SIZE, 0);
	}
	return truncate_complete_page(mapping, page);
}

/*
 * Used to get rid of pages on hardware memory corruption.
 */
int generic_error_remove_page(struct address_space *mapping, struct page *page)
{
	if (!mapping)
		return -EINVAL;
	/*
	 * Only punch for normal data pages for now.
	 * Handling other types like directories would need more auditing.
	 */
	if (!S_ISREG(mapping->host->i_mode))
		return -EIO;
	return truncate_inode_page(mapping, page);
}
EXPORT_SYMBOL(generic_error_remove_page);
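
/*
 * Illustrative sketch (not part of the original file): filesystems opt in
 * to this memory-failure handling by pointing ->error_remove_page at the
 * generic helper from their address_space_operations ("myfs" is
 * hypothetical):
 */
#if 0	/* example only */
static const struct address_space_operations myfs_aops = {
	/* ... */
	.error_remove_page	= generic_error_remove_page,
};
#endif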

/*
 * Safely invalidate one page from its pagecache mapping.
 * It only drops clean, unused pages. The page must be locked.
 *
 * Returns 1 if the page is successfully invalidated, otherwise 0.
 */
int invalidate_inode_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	if (!mapping)
		return 0;
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	if (page_mapped(page))
		return 0;
	return invalidate_complete_page(mapping, page);
}

/**
 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate (inclusive)
 *
 * Truncate the page cache, removing the pages that are between the
 * specified offsets (and zeroing out partial pages
 * if lstart or lend + 1 is not page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is so that as little IO as possible is done against the
 * affected region.  The first pass will remove most pages, so the search
 * cost of the second pass is low.
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 *
 * Note that since ->invalidatepage() accepts a range to invalidate,
 * truncate_inode_pages_range() is able to handle cases where lend + 1 is
 * not page aligned.
 */
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	pgoff_t		start;		/* inclusive */
	pgoff_t		end;		/* exclusive */
	unsigned int	partial_start;	/* inclusive */
	unsigned int	partial_end;	/* exclusive */
	struct pagevec	pvec;
	pgoff_t		indices[PAGEVEC_SIZE];
	pgoff_t		index;
	int		i;

	cleancache_invalidate_inode(mapping);
	if (mapping->nrpages == 0 && mapping->nrshadows == 0)
		return;

	/* Offsets within partial pages */
	partial_start = lstart & (PAGE_CACHE_SIZE - 1);
	partial_end = (lend + 1) & (PAGE_CACHE_SIZE - 1);

	/*
	 * 'start' and 'end' always cover the range of pages to be fully
	 * truncated. Partial pages are covered with 'partial_start' at the
	 * start of the range and 'partial_end' at the end of the range.
	 * Note that 'end' is exclusive while 'lend' is inclusive.
	 */
	start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (lend == -1)
		/*
		 * lend == -1 indicates end-of-file so we have to set 'end'
		 * to the highest possible pgoff_t and since the type is
		 * unsigned we're using -1.
		 */
		end = -1;
	else
		end = (lend + 1) >> PAGE_CACHE_SHIFT;

	pagevec_init(&pvec, 0);
	index = start;
	while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE),
			indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index >= end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				clear_exceptional_entry(mapping, index, page);
				continue;
			}

			if (!trylock_page(page))
				continue;
			WARN_ON(page->index != index);
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}

	if (partial_start) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			unsigned int top = PAGE_CACHE_SIZE;
			if (start > end) {
				/* Truncation within a single page */
				top = partial_end;
				partial_end = 0;
			}
			wait_on_page_writeback(page);
			zero_user_segment(page, partial_start, top);
			cleancache_invalidate_page(mapping, page);
			if (page_has_private(page))
				do_invalidatepage(page, partial_start,
						  top - partial_start);
			unlock_page(page);
			page_cache_release(page);
		}
	}
	if (partial_end) {
		struct page *page = find_lock_page(mapping, end);
		if (page) {
			wait_on_page_writeback(page);
			zero_user_segment(page, 0, partial_end);
			cleancache_invalidate_page(mapping, page);
			if (page_has_private(page))
				do_invalidatepage(page, 0,
						  partial_end);
			unlock_page(page);
			page_cache_release(page);
		}
	}
	/*
	 * If the truncation happened within a single page no pages
	 * will be released, just zeroed, so we can bail out now.
	 */
	if (start >= end)
		return;

	index = start;
	for ( ; ; ) {
		cond_resched();
		if (!pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE), indices)) {
			/* If all gone from start onwards, we're done */
			if (index == start)
				break;
			/* Otherwise restart to make sure all gone */
			index = start;
			continue;
		}
		if (index == start && indices[0] >= end) {
			/* All gone out of hole to be punched, we're done */
			pagevec_remove_exceptionals(&pvec);
			pagevec_release(&pvec);
			break;
		}
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index >= end) {
				/* Restart punch to make sure all gone */
				index = start - 1;
				break;
			}

			if (radix_tree_exceptional_entry(page)) {
				clear_exceptional_entry(mapping, index, page);
				continue;
			}

			lock_page(page);
			WARN_ON(page->index != index);
			wait_on_page_writeback(page);
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		index++;
	}
	cleancache_invalidate_inode(mapping);
}
EXPORT_SYMBOL(truncate_inode_pages_range);
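
/*
 * Worked example (illustrative, assuming 4096-byte pages): truncating the
 * byte range lstart = 1536, lend = 10239 gives
 *
 *	partial_start = 1536 & 4095         = 1536
 *	partial_end   = (10239 + 1) & 4095  = 2048
 *	start         = (1536 + 4095) >> 12 = 1	(first fully-truncated page)
 *	end           = (10239 + 1) >> 12   = 2	(exclusive)
 *
 * so page 1 is removed outright, bytes 1536-4095 of page 0 are zeroed
 * (with a partial-page ->invalidatepage call), and bytes 0-2047 of
 * page 2 are zeroed likewise.
 */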

/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 *
 * Note: When this function returns, there can be a page in the process of
 * deletion (inside __delete_from_page_cache()) in the specified range.  Thus
 * mapping->nrpages can be non-zero when this function returns even after
 * truncation of the whole mapping.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);

/**
 * truncate_inode_pages_final - truncate *all* pages before inode dies
 * @mapping: mapping to truncate
 *
 * Called under (and serialized by) inode->i_mutex.
 *
 * Filesystems have to use this in the .evict_inode path to inform the
 * VM that this is the final truncate and the inode is going away.
 */
void truncate_inode_pages_final(struct address_space *mapping)
{
	unsigned long nrshadows;
	unsigned long nrpages;

	/*
	 * Page reclaim cannot participate in regular inode lifetime
	 * management (it can't call iput()) and thus can race with the
	 * inode teardown.  Tell it when the address space is exiting,
	 * so that it does not install eviction information after the
	 * final truncate has begun.
	 */
	mapping_set_exiting(mapping);

	/*
	 * When reclaim installs eviction entries, it increases
	 * nrshadows first, then decreases nrpages.  Make sure we see
	 * this in the right order or we might miss an entry.
	 */
	nrpages = mapping->nrpages;
	smp_rmb();
	nrshadows = mapping->nrshadows;

	if (nrpages || nrshadows) {
		/*
		 * As truncation uses a lockless tree lookup, cycle
		 * the tree lock to make sure any ongoing tree
		 * modification that does not see AS_EXITING is
		 * completed before starting the final truncate.
		 */
		spin_lock_irq(&mapping->tree_lock);
		spin_unlock_irq(&mapping->tree_lock);

		truncate_inode_pages(mapping, 0);
	}
}
EXPORT_SYMBOL(truncate_inode_pages_final);
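
/*
 * Illustrative sketch (not part of the original file): the expected call
 * site is a filesystem's ->evict_inode ("myfs" is hypothetical):
 */
#if 0	/* example only */
static void myfs_evict_inode(struct inode *inode)
{
	truncate_inode_pages_final(&inode->i_data);
	clear_inode(inode);
	/* ... free filesystem-private resources here ... */
}
#endif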

/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity. It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t end)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	pgoff_t index = start;
	unsigned long ret;
	unsigned long count = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
			indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index > end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				clear_exceptional_entry(mapping, index, page);
				continue;
			}

			if (!trylock_page(page))
				continue;
			WARN_ON(page->index != index);
			ret = invalidate_inode_page(page);
			unlock_page(page);
			/*
			 * Invalidation is a hint that the page is no longer
			 * of interest, so try to speed up its reclaim.
			 */
			if (!ret)
				deactivate_page(page);
			count += ret;
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}
	return count;
}
EXPORT_SYMBOL(invalidate_mapping_pages);
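
/*
 * Illustrative sketch (not part of the original file): this helper backs
 * best-effort cache dropping such as POSIX_FADV_DONTNEED.  A caller with a
 * byte range converts it to page indices first (drop_cached_range() is a
 * hypothetical wrapper):
 */
#if 0	/* example only */
static void drop_cached_range(struct inode *inode, loff_t pos, loff_t len)
{
	pgoff_t first = pos >> PAGE_CACHE_SHIFT;
	pgoff_t last = (pos + len - 1) >> PAGE_CACHE_SHIFT;

	/* Best effort: dirty, locked or mapped pages are simply skipped. */
	invalidate_mapping_pages(inode->i_mapping, first, last);
}
#endif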

/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_page_list() has a temp ref on them, or because they're transiently
 * sitting in the lru_cache_add() pagevecs.
 */
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;

	spin_lock_irq(&mapping->tree_lock);
	if (PageDirty(page))
		goto failed;

	BUG_ON(page_has_private(page));
	__delete_from_page_cache(page, NULL);
	spin_unlock_irq(&mapping->tree_lock);

	if (mapping->a_ops->freepage)
		mapping->a_ops->freepage(page);

	page_cache_release(page);	/* pagecache ref */
	return 1;
failed:
	spin_unlock_irq(&mapping->tree_lock);
	return 0;
}

static int do_launder_page(struct address_space *mapping, struct page *page)
{
	if (!PageDirty(page))
		return 0;
	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
		return 0;
	return mapping->a_ops->launder_page(page);
}

/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	pgoff_t index;
	int i;
	int ret = 0;
	int ret2 = 0;
	int did_range_unmap = 0;

	cleancache_invalidate_inode(mapping);
	pagevec_init(&pvec, 0);
	index = start;
	while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
			indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index > end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				clear_exceptional_entry(mapping, index, page);
				continue;
			}

			lock_page(page);
			WARN_ON(page->index != index);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			wait_on_page_writeback(page);
			if (page_mapped(page)) {
				if (!did_range_unmap) {
					/*
					 * Zap the rest of the file in one hit.
					 */
					unmap_mapping_range(mapping,
					   (loff_t)index << PAGE_CACHE_SHIFT,
					   (loff_t)(1 + end - index)
							 << PAGE_CACHE_SHIFT,
					    0);
					did_range_unmap = 1;
				} else {
					/*
					 * Just zap this page
					 */
					unmap_mapping_range(mapping,
					   (loff_t)index << PAGE_CACHE_SHIFT,
					   PAGE_CACHE_SIZE, 0);
				}
			}
			BUG_ON(page_mapped(page));
			ret2 = do_launder_page(mapping, page);
			if (ret2 == 0) {
				if (!invalidate_complete_page2(mapping, page))
					ret2 = -EBUSY;
			}
			if (ret2 < 0)
				ret = ret2;
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}
	cleancache_invalidate_inode(mapping);
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);
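
/*
 * Illustrative sketch (not part of the original file), loosely modelled on
 * the direct-I/O write path: after writing @written bytes at @pos straight
 * to disk, any cached pages covering that range must be shot down so that
 * later buffered reads do not serve stale data:
 */
#if 0	/* example only */
	if (written > 0)
		invalidate_inode_pages2_range(mapping,
				pos >> PAGE_CACHE_SHIFT,
				(pos + written - 1) >> PAGE_CACHE_SHIFT);
#endif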

/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);

/**
 * truncate_pagecache - unmap and remove pagecache that has been truncated
 * @inode: inode
 * @newsize: new file size
 *
 * inode's new i_size must already be written before truncate_pagecache
 * is called.
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache(struct inode *inode, loff_t newsize)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t holebegin = round_up(newsize, PAGE_SIZE);

	/*
	 * unmap_mapping_range is called twice, first simply for
	 * efficiency so that truncate_inode_pages does fewer
	 * single-page unmaps.  However after this first call, and
	 * before truncate_inode_pages finishes, it is possible for
	 * private pages to be COWed, which remain after
	 * truncate_inode_pages finishes, hence the second
	 * unmap_mapping_range call must be made for correctness.
	 */
	unmap_mapping_range(mapping, holebegin, 0, 1);
	truncate_inode_pages(mapping, newsize);
	unmap_mapping_range(mapping, holebegin, 0, 1);
}
EXPORT_SYMBOL(truncate_pagecache);

/**
 * truncate_setsize - update inode and pagecache for a new file size
 * @inode: inode
 * @newsize: new file size
 *
 * truncate_setsize updates i_size and performs pagecache truncation (if
 * necessary) to @newsize. It will typically be called from the filesystem's
 * setattr function when ATTR_SIZE is passed in.
 *
 * Must be called with a lock serializing truncates and writes (generally
 * i_mutex but e.g. xfs uses a different lock) and before all filesystem-
 * specific block truncation has been performed.
 */
void truncate_setsize(struct inode *inode, loff_t newsize)
{
	loff_t oldsize = inode->i_size;

	i_size_write(inode, newsize);
	if (newsize > oldsize)
		pagecache_isize_extended(inode, oldsize, newsize);
	truncate_pagecache(inode, newsize);
}
EXPORT_SYMBOL(truncate_setsize);
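
/*
 * Illustrative sketch (not part of the original file): a minimal ->setattr
 * handling ATTR_SIZE ("myfs" names are hypothetical):
 */
#if 0	/* example only */
static int myfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size != inode->i_size) {
		/* Update i_size and pagecache first... */
		truncate_setsize(inode, attr->ia_size);
		/* ...then do the filesystem-specific block truncation. */
		myfs_truncate_blocks(inode, attr->ia_size);	/* hypothetical */
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}
#endif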

/**
 * pagecache_isize_extended - update pagecache after extension of i_size
 * @inode:	inode for which i_size was extended
 * @from:	original inode size
 * @to:		new inode size
 *
 * Handle extension of inode size either caused by extending truncate or by
 * a write starting after the current i_size. We mark the page straddling
 * the current i_size RO so that page_mkwrite() is called on the nearest
 * write access to the page.  This way the filesystem can be sure that
 * page_mkwrite() is called on the page before user writes to the page via
 * mmap after the i_size has been changed.
 *
 * The function must be called after i_size is updated so that a page fault
 * coming after we unlock the page will already see the new i_size.
 * The function must be called while we still hold i_mutex - this not only
 * makes sure i_size is stable but also that userspace cannot observe the new
 * i_size value before we are prepared to store mmap writes at the new inode
 * size.
 */
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
{
	int bsize = 1 << inode->i_blkbits;
	loff_t rounded_from;
	struct page *page;
	pgoff_t index;

	WARN_ON(to > inode->i_size);

	if (from >= to || bsize == PAGE_CACHE_SIZE)
		return;
	/* Bail out if the page straddling @from will gain no hole blocks */
	rounded_from = round_up(from, bsize);
	if (to <= rounded_from || !(rounded_from & (PAGE_CACHE_SIZE - 1)))
		return;

	index = from >> PAGE_CACHE_SHIFT;
	page = find_lock_page(inode->i_mapping, index);
	/* Page not cached? Nothing to do */
	if (!page)
		return;
	/*
	 * See clear_page_dirty_for_io() for details why set_page_dirty()
	 * is needed.
	 */
	if (page_mkclean(page))
		set_page_dirty(page);
	unlock_page(page);
	page_cache_release(page);
}
EXPORT_SYMBOL(pagecache_isize_extended);
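
/*
 * Worked example (illustrative, assuming 4096-byte pages and a 1024-byte
 * block size): extending i_size from 2500 to 10000 gives
 *
 *	rounded_from = round_up(2500, 1024) = 3072
 *
 * Since 10000 > 3072 and 3072 is not page aligned, blocks 3072-4095 of
 * page 0 become hole blocks within the page straddling the old i_size.
 * That page is therefore write-protected via page_mkclean(), so the next
 * mmap write faults and the filesystem sees page_mkwrite() before any
 * data lands in the new hole.
 */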

/**
 * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
 * @inode: inode
 * @lstart: offset of beginning of hole
 * @lend: offset of last byte of hole
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t unmap_start = round_up(lstart, PAGE_SIZE);
	loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;
	/*
	 * This rounding is currently just for example: unmap_mapping_range
	 * expands its hole outwards, whereas we want it to contract the hole
	 * inwards.  However, existing callers of truncate_pagecache_range are
	 * doing their own page rounding first.  Note that unmap_mapping_range
	 * allows holelen 0 for all, and we allow lend -1 for end of file.
	 */

	/*
	 * Unlike in truncate_pagecache, unmap_mapping_range is called only
	 * once (before truncating pagecache), and without "even_cows" flag:
	 * hole-punching should not remove private COWed pages from the hole.
	 */
	if ((u64)unmap_end > (u64)unmap_start)
		unmap_mapping_range(mapping, unmap_start,
				    1 + unmap_end - unmap_start, 0);
	truncate_inode_pages_range(mapping, lstart, lend);
}
EXPORT_SYMBOL(truncate_pagecache_range);
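
/*
 * Illustrative sketch (not part of the original file): a
 * fallocate(FALLOC_FL_PUNCH_HOLE) implementation would typically drop the
 * pagecache over the hole before freeing the underlying blocks ("myfs"
 * names are hypothetical):
 */
#if 0	/* example only */
static long myfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	mutex_lock(&inode->i_mutex);
	/* Remove pagecache first so writepage never sees freed blocks. */
	truncate_pagecache_range(inode, offset, offset + len - 1);
	myfs_free_blocks(inode, offset, len);	/* hypothetical helper */
	mutex_unlock(&inode->i_mutex);
	return 0;
}
#endif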
821