/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002	Andrew Morton
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/backing-dev.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h>	/* grr. try_to_release_page,
				   do_invalidatepage */
#include "internal.h"


/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: the byte offset within the page of the truncation point
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point, because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void do_invalidatepage(struct page *page, unsigned long offset)
{
	void (*invalidatepage)(struct page *, unsigned long);
	invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
	if (!invalidatepage)
		invalidatepage = block_invalidatepage;
#endif
	if (invalidatepage)
		(*invalidatepage)(page, offset);
}
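
/*
 * Example (illustrative sketch, not part of this file; foo_aops is
 * hypothetical): a block-based filesystem typically wires
 * ->invalidatepage into its address_space_operations:
 *
 *	static const struct address_space_operations foo_aops = {
 *		...
 *		.invalidatepage	= block_invalidatepage,
 *	};
 *
 * Leaving ->invalidatepage NULL falls back to block_invalidatepage()
 * above when CONFIG_BLOCK is set.
 */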

static inline void truncate_partial_page(struct page *page, unsigned partial)
{
	zero_user_segment(page, partial, PAGE_CACHE_SIZE);
	if (page_has_private(page))
		do_invalidatepage(page, partial);
}
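
/*
 * Worked example (sketch): truncating a file to lstart = 5000 bytes with
 * a 4096-byte page size gives partial = 5000 & (PAGE_CACHE_SIZE - 1) = 904,
 * so bytes [904, 4096) of the last remaining page are zeroed above and any
 * fs-private buffers beyond offset 904 are invalidated.
 */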

/*
 * This cancels just the dirty bit on the kernel page itself; it
 * does NOT actually remove dirty bits on any mmap's that may be
 * around. It also leaves the page tagged dirty, so any sync
 * activity will still find it on the dirty lists, and in particular,
 * clear_page_dirty_for_io() will still look at the dirty bits in
 * the VM.
 *
 * Doing this should *normally* only ever be done when a page
 * is truncated, and is not actually mapped anywhere at all. However,
 * fs/buffer.c does this when it notices that somebody has cleaned
 * out all the buffers on a page without actually doing it through
 * the VM. Can you say "ext3 is horribly ugly"? Thought you could.
 */
void cancel_dirty_page(struct page *page, unsigned int account_size)
{
	if (TestClearPageDirty(page)) {
		struct address_space *mapping = page->mapping;
		if (mapping && mapping_cap_account_dirty(mapping)) {
			dec_zone_page_state(page, NR_FILE_DIRTY);
			dec_bdi_stat(mapping->backing_dev_info,
					BDI_RECLAIMABLE);
			if (account_size)
				task_io_account_cancelled_write(account_size);
		}
	}
}
EXPORT_SYMBOL(cancel_dirty_page);

/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes orphaned.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_fault().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping.  This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_mapping_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static int
truncate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return -EIO;

	if (page_has_private(page))
		do_invalidatepage(page, 0);

	cancel_dirty_page(page, PAGE_CACHE_SIZE);

	clear_page_mlock(page);
	remove_from_page_cache(page);
	ClearPageMappedToDisk(page);
	page_cache_release(page);	/* pagecache ref */
	return 0;
}

/*
 * This is for invalidate_mapping_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	int ret;

	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, 0))
		return 0;

	clear_page_mlock(page);
	ret = remove_mapping(mapping, page);

	return ret;
}

int truncate_inode_page(struct address_space *mapping, struct page *page)
{
	if (page_mapped(page)) {
		unmap_mapping_range(mapping,
				   (loff_t)page->index << PAGE_CACHE_SHIFT,
				   PAGE_CACHE_SIZE, 0);
	}
	return truncate_complete_page(mapping, page);
}

/*
 * Used to get rid of pages on hardware memory corruption.
 */
int generic_error_remove_page(struct address_space *mapping, struct page *page)
{
	if (!mapping)
		return -EINVAL;
	/*
	 * Only punch for normal data pages for now.
	 * Handling other types like directories would need more auditing.
	 */
	if (!S_ISREG(mapping->host->i_mode))
		return -EIO;
	return truncate_inode_page(mapping, page);
}
EXPORT_SYMBOL(generic_error_remove_page);
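
/*
 * Example (illustrative sketch; foo_aops is hypothetical): filesystems
 * opt in to hwpoison recovery by pointing ->error_remove_page at this
 * helper in their address_space_operations:
 *
 *	static const struct address_space_operations foo_aops = {
 *		...
 *		.error_remove_page = generic_error_remove_page,
 *	};
 *
 * mm/memory-failure.c then invokes it through the mapping to drop a
 * page that has developed an uncorrectable error.
 */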

/*
 * Safely invalidate one page from its pagecache mapping.
 * It only drops clean, unused pages. The page must be locked.
 *
 * Returns 1 if the page is successfully invalidated, otherwise 0.
 */
int invalidate_inode_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	if (!mapping)
		return 0;
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	if (page_mapped(page))
		return 0;
	return invalidate_complete_page(mapping, page);
}
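
/*
 * Caller sketch (illustrative): the page must be locked, as the comment
 * above requires, so a typical best-effort invalidation looks like:
 *
 *	if (trylock_page(page)) {
 *		ret = invalidate_inode_page(page);
 *		unlock_page(page);
 *	}
 *
 * This is essentially the pattern invalidate_mapping_pages() below uses.
 */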

/**
 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate
 *
 * Truncate the page cache, removing the pages that are between the
 * specified offsets (and zeroing out the partial page if lstart is not
 * page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * When looking at page->index outside the page lock we need to be careful to
 * copy it into a local to avoid races (it could change at any time).
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 */
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	const pgoff_t start = (lstart + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
	pgoff_t end;
	const unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t next;
	int i;

	if (mapping->nrpages == 0)
		return;

	BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
	end = (lend >> PAGE_CACHE_SHIFT);

	pagevec_init(&pvec, 0);
	next = start;
	while (next <= end &&
	       pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index = page->index;

			if (page_index > end) {
				next = page_index;
				break;
			}

			if (page_index > next)
				next = page_index;
			next++;
			if (!trylock_page(page))
				continue;
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
	}

	if (partial) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			wait_on_page_writeback(page);
			truncate_partial_page(page, partial);
			unlock_page(page);
			page_cache_release(page);
		}
	}

	next = start;
	for ( ; ; ) {
		cond_resched();
		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
			if (next == start)
				break;
			next = start;
			continue;
		}
		if (pvec.pages[0]->index > end) {
			pagevec_release(&pvec);
			break;
		}
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			if (page->index > end)
				break;
			lock_page(page);
			wait_on_page_writeback(page);
			truncate_inode_page(mapping, page);
			if (page->index > next)
				next = page->index;
			next++;
			unlock_page(page);
		}
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
	}
}
EXPORT_SYMBOL(truncate_inode_pages_range);
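
/*
 * Example (sketch): the BUG_ON above requires @lend to point at the last
 * byte of a page.  To drop exactly page indices [4, 7] of a mapping:
 *
 *	loff_t lstart = (loff_t)4 << PAGE_CACHE_SHIFT;
 *	loff_t lend   = ((loff_t)8 << PAGE_CACHE_SHIFT) - 1;
 *
 *	truncate_inode_pages_range(mapping, lstart, lend);
 */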

/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);
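
/*
 * Example (sketch of a common caller pattern): a filesystem dropping all
 * of an inode's pagecache on final teardown, e.g. from ->evict_inode():
 *
 *	truncate_inode_pages(&inode->i_data, 0);
 */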

/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity. It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
				       pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t next = start;
	unsigned long ret = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (next <= end &&
			pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t index;
			int lock_failed;

			lock_failed = !trylock_page(page);

			/*
			 * We really shouldn't be looking at the ->index of an
			 * unlocked page.  But we're not allowed to lock these
			 * pages.  So we rely upon nobody altering the ->index
			 * of this (pinned-by-us) page.
			 */
			index = page->index;
			if (index > next)
				next = index;
			next++;
			if (lock_failed)
				continue;

			ret += invalidate_inode_page(page);

			unlock_page(page);
			if (next > end)
				break;
		}
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
	}
	return ret;
}
EXPORT_SYMBOL(invalidate_mapping_pages);
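
/*
 * Example (sketch): a best-effort, non-blocking attempt to drop every
 * clean, unmapped page of an inode, in the style of fs/drop_caches.c:
 *
 *	unsigned long nr = invalidate_mapping_pages(inode->i_mapping,
 *						    0, (pgoff_t)-1);
 *
 * nr counts only the pages actually freed; dirty, locked, mapped and
 * writeback pages are silently skipped.
 */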

/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_page_list() has a temp ref on them, or because they're transiently
 * sitting in the lru_cache_add() pagevecs.
 */
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;

	spin_lock_irq(&mapping->tree_lock);
	if (PageDirty(page))
		goto failed;

	clear_page_mlock(page);
	BUG_ON(page_has_private(page));
	__remove_from_page_cache(page);
	spin_unlock_irq(&mapping->tree_lock);
	mem_cgroup_uncharge_cache_page(page);

	if (mapping->a_ops->freepage)
		mapping->a_ops->freepage(page);

	page_cache_release(page);	/* pagecache ref */
	return 1;
failed:
	spin_unlock_irq(&mapping->tree_lock);
	return 0;
}

static int do_launder_page(struct address_space *mapping, struct page *page)
{
	if (!PageDirty(page))
		return 0;
	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
		return 0;
	return mapping->a_ops->launder_page(page);
}

/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t next;
	int i;
	int ret = 0;
	int ret2 = 0;
	int did_range_unmap = 0;
	int wrapped = 0;

	pagevec_init(&pvec, 0);
	next = start;
	while (next <= end && !wrapped &&
		pagevec_lookup(&pvec, mapping, next,
			min(end - next, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index;

			lock_page(page);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			page_index = page->index;
			next = page_index + 1;
			if (next == 0)
				wrapped = 1;
			if (page_index > end) {
				unlock_page(page);
				break;
			}
			wait_on_page_writeback(page);
			if (page_mapped(page)) {
				if (!did_range_unmap) {
					/*
					 * Zap the rest of the file in one hit.
					 */
					unmap_mapping_range(mapping,
					   (loff_t)page_index<<PAGE_CACHE_SHIFT,
					   (loff_t)(end - page_index + 1)
							<< PAGE_CACHE_SHIFT,
					    0);
					did_range_unmap = 1;
				} else {
					/*
					 * Just zap this page
					 */
					unmap_mapping_range(mapping,
					  (loff_t)page_index<<PAGE_CACHE_SHIFT,
					  PAGE_CACHE_SIZE, 0);
				}
			}
			BUG_ON(page_mapped(page));
			ret2 = do_launder_page(mapping, page);
			if (ret2 == 0) {
				if (!invalidate_complete_page2(mapping, page))
					ret2 = -EBUSY;
			}
			if (ret2 < 0)
				ret = ret2;
			unlock_page(page);
		}
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);

/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);
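
/*
 * Example (sketch): direct-IO style cache coherency.  After writing to
 * the backing store behind the pagecache, a caller can force subsequent
 * reads to go back to disk:
 *
 *	int err = invalidate_inode_pages2(inode->i_mapping);
 *	if (err)
 *		return err;	(-EBUSY: some pages were busy or redirtied)
 */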

/**
 * truncate_pagecache - unmap and remove pagecache that has been truncated
 * @inode: inode
 * @old: old file offset
 * @new: new file offset
 *
 * The inode's new i_size must already be written before truncate_pagecache
 * is called.
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (e.g. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache(struct inode *inode, loff_t old, loff_t new)
{
	struct address_space *mapping = inode->i_mapping;

	/*
	 * unmap_mapping_range is called twice, first simply for
	 * efficiency so that truncate_inode_pages does fewer
	 * single-page unmaps.  However after this first call, and
	 * before truncate_inode_pages finishes, it is possible for
	 * private pages to be COWed, which remain after
	 * truncate_inode_pages finishes, hence the second
	 * unmap_mapping_range call must be made for correctness.
	 */
	unmap_mapping_range(mapping, new + PAGE_SIZE - 1, 0, 1);
	truncate_inode_pages(mapping, new);
	unmap_mapping_range(mapping, new + PAGE_SIZE - 1, 0, 1);
}
EXPORT_SYMBOL(truncate_pagecache);
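
/*
 * Example (sketch of the expected ordering; foo_free_blocks is a
 * hypothetical fs-specific helper):
 *
 *	i_size_write(inode, newsize);
 *	truncate_pagecache(inode, oldsize, newsize);
 *	foo_free_blocks(inode, newsize);
 *
 * i.e. publish the new size first, shoot down the pagecache, and only
 * then free the on-disk blocks, as the comment above requires.
 */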

/**
 * truncate_setsize - update inode and pagecache for a new file size
 * @inode: inode
 * @newsize: new file size
 *
 * truncate_setsize updates i_size and performs pagecache truncation (if
 * necessary) to @newsize. It will typically be called from the filesystem's
 * setattr function when ATTR_SIZE is passed in.
 *
 * Must be called with inode->i_mutex held and before all filesystem specific
 * block truncation has been performed.
 */
void truncate_setsize(struct inode *inode, loff_t newsize)
{
	loff_t oldsize;

	oldsize = inode->i_size;
	i_size_write(inode, newsize);

	truncate_pagecache(inode, oldsize, newsize);
}
EXPORT_SYMBOL(truncate_setsize);
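
/*
 * Example (illustrative ->setattr() fragment; foo_truncate_blocks is a
 * hypothetical fs-specific helper):
 *
 *	if (attr->ia_valid & ATTR_SIZE) {
 *		truncate_setsize(inode, attr->ia_size);
 *		foo_truncate_blocks(inode, attr->ia_size);
 *	}
 */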

/**
 * vmtruncate - unmap mappings "freed" by truncate() syscall
 * @inode: inode of the file used
 * @offset: file offset to start truncating
 *
 * This function is deprecated and truncate_setsize or truncate_pagecache
 * should be used instead, together with filesystem specific block truncation.
 */
int vmtruncate(struct inode *inode, loff_t offset)
{
	int error;

	error = inode_newsize_ok(inode, offset);
	if (error)
		return error;

	truncate_setsize(inode, offset);
	if (inode->i_op->truncate)
		inode->i_op->truncate(inode);
	return 0;
}
EXPORT_SYMBOL(vmtruncate);
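
/*
 * Migration sketch (illustrative; foo_truncate is a hypothetical
 * fs-specific block-truncation helper): instead of calling the
 * deprecated vmtruncate(), a converted filesystem open-codes the
 * equivalent steps itself:
 *
 *	error = inode_newsize_ok(inode, offset);
 *	if (error)
 *		return error;
 *	truncate_setsize(inode, offset);
 *	foo_truncate(inode);
 */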