/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002	akpm@zip.com.au
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/backing-dev.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h>	/* grr. try_to_release_page,
				   do_invalidatepage */


/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: the byte offset within the page of the truncation point
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point, because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void do_invalidatepage(struct page *page, unsigned long offset)
{
	void (*invalidatepage)(struct page *, unsigned long);

	invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
	if (!invalidatepage)
		invalidatepage = block_invalidatepage;
#endif
	if (invalidatepage)
		(*invalidatepage)(page, offset);
}

static inline void truncate_partial_page(struct page *page, unsigned partial)
{
	zero_user_segment(page, partial, PAGE_CACHE_SIZE);
	if (PagePrivate(page))
		do_invalidatepage(page, partial);
}

/*
 * This cancels just the dirty bit on the kernel page itself, it
 * does NOT actually remove dirty bits on any mmap's that may be
 * around.  It also leaves the page tagged dirty, so any sync
 * activity will still find it on the dirty lists, and in particular,
 * clear_page_dirty_for_io() will still look at the dirty bits in
 * the VM.
 *
 * Doing this should *normally* only ever be done when a page
 * is truncated, and is not actually mapped anywhere at all.  However,
 * fs/buffer.c does this when it notices that somebody has cleaned
 * out all the buffers on a page without actually doing it through
 * the VM.  Can you say "ext3 is horribly ugly"?  Thought you could.
 */
void cancel_dirty_page(struct page *page, unsigned int account_size)
{
	if (TestClearPageDirty(page)) {
		struct address_space *mapping = page->mapping;
		if (mapping && mapping_cap_account_dirty(mapping)) {
			dec_zone_page_state(page, NR_FILE_DIRTY);
			dec_bdi_stat(mapping->backing_dev_info,
					BDI_RECLAIMABLE);
			if (account_size)
				task_io_account_cancelled_write(account_size);
		}
	}
}
EXPORT_SYMBOL(cancel_dirty_page);
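
/*
 * Illustrative sketch only (not from this file, not compiled): the
 * ->invalidatepage contract documented above is most often satisfied by a
 * block-backed filesystem pointing the hook straight at
 * block_invalidatepage(), which is also what do_invalidatepage() falls back
 * to when the hook is left NULL.  "example_aops" is a hypothetical name.
 */
#if 0	/* documentation sketch */
static const struct address_space_operations example_aops = {
	/* drop buffers beyond the truncation point, as described above */
	.invalidatepage	= block_invalidatepage,
};
#endif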

/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes orphaned.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_fault().
 *
 * We need to bale out if page->mapping is no longer equal to the original
 * mapping.  This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_mapping_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static void
truncate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return;

	if (PagePrivate(page))
		do_invalidatepage(page, 0);

	cancel_dirty_page(page, PAGE_CACHE_SIZE);

	remove_from_page_cache(page);
	ClearPageMappedToDisk(page);
	page_cache_release(page);	/* pagecache ref */
}

/*
 * This is for invalidate_mapping_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	int ret;

	if (page->mapping != mapping)
		return 0;

	if (PagePrivate(page) && !try_to_release_page(page, 0))
		return 0;

	ret = remove_mapping(mapping, page);

	return ret;
}
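
/*
 * Illustrative sketch only (not compiled): the lock-then-recheck pattern that
 * the helpers above depend on.  A caller must hold the page lock and verify
 * that page->mapping is still the expected mapping before truncating or
 * invalidating, because reclaim, a concurrent invalidate or tmpfs swizzling
 * may have detached the page in the meantime.  "example_page_still_ours" is
 * a hypothetical helper name.
 */
#if 0	/* documentation sketch */
static int example_page_still_ours(struct address_space *mapping,
				   struct page *page)
{
	if (!trylock_page(page))
		return 0;		/* somebody else holds the lock */
	if (page->mapping != mapping) {	/* reclaimed or migrated under us */
		unlock_page(page);
		return 0;
	}
	return 1;			/* locked and still attached */
}
#endif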

/**
 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate
 *
 * Truncate the page cache, removing the pages that are between the
 * specified offsets (and zeroing out the partial page if lstart is not
 * page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * When looking at page->index outside the page lock we need to be careful to
 * copy it into a local to avoid races (it could change at any time).
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 */
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	const pgoff_t start = (lstart + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
	pgoff_t end;
	const unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t next;
	int i;

	if (mapping->nrpages == 0)
		return;

	BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
	end = (lend >> PAGE_CACHE_SHIFT);

	pagevec_init(&pvec, 0);
	next = start;
	while (next <= end &&
	       pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index = page->index;

			if (page_index > end) {
				next = page_index;
				break;
			}

			if (page_index > next)
				next = page_index;
			next++;
			if (!trylock_page(page))
				continue;
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			if (page_mapped(page)) {
				unmap_mapping_range(mapping,
				  (loff_t)page_index<<PAGE_CACHE_SHIFT,
				  PAGE_CACHE_SIZE, 0);
			}
			truncate_complete_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (partial) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			wait_on_page_writeback(page);
			truncate_partial_page(page, partial);
			unlock_page(page);
			page_cache_release(page);
		}
	}

	next = start;
	for ( ; ; ) {
		cond_resched();
		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
			if (next == start)
				break;
			next = start;
			continue;
		}
		if (pvec.pages[0]->index > end) {
			pagevec_release(&pvec);
			break;
		}
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			if (page->index > end)
				break;
			lock_page(page);
			wait_on_page_writeback(page);
			if (page_mapped(page)) {
				unmap_mapping_range(mapping,
				  (loff_t)page->index<<PAGE_CACHE_SHIFT,
				  PAGE_CACHE_SIZE, 0);
			}
			if (page->index > next)
				next = page->index;
			next++;
			truncate_complete_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
	}
}
EXPORT_SYMBOL(truncate_inode_pages_range);

/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);
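
/*
 * Illustrative sketch only (not compiled): roughly the order in which a
 * truncate(2) path drives the helpers above, compare vmtruncate() in
 * mm/memory.c.  The new size is published first, mapped copies beyond it are
 * zapped, and only then are the pagecache pages themselves dropped.
 * "example_shrink_file" is a hypothetical name and error handling is omitted.
 */
#if 0	/* documentation sketch */
static void example_shrink_file(struct inode *inode, loff_t newsize)
{
	struct address_space *mapping = inode->i_mapping;

	i_size_write(inode, newsize);
	/* unmap first so truncate_inode_pages() does fewer per-page unmaps */
	unmap_mapping_range(mapping, newsize + PAGE_CACHE_SIZE - 1, 0, 1);
	truncate_inode_pages(mapping, newsize);
}
#endif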

unsigned long __invalidate_mapping_pages(struct address_space *mapping,
				pgoff_t start, pgoff_t end, bool be_atomic)
{
	struct pagevec pvec;
	pgoff_t next = start;
	unsigned long ret = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (next <= end &&
			pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t index;
			int lock_failed;

			lock_failed = !trylock_page(page);

			/*
			 * We really shouldn't be looking at the ->index of an
			 * unlocked page.  But we're not allowed to lock these
			 * pages.  So we rely upon nobody altering the ->index
			 * of this (pinned-by-us) page.
			 */
			index = page->index;
			if (index > next)
				next = index;
			next++;
			if (lock_failed)
				continue;

			if (PageDirty(page) || PageWriteback(page))
				goto unlock;
			if (page_mapped(page))
				goto unlock;
			ret += invalidate_complete_page(mapping, page);
unlock:
			unlock_page(page);
			if (next > end)
				break;
		}
		pagevec_release(&pvec);
		if (likely(!be_atomic))
			cond_resched();
	}
	return ret;
}

/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity.  It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
				pgoff_t start, pgoff_t end)
{
	return __invalidate_mapping_pages(mapping, start, end, false);
}
EXPORT_SYMBOL(invalidate_mapping_pages);

/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_page_list() has a temp ref on them, or because they're transiently
 * sitting in the lru_cache_add() pagevecs.
 */
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return 0;

	if (PagePrivate(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;

	spin_lock_irq(&mapping->tree_lock);
	if (PageDirty(page))
		goto failed;

	BUG_ON(PagePrivate(page));
	__remove_from_page_cache(page);
	spin_unlock_irq(&mapping->tree_lock);
	page_cache_release(page);	/* pagecache ref */
	return 1;
failed:
	spin_unlock_irq(&mapping->tree_lock);
	return 0;
}

static int do_launder_page(struct address_space *mapping, struct page *page)
{
	if (!PageDirty(page))
		return 0;
	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
		return 0;
	return mapping->a_ops->launder_page(page);
}
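
/*
 * Illustrative sketch only (not compiled): invalidate_mapping_pages() is the
 * best-effort interface, used for example by fs/drop_caches.c.  Dirty,
 * locked, writeback and mapped pages are simply skipped, so the return value
 * (pages actually freed) may be well below mapping->nrpages.
 * "example_drop_clean_cache" is a hypothetical name.
 */
#if 0	/* documentation sketch */
static unsigned long example_drop_clean_cache(struct address_space *mapping)
{
	return invalidate_mapping_pages(mapping, 0, (pgoff_t)-1);
}
#endif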

/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t next;
	int i;
	int ret = 0;
	int ret2 = 0;
	int did_range_unmap = 0;
	int wrapped = 0;

	pagevec_init(&pvec, 0);
	next = start;
	while (next <= end && !wrapped &&
		pagevec_lookup(&pvec, mapping, next,
			min(end - next, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index;

			lock_page(page);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			page_index = page->index;
			next = page_index + 1;
			if (next == 0)
				wrapped = 1;
			if (page_index > end) {
				unlock_page(page);
				break;
			}
			wait_on_page_writeback(page);
			if (page_mapped(page)) {
				if (!did_range_unmap) {
					/*
					 * Zap the rest of the file in one hit.
					 */
					unmap_mapping_range(mapping,
					   (loff_t)page_index<<PAGE_CACHE_SHIFT,
					   (loff_t)(end - page_index + 1)
							<< PAGE_CACHE_SHIFT,
					    0);
					did_range_unmap = 1;
				} else {
					/*
					 * Just zap this page
					 */
					unmap_mapping_range(mapping,
					  (loff_t)page_index<<PAGE_CACHE_SHIFT,
					  PAGE_CACHE_SIZE, 0);
				}
			}
			BUG_ON(page_mapped(page));
			ret2 = do_launder_page(mapping, page);
			if (ret2 == 0) {
				if (!invalidate_complete_page2(mapping, page))
					ret2 = -EBUSY;
			}
			if (ret2 < 0)
				ret = ret2;
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);

/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);
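
/*
 * Illustrative sketch only (not compiled): the stronger
 * invalidate_inode_pages2_range() interface is what direct-I/O style writers
 * use to force out pagecache pages that have just been made stale on disk,
 * compare generic_file_direct_write() in mm/filemap.c.
 * "example_invalidate_after_dio" is a hypothetical name.
 */
#if 0	/* documentation sketch */
static int example_invalidate_after_dio(struct address_space *mapping,
					loff_t pos, size_t count)
{
	pgoff_t first = pos >> PAGE_CACHE_SHIFT;
	pgoff_t last = (pos + count - 1) >> PAGE_CACHE_SHIFT;

	/* fails with -EBUSY if a page could not be invalidated */
	return invalidate_inode_pages2_range(mapping, first, last);
}
#endif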