/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002	akpm@zip.com.au
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h>	/* grr. try_to_release_page,
				   do_invalidatepage */


/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: the index of the truncation point
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point, because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void do_invalidatepage(struct page *page, unsigned long offset)
{
	void (*invalidatepage)(struct page *, unsigned long);
	invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
	if (!invalidatepage)
		invalidatepage = block_invalidatepage;
#endif
	if (invalidatepage)
		(*invalidatepage)(page, offset);
}

static inline void truncate_partial_page(struct page *page, unsigned partial)
{
	zero_user_page(page, partial, PAGE_CACHE_SIZE - partial, KM_USER0);
	if (PagePrivate(page))
		do_invalidatepage(page, partial);
}
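
/*
 * Example (illustrative sketch, not called from this file): the "partial"
 * argument is the byte offset of the truncation point within the last
 * remaining page, which a caller derives from the new file size exactly
 * as truncate_inode_pages_range() does below.  With PAGE_CACHE_SIZE ==
 * 4096 and a new size of 5000 bytes, the last kept page retains bytes
 * [0, 904) and has bytes [904, 4096) zeroed:
 *
 *	unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
 *	if (partial)			// 5000 & 4095 == 904
 *		truncate_partial_page(page, partial);
 */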

/*
 * This cancels just the dirty bit on the kernel page itself, it
 * does NOT actually remove dirty bits on any mmap's that may be
 * around. It also leaves the page tagged dirty, so any sync
 * activity will still find it on the dirty lists, and in particular,
 * clear_page_dirty_for_io() will still look at the dirty bits in
 * the VM.
 *
 * Doing this should *normally* only ever be done when a page
 * is truncated, and is not actually mapped anywhere at all. However,
 * fs/buffer.c does this when it notices that somebody has cleaned
 * out all the buffers on a page without actually doing it through
 * the VM. Can you say "ext3 is horribly ugly"? Thought you could.
 */
void cancel_dirty_page(struct page *page, unsigned int account_size)
{
	if (TestClearPageDirty(page)) {
		struct address_space *mapping = page->mapping;
		if (mapping && mapping_cap_account_dirty(mapping)) {
			dec_zone_page_state(page, NR_FILE_DIRTY);
			if (account_size)
				task_io_account_cancelled_write(account_size);
		}
	}
}
EXPORT_SYMBOL(cancel_dirty_page);

/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes anonymous. It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_fault().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping. This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_mapping_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static void
truncate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return;

	cancel_dirty_page(page, PAGE_CACHE_SIZE);

	if (PagePrivate(page))
		do_invalidatepage(page, 0);

	remove_from_page_cache(page);
	ClearPageUptodate(page);
	ClearPageMappedToDisk(page);
	page_cache_release(page);	/* pagecache ref */
}

/*
 * This is for invalidate_mapping_pages(). That function can be called at
 * any time, and is not supposed to throw away dirty pages. But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	int ret;

	if (page->mapping != mapping)
		return 0;

	if (PagePrivate(page) && !try_to_release_page(page, 0))
		return 0;

	ret = remove_mapping(mapping, page);

	return ret;
}
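
/*
 * A quick contrast of the two helpers above (illustrative sketch only;
 * the callers below encode this for real): truncation is unconditional,
 * invalidation is best-effort.
 *
 *	truncate_complete_page(mapping, page);
 *		- always removes the page (dirty state is cancelled and
 *		  private buffers are invalidated first)
 *	if (!invalidate_complete_page(mapping, page))
 *		- returns 0 and leaves the page alone if its private
 *		  buffers are busy, or if remove_mapping() fails because
 *		  the page is dirty or still referenced
 */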

/**
 * truncate_inode_pages_range - truncate range of pages specified by start
 * and end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate
 *
 * Truncate the page cache, removing the pages that are between the
 * specified offsets (and zeroing out the partial page if lstart is not
 * page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking. It will not
 * block on page locks and it will not block on writeback. The second pass
 * will wait. This is to prevent, as far as possible, IO against the affected
 * region. The first pass will remove most pages, so the search cost of the
 * second pass is low.
 *
 * When looking at page->index outside the page lock we need to be careful to
 * copy it into a local to avoid races (it could change at any time).
 *
 * We pass down the cache-hot hint to the page freeing code. Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 */
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	const pgoff_t start = (lstart + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
	pgoff_t end;
	const unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t next;
	int i;

	if (mapping->nrpages == 0)
		return;

	BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
	end = (lend >> PAGE_CACHE_SHIFT);

	pagevec_init(&pvec, 0);
	next = start;
	while (next <= end &&
	       pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index = page->index;

			if (page_index > end) {
				next = page_index;
				break;
			}

			if (page_index > next)
				next = page_index;
			next++;
			if (TestSetPageLocked(page))
				continue;
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			if (page_mapped(page)) {
				unmap_mapping_range(mapping,
				  (loff_t)page_index<<PAGE_CACHE_SHIFT,
				  PAGE_CACHE_SIZE, 0);
			}
			truncate_complete_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (partial) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			wait_on_page_writeback(page);
			truncate_partial_page(page, partial);
			unlock_page(page);
			page_cache_release(page);
		}
	}

	next = start;
	for ( ; ; ) {
		cond_resched();
		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
			if (next == start)
				break;
			next = start;
			continue;
		}
		if (pvec.pages[0]->index > end) {
			pagevec_release(&pvec);
			break;
		}
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			if (page->index > end)
				break;
			lock_page(page);
			wait_on_page_writeback(page);
			if (page_mapped(page)) {
				unmap_mapping_range(mapping,
				  (loff_t)page->index<<PAGE_CACHE_SHIFT,
				  PAGE_CACHE_SIZE, 0);
			}
			if (page->index > next)
				next = page->index;
			next++;
			truncate_complete_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
	}
}
EXPORT_SYMBOL(truncate_inode_pages_range);

/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);
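
/*
 * Example (a minimal sketch of a typical caller, modelled loosely on the
 * vmtruncate() shrink path; inode, mapping and newsize are assumed locals,
 * and the real helper does more checking): the inode size is updated
 * before the pagecache is truncated, so concurrent faults cannot
 * reinstantiate pages beyond the new EOF after we remove them:
 *
 *	// caller holds inode->i_mutex
 *	i_size_write(inode, newsize);
 *	unmap_mapping_range(mapping, newsize + PAGE_SIZE - 1, 0, 1);
 *	truncate_inode_pages(mapping, newsize);
 */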

unsigned long __invalidate_mapping_pages(struct address_space *mapping,
				pgoff_t start, pgoff_t end, bool be_atomic)
{
	struct pagevec pvec;
	pgoff_t next = start;
	unsigned long ret = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (next <= end &&
	       pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t index;
			int lock_failed;

			lock_failed = TestSetPageLocked(page);

			/*
			 * We really shouldn't be looking at the ->index of an
			 * unlocked page. But we're not allowed to lock these
			 * pages. So we rely upon nobody altering the ->index
			 * of this (pinned-by-us) page.
			 */
			index = page->index;
			if (index > next)
				next = index;
			next++;
			if (lock_failed)
				continue;

			if (PageDirty(page) || PageWriteback(page))
				goto unlock;
			if (page_mapped(page))
				goto unlock;
			ret += invalidate_complete_page(mapping, page);
unlock:
			unlock_page(page);
			if (next > end)
				break;
		}
		pagevec_release(&pvec);
		if (likely(!be_atomic))
			cond_resched();
	}
	return ret;
}

/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity. It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t end)
{
	return __invalidate_mapping_pages(mapping, start, end, false);
}
EXPORT_SYMBOL(invalidate_mapping_pages);
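
/*
 * Example (hedged sketch of a typical caller, along the lines of
 * fs/drop_caches.c): dropping every clean, unmapped, non-busy pagecache
 * page of an inode is a single best-effort call over the whole range;
 * dirty, locked, mapped or writeback pages are silently skipped:
 *
 *	invalidate_mapping_pages(mapping, 0, -1);
 */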

/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount. We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_page_list() has a temp ref on them, or because they're transiently
 * sitting in the lru_cache_add() pagevecs.
 */
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return 0;

	if (PagePrivate(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;

	write_lock_irq(&mapping->tree_lock);
	if (PageDirty(page))
		goto failed;

	BUG_ON(PagePrivate(page));
	__remove_from_page_cache(page);
	write_unlock_irq(&mapping->tree_lock);
	ClearPageUptodate(page);
	page_cache_release(page);	/* pagecache ref */
	return 1;
failed:
	write_unlock_irq(&mapping->tree_lock);
	return 0;
}

static int do_launder_page(struct address_space *mapping, struct page *page)
{
	if (!PageDirty(page))
		return 0;
	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
		return 0;
	return mapping->a_ops->launder_page(page);
}

/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EIO if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t next;
	int i;
	int ret = 0;
	int did_range_unmap = 0;
	int wrapped = 0;

	pagevec_init(&pvec, 0);
	next = start;
	while (next <= end && !wrapped &&
		pagevec_lookup(&pvec, mapping, next,
			min(end - next, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index;

			lock_page(page);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			page_index = page->index;
			next = page_index + 1;
			if (next == 0)
				wrapped = 1;
			if (page_index > end) {
				unlock_page(page);
				break;
			}
			wait_on_page_writeback(page);
			if (page_mapped(page)) {
				if (!did_range_unmap) {
					/*
					 * Zap the rest of the file in one hit.
					 */
					unmap_mapping_range(mapping,
					   (loff_t)page_index<<PAGE_CACHE_SHIFT,
					   (loff_t)(end - page_index + 1)
							<< PAGE_CACHE_SHIFT,
					    0);
					did_range_unmap = 1;
				} else {
					/*
					 * Just zap this page
					 */
					unmap_mapping_range(mapping,
					  (loff_t)page_index<<PAGE_CACHE_SHIFT,
					  PAGE_CACHE_SIZE, 0);
				}
			}
			BUG_ON(page_mapped(page));
			ret = do_launder_page(mapping, page);
			if (ret == 0 && !invalidate_complete_page2(mapping, page))
				ret = -EIO;
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);

/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EIO if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);
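
/*
 * Example (illustrative sketch, modelled loosely on the generic direct-IO
 * write path in mm/filemap.c; pos and written are assumed locals): after a
 * direct write bypasses the pagecache, any cached pages over the written
 * range are stale and must be shot down, propagating the error if a page
 * could not be invalidated:
 *
 *	if (written > 0) {
 *		int err = invalidate_inode_pages2_range(mapping,
 *				pos >> PAGE_CACHE_SHIFT,
 *				(pos + written - 1) >> PAGE_CACHE_SHIFT);
 *		if (err)
 *			// report err (e.g. -EIO) to the caller
 *	}
 */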