/*
 * mm/page-writeback.c.
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains functions related to writing back dirty pages at the
 * address_space level.
 *
 * 10Apr2002	akpm@zip.com.au
 *		Initial version
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/init.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/mpage.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>

/*
 * The maximum number of pages to writeout in a single bdflush/kupdate
 * operation.  We do this so we don't hold I_LOCK against an inode for
 * enormous amounts of time, which would block a userspace task which has
 * been forced to throttle against that inode.  Also, the code reevaluates
 * the dirty state each time it has written this many pages.
 */
#define MAX_WRITEBACK_PAGES	1024

/*
 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
 * will look to see if it needs to force writeback or throttling.
 */
static long ratelimit_pages = 32;

static long total_pages;	/* The total number of pages in the machine. */
static int dirty_exceeded __cacheline_aligned_in_smp;	/* Dirty mem may be over limit */

/*
 * When balance_dirty_pages decides that the caller needs to perform some
 * non-background writeback, this is how many pages it will attempt to write.
 * It should be somewhat larger than RATELIMIT_PAGES to ensure that reasonably
 * large amounts of I/O are submitted.
 */
static inline long sync_writeback_pages(void)
{
	return ratelimit_pages + ratelimit_pages / 2;
}

/* The following parameters are exported via /proc/sys/vm */

/*
 * Start background writeback (via pdflush) at this percentage
 */
int dirty_background_ratio = 10;

/*
 * The generator of dirty data starts writeback at this percentage
 */
int vm_dirty_ratio = 40;

/*
 * The interval between `kupdate'-style writebacks, in centiseconds
 * (hundredths of a second)
 */
int dirty_writeback_interval = 5 * HZ;

/*
 * The longest number of centiseconds for which data is allowed to remain dirty
 */
int dirty_expire_interval = 30 * HZ;

/*
 * Flag that makes the machine dump writes/reads and block dirtyings.
 */
int block_dump;

/*
 * Flag that puts the machine in "laptop mode".
 */
int laptop_mode;

EXPORT_SYMBOL(laptop_mode);

/* End of sysctl-exported parameters */


static void background_writeout(unsigned long _min_pages);

struct writeback_state
{
	unsigned long nr_dirty;
	unsigned long nr_unstable;
	unsigned long nr_mapped;
	unsigned long nr_writeback;
};

static void get_writeback_state(struct writeback_state *wbs)
{
	wbs->nr_dirty = read_page_state(nr_dirty);
	wbs->nr_unstable = read_page_state(nr_unstable);
	wbs->nr_mapped = read_page_state(nr_mapped);
	wbs->nr_writeback = read_page_state(nr_writeback);
}

/*
 * Work out the current dirty-memory clamping and background writeout
 * thresholds.
 *
 * The main aim here is to lower them aggressively if there is a lot of mapped
 * memory around, to avoid stressing page reclaim with lots of unreclaimable
 * pages.  It is better to clamp down on writers than to start swapping and
 * performing lots of scanning.
 *
 * We only allow 1/2 of the currently-unmapped memory to be dirtied.
 *
 * We don't permit the clamping level to fall below 5% - that is getting rather
 * excessive.
 *
 * We make sure that the background writeout level is below the adjusted
 * clamping level.
 */
static void
get_dirty_limits(struct writeback_state *wbs, long *pbackground, long *pdirty,
		struct address_space *mapping)
{
	int background_ratio;		/* Percentages */
	int dirty_ratio;
	int unmapped_ratio;
	long background;
	long dirty;
	unsigned long available_memory = total_pages;
	struct task_struct *tsk;

	get_writeback_state(wbs);

#ifdef CONFIG_HIGHMEM
	/*
	 * If this mapping can only allocate from low memory,
	 * we exclude high memory from our count.
	 */
	if (mapping && !(mapping_gfp_mask(mapping) & __GFP_HIGHMEM))
		available_memory -= totalhigh_pages;
#endif

	unmapped_ratio = 100 - (wbs->nr_mapped * 100) / total_pages;

	dirty_ratio = vm_dirty_ratio;
	if (dirty_ratio > unmapped_ratio / 2)
		dirty_ratio = unmapped_ratio / 2;

	if (dirty_ratio < 5)
		dirty_ratio = 5;

	background_ratio = dirty_background_ratio;
	if (background_ratio >= dirty_ratio)
		background_ratio = dirty_ratio / 2;

	background = (background_ratio * available_memory) / 100;
	dirty = (dirty_ratio * available_memory) / 100;
	tsk = current;
	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
		background += background / 4;
		dirty += dirty / 4;
	}
	*pbackground = background;
	*pdirty = dirty;
}
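
/*
 * Worked example (illustrative figures only): on a machine with 1,000,000
 * pages of which 600,000 are mapped, unmapped_ratio is 40, so
 * vm_dirty_ratio = 40 is clamped down to 20 while dirty_background_ratio = 10
 * is left alone.  The resulting limits are 200,000 dirty pages and a
 * 100,000-page background level; a PF_LESS_THROTTLE or real-time caller gets
 * a further 25%, i.e. 250,000 and 125,000 pages.
 */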

/*
 * balance_dirty_pages() must be called by processes which are generating dirty
 * data.  It looks at the number of dirty pages in the machine and will force
 * the caller to perform writeback if the system is over `vm_dirty_ratio'.
 * If we're over `background_thresh' then pdflush is woken to perform some
 * writeout.
 */
static void balance_dirty_pages(struct address_space *mapping)
{
	struct writeback_state wbs;
	long nr_reclaimable;
	long background_thresh;
	long dirty_thresh;
	unsigned long pages_written = 0;
	unsigned long write_chunk = sync_writeback_pages();

	struct backing_dev_info *bdi = mapping->backing_dev_info;

	for (;;) {
		struct writeback_control wbc = {
			.bdi = bdi,
			.sync_mode = WB_SYNC_NONE,
			.older_than_this = NULL,
			.nr_to_write = write_chunk,
		};

		get_dirty_limits(&wbs, &background_thresh,
					&dirty_thresh, mapping);
		nr_reclaimable = wbs.nr_dirty + wbs.nr_unstable;
		if (nr_reclaimable + wbs.nr_writeback <= dirty_thresh)
			break;

		if (!dirty_exceeded)
			dirty_exceeded = 1;

		/* Note: nr_reclaimable denotes nr_dirty + nr_unstable.
		 * Unstable writes are a feature of certain networked
		 * filesystems (e.g. NFS) in which data may have been
		 * written to the server's write cache, but has not yet
		 * been flushed to permanent storage.
		 */
		if (nr_reclaimable) {
			writeback_inodes(&wbc);
			get_dirty_limits(&wbs, &background_thresh,
					&dirty_thresh, mapping);
			nr_reclaimable = wbs.nr_dirty + wbs.nr_unstable;
			if (nr_reclaimable + wbs.nr_writeback <= dirty_thresh)
				break;
			pages_written += write_chunk - wbc.nr_to_write;
			if (pages_written >= write_chunk)
				break;		/* We've done our duty */
		}
		blk_congestion_wait(WRITE, HZ/10);
	}

	if (nr_reclaimable + wbs.nr_writeback <= dirty_thresh && dirty_exceeded)
		dirty_exceeded = 0;

	if (writeback_in_progress(bdi))
		return;		/* pdflush is already working this queue */

	/*
	 * In laptop mode, we wait until hitting the higher threshold before
	 * starting background writeout, and then write out all the way down
	 * to the lower threshold.  So slow writers cause minimal disk activity.
	 *
	 * In normal mode, we start background writeout at the lower
	 * background_thresh, to keep the amount of dirty memory low.
	 */
	if ((laptop_mode && pages_written) ||
	     (!laptop_mode && (nr_reclaimable > background_thresh)))
		pdflush_operation(background_writeout, 0);
}

/**
 * balance_dirty_pages_ratelimited - balance dirty memory state
 * @mapping: address_space which was dirtied
 *
 * Processes which are dirtying memory should call in here once for each page
 * which was newly dirtied.  The function will periodically check the system's
 * dirty state and will initiate writeback if needed.
 *
 * On really big machines, get_writeback_state is expensive, so try to avoid
 * calling it too often (ratelimiting).  But once we're over the dirty memory
 * limit we decrease the ratelimiting by a lot, to prevent individual processes
 * from overshooting the limit by (ratelimit_pages) each.
 */
void balance_dirty_pages_ratelimited(struct address_space *mapping)
{
	static DEFINE_PER_CPU(int, ratelimits) = 0;
	long ratelimit;

	ratelimit = ratelimit_pages;
	if (dirty_exceeded)
		ratelimit = 8;

	/*
	 * Check the rate limiting. Also, we do not want to throttle real-time
	 * tasks in balance_dirty_pages(). Period.
	 */
	if (get_cpu_var(ratelimits)++ >= ratelimit) {
		__get_cpu_var(ratelimits) = 0;
		put_cpu_var(ratelimits);
		balance_dirty_pages(mapping);
		return;
	}
	put_cpu_var(ratelimits);
}
EXPORT_SYMBOL(balance_dirty_pages_ratelimited);
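
/*
 * A typical caller is the buffered write path: roughly, after each page has
 * been dirtied (e.g. via ->commit_write()) the writer does
 *
 *	balance_dirty_pages_ratelimited(mapping);
 *
 * so throttling is considered at most once per newly dirtied page, and -
 * thanks to the per-cpu counter above - usually much less often than that.
 */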

void throttle_vm_writeout(void)
{
	struct writeback_state wbs;
	long background_thresh;
	long dirty_thresh;

	for ( ; ; ) {
		get_dirty_limits(&wbs, &background_thresh, &dirty_thresh, NULL);

		/*
		 * Boost the allowable dirty threshold a bit for page
		 * allocators so they don't get DoS'ed by heavy writers
		 */
		dirty_thresh += dirty_thresh / 10;	/* wheeee... */

		if (wbs.nr_unstable + wbs.nr_writeback <= dirty_thresh)
			break;
		blk_congestion_wait(WRITE, HZ/10);
	}
}


/*
 * Write back at least _min_pages, and keep writing until the amount of dirty
 * memory is less than the background threshold, or until we're all clean.
 */
static void background_writeout(unsigned long _min_pages)
{
	long min_pages = _min_pages;
	struct writeback_control wbc = {
		.bdi = NULL,
		.sync_mode = WB_SYNC_NONE,
		.older_than_this = NULL,
		.nr_to_write = 0,
		.nonblocking = 1,
	};

	for ( ; ; ) {
		struct writeback_state wbs;
		long background_thresh;
		long dirty_thresh;

		get_dirty_limits(&wbs, &background_thresh, &dirty_thresh, NULL);
		if (wbs.nr_dirty + wbs.nr_unstable < background_thresh
				&& min_pages <= 0)
			break;
		wbc.encountered_congestion = 0;
		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
		wbc.pages_skipped = 0;
		writeback_inodes(&wbc);
		min_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
		if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) {
			/* Wrote less than expected */
			blk_congestion_wait(WRITE, HZ/10);
			if (!wbc.encountered_congestion)
				break;
		}
	}
}

/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.  Returns 0 if a pdflush thread was dispatched.  Returns
 * -1 if all pdflush threads were busy.
 */
int wakeup_pdflush(long nr_pages)
{
	if (nr_pages == 0) {
		struct writeback_state wbs;

		get_writeback_state(&wbs);
		nr_pages = wbs.nr_dirty + wbs.nr_unstable;
	}
	return pdflush_operation(background_writeout, nr_pages);
}
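
/*
 * wakeup_pdflush() is the entry point used from outside this file, for
 * instance by the sync(2) path and by page reclaim when it runs into too
 * many dirty pages.  Illustrative use (the condition and count stand in for
 * the caller's own heuristics):
 *
 *	if (too_much_dirty_memory)
 *		wakeup_pdflush(nr_pages_to_clean);
 */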

static void wb_timer_fn(unsigned long unused);
static void laptop_timer_fn(unsigned long unused);

static DEFINE_TIMER(wb_timer, wb_timer_fn, 0, 0);
static DEFINE_TIMER(laptop_mode_wb_timer, laptop_timer_fn, 0, 0);

/*
 * Periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space.  So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval.  But if a writeback event
 * takes longer than one dirty_writeback_interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static void wb_kupdate(unsigned long arg)
{
	unsigned long oldest_jif;
	unsigned long start_jif;
	unsigned long next_jif;
	long nr_to_write;
	struct writeback_state wbs;
	struct writeback_control wbc = {
		.bdi = NULL,
		.sync_mode = WB_SYNC_NONE,
		.older_than_this = &oldest_jif,
		.nr_to_write = 0,
		.nonblocking = 1,
		.for_kupdate = 1,
	};

	sync_supers();

	get_writeback_state(&wbs);
	oldest_jif = jiffies - dirty_expire_interval;
	start_jif = jiffies;
	next_jif = start_jif + dirty_writeback_interval;
	nr_to_write = wbs.nr_dirty + wbs.nr_unstable +
			(inodes_stat.nr_inodes - inodes_stat.nr_unused);
	while (nr_to_write > 0) {
		wbc.encountered_congestion = 0;
		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
		writeback_inodes(&wbc);
		if (wbc.nr_to_write > 0) {
			if (wbc.encountered_congestion)
				blk_congestion_wait(WRITE, HZ/10);
			else
				break;	/* All the old data is written */
		}
		nr_to_write -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
	}
	if (time_before(next_jif, jiffies + HZ))
		next_jif = jiffies + HZ;
	if (dirty_writeback_interval)
		mod_timer(&wb_timer, next_jif);
}
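
/*
 * The periodic writeback machinery is a loop between a timer and pdflush:
 * wb_timer fires, wb_timer_fn() below hands wb_kupdate() to a pdflush
 * thread, and wb_kupdate() re-arms wb_timer roughly dirty_writeback_interval
 * later, or one second later if the pass itself took longer than that.
 * wb_timer_fn() retries after one second when no pdflush thread was
 * available.
 */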

/*
 * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
 */
int dirty_writeback_centisecs_handler(ctl_table *table, int write,
		struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec_userhz_jiffies(table, write, file, buffer, length, ppos);
	if (dirty_writeback_interval) {
		mod_timer(&wb_timer,
			jiffies + dirty_writeback_interval);
	} else {
		del_timer(&wb_timer);
	}
	return 0;
}

static void wb_timer_fn(unsigned long unused)
{
	if (pdflush_operation(wb_kupdate, 0) < 0)
		mod_timer(&wb_timer, jiffies + HZ); /* delay 1 second */
}

static void laptop_flush(unsigned long unused)
{
	sys_sync();
}

static void laptop_timer_fn(unsigned long unused)
{
	pdflush_operation(laptop_flush, 0);
}

/*
 * We've spun up the disk and we're in laptop mode: schedule writeback
 * of all dirty data a few seconds from now.  If the flush is already scheduled
 * then push it back - the user is still using the disk.
 */
void laptop_io_completion(void)
{
	mod_timer(&laptop_mode_wb_timer, jiffies + laptop_mode * HZ);
}

/*
 * We're in laptop mode and we've just synced.  The sync's writes will have
 * caused another writeback to be scheduled by laptop_io_completion.
 * Nothing needs to be written back anymore, so we unschedule the writeback.
 */
void laptop_sync_completion(void)
{
	del_timer(&laptop_mode_wb_timer);
}

/*
 * If ratelimit_pages is too high then we can get into dirty-data overload
 * if a large number of processes all perform writes at the same time.
 * If it is too low then SMP machines will call the (expensive)
 * get_writeback_state too often.
 *
 * Here we set ratelimit_pages to a level which ensures that when all CPUs are
 * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
 * thresholds before writeback cuts in.
 *
 * But the limit should not be set too high, because it also controls the
 * amount of memory which the balance_dirty_pages() caller has to write back.
 * If this is too large then the caller will block on the IO queue all the
 * time.  So limit it to four megabytes - the balance_dirty_pages() caller
 * will write six megabyte chunks, max.
 */

static void set_ratelimit(void)
{
	ratelimit_pages = total_pages / (num_online_cpus() * 32);
	if (ratelimit_pages < 16)
		ratelimit_pages = 16;
	if (ratelimit_pages * PAGE_CACHE_SIZE > 4096 * 1024)
		ratelimit_pages = (4096 * 1024) / PAGE_CACHE_SIZE;
}
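
/*
 * Worked example (illustrative): with 1,000,000 4KB pages and 4 online CPUs
 * the formula gives 1,000,000 / (4 * 32) = 7812 pages, which is well over
 * four megabytes, so ratelimit_pages is clamped to 1024.  A single
 * balance_dirty_pages() call then writes at most sync_writeback_pages() =
 * 1536 pages - the "six megabyte chunks" mentioned above.
 */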

static int
ratelimit_handler(struct notifier_block *self, unsigned long u, void *v)
{
	set_ratelimit();
	return 0;
}

static struct notifier_block ratelimit_nb = {
	.notifier_call	= ratelimit_handler,
	.next		= NULL,
};

/*
 * If the machine has a large highmem:lowmem ratio then scale back the default
 * dirty memory thresholds: allowing too much dirty highmem pins an excessive
 * number of buffer_heads.
 */
void __init page_writeback_init(void)
{
	long buffer_pages = nr_free_buffer_pages();
	long correction;

	total_pages = nr_free_pagecache_pages();

	correction = (100 * 4 * buffer_pages) / total_pages;

	if (correction < 100) {
		dirty_background_ratio *= correction;
		dirty_background_ratio /= 100;
		vm_dirty_ratio *= correction;
		vm_dirty_ratio /= 100;

		if (dirty_background_ratio <= 0)
			dirty_background_ratio = 1;
		if (vm_dirty_ratio <= 0)
			vm_dirty_ratio = 1;
	}
	mod_timer(&wb_timer, jiffies + dirty_writeback_interval);
	set_ratelimit();
	register_cpu_notifier(&ratelimit_nb);
}

int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	int ret;

	if (wbc->nr_to_write <= 0)
		return 0;
	wbc->for_writepages = 1;
	if (mapping->a_ops->writepages)
		ret = mapping->a_ops->writepages(mapping, wbc);
	else
		ret = generic_writepages(mapping, wbc);
	wbc->for_writepages = 0;
	return ret;
}

/**
 * write_one_page - write out a single page and optionally wait on I/O
 *
 * @page: the page to write
 * @wait: if true, wait on writeout
 *
 * The page must be locked by the caller and will be unlocked upon return.
 *
 * write_one_page() returns a negative error code if I/O failed.
 */
int write_one_page(struct page *page, int wait)
{
	struct address_space *mapping = page->mapping;
	int ret = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 1,
	};

	BUG_ON(!PageLocked(page));

	if (wait)
		wait_on_page_writeback(page);

	if (clear_page_dirty_for_io(page)) {
		page_cache_get(page);
		ret = mapping->a_ops->writepage(page, &wbc);
		if (ret == 0 && wait) {
			wait_on_page_writeback(page);
			if (PageError(page))
				ret = -EIO;
		}
		page_cache_release(page);
	} else {
		unlock_page(page);
	}
	return ret;
}
EXPORT_SYMBOL(write_one_page);

/*
 * For address_spaces which do not use buffers.  Just tag the page as dirty in
 * its radix tree.
 *
 * This is also used when a single buffer is being dirtied: we want to set the
 * page dirty in that case, but not all the buffers.  This is a "bottom-up"
 * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
 *
 * Most callers have locked the page, which pins the address_space in memory.
 * But zap_pte_range() does not lock the page; in that case the mapping is
 * pinned by the vma's ->vm_file reference.
 *
 * We take care to handle the case where the page was truncated from the
 * mapping by re-checking page_mapping() inside tree_lock.
 */
int __set_page_dirty_nobuffers(struct page *page)
{
	int ret = 0;

	if (!TestSetPageDirty(page)) {
		struct address_space *mapping = page_mapping(page);
		struct address_space *mapping2;

		if (mapping) {
			write_lock_irq(&mapping->tree_lock);
			mapping2 = page_mapping(page);
			if (mapping2) { /* Race with truncate? */
				BUG_ON(mapping2 != mapping);
				if (mapping_cap_account_dirty(mapping))
					inc_page_state(nr_dirty);
				radix_tree_tag_set(&mapping->page_tree,
					page_index(page), PAGECACHE_TAG_DIRTY);
			}
			write_unlock_irq(&mapping->tree_lock);
			if (mapping->host) {
				/* !PageAnon && !swapper_space */
				__mark_inode_dirty(mapping->host,
							I_DIRTY_PAGES);
			}
		}
	}
	return ret;
}
EXPORT_SYMBOL(__set_page_dirty_nobuffers);

/*
 * When a writepage implementation decides that it doesn't want to write this
 * page for some reason, it should redirty the locked page via
 * redirty_page_for_writepage() and it should then unlock the page and return 0.
 */
int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
{
	wbc->pages_skipped++;
	return __set_page_dirty_nobuffers(page);
}
EXPORT_SYMBOL(redirty_page_for_writepage);
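
/*
 * Illustrative use inside a ->writepage which decides it cannot make
 * progress right now (the condition is the filesystem's own):
 *
 *	if (cannot_write_this_page_now) {
 *		redirty_page_for_writepage(wbc, page);
 *		unlock_page(page);
 *		return 0;
 *	}
 *
 * The pages_skipped accounting lets background_writeout() notice that less
 * work was done than it asked for.
 */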

/*
 * If the mapping doesn't provide a set_page_dirty a_op, then
 * just fall through and assume that it wants buffer_heads.
 */
int fastcall set_page_dirty(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (likely(mapping)) {
		int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
		if (spd)
			return (*spd)(page);
		return __set_page_dirty_buffers(page);
	}
	if (!PageDirty(page))
		SetPageDirty(page);
	return 0;
}
EXPORT_SYMBOL(set_page_dirty);

/*
 * set_page_dirty() is racy if the caller has no reference against
 * page->mapping->host, and if the page is unlocked.  This is because another
 * CPU could truncate the page off the mapping and then free the mapping.
 *
 * Usually, the page _is_ locked, or the caller is a user-space process which
 * holds a reference on the inode by having an open file.
 *
 * In other cases, the page should be locked before running set_page_dirty().
 */
int set_page_dirty_lock(struct page *page)
{
	int ret;

	lock_page(page);
	ret = set_page_dirty(page);
	unlock_page(page);
	return ret;
}
EXPORT_SYMBOL(set_page_dirty_lock);

/*
 * Clear a page's dirty flag, while caring for dirty memory accounting.
 * Returns true if the page was previously dirty.
 */
int test_clear_page_dirty(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	unsigned long flags;

	if (mapping) {
		write_lock_irqsave(&mapping->tree_lock, flags);
		if (TestClearPageDirty(page)) {
			radix_tree_tag_clear(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_DIRTY);
			write_unlock_irqrestore(&mapping->tree_lock, flags);
			if (mapping_cap_account_dirty(mapping))
				dec_page_state(nr_dirty);
			return 1;
		}
		write_unlock_irqrestore(&mapping->tree_lock, flags);
		return 0;
	}
	return TestClearPageDirty(page);
}
EXPORT_SYMBOL(test_clear_page_dirty);

/*
 * Clear a page's dirty flag, while caring for dirty memory accounting.
 * Returns true if the page was previously dirty.
 *
 * This is for preparing to put the page under writeout.  We leave the page
 * tagged as dirty in the radix tree so that a concurrent write-for-sync
 * can discover it via a PAGECACHE_TAG_DIRTY walk.  The ->writepage
 * implementation will run either set_page_writeback() or set_page_dirty(),
 * at which stage we bring the page's dirty flag and radix-tree dirty tag
 * back into sync.
 *
 * This incoherency between the page's dirty flag and radix-tree tag is
 * unfortunate, but it only exists while the page is locked.
 */
int clear_page_dirty_for_io(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (mapping) {
		if (TestClearPageDirty(page)) {
			if (mapping_cap_account_dirty(mapping))
				dec_page_state(nr_dirty);
			return 1;
		}
		return 0;
	}
	return TestClearPageDirty(page);
}
EXPORT_SYMBOL(clear_page_dirty_for_io);
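
/*
 * Putting the pieces together, a minimal ->writepage sequence is, as a
 * sketch only (real implementations add buffer handling and error paths):
 *
 *	set_page_writeback(page);
 *	unlock_page(page);
 *	...submit the I/O...
 *
 * with end_page_writeback(page) run from the I/O completion handler.  The
 * caller of ->writepage has already done clear_page_dirty_for_io(), and
 * test_set_page_writeback() below clears the radix-tree dirty tag for the
 * now-clean page - the resynchronisation referred to in the comment above.
 */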

int test_clear_page_writeback(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	int ret;

	if (mapping) {
		unsigned long flags;

		write_lock_irqsave(&mapping->tree_lock, flags);
		ret = TestClearPageWriteback(page);
		if (ret)
			radix_tree_tag_clear(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_WRITEBACK);
		write_unlock_irqrestore(&mapping->tree_lock, flags);
	} else {
		ret = TestClearPageWriteback(page);
	}
	return ret;
}

int test_set_page_writeback(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	int ret;

	if (mapping) {
		unsigned long flags;

		write_lock_irqsave(&mapping->tree_lock, flags);
		ret = TestSetPageWriteback(page);
		if (!ret)
			radix_tree_tag_set(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_WRITEBACK);
		if (!PageDirty(page))
			radix_tree_tag_clear(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_DIRTY);
		write_unlock_irqrestore(&mapping->tree_lock, flags);
	} else {
		ret = TestSetPageWriteback(page);
	}
	return ret;
}
EXPORT_SYMBOL(test_set_page_writeback);

/*
 * Return true if any of the pages in the mapping are marked with the
 * passed tag.
 */
int mapping_tagged(struct address_space *mapping, int tag)
{
	unsigned long flags;
	int ret;

	read_lock_irqsave(&mapping->tree_lock, flags);
	ret = radix_tree_tagged(&mapping->page_tree, tag);
	read_unlock_irqrestore(&mapping->tree_lock, flags);
	return ret;
}
EXPORT_SYMBOL(mapping_tagged);