/*
 * mm/page-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Contains functions related to writing back dirty pages at the
 * address_space level.
 *
 * 10Apr2002	Andrew Morton
 *		Initial version
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/init.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/blkdev.h>
#include <linux/mpage.h>
#include <linux/rmap.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/buffer_head.h> /* __set_page_dirty_buffers */
#include <linux/pagevec.h>
#include <linux/timer.h>
#include <linux/sched/rt.h>
#include <linux/mm_inline.h>
#include <trace/events/writeback.h>

#include "internal.h"

/*
 * Sleep at most 200ms at a time in balance_dirty_pages().
 */
#define MAX_PAUSE		max(HZ/5, 1)

/*
 * Try to keep balance_dirty_pages() call intervals higher than this many pages
 * by raising pause time to max_pause when it falls below this threshold.
 */
#define DIRTY_POLL_THRESH	(128 >> (PAGE_SHIFT - 10))

/*
 * Estimate write bandwidth at 200ms intervals.
 */
#define BANDWIDTH_INTERVAL	max(HZ/5, 1)

#define RATELIMIT_CALC_SHIFT	10

/*
 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
 * will look to see if it needs to force writeback or throttling.
 */
static long ratelimit_pages = 32;

/* The following parameters are exported via /proc/sys/vm */

/*
 * Start background writeback (via writeback threads) at this percentage
 */
int dirty_background_ratio = 10;

/*
 * dirty_background_bytes starts at 0 (disabled) so that it is a function of
 * dirty_background_ratio * the amount of dirtyable memory
 */
unsigned long dirty_background_bytes;

/*
 * free highmem will not be subtracted from the total free memory
 * for calculating free ratios if vm_highmem_is_dirtyable is true
 */
int vm_highmem_is_dirtyable;

/*
 * The generator of dirty data starts writeback at this percentage
 */
int vm_dirty_ratio = 20;

/*
 * vm_dirty_bytes starts at 0 (disabled) so that it is a function of
 * vm_dirty_ratio * the amount of dirtyable memory
 */
unsigned long vm_dirty_bytes;

/*
 * The interval between `kupdate'-style writebacks
 */
unsigned int dirty_writeback_interval = 5 * 100; /* centiseconds */

EXPORT_SYMBOL_GPL(dirty_writeback_interval);

/*
 * The longest time for which data is allowed to remain dirty
 */
unsigned int dirty_expire_interval = 30 * 100; /* centiseconds */

/*
 * Flag that makes the machine dump writes/reads and block dirtyings.
 */
int block_dump;

/*
 * Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies:
 * a full sync is triggered after this time elapses without any disk activity.
 */
int laptop_mode;

EXPORT_SYMBOL(laptop_mode);

/* End of sysctl-exported parameters */

unsigned long global_dirty_limit;

/*
 * Scale the writeback cache size proportional to the relative writeout speeds.
 *
 * We do this by keeping a floating proportion between BDIs, based on page
 * writeback completions [end_page_writeback()]. Those devices that write out
 * pages fastest will get the larger share, while the slower will get a smaller
 * share.
 *
 * We use page writeout completions because we are interested in getting rid of
 * dirty pages. Having them written out is the primary goal.
 *
 * We introduce a concept of time, a period over which we measure these events,
 * because demand can/will vary over time. The length of this period itself is
 * measured in page writeback completions.
 *
 */
static struct fprop_global writeout_completions;

static void writeout_period(unsigned long t);
/* Timer for aging of writeout_completions */
static struct timer_list writeout_period_timer =
		TIMER_DEFERRED_INITIALIZER(writeout_period, 0, 0);
static unsigned long writeout_period_time = 0;

/*
 * Length of period for aging writeout fractions of bdis. This is an
 * arbitrarily chosen number. The longer the period, the slower fractions will
 * reflect changes in current writeout rate.
 */
#define VM_COMPLETIONS_PERIOD_LEN (3*HZ)

/*
 * Work out the current dirty-memory clamping and background writeout
 * thresholds.
 *
 * The main aim here is to lower them aggressively if there is a lot of mapped
 * memory around.  To avoid stressing page reclaim with lots of unreclaimable
 * pages.  It is better to clamp down on writers than to start swapping, and
 * performing lots of scanning.
 *
 * We only allow 1/2 of the currently-unmapped memory to be dirtied.
 *
 * We don't permit the clamping level to fall below 5% - that is getting rather
 * excessive.
 *
 * We make sure that the background writeout level is below the adjusted
 * clamping level.
 */

/*
 * In a memory zone, there is a certain amount of pages we consider
 * available for the page cache, which is essentially the number of
 * free and reclaimable pages, minus some zone reserves to protect
 * lowmem and the ability to uphold the zone's watermarks without
 * requiring writeback.
 *
 * This number of dirtyable pages is the base value against which the
 * user-configurable dirty ratio determines the effective number of
 * pages that are allowed to be actually dirtied.  Per individual zone,
 * or globally by using the sum of dirtyable pages over all zones.
 *
 * Because the user is allowed to specify the dirty limit globally as
 * absolute number of bytes, calculating the per-zone dirty limit can
 * require translating the configured limit into a percentage of
 * global dirtyable memory first.
 */

/**
 * zone_dirtyable_memory - number of dirtyable pages in a zone
 * @zone: the zone
 *
 * Returns the zone's number of pages potentially available for dirty
 * page cache.  This is the base value for the per-zone dirty limits.
 */
static unsigned long zone_dirtyable_memory(struct zone *zone)
{
	unsigned long nr_pages;

	nr_pages = zone_page_state(zone, NR_FREE_PAGES);
	nr_pages -= min(nr_pages, zone->dirty_balance_reserve);

	nr_pages += zone_page_state(zone, NR_INACTIVE_FILE);
	nr_pages += zone_page_state(zone, NR_ACTIVE_FILE);

	return nr_pages;
}

static unsigned long highmem_dirtyable_memory(unsigned long total)
{
#ifdef CONFIG_HIGHMEM
	int node;
	unsigned long x = 0;

	for_each_node_state(node, N_HIGH_MEMORY) {
		struct zone *z = &NODE_DATA(node)->node_zones[ZONE_HIGHMEM];

		x += zone_dirtyable_memory(z);
	}
	/*
	 * Unreclaimable memory (kernel memory or anonymous memory
	 * without swap) can bring down the dirtyable pages below
	 * the zone's dirty balance reserve and the above calculation
	 * will underflow.  However we still want to add in nodes
	 * which are below threshold (negative values) to get a more
	 * accurate calculation but make sure that the total never
	 * underflows.
	 */
	if ((long)x < 0)
		x = 0;

	/*
	 * Make sure that the number of highmem pages is never larger
	 * than the number of the total dirtyable memory.  This can only
	 * occur in very strange VM situations but we want to make sure
	 * that this does not occur.
	 */
	return min(x, total);
#else
	return 0;
#endif
}

/**
 * global_dirtyable_memory - number of globally dirtyable pages
 *
 * Returns the global number of pages potentially available for dirty
 * page cache.  This is the base value for the global dirty limits.
 */
static unsigned long global_dirtyable_memory(void)
{
	unsigned long x;

	x = global_page_state(NR_FREE_PAGES);
	x -= min(x, dirty_balance_reserve);

	x += global_page_state(NR_INACTIVE_FILE);
	x += global_page_state(NR_ACTIVE_FILE);

	if (!vm_highmem_is_dirtyable)
		x -= highmem_dirtyable_memory(x);

	return x + 1;	/* Ensure that we never return 0 */
}

/*
 * global_dirty_limits - background-writeback and dirty-throttling thresholds
 *
 * Calculate the dirty thresholds based on sysctl parameters
 * - vm.dirty_background_ratio  or  vm.dirty_background_bytes
 * - vm.dirty_ratio             or  vm.dirty_bytes
 * The dirty limits will be lifted by 1/4 for PF_LESS_THROTTLE (ie. nfsd) and
 * real-time tasks.
 */
void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
{
	unsigned long background;
	unsigned long dirty;
	unsigned long uninitialized_var(available_memory);
	struct task_struct *tsk;

	if (!vm_dirty_bytes || !dirty_background_bytes)
		available_memory = global_dirtyable_memory();

	if (vm_dirty_bytes)
		dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE);
	else
		dirty = (vm_dirty_ratio * available_memory) / 100;

	if (dirty_background_bytes)
		background = DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE);
	else
		background = (dirty_background_ratio * available_memory) / 100;

	if (background >= dirty)
		background = dirty / 2;
	tsk = current;
	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
		background += background / 4;
		dirty += dirty / 4;
	}
	*pbackground = background;
	*pdirty = dirty;
	trace_global_dirty_state(background, dirty);
}

/**
 * zone_dirty_limit - maximum number of dirty pages allowed in a zone
 * @zone: the zone
 *
 * Returns the maximum number of dirty pages allowed in a zone, based
 * on the zone's dirtyable memory.
 */
static unsigned long zone_dirty_limit(struct zone *zone)
{
	unsigned long zone_memory = zone_dirtyable_memory(zone);
	struct task_struct *tsk = current;
	unsigned long dirty;

	if (vm_dirty_bytes)
		dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE) *
			zone_memory / global_dirtyable_memory();
	else
		dirty = vm_dirty_ratio * zone_memory / 100;

	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk))
		dirty += dirty / 4;

	return dirty;
}

/**
 * zone_dirty_ok - tells whether a zone is within its dirty limits
 * @zone: the zone to check
 *
 * Returns %true when the dirty pages in @zone are within the zone's
 * dirty limit, %false if the limit is exceeded.
 */
bool zone_dirty_ok(struct zone *zone)
{
	unsigned long limit = zone_dirty_limit(zone);

	return zone_page_state(zone, NR_FILE_DIRTY) +
	       zone_page_state(zone, NR_UNSTABLE_NFS) +
	       zone_page_state(zone, NR_WRITEBACK) <= limit;
}

int dirty_background_ratio_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret;

	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		dirty_background_bytes = 0;
	return ret;
}

int dirty_background_bytes_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		dirty_background_ratio = 0;
	return ret;
}

int dirty_ratio_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int old_ratio = vm_dirty_ratio;
	int ret;

	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
		writeback_set_ratelimit();
		vm_dirty_bytes = 0;
	}
	return ret;
}

int dirty_bytes_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	unsigned long old_bytes = vm_dirty_bytes;
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write && vm_dirty_bytes != old_bytes) {
		writeback_set_ratelimit();
		vm_dirty_ratio = 0;
	}
	return ret;
}

static unsigned long wp_next_time(unsigned long cur_time)
{
	cur_time += VM_COMPLETIONS_PERIOD_LEN;
	/* 0 has a special meaning... */
	if (!cur_time)
		return 1;
	return cur_time;
}

/*
 * Increment the BDI's writeout completion count and the global writeout
 * completion count. Called from test_clear_page_writeback().
 */
static inline void __bdi_writeout_inc(struct backing_dev_info *bdi)
{
	__inc_bdi_stat(bdi, BDI_WRITTEN);
	__fprop_inc_percpu_max(&writeout_completions, &bdi->completions,
			       bdi->max_prop_frac);
	/* First event after period switching was turned off? */
	if (unlikely(!writeout_period_time)) {
		/*
		 * We can race with other __bdi_writeout_inc calls here but
		 * it does not cause any harm since the resulting time when
		 * timer will fire and what is in writeout_period_time will be
		 * roughly the same.
		 */
		writeout_period_time = wp_next_time(jiffies);
		mod_timer(&writeout_period_timer, writeout_period_time);
	}
}

void bdi_writeout_inc(struct backing_dev_info *bdi)
{
	unsigned long flags;

	local_irq_save(flags);
	__bdi_writeout_inc(bdi);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(bdi_writeout_inc);

/*
 * Obtain an accurate fraction of the BDI's portion.
 */
static void bdi_writeout_fraction(struct backing_dev_info *bdi,
				  long *numerator, long *denominator)
{
	fprop_fraction_percpu(&writeout_completions, &bdi->completions,
				numerator, denominator);
}

/*
 * On an idle system, we can be called long after we were scheduled because we
 * use deferred timers, so account for missed periods.
 */
static void writeout_period(unsigned long t)
{
	int miss_periods = (jiffies - writeout_period_time) /
				 VM_COMPLETIONS_PERIOD_LEN;

	if (fprop_new_period(&writeout_completions, miss_periods + 1)) {
		writeout_period_time = wp_next_time(writeout_period_time +
				miss_periods * VM_COMPLETIONS_PERIOD_LEN);
		mod_timer(&writeout_period_timer, writeout_period_time);
	} else {
		/*
		 * Aging has zeroed all fractions. Stop wasting CPU on period
		 * updates.
		 */
		writeout_period_time = 0;
	}
}

/*
 * bdi_min_ratio keeps the sum of the minimum dirty shares of all
 * registered backing devices, which, for obvious reasons, can not
 * exceed 100%.
 */
static unsigned int bdi_min_ratio;

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
{
	int ret = 0;

	spin_lock_bh(&bdi_lock);
	if (min_ratio > bdi->max_ratio) {
		ret = -EINVAL;
	} else {
		min_ratio -= bdi->min_ratio;
		if (bdi_min_ratio + min_ratio < 100) {
			bdi_min_ratio += min_ratio;
			bdi->min_ratio += min_ratio;
		} else {
			ret = -EINVAL;
		}
	}
	spin_unlock_bh(&bdi_lock);

	return ret;
}

int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio)
{
	int ret = 0;

	if (max_ratio > 100)
		return -EINVAL;

	spin_lock_bh(&bdi_lock);
	if (bdi->min_ratio > max_ratio) {
		ret = -EINVAL;
	} else {
		bdi->max_ratio = max_ratio;
		bdi->max_prop_frac = (FPROP_FRAC_BASE * max_ratio) / 100;
	}
	spin_unlock_bh(&bdi_lock);

	return ret;
}
EXPORT_SYMBOL(bdi_set_max_ratio);

/* The dirty page level below which tasks are not throttled at all */
static unsigned long dirty_freerun_ceiling(unsigned long thresh,
					   unsigned long bg_thresh)
{
	return (thresh + bg_thresh) / 2;
}

/* The limit used for position control; never below the tracked global_dirty_limit */
static unsigned long hard_dirty_limit(unsigned long thresh)
{
	return max(thresh, global_dirty_limit);
}

/**
 * bdi_dirty_limit - @bdi's share of dirty throttling threshold
 * @bdi: the backing_dev_info to query
 * @dirty: global dirty limit in pages
 *
 * Returns @bdi's dirty limit in pages. The term "dirty" in the context of
 * dirty balancing includes all PG_dirty, PG_writeback and NFS unstable pages.
 *
 * Note that balance_dirty_pages() will only seriously take it as a hard limit
 * when sleeping max_pause per page is not enough to keep the dirty pages under
 * control. For example, when the device is completely stalled due to some error
 * conditions, or when there are 1000 dd tasks writing to a slow 10MB/s USB key.
 * In other normal situations, it acts more gently by throttling the tasks more
 * (rather than completely blocking them) when the bdi dirty pages go high.
 *
 * It allocates high/low dirty limits to fast/slow devices, in order to prevent
 * - starving fast devices
 * - piling up dirty pages (that will take long time to sync) on slow devices
 *
 * The bdi's share of dirty limit will be adapting to its throughput and
 * bounded by the bdi->min_ratio and/or bdi->max_ratio parameters, if set.
 */
unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, unsigned long dirty)
{
	u64 bdi_dirty;
	long numerator, denominator;

	/*
	 * Calculate this BDI's share of the dirty ratio.
	 */
	bdi_writeout_fraction(bdi, &numerator, &denominator);

	bdi_dirty = (dirty * (100 - bdi_min_ratio)) / 100;
	bdi_dirty *= numerator;
	do_div(bdi_dirty, denominator);

	bdi_dirty += (dirty * bdi->min_ratio) / 100;
	if (bdi_dirty > (dirty * bdi->max_ratio) / 100)
		bdi_dirty = dirty * bdi->max_ratio / 100;

	return bdi_dirty;
}

/*
 *                           setpoint - dirty 3
 *        f(dirty) := 1.0 + (----------------)
 *                           limit - setpoint
 *
 * it's a 3rd order polynomial that satisfies
 *
 * (1) f(freerun)  = 2.0 => ramp up dirty_ratelimit reasonably fast
 * (2) f(setpoint) = 1.0 => the balance point
 * (3) f(limit)    = 0   => the hard limit
 * (4) df/dx      <= 0	 => negative feedback control
 * (5) the closer to setpoint, the smaller |df/dx| (and the reverse)
 *     => fast response on large errors; small oscillation near setpoint
 */
static inline long long pos_ratio_polynom(unsigned long setpoint,
					  unsigned long dirty,
					  unsigned long limit)
{
	long long pos_ratio;
	long x;

	x = div_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT,
		    limit - setpoint + 1);
	pos_ratio = x;
	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
	pos_ratio += 1 << RATELIMIT_CALC_SHIFT;

	return clamp(pos_ratio, 0LL, 2LL << RATELIMIT_CALC_SHIFT);
}

/*
 * Dirty position control.
 *
 * (o) global/bdi setpoints
 *
 * We want the dirty pages be balanced around the global/bdi setpoints.
 * When the number of dirty pages is higher/lower than the setpoint, the
 * dirty position control ratio (and hence task dirty ratelimit) will be
 * decreased/increased to bring the dirty pages back to the setpoint.
 *
 *     pos_ratio = 1 << RATELIMIT_CALC_SHIFT
 *
 *     if (dirty < setpoint) scale up   pos_ratio
 *     if (dirty > setpoint) scale down pos_ratio
 *
 *     if (bdi_dirty < bdi_setpoint) scale up   pos_ratio
 *     if (bdi_dirty > bdi_setpoint) scale down pos_ratio
 *
 *     task_ratelimit = dirty_ratelimit * pos_ratio >> RATELIMIT_CALC_SHIFT
 *
 * (o) global control line
 *
 *     ^ pos_ratio
 *     |
 *     |            |<===== global dirty control scope ======>|
 * 2.0 .............*
 *     |            .*
 *     |            . *
 *     |            .   *
 *     |            .     *
 *     |            .        *
 *     |            .            *
 * 1.0 ................................*
 *     |            .                  .     *
 *     |            .                  .          *
 *     |            .                  .              *
 *     |            .                  .                 *
 *     |            .                  .                    *
 *   0 +------------.------------------.----------------------*------------->
 *           freerun^          setpoint^                 limit^   dirty pages
 *
 * (o) bdi control line
 *
 *     ^ pos_ratio
 *     |
 *     |                 *
 *     |                   *
 *     |                     *
 *     |                       *
 *     |                         * |<=========== span ============>|
 * 1.0 .......................*
 *     |                      . *
 *     |                      .    *
 *     |                      .       *
 *     |                      .          *
 *     |                      .             *
 *     |                      .                *
 *     |                      .                   *
 *     |                      .                      *
 *     |                      .                         *
 *     |                      .                            *
 *     |                      .                               *
 * 1/4 ...............................................* * * * * * * * * * * *
 *     |                      .                          .
 *     |                      .                            .
 *     |                      .                              .
 *   0 +----------------------.-------------------------------.------------->
 *                bdi_setpoint^                    x_intercept^
 *
 * The bdi control line won't drop below pos_ratio=1/4, so that bdi_dirty can
 * be smoothly throttled down to normal if it starts high in situations like
 * - start writing to a slow SD card and a fast disk at the same time. The SD
 *   card's bdi_dirty may rush to many times higher than bdi_setpoint.
 * - the bdi dirty thresh drops quickly due to change of JBOD workload
 */
static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
					unsigned long thresh,
					unsigned long bg_thresh,
					unsigned long dirty,
					unsigned long bdi_thresh,
					unsigned long bdi_dirty)
{
	unsigned long write_bw = bdi->avg_write_bandwidth;
	unsigned long freerun = dirty_freerun_ceiling(thresh, bg_thresh);
	unsigned long limit = hard_dirty_limit(thresh);
	unsigned long x_intercept;
	unsigned long setpoint;		/* dirty pages' target balance point */
	unsigned long bdi_setpoint;
	unsigned long span;
	long long pos_ratio;		/* for scaling up/down the rate limit */
	long x;

	if (unlikely(dirty >= limit))
		return 0;

	/*
	 * global setpoint
	 *
	 * See comment for pos_ratio_polynom().
	 */
	setpoint = (freerun + limit) / 2;
	pos_ratio = pos_ratio_polynom(setpoint, dirty, limit);

	/*
	 * The strictlimit feature is a tool preventing mistrusted filesystems
	 * from growing a large number of dirty pages before throttling. For
	 * such filesystems balance_dirty_pages always checks bdi counters
	 * against bdi limits. Even if global "nr_dirty" is under "freerun".
	 * This is especially important for fuse which sets bdi->max_ratio to
	 * 1% by default. Without strictlimit feature, fuse writeback may
	 * consume arbitrary amount of RAM because it is accounted in
	 * NR_WRITEBACK_TEMP which is not involved in calculating "nr_dirty".
	 *
	 * Here, in bdi_position_ratio(), we calculate pos_ratio based on
	 * two values: bdi_dirty and bdi_thresh. Let's consider an example:
	 * total amount of RAM is 16GB, bdi->max_ratio is equal to 1%, global
	 * limits are set by default to 10% and 20% (background and throttle).
	 * Then bdi_thresh is 1% of 20% of 16GB. This amounts to ~8K pages.
	 * bdi_dirty_limit(bdi, bg_thresh) is about ~4K pages. bdi_setpoint is
	 * about ~6K pages (as the average of background and throttle bdi
	 * limits). The 3rd order polynomial will provide positive feedback if
	 * bdi_dirty is under bdi_setpoint and vice versa.
	 *
	 * Note, that we cannot use global counters in these calculations
	 * because we want to throttle process writing to a strictlimit BDI
	 * much earlier than global "freerun" is reached (~23MB vs. ~2.3GB
	 * in the example above).
	 */
	if (unlikely(bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
		long long bdi_pos_ratio;
		unsigned long bdi_bg_thresh;

		if (bdi_dirty < 8)
			return min_t(long long, pos_ratio * 2,
				     2 << RATELIMIT_CALC_SHIFT);

		if (bdi_dirty >= bdi_thresh)
			return 0;

		bdi_bg_thresh = div_u64((u64)bdi_thresh * bg_thresh, thresh);
		bdi_setpoint = dirty_freerun_ceiling(bdi_thresh,
						     bdi_bg_thresh);

		if (bdi_setpoint == 0 || bdi_setpoint == bdi_thresh)
			return 0;

		bdi_pos_ratio = pos_ratio_polynom(bdi_setpoint, bdi_dirty,
						  bdi_thresh);

		/*
		 * Typically, for strictlimit case, bdi_setpoint << setpoint
		 * and pos_ratio >> bdi_pos_ratio. In other words, global
		 * state ("dirty") is not the limiting factor and we have to
		 * make the decision based on bdi counters. But there is an
		 * important case when global pos_ratio should get precedence:
		 * global limits are exceeded (e.g. due to activities on other
		 * BDIs) while the given strictlimit BDI is below its limit.
		 *
		 * "pos_ratio * bdi_pos_ratio" would work for the case above,
		 * but it would look too non-natural for the case of all
		 * activity in the system coming from a single strictlimit BDI
		 * with bdi->max_ratio == 100%.
		 *
		 * Note that min() below somewhat changes the dynamics of the
		 * control system. Normally, pos_ratio value can be well over 3
		 * (when globally we are at freerun and bdi is well below bdi
		 * setpoint). Now the maximum pos_ratio in the same situation
		 * is 2. We might want to tweak this if we observe the control
		 * system is too slow to adapt.
		 */
		return min(pos_ratio, bdi_pos_ratio);
	}

	/*
	 * We have computed basic pos_ratio above based on global situation. If
	 * the bdi is over/under its share of dirty pages, we want to scale
	 * pos_ratio further down/up. That is done by the following mechanism.
	 */

	/*
	 * bdi setpoint
	 *
	 *        f(bdi_dirty) := 1.0 + k * (bdi_dirty - bdi_setpoint)
	 *
	 *                        x_intercept - bdi_dirty
	 *                     := --------------------------
	 *                        x_intercept - bdi_setpoint
	 *
	 * The main bdi control line is a linear function that satisfies
	 *
	 * (1) f(bdi_setpoint) = 1.0
	 * (2) k = - 1 / (8 * write_bw)  (in single bdi case)
	 *     or equally: x_intercept = bdi_setpoint + 8 * write_bw
	 *
	 * For single bdi case, the dirty pages are observed to fluctuate
	 * regularly within range
	 *        [bdi_setpoint - write_bw/2, bdi_setpoint + write_bw/2]
	 * for various filesystems, where (2) can yield a reasonable 12.5%
	 * fluctuation range for pos_ratio.
	 *
	 * For JBOD case, bdi_thresh (not bdi_dirty!) could fluctuate up to its
	 * own size, so move the slope over accordingly and choose a slope that
	 * yields 100% pos_ratio fluctuation on suddenly doubled bdi_thresh.
	 */
	if (unlikely(bdi_thresh > thresh))
		bdi_thresh = thresh;
	/*
	 * It's very possible that bdi_thresh is close to 0 not because the
	 * device is slow, but that it has remained inactive for a long time.
	 * Grant such devices a reasonably good (hopefully IO efficient)
	 * threshold, so that the occasional writes won't be blocked and active
	 * writes can ramp up the threshold quickly.
	 */
	bdi_thresh = max(bdi_thresh, (limit - dirty) / 8);
	/*
	 * scale global setpoint to bdi's:
	 *	bdi_setpoint = setpoint * bdi_thresh / thresh
	 */
	x = div_u64((u64)bdi_thresh << 16, thresh + 1);
	bdi_setpoint = setpoint * (u64)x >> 16;
	/*
	 * Use span=(8*write_bw) in single bdi case as indicated by
	 * (thresh - bdi_thresh ~= 0) and transit to bdi_thresh in JBOD case.
	 *
	 *        bdi_thresh                    thresh - bdi_thresh
	 * span = ---------- * (8 * write_bw) + ------------------- * bdi_thresh
	 *          thresh                            thresh
	 */
	span = (thresh - bdi_thresh + 8 * write_bw) * (u64)x >> 16;
	x_intercept = bdi_setpoint + span;

	if (bdi_dirty < x_intercept - span / 4) {
		pos_ratio = div_u64(pos_ratio * (x_intercept - bdi_dirty),
				    x_intercept - bdi_setpoint + 1);
	} else
		pos_ratio /= 4;

	/*
	 * bdi reserve area, safeguard against dirty pool underrun and disk idle.
	 * It may push the desired control point of global dirty pages higher
	 * than setpoint.
	 */
	x_intercept = bdi_thresh / 2;
	if (bdi_dirty < x_intercept) {
		if (bdi_dirty > x_intercept / 8)
			pos_ratio = div_u64(pos_ratio * x_intercept, bdi_dirty);
		else
			pos_ratio *= 8;
	}

	return pos_ratio;
}

static void bdi_update_write_bandwidth(struct backing_dev_info *bdi,
				       unsigned long elapsed,
				       unsigned long written)
{
	const unsigned long period = roundup_pow_of_two(3 * HZ);
	unsigned long avg = bdi->avg_write_bandwidth;
	unsigned long old = bdi->write_bandwidth;
	u64 bw;

	/*
	 * bw = written * HZ / elapsed
	 *
	 *                   bw * elapsed + write_bandwidth * (period - elapsed)
	 * write_bandwidth = ---------------------------------------------------
	 *                                          period
	 */
	bw = written - bdi->written_stamp;
	bw *= HZ;
	if (unlikely(elapsed > period)) {
		do_div(bw, elapsed);
		avg = bw;
		goto out;
	}
	bw += (u64)bdi->write_bandwidth * (period - elapsed);
	bw >>= ilog2(period);

	/*
	 * one more level of smoothing, for filtering out sudden spikes
	 */
	if (avg > old && old >= (unsigned long)bw)
		avg -= (avg - old) >> 3;

	if (avg < old && old <= (unsigned long)bw)
		avg += (old - avg) >> 3;

out:
	bdi->write_bandwidth = bw;
	bdi->avg_write_bandwidth = avg;
}

/*
 * The global dirtyable memory and dirty threshold could be suddenly knocked
 * down by a large amount (eg. on the startup of KVM in a swapless system).
 * This may throw the system into deep dirty exceeded state and throttle
 * heavy/light dirtiers alike. To retain good responsiveness, maintain
 * global_dirty_limit for tracking slowly down to the knocked down dirty
 * threshold.
 */
static void update_dirty_limit(unsigned long thresh, unsigned long dirty)
{
	unsigned long limit = global_dirty_limit;

	/*
	 * Follow up in one step.
	 */
	if (limit < thresh) {
		limit = thresh;
		goto update;
	}

	/*
	 * Follow down slowly. Use the higher one as the target, because thresh
	 * may drop below dirty. This is exactly the reason to introduce
	 * global_dirty_limit which is guaranteed to lie above the dirty pages.
	 */
	thresh = max(thresh, dirty);
	if (limit > thresh) {
		limit -= (limit - thresh) >> 5;
		goto update;
	}
	return;
update:
	global_dirty_limit = limit;
}

static void global_update_bandwidth(unsigned long thresh,
				    unsigned long dirty,
				    unsigned long now)
{
	static DEFINE_SPINLOCK(dirty_lock);
	static unsigned long update_time;

	/*
	 * check locklessly first to optimize away locking for most of the time
	 */
	if (time_before(now, update_time + BANDWIDTH_INTERVAL))
		return;

	spin_lock(&dirty_lock);
	if (time_after_eq(now, update_time + BANDWIDTH_INTERVAL)) {
		update_dirty_limit(thresh, dirty);
		update_time = now;
	}
	spin_unlock(&dirty_lock);
}

/*
 * Maintain bdi->dirty_ratelimit, the base dirty throttle rate.
 *
 * Normal bdi tasks will be curbed at or below it in long term.
 * Obviously it should be around (write_bw / N) when there are N dd tasks.
 */
static void bdi_update_dirty_ratelimit(struct backing_dev_info *bdi,
				       unsigned long thresh,
				       unsigned long bg_thresh,
				       unsigned long dirty,
				       unsigned long bdi_thresh,
				       unsigned long bdi_dirty,
				       unsigned long dirtied,
				       unsigned long elapsed)
{
	unsigned long freerun = dirty_freerun_ceiling(thresh, bg_thresh);
	unsigned long limit = hard_dirty_limit(thresh);
	unsigned long setpoint = (freerun + limit) / 2;
	unsigned long write_bw = bdi->avg_write_bandwidth;
	unsigned long dirty_ratelimit = bdi->dirty_ratelimit;
	unsigned long dirty_rate;
	unsigned long task_ratelimit;
	unsigned long balanced_dirty_ratelimit;
	unsigned long pos_ratio;
	unsigned long step;
	unsigned long x;

	/*
	 * The dirty rate will match the writeout rate in long term, except
	 * when dirty pages are truncated by userspace or re-dirtied by FS.
	 */
	dirty_rate = (dirtied - bdi->dirtied_stamp) * HZ / elapsed;

	pos_ratio = bdi_position_ratio(bdi, thresh, bg_thresh, dirty,
				       bdi_thresh, bdi_dirty);
	/*
	 * task_ratelimit reflects each dd's dirty rate for the past 200ms.
	 */
	task_ratelimit = (u64)dirty_ratelimit *
					pos_ratio >> RATELIMIT_CALC_SHIFT;
	task_ratelimit++; /* it helps to ramp up dirty_ratelimit from tiny values */

	/*
	 * A linear estimation of the "balanced" throttle rate. The theory is,
	 * if there are N dd tasks, each throttled at task_ratelimit, the bdi's
	 * dirty_rate will be measured to be (N * task_ratelimit). So the below
	 * formula will yield the balanced rate limit (write_bw / N).
	 *
	 * Note that the expanded form is not a pure rate feedback:
	 *	rate_(i+1) = rate_(i) * (write_bw / dirty_rate)		     (1)
	 * but also takes pos_ratio into account:
	 *	rate_(i+1) = rate_(i) * (write_bw / dirty_rate) * pos_ratio  (2)
	 *
	 * (1) is not realistic because pos_ratio also takes part in balancing
	 * the dirty rate.  Consider the state
	 *	pos_ratio = 0.5						     (3)
	 *	rate = 2 * (write_bw / N)				     (4)
	 * If (1) is used, it will get stuck in that state! Because each dd
	 * will be throttled at
	 *	task_ratelimit = pos_ratio * rate = (write_bw / N)	     (5)
	 * yielding
	 *	dirty_rate = N * task_ratelimit = write_bw		     (6)
	 * put (6) into (1) we get
	 *	rate_(i+1) = rate_(i)					     (7)
	 *
	 * So we end up using (2) to always keep
	 *	rate_(i+1) ~= (write_bw / N)				     (8)
	 * regardless of the value of pos_ratio. As long as (8) is satisfied,
	 * pos_ratio is able to drive itself to 1.0, which is not only where
	 * the dirty count meets the setpoint, but also where the slope of
	 * pos_ratio is most flat and hence task_ratelimit is least fluctuated.
	 */
	balanced_dirty_ratelimit = div_u64((u64)task_ratelimit * write_bw,
					   dirty_rate | 1);
	/*
	 * balanced_dirty_ratelimit ~= (write_bw / N) <= write_bw
	 */
	if (unlikely(balanced_dirty_ratelimit > write_bw))
		balanced_dirty_ratelimit = write_bw;

	/*
	 * We could safely do this and return immediately:
	 *
	 *	bdi->dirty_ratelimit = balanced_dirty_ratelimit;
	 *
	 * However to get a more stable dirty_ratelimit, the below elaborated
	 * code makes use of task_ratelimit to filter out singular points and
	 * limit the step size.
	 *
	 * The below code essentially only uses the relative value of
	 *
	 *	task_ratelimit - dirty_ratelimit
	 *	= (pos_ratio - 1) * dirty_ratelimit
	 *
	 * which reflects the direction and size of dirty position error.
	 */

	/*
	 * dirty_ratelimit will follow balanced_dirty_ratelimit iff
	 * task_ratelimit is on the same side of dirty_ratelimit, too.
	 * For example, when
	 * - dirty_ratelimit > balanced_dirty_ratelimit
	 * - dirty_ratelimit > task_ratelimit (dirty pages are above setpoint)
	 * lowering dirty_ratelimit will help meet both the position and rate
	 * control targets. Otherwise, don't update dirty_ratelimit if it will
	 * only help meet the rate target. After all, what the users ultimately
	 * feel and care about are a stable dirty rate and small position error.
	 *
	 * |task_ratelimit - dirty_ratelimit| is used to limit the step size
	 * and filter out the singular points of balanced_dirty_ratelimit,
	 * which keeps jumping around randomly and can even leap far away at
	 * times due to the small 200ms estimation period of dirty_rate (we
	 * want to keep that period small to reduce time lags).
	 */
	step = 0;

	/*
	 * For strictlimit case, calculations above were based on bdi counters
	 * and limits (starting from pos_ratio = bdi_position_ratio() and up to
	 * balanced_dirty_ratelimit = task_ratelimit * write_bw / dirty_rate).
	 * Hence, to calculate "step" properly, we have to use bdi_dirty as
	 * "dirty" and bdi_setpoint as "setpoint".
	 *
	 * We ramp up dirty_ratelimit forcibly if bdi_dirty is low because
	 * it's possible that bdi_thresh is close to zero due to inactivity
	 * of backing device (see the implementation of bdi_dirty_limit()).
	 */
	if (unlikely(bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
		dirty = bdi_dirty;
		if (bdi_dirty < 8)
			setpoint = bdi_dirty + 1;
		else
			setpoint = (bdi_thresh +
				    bdi_dirty_limit(bdi, bg_thresh)) / 2;
	}

	if (dirty < setpoint) {
		x = min(bdi->balanced_dirty_ratelimit,
			min(balanced_dirty_ratelimit, task_ratelimit));
		if (dirty_ratelimit < x)
			step = x - dirty_ratelimit;
	} else {
		x = max(bdi->balanced_dirty_ratelimit,
			max(balanced_dirty_ratelimit, task_ratelimit));
		if (dirty_ratelimit > x)
			step = dirty_ratelimit - x;
	}

	/*
	 * Don't pursue 100% rate matching. It's impossible since the balanced
	 * rate itself is constantly fluctuating. So decrease the track speed
	 * when it gets close to the target. Helps eliminate pointless tremors.
	 */
	step >>= dirty_ratelimit / (2 * step + 1);
	/*
	 * Limit the tracking speed to avoid overshooting.
	 */
	step = (step + 7) / 8;

	if (dirty_ratelimit < balanced_dirty_ratelimit)
		dirty_ratelimit += step;
	else
		dirty_ratelimit -= step;

	bdi->dirty_ratelimit = max(dirty_ratelimit, 1UL);
	bdi->balanced_dirty_ratelimit = balanced_dirty_ratelimit;

	trace_bdi_dirty_ratelimit(bdi, dirty_rate, task_ratelimit);
}

void __bdi_update_bandwidth(struct backing_dev_info *bdi,
			    unsigned long thresh,
			    unsigned long bg_thresh,
			    unsigned long dirty,
			    unsigned long bdi_thresh,
			    unsigned long bdi_dirty,
			    unsigned long start_time)
{
	unsigned long now = jiffies;
	unsigned long elapsed = now - bdi->bw_time_stamp;
	unsigned long dirtied;
	unsigned long written;

	/*
	 * rate-limit, only update once every 200ms.
	 */
	if (elapsed < BANDWIDTH_INTERVAL)
		return;

	dirtied = percpu_counter_read(&bdi->bdi_stat[BDI_DIRTIED]);
	written = percpu_counter_read(&bdi->bdi_stat[BDI_WRITTEN]);

	/*
	 * Skip quiet periods when disk bandwidth is under-utilized.
	 * (at least 1s idle time between two flusher runs)
	 */
	if (elapsed > HZ && time_before(bdi->bw_time_stamp, start_time))
		goto snapshot;

	if (thresh) {
		global_update_bandwidth(thresh, dirty, now);
		bdi_update_dirty_ratelimit(bdi, thresh, bg_thresh, dirty,
					   bdi_thresh, bdi_dirty,
					   dirtied, elapsed);
	}
	bdi_update_write_bandwidth(bdi, elapsed, written);

snapshot:
	bdi->dirtied_stamp = dirtied;
	bdi->written_stamp = written;
	bdi->bw_time_stamp = now;
}

static void bdi_update_bandwidth(struct backing_dev_info *bdi,
				 unsigned long thresh,
				 unsigned long bg_thresh,
				 unsigned long dirty,
				 unsigned long bdi_thresh,
				 unsigned long bdi_dirty,
				 unsigned long start_time)
{
	if (time_is_after_eq_jiffies(bdi->bw_time_stamp + BANDWIDTH_INTERVAL))
		return;
	spin_lock(&bdi->wb.list_lock);
	__bdi_update_bandwidth(bdi, thresh, bg_thresh, dirty,
			       bdi_thresh, bdi_dirty, start_time);
	spin_unlock(&bdi->wb.list_lock);
}

/*
 * After a task dirtied this many pages, balance_dirty_pages_ratelimited()
 * will look to see if it needs to start dirty throttling.
 *
 * If dirty_poll_interval is too low, big NUMA machines will call the expensive
 * global_page_state() too often. So scale it near-sqrt to the safety margin
 * (the number of pages we may dirty without exceeding the dirty limits).
 */
static unsigned long dirty_poll_interval(unsigned long dirty,
					 unsigned long thresh)
{
	if (thresh > dirty)
		return 1UL << (ilog2(thresh - dirty) >> 1);

	return 1;
}

static unsigned long bdi_max_pause(struct backing_dev_info *bdi,
				   unsigned long bdi_dirty)
{
	unsigned long bw = bdi->avg_write_bandwidth;
	unsigned long t;

	/*
	 * Limit pause time for small memory systems. If we sleep for too long,
	 * a small pool of dirty/writeback pages may go empty and the disk may
	 * go idle.
	 *
	 * 8 serves as the safety ratio.
	 */
	t = bdi_dirty / (1 + bw / roundup_pow_of_two(1 + HZ / 8));
	t++;

	return min_t(unsigned long, t, MAX_PAUSE);
}

static long bdi_min_pause(struct backing_dev_info *bdi,
			  long max_pause,
			  unsigned long task_ratelimit,
			  unsigned long dirty_ratelimit,
			  int *nr_dirtied_pause)
{
	long hi = ilog2(bdi->avg_write_bandwidth);
	long lo = ilog2(bdi->dirty_ratelimit);
	long t;		/* target pause */
	long pause;	/* estimated next pause */
	int pages;	/* target nr_dirtied_pause */

	/* target for 10ms pause on 1-dd case */
	t = max(1, HZ / 100);

	/*
	 * Scale up pause time for concurrent dirtiers in order to reduce CPU
	 * overheads.
	 *
	 * (N * 10ms) on 2^N concurrent tasks.
	 */
	if (hi > lo)
		t += (hi - lo) * (10 * HZ) / 1024;

	/*
	 * This is a bit convoluted. We try to base the next nr_dirtied_pause
	 * on the much more stable dirty_ratelimit. However the next pause time
	 * will be computed based on task_ratelimit and the two rate limits may
	 * depart considerably at some time. Especially if task_ratelimit goes
	 * below dirty_ratelimit/2 and the target pause is max_pause, the next
	 * pause time will be max_pause*2 _trimmed down_ to max_pause. As a
	 * result task_ratelimit won't be executed faithfully, which could
	 * eventually bring down dirty_ratelimit.
	 *
	 * We apply two rules to fix it up:
	 * 1) try to estimate the next pause time and if necessary, use a lower
	 *    nr_dirtied_pause so as not to exceed max_pause. When this happens,
	 *    nr_dirtied_pause will be "dancing" with task_ratelimit.
	 * 2) limit the target pause time to max_pause/2, so that the normal
	 *    small fluctuations of task_ratelimit won't trigger rule (1) and
	 *    nr_dirtied_pause will remain as stable as dirty_ratelimit.
	 */
	t = min(t, 1 + max_pause / 2);
	pages = dirty_ratelimit * t / roundup_pow_of_two(HZ);

	/*
	 * Tiny nr_dirtied_pause is found to hurt I/O performance in the test
	 * case fio-mmap-randwrite-64k, which does 16*{sync read, async write}.
	 * When the 16 consecutive reads are often interrupted by some dirty
	 * throttling pause during the async writes, cfq will go into idles
	 * (deadline is fine). So push nr_dirtied_pause as high as possible
	 * until it reaches DIRTY_POLL_THRESH=32 pages.
	 */
	if (pages < DIRTY_POLL_THRESH) {
		t = max_pause;
		pages = dirty_ratelimit * t / roundup_pow_of_two(HZ);
		if (pages > DIRTY_POLL_THRESH) {
			pages = DIRTY_POLL_THRESH;
			t = HZ * DIRTY_POLL_THRESH / dirty_ratelimit;
		}
	}

	pause = HZ * pages / (task_ratelimit + 1);
	if (pause > max_pause) {
		t = max_pause;
		pages = task_ratelimit * t / roundup_pow_of_two(HZ);
	}

	*nr_dirtied_pause = pages;
	/*
	 * The minimal pause time will normally be half the target pause time.
	 */
	return pages >= DIRTY_POLL_THRESH ? 1 + t / 2 : t;
}

static inline void bdi_dirty_limits(struct backing_dev_info *bdi,
				    unsigned long dirty_thresh,
				    unsigned long background_thresh,
				    unsigned long *bdi_dirty,
				    unsigned long *bdi_thresh,
				    unsigned long *bdi_bg_thresh)
{
	unsigned long bdi_reclaimable;

	/*
	 * bdi_thresh is not treated as some limiting factor as
	 * dirty_thresh, due to reasons
	 * - in JBOD setup, bdi_thresh can fluctuate a lot
	 * - in a system with HDD and USB key, the USB key may somehow
	 *   go into state (bdi_dirty >> bdi_thresh) either because
	 *   bdi_dirty starts high, or because bdi_thresh drops low.
	 *   In this case we don't want to hard throttle the USB key
	 *   dirtiers for 100 seconds until bdi_dirty drops under
	 *   bdi_thresh. Instead the auxiliary bdi control line in
	 *   bdi_position_ratio() will let the dirtier task progress
	 *   at some rate <= (write_bw / 2) for bringing down bdi_dirty.
	 */
	*bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);

	if (bdi_bg_thresh)
		*bdi_bg_thresh = div_u64((u64)*bdi_thresh *
					 background_thresh,
					 dirty_thresh);

	/*
	 * In order to avoid the stacked BDI deadlock we need
	 * to ensure we accurately count the 'dirty' pages when
	 * the threshold is low.
	 *
	 * Otherwise it would be possible to get thresh+n pages
	 * reported dirty, even though there are thresh-m pages
	 * actually dirty; with m+n sitting in the percpu
	 * deltas.
	 */
	if (*bdi_thresh < 2 * bdi_stat_error(bdi)) {
		bdi_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE);
		*bdi_dirty = bdi_reclaimable +
			     bdi_stat_sum(bdi, BDI_WRITEBACK);
	} else {
		bdi_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
		*bdi_dirty = bdi_reclaimable +
			     bdi_stat(bdi, BDI_WRITEBACK);
	}
}

/*
 * balance_dirty_pages() must be called by processes which are generating dirty
 * data.  It looks at the number of dirty pages in the machine and will force
 * the caller to wait once crossing the (background_thresh + dirty_thresh) / 2.
 * If we're over `background_thresh' then the writeback threads are woken to
 * perform some writeout.
 */
static void balance_dirty_pages(struct address_space *mapping,
				unsigned long pages_dirtied)
{
	unsigned long nr_reclaimable;	/* = file_dirty + unstable_nfs */
	unsigned long nr_dirty;  /* = file_dirty + writeback + unstable_nfs */
	unsigned long background_thresh;
	unsigned long dirty_thresh;
	long period;
	long pause;
	long max_pause;
	long min_pause;
	int nr_dirtied_pause;
	bool dirty_exceeded = false;
	unsigned long task_ratelimit;
	unsigned long dirty_ratelimit;
	unsigned long pos_ratio;
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	bool strictlimit = bdi->capabilities & BDI_CAP_STRICTLIMIT;
	unsigned long start_time = jiffies;

	for (;;) {
		unsigned long now = jiffies;
		unsigned long uninitialized_var(bdi_thresh);
		unsigned long thresh;
		unsigned long uninitialized_var(bdi_dirty);
		unsigned long dirty;
		unsigned long bg_thresh;

		/*
		 * Unstable writes are a feature of certain networked
		 * filesystems (i.e. NFS) in which data may have been
		 * written to the server's write cache, but has not yet
		 * been flushed to permanent storage.
		 */
		nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
					global_page_state(NR_UNSTABLE_NFS);
		nr_dirty = nr_reclaimable + global_page_state(NR_WRITEBACK);

		global_dirty_limits(&background_thresh, &dirty_thresh);

		if (unlikely(strictlimit)) {
			bdi_dirty_limits(bdi, dirty_thresh, background_thresh,
					 &bdi_dirty, &bdi_thresh, &bg_thresh);

			dirty = bdi_dirty;
			thresh = bdi_thresh;
		} else {
			dirty = nr_dirty;
			thresh = dirty_thresh;
			bg_thresh = background_thresh;
		}

		/*
		 * Throttle it only when the background writeback cannot
		 * catch up. This avoids (excessively) small writeouts
		 * when the bdi limits are ramping up in case of !strictlimit.
		 *
		 * In strictlimit case make decision based on the bdi counters
		 * and limits. Small writeouts when the bdi limits are ramping
		 * up are the price we consciously pay for strictlimit-ing.
		 */
		if (dirty <= dirty_freerun_ceiling(thresh, bg_thresh)) {
			current->dirty_paused_when = now;
			current->nr_dirtied = 0;
			current->nr_dirtied_pause =
				dirty_poll_interval(dirty, thresh);
			break;
		}

		if (unlikely(!writeback_in_progress(bdi)))
			bdi_start_background_writeback(bdi);

		if (!strictlimit)
			bdi_dirty_limits(bdi, dirty_thresh, background_thresh,
					 &bdi_dirty, &bdi_thresh, NULL);

		dirty_exceeded = (bdi_dirty > bdi_thresh) &&
				 ((nr_dirty > dirty_thresh) || strictlimit);
		if (dirty_exceeded && !bdi->dirty_exceeded)
			bdi->dirty_exceeded = 1;

		bdi_update_bandwidth(bdi, dirty_thresh, background_thresh,
				     nr_dirty, bdi_thresh, bdi_dirty,
				     start_time);

		dirty_ratelimit = bdi->dirty_ratelimit;
		pos_ratio = bdi_position_ratio(bdi, dirty_thresh,
					       background_thresh, nr_dirty,
					       bdi_thresh, bdi_dirty);
		task_ratelimit = ((u64)dirty_ratelimit * pos_ratio) >>
							RATELIMIT_CALC_SHIFT;
		max_pause = bdi_max_pause(bdi, bdi_dirty);
		min_pause = bdi_min_pause(bdi, max_pause,
					  task_ratelimit, dirty_ratelimit,
					  &nr_dirtied_pause);

		if (unlikely(task_ratelimit == 0)) {
			period = max_pause;
			pause = max_pause;
			goto pause;
		}
		period = HZ * pages_dirtied / task_ratelimit;
		pause = period;
		if (current->dirty_paused_when)
			pause -= now - current->dirty_paused_when;
		/*
		 * For less than 1s think time (ext3/4 may block the dirtier
		 * for up to 800ms from time to time on 1-HDD; so does xfs,
		 * however at much lower frequency), try to compensate it in
		 * future periods by updating the virtual time; otherwise just
		 * do a reset, as it may be a light dirtier.
		 */
		if (pause < min_pause) {
			trace_balance_dirty_pages(bdi,
						  dirty_thresh,
						  background_thresh,
						  nr_dirty,
						  bdi_thresh,
						  bdi_dirty,
						  dirty_ratelimit,
						  task_ratelimit,
						  pages_dirtied,
						  period,
						  min(pause, 0L),
						  start_time);
			if (pause < -HZ) {
				current->dirty_paused_when = now;
				current->nr_dirtied = 0;
			} else if (period) {
				current->dirty_paused_when += period;
				current->nr_dirtied = 0;
			} else if (current->nr_dirtied_pause <= pages_dirtied)
				current->nr_dirtied_pause += pages_dirtied;
			break;
		}
		if (unlikely(pause > max_pause)) {
			/* for occasional dropped task_ratelimit */
			now += min(pause - max_pause, max_pause);
			pause = max_pause;
		}

pause:
		trace_balance_dirty_pages(bdi,
					  dirty_thresh,
					  background_thresh,
					  nr_dirty,
					  bdi_thresh,
					  bdi_dirty,
					  dirty_ratelimit,
					  task_ratelimit,
					  pages_dirtied,
					  period,
					  pause,
					  start_time);
		__set_current_state(TASK_KILLABLE);
		io_schedule_timeout(pause);

		current->dirty_paused_when = now + pause;
		current->nr_dirtied = 0;
		current->nr_dirtied_pause = nr_dirtied_pause;

		/*
		 * This is typically equal to (nr_dirty < dirty_thresh) and can
		 * also keep "1000+ dd on a slow USB stick" under control.
		 */
		if (task_ratelimit)
			break;

		/*
		 * In the case of an unresponsive NFS server and the NFS dirty
		 * pages exceeding dirty_thresh, give the other good bdi's a
		 * pipe to go through, so that tasks on them still remain
		 * responsive.
		 *
		 * In theory 1 page is enough to keep the consumer-producer
		 * pipe going: the flusher cleans 1 page => the task dirties 1
		 * more page. However bdi_dirty has accounting errors.  So use
		 * the larger and more IO friendly bdi_stat_error.
		 */
		if (bdi_dirty <= bdi_stat_error(bdi))
			break;

		if (fatal_signal_pending(current))
			break;
	}

	if (!dirty_exceeded && bdi->dirty_exceeded)
		bdi->dirty_exceeded = 0;

	if (writeback_in_progress(bdi))
		return;

	/*
	 * In laptop mode, we wait until hitting the higher threshold before
	 * starting background writeout, and then write out all the way down
	 * to the lower threshold.  So slow writers cause minimal disk activity.
	 *
	 * In normal mode, we start background writeout at the lower
	 * background_thresh, to keep the amount of dirty memory low.
	 */
	if (laptop_mode)
		return;

	if (nr_reclaimable > background_thresh)
		bdi_start_background_writeback(bdi);
}

void set_page_dirty_balance(struct page *page, int page_mkwrite)
{
	if (set_page_dirty(page) || page_mkwrite) {
		struct address_space *mapping = page_mapping(page);

		if (mapping)
			balance_dirty_pages_ratelimited(mapping);
	}
}

static DEFINE_PER_CPU(int, bdp_ratelimits);

/*
 * Normal tasks are throttled by
 *	loop {
 *		dirty tsk->nr_dirtied_pause pages;
 *		take a snap in balance_dirty_pages();
 *	}
 * However there is a worst case. If every task exits immediately after
 * dirtying (tsk->nr_dirtied_pause - 1) pages, balance_dirty_pages() will
 * never be called to throttle the page dirties. The solution is to save the
 * not yet throttled page dirties in dirty_throttle_leaks on task exit and
 * charge them randomly into the running tasks. This works well for the above
 * worst case, as the new task will pick up and accumulate the old task's
 * leaked dirty count and eventually get throttled.
 */
DEFINE_PER_CPU(int, dirty_throttle_leaks) = 0;

/**
 * balance_dirty_pages_ratelimited - balance dirty memory state
 * @mapping: address_space which was dirtied
 *
 * Processes which are dirtying memory should call in here once for each page
 * which was newly dirtied.  The function will periodically check the system's
 * dirty state and will initiate writeback if needed.
 *
 * On really big machines, get_writeback_state is expensive, so try to avoid
 * calling it too often (ratelimiting).  But once we're over the dirty memory
 * limit we decrease the ratelimiting by a lot, to prevent individual processes
 * from overshooting the limit by (ratelimit_pages) each.
 */
void balance_dirty_pages_ratelimited(struct address_space *mapping)
{
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	int ratelimit;
	int *p;

	if (!bdi_cap_account_dirty(bdi))
		return;

	ratelimit = current->nr_dirtied_pause;
	if (bdi->dirty_exceeded)
		ratelimit = min(ratelimit, 32 >> (PAGE_SHIFT - 10));

	preempt_disable();
	/*
	 * This prevents one CPU from accumulating too many dirtied pages
	 * without calling into balance_dirty_pages(), which can happen when
	 * there are 1000+ tasks, all of them start dirtying pages at exactly
	 * the same time, hence all honoured a too large initial
	 * task->nr_dirtied_pause.
	 */
	p = &__get_cpu_var(bdp_ratelimits);
	if (unlikely(current->nr_dirtied >= ratelimit))
		*p = 0;
	else if (unlikely(*p >= ratelimit_pages)) {
		*p = 0;
		ratelimit = 0;
	}
	/*
	 * Pick up the dirtied pages by the exited tasks. This avoids lots of
	 * short-lived tasks (eg. gcc invocations in a kernel build) escaping
	 * the dirty throttling and livelock other long-run dirtiers.
	 */
	p = &__get_cpu_var(dirty_throttle_leaks);
	if (*p > 0 && current->nr_dirtied < ratelimit) {
		unsigned long nr_pages_dirtied;
		nr_pages_dirtied = min(*p, ratelimit - current->nr_dirtied);
		*p -= nr_pages_dirtied;
		current->nr_dirtied += nr_pages_dirtied;
	}
	preempt_enable();

	if (unlikely(current->nr_dirtied >= ratelimit))
		balance_dirty_pages(mapping, current->nr_dirtied);
}
EXPORT_SYMBOL(balance_dirty_pages_ratelimited);

void throttle_vm_writeout(gfp_t gfp_mask)
{
	unsigned long background_thresh;
	unsigned long dirty_thresh;

	for ( ; ; ) {
		global_dirty_limits(&background_thresh, &dirty_thresh);
		dirty_thresh = hard_dirty_limit(dirty_thresh);

		/*
		 * Boost the allowable dirty threshold a bit for page
		 * allocators so they don't get DoS'ed by heavy writers
		 */
		dirty_thresh += dirty_thresh / 10;      /* wheeee... */

		if (global_page_state(NR_UNSTABLE_NFS) +
			global_page_state(NR_WRITEBACK) <= dirty_thresh)
			break;
		congestion_wait(BLK_RW_ASYNC, HZ/10);

		/*
		 * The caller might hold locks which can prevent IO completion
		 * or progress in the filesystem.  So we cannot just sit here
		 * waiting for IO to complete.
		 */
		if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO))
			break;
	}
}

/*
 * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
 */
int dirty_writeback_centisecs_handler(ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec(table, write, buffer, length, ppos);
	return 0;
}

#ifdef CONFIG_BLOCK
void laptop_mode_timer_fn(unsigned long data)
{
	struct request_queue *q = (struct request_queue *)data;
	int nr_pages = global_page_state(NR_FILE_DIRTY) +
		global_page_state(NR_UNSTABLE_NFS);

	/*
	 * We want to write everything out, not just down to the dirty
	 * threshold
	 */
	if (bdi_has_dirty_io(&q->backing_dev_info))
		bdi_start_writeback(&q->backing_dev_info, nr_pages,
					WB_REASON_LAPTOP_TIMER);
}

/*
 * We've spun up the disk and we're in laptop mode: schedule writeback
 * of all dirty data a few seconds from now.  If the flush is already scheduled
 * then push it back - the user is still using the disk.
 */
void laptop_io_completion(struct backing_dev_info *info)
{
	mod_timer(&info->laptop_mode_wb_timer, jiffies + laptop_mode);
}

/*
 * We're in laptop mode and we've just synced. The sync's writes will have
 * caused another writeback to be scheduled by laptop_io_completion.
 * Nothing needs to be written back anymore, so we unschedule the writeback.
 */
void laptop_sync_completion(void)
{
	struct backing_dev_info *bdi;

	rcu_read_lock();

	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
		del_timer(&bdi->laptop_mode_wb_timer);

	rcu_read_unlock();
}
#endif

/*
 * If ratelimit_pages is too high then we can get into dirty-data overload
 * if a large number of processes all perform writes at the same time.
 * If it is too low then SMP machines will call the (expensive)
 * get_writeback_state too often.
 *
 * Here we set ratelimit_pages to a level which ensures that when all CPUs are
 * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
 * thresholds.
 */

void writeback_set_ratelimit(void)
{
	unsigned long background_thresh;
	unsigned long dirty_thresh;
	global_dirty_limits(&background_thresh, &dirty_thresh);
	global_dirty_limit = dirty_thresh;
	ratelimit_pages = dirty_thresh / (num_online_cpus() * 32);
	if (ratelimit_pages < 16)
		ratelimit_pages = 16;
}

static int
ratelimit_handler(struct notifier_block *self, unsigned long action,
		  void *hcpu)
{

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
	case CPU_DEAD:
		writeback_set_ratelimit();
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block ratelimit_nb = {
	.notifier_call	= ratelimit_handler,
	.next		= NULL,
};

/*
 * Called early on to tune the page writeback dirty limits.
 *
 * We used to scale dirty pages according to how total memory
 * related to pages that could be allocated for buffers (by
 * comparing nr_free_buffer_pages() to vm_total_pages).
 *
 * However, that was when we used "dirty_ratio" to scale with
 * all memory, and we don't do that any more. "dirty_ratio"
 * is now applied to total non-HIGHPAGE memory (by subtracting
 * totalhigh_pages from vm_total_pages), and as such we can't
 * get into the old insane situation any more where we had
 * large amounts of dirty pages compared to a small amount of
 * non-HIGHMEM memory.
 *
 * But we might still want to scale the dirty_ratio by how
 * much memory the box has..
 */
void __init page_writeback_init(void)
{
	writeback_set_ratelimit();
	register_cpu_notifier(&ratelimit_nb);

	fprop_global_init(&writeout_completions);
}

/**
 * tag_pages_for_writeback - tag pages to be written by write_cache_pages
 * @mapping: address space structure to write
 * @start: starting page index
 * @end: ending page index (inclusive)
 *
 * This function scans the page range from @start to @end (inclusive) and tags
 * all pages that have DIRTY tag set with a special TOWRITE tag. The idea is
 * that write_cache_pages (or whoever calls this function) will then use
 * TOWRITE tag to identify pages eligible for writeback.  This mechanism is
 * used to avoid livelocking of writeback by a process steadily creating new
 * dirty pages in the file (thus it is important for this function to be quick
 * so that it can tag pages faster than a dirtying process can create them).
 */
/*
 * We tag pages in batches of WRITEBACK_TAG_BATCH to reduce tree_lock latency.
 */
void tag_pages_for_writeback(struct address_space *mapping,
			     pgoff_t start, pgoff_t end)
{
#define WRITEBACK_TAG_BATCH 4096
	unsigned long tagged;

	do {
		spin_lock_irq(&mapping->tree_lock);
		tagged = radix_tree_range_tag_if_tagged(&mapping->page_tree,
				&start, end, WRITEBACK_TAG_BATCH,
				PAGECACHE_TAG_DIRTY, PAGECACHE_TAG_TOWRITE);
		spin_unlock_irq(&mapping->tree_lock);
		WARN_ON_ONCE(tagged > WRITEBACK_TAG_BATCH);
		cond_resched();
		/* We check 'start' to handle wrapping when end == ~0UL */
	} while (tagged >= WRITEBACK_TAG_BATCH && start);
}
EXPORT_SYMBOL(tag_pages_for_writeback);

/**
 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 * @writepage: function called for each page
 * @data: data passed to writepage function
 *
 * If a page is already under I/O, write_cache_pages() skips it, even
 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
 * and msync() need to guarantee that all the data which was dirty at the time
 * the call was made get new I/O started against them.  If wbc->sync_mode is
 * WB_SYNC_ALL then we were called for data integrity and we must wait for
 * existing IO to complete.
 *
 * To avoid livelocks (when other process dirties new pages), we first tag
 * pages which should be written back with TOWRITE tag and only then start
 * writing them. For data-integrity sync we have to be careful so that we do
 * not miss some pages (e.g., because some other process has cleared TOWRITE
 * tag we set). The rule we follow is that TOWRITE tag can be cleared only
 * by the process clearing the DIRTY tag (and submitting the page for IO).
1861 */ 1862 int write_cache_pages(struct address_space *mapping, 1863 struct writeback_control *wbc, writepage_t writepage, 1864 void *data) 1865 { 1866 int ret = 0; 1867 int done = 0; 1868 struct pagevec pvec; 1869 int nr_pages; 1870 pgoff_t uninitialized_var(writeback_index); 1871 pgoff_t index; 1872 pgoff_t end; /* Inclusive */ 1873 pgoff_t done_index; 1874 int cycled; 1875 int range_whole = 0; 1876 int tag; 1877 1878 pagevec_init(&pvec, 0); 1879 if (wbc->range_cyclic) { 1880 writeback_index = mapping->writeback_index; /* prev offset */ 1881 index = writeback_index; 1882 if (index == 0) 1883 cycled = 1; 1884 else 1885 cycled = 0; 1886 end = -1; 1887 } else { 1888 index = wbc->range_start >> PAGE_CACHE_SHIFT; 1889 end = wbc->range_end >> PAGE_CACHE_SHIFT; 1890 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) 1891 range_whole = 1; 1892 cycled = 1; /* ignore range_cyclic tests */ 1893 } 1894 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) 1895 tag = PAGECACHE_TAG_TOWRITE; 1896 else 1897 tag = PAGECACHE_TAG_DIRTY; 1898 retry: 1899 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) 1900 tag_pages_for_writeback(mapping, index, end); 1901 done_index = index; 1902 while (!done && (index <= end)) { 1903 int i; 1904 1905 nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag, 1906 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1); 1907 if (nr_pages == 0) 1908 break; 1909 1910 for (i = 0; i < nr_pages; i++) { 1911 struct page *page = pvec.pages[i]; 1912 1913 /* 1914 * At this point, the page may be truncated or 1915 * invalidated (changing page->mapping to NULL), or 1916 * even swizzled back from swapper_space to tmpfs file 1917 * mapping. However, page->index will not change 1918 * because we have a reference on the page. 1919 */ 1920 if (page->index > end) { 1921 /* 1922 * can't be range_cyclic (1st pass) because 1923 * end == -1 in that case. 1924 */ 1925 done = 1; 1926 break; 1927 } 1928 1929 done_index = page->index; 1930 1931 lock_page(page); 1932 1933 /* 1934 * Page truncated or invalidated. We can freely skip it 1935 * then, even for data integrity operations: the page 1936 * has disappeared concurrently, so there could be no 1937 * real expectation of this data interity operation 1938 * even if there is now a new, dirty page at the same 1939 * pagecache address. 1940 */ 1941 if (unlikely(page->mapping != mapping)) { 1942 continue_unlock: 1943 unlock_page(page); 1944 continue; 1945 } 1946 1947 if (!PageDirty(page)) { 1948 /* someone wrote it for us */ 1949 goto continue_unlock; 1950 } 1951 1952 if (PageWriteback(page)) { 1953 if (wbc->sync_mode != WB_SYNC_NONE) 1954 wait_on_page_writeback(page); 1955 else 1956 goto continue_unlock; 1957 } 1958 1959 BUG_ON(PageWriteback(page)); 1960 if (!clear_page_dirty_for_io(page)) 1961 goto continue_unlock; 1962 1963 trace_wbc_writepage(wbc, mapping->backing_dev_info); 1964 ret = (*writepage)(page, wbc, data); 1965 if (unlikely(ret)) { 1966 if (ret == AOP_WRITEPAGE_ACTIVATE) { 1967 unlock_page(page); 1968 ret = 0; 1969 } else { 1970 /* 1971 * done_index is set past this page, 1972 * so media errors will not choke 1973 * background writeout for the entire 1974 * file. This has consequences for 1975 * range_cyclic semantics (ie. it may 1976 * not be suitable for data integrity 1977 * writeout). 1978 */ 1979 done_index = page->index + 1; 1980 done = 1; 1981 break; 1982 } 1983 } 1984 1985 /* 1986 * We stop writing back only if we are not doing 1987 * integrity sync. 
In case of integrity sync we have to 1988 * keep going until we have written all the pages 1989 * we tagged for writeback prior to entering this loop. 1990 */ 1991 if (--wbc->nr_to_write <= 0 && 1992 wbc->sync_mode == WB_SYNC_NONE) { 1993 done = 1; 1994 break; 1995 } 1996 } 1997 pagevec_release(&pvec); 1998 cond_resched(); 1999 } 2000 if (!cycled && !done) { 2001 /* 2002 * range_cyclic: 2003 * We hit the last page and there is more work to be done: wrap 2004 * back to the start of the file 2005 */ 2006 cycled = 1; 2007 index = 0; 2008 end = writeback_index - 1; 2009 goto retry; 2010 } 2011 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) 2012 mapping->writeback_index = done_index; 2013 2014 return ret; 2015 } 2016 EXPORT_SYMBOL(write_cache_pages); 2017 2018 /* 2019 * Function used by generic_writepages to call the real writepage 2020 * function and set the mapping flags on error 2021 */ 2022 static int __writepage(struct page *page, struct writeback_control *wbc, 2023 void *data) 2024 { 2025 struct address_space *mapping = data; 2026 int ret = mapping->a_ops->writepage(page, wbc); 2027 mapping_set_error(mapping, ret); 2028 return ret; 2029 } 2030 2031 /** 2032 * generic_writepages - walk the list of dirty pages of the given address space and writepage() all of them. 2033 * @mapping: address space structure to write 2034 * @wbc: subtract the number of written pages from *@wbc->nr_to_write 2035 * 2036 * This is a library function, which implements the writepages() 2037 * address_space_operation. 2038 */ 2039 int generic_writepages(struct address_space *mapping, 2040 struct writeback_control *wbc) 2041 { 2042 struct blk_plug plug; 2043 int ret; 2044 2045 /* deal with chardevs and other special file */ 2046 if (!mapping->a_ops->writepage) 2047 return 0; 2048 2049 blk_start_plug(&plug); 2050 ret = write_cache_pages(mapping, wbc, __writepage, mapping); 2051 blk_finish_plug(&plug); 2052 return ret; 2053 } 2054 2055 EXPORT_SYMBOL(generic_writepages); 2056 2057 int do_writepages(struct address_space *mapping, struct writeback_control *wbc) 2058 { 2059 int ret; 2060 2061 if (wbc->nr_to_write <= 0) 2062 return 0; 2063 if (mapping->a_ops->writepages) 2064 ret = mapping->a_ops->writepages(mapping, wbc); 2065 else 2066 ret = generic_writepages(mapping, wbc); 2067 return ret; 2068 } 2069 2070 /** 2071 * write_one_page - write out a single page and optionally wait on I/O 2072 * @page: the page to write 2073 * @wait: if true, wait on writeout 2074 * 2075 * The page must be locked by the caller and will be unlocked upon return. 2076 * 2077 * write_one_page() returns a negative error code if I/O failed. 2078 */ 2079 int write_one_page(struct page *page, int wait) 2080 { 2081 struct address_space *mapping = page->mapping; 2082 int ret = 0; 2083 struct writeback_control wbc = { 2084 .sync_mode = WB_SYNC_ALL, 2085 .nr_to_write = 1, 2086 }; 2087 2088 BUG_ON(!PageLocked(page)); 2089 2090 if (wait) 2091 wait_on_page_writeback(page); 2092 2093 if (clear_page_dirty_for_io(page)) { 2094 page_cache_get(page); 2095 ret = mapping->a_ops->writepage(page, &wbc); 2096 if (ret == 0 && wait) { 2097 wait_on_page_writeback(page); 2098 if (PageError(page)) 2099 ret = -EIO; 2100 } 2101 page_cache_release(page); 2102 } else { 2103 unlock_page(page); 2104 } 2105 return ret; 2106 } 2107 EXPORT_SYMBOL(write_one_page); 2108 2109 /* 2110 * For address_spaces which do not use buffers nor write back. 
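 *
 * A rough sketch of how such a mapping wires this up ("example_aops" is a
 * made-up name; shmem does roughly the same thing):
 *
 *	static const struct address_space_operations example_aops = {
 *		.set_page_dirty	= __set_page_dirty_no_writeback,
 *	};
 *
 * The page is only marked PG_dirty; no radix-tree tag is set and no dirty
 * accounting is done, since nothing will write the page back.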
2111 */ 2112 int __set_page_dirty_no_writeback(struct page *page) 2113 { 2114 if (!PageDirty(page)) 2115 return !TestSetPageDirty(page); 2116 return 0; 2117 } 2118 2119 /* 2120 * Helper function for set_page_dirty family. 2121 * NOTE: This relies on being atomic wrt interrupts. 2122 */ 2123 void account_page_dirtied(struct page *page, struct address_space *mapping) 2124 { 2125 trace_writeback_dirty_page(page, mapping); 2126 2127 if (mapping_cap_account_dirty(mapping)) { 2128 __inc_zone_page_state(page, NR_FILE_DIRTY); 2129 __inc_zone_page_state(page, NR_DIRTIED); 2130 __inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE); 2131 __inc_bdi_stat(mapping->backing_dev_info, BDI_DIRTIED); 2132 task_io_account_write(PAGE_CACHE_SIZE); 2133 current->nr_dirtied++; 2134 this_cpu_inc(bdp_ratelimits); 2135 } 2136 } 2137 EXPORT_SYMBOL(account_page_dirtied); 2138 2139 /* 2140 * Helper function for set_page_writeback family. 2141 * 2142 * The caller must hold mem_cgroup_begin/end_update_page_stat() lock 2143 * while calling this function. 2144 * See test_set_page_writeback for example. 2145 * 2146 * NOTE: Unlike account_page_dirtied this does not rely on being atomic 2147 * wrt interrupts. 2148 */ 2149 void account_page_writeback(struct page *page) 2150 { 2151 mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_WRITEBACK); 2152 inc_zone_page_state(page, NR_WRITEBACK); 2153 } 2154 EXPORT_SYMBOL(account_page_writeback); 2155 2156 /* 2157 * For address_spaces which do not use buffers. Just tag the page as dirty in 2158 * its radix tree. 2159 * 2160 * This is also used when a single buffer is being dirtied: we want to set the 2161 * page dirty in that case, but not all the buffers. This is a "bottom-up" 2162 * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying. 2163 * 2164 * Most callers have locked the page, which pins the address_space in memory. 2165 * But zap_pte_range() does not lock the page, however in that case the 2166 * mapping is pinned by the vma's ->vm_file reference. 2167 * 2168 * We take care to handle the case where the page was truncated from the 2169 * mapping by re-checking page_mapping() inside tree_lock. 2170 */ 2171 int __set_page_dirty_nobuffers(struct page *page) 2172 { 2173 if (!TestSetPageDirty(page)) { 2174 struct address_space *mapping = page_mapping(page); 2175 struct address_space *mapping2; 2176 unsigned long flags; 2177 2178 if (!mapping) 2179 return 1; 2180 2181 spin_lock_irqsave(&mapping->tree_lock, flags); 2182 mapping2 = page_mapping(page); 2183 if (mapping2) { /* Race with truncate? */ 2184 BUG_ON(mapping2 != mapping); 2185 WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page)); 2186 account_page_dirtied(page, mapping); 2187 radix_tree_tag_set(&mapping->page_tree, 2188 page_index(page), PAGECACHE_TAG_DIRTY); 2189 } 2190 spin_unlock_irqrestore(&mapping->tree_lock, flags); 2191 if (mapping->host) { 2192 /* !PageAnon && !swapper_space */ 2193 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); 2194 } 2195 return 1; 2196 } 2197 return 0; 2198 } 2199 EXPORT_SYMBOL(__set_page_dirty_nobuffers); 2200 2201 /* 2202 * Call this whenever redirtying a page, to de-account the dirty counters 2203 * (NR_DIRTIED, BDI_DIRTIED, tsk->nr_dirtied), so that they match the written 2204 * counters (NR_WRITTEN, BDI_WRITTEN) in long term. The mismatches will lead to 2205 * systematic errors in balanced_dirty_ratelimit and the dirty pages position 2206 * control. 
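 *
 * A rough sketch of the intended pairing (not literal code from this file):
 * a ->writepage() that gives up on a page undoes the dirty accounting and
 * re-dirties the page, either by hand
 *
 *	account_page_redirty(page);
 *	__set_page_dirty_nobuffers(page);
 *
 * or, equivalently, via redirty_page_for_writepage() below, which bundles
 * the two together and also bumps wbc->pages_skipped.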
2207 */ 2208 void account_page_redirty(struct page *page) 2209 { 2210 struct address_space *mapping = page->mapping; 2211 if (mapping && mapping_cap_account_dirty(mapping)) { 2212 current->nr_dirtied--; 2213 dec_zone_page_state(page, NR_DIRTIED); 2214 dec_bdi_stat(mapping->backing_dev_info, BDI_DIRTIED); 2215 } 2216 } 2217 EXPORT_SYMBOL(account_page_redirty); 2218 2219 /* 2220 * When a writepage implementation decides that it doesn't want to write this 2221 * page for some reason, it should redirty the locked page via 2222 * redirty_page_for_writepage() and it should then unlock the page and return 0 2223 */ 2224 int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page) 2225 { 2226 wbc->pages_skipped++; 2227 account_page_redirty(page); 2228 return __set_page_dirty_nobuffers(page); 2229 } 2230 EXPORT_SYMBOL(redirty_page_for_writepage); 2231 2232 /* 2233 * Dirty a page. 2234 * 2235 * For pages with a mapping this should be done under the page lock 2236 * for the benefit of asynchronous memory errors who prefer a consistent 2237 * dirty state. This rule can be broken in some special cases, 2238 * but should be better not to. 2239 * 2240 * If the mapping doesn't provide a set_page_dirty a_op, then 2241 * just fall through and assume that it wants buffer_heads. 2242 */ 2243 int set_page_dirty(struct page *page) 2244 { 2245 struct address_space *mapping = page_mapping(page); 2246 2247 if (likely(mapping)) { 2248 int (*spd)(struct page *) = mapping->a_ops->set_page_dirty; 2249 /* 2250 * readahead/lru_deactivate_page could remain 2251 * PG_readahead/PG_reclaim due to race with end_page_writeback 2252 * About readahead, if the page is written, the flags would be 2253 * reset. So no problem. 2254 * About lru_deactivate_page, if the page is redirty, the flag 2255 * will be reset. So no problem. but if the page is used by readahead 2256 * it will confuse readahead and make it restart the size rampup 2257 * process. But it's a trivial problem. 2258 */ 2259 ClearPageReclaim(page); 2260 #ifdef CONFIG_BLOCK 2261 if (!spd) 2262 spd = __set_page_dirty_buffers; 2263 #endif 2264 return (*spd)(page); 2265 } 2266 if (!PageDirty(page)) { 2267 if (!TestSetPageDirty(page)) 2268 return 1; 2269 } 2270 return 0; 2271 } 2272 EXPORT_SYMBOL(set_page_dirty); 2273 2274 /* 2275 * set_page_dirty() is racy if the caller has no reference against 2276 * page->mapping->host, and if the page is unlocked. This is because another 2277 * CPU could truncate the page off the mapping and then free the mapping. 2278 * 2279 * Usually, the page _is_ locked, or the caller is a user-space process which 2280 * holds a reference on the inode by having an open file. 2281 * 2282 * In other cases, the page should be locked before running set_page_dirty(). 2283 */ 2284 int set_page_dirty_lock(struct page *page) 2285 { 2286 int ret; 2287 2288 lock_page(page); 2289 ret = set_page_dirty(page); 2290 unlock_page(page); 2291 return ret; 2292 } 2293 EXPORT_SYMBOL(set_page_dirty_lock); 2294 2295 /* 2296 * Clear a page's dirty flag, while caring for dirty memory accounting. 2297 * Returns true if the page was previously dirty. 2298 * 2299 * This is for preparing to put the page under writeout. We leave the page 2300 * tagged as dirty in the radix tree so that a concurrent write-for-sync 2301 * can discover it via a PAGECACHE_TAG_DIRTY walk. 
The ->writepage 2302 * implementation will run either set_page_writeback() or set_page_dirty(), 2303 * at which stage we bring the page's dirty flag and radix-tree dirty tag 2304 * back into sync. 2305 * 2306 * This incoherency between the page's dirty flag and radix-tree tag is 2307 * unfortunate, but it only exists while the page is locked. 2308 */ 2309 int clear_page_dirty_for_io(struct page *page) 2310 { 2311 struct address_space *mapping = page_mapping(page); 2312 2313 BUG_ON(!PageLocked(page)); 2314 2315 if (mapping && mapping_cap_account_dirty(mapping)) { 2316 /* 2317 * Yes, Virginia, this is indeed insane. 2318 * 2319 * We use this sequence to make sure that 2320 * (a) we account for dirty stats properly 2321 * (b) we tell the low-level filesystem to 2322 * mark the whole page dirty if it was 2323 * dirty in a pagetable. Only to then 2324 * (c) clean the page again and return 1 to 2325 * cause the writeback. 2326 * 2327 * This way we avoid all nasty races with the 2328 * dirty bit in multiple places and clearing 2329 * them concurrently from different threads. 2330 * 2331 * Note! Normally the "set_page_dirty(page)" 2332 * has no effect on the actual dirty bit - since 2333 * that will already usually be set. But we 2334 * need the side effects, and it can help us 2335 * avoid races. 2336 * 2337 * We basically use the page "master dirty bit" 2338 * as a serialization point for all the different 2339 * threads doing their things. 2340 */ 2341 if (page_mkclean(page)) 2342 set_page_dirty(page); 2343 /* 2344 * We carefully synchronise fault handlers against 2345 * installing a dirty pte and marking the page dirty 2346 * at this point. We do this by having them hold the 2347 * page lock at some point after installing their 2348 * pte, but before marking the page dirty. 2349 * Pages are always locked coming in here, so we get 2350 * the desired exclusion. See mm/memory.c:do_wp_page() 2351 * for more comments. 
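 *
 * Roughly, as a schematic of that ordering (not code from this file),
 * the write-fault side does
 *
 *	install writable, dirty pte
 *	lock_page(page);
 *	set_page_dirty(page);
 *	unlock_page(page);
 *
 * while we hold the page lock across page_mkclean() and the
 * TestClearPageDirty() below, so a fault cannot slip its
 * set_page_dirty() in between the two.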
2352 */ 2353 if (TestClearPageDirty(page)) { 2354 dec_zone_page_state(page, NR_FILE_DIRTY); 2355 dec_bdi_stat(mapping->backing_dev_info, 2356 BDI_RECLAIMABLE); 2357 return 1; 2358 } 2359 return 0; 2360 } 2361 return TestClearPageDirty(page); 2362 } 2363 EXPORT_SYMBOL(clear_page_dirty_for_io); 2364 2365 int test_clear_page_writeback(struct page *page) 2366 { 2367 struct address_space *mapping = page_mapping(page); 2368 int ret; 2369 bool locked; 2370 unsigned long memcg_flags; 2371 2372 mem_cgroup_begin_update_page_stat(page, &locked, &memcg_flags); 2373 if (mapping) { 2374 struct backing_dev_info *bdi = mapping->backing_dev_info; 2375 unsigned long flags; 2376 2377 spin_lock_irqsave(&mapping->tree_lock, flags); 2378 ret = TestClearPageWriteback(page); 2379 if (ret) { 2380 radix_tree_tag_clear(&mapping->page_tree, 2381 page_index(page), 2382 PAGECACHE_TAG_WRITEBACK); 2383 if (bdi_cap_account_writeback(bdi)) { 2384 __dec_bdi_stat(bdi, BDI_WRITEBACK); 2385 __bdi_writeout_inc(bdi); 2386 } 2387 } 2388 spin_unlock_irqrestore(&mapping->tree_lock, flags); 2389 } else { 2390 ret = TestClearPageWriteback(page); 2391 } 2392 if (ret) { 2393 mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_WRITEBACK); 2394 dec_zone_page_state(page, NR_WRITEBACK); 2395 inc_zone_page_state(page, NR_WRITTEN); 2396 } 2397 mem_cgroup_end_update_page_stat(page, &locked, &memcg_flags); 2398 return ret; 2399 } 2400 2401 int test_set_page_writeback(struct page *page) 2402 { 2403 struct address_space *mapping = page_mapping(page); 2404 int ret; 2405 bool locked; 2406 unsigned long memcg_flags; 2407 2408 mem_cgroup_begin_update_page_stat(page, &locked, &memcg_flags); 2409 if (mapping) { 2410 struct backing_dev_info *bdi = mapping->backing_dev_info; 2411 unsigned long flags; 2412 2413 spin_lock_irqsave(&mapping->tree_lock, flags); 2414 ret = TestSetPageWriteback(page); 2415 if (!ret) { 2416 radix_tree_tag_set(&mapping->page_tree, 2417 page_index(page), 2418 PAGECACHE_TAG_WRITEBACK); 2419 if (bdi_cap_account_writeback(bdi)) 2420 __inc_bdi_stat(bdi, BDI_WRITEBACK); 2421 } 2422 if (!PageDirty(page)) 2423 radix_tree_tag_clear(&mapping->page_tree, 2424 page_index(page), 2425 PAGECACHE_TAG_DIRTY); 2426 radix_tree_tag_clear(&mapping->page_tree, 2427 page_index(page), 2428 PAGECACHE_TAG_TOWRITE); 2429 spin_unlock_irqrestore(&mapping->tree_lock, flags); 2430 } else { 2431 ret = TestSetPageWriteback(page); 2432 } 2433 if (!ret) 2434 account_page_writeback(page); 2435 mem_cgroup_end_update_page_stat(page, &locked, &memcg_flags); 2436 return ret; 2437 2438 } 2439 EXPORT_SYMBOL(test_set_page_writeback); 2440 2441 /* 2442 * Return true if any of the pages in the mapping are marked with the 2443 * passed tag. 2444 */ 2445 int mapping_tagged(struct address_space *mapping, int tag) 2446 { 2447 return radix_tree_tagged(&mapping->page_tree, tag); 2448 } 2449 EXPORT_SYMBOL(mapping_tagged); 2450 2451 /** 2452 * wait_for_stable_page() - wait for writeback to finish, if necessary. 2453 * @page: The page to wait on. 2454 * 2455 * This function determines if the given page is related to a backing device 2456 * that requires page contents to be held stable during writeback. If so, then 2457 * it will wait for any pending writeback to complete. 
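 *
 * A rough sketch of the expected call site (a hypothetical ->page_mkwrite(),
 * not code from this file): filesystems whose pages can be redirtied through
 * a shared mapping call this with the page locked, e.g.
 *
 *	lock_page(page);
 *	wait_for_stable_page(page);
 *	return VM_FAULT_LOCKED;
 *
 * so that devices which checksum or compute parity over in-flight data
 * (DIF/DIX, some RAID or iSCSI setups) never see the page change under a
 * write that is still in progress.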
2458 */ 2459 void wait_for_stable_page(struct page *page) 2460 { 2461 struct address_space *mapping = page_mapping(page); 2462 struct backing_dev_info *bdi = mapping->backing_dev_info; 2463 2464 if (!bdi_cap_stable_pages_required(bdi)) 2465 return; 2466 2467 wait_on_page_writeback(page); 2468 } 2469 EXPORT_SYMBOL_GPL(wait_for_stable_page); 2470
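
/*
 * A rough end-to-end sketch for orientation (locking, error handling and the
 * actual bio submission are elided; "writepage" and "data" stand for a
 * filesystem's own callback and cookie):
 *
 *	set_page_dirty(page);				PG_dirty + DIRTY tag + accounting
 *	balance_dirty_pages_ratelimited(mapping);	throttle the dirtying task
 *
 *	tag_pages_for_writeback(mapping, start, end);	sync: DIRTY -> TOWRITE
 *	write_cache_pages(mapping, wbc, writepage, data);
 *	  clear_page_dirty_for_io(page);		clear PG_dirty, keep DIRTY tag
 *	  set_page_writeback(page);			PG_writeback + WRITEBACK tag,
 *							DIRTY and TOWRITE tags cleared
 *	  (submit the IO)
 *	end_page_writeback(page);			test_clear_page_writeback():
 *							clear PG_writeback, count NR_WRITTEN
 */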