/*
 *  linux/mm/swap.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * This file contains the default values for the operation of the
 * Linux VM subsystem.  Fine-tuning documentation can be found in
 * Documentation/sysctl/vm.txt.
 * Started 18.12.91
 * Swap aging added 23.2.95, Stephen Tweedie.
 * Buffermem limits added 12.3.98, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm_inline.h>
#include <linux/buffer_head.h>  /* for try_to_release_page() */
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>

/* How many pages do we try to swap or page in/out together? */
int page_cluster;

void put_page(struct page *page)
{
        if (unlikely(PageCompound(page))) {
                page = (struct page *)page_private(page);
                if (put_page_testzero(page)) {
                        void (*dtor)(struct page *page);

                        dtor = (void (*)(struct page *))page[1].mapping;
                        (*dtor)(page);
                }
                return;
        }
        if (put_page_testzero(page))
                __page_cache_release(page);
}
EXPORT_SYMBOL(put_page);

/*
 * Writeback is about to end against a page which has been marked for immediate
 * reclaim.  If it still appears to be reclaimable, move it to the tail of the
 * inactive list.  The page still has PageWriteback set, which will pin it.
 *
 * We don't expect many pages to come through here, so don't bother batching
 * things up.
 *
 * To avoid placing the page at the tail of the LRU while PG_writeback is still
 * set, this function will clear PG_writeback before performing the page
 * motion.  Do that inside the lru lock, because once PG_writeback is cleared
 * we may not touch the page.
 *
 * Returns zero if it cleared PG_writeback.
 */
int rotate_reclaimable_page(struct page *page)
{
        struct zone *zone;
        unsigned long flags;

        if (PageLocked(page))
                return 1;
        if (PageDirty(page))
                return 1;
        if (PageActive(page))
                return 1;
        if (!PageLRU(page))
                return 1;

        zone = page_zone(page);
        spin_lock_irqsave(&zone->lru_lock, flags);
        if (PageLRU(page) && !PageActive(page)) {
                list_del(&page->lru);
                list_add_tail(&page->lru, &zone->inactive_list);
                inc_page_state(pgrotated);
        }
        if (!test_clear_page_writeback(page))
                BUG();
        spin_unlock_irqrestore(&zone->lru_lock, flags);
        return 0;
}
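/*
 * Illustrative sketch (not built): how a writeback-completion path might
 * consume the return value of rotate_reclaimable_page().  This mirrors the
 * pattern used by end_page_writeback() in mm/filemap.c; the function below
 * is hypothetical and included only as an example.
 */
#if 0
static void example_end_writeback(struct page *page)
{
        /*
         * If the page was tagged PG_reclaim, let rotate_reclaimable_page()
         * move it to the tail of the inactive list and clear PG_writeback
         * under zone->lru_lock.  If it refuses (returns nonzero), clear
         * PG_writeback here instead.
         */
        if (!TestClearPageReclaim(page) || rotate_reclaimable_page(page)) {
                if (!test_clear_page_writeback(page))
                        BUG();
        }
        /* ...then wake any waiters on PG_writeback, as end_page_writeback() does. */
}
#endif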
/*
 * FIXME: speed this up?
 */
void fastcall activate_page(struct page *page)
{
        struct zone *zone = page_zone(page);

        spin_lock_irq(&zone->lru_lock);
        if (PageLRU(page) && !PageActive(page)) {
                del_page_from_inactive_list(zone, page);
                SetPageActive(page);
                add_page_to_active_list(zone, page);
                inc_page_state(pgactivate);
        }
        spin_unlock_irq(&zone->lru_lock);
}

/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced        ->      inactive,referenced
 * inactive,referenced          ->      active,unreferenced
 * active,unreferenced          ->      active,referenced
 */
void fastcall mark_page_accessed(struct page *page)
{
        if (!PageActive(page) && PageReferenced(page) && PageLRU(page)) {
                activate_page(page);
                ClearPageReferenced(page);
        } else if (!PageReferenced(page)) {
                SetPageReferenced(page);
        }
}

EXPORT_SYMBOL(mark_page_accessed);

/**
 * lru_cache_add: add a page to the page lists
 * @page: the page to add
 */
static DEFINE_PER_CPU(struct pagevec, lru_add_pvecs) = { 0, };
static DEFINE_PER_CPU(struct pagevec, lru_add_active_pvecs) = { 0, };

void fastcall lru_cache_add(struct page *page)
{
        struct pagevec *pvec = &get_cpu_var(lru_add_pvecs);

        page_cache_get(page);
        if (!pagevec_add(pvec, page))
                __pagevec_lru_add(pvec);
        put_cpu_var(lru_add_pvecs);
}

void fastcall lru_cache_add_active(struct page *page)
{
        struct pagevec *pvec = &get_cpu_var(lru_add_active_pvecs);

        page_cache_get(page);
        if (!pagevec_add(pvec, page))
                __pagevec_lru_add_active(pvec);
        put_cpu_var(lru_add_active_pvecs);
}
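/*
 * Illustrative sketch (not built): the two-step life of a freshly faulted
 * page-cache page.  lru_cache_add() queues it on this CPU's pagevec for the
 * inactive list; later accesses promote it via mark_page_accessed()
 * following the state machine documented above.  The helper below is
 * hypothetical and included only as an example.
 */
#if 0
static int example_add_and_touch(struct page *page,
                                 struct address_space *mapping, pgoff_t offset)
{
        int err;

        err = add_to_page_cache(page, mapping, offset, GFP_KERNEL);
        if (err)
                return err;
        /* New page: lands on the inactive list (batched via the pagevec). */
        lru_cache_add(page);

        /* First touch sets PG_referenced, second touch activates the page. */
        mark_page_accessed(page);
        mark_page_accessed(page);
        return 0;
}
#endif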
static void __lru_add_drain(int cpu)
{
        struct pagevec *pvec = &per_cpu(lru_add_pvecs, cpu);

        /* CPU is dead, so no locking needed. */
        if (pagevec_count(pvec))
                __pagevec_lru_add(pvec);
        pvec = &per_cpu(lru_add_active_pvecs, cpu);
        if (pagevec_count(pvec))
                __pagevec_lru_add_active(pvec);
}

void lru_add_drain(void)
{
        __lru_add_drain(get_cpu());
        put_cpu();
}

/*
 * This path almost never happens for VM activity - pages are normally
 * freed via pagevecs.  But it gets used by networking.
 */
void fastcall __page_cache_release(struct page *page)
{
        unsigned long flags;
        struct zone *zone = page_zone(page);

        spin_lock_irqsave(&zone->lru_lock, flags);
        if (TestClearPageLRU(page))
                del_page_from_lru(zone, page);
        if (page_count(page) != 0)
                page = NULL;
        spin_unlock_irqrestore(&zone->lru_lock, flags);
        if (page)
                free_hot_page(page);
}

EXPORT_SYMBOL(__page_cache_release);

/*
 * Batched page_cache_release().  Decrement the reference count on all the
 * passed pages.  If it fell to zero then remove the page from the LRU and
 * free it.
 *
 * Avoid taking zone->lru_lock if possible, but if it is taken, retain it
 * for the remainder of the operation.
 *
 * The locking in this function is against shrink_cache(): we recheck the
 * page count inside the lock to see whether shrink_cache grabbed the page
 * via the LRU.  If it did, give up: shrink_cache will free it.
 */
void release_pages(struct page **pages, int nr, int cold)
{
        int i;
        struct pagevec pages_to_free;
        struct zone *zone = NULL;

        pagevec_init(&pages_to_free, cold);
        for (i = 0; i < nr; i++) {
                struct page *page = pages[i];
                struct zone *pagezone;

                if (!put_page_testzero(page))
                        continue;

                pagezone = page_zone(page);
                if (pagezone != zone) {
                        if (zone)
                                spin_unlock_irq(&zone->lru_lock);
                        zone = pagezone;
                        spin_lock_irq(&zone->lru_lock);
                }
                if (TestClearPageLRU(page))
                        del_page_from_lru(zone, page);
                if (page_count(page) == 0) {
                        if (!pagevec_add(&pages_to_free, page)) {
                                spin_unlock_irq(&zone->lru_lock);
                                __pagevec_free(&pages_to_free);
                                pagevec_reinit(&pages_to_free);
                                zone = NULL;    /* No lock is held */
                        }
                }
        }
        if (zone)
                spin_unlock_irq(&zone->lru_lock);

        pagevec_free(&pages_to_free);
}
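/*
 * Illustrative sketch (not built): the usual consumer pattern for the
 * batched-release machinery above.  Callers gather references with
 * pagevec_lookup() and drop them all at once through pagevec_release(),
 * which funnels into release_pages().  example_drop_range() is hypothetical
 * and included only as an example.
 */
#if 0
static void example_drop_range(struct address_space *mapping, pgoff_t start)
{
        struct pagevec pvec;

        pagevec_init(&pvec, 0);
        while (pagevec_lookup(&pvec, mapping, start, PAGEVEC_SIZE)) {
                int i;

                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];

                        start = page->index + 1;
                        /* ... per-page work goes here ... */
                }
                /* Drops all the lookup references in one release_pages() call. */
                pagevec_release(&pvec);
        }
}
#endif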
/*
 * The pages which we're about to release may be in the deferred lru-addition
 * queues.  That would prevent them from really being freed right now.  That's
 * OK from a correctness point of view but is inefficient - those pages may be
 * cache-warm and we want to give them back to the page allocator ASAP.
 *
 * So __pagevec_release() will drain those queues here.  __pagevec_lru_add()
 * and __pagevec_lru_add_active() call release_pages() directly to avoid
 * mutual recursion.
 */
void __pagevec_release(struct pagevec *pvec)
{
        lru_add_drain();
        release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
        pagevec_reinit(pvec);
}

EXPORT_SYMBOL(__pagevec_release);

/*
 * pagevec_release() for pages which are known to not be on the LRU
 *
 * This function reinitialises the caller's pagevec.
 */
void __pagevec_release_nonlru(struct pagevec *pvec)
{
        int i;
        struct pagevec pages_to_free;

        pagevec_init(&pages_to_free, pvec->cold);
        for (i = 0; i < pagevec_count(pvec); i++) {
                struct page *page = pvec->pages[i];

                BUG_ON(PageLRU(page));
                if (put_page_testzero(page))
                        pagevec_add(&pages_to_free, page);
        }
        pagevec_free(&pages_to_free);
        pagevec_reinit(pvec);
}

/*
 * Add the passed pages to the LRU, then drop the caller's refcount
 * on them.  Reinitialises the caller's pagevec.
 */
void __pagevec_lru_add(struct pagevec *pvec)
{
        int i;
        struct zone *zone = NULL;

        for (i = 0; i < pagevec_count(pvec); i++) {
                struct page *page = pvec->pages[i];
                struct zone *pagezone = page_zone(page);

                if (pagezone != zone) {
                        if (zone)
                                spin_unlock_irq(&zone->lru_lock);
                        zone = pagezone;
                        spin_lock_irq(&zone->lru_lock);
                }
                if (TestSetPageLRU(page))
                        BUG();
                add_page_to_inactive_list(zone, page);
        }
        if (zone)
                spin_unlock_irq(&zone->lru_lock);
        release_pages(pvec->pages, pvec->nr, pvec->cold);
        pagevec_reinit(pvec);
}

EXPORT_SYMBOL(__pagevec_lru_add);

void __pagevec_lru_add_active(struct pagevec *pvec)
{
        int i;
        struct zone *zone = NULL;

        for (i = 0; i < pagevec_count(pvec); i++) {
                struct page *page = pvec->pages[i];
                struct zone *pagezone = page_zone(page);

                if (pagezone != zone) {
                        if (zone)
                                spin_unlock_irq(&zone->lru_lock);
                        zone = pagezone;
                        spin_lock_irq(&zone->lru_lock);
                }
                if (TestSetPageLRU(page))
                        BUG();
                if (TestSetPageActive(page))
                        BUG();
                add_page_to_active_list(zone, page);
        }
        if (zone)
                spin_unlock_irq(&zone->lru_lock);
        release_pages(pvec->pages, pvec->nr, pvec->cold);
        pagevec_reinit(pvec);
}

/*
 * Try to drop buffers from the pages in a pagevec
 */
void pagevec_strip(struct pagevec *pvec)
{
        int i;

        for (i = 0; i < pagevec_count(pvec); i++) {
                struct page *page = pvec->pages[i];

                if (PagePrivate(page) && !TestSetPageLocked(page)) {
                        try_to_release_page(page, 0);
                        unlock_page(page);
                }
        }
}
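/*
 * Illustrative sketch (not built): relieving buffer_head pressure by
 * stripping buffers from a batch of pages, in the style of the inactive-list
 * refill code in mm/vmscan.c.  example_strip_buffers() is hypothetical and
 * included only as an example.
 */
#if 0
static void example_strip_buffers(struct address_space *mapping, pgoff_t start)
{
        struct pagevec pvec;

        pagevec_init(&pvec, 0);
        if (pagevec_lookup(&pvec, mapping, start, PAGEVEC_SIZE)) {
                /*
                 * Drops buffer_heads from any page it can trylock; pages that
                 * are locked or have busy buffers are simply skipped.
                 */
                pagevec_strip(&pvec);
                pagevec_release(&pvec);
        }
}
#endif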
/**
 * pagevec_lookup - gang pagecache lookup
 * @pvec:     Where the resulting pages are placed
 * @mapping:  The address_space to search
 * @start:    The starting page index
 * @nr_pages: The maximum number of pages
 *
 * pagevec_lookup() will search for and return a group of up to @nr_pages pages
 * in the mapping.  The pages are placed in @pvec.  pagevec_lookup() takes a
 * reference against the pages in @pvec.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 *
 * pagevec_lookup() returns the number of pages which were found.
 */
unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
                pgoff_t start, unsigned nr_pages)
{
        pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);
        return pagevec_count(pvec);
}

unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
                pgoff_t *index, int tag, unsigned nr_pages)
{
        pvec->nr = find_get_pages_tag(mapping, index, tag,
                                        nr_pages, pvec->pages);
        return pagevec_count(pvec);
}

EXPORT_SYMBOL(pagevec_lookup_tag);

#ifdef CONFIG_SMP
/*
 * We tolerate a little inaccuracy to avoid ping-ponging the counter between
 * CPUs.
 */
#define ACCT_THRESHOLD  max(16, NR_CPUS * 2)

static DEFINE_PER_CPU(long, committed_space) = 0;

void vm_acct_memory(long pages)
{
        long *local;

        preempt_disable();
        local = &__get_cpu_var(committed_space);
        *local += pages;
        if (*local > ACCT_THRESHOLD || *local < -ACCT_THRESHOLD) {
                atomic_add(*local, &vm_committed_space);
                *local = 0;
        }
        preempt_enable();
}

#ifdef CONFIG_HOTPLUG_CPU

/* Drop the CPU's cached committed space back into the central pool. */
static int cpu_swap_callback(struct notifier_block *nfb,
                             unsigned long action,
                             void *hcpu)
{
        long *committed;

        committed = &per_cpu(committed_space, (long)hcpu);
        if (action == CPU_DEAD) {
                atomic_add(*committed, &vm_committed_space);
                *committed = 0;
                __lru_add_drain((long)hcpu);
        }
        return NOTIFY_OK;
}
#endif /* CONFIG_HOTPLUG_CPU */
#endif /* CONFIG_SMP */

#ifdef CONFIG_SMP
void percpu_counter_mod(struct percpu_counter *fbc, long amount)
{
        long count;
        long *pcount;
        int cpu = get_cpu();

        pcount = per_cpu_ptr(fbc->counters, cpu);
        count = *pcount + amount;
        if (count >= FBC_BATCH || count <= -FBC_BATCH) {
                spin_lock(&fbc->lock);
                fbc->count += count;
                spin_unlock(&fbc->lock);
                count = 0;
        }
        *pcount = count;
        put_cpu();
}
EXPORT_SYMBOL(percpu_counter_mod);
#endif

/*
 * Perform any setup for the swap system
 */
void __init swap_setup(void)
{
        unsigned long megs = num_physpages >> (20 - PAGE_SHIFT);

        /* Use a smaller cluster for small-memory machines */
        if (megs < 16)
                page_cluster = 2;
        else
                page_cluster = 3;
        /*
         * Right now other parts of the system mean that we
         * _really_ don't want to cluster much more.
         */
        hotcpu_notifier(cpu_swap_callback, 0);
}
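/*
 * Illustrative sketch (not built): how the batched per-CPU counter above is
 * meant to be used.  Updates go through percpu_counter_mod() and stay in the
 * per-CPU slot until they exceed FBC_BATCH, so reads of the central value are
 * only approximate.  example_counter and example_account() are hypothetical
 * and included only as an example.
 */
#if 0
static struct percpu_counter example_counter;

static void example_account(long nr_pages)
{
        /* Cheap on the fast path: usually touches only this CPU's slot. */
        percpu_counter_mod(&example_counter, nr_pages);

        /*
         * The central value may lag each CPU's pending delta by up to
         * FBC_BATCH - 1; that is the accepted inaccuracy.
         */
        if (percpu_counter_read(&example_counter) < 0)
                printk(KERN_DEBUG "example counter went (approximately) negative\n");
}
#endif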