vmscan.c (ba9ddf49391645e6bb93219131a40446538a5e76) vmscan.c (89e004ea55abe201b29e2d6e35124101f1288ef7)
1/*
2 * linux/mm/vmscan.c
3 *
4 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
5 *
6 * Swap reorganised 29.12.95, Stephen Tweedie.
7 * kswapd added: 7.1.96 sct
8 * Removed kswapd_ctl limits, and swap out as many pages as needed

--- 2332 unchanged lines hidden (view full) ---

2341
2342 if (mapping_unevictable(page_mapping(page)))
2343 return 0;
2344
2345 /* TODO: test page [!]evictable conditions */
2346
2347 return 1;
2348}
1/*
2 * linux/mm/vmscan.c
3 *
4 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
5 *
6 * Swap reorganised 29.12.95, Stephen Tweedie.
7 * kswapd added: 7.1.96 sct
8 * Removed kswapd_ctl limits, and swap out as many pages as needed

--- 2332 unchanged lines hidden (view full) ---

2341
2342 if (mapping_unevictable(page_mapping(page)))
2343 return 0;
2344
2345 /* TODO: test page [!]evictable conditions */
2346
2347 return 1;
2348}
2349
2350/**
2351 * check_move_unevictable_page - check page for evictability and move to appropriate zone lru list
2352 * @page: page to check evictability and move to appropriate lru list
2353 * @zone: zone page is in
2354 *
2355 * Checks a page for evictability and moves the page to the appropriate
2356 * zone lru list.
2357 *
2358 * Restrictions: zone->lru_lock must be held, page must be on LRU and must
2359 * have PageUnevictable set.
2360 */
2361static void check_move_unevictable_page(struct page *page, struct zone *zone)
2362{
2363 VM_BUG_ON(PageActive(page));
2364
 /*
  * Optimistically clear PG_unevictable and re-test evictability.
  * Caller holds zone->lru_lock (see function header comment), so the
  * lru list manipulation and the __dec/__inc zone-state updates below
  * are safe in their non-atomic (double-underscore) forms.
  */
2365retry:
2366 ClearPageUnevictable(page);
2367 if (page_evictable(page, NULL)) {
  /* Page became evictable: rescue it onto the inactive anon or
   * file lru list, chosen by page_is_file_cache(). */
2368 enum lru_list l = LRU_INACTIVE_ANON + page_is_file_cache(page);
2369 __dec_zone_state(zone, NR_UNEVICTABLE);
2370 list_move(&page->lru, &zone->lru[l].list);
2371 __inc_zone_state(zone, NR_INACTIVE_ANON + l);
2372 __count_vm_event(UNEVICTABLE_PGRESCUED);
2373 } else {
2374 /*
2375 * rotate unevictable list
2376 */
2377 SetPageUnevictable(page);
2378 list_move(&page->lru, &zone->lru[LRU_UNEVICTABLE].list);
  /*
   * Close the race window: the page may have become evictable
   * between the page_evictable() test above and the
   * SetPageUnevictable() just done.  Re-test and retry so an
   * evictable page is never stranded on the unevictable list.
   */
2379 if (page_evictable(page, NULL))
2380 goto retry;
2381 }
2382}
2383
2384/**
2385 * scan_mapping_unevictable_pages - scan an address space for evictable pages
2386 * @mapping: struct address_space to scan for evictable pages
2387 *
2388 * Scan all pages in mapping. Check unevictable pages for
2389 * evictability and move them to the appropriate zone lru list.
2390 */
2391void scan_mapping_unevictable_pages(struct address_space *mapping)
2392{
 /* next: next page-cache index to look up; end: one past the last
  * index covered by i_size (rounded up to a whole page). */
2393 pgoff_t next = 0;
2394 pgoff_t end = (i_size_read(mapping->host) + PAGE_CACHE_SIZE - 1) >>
2395 PAGE_CACHE_SHIFT;
2396 struct zone *zone;
2397 struct pagevec pvec;
2398
 /* Empty mapping: nothing to scan. */
2399 if (mapping->nrpages == 0)
2400 return;
2401
2402 pagevec_init(&pvec, 0);
2403 while (next < end &&
2404 pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
2405 int i;
2406 int pg_scanned = 0;
2407
 /* Batch zone->lru_lock acquisition: keep the lock across
  * consecutive pages of the same zone, only cycling it when the
  * zone changes. */
2408 zone = NULL;
2409
2410 for (i = 0; i < pagevec_count(&pvec); i++) {
2411 struct page *page = pvec.pages[i];
2412 pgoff_t page_index = page->index;
2413 struct zone *pagezone = page_zone(page);
2414
2415 pg_scanned++;
 /* pagevec_lookup() can skip holes in the mapping, so
  * advance next past the index actually returned. */
2416 if (page_index > next)
2417 next = page_index;
2418 next++;
2419
2420 if (pagezone != zone) {
2421 if (zone)
2422 spin_unlock_irq(&zone->lru_lock);
2423 zone = pagezone;
2424 spin_lock_irq(&zone->lru_lock);
2425 }
2426
 /* Only pages on the LRU and currently marked unevictable
  * are candidates for rescue; lru_lock is held as
  * check_move_unevictable_page() requires. */
2427 if (PageLRU(page) && PageUnevictable(page))
2428 check_move_unevictable_page(page, zone);
2429 }
2430 if (zone)
2431 spin_unlock_irq(&zone->lru_lock);
 /* Drop the pagevec's page references before the next lookup. */
2432 pagevec_release(&pvec);
2433
2434 count_vm_events(UNEVICTABLE_PGSCANNED, pg_scanned);
2435 }
2436
2437}
2349#endif
2438#endif