swapfile.c (355cfa73ddff2fb8fa14e93bd94a057cc022512e → c9e444103b5e7a5a3519f9913f59767f92e33baf)
 /*
  * linux/mm/swapfile.c
  *
  * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
  * Swap reorganised 29.12.95, Stephen Tweedie
  */
 
 #include <linux/mm.h>

--- 65 unchanged lines hidden ---

 {
 	unsigned short ret = count;
 
 	if (has_cache)
 		return SWAP_HAS_CACHE | ret;
 	return ret;
 }
 
+/* returns 1 if the swap entry was freed */
+static int
+__try_to_reclaim_swap(struct swap_info_struct *si, unsigned long offset)
+{
+	int type = si - swap_info;
+	swp_entry_t entry = swp_entry(type, offset);
+	struct page *page;
+	int ret = 0;
+
+	page = find_get_page(&swapper_space, entry.val);
+	if (!page)
+		return 0;
+	/*
+	 * This function is called from scan_swap_map(), which in turn is
+	 * called by vmscan.c while reclaiming pages, so a page lock may
+	 * already be held here. We have to use trylock to avoid deadlock.
+	 * This is a special case; in normal operation use
+	 * try_to_free_swap() with an explicit lock_page().
+	 */
+	if (trylock_page(page)) {
+		ret = try_to_free_swap(page);
+		unlock_page(page);
+	}
+	page_cache_release(page);
+	return ret;
+}
+
 /*
  * We need this because the bdev->unplug_fn can sleep and we cannot
  * hold swap_lock while calling the unplug_fn. And swap_lock
  * cannot be turned into a mutex.
  */
 static DECLARE_RWSEM(swap_unplug_sem);
 
 void swap_unplug_io_fn(struct backing_dev_info *unused_bdi, struct page *page)

--- 205 unchanged lines hidden ---

 checks:
 	if (!(si->flags & SWP_WRITEOK))
 		goto no_page;
 	if (!si->highest_bit)
 		goto no_page;
 	if (offset > si->highest_bit)
 		scan_base = offset = si->lowest_bit;
+
+	/* reuse the swap entry of a cache-only swap if it is not busy. */
+	if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
+		int swap_was_freed;
+		spin_unlock(&swap_lock);
+		swap_was_freed = __try_to_reclaim_swap(si, offset);
+		spin_lock(&swap_lock);
+		/* the entry was freed successfully, so try to use it again */
+		if (swap_was_freed)
+			goto checks;
+		goto scan; /* check the next one */
+	}
+
 	if (si->swap_map[offset])
 		goto scan;
 
 	if (offset == si->lowest_bit)
 		si->lowest_bit++;
 	if (offset == si->highest_bit)
 		si->highest_bit--;
 	si->inuse_pages++;

--- 65 unchanged lines hidden ---

 scan:
 	spin_unlock(&swap_lock);
 	while (++offset <= si->highest_bit) {
 		if (!si->swap_map[offset]) {
 			spin_lock(&swap_lock);
 			goto checks;
 		}
+		if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
+			spin_lock(&swap_lock);
+			goto checks;
+		}
 		if (unlikely(--latency_ration < 0)) {
 			cond_resched();
 			latency_ration = LATENCY_LIMIT;
 		}
 	}
 	offset = si->lowest_bit;
 	while (++offset < scan_base) {
 		if (!si->swap_map[offset]) {
 			spin_lock(&swap_lock);
 			goto checks;
 		}
+		if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
+			spin_lock(&swap_lock);
+			goto checks;
+		}
 		if (unlikely(--latency_ration < 0)) {
 			cond_resched();
 			latency_ration = LATENCY_LIMIT;
 		}
 	}
 	spin_lock(&swap_lock);
 
 no_page:

--- 1758 unchanged lines hidden ---
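
The policy this diff adds is easiest to see in isolation. Below is a minimal userspace sketch of the new scan behaviour: a slot whose map value is exactly SWAP_HAS_CACHE is owned only by the swap cache (no process references remain), so once vm_swap_full() reports that swap is tight, such a slot is worth reclaiming and reusing instead of skipping. All names below (HAS_CACHE, swap_nearly_full(), try_to_reclaim(), scan_for_free_slot()) are hypothetical stand-ins for illustration, not the kernel API.

#include <stdbool.h>
#include <stdio.h>

#define HAS_CACHE	0x100	/* stand-in for SWAP_HAS_CACHE */
#define NSLOTS		8

/* each entry models si->swap_map[offset]: count bits plus the cache flag */
static unsigned short swap_map[NSLOTS] = {
	3, HAS_CACHE, 0, 1 | HAS_CACHE, HAS_CACHE, 2, 0, HAS_CACHE,
};

/* stand-in for vm_swap_full(): pretend swap usage is past the threshold */
static bool swap_nearly_full(void)
{
	return true;
}

/*
 * stand-in for __try_to_reclaim_swap(): pretend the trylock and
 * try_to_free_swap() always succeed, leaving the slot free
 */
static bool try_to_reclaim(int slot)
{
	swap_map[slot] = 0;
	return true;
}

/*
 * the per-slot decision the patch adds to scan_swap_map()'s loops:
 * a free slot is used as-is; when swap is tight, a cache-only slot
 * is also acceptable once it has been reclaimed
 */
static int scan_for_free_slot(void)
{
	for (int i = 0; i < NSLOTS; i++) {
		if (!swap_map[i])
			return i;	/* free, use it directly */
		if (swap_nearly_full() && swap_map[i] == HAS_CACHE &&
		    try_to_reclaim(i))
			return i;	/* cache-only, now reusable */
	}
	return -1;
}

int main(void)
{
	int slot = scan_for_free_slot();

	if (slot >= 0)
		printf("allocated slot %d (map=%#x)\n", slot, swap_map[slot]);
	return 0;
}

With the table above this prints "allocated slot 1 (map=0)": slot 0 is busy (count 3), and slot 1 is cache-only, so under pressure it is reclaimed and handed out before the scan ever reaches the genuinely free slot 2. Note that the test is for equality with SWAP_HAS_CACHE, not for the flag bit alone: any nonzero count bits mean some process still references the entry, and those slots are never reclaimed here.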