/*
 *  linux/mm/swapfile.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/namei.h>
#include <linux/shm.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/backing-dev.h>
#include <linux/mutex.h>
#include <linux/capability.h>
#include <linux/syscalls.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <linux/swapops.h>

DEFINE_SPINLOCK(swap_lock);
unsigned int nr_swapfiles;
long total_swap_pages;
static int swap_overflow;

static const char Bad_file[] = "Bad swap file entry ";
static const char Unused_file[] = "Unused swap file entry ";
static const char Bad_offset[] = "Bad swap offset entry ";
static const char Unused_offset[] = "Unused swap offset entry ";

struct swap_list_t swap_list = {-1, -1};

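/*
 * swap_info[] is indexed by the swap type that swp_type() extracts
 * from a swp_entry_t.  swap_list.head indexes the highest-priority
 * area and each area's ->next chains them in falling priority;
 * swap_list.next is the round-robin cursor consulted by
 * get_swap_page() below.  {-1, -1} means no areas are active yet.
 */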
static struct swap_info_struct swap_info[MAX_SWAPFILES];

static DEFINE_MUTEX(swapon_mutex);

/*
 * We need this because the bdev->unplug_fn can sleep and we cannot
 * hold swap_lock while calling the unplug_fn. And swap_lock
 * cannot be turned into a mutex.
 */
static DECLARE_RWSEM(swap_unplug_sem);

void swap_unplug_io_fn(struct backing_dev_info *unused_bdi, struct page *page)
{
	swp_entry_t entry;

	down_read(&swap_unplug_sem);
	entry.val = page_private(page);
	if (PageSwapCache(page)) {
		struct block_device *bdev = swap_info[swp_type(entry)].bdev;
		struct backing_dev_info *bdi;

		/*
		 * If the page is removed from the swapcache under us (by a
		 * racy try_to_unuse/swapoff) we need an additional reference
		 * count to avoid reading garbage from page_private(page) above.
		 * If the WARN_ON triggers during a swapoff it may be this
		 * race and is harmless. However, if it triggers without
		 * swapoff it signals a problem.
		 */
		WARN_ON(page_count(page) <= 1);

		bdi = bdev->bd_inode->i_mapping->backing_dev_info;
		blk_run_backing_dev(bdi, page);
	}
	up_read(&swap_unplug_sem);
}

#define SWAPFILE_CLUSTER	256
#define LATENCY_LIMIT		256

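/*
 * Rough numbers: with 4K pages a SWAPFILE_CLUSTER of 256 covers a
 * 1MB run of swap, and LATENCY_LIMIT makes the scan loops below
 * offer cond_resched() after every 256 probes of the swap_map so a
 * long search cannot hog the CPU.
 */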
static inline unsigned long scan_swap_map(struct swap_info_struct *si)
{
	unsigned long offset, last_in_cluster;
	int latency_ration = LATENCY_LIMIT;

	/*
	 * We try to cluster swap pages by allocating them sequentially
	 * in swap.  Once we've allocated SWAPFILE_CLUSTER pages this
	 * way, however, we resort to first-free allocation, starting
	 * a new cluster.  This prevents us from scattering swap pages
	 * all over the entire swap partition, so that we reduce
	 * overall disk seek times between swap pages.  -- sct
	 * But we do now try to find an empty cluster.  -Andrea
	 */

	si->flags += SWP_SCANNING;
	if (unlikely(!si->cluster_nr)) {
		si->cluster_nr = SWAPFILE_CLUSTER - 1;
		if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER)
			goto lowest;
		spin_unlock(&swap_lock);

		offset = si->lowest_bit;
		last_in_cluster = offset + SWAPFILE_CLUSTER - 1;

		/* Locate the first empty (unaligned) cluster */
		for (; last_in_cluster <= si->highest_bit; offset++) {
			if (si->swap_map[offset])
				last_in_cluster = offset + SWAPFILE_CLUSTER;
			else if (offset == last_in_cluster) {
				spin_lock(&swap_lock);
				si->cluster_next = offset-SWAPFILE_CLUSTER+1;
				goto cluster;
			}
			if (unlikely(--latency_ration < 0)) {
				cond_resched();
				latency_ration = LATENCY_LIMIT;
			}
		}
		spin_lock(&swap_lock);
		goto lowest;
	}

	si->cluster_nr--;
cluster:
	offset = si->cluster_next;
	if (offset > si->highest_bit)
lowest:		offset = si->lowest_bit;
checks:	if (!(si->flags & SWP_WRITEOK))
		goto no_page;
	if (!si->highest_bit)
		goto no_page;
	if (!si->swap_map[offset]) {
		if (offset == si->lowest_bit)
			si->lowest_bit++;
		if (offset == si->highest_bit)
			si->highest_bit--;
		si->inuse_pages++;
		if (si->inuse_pages == si->pages) {
			si->lowest_bit = si->max;
			si->highest_bit = 0;
		}
		si->swap_map[offset] = 1;
		si->cluster_next = offset + 1;
		si->flags -= SWP_SCANNING;
		return offset;
	}

	spin_unlock(&swap_lock);
	while (++offset <= si->highest_bit) {
		if (!si->swap_map[offset]) {
			spin_lock(&swap_lock);
			goto checks;
		}
		if (unlikely(--latency_ration < 0)) {
			cond_resched();
			latency_ration = LATENCY_LIMIT;
		}
	}
	spin_lock(&swap_lock);
	goto lowest;

no_page:
	si->flags -= SWP_SCANNING;
	return 0;
}

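/*
 * A rough sketch of the caller's side (illustrative, not taken from
 * this file):
 *
 *	swp_entry_t entry = get_swap_page();
 *	if (!entry.val)
 *		return 0;	<- all swap areas are full
 *
 * entry.val == 0 is the failure value throughout this file; real
 * callers such as add_to_swap() then install the entry into the
 * swap cache.
 */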
swp_entry_t get_swap_page(void)
{
	struct swap_info_struct *si;
	pgoff_t offset;
	int type, next;
	int wrapped = 0;

	spin_lock(&swap_lock);
	if (nr_swap_pages <= 0)
		goto noswap;
	nr_swap_pages--;

	for (type = swap_list.next; type >= 0 && wrapped < 2; type = next) {
		si = swap_info + type;
		next = si->next;
		if (next < 0 ||
		    (!wrapped && si->prio != swap_info[next].prio)) {
			next = swap_list.head;
			wrapped++;
		}

		if (!si->highest_bit)
			continue;
		if (!(si->flags & SWP_WRITEOK))
			continue;

		swap_list.next = next;
		offset = scan_swap_map(si);
		if (offset) {
			spin_unlock(&swap_lock);
			return swp_entry(type, offset);
		}
		next = swap_list.next;
	}

	nr_swap_pages++;
noswap:
	spin_unlock(&swap_lock);
	return (swp_entry_t) {0};
}

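/*
 * Like get_swap_page(), but the caller chooses the swap area; this
 * variant is intended for the software suspend code, which must
 * allocate its image pages from one known device.
 */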
swp_entry_t get_swap_page_of_type(int type)
{
	struct swap_info_struct *si;
	pgoff_t offset;

	spin_lock(&swap_lock);
	si = swap_info + type;
	if (si->flags & SWP_WRITEOK) {
		nr_swap_pages--;
		offset = scan_swap_map(si);
		if (offset) {
			spin_unlock(&swap_lock);
			return swp_entry(type, offset);
		}
		nr_swap_pages++;
	}
	spin_unlock(&swap_lock);
	return (swp_entry_t) {0};
}

static struct swap_info_struct * swap_info_get(swp_entry_t entry)
{
	struct swap_info_struct * p;
	unsigned long offset, type;

	if (!entry.val)
		goto out;
	type = swp_type(entry);
	if (type >= nr_swapfiles)
		goto bad_nofile;
	p = & swap_info[type];
	if (!(p->flags & SWP_USED))
		goto bad_device;
	offset = swp_offset(entry);
	if (offset >= p->max)
		goto bad_offset;
	if (!p->swap_map[offset])
		goto bad_free;
	spin_lock(&swap_lock);
	return p;

bad_free:
	printk(KERN_ERR "swap_free: %s%08lx\n", Unused_offset, entry.val);
	goto out;
bad_offset:
	printk(KERN_ERR "swap_free: %s%08lx\n", Bad_offset, entry.val);
	goto out;
bad_device:
	printk(KERN_ERR "swap_free: %s%08lx\n", Unused_file, entry.val);
	goto out;
bad_nofile:
	printk(KERN_ERR "swap_free: %s%08lx\n", Bad_file, entry.val);
out:
	return NULL;
}

static int swap_entry_free(struct swap_info_struct *p, unsigned long offset)
{
	int count = p->swap_map[offset];

	if (count < SWAP_MAP_MAX) {
		count--;
		p->swap_map[offset] = count;
		if (!count) {
			if (offset < p->lowest_bit)
				p->lowest_bit = offset;
			if (offset > p->highest_bit)
				p->highest_bit = offset;
			if (p->prio > swap_info[swap_list.next].prio)
				swap_list.next = p - swap_info;
			nr_swap_pages++;
			p->inuse_pages--;
		}
	}
	return count;
}

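/*
 * swap_map[offset] is a reference count: swap_duplicate() raises it
 * and swap_entry_free() above drops it.  For example, a count of 2
 * typically means the swap cache plus one pte still reference the
 * entry; only when the count returns to 0 does the slot become
 * available to scan_swap_map() again.
 */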
/*
 * Caller has made sure that the swapdevice corresponding to entry
 * is still around or has not been recycled.
 */
void swap_free(swp_entry_t entry)
{
	struct swap_info_struct * p;

	p = swap_info_get(entry);
	if (p) {
		swap_entry_free(p, swp_offset(entry));
		spin_unlock(&swap_lock);
	}
}

/*
 * How many references to page are currently swapped out?
 */
static inline int page_swapcount(struct page *page)
{
	int count = 0;
	struct swap_info_struct *p;
	swp_entry_t entry;

	entry.val = page_private(page);
	p = swap_info_get(entry);
	if (p) {
		/* Subtract the 1 for the swap cache itself */
		count = p->swap_map[swp_offset(entry)] - 1;
		spin_unlock(&swap_lock);
	}
	return count;
}

/*
 * We can use this swap cache entry directly
 * if there are no other references to it.
 */
int can_share_swap_page(struct page *page)
{
	int count;

	BUG_ON(!PageLocked(page));
	count = page_mapcount(page);
	if (count <= 1 && PageSwapCache(page))
		count += page_swapcount(page);
	return count == 1;
}

/*
 * Work out if there are any other processes sharing this
 * swap cache page. Free it if you can. Return success.
 */
int remove_exclusive_swap_page(struct page *page)
{
	int retval;
	struct swap_info_struct * p;
	swp_entry_t entry;

	BUG_ON(PagePrivate(page));
	BUG_ON(!PageLocked(page));

	if (!PageSwapCache(page))
		return 0;
	if (PageWriteback(page))
		return 0;
	if (page_count(page) != 2) /* 2: us + cache */
		return 0;

	entry.val = page_private(page);
	p = swap_info_get(entry);
	if (!p)
		return 0;

	/* Is the only swap cache user the cache itself? */
	retval = 0;
	if (p->swap_map[swp_offset(entry)] == 1) {
		/* Recheck the page count with the swapcache lock held.. */
		write_lock_irq(&swapper_space.tree_lock);
		if ((page_count(page) == 2) && !PageWriteback(page)) {
			__delete_from_swap_cache(page);
			SetPageDirty(page);
			retval = 1;
		}
		write_unlock_irq(&swapper_space.tree_lock);
	}
	spin_unlock(&swap_lock);

	if (retval) {
		swap_free(entry);
		page_cache_release(page);
	}

	return retval;
}

/*
 * Free the swap entry like above, but also try to
 * free the page cache entry if it is the last user.
 */
void free_swap_and_cache(swp_entry_t entry)
{
	struct swap_info_struct * p;
	struct page *page = NULL;

	if (is_migration_entry(entry))
		return;

	p = swap_info_get(entry);
	if (p) {
		if (swap_entry_free(p, swp_offset(entry)) == 1) {
			page = find_get_page(&swapper_space, entry.val);
			if (page && unlikely(TestSetPageLocked(page))) {
				page_cache_release(page);
				page = NULL;
			}
		}
		spin_unlock(&swap_lock);
	}
	if (page) {
		int one_user;

		BUG_ON(PagePrivate(page));
		one_user = (page_count(page) == 2);
		/* Only cache user (+us), or swap space full? Free it! */
		/* Also recheck PageSwapCache after page is locked (above) */
		if (PageSwapCache(page) && !PageWriteback(page) &&
					(one_user || vm_swap_full())) {
			delete_from_swap_cache(page);
			SetPageDirty(page);
		}
		unlock_page(page);
		page_cache_release(page);
	}
}

#ifdef CONFIG_SOFTWARE_SUSPEND
/*
 * Find the swap type that corresponds to given device (if any).
 *
 * @offset - number of the PAGE_SIZE-sized block of the device, starting
 * from 0, in which the swap header is expected to be located.
 *
 * This is needed for the suspend to disk (aka swsusp).
 */
int swap_type_of(dev_t device, sector_t offset)
{
	struct block_device *bdev = NULL;
	int i;

	if (device)
		bdev = bdget(device);

	spin_lock(&swap_lock);
	for (i = 0; i < nr_swapfiles; i++) {
		struct swap_info_struct *sis = swap_info + i;

		if (!(sis->flags & SWP_WRITEOK))
			continue;

		if (!bdev) {
			spin_unlock(&swap_lock);
			return i;
		}
		if (bdev == sis->bdev) {
			struct swap_extent *se;

			se = list_entry(sis->extent_list.next,
					struct swap_extent, list);
			if (se->start_block == offset) {
				spin_unlock(&swap_lock);
				bdput(bdev);
				return i;
			}
		}
	}
	spin_unlock(&swap_lock);
	if (bdev)
		bdput(bdev);

	return -ENODEV;
}

/*
 * Return either the total number of swap pages of given type, or the number
 * of free pages of that type (depending on @free)
 *
 * This is needed for software suspend
 */
unsigned int count_swap_pages(int type, int free)
{
	unsigned int n = 0;

	if (type < nr_swapfiles) {
		spin_lock(&swap_lock);
		if (swap_info[type].flags & SWP_WRITEOK) {
			n = swap_info[type].pages;
			if (free)
				n -= swap_info[type].inuse_pages;
		}
		spin_unlock(&swap_lock);
	}
	return n;
}
#endif /* CONFIG_SOFTWARE_SUSPEND */

/*
 * No need to decide whether this PTE shares the swap entry with others,
 * just let do_wp_page work it out if a write is requested later - to
 * force COW, vm_page_prot omits write permission from any private vma.
 */
static void unuse_pte(struct vm_area_struct *vma, pte_t *pte,
		unsigned long addr, swp_entry_t entry, struct page *page)
{
	inc_mm_counter(vma->vm_mm, anon_rss);
	get_page(page);
	set_pte_at(vma->vm_mm, addr, pte,
		   pte_mkold(mk_pte(page, vma->vm_page_prot)));
	page_add_anon_rmap(page, vma, addr);
	swap_free(entry);
	/*
	 * Move the page to the active list so it is not
	 * immediately swapped out again after swapon.
	 */
	activate_page(page);
}

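/*
 * The unuse_pte_range/unuse_pmd_range/unuse_pud_range/unuse_vma
 * helpers below walk the four-level page table top-down,
 * pgd -> pud -> pmd -> pte, comparing each pte with the swap entry
 * being eliminated; a return of 1 propagates "found it" back up
 * the walk.
 */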
static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
				unsigned long addr, unsigned long end,
				swp_entry_t entry, struct page *page)
{
	pte_t swp_pte = swp_entry_to_pte(entry);
	pte_t *pte;
	spinlock_t *ptl;
	int found = 0;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		/*
		 * swapoff spends a _lot_ of time in this loop!
		 * Test inline before going to call unuse_pte.
		 */
		if (unlikely(pte_same(*pte, swp_pte))) {
			unuse_pte(vma, pte++, addr, entry, page);
			found = 1;
			break;
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(pte - 1, ptl);
	return found;
}

static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
				unsigned long addr, unsigned long end,
				swp_entry_t entry, struct page *page)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		if (unuse_pte_range(vma, pmd, addr, next, entry, page))
			return 1;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int unuse_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				swp_entry_t entry, struct page *page)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		if (unuse_pmd_range(vma, pud, addr, next, entry, page))
			return 1;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static int unuse_vma(struct vm_area_struct *vma,
				swp_entry_t entry, struct page *page)
{
	pgd_t *pgd;
	unsigned long addr, end, next;

	if (page->mapping) {
		addr = page_address_in_vma(page, vma);
		if (addr == -EFAULT)
			return 0;
		else
			end = addr + PAGE_SIZE;
	} else {
		addr = vma->vm_start;
		end = vma->vm_end;
	}

	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		if (unuse_pud_range(vma, pgd, addr, next, entry, page))
			return 1;
	} while (pgd++, addr = next, addr != end);
	return 0;
}

static int unuse_mm(struct mm_struct *mm,
				swp_entry_t entry, struct page *page)
{
	struct vm_area_struct *vma;

	if (!down_read_trylock(&mm->mmap_sem)) {
		/*
		 * Activate page so shrink_cache is unlikely to unmap its
		 * ptes while lock is dropped, so swapoff can make progress.
		 */
		activate_page(page);
		unlock_page(page);
		down_read(&mm->mmap_sem);
		lock_page(page);
	}
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma->anon_vma && unuse_vma(vma, entry, page))
			break;
	}
	up_read(&mm->mmap_sem);
	/*
	 * Currently unuse_mm cannot fail, but leave error handling
	 * at call sites for now, since we change it from time to time.
	 */
	return 0;
}

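/*
 * Example: if prev was 100 and every entry above 100 is already
 * free, the loop below wraps around and rechecks offsets 1..100
 * (offset 0 is the header page) before concluding, via prev == 0,
 * that the map is empty and returning 0.
 */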
/*
 * Scan swap_map from current position to next entry still in use.
 * Recycle to start on reaching the end, returning 0 when empty.
 */
static unsigned int find_next_to_unuse(struct swap_info_struct *si,
					unsigned int prev)
{
	unsigned int max = si->max;
	unsigned int i = prev;
	int count;

	/*
	 * No need for swap_lock here: we're just looking
	 * for whether an entry is in use, not modifying it; false
	 * hits are okay, and sys_swapoff() has already prevented new
	 * allocations from this area (while holding swap_lock).
	 */
	for (;;) {
		if (++i >= max) {
			if (!prev) {
				i = 0;
				break;
			}
			/*
			 * No entries in use at top of swap_map,
			 * loop back to start and recheck there.
			 */
			max = prev + 1;
			prev = 0;
			i = 1;
		}
		count = si->swap_map[i];
		if (count && count != SWAP_MAP_BAD)
			break;
	}
	return i;
}

/*
 * We completely avoid races by reading each swap page in advance,
 * and then search for the process using it.  All the necessary
 * page table adjustments can then be made atomically.
 */
static int try_to_unuse(unsigned int type)
{
	struct swap_info_struct * si = &swap_info[type];
	struct mm_struct *start_mm;
	unsigned short *swap_map;
	unsigned short swcount;
	struct page *page;
	swp_entry_t entry;
	unsigned int i = 0;
	int retval = 0;
	int reset_overflow = 0;
	int shmem;

	/*
	 * When searching mms for an entry, a good strategy is to
	 * start at the first mm we freed the previous entry from
	 * (though actually we don't notice whether we or coincidence
	 * freed the entry).  Initialize this start_mm with a hold.
	 *
	 * A simpler strategy would be to start at the last mm we
	 * freed the previous entry from; but that would take less
	 * advantage of mmlist ordering, which clusters forked mms
	 * together, child after parent.  If we race with dup_mmap(), we
	 * prefer to resolve parent before child, lest we miss entries
	 * duplicated after we scanned child: using last mm would invert
	 * that.  Though it's only a serious concern when an overflowed
	 * swap count is reset from SWAP_MAP_MAX, preventing a rescan.
	 */
	start_mm = &init_mm;
	atomic_inc(&init_mm.mm_users);

	/*
	 * Keep on scanning until all entries have gone.  Usually,
	 * one pass through swap_map is enough, but not necessarily:
	 * there are races when an instance of an entry might be missed.
	 */
	while ((i = find_next_to_unuse(si, i)) != 0) {
		if (signal_pending(current)) {
			retval = -EINTR;
			break;
		}

		/*
		 * Get a page for the entry, using the existing swap
		 * cache page if there is one.  Otherwise, get a clean
		 * page and read the swap into it.
		 */
		swap_map = &si->swap_map[i];
		entry = swp_entry(type, i);
		page = read_swap_cache_async(entry, NULL, 0);
		if (!page) {
			/*
			 * Either swap_duplicate() failed because entry
			 * has been freed independently, and will not be
			 * reused since sys_swapoff() already disabled
			 * allocation from here, or alloc_page() failed.
			 */
			if (!*swap_map)
				continue;
			retval = -ENOMEM;
			break;
		}

		/*
		 * Don't hold on to start_mm if it looks like exiting.
		 */
		if (atomic_read(&start_mm->mm_users) == 1) {
			mmput(start_mm);
			start_mm = &init_mm;
			atomic_inc(&init_mm.mm_users);
		}

		/*
		 * Wait for and lock page.  When do_swap_page races with
		 * try_to_unuse, do_swap_page can handle the fault much
		 * faster than try_to_unuse can locate the entry.  This
		 * apparently redundant "wait_on_page_locked" lets try_to_unuse
		 * defer to do_swap_page in such a case - in some tests,
		 * do_swap_page and try_to_unuse repeatedly compete.
		 */
		wait_on_page_locked(page);
		wait_on_page_writeback(page);
		lock_page(page);
		wait_on_page_writeback(page);

		/*
		 * Remove all references to entry.
		 * Whenever we reach init_mm, there's no address space
		 * to search, but use it as a reminder to search shmem.
		 */
		shmem = 0;
		swcount = *swap_map;
		if (swcount > 1) {
			if (start_mm == &init_mm)
				shmem = shmem_unuse(entry, page);
			else
				retval = unuse_mm(start_mm, entry, page);
		}
		if (*swap_map > 1) {
			int set_start_mm = (*swap_map >= swcount);
			struct list_head *p = &start_mm->mmlist;
			struct mm_struct *new_start_mm = start_mm;
			struct mm_struct *prev_mm = start_mm;
			struct mm_struct *mm;

			atomic_inc(&new_start_mm->mm_users);
			atomic_inc(&prev_mm->mm_users);
			spin_lock(&mmlist_lock);
			while (*swap_map > 1 && !retval &&
					(p = p->next) != &start_mm->mmlist) {
				mm = list_entry(p, struct mm_struct, mmlist);
				if (!atomic_inc_not_zero(&mm->mm_users))
					continue;
				spin_unlock(&mmlist_lock);
				mmput(prev_mm);
				prev_mm = mm;

				cond_resched();

				swcount = *swap_map;
				if (swcount <= 1)
					;
				else if (mm == &init_mm) {
					set_start_mm = 1;
					shmem = shmem_unuse(entry, page);
				} else
					retval = unuse_mm(mm, entry, page);
				if (set_start_mm && *swap_map < swcount) {
					mmput(new_start_mm);
					atomic_inc(&mm->mm_users);
					new_start_mm = mm;
					set_start_mm = 0;
				}
				spin_lock(&mmlist_lock);
			}
			spin_unlock(&mmlist_lock);
			mmput(prev_mm);
			mmput(start_mm);
			start_mm = new_start_mm;
		}
		if (retval) {
			unlock_page(page);
			page_cache_release(page);
			break;
		}

		/*
		 * How could swap count reach 0x7fff when the maximum
		 * pid is 0x7fff, and there's no way to repeat a swap
		 * page within an mm (except in shmem, where it's the
		 * shared object which takes the reference count)?
		 * We believe SWAP_MAP_MAX cannot occur in Linux 2.4.
		 *
		 * If that's wrong, then we should worry more about
		 * exit_mmap() and do_munmap() cases described above:
		 * we might be resetting SWAP_MAP_MAX too early here.
		 * We know "Undead"s can happen, they're okay, so don't
		 * report them; but do report if we reset SWAP_MAP_MAX.
		 */
		if (*swap_map == SWAP_MAP_MAX) {
			spin_lock(&swap_lock);
			*swap_map = 1;
			spin_unlock(&swap_lock);
			reset_overflow = 1;
		}

		/*
		 * If a reference remains (rare), we would like to leave
		 * the page in the swap cache; but try_to_unmap could
		 * then re-duplicate the entry once we drop page lock,
		 * so we might loop indefinitely; also, that page could
		 * not be swapped out to other storage meanwhile.  So:
		 * delete from cache even if there's another reference,
		 * after ensuring that the data has been saved to disk -
		 * since if the reference remains (rarer), it will be
		 * read from disk into another page.  Splitting into two
		 * pages would be incorrect if swap supported "shared
		 * private" pages, but they are handled by tmpfs files.
		 *
		 * Note shmem_unuse already deleted a swappage from
		 * the swap cache, unless the move to filepage failed:
		 * in which case it left swappage in cache, lowered its
		 * swap count to pass quickly through the loops above,
		 * and now we must reincrement count to try again later.
		 */
		if ((*swap_map > 1) && PageDirty(page) && PageSwapCache(page)) {
			struct writeback_control wbc = {
				.sync_mode = WB_SYNC_NONE,
			};

			swap_writepage(page, &wbc);
			lock_page(page);
			wait_on_page_writeback(page);
		}
		if (PageSwapCache(page)) {
			if (shmem)
				swap_duplicate(entry);
			else
				delete_from_swap_cache(page);
		}

		/*
		 * Since we could skip searching mms once the swap count
		 * went to 1, we did not mark any present ptes as dirty:
		 * we must mark the page dirty here so shrink_list will
		 * preserve it.
		 */
		SetPageDirty(page);
		unlock_page(page);
		page_cache_release(page);

		/*
		 * Make sure that we aren't completely killing
		 * interactive performance.
		 */
		cond_resched();
	}

	mmput(start_mm);
	if (reset_overflow) {
		printk(KERN_WARNING "swapoff: cleared swap entry overflow\n");
		swap_overflow = 0;
	}
	return retval;
}

/*
 * After a successful try_to_unuse, if no swap is now in use, we know
 * we can empty the mmlist.  swap_lock must be held on entry and exit.
 * Note that mmlist_lock nests inside swap_lock, and an mm must be
 * added to the mmlist just after swap_duplicate - before would be racy.
 */
static void drain_mmlist(void)
{
	struct list_head *p, *next;
	unsigned int i;

	for (i = 0; i < nr_swapfiles; i++)
		if (swap_info[i].inuse_pages)
			return;
	spin_lock(&mmlist_lock);
	list_for_each_safe(p, next, &init_mm.mmlist)
		list_del_init(p);
	spin_unlock(&mmlist_lock);
}

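/*
 * Worked example for map_swap_page() below: given an extent with
 * start_page 10, nr_pages 20 and start_block 1000, page offset 15
 * maps to disk block 1000 + (15 - 10) = 1005.
 */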
/*
 * Use this swapdev's extent info to locate the (PAGE_SIZE) block which
 * corresponds to page offset `offset'.
 */
sector_t map_swap_page(struct swap_info_struct *sis, pgoff_t offset)
{
	struct swap_extent *se = sis->curr_swap_extent;
	struct swap_extent *start_se = se;

	for ( ; ; ) {
		struct list_head *lh;

		if (se->start_page <= offset &&
				offset < (se->start_page + se->nr_pages)) {
			return se->start_block + (offset - se->start_page);
		}
		lh = se->list.next;
		if (lh == &sis->extent_list)
			lh = lh->next;
		se = list_entry(lh, struct swap_extent, list);
		sis->curr_swap_extent = se;
		BUG_ON(se == start_se);		/* It *must* be present */
	}
}

/*
 * Free all of a swapdev's extent information
 */
static void destroy_swap_extents(struct swap_info_struct *sis)
{
	while (!list_empty(&sis->extent_list)) {
		struct swap_extent *se;

		se = list_entry(sis->extent_list.next,
				struct swap_extent, list);
		list_del(&se->list);
		kfree(se);
	}
}

/*
 * Add a block range (and the corresponding page range) into this swapdev's
 * extent list.  The extent list is kept sorted in page order.
 *
 * This function rather assumes that it is called in ascending page order.
 */
static int
add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
		unsigned long nr_pages, sector_t start_block)
{
	struct swap_extent *se;
	struct swap_extent *new_se;
	struct list_head *lh;

	lh = sis->extent_list.prev;	/* The highest page extent */
	if (lh != &sis->extent_list) {
		se = list_entry(lh, struct swap_extent, list);
		BUG_ON(se->start_page + se->nr_pages != start_page);
		if (se->start_block + se->nr_pages == start_block) {
			/* Merge it */
			se->nr_pages += nr_pages;
			return 0;
		}
	}

	/*
	 * No merge.  Insert a new extent, preserving ordering.
	 */
	new_se = kmalloc(sizeof(*se), GFP_KERNEL);
	if (new_se == NULL)
		return -ENOMEM;
	new_se->start_page = start_page;
	new_se->nr_pages = nr_pages;
	new_se->start_block = start_block;

	list_add_tail(&new_se->list, &sis->extent_list);
	return 1;
}

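/*
 * Example of the merge case above: after add_swap_extent(sis, 0, 1,
 * 100), the call add_swap_extent(sis, 1, 1, 101) extends the first
 * extent to nr_pages == 2 and returns 0, whereas a discontiguous
 * call such as add_swap_extent(sis, 2, 1, 200) allocates a fresh
 * extent and returns 1; setup_swap_extents() sums those returns to
 * count extents.
 */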
/*
 * A `swap extent' is a simple thing which maps a contiguous range of pages
 * onto a contiguous range of disk blocks.  An ordered list of swap extents
 * is built at swapon time and is then used at swap_writepage/swap_readpage
 * time for locating where on disk a page belongs.
 *
 * If the swapfile is an S_ISBLK block device, a single extent is installed.
 * This is done so that the main operating code can treat S_ISBLK and S_ISREG
 * swap files identically.
 *
 * Whether the swapdev is an S_ISREG file or an S_ISBLK blockdev, the swap
 * extent list operates in PAGE_SIZE disk blocks.  Both S_ISREG and S_ISBLK
 * swapfiles are handled *identically* after swapon time.
 *
 * For S_ISREG swapfiles, setup_swap_extents() will walk all the file's blocks
 * and will parse them into an ordered extent list, in PAGE_SIZE chunks.  If
 * some stray blocks are found which do not fall within the PAGE_SIZE alignment
 * requirements, they are simply tossed out - we will never use those blocks
 * for swapping.
 *
 * For S_ISREG swapfiles we set S_SWAPFILE across the life of the swapon.  This
 * prevents root from shooting her foot off by ftruncating an in-use swapfile,
 * which will scribble on the fs.
 *
 * The amount of disk space which a single swap extent represents varies.
 * Typically it is in the 1-4 megabyte range.  So we can have hundreds of
 * extents in the list.  To avoid much list walking, we cache the previous
 * search location in `curr_swap_extent', and start new searches from there.
 * This is extremely effective.  The average number of iterations in
 * map_swap_page() has been measured at about 0.3 per page.  - akpm.
 */
static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span)
{
	struct inode *inode;
	unsigned blocks_per_page;
	unsigned long page_no;
	unsigned blkbits;
	sector_t probe_block;
	sector_t last_block;
	sector_t lowest_block = -1;
	sector_t highest_block = 0;
	int nr_extents = 0;
	int ret;

	inode = sis->swap_file->f_mapping->host;
	if (S_ISBLK(inode->i_mode)) {
		ret = add_swap_extent(sis, 0, sis->max, 0);
		*span = sis->pages;
		goto done;
	}

	blkbits = inode->i_blkbits;
	blocks_per_page = PAGE_SIZE >> blkbits;

	/*
	 * Map all the blocks into the extent list.  This code doesn't try
	 * to be very smart.
	 */
	probe_block = 0;
	page_no = 0;
	last_block = i_size_read(inode) >> blkbits;
	while ((probe_block + blocks_per_page) <= last_block &&
			page_no < sis->max) {
		unsigned block_in_page;
		sector_t first_block;

		first_block = bmap(inode, probe_block);
		if (first_block == 0)
			goto bad_bmap;

		/*
		 * It must be PAGE_SIZE aligned on-disk
		 */
		if (first_block & (blocks_per_page - 1)) {
			probe_block++;
			goto reprobe;
		}

		for (block_in_page = 1; block_in_page < blocks_per_page;
					block_in_page++) {
			sector_t block;

			block = bmap(inode, probe_block + block_in_page);
			if (block == 0)
				goto bad_bmap;
			if (block != first_block + block_in_page) {
				/* Discontiguity */
				probe_block++;
				goto reprobe;
			}
		}

		first_block >>= (PAGE_SHIFT - blkbits);
		if (page_no) {	/* exclude the header page */
			if (first_block < lowest_block)
				lowest_block = first_block;
			if (first_block > highest_block)
				highest_block = first_block;
		}

		/*
		 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
		 */
		ret = add_swap_extent(sis, page_no, 1, first_block);
		if (ret < 0)
			goto out;
		nr_extents += ret;
		page_no++;
		probe_block += blocks_per_page;
reprobe:
		continue;
	}
	ret = nr_extents;
	*span = 1 + highest_block - lowest_block;
	if (page_no == 0)
		page_no = 1;	/* force Empty message */
	sis->max = page_no;
	sis->pages = page_no - 1;
	sis->highest_bit = page_no - 1;
done:
	sis->curr_swap_extent = list_entry(sis->extent_list.prev,
					struct swap_extent, list);
	goto out;
bad_bmap:
	printk(KERN_ERR "swapon: swapfile has holes\n");
	ret = -EINVAL;
out:
	return ret;
}

#if 0	/* We don't need this yet */
#include <linux/backing-dev.h>
int page_queue_congested(struct page *page)
{
	struct backing_dev_info *bdi;

	BUG_ON(!PageLocked(page));	/* It pins the swap_info_struct */

	if (PageSwapCache(page)) {
		swp_entry_t entry = { .val = page_private(page) };
		struct swap_info_struct *sis;

		sis = get_swap_info_struct(swp_type(entry));
		bdi = sis->bdev->bd_inode->i_mapping->backing_dev_info;
	} else
		bdi = page->mapping->backing_dev_info;
	return bdi_write_congested(bdi);
}
#endif

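/*
 * sys_swapoff() in outline: find the active swap area backing the
 * given file, unlink it from swap_list and clear SWP_WRITEOK so no
 * new pages land there, call try_to_unuse() to pull every entry
 * back into memory, then tear down the swap_map, the extents and
 * the S_SWAPFILE flag or bdev claim taken at swapon time.
 */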
asmlinkage long sys_swapoff(const char __user * specialfile)
{
	struct swap_info_struct * p = NULL;
	unsigned short *swap_map;
	struct file *swap_file, *victim;
	struct address_space *mapping;
	struct inode *inode;
	char * pathname;
	int i, type, prev;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	pathname = getname(specialfile);
	err = PTR_ERR(pathname);
	if (IS_ERR(pathname))
		goto out;

	victim = filp_open(pathname, O_RDWR|O_LARGEFILE, 0);
	putname(pathname);
	err = PTR_ERR(victim);
	if (IS_ERR(victim))
		goto out;

	mapping = victim->f_mapping;
	prev = -1;
	spin_lock(&swap_lock);
	for (type = swap_list.head; type >= 0; type = swap_info[type].next) {
		p = swap_info + type;
		if ((p->flags & SWP_ACTIVE) == SWP_ACTIVE) {
			if (p->swap_file->f_mapping == mapping)
				break;
		}
		prev = type;
	}
	if (type < 0) {
		err = -EINVAL;
		spin_unlock(&swap_lock);
		goto out_dput;
	}
	if (!security_vm_enough_memory(p->pages))
		vm_unacct_memory(p->pages);
	else {
		err = -ENOMEM;
		spin_unlock(&swap_lock);
		goto out_dput;
	}
	if (prev < 0) {
		swap_list.head = p->next;
	} else {
		swap_info[prev].next = p->next;
	}
	if (type == swap_list.next) {
		/* just pick something that's safe... */
		swap_list.next = swap_list.head;
	}
	nr_swap_pages -= p->pages;
	total_swap_pages -= p->pages;
	p->flags &= ~SWP_WRITEOK;
	spin_unlock(&swap_lock);

	current->flags |= PF_SWAPOFF;
	err = try_to_unuse(type);
	current->flags &= ~PF_SWAPOFF;

	if (err) {
		/* re-insert swap space back into swap_list */
		spin_lock(&swap_lock);
		for (prev = -1, i = swap_list.head; i >= 0; prev = i, i = swap_info[i].next)
			if (p->prio >= swap_info[i].prio)
				break;
		p->next = i;
		if (prev < 0)
			swap_list.head = swap_list.next = p - swap_info;
		else
			swap_info[prev].next = p - swap_info;
		nr_swap_pages += p->pages;
		total_swap_pages += p->pages;
		p->flags |= SWP_WRITEOK;
		spin_unlock(&swap_lock);
		goto out_dput;
	}

	/* wait for any unplug function to finish */
	down_write(&swap_unplug_sem);
	up_write(&swap_unplug_sem);

	destroy_swap_extents(p);
	mutex_lock(&swapon_mutex);
	spin_lock(&swap_lock);
	drain_mmlist();

	/* wait for anyone still in scan_swap_map */
	p->highest_bit = 0;		/* cuts scans short */
	while (p->flags >= SWP_SCANNING) {
		spin_unlock(&swap_lock);
		schedule_timeout_uninterruptible(1);
		spin_lock(&swap_lock);
	}

	swap_file = p->swap_file;
	p->swap_file = NULL;
	p->max = 0;
	swap_map = p->swap_map;
	p->swap_map = NULL;
	p->flags = 0;
	spin_unlock(&swap_lock);
	mutex_unlock(&swapon_mutex);
	vfree(swap_map);
	inode = mapping->host;
	if (S_ISBLK(inode->i_mode)) {
		struct block_device *bdev = I_BDEV(inode);
		set_blocksize(bdev, p->old_block_size);
		bd_release(bdev);
	} else {
		mutex_lock(&inode->i_mutex);
		inode->i_flags &= ~S_SWAPFILE;
		mutex_unlock(&inode->i_mutex);
	}
	filp_close(swap_file, NULL);
	err = 0;

out_dput:
	filp_close(victim, NULL);
out:
	return err;
}

#ifdef CONFIG_PROC_FS
/* iterator */
static void *swap_start(struct seq_file *swap, loff_t *pos)
{
	struct swap_info_struct *ptr = swap_info;
	int i;
	loff_t l = *pos;

	mutex_lock(&swapon_mutex);

	if (!l)
		return SEQ_START_TOKEN;

	for (i = 0; i < nr_swapfiles; i++, ptr++) {
		if (!(ptr->flags & SWP_USED) || !ptr->swap_map)
			continue;
		if (!--l)
			return ptr;
	}

	return NULL;
}

static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)
{
	struct swap_info_struct *ptr;
	struct swap_info_struct *endptr = swap_info + nr_swapfiles;

	if (v == SEQ_START_TOKEN)
		ptr = swap_info;
	else {
		ptr = v;
		ptr++;
	}

	for (; ptr < endptr; ptr++) {
		if (!(ptr->flags & SWP_USED) || !ptr->swap_map)
			continue;
		++*pos;
		return ptr;
	}

	return NULL;
}

static void swap_stop(struct seq_file *swap, void *v)
{
	mutex_unlock(&swapon_mutex);
}

static int swap_show(struct seq_file *swap, void *v)
{
	struct swap_info_struct *ptr = v;
	struct file *file;
	int len;

	if (ptr == SEQ_START_TOKEN) {
		seq_puts(swap,"Filename\t\t\t\tType\t\tSize\tUsed\tPriority\n");
		return 0;
	}

	file = ptr->swap_file;
	len = seq_path(swap, file->f_vfsmnt, file->f_dentry, " \t\n\\");
	seq_printf(swap, "%*s%s\t%u\t%u\t%d\n",
		       len < 40 ? 40 - len : 1, " ",
		       S_ISBLK(file->f_dentry->d_inode->i_mode) ?
				"partition" : "file\t",
		       ptr->pages << (PAGE_SHIFT - 10),
		       ptr->inuse_pages << (PAGE_SHIFT - 10),
		       ptr->prio);
	return 0;
}

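/*
 * The resulting /proc/swaps output looks roughly like this
 * (illustrative):
 *
 *	Filename				Type		Size	Used	Priority
 *	/dev/sda2                               partition	1048568	2340	-1
 *
 * with Size and Used in 1K blocks, as produced by the
 * PAGE_SHIFT - 10 shifts in swap_show() above.
 */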
static struct seq_operations swaps_op = {
	.start =	swap_start,
	.next =		swap_next,
	.stop =		swap_stop,
	.show =		swap_show
};

static int swaps_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &swaps_op);
}

static struct file_operations proc_swaps_operations = {
	.open		= swaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init procswaps_init(void)
{
	struct proc_dir_entry *entry;

	entry = create_proc_entry("swaps", 0, NULL);
	if (entry)
		entry->proc_fops = &proc_swaps_operations;
	return 0;
}
__initcall(procswaps_init);
#endif /* CONFIG_PROC_FS */

/*
 * Written 01/25/92 by Simmule Turner, heavily changed by Linus.
 *
 * The swapon system call
 */
asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
{
	struct swap_info_struct * p;
	char *name = NULL;
	struct block_device *bdev = NULL;
	struct file *swap_file = NULL;
	struct address_space *mapping;
	unsigned int type;
	int i, prev;
	int error;
	static int least_priority;
	union swap_header *swap_header = NULL;
	int swap_header_version;
	unsigned int nr_good_pages = 0;
	int nr_extents = 0;
	sector_t span;
	unsigned long maxpages = 1;
	int swapfilesize;
	unsigned short *swap_map;
	struct page *page = NULL;
	struct inode *inode = NULL;
	int did_down = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	spin_lock(&swap_lock);
	p = swap_info;
	for (type = 0 ; type < nr_swapfiles ; type++,p++)
		if (!(p->flags & SWP_USED))
			break;
	error = -EPERM;
	if (type >= MAX_SWAPFILES) {
		spin_unlock(&swap_lock);
		goto out;
	}
	if (type >= nr_swapfiles)
		nr_swapfiles = type+1;
	INIT_LIST_HEAD(&p->extent_list);
	p->flags = SWP_USED;
	p->swap_file = NULL;
	p->old_block_size = 0;
	p->swap_map = NULL;
	p->lowest_bit = 0;
	p->highest_bit = 0;
	p->cluster_nr = 0;
	p->inuse_pages = 0;
	p->next = -1;
	if (swap_flags & SWAP_FLAG_PREFER) {
		p->prio =
		  (swap_flags & SWAP_FLAG_PRIO_MASK)>>SWAP_FLAG_PRIO_SHIFT;
	} else {
		p->prio = --least_priority;
	}
	spin_unlock(&swap_lock);
	name = getname(specialfile);
	error = PTR_ERR(name);
	if (IS_ERR(name)) {
		name = NULL;
		goto bad_swap_2;
	}
	swap_file = filp_open(name, O_RDWR|O_LARGEFILE, 0);
	error = PTR_ERR(swap_file);
	if (IS_ERR(swap_file)) {
		swap_file = NULL;
		goto bad_swap_2;
	}

	p->swap_file = swap_file;
	mapping = swap_file->f_mapping;
	inode = mapping->host;

	error = -EBUSY;
	for (i = 0; i < nr_swapfiles; i++) {
		struct swap_info_struct *q = &swap_info[i];

		if (i == type || !q->swap_file)
			continue;
		if (mapping == q->swap_file->f_mapping)
			goto bad_swap;
	}

	error = -EINVAL;
	if (S_ISBLK(inode->i_mode)) {
		bdev = I_BDEV(inode);
		error = bd_claim(bdev, sys_swapon);
		if (error < 0) {
			bdev = NULL;
			error = -EINVAL;
			goto bad_swap;
		}
		p->old_block_size = block_size(bdev);
		error = set_blocksize(bdev, PAGE_SIZE);
		if (error < 0)
			goto bad_swap;
		p->bdev = bdev;
	} else if (S_ISREG(inode->i_mode)) {
		p->bdev = inode->i_sb->s_bdev;
		mutex_lock(&inode->i_mutex);
		did_down = 1;
		if (IS_SWAPFILE(inode)) {
			error = -EBUSY;
			goto bad_swap;
		}
	} else {
		goto bad_swap;
	}

	swapfilesize = i_size_read(inode) >> PAGE_SHIFT;

	/*
	 * Read the swap header.
	 */
	if (!mapping->a_ops->readpage) {
		error = -EINVAL;
		goto bad_swap;
	}
	page = read_mapping_page(mapping, 0, swap_file);
	if (IS_ERR(page)) {
		error = PTR_ERR(page);
		goto bad_swap;
	}
	wait_on_page_locked(page);
	if (!PageUptodate(page))
		goto bad_swap;
	kmap(page);
	swap_header = page_address(page);

	if (!memcmp("SWAP-SPACE",swap_header->magic.magic,10))
		swap_header_version = 1;
	else if (!memcmp("SWAPSPACE2",swap_header->magic.magic,10))
		swap_header_version = 2;
	else {
		printk(KERN_ERR "Unable to find swap-space signature\n");
		error = -EINVAL;
		goto bad_swap;
	}

	switch (swap_header_version) {
	case 1:
		printk(KERN_ERR "version 0 swap is no longer supported. "
			"Use mkswap -v1 %s\n", name);
		error = -EINVAL;
		goto bad_swap;
	case 2:
		/* Check the swap header's sub-version and the size of
		   the swap file and bad block lists */
		if (swap_header->info.version != 1) {
			printk(KERN_WARNING
			       "Unable to handle swap header version %d\n",
			       swap_header->info.version);
			error = -EINVAL;
			goto bad_swap;
		}

		p->lowest_bit  = 1;
		p->cluster_next = 1;

		/*
		 * Find out how many pages are allowed for a single swap
		 * device. There are two limiting factors: 1) the number of
		 * bits for the swap offset in the swp_entry_t type and
		 * 2) the number of bits in a swap pte as defined by
		 * the different architectures. In order to find the
		 * largest possible bit mask a swap entry with swap type 0
		 * and swap offset ~0UL is created, encoded to a swap pte,
		 * decoded to a swp_entry_t again and finally the swap
		 * offset is extracted. This will mask all the bits from
		 * the initial ~0UL mask that can't be encoded in either
		 * the swp_entry_t or the architecture definition of a
		 * swap pte.
		 */
		maxpages = swp_offset(pte_to_swp_entry(swp_entry_to_pte(swp_entry(0,~0UL)))) - 1;
		if (maxpages > swap_header->info.last_page)
			maxpages = swap_header->info.last_page;
		p->highest_bit = maxpages - 1;

		error = -EINVAL;
		if (!maxpages)
			goto bad_swap;
		if (swapfilesize && maxpages > swapfilesize) {
			printk(KERN_WARNING
			       "Swap area shorter than signature indicates\n");
			goto bad_swap;
		}
		if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode))
			goto bad_swap;
		if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
			goto bad_swap;

		/* OK, set up the swap map and apply the bad block list */
		if (!(p->swap_map = vmalloc(maxpages * sizeof(short)))) {
			error = -ENOMEM;
			goto bad_swap;
		}

		error = 0;
		memset(p->swap_map, 0, maxpages * sizeof(short));
		for (i = 0; i < swap_header->info.nr_badpages; i++) {
			int page_nr = swap_header->info.badpages[i];
			if (page_nr <= 0 || page_nr >= swap_header->info.last_page)
				error = -EINVAL;
			else
				p->swap_map[page_nr] = SWAP_MAP_BAD;
		}
		nr_good_pages = swap_header->info.last_page -
				swap_header->info.nr_badpages -
				1 /* header page */;
		if (error)
			goto bad_swap;
	}

	if (nr_good_pages) {
		p->swap_map[0] = SWAP_MAP_BAD;
		p->max = maxpages;
		p->pages = nr_good_pages;
		nr_extents = setup_swap_extents(p, &span);
		if (nr_extents < 0) {
			error = nr_extents;
			goto bad_swap;
		}
		nr_good_pages = p->pages;
	}
	if (!nr_good_pages) {
		printk(KERN_WARNING "Empty swap-file\n");
		error = -EINVAL;
		goto bad_swap;
	}

	mutex_lock(&swapon_mutex);
	spin_lock(&swap_lock);
	p->flags = SWP_ACTIVE;
	nr_swap_pages += nr_good_pages;
	total_swap_pages += nr_good_pages;

	printk(KERN_INFO "Adding %uk swap on %s.  "
			"Priority:%d extents:%d across:%lluk\n",
		nr_good_pages<<(PAGE_SHIFT-10), name, p->prio,
		nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10));

	/* insert swap space into swap_list: */
	prev = -1;
	for (i = swap_list.head; i >= 0; i = swap_info[i].next) {
		if (p->prio >= swap_info[i].prio) {
			break;
		}
		prev = i;
	}
	p->next = i;
	if (prev < 0) {
		swap_list.head = swap_list.next = p - swap_info;
	} else {
		swap_info[prev].next = p - swap_info;
	}
	spin_unlock(&swap_lock);
	mutex_unlock(&swapon_mutex);
	error = 0;
	goto out;
bad_swap:
	if (bdev) {
		set_blocksize(bdev, p->old_block_size);
		bd_release(bdev);
	}
	destroy_swap_extents(p);
bad_swap_2:
	spin_lock(&swap_lock);
	swap_map = p->swap_map;
	p->swap_file = NULL;
	p->swap_map = NULL;
	p->flags = 0;
	if (!(swap_flags & SWAP_FLAG_PREFER))
		++least_priority;
	spin_unlock(&swap_lock);
	vfree(swap_map);
	if (swap_file)
		filp_close(swap_file, NULL);
out:
	if (page && !IS_ERR(page)) {
		kunmap(page);
		page_cache_release(page);
	}
	if (name)
		putname(name);
	if (did_down) {
		if (!error)
			inode->i_flags |= S_SWAPFILE;
		mutex_unlock(&inode->i_mutex);
	}
	return error;
}

void si_swapinfo(struct sysinfo *val)
{
	unsigned int i;
	unsigned long nr_to_be_unused = 0;

	spin_lock(&swap_lock);
	for (i = 0; i < nr_swapfiles; i++) {
		if (!(swap_info[i].flags & SWP_USED) ||
		     (swap_info[i].flags & SWP_WRITEOK))
			continue;
		nr_to_be_unused += swap_info[i].inuse_pages;
	}
	val->freeswap = nr_swap_pages + nr_to_be_unused;
	val->totalswap = total_swap_pages + nr_to_be_unused;
	spin_unlock(&swap_lock);
}

/*
 * Verify that a swap entry is valid and increment its swap map count.
 *
 * Note: if swap_map[] reaches SWAP_MAP_MAX the entries are treated as
 * "permanent", but will be reclaimed by the next swapoff.
 */
int swap_duplicate(swp_entry_t entry)
{
	struct swap_info_struct * p;
	unsigned long offset, type;
	int result = 0;

	if (is_migration_entry(entry))
		return 1;

	type = swp_type(entry);
	if (type >= nr_swapfiles)
		goto bad_file;
	p = type + swap_info;
	offset = swp_offset(entry);

	spin_lock(&swap_lock);
	if (offset < p->max && p->swap_map[offset]) {
		if (p->swap_map[offset] < SWAP_MAP_MAX - 1) {
			p->swap_map[offset]++;
			result = 1;
		} else if (p->swap_map[offset] <= SWAP_MAP_MAX) {
			if (swap_overflow++ < 5)
				printk(KERN_WARNING "swap_dup: swap entry overflow\n");
			p->swap_map[offset] = SWAP_MAP_MAX;
			result = 1;
		}
	}
	spin_unlock(&swap_lock);
out:
	return result;

bad_file:
	printk(KERN_ERR "swap_dup: %s%08lx\n", Bad_file, entry.val);
	goto out;
}

struct swap_info_struct *
get_swap_info_struct(unsigned type)
{
	return &swap_info[type];
}

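/*
 * Example for valid_swaphandles() below: with page_cluster == 3 the
 * readahead window is 1 << 3 == 8 slots, aligned down to an 8-slot
 * boundary; an entry at offset 13 yields *offset = 8, and we read
 * ahead through the window until a free or bad slot cuts it short.
 */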
/*
 * swap_lock prevents swap_map being freed. Don't grab an extra
 * reference on the swaphandle, it doesn't matter if it becomes unused.
 */
int valid_swaphandles(swp_entry_t entry, unsigned long *offset)
{
	int our_page_cluster = page_cluster;
	int ret = 0, i = 1 << our_page_cluster;
	unsigned long toff;
	struct swap_info_struct *swapdev = swp_type(entry) + swap_info;

	if (!our_page_cluster)	/* no readahead */
		return 0;
	toff = (swp_offset(entry) >> our_page_cluster) << our_page_cluster;
	if (!toff)		/* first page is swap header */
		toff++, i--;
	*offset = toff;

	spin_lock(&swap_lock);
	do {
		/* Don't read-ahead past the end of the swap area */
		if (toff >= swapdev->max)
			break;
		/* Don't read in free or bad pages */
		if (!swapdev->swap_map[toff])
			break;
		if (swapdev->swap_map[toff] == SWAP_MAP_BAD)
			break;
		toff++;
		ret++;
	} while (--i);
	spin_unlock(&swap_lock);
	return ret;
}
1779