mm/swapfile.c: Lines Matching +full:se +full:- +full:pos

1 // SPDX-License-Identifier: GPL-2.0-only
22 #include <linux/blk-cgroup.h>
31 #include <linux/backing-dev.h>
67 static int least_priority = -1;
91 * swap_info_struct changes between not-full/full, it needs to
92 * add/remove itself to/from this list, but the swap_info_struct->lock
94 * before any swap_info_struct->lock.
136 swp_entry_t entry = swp_entry(si->type, offset); in __try_to_reclaim_swap()
163 struct rb_node *rb = rb_first(&sis->swap_extent_root); in first_se()
167 static inline struct swap_extent *next_se(struct swap_extent *se) in next_se() argument
169 struct rb_node *rb = rb_next(&se->rb_node); in next_se()
175 * to allow the swap device to optimize its wear-levelling.
179 struct swap_extent *se; in discard_swap() local
185 se = first_se(si); in discard_swap()
186 start_block = (se->start_block + 1) << (PAGE_SHIFT - 9); in discard_swap()
187 nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9); in discard_swap()
189 err = blkdev_issue_discard(si->bdev, start_block, in discard_swap()
196 for (se = next_se(se); se; se = next_se(se)) { in discard_swap()
197 start_block = se->start_block << (PAGE_SHIFT - 9); in discard_swap()
198 nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9); in discard_swap()
200 err = blkdev_issue_discard(si->bdev, start_block, in discard_swap()
207 return err; /* That will often be -EOPNOTSUPP */ in discard_swap()
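
The discard_swap() lines above walk every swap extent (sparing the header page) and hand each block range to blkdev_issue_discard(), which often fails with -EOPNOTSUPP on devices without discard support. For illustration only, a user-space analogue is the BLKDISCARD ioctl; the device path and byte range below are placeholders, and running this against a real device destroys data:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>    /* BLKDISCARD */

int main(void)
{
    int fd = open("/dev/loop0", O_WRONLY);    /* placeholder device */
    if (fd < 0) {
        perror("open");
        return 1;
    }
    /* BLKDISCARD takes a byte range: { start, length }. Skip the first
     * 4 KiB, roughly analogous to discard_swap() sparing the header page. */
    uint64_t range[2] = { 4096, 1 << 20 };
    if (ioctl(fd, BLKDISCARD, range) < 0)
        perror("BLKDISCARD");                 /* often EOPNOTSUPP, as noted above */
    close(fd);
    return 0;
}
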
213 struct swap_extent *se; in offset_to_swap_extent() local
216 rb = sis->swap_extent_root.rb_node; in offset_to_swap_extent()
218 se = rb_entry(rb, struct swap_extent, rb_node); in offset_to_swap_extent()
219 if (offset < se->start_page) in offset_to_swap_extent()
220 rb = rb->rb_left; in offset_to_swap_extent()
221 else if (offset >= se->start_page + se->nr_pages) in offset_to_swap_extent()
222 rb = rb->rb_right; in offset_to_swap_extent()
224 return se; in offset_to_swap_extent()
233 struct swap_extent *se; in swap_page_sector() local
238 se = offset_to_swap_extent(sis, offset); in swap_page_sector()
239 sector = se->start_block + (offset - se->start_page); in swap_page_sector()
240 return sector << (PAGE_SHIFT - 9); in swap_page_sector()
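
offset_to_swap_extent() above walks an rbtree keyed on [start_page, start_page + nr_pages), and swap_page_sector() converts the hit to a 512-byte sector with << (PAGE_SHIFT - 9). A minimal sketch of the same lookup and conversion, using a sorted array instead of an rbtree; the extent values and the PAGE_SHIFT of 12 (4 KiB pages, so 8 sectors per page) are assumptions for the example:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12    /* 4 KiB pages: 1 << (12 - 9) = 8 sectors of 512 bytes each */

struct extent { uint64_t start_page, nr_pages, start_block; };

/* Same comparison as the rbtree walk: go left if below the extent,
 * right if past its end, otherwise this extent covers the offset. */
static const struct extent *lookup(const struct extent *v, int n, uint64_t offset)
{
    int lo = 0, hi = n - 1;
    while (lo <= hi) {
        int mid = lo + (hi - lo) / 2;
        if (offset < v[mid].start_page)
            hi = mid - 1;
        else if (offset >= v[mid].start_page + v[mid].nr_pages)
            lo = mid + 1;
        else
            return &v[mid];
    }
    return NULL;
}

int main(void)
{
    const struct extent map[] = {             /* invented extents */
        { .start_page = 0,   .nr_pages = 100, .start_block = 2048 },
        { .start_page = 100, .nr_pages = 50,  .start_block = 9000 },
    };
    uint64_t offset = 120;
    const struct extent *e = lookup(map, 2, offset);
    if (e) {
        uint64_t block  = e->start_block + (offset - e->start_page);
        uint64_t sector = block << (PAGE_SHIFT - 9);
        printf("page %llu -> block %llu -> sector %llu\n",
               (unsigned long long)offset,
               (unsigned long long)block,
               (unsigned long long)sector);   /* page 120 -> block 9020 -> sector 72160 */
    }
    return 0;
}
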
245 * to allow the swap device to optimize its wear-levelling.
250 struct swap_extent *se = offset_to_swap_extent(si, start_page); in discard_swap_cluster() local
253 pgoff_t offset = start_page - se->start_page; in discard_swap_cluster()
254 sector_t start_block = se->start_block + offset; in discard_swap_cluster()
255 sector_t nr_blocks = se->nr_pages - offset; in discard_swap_cluster()
260 nr_pages -= nr_blocks; in discard_swap_cluster()
262 start_block <<= PAGE_SHIFT - 9; in discard_swap_cluster()
263 nr_blocks <<= PAGE_SHIFT - 9; in discard_swap_cluster()
264 if (blkdev_issue_discard(si->bdev, start_block, in discard_swap_cluster()
268 se = next_se(se); in discard_swap_cluster()
290 info->flags = flag; in cluster_set_flag()
295 return info->data; in cluster_count()
301 info->data = c; in cluster_set_count()
307 info->flags = f; in cluster_set_count_flag()
308 info->data = c; in cluster_set_count_flag()
313 return info->data; in cluster_next()
319 info->data = n; in cluster_set_next()
325 info->flags = f; in cluster_set_next_flag()
326 info->data = n; in cluster_set_next_flag()
331 return info->flags & CLUSTER_FLAG_FREE; in cluster_is_free()
336 return info->flags & CLUSTER_FLAG_NEXT_NULL; in cluster_is_null()
341 info->flags = CLUSTER_FLAG_NEXT_NULL; in cluster_set_null()
342 info->data = 0; in cluster_set_null()
348 return info->flags & CLUSTER_FLAG_HUGE; in cluster_is_huge()
354 info->flags &= ~CLUSTER_FLAG_HUGE; in cluster_clear_huge()
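
The cluster_* helpers above touch only two fields of each swap_cluster_info: a small flags value (FREE, NEXT_NULL, HUGE) and a data value that holds either an in-use count or the index of the next cluster, depending on state. A stand-alone mock of that packing; the bitfield widths and the numeric flag values are assumptions chosen for illustration:

#include <stdio.h>

#define CLUSTER_FLAG_FREE      1    /* assumed value */
#define CLUSTER_FLAG_NEXT_NULL 2    /* assumed value */

struct cluster_info {
    unsigned int data  : 24;    /* in-use count, or next-cluster index when on a list */
    unsigned int flags : 8;     /* state bits */
};

int main(void)
{
    struct cluster_info ci = { 0 };

    /* While allocated, data counts the used swap entries in the cluster. */
    ci.flags = 0;
    ci.data  = 3;
    printf("free=%d count=%u\n", !!(ci.flags & CLUSTER_FLAG_FREE), (unsigned)ci.data);

    /* Once freed and queued, the same data field is reused as a "next" index. */
    ci.flags = CLUSTER_FLAG_FREE;
    ci.data  = 42;
    printf("free=%d next=%u\n", !!(ci.flags & CLUSTER_FLAG_FREE), (unsigned)ci.data);
    return 0;
}
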
362 ci = si->cluster_info; in lock_cluster()
365 spin_lock(&ci->lock); in lock_cluster()
373 spin_unlock(&ci->lock); in unlock_cluster()
378 * swap_cluster_info if SSD-style cluster-based locking is in place.
385 /* Try to use fine-grained SSD-style locking if available: */ in lock_cluster_or_swap_info()
389 spin_lock(&si->lock); in lock_cluster_or_swap_info()
400 spin_unlock(&si->lock); in unlock_cluster_or_swap_info()
405 return cluster_is_null(&list->head); in cluster_list_empty()
410 return cluster_next(&list->head); in cluster_list_first()
415 cluster_set_null(&list->head); in cluster_list_init()
416 cluster_set_null(&list->tail); in cluster_list_init()
424 cluster_set_next_flag(&list->head, idx, 0); in cluster_list_add_tail()
425 cluster_set_next_flag(&list->tail, idx, 0); in cluster_list_add_tail()
428 unsigned int tail = cluster_next(&list->tail); in cluster_list_add_tail()
432 * only acquired when we held swap_info_struct->lock in cluster_list_add_tail()
435 spin_lock_nested(&ci_tail->lock, SINGLE_DEPTH_NESTING); in cluster_list_add_tail()
437 spin_unlock(&ci_tail->lock); in cluster_list_add_tail()
438 cluster_set_next_flag(&list->tail, idx, 0); in cluster_list_add_tail()
447 idx = cluster_next(&list->head); in cluster_list_del_first()
448 if (cluster_next(&list->tail) == idx) { in cluster_list_del_first()
449 cluster_set_null(&list->head); in cluster_list_del_first()
450 cluster_set_null(&list->tail); in cluster_list_del_first()
452 cluster_set_next_flag(&list->head, in cluster_list_del_first()
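
cluster_list_add_tail() and cluster_list_del_first() above maintain a FIFO whose links are cluster indices rather than pointers: head and tail each hold an index, and a cluster's "next" index is stored in the shared cluster_info array. A small stand-alone model of such an index-threaded queue (array size, indices and the NIL sentinel are invented):

#include <stdio.h>

#define NIL 0xFFFFFFFFu                  /* stand-in for the "null" flag used above */

struct node { unsigned int next; };      /* per-cluster slot holding only a next index */
struct list { unsigned int head, tail; };

static void add_tail(struct list *l, struct node *v, unsigned int idx)
{
    v[idx].next = NIL;
    if (l->head == NIL) {
        l->head = l->tail = idx;         /* empty list: head and tail both point at idx */
    } else {
        v[l->tail].next = idx;           /* link through the array, not through pointers */
        l->tail = idx;
    }
}

static unsigned int del_first(struct list *l, struct node *v)
{
    unsigned int idx = l->head;
    if (idx == NIL)
        return NIL;
    if (l->tail == idx)
        l->head = l->tail = NIL;         /* removed the only element */
    else
        l->head = v[idx].next;
    return idx;
}

int main(void)
{
    struct node info[8];                 /* invented: eight clusters */
    struct list free_list = { NIL, NIL };
    unsigned int a, b, c;

    add_tail(&free_list, info, 5);
    add_tail(&free_list, info, 2);
    a = del_first(&free_list, info);
    b = del_first(&free_list, info);
    c = del_first(&free_list, info);
    printf("%u %u %u\n", a, b, c);       /* 5 2 4294967295 (empty) */
    return 0;
}
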
464 * si->swap_map directly. To make sure the discarding cluster isn't in swap_cluster_schedule_discard()
468 memset(si->swap_map + idx * SWAPFILE_CLUSTER, in swap_cluster_schedule_discard()
471 cluster_list_add_tail(&si->discard_clusters, si->cluster_info, idx); in swap_cluster_schedule_discard()
473 schedule_work(&si->discard_work); in swap_cluster_schedule_discard()
478 struct swap_cluster_info *ci = si->cluster_info; in __free_cluster()
481 cluster_list_add_tail(&si->free_clusters, ci, idx); in __free_cluster()
486 * will be added to free cluster list. caller should hold si->lock.
493 info = si->cluster_info; in swap_do_scheduled_discard()
495 while (!cluster_list_empty(&si->discard_clusters)) { in swap_do_scheduled_discard()
496 idx = cluster_list_del_first(&si->discard_clusters, info); in swap_do_scheduled_discard()
497 spin_unlock(&si->lock); in swap_do_scheduled_discard()
502 spin_lock(&si->lock); in swap_do_scheduled_discard()
505 memset(si->swap_map + idx * SWAPFILE_CLUSTER, in swap_do_scheduled_discard()
517 spin_lock(&si->lock); in swap_discard_work()
519 spin_unlock(&si->lock); in swap_discard_work()
527 complete(&si->comp); in swap_users_ref_free()
532 struct swap_cluster_info *ci = si->cluster_info; in alloc_cluster()
534 VM_BUG_ON(cluster_list_first(&si->free_clusters) != idx); in alloc_cluster()
535 cluster_list_del_first(&si->free_clusters, ci); in alloc_cluster()
541 struct swap_cluster_info *ci = si->cluster_info + idx; in free_cluster()
549 if ((si->flags & (SWP_WRITEOK | SWP_PAGE_DISCARD)) == in free_cluster()
592 cluster_count(&cluster_info[idx]) - 1); in dec_cluster_info_page()
610 conflict = !cluster_list_empty(&si->free_clusters) && in scan_swap_map_ssd_cluster_conflict()
611 offset != cluster_list_first(&si->free_clusters) && in scan_swap_map_ssd_cluster_conflict()
612 cluster_is_free(&si->cluster_info[offset]); in scan_swap_map_ssd_cluster_conflict()
617 percpu_cluster = this_cpu_ptr(si->percpu_cluster); in scan_swap_map_ssd_cluster_conflict()
618 cluster_set_null(&percpu_cluster->index); in scan_swap_map_ssd_cluster_conflict()
634 cluster = this_cpu_ptr(si->percpu_cluster); in scan_swap_map_try_ssd_cluster()
635 if (cluster_is_null(&cluster->index)) { in scan_swap_map_try_ssd_cluster()
636 if (!cluster_list_empty(&si->free_clusters)) { in scan_swap_map_try_ssd_cluster()
637 cluster->index = si->free_clusters.head; in scan_swap_map_try_ssd_cluster()
638 cluster->next = cluster_next(&cluster->index) * in scan_swap_map_try_ssd_cluster()
640 } else if (!cluster_list_empty(&si->discard_clusters)) { in scan_swap_map_try_ssd_cluster()
644 * reread cluster_next_cpu since we dropped si->lock in scan_swap_map_try_ssd_cluster()
647 *scan_base = this_cpu_read(*si->cluster_next_cpu); in scan_swap_map_try_ssd_cluster()
658 tmp = cluster->next; in scan_swap_map_try_ssd_cluster()
659 max = min_t(unsigned long, si->max, in scan_swap_map_try_ssd_cluster()
660 (cluster_next(&cluster->index) + 1) * SWAPFILE_CLUSTER); in scan_swap_map_try_ssd_cluster()
664 if (!si->swap_map[tmp]) in scan_swap_map_try_ssd_cluster()
671 cluster_set_null(&cluster->index); in scan_swap_map_try_ssd_cluster()
674 cluster->next = tmp + 1; in scan_swap_map_try_ssd_cluster()
684 assert_spin_locked(&p->lock); in __del_from_avail_list()
686 plist_del(&p->avail_lists[nid], &swap_avail_heads[nid]); in __del_from_avail_list()
699 unsigned int end = offset + nr_entries - 1; in swap_range_alloc()
701 if (offset == si->lowest_bit) in swap_range_alloc()
702 si->lowest_bit += nr_entries; in swap_range_alloc()
703 if (end == si->highest_bit) in swap_range_alloc()
704 WRITE_ONCE(si->highest_bit, si->highest_bit - nr_entries); in swap_range_alloc()
705 WRITE_ONCE(si->inuse_pages, si->inuse_pages + nr_entries); in swap_range_alloc()
706 if (si->inuse_pages == si->pages) { in swap_range_alloc()
707 si->lowest_bit = si->max; in swap_range_alloc()
708 si->highest_bit = 0; in swap_range_alloc()
719 plist_add(&p->avail_lists[nid], &swap_avail_heads[nid]); in add_to_avail_list()
727 unsigned long end = offset + nr_entries - 1; in swap_range_free()
730 if (offset < si->lowest_bit) in swap_range_free()
731 si->lowest_bit = offset; in swap_range_free()
732 if (end > si->highest_bit) { in swap_range_free()
733 bool was_full = !si->highest_bit; in swap_range_free()
735 WRITE_ONCE(si->highest_bit, end); in swap_range_free()
736 if (was_full && (si->flags & SWP_WRITEOK)) in swap_range_free()
740 WRITE_ONCE(si->inuse_pages, si->inuse_pages - nr_entries); in swap_range_free()
741 if (si->flags & SWP_BLKDEV) in swap_range_free()
743 si->bdev->bd_disk->fops->swap_slot_free_notify; in swap_range_free()
747 arch_swap_invalidate_page(si->type, offset); in swap_range_free()
748 zswap_invalidate(si->type, offset); in swap_range_free()
750 swap_slot_free_notify(si->bdev, offset); in swap_range_free()
753 clear_shadow_from_swap_cache(si->type, begin, end); in swap_range_free()
760 if (!(si->flags & SWP_SOLIDSTATE)) { in set_cluster_next()
761 si->cluster_next = next; in set_cluster_next()
765 prev = this_cpu_read(*si->cluster_next_cpu); in set_cluster_next()
774 if (si->highest_bit <= si->lowest_bit) in set_cluster_next()
776 next = get_random_u32_inclusive(si->lowest_bit, si->highest_bit); in set_cluster_next()
778 next = max_t(unsigned int, next, si->lowest_bit); in set_cluster_next()
780 this_cpu_write(*si->cluster_next_cpu, next); in set_cluster_next()
786 if (data_race(!si->swap_map[offset])) { in swap_offset_available_and_locked()
787 spin_lock(&si->lock); in swap_offset_available_and_locked()
791 if (vm_swap_full() && READ_ONCE(si->swap_map[offset]) == SWAP_HAS_CACHE) { in swap_offset_available_and_locked()
792 spin_lock(&si->lock); in swap_offset_available_and_locked()
814 * way, however, we resort to first-free allocation, starting in scan_swap_map_slots()
817 * overall disk seek times between swap pages. -- sct in scan_swap_map_slots()
818 * But we do now try to find an empty cluster. -Andrea in scan_swap_map_slots()
822 si->flags += SWP_SCANNING; in scan_swap_map_slots()
828 if (si->flags & SWP_SOLIDSTATE) in scan_swap_map_slots()
829 scan_base = this_cpu_read(*si->cluster_next_cpu); in scan_swap_map_slots()
831 scan_base = si->cluster_next; in scan_swap_map_slots()
835 if (si->cluster_info) { in scan_swap_map_slots()
838 } else if (unlikely(!si->cluster_nr--)) { in scan_swap_map_slots()
839 if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) { in scan_swap_map_slots()
840 si->cluster_nr = SWAPFILE_CLUSTER - 1; in scan_swap_map_slots()
844 spin_unlock(&si->lock); in scan_swap_map_slots()
849 * If seek is cheap, that is the SWP_SOLIDSTATE si->cluster_info in scan_swap_map_slots()
852 scan_base = offset = si->lowest_bit; in scan_swap_map_slots()
853 last_in_cluster = offset + SWAPFILE_CLUSTER - 1; in scan_swap_map_slots()
856 for (; last_in_cluster <= si->highest_bit; offset++) { in scan_swap_map_slots()
857 if (si->swap_map[offset]) in scan_swap_map_slots()
860 spin_lock(&si->lock); in scan_swap_map_slots()
861 offset -= SWAPFILE_CLUSTER - 1; in scan_swap_map_slots()
862 si->cluster_next = offset; in scan_swap_map_slots()
863 si->cluster_nr = SWAPFILE_CLUSTER - 1; in scan_swap_map_slots()
866 if (unlikely(--latency_ration < 0)) { in scan_swap_map_slots()
873 spin_lock(&si->lock); in scan_swap_map_slots()
874 si->cluster_nr = SWAPFILE_CLUSTER - 1; in scan_swap_map_slots()
878 if (si->cluster_info) { in scan_swap_map_slots()
888 if (!(si->flags & SWP_WRITEOK)) in scan_swap_map_slots()
890 if (!si->highest_bit) in scan_swap_map_slots()
892 if (offset > si->highest_bit) in scan_swap_map_slots()
893 scan_base = offset = si->lowest_bit; in scan_swap_map_slots()
896 /* reuse swap entry of cache-only swap if not busy. */ in scan_swap_map_slots()
897 if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) { in scan_swap_map_slots()
900 spin_unlock(&si->lock); in scan_swap_map_slots()
902 spin_lock(&si->lock); in scan_swap_map_slots()
909 if (si->swap_map[offset]) { in scan_swap_map_slots()
916 WRITE_ONCE(si->swap_map[offset], usage); in scan_swap_map_slots()
917 inc_cluster_info_page(si, si->cluster_info, offset); in scan_swap_map_slots()
921 slots[n_ret++] = swp_entry(si->type, offset); in scan_swap_map_slots()
924 if ((n_ret == nr) || (offset >= si->highest_bit)) in scan_swap_map_slots()
930 if (unlikely(--latency_ration < 0)) { in scan_swap_map_slots()
933 spin_unlock(&si->lock); in scan_swap_map_slots()
935 spin_lock(&si->lock); in scan_swap_map_slots()
940 if (si->cluster_info) { in scan_swap_map_slots()
943 } else if (si->cluster_nr && !si->swap_map[++offset]) { in scan_swap_map_slots()
944 /* non-ssd case, still more slots in cluster? */ in scan_swap_map_slots()
945 --si->cluster_nr; in scan_swap_map_slots()
960 scan_limit = si->highest_bit; in scan_swap_map_slots()
961 for (; offset <= scan_limit && --latency_ration > 0; in scan_swap_map_slots()
963 if (!si->swap_map[offset]) in scan_swap_map_slots()
970 si->flags -= SWP_SCANNING; in scan_swap_map_slots()
974 spin_unlock(&si->lock); in scan_swap_map_slots()
975 while (++offset <= READ_ONCE(si->highest_bit)) { in scan_swap_map_slots()
976 if (unlikely(--latency_ration < 0)) { in scan_swap_map_slots()
984 offset = si->lowest_bit; in scan_swap_map_slots()
986 if (unlikely(--latency_ration < 0)) { in scan_swap_map_slots()
995 spin_lock(&si->lock); in scan_swap_map_slots()
998 si->flags -= SWP_SCANNING; in scan_swap_map_slots()
1017 if (cluster_list_empty(&si->free_clusters)) in swap_alloc_cluster()
1020 idx = cluster_list_first(&si->free_clusters); in swap_alloc_cluster()
1026 memset(si->swap_map + offset, SWAP_HAS_CACHE, SWAPFILE_CLUSTER); in swap_alloc_cluster()
1029 *slot = swp_entry(si->type, offset); in swap_alloc_cluster()
1040 memset(si->swap_map + offset, 0, SWAPFILE_CLUSTER); in swap_free_cluster()
1073 /* requeue si to after same-priority siblings */ in get_swap_pages()
1074 plist_requeue(&si->avail_lists[node], &swap_avail_heads[node]); in get_swap_pages()
1076 spin_lock(&si->lock); in get_swap_pages()
1077 if (!si->highest_bit || !(si->flags & SWP_WRITEOK)) { in get_swap_pages()
1079 if (plist_node_empty(&si->avail_lists[node])) { in get_swap_pages()
1080 spin_unlock(&si->lock); in get_swap_pages()
1083 WARN(!si->highest_bit, in get_swap_pages()
1085 si->type); in get_swap_pages()
1086 WARN(!(si->flags & SWP_WRITEOK), in get_swap_pages()
1088 si->type); in get_swap_pages()
1090 spin_unlock(&si->lock); in get_swap_pages()
1094 if (si->flags & SWP_BLKDEV) in get_swap_pages()
1099 spin_unlock(&si->lock); in get_swap_pages()
1108 * and since scan_swap_map_slots() can drop the si->lock, in get_swap_pages()
1112 * si->lock. Since we dropped the swap_avail_lock, the in get_swap_pages()
1117 if (plist_node_empty(&next->avail_lists[node])) in get_swap_pages()
1125 atomic_long_add((long)(n_goal - n_ret) * size, in get_swap_pages()
1141 if (data_race(!(p->flags & SWP_USED))) in _swap_info_get()
1144 if (offset >= p->max) in _swap_info_get()
1146 if (data_race(!p->swap_map[swp_offset(entry)])) in _swap_info_get()
1174 spin_unlock(&q->lock); in swap_info_get_cont()
1176 spin_lock(&p->lock); in swap_info_get_cont()
1188 count = p->swap_map[offset]; in __swap_entry_free_locked()
1209 count--; in __swap_entry_free_locked()
1214 WRITE_ONCE(p->swap_map[offset], usage); in __swap_entry_free_locked()
1216 WRITE_ONCE(p->swap_map[offset], SWAP_HAS_CACHE); in __swap_entry_free_locked()
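
In __swap_entry_free_locked() above, each swap_map byte serves as a reference count combined with a SWAP_HAS_CACHE marker, which is why the code decrements count and then writes back either usage or bare SWAP_HAS_CACHE. A toy model of that count-plus-flag byte; the flag's bit position is an assumption, not necessarily the kernel's value:

#include <stdio.h>

#define HAS_CACHE 0x40u                  /* assumed bit for illustration */
#define COUNT(m)  ((m) & ~HAS_CACHE)     /* like swap_count(): strip the cache bit */

int main(void)
{
    unsigned char map = 2 | HAS_CACHE;   /* two map references plus a swap-cache reference */
    unsigned char count;

    /* Drop one map reference; the cache bit is preserved. */
    count = COUNT(map) - 1;
    map = count | (map & HAS_CACHE);
    printf("count=%u has_cache=%d\n", (unsigned)COUNT(map), !!(map & HAS_CACHE));   /* 1, 1 */

    /* Drop the last map reference; only the cache reference remains. */
    count = COUNT(map) - 1;
    map = count ? (unsigned char)(count | (map & HAS_CACHE)) : (unsigned char)HAS_CACHE;
    printf("count=%u has_cache=%d\n", (unsigned)COUNT(map), !!(map & HAS_CACHE));   /* 0, 1 */
    return 0;
}
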
1271 if (!percpu_ref_tryget_live(&si->users)) in get_swap_device()
1274 * Guarantee the si->users are checked before accessing other in get_swap_device()
1282 if (offset >= si->max) in get_swap_device()
1292 percpu_ref_put(&si->users); in get_swap_device()
1319 count = p->swap_map[offset]; in swap_entry_free()
1321 p->swap_map[offset] = 0; in swap_entry_free()
1322 dec_cluster_info_page(p, p->cluster_info, offset); in swap_entry_free()
1363 map = si->swap_map + offset; in put_swap_folio()
1373 spin_lock(&si->lock); in put_swap_folio()
1376 spin_unlock(&si->lock); in put_swap_folio()
1384 if (i == size - 1) in put_swap_folio()
1401 return -EBUSY; in split_swap_cluster()
1413 return (int)swp_type(*e1) - (int)swp_type(*e2); in swp_entry_cmp()
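
swp_entry_cmp() above sorts entries by their swap type, which works because a swp_entry_t packs the swap device's type and the page offset into one word. A toy encoding with a made-up split point (the kernel's actual shift and field widths are architecture-dependent):

#include <stdint.h>
#include <stdio.h>

#define TYPE_SHIFT 58    /* assumed: type in the high bits, offset in the low bits */

static uint64_t mk_entry(unsigned type, uint64_t offset)
{
    return ((uint64_t)type << TYPE_SHIFT) | offset;
}
static unsigned entry_type(uint64_t e)   { return (unsigned)(e >> TYPE_SHIFT); }
static uint64_t entry_offset(uint64_t e) { return e & ((1ULL << TYPE_SHIFT) - 1); }

int main(void)
{
    uint64_t e = mk_entry(2, 12345);     /* swap device 2, page offset 12345 */
    printf("type=%u offset=%llu\n", entry_type(e),
           (unsigned long long)entry_offset(e));
    return 0;
}
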
1441 spin_unlock(&p->lock); in swapcache_free_entries()
1449 return swap_count(si->swap_map[offset]); in __swap_count()
1464 count = swap_count(si->swap_map[offset]); in swap_swapcount()
1490 count = swap_count(p->swap_map[offset]); in swp_swapcount()
1497 page = vmalloc_to_page(p->swap_map + offset); in swp_swapcount()
1519 unsigned char *map = si->swap_map; in swap_page_trans_huge_swapped()
1544 swp_entry_t entry = folio->swap; in folio_swapped()
1557 * folio_free_swap() - Free the swap space used for this folio.
1579 * - most probably a call from __try_to_reclaim_swap() while in folio_free_swap()
1581 * but conceivably even a call from memory reclaim - will free in folio_free_swap()
1613 if (WARN_ON(data_race(!p->swap_map[swp_offset(entry)]))) { in free_swap_and_cache()
1639 spin_lock(&si->lock); in get_swap_page_of_type()
1640 if ((si->flags & SWP_WRITEOK) && scan_swap_map_slots(si, 1, 1, &entry)) in get_swap_page_of_type()
1642 spin_unlock(&si->lock); in get_swap_page_of_type()
1650 * @offset - number of the PAGE_SIZE-sized block of the device, starting
1660 return -1; in swap_type_of()
1666 if (!(sis->flags & SWP_WRITEOK)) in swap_type_of()
1669 if (device == sis->bdev->bd_dev) { in swap_type_of()
1670 struct swap_extent *se = first_se(sis); in swap_type_of() local
1672 if (se->start_block == offset) { in swap_type_of()
1679 return -ENODEV; in swap_type_of()
1690 if (!(sis->flags & SWP_WRITEOK)) in find_first_swap()
1692 *device = sis->bdev->bd_dev; in find_first_swap()
1697 return -ENODEV; in find_first_swap()
1707 struct swap_extent *se; in swapdev_block() local
1709 if (!si || !(si->flags & SWP_WRITEOK)) in swapdev_block()
1711 se = offset_to_swap_extent(si, offset); in swapdev_block()
1712 return se->start_block + (offset - se->start_page); in swapdev_block()
1729 spin_lock(&sis->lock); in count_swap_pages()
1730 if (sis->flags & SWP_WRITEOK) { in count_swap_pages()
1731 n = sis->pages; in count_swap_pages()
1733 n -= sis->inuse_pages; in count_swap_pages()
1735 spin_unlock(&sis->lock); in count_swap_pages()
1749 * just let do_wp_page work it out if a write is requested later - to
1765 return -ENOMEM; in unuse_pte()
1766 else if (unlikely(PTR_ERR(page) == -EHWPOISON)) in unuse_pte()
1769 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in unuse_pte()
1781 dec_mm_counter(vma->vm_mm, MM_SWAPENTS); in unuse_pte()
1804 dec_mm_counter(vma->vm_mm, MM_SWAPENTS); in unuse_pte()
1805 inc_mm_counter(vma->vm_mm, MM_ANONPAGES); in unuse_pte()
1824 new_pte = pte_mkold(mk_pte(page, vma->vm_page_prot)); in unuse_pte()
1830 set_pte_at(vma->vm_mm, addr, pte, new_pte); in unuse_pte()
1893 swp_count = READ_ONCE(si->swap_map[offset]); in unuse_pte_range()
1896 return -ENOMEM; in unuse_pte_range()
1983 addr = vma->vm_start; in unuse_vma()
1984 end = vma->vm_end; in unuse_vma()
1986 pgd = pgd_offset(vma->vm_mm, addr); in unuse_vma()
2006 if (vma->anon_vma && !is_vm_hugetlb_page(vma)) { in unuse_mm()
2035 for (i = prev + 1; i < si->max; i++) { in find_next_to_unuse()
2036 count = READ_ONCE(si->swap_map[i]); in find_next_to_unuse()
2043 if (i == si->max) in find_next_to_unuse()
2060 if (!READ_ONCE(si->inuse_pages)) in try_to_unuse()
2073 while (READ_ONCE(si->inuse_pages) && in try_to_unuse()
2075 (p = p->next) != &init_mm.mmlist) { in try_to_unuse()
2101 while (READ_ONCE(si->inuse_pages) && in try_to_unuse()
2134 * and robust (though cpu-intensive) just to keep retrying. in try_to_unuse()
2136 if (READ_ONCE(si->inuse_pages)) { in try_to_unuse()
2139 return -EINTR; in try_to_unuse()
2149 * added to the mmlist just after page_duplicate - before would be racy.
2157 if (swap_info[type]->inuse_pages) in drain_mmlist()
2170 while (!RB_EMPTY_ROOT(&sis->swap_extent_root)) { in destroy_swap_extents()
2171 struct rb_node *rb = sis->swap_extent_root.rb_node; in destroy_swap_extents()
2172 struct swap_extent *se = rb_entry(rb, struct swap_extent, rb_node); in destroy_swap_extents() local
2174 rb_erase(rb, &sis->swap_extent_root); in destroy_swap_extents()
2175 kfree(se); in destroy_swap_extents()
2178 if (sis->flags & SWP_ACTIVATED) { in destroy_swap_extents()
2179 struct file *swap_file = sis->swap_file; in destroy_swap_extents()
2180 struct address_space *mapping = swap_file->f_mapping; in destroy_swap_extents()
2182 sis->flags &= ~SWP_ACTIVATED; in destroy_swap_extents()
2183 if (mapping->a_ops->swap_deactivate) in destroy_swap_extents()
2184 mapping->a_ops->swap_deactivate(swap_file); in destroy_swap_extents()
2198 struct rb_node **link = &sis->swap_extent_root.rb_node, *parent = NULL; in add_swap_extent()
2199 struct swap_extent *se; in add_swap_extent() local
2208 link = &parent->rb_right; in add_swap_extent()
2212 se = rb_entry(parent, struct swap_extent, rb_node); in add_swap_extent()
2213 BUG_ON(se->start_page + se->nr_pages != start_page); in add_swap_extent()
2214 if (se->start_block + se->nr_pages == start_block) { in add_swap_extent()
2216 se->nr_pages += nr_pages; in add_swap_extent()
2222 new_se = kmalloc(sizeof(*se), GFP_KERNEL); in add_swap_extent()
2224 return -ENOMEM; in add_swap_extent()
2225 new_se->start_page = start_page; in add_swap_extent()
2226 new_se->nr_pages = nr_pages; in add_swap_extent()
2227 new_se->start_block = start_block; in add_swap_extent()
2229 rb_link_node(&new_se->rb_node, parent, link); in add_swap_extent()
2230 rb_insert_color(&new_se->rb_node, &sis->swap_extent_root); in add_swap_extent()
2252 * requirements, they are simply tossed out - we will never use those blocks
2259 * Typically it is in the 1-4 megabyte range. So we can have hundreds of
2260 * extents in the rbtree. - akpm.
2264 struct file *swap_file = sis->swap_file; in setup_swap_extents()
2265 struct address_space *mapping = swap_file->f_mapping; in setup_swap_extents()
2266 struct inode *inode = mapping->host; in setup_swap_extents()
2269 if (S_ISBLK(inode->i_mode)) { in setup_swap_extents()
2270 ret = add_swap_extent(sis, 0, sis->max, 0); in setup_swap_extents()
2271 *span = sis->pages; in setup_swap_extents()
2275 if (mapping->a_ops->swap_activate) { in setup_swap_extents()
2276 ret = mapping->a_ops->swap_activate(sis, swap_file, span); in setup_swap_extents()
2279 sis->flags |= SWP_ACTIVATED; in setup_swap_extents()
2280 if ((sis->flags & SWP_FS_OPS) && in setup_swap_extents()
2283 return -ENOMEM; in setup_swap_extents()
2295 if (p->bdev) in swap_node()
2296 bdev = p->bdev; in swap_node()
2298 bdev = p->swap_file->f_inode->i_sb->s_bdev; in swap_node()
2300 return bdev ? bdev->bd_disk->node_id : NUMA_NO_NODE; in swap_node()
2310 p->prio = prio; in setup_swap_info()
2312 p->prio = --least_priority; in setup_swap_info()
2315 * low-to-high, while swap ordering is high-to-low in setup_swap_info()
2317 p->list.prio = -p->prio; in setup_swap_info()
2319 if (p->prio >= 0) in setup_swap_info()
2320 p->avail_lists[i].prio = -p->prio; in setup_swap_info()
2323 p->avail_lists[i].prio = 1; in setup_swap_info()
2325 p->avail_lists[i].prio = -p->prio; in setup_swap_info()
2328 p->swap_map = swap_map; in setup_swap_info()
2329 p->cluster_info = cluster_info; in setup_swap_info()
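
setup_swap_info() above negates the swap priority before storing it as the plist priority because plists order entries low-to-high while swap devices are picked highest-priority first. The sketch below only demonstrates that negation trick with qsort; the priorities are invented:

#include <stdio.h>
#include <stdlib.h>

static int cmp(const void *a, const void *b)
{
    int x = *(const int *)a, y = *(const int *)b;
    return (x > y) - (x < y);            /* ascending, like a plist ordered low-to-high */
}

int main(void)
{
    int prio[] = { 5, -2, 10, 0 };       /* invented swap priorities, higher = preferred */
    int key[4];
    int i;

    for (i = 0; i < 4; i++)
        key[i] = -prio[i];               /* negate: highest priority becomes smallest key */
    qsort(key, 4, sizeof(key[0]), cmp);
    for (i = 0; i < 4; i++)
        printf("%d ", -key[i]);          /* prints: 10 5 0 -2 */
    printf("\n");
    return 0;
}
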
2334 p->flags |= SWP_WRITEOK; in _enable_swap_info()
2335 atomic_long_add(p->pages, &nr_swap_pages); in _enable_swap_info()
2336 total_swap_pages += p->pages; in _enable_swap_info()
2342 * which on removal of any swap_info_struct with an auto-assigned in _enable_swap_info()
2343 * (i.e. negative) priority increments the auto-assigned priority in _enable_swap_info()
2344 * of any lower-priority swap_info_structs. in _enable_swap_info()
2349 plist_add(&p->list, &swap_active_head); in _enable_swap_info()
2352 if (p->highest_bit) in _enable_swap_info()
2360 zswap_swapon(p->type); in enable_swap_info()
2363 spin_lock(&p->lock); in enable_swap_info()
2365 spin_unlock(&p->lock); in enable_swap_info()
2370 percpu_ref_resurrect(&p->users); in enable_swap_info()
2372 spin_lock(&p->lock); in enable_swap_info()
2374 spin_unlock(&p->lock); in enable_swap_info()
2381 spin_lock(&p->lock); in reinsert_swap_info()
2382 setup_swap_info(p, p->prio, p->swap_map, p->cluster_info); in reinsert_swap_info()
2384 spin_unlock(&p->lock); in reinsert_swap_info()
2412 return -EPERM; in SYSCALL_DEFINE1()
2414 BUG_ON(!current->mm); in SYSCALL_DEFINE1()
2425 mapping = victim->f_mapping; in SYSCALL_DEFINE1()
2428 if (p->flags & SWP_WRITEOK) { in SYSCALL_DEFINE1()
2429 if (p->swap_file->f_mapping == mapping) { in SYSCALL_DEFINE1()
2436 err = -EINVAL; in SYSCALL_DEFINE1()
2440 if (!security_vm_enough_memory_mm(current->mm, p->pages)) in SYSCALL_DEFINE1()
2441 vm_unacct_memory(p->pages); in SYSCALL_DEFINE1()
2443 err = -ENOMEM; in SYSCALL_DEFINE1()
2447 spin_lock(&p->lock); in SYSCALL_DEFINE1()
2449 if (p->prio < 0) { in SYSCALL_DEFINE1()
2454 si->prio++; in SYSCALL_DEFINE1()
2455 si->list.prio--; in SYSCALL_DEFINE1()
2457 if (si->avail_lists[nid].prio != 1) in SYSCALL_DEFINE1()
2458 si->avail_lists[nid].prio--; in SYSCALL_DEFINE1()
2463 plist_del(&p->list, &swap_active_head); in SYSCALL_DEFINE1()
2464 atomic_long_sub(p->pages, &nr_swap_pages); in SYSCALL_DEFINE1()
2465 total_swap_pages -= p->pages; in SYSCALL_DEFINE1()
2466 p->flags &= ~SWP_WRITEOK; in SYSCALL_DEFINE1()
2467 spin_unlock(&p->lock); in SYSCALL_DEFINE1()
2473 err = try_to_unuse(p->type); in SYSCALL_DEFINE1()
2477 /* re-insert swap space back into swap_list */ in SYSCALL_DEFINE1()
2492 percpu_ref_kill(&p->users); in SYSCALL_DEFINE1()
2494 wait_for_completion(&p->comp); in SYSCALL_DEFINE1()
2496 flush_work(&p->discard_work); in SYSCALL_DEFINE1()
2499 if (p->flags & SWP_CONTINUED) in SYSCALL_DEFINE1()
2502 if (!p->bdev || !bdev_nonrot(p->bdev)) in SYSCALL_DEFINE1()
2507 spin_lock(&p->lock); in SYSCALL_DEFINE1()
2511 p->highest_bit = 0; /* cuts scans short */ in SYSCALL_DEFINE1()
2512 while (p->flags >= SWP_SCANNING) { in SYSCALL_DEFINE1()
2513 spin_unlock(&p->lock); in SYSCALL_DEFINE1()
2517 spin_lock(&p->lock); in SYSCALL_DEFINE1()
2520 swap_file = p->swap_file; in SYSCALL_DEFINE1()
2521 old_block_size = p->old_block_size; in SYSCALL_DEFINE1()
2522 p->swap_file = NULL; in SYSCALL_DEFINE1()
2523 p->max = 0; in SYSCALL_DEFINE1()
2524 swap_map = p->swap_map; in SYSCALL_DEFINE1()
2525 p->swap_map = NULL; in SYSCALL_DEFINE1()
2526 cluster_info = p->cluster_info; in SYSCALL_DEFINE1()
2527 p->cluster_info = NULL; in SYSCALL_DEFINE1()
2528 spin_unlock(&p->lock); in SYSCALL_DEFINE1()
2530 arch_swap_invalidate_area(p->type); in SYSCALL_DEFINE1()
2531 zswap_swapoff(p->type); in SYSCALL_DEFINE1()
2533 free_percpu(p->percpu_cluster); in SYSCALL_DEFINE1()
2534 p->percpu_cluster = NULL; in SYSCALL_DEFINE1()
2535 free_percpu(p->cluster_next_cpu); in SYSCALL_DEFINE1()
2536 p->cluster_next_cpu = NULL; in SYSCALL_DEFINE1()
2540 swap_cgroup_swapoff(p->type); in SYSCALL_DEFINE1()
2541 exit_swap_address_space(p->type); in SYSCALL_DEFINE1()
2543 inode = mapping->host; in SYSCALL_DEFINE1()
2544 if (S_ISBLK(inode->i_mode)) { in SYSCALL_DEFINE1()
2552 inode->i_flags &= ~S_SWAPFILE; in SYSCALL_DEFINE1()
2559 * not hold p->lock after we cleared its SWP_WRITEOK. in SYSCALL_DEFINE1()
2562 p->flags = 0; in SYSCALL_DEFINE1()
2579 struct seq_file *seq = file->private_data; in swaps_poll()
2583 if (seq->poll_event != atomic_read(&proc_poll_event)) { in swaps_poll()
2584 seq->poll_event = atomic_read(&proc_poll_event); in swaps_poll()
2592 static void *swap_start(struct seq_file *swap, loff_t *pos) in swap_start() argument
2596 loff_t l = *pos; in swap_start()
2604 if (!(si->flags & SWP_USED) || !si->swap_map) in swap_start()
2606 if (!--l) in swap_start()
2613 static void *swap_next(struct seq_file *swap, void *v, loff_t *pos) in swap_next() argument
2621 type = si->type + 1; in swap_next()
2623 ++(*pos); in swap_next()
2625 if (!(si->flags & SWP_USED) || !si->swap_map) in swap_next()
2650 bytes = K(si->pages); in swap_show()
2651 inuse = K(READ_ONCE(si->inuse_pages)); in swap_show()
2653 file = si->swap_file; in swap_show()
2656 len < 40 ? 40 - len : 1, " ", in swap_show()
2657 S_ISBLK(file_inode(file)->i_mode) ? in swap_show()
2661 si->prio); in swap_show()
2681 seq = file->private_data; in swaps_open()
2682 seq->poll_event = atomic_read(&proc_poll_event); in swaps_open()
2721 return ERR_PTR(-ENOMEM); in alloc_swap_info()
2723 if (percpu_ref_init(&p->users, swap_users_ref_free, in alloc_swap_info()
2726 return ERR_PTR(-ENOMEM); in alloc_swap_info()
2731 if (!(swap_info[type]->flags & SWP_USED)) in alloc_swap_info()
2736 percpu_ref_exit(&p->users); in alloc_swap_info()
2738 return ERR_PTR(-EPERM); in alloc_swap_info()
2741 p->type = type; in alloc_swap_info()
2753 * would be relying on p->type to remain valid. in alloc_swap_info()
2756 p->swap_extent_root = RB_ROOT; in alloc_swap_info()
2757 plist_node_init(&p->list, 0); in alloc_swap_info()
2759 plist_node_init(&p->avail_lists[i], 0); in alloc_swap_info()
2760 p->flags = SWP_USED; in alloc_swap_info()
2763 percpu_ref_exit(&defer->users); in alloc_swap_info()
2766 spin_lock_init(&p->lock); in alloc_swap_info()
2767 spin_lock_init(&p->cont_lock); in alloc_swap_info()
2768 init_completion(&p->comp); in alloc_swap_info()
2777 if (S_ISBLK(inode->i_mode)) { in claim_swapfile()
2778 p->bdev = blkdev_get_by_dev(inode->i_rdev, in claim_swapfile()
2780 if (IS_ERR(p->bdev)) { in claim_swapfile()
2781 error = PTR_ERR(p->bdev); in claim_swapfile()
2782 p->bdev = NULL; in claim_swapfile()
2785 p->old_block_size = block_size(p->bdev); in claim_swapfile()
2786 error = set_blocksize(p->bdev, PAGE_SIZE); in claim_swapfile()
2794 if (bdev_is_zoned(p->bdev)) in claim_swapfile()
2795 return -EINVAL; in claim_swapfile()
2796 p->flags |= SWP_BLKDEV; in claim_swapfile()
2797 } else if (S_ISREG(inode->i_mode)) { in claim_swapfile()
2798 p->bdev = inode->i_sb->s_bdev; in claim_swapfile()
2842 if (memcmp("SWAPSPACE2", swap_header->magic.magic, 10)) { in read_swap_header()
2843 pr_err("Unable to find swap-space signature\n"); in read_swap_header()
2848 if (swab32(swap_header->info.version) == 1) { in read_swap_header()
2849 swab32s(&swap_header->info.version); in read_swap_header()
2850 swab32s(&swap_header->info.last_page); in read_swap_header()
2851 swab32s(&swap_header->info.nr_badpages); in read_swap_header()
2852 if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES) in read_swap_header()
2854 for (i = 0; i < swap_header->info.nr_badpages; i++) in read_swap_header()
2855 swab32s(&swap_header->info.badpages[i]); in read_swap_header()
2857 /* Check the swap header's sub-version */ in read_swap_header()
2858 if (swap_header->info.version != 1) { in read_swap_header()
2860 swap_header->info.version); in read_swap_header()
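
read_swap_header() above detects a swap header written with the opposite byte order: if the on-disk version field only equals 1 after swab32(), every multi-byte header field is byte-swapped in place before use. A stand-alone demonstration of that check; __builtin_bswap32 (GCC/Clang) stands in for the kernel's swab32/swab32s helpers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Pretend the header was written on a machine of the opposite endianness. */
    uint32_t on_disk_version = __builtin_bswap32(1);    /* reads back as 0x01000000 */

    if (on_disk_version != 1 && __builtin_bswap32(on_disk_version) == 1) {
        /* Foreign byte order detected: fix the field up in place, as swab32s() does. */
        on_disk_version = __builtin_bswap32(on_disk_version);
    }
    printf("version after fixup: %u\n", (unsigned)on_disk_version);   /* prints 1 */
    return 0;
}
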
2864 p->lowest_bit = 1; in read_swap_header()
2865 p->cluster_next = 1; in read_swap_header()
2866 p->cluster_nr = 0; in read_swap_header()
2869 last_page = swap_header->info.last_page; in read_swap_header()
2871 pr_warn("Empty swap-file\n"); in read_swap_header()
2880 /* p->max is an unsigned int: don't overflow it */ in read_swap_header()
2884 p->highest_bit = maxpages - 1; in read_swap_header()
2893 if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode)) in read_swap_header()
2895 if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES) in read_swap_header()
2919 unsigned long col = p->cluster_next / SWAPFILE_CLUSTER % SWAP_CLUSTER_COLS; in setup_swap_map_and_extents()
2922 nr_good_pages = maxpages - 1; /* omit header page */ in setup_swap_map_and_extents()
2924 cluster_list_init(&p->free_clusters); in setup_swap_map_and_extents()
2925 cluster_list_init(&p->discard_clusters); in setup_swap_map_and_extents()
2927 for (i = 0; i < swap_header->info.nr_badpages; i++) { in setup_swap_map_and_extents()
2928 unsigned int page_nr = swap_header->info.badpages[i]; in setup_swap_map_and_extents()
2929 if (page_nr == 0 || page_nr > swap_header->info.last_page) in setup_swap_map_and_extents()
2930 return -EINVAL; in setup_swap_map_and_extents()
2933 nr_good_pages--; in setup_swap_map_and_extents()
2953 p->max = maxpages; in setup_swap_map_and_extents()
2954 p->pages = nr_good_pages; in setup_swap_map_and_extents()
2958 nr_good_pages = p->pages; in setup_swap_map_and_extents()
2961 pr_warn("Empty swap-file\n"); in setup_swap_map_and_extents()
2962 return -EINVAL; in setup_swap_map_and_extents()
2982 cluster_list_add_tail(&p->free_clusters, cluster_info, in setup_swap_map_and_extents()
3009 return -EINVAL; in SYSCALL_DEFINE2()
3012 return -EPERM; in SYSCALL_DEFINE2()
3015 return -ENOMEM; in SYSCALL_DEFINE2()
3021 INIT_WORK(&p->discard_work, swap_discard_work); in SYSCALL_DEFINE2()
3036 p->swap_file = swap_file; in SYSCALL_DEFINE2()
3037 mapping = swap_file->f_mapping; in SYSCALL_DEFINE2()
3038 dentry = swap_file->f_path.dentry; in SYSCALL_DEFINE2()
3039 inode = mapping->host; in SYSCALL_DEFINE2()
3047 error = -ENOENT; in SYSCALL_DEFINE2()
3051 error = -EBUSY; in SYSCALL_DEFINE2()
3058 if (!mapping->a_ops->read_folio) { in SYSCALL_DEFINE2()
3059 error = -EINVAL; in SYSCALL_DEFINE2()
3071 error = -EINVAL; in SYSCALL_DEFINE2()
3078 error = -ENOMEM; in SYSCALL_DEFINE2()
3082 if (p->bdev && bdev_stable_writes(p->bdev)) in SYSCALL_DEFINE2()
3083 p->flags |= SWP_STABLE_WRITES; in SYSCALL_DEFINE2()
3085 if (p->bdev && bdev_synchronous(p->bdev)) in SYSCALL_DEFINE2()
3086 p->flags |= SWP_SYNCHRONOUS_IO; in SYSCALL_DEFINE2()
3088 if (p->bdev && bdev_nonrot(p->bdev)) { in SYSCALL_DEFINE2()
3092 p->flags |= SWP_SOLIDSTATE; in SYSCALL_DEFINE2()
3093 p->cluster_next_cpu = alloc_percpu(unsigned int); in SYSCALL_DEFINE2()
3094 if (!p->cluster_next_cpu) { in SYSCALL_DEFINE2()
3095 error = -ENOMEM; in SYSCALL_DEFINE2()
3103 per_cpu(*p->cluster_next_cpu, cpu) = in SYSCALL_DEFINE2()
3104 get_random_u32_inclusive(1, p->highest_bit); in SYSCALL_DEFINE2()
3111 error = -ENOMEM; in SYSCALL_DEFINE2()
3116 spin_lock_init(&((cluster_info + ci)->lock)); in SYSCALL_DEFINE2()
3118 p->percpu_cluster = alloc_percpu(struct percpu_cluster); in SYSCALL_DEFINE2()
3119 if (!p->percpu_cluster) { in SYSCALL_DEFINE2()
3120 error = -ENOMEM; in SYSCALL_DEFINE2()
3125 cluster = per_cpu_ptr(p->percpu_cluster, cpu); in SYSCALL_DEFINE2()
3126 cluster_set_null(&cluster->index); in SYSCALL_DEFINE2()
3133 error = swap_cgroup_swapon(p->type, maxpages); in SYSCALL_DEFINE2()
3145 p->bdev && bdev_max_discard_sectors(p->bdev)) { in SYSCALL_DEFINE2()
3152 p->flags |= (SWP_DISCARDABLE | SWP_AREA_DISCARD | in SYSCALL_DEFINE2()
3157 * either do single-time area discards only, or to just in SYSCALL_DEFINE2()
3158 * perform discards for released swap page-clusters. in SYSCALL_DEFINE2()
3159 * Now it's time to adjust the p->flags accordingly. in SYSCALL_DEFINE2()
3162 p->flags &= ~SWP_PAGE_DISCARD; in SYSCALL_DEFINE2()
3164 p->flags &= ~SWP_AREA_DISCARD; in SYSCALL_DEFINE2()
3166 /* issue a swapon-time discard if it's still required */ in SYSCALL_DEFINE2()
3167 if (p->flags & SWP_AREA_DISCARD) { in SYSCALL_DEFINE2()
3175 error = init_swap_address_space(p->type, maxpages); in SYSCALL_DEFINE2()
3183 inode->i_flags |= S_SWAPFILE; in SYSCALL_DEFINE2()
3186 inode->i_flags &= ~S_SWAPFILE; in SYSCALL_DEFINE2()
3191 prio = -1; in SYSCALL_DEFINE2()
3198 K(p->pages), name->name, p->prio, nr_extents, in SYSCALL_DEFINE2()
3200 (p->flags & SWP_SOLIDSTATE) ? "SS" : "", in SYSCALL_DEFINE2()
3201 (p->flags & SWP_DISCARDABLE) ? "D" : "", in SYSCALL_DEFINE2()
3202 (p->flags & SWP_AREA_DISCARD) ? "s" : "", in SYSCALL_DEFINE2()
3203 (p->flags & SWP_PAGE_DISCARD) ? "c" : ""); in SYSCALL_DEFINE2()
3212 exit_swap_address_space(p->type); in SYSCALL_DEFINE2()
3216 free_percpu(p->percpu_cluster); in SYSCALL_DEFINE2()
3217 p->percpu_cluster = NULL; in SYSCALL_DEFINE2()
3218 free_percpu(p->cluster_next_cpu); in SYSCALL_DEFINE2()
3219 p->cluster_next_cpu = NULL; in SYSCALL_DEFINE2()
3220 if (inode && S_ISBLK(inode->i_mode) && p->bdev) { in SYSCALL_DEFINE2()
3221 set_blocksize(p->bdev, p->old_block_size); in SYSCALL_DEFINE2()
3222 blkdev_put(p->bdev, p); in SYSCALL_DEFINE2()
3226 swap_cgroup_swapoff(p->type); in SYSCALL_DEFINE2()
3228 p->swap_file = NULL; in SYSCALL_DEFINE2()
3229 p->flags = 0; in SYSCALL_DEFINE2()
3260 if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK)) in si_swapinfo()
3261 nr_to_be_unused += READ_ONCE(si->inuse_pages); in si_swapinfo()
3263 val->freeswap = atomic_long_read(&nr_swap_pages) + nr_to_be_unused; in si_swapinfo()
3264 val->totalswap = total_swap_pages + nr_to_be_unused; in si_swapinfo()
3272 * - success -> 0
3273 * - swp_entry is invalid -> EINVAL
3274 * - swp_entry is migration entry -> EINVAL
3275 * - swap-cache reference is requested but there is already one. -> EEXIST
3276 * - swap-cache reference is requested but the entry is not used. -> ENOENT
3277 * - swap-mapped reference requested but needs continued swap count. -> ENOMEM
3293 count = p->swap_map[offset]; in __swap_duplicate()
3300 err = -ENOENT; in __swap_duplicate()
3314 err = -EEXIST; in __swap_duplicate()
3316 err = -ENOENT; in __swap_duplicate()
3323 err = -EINVAL; in __swap_duplicate()
3327 err = -ENOMEM; in __swap_duplicate()
3329 err = -ENOENT; /* unused swap entry */ in __swap_duplicate()
3331 WRITE_ONCE(p->swap_map[offset], count | has_cache); in __swap_duplicate()
3349 * Returns 0 for success, or -ENOMEM if a swap_count_continuation is required
3351 * if __swap_duplicate() fails for another reason (-EINVAL or -ENOENT), which
3358 while (!err && __swap_duplicate(entry, 1) == -ENOMEM) in swap_duplicate()
3368 * -EEXIST means there is a swap cache.
3401 * out-of-line methods to avoid include hell.
3405 return page_swap_info(&folio->page)->swap_file->f_mapping; in swapcache_mapping()
3417 * add_swap_count_continuation - called when a swap count is duplicated
3456 spin_lock(&si->lock); in add_swap_count_continuation()
3462 count = swap_count(si->swap_map[offset]); in add_swap_count_continuation()
3468 * over-provisioning. in add_swap_count_continuation()
3474 ret = -ENOMEM; in add_swap_count_continuation()
3478 head = vmalloc_to_page(si->swap_map + offset); in add_swap_count_continuation()
3481 spin_lock(&si->cont_lock); in add_swap_count_continuation()
3488 INIT_LIST_HEAD(&head->lru); in add_swap_count_continuation()
3490 si->flags |= SWP_CONTINUED; in add_swap_count_continuation()
3493 list_for_each_entry(list_page, &head->lru, lru) { in add_swap_count_continuation()
3515 list_add_tail(&page->lru, &head->lru); in add_swap_count_continuation()
3518 spin_unlock(&si->cont_lock); in add_swap_count_continuation()
3521 spin_unlock(&si->lock); in add_swap_count_continuation()
3530 * swap_count_continued - when the original swap_map count is incremented
3546 head = vmalloc_to_page(si->swap_map + offset); in swap_count_continued()
3552 spin_lock(&si->cont_lock); in swap_count_continued()
3601 *map -= 1; in swap_count_continued()
3614 spin_unlock(&si->cont_lock); in swap_count_continued()
3619 * free_swap_count_continuations - swapoff free all the continuation pages
3626 for (offset = 0; offset < si->max; offset += PAGE_SIZE) { in free_swap_count_continuations()
3628 head = vmalloc_to_page(si->swap_map + offset); in free_swap_count_continuations()
3632 list_for_each_entry_safe(page, next, &head->lru, lru) { in free_swap_count_continuations()
3633 list_del(&page->lru); in free_swap_count_continuations()
3656 if (current->throttle_disk) in __folio_throttle_swaprate()
3662 if (si->bdev) { in __folio_throttle_swaprate()
3663 blkcg_schedule_throttle(si->bdev->bd_disk, true); in __folio_throttle_swaprate()
3679 return -ENOMEM; in swapfile_init()