Lines matching references to p
562 static void inc_cluster_info_page(struct swap_info_struct *p, in inc_cluster_info_page() argument
570 alloc_cluster(p, idx); in inc_cluster_info_page()
582 static void dec_cluster_info_page(struct swap_info_struct *p, in dec_cluster_info_page() argument
595 free_cluster(p, idx); in dec_cluster_info_page()
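
The pair above keeps a per-cluster count of in-use pages: the first page charged to a cluster pulls it off the free list (alloc_cluster), and releasing the last page returns it (free_cluster). A minimal userspace model of that accounting, with a simplified struct cluster standing in for the kernel's swap_cluster_info:

```c
#include <assert.h>

#define CLUSTER_PAGES 256              /* stand-in for SWAPFILE_CLUSTER */

struct cluster { unsigned int count; };        /* pages in use */

static void alloc_cluster(struct cluster *c) { (void)c; /* unlink from free list */ }
static void free_cluster(struct cluster *c)  { (void)c; /* relink to free list */ }

static void inc_cluster_page(struct cluster *ci, unsigned long page)
{
	struct cluster *c = &ci[page / CLUSTER_PAGES];

	if (c->count == 0)             /* first page: claim the cluster */
		alloc_cluster(c);
	assert(c->count < CLUSTER_PAGES);
	c->count++;
}

static void dec_cluster_page(struct cluster *ci, unsigned long page)
{
	struct cluster *c = &ci[page / CLUSTER_PAGES];

	assert(c->count > 0);
	if (--c->count == 0)           /* last page: return the cluster */
		free_cluster(c);
}
```
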
680 static void __del_from_avail_list(struct swap_info_struct *p) in __del_from_avail_list() argument
684 assert_spin_locked(&p->lock); in __del_from_avail_list()
686 plist_del(&p->avail_lists[nid], &swap_avail_heads[nid]); in __del_from_avail_list()
689 static void del_from_avail_list(struct swap_info_struct *p) in del_from_avail_list() argument
692 __del_from_avail_list(p); in del_from_avail_list()
713 static void add_to_avail_list(struct swap_info_struct *p) in add_to_avail_list() argument
719 plist_add(&p->avail_lists[nid], &swap_avail_heads[nid]); in add_to_avail_list()
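
__del_from_avail_list() and friends keep one priority list per NUMA node, with the device linked into every node's list, so allocation can prefer node-local devices. A sketch of that shape using plain sorted singly linked lists in place of the kernel's plist (NR_NODES and struct swap_dev are illustrative):

```c
#include <stddef.h>

#define NR_NODES 4

struct swap_dev {
	int prio;
	struct swap_dev *next[NR_NODES];   /* one link per node list */
};

static struct swap_dev *avail_heads[NR_NODES];

static void add_to_avail(struct swap_dev *p)
{
	for (int nid = 0; nid < NR_NODES; nid++) {
		struct swap_dev **pp = &avail_heads[nid];

		while (*pp && (*pp)->prio >= p->prio)  /* highest prio first */
			pp = &(*pp)->next[nid];
		p->next[nid] = *pp;
		*pp = p;
	}
}

static void del_from_avail(struct swap_dev *p)
{
	for (int nid = 0; nid < NR_NODES; nid++) {
		struct swap_dev **pp = &avail_heads[nid];

		while (*pp && *pp != p)
			pp = &(*pp)->next[nid];
		if (*pp)                   /* unlink from this node's list */
			*pp = p->next[nid];
	}
}
```
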
1133 struct swap_info_struct *p; in _swap_info_get() local
1138 p = swp_swap_info(entry); in _swap_info_get()
1139 if (!p) in _swap_info_get()
1141 if (data_race(!(p->flags & SWP_USED))) in _swap_info_get()
1144 if (offset >= p->max) in _swap_info_get()
1146 if (data_race(!p->swap_map[swp_offset(entry)])) in _swap_info_get()
1148 return p; in _swap_info_get()
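
_swap_info_get() is a validation chain: decode the entry, find the device, then bail on each inconsistency in turn (unused device, out-of-range offset, unallocated slot). A userspace model with simplified stand-in types and flags:

```c
#include <stddef.h>

#define SWP_USED 0x1
#define MAX_SWAPFILES 32

struct swap_dev {
	unsigned int flags;
	unsigned long max;            /* one past the last valid offset */
	unsigned char *swap_map;      /* per-slot reference counts */
};

static struct swap_dev *swap_info[MAX_SWAPFILES];

static struct swap_dev *lookup_swap_dev(unsigned int type, unsigned long offset)
{
	struct swap_dev *p;

	if (type >= MAX_SWAPFILES)
		return NULL;                  /* bogus device index */
	p = swap_info[type];
	if (!p || !(p->flags & SWP_USED))
		return NULL;                  /* device not in use */
	if (offset >= p->max)
		return NULL;                  /* offset past end of device */
	if (!p->swap_map[offset])
		return NULL;                  /* slot not allocated */
	return p;
}
```
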
1168 struct swap_info_struct *p; in swap_info_get_cont() local
1170 p = _swap_info_get(entry); in swap_info_get_cont()
1172 if (p != q) { in swap_info_get_cont()
1175 if (p != NULL) in swap_info_get_cont()
1176 spin_lock(&p->lock); in swap_info_get_cont()
1178 return p; in swap_info_get_cont()
1181 static unsigned char __swap_entry_free_locked(struct swap_info_struct *p, in __swap_entry_free_locked() argument
1188 count = p->swap_map[offset]; in __swap_entry_free_locked()
1204 if (swap_count_continued(p, offset, count)) in __swap_entry_free_locked()
1214 WRITE_ONCE(p->swap_map[offset], usage); in __swap_entry_free_locked()
1216 WRITE_ONCE(p->swap_map[offset], SWAP_HAS_CACHE); in __swap_entry_free_locked()
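
__swap_entry_free_locked() works on the swap_map byte for a slot: the low bits count mapping references, SWAP_HAS_CACHE marks a swap-cache reference, and saturated counts continue into extra pages (swap_count_continued(), elided here along with the cache-bit-only case). A simplified decrement; the constants mirror the kernel's encoding but the body is only a sketch:

```c
#define SWAP_HAS_CACHE	0x40
#define COUNT_MASK	0x3f

static unsigned char swap_entry_put_one(unsigned char *slot)
{
	unsigned char count = *slot & COUNT_MASK;
	unsigned char has_cache = *slot & SWAP_HAS_CACHE;
	unsigned char usage;

	if (count)
		count--;                     /* drop one mapping reference */
	usage = count | has_cache;
	*slot = usage;                       /* slot free once both are gone */
	return usage;
}
```
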
1296 static unsigned char __swap_entry_free(struct swap_info_struct *p, in __swap_entry_free() argument
1303 ci = lock_cluster_or_swap_info(p, offset); in __swap_entry_free()
1304 usage = __swap_entry_free_locked(p, offset, 1); in __swap_entry_free()
1305 unlock_cluster_or_swap_info(p, ci); in __swap_entry_free()
1312 static void swap_entry_free(struct swap_info_struct *p, swp_entry_t entry) in swap_entry_free() argument
1318 ci = lock_cluster(p, offset); in swap_entry_free()
1319 count = p->swap_map[offset]; in swap_entry_free()
1321 p->swap_map[offset] = 0; in swap_entry_free()
1322 dec_cluster_info_page(p, p->cluster_info, offset); in swap_entry_free()
1326 swap_range_free(p, offset, 1); in swap_entry_free()
1335 struct swap_info_struct *p; in swap_free() local
1337 p = _swap_info_get(entry); in swap_free()
1338 if (p) in swap_free()
1339 __swap_entry_free(p, entry); in swap_free()
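
Together these three give the layered free path: swap_free() looks up the device and drops one reference via __swap_entry_free(), and only when the last reference is gone does swap_entry_free() wipe the slot and update cluster accounting (the kernel interposes the swap-slot cache between the two steps, elided here). A standalone sketch with illustrative names:

```c
#include <stddef.h>

#define SWAP_HAS_CACHE 0x40
#define COUNT_MASK     0x3f

struct swap_dev { unsigned char *swap_map; };

static void reclaim_slot(struct swap_dev *p, unsigned long offset)
{
	p->swap_map[offset] = 0;
	/* kernel also does dec_cluster_info_page() and swap_range_free() */
}

static void my_swap_free(struct swap_dev *p, unsigned long offset)
{
	unsigned char c;

	if (!p)
		return;
	c = p->swap_map[offset];
	if (c & COUNT_MASK)
		c--;                          /* drop one pte reference */
	p->swap_map[offset] = c;
	if (c == 0)                           /* no refs, no cache: reclaim */
		reclaim_slot(p, offset);
}
```
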
1418 struct swap_info_struct *p, *prev; in swapcache_free_entries() local
1425 p = NULL; in swapcache_free_entries()
1435 p = swap_info_get_cont(entries[i], prev); in swapcache_free_entries()
1436 if (p) in swapcache_free_entries()
1437 swap_entry_free(p, entries[i]); in swapcache_free_entries()
1438 prev = p; in swapcache_free_entries()
1440 if (p) in swapcache_free_entries()
1441 spin_unlock(&p->lock); in swapcache_free_entries()
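
swapcache_free_entries() shows why swap_info_get_cont() caches the previous device: when freeing a batch, the device lock is only swapped when consecutive entries land on a different device. A compilable model with pthread mutexes standing in for the spinlock and a toy lookup():

```c
#include <pthread.h>
#include <stddef.h>

struct swap_dev { pthread_mutex_t lock; };

static struct swap_dev devs[2] = {
	{ PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
};

static struct swap_dev *lookup(unsigned long entry)
{
	return &devs[(entry >> 16) & 1];   /* toy: high bits pick the device */
}

static void free_one(struct swap_dev *p, unsigned long entry)
{
	(void)p; (void)entry;              /* slot bookkeeping elided */
}

static void free_batch(const unsigned long *entries, int n)
{
	struct swap_dev *p = NULL, *prev = NULL;

	for (int i = 0; i < n; i++) {
		p = lookup(entries[i]);
		if (p != prev) {           /* device changed: hand the lock over */
			if (prev)
				pthread_mutex_unlock(&prev->lock);
			pthread_mutex_lock(&p->lock);
		}
		free_one(p, entries[i]);
		prev = p;
	}
	if (p)
		pthread_mutex_unlock(&p->lock);
}
```
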
1476 struct swap_info_struct *p; in swp_swapcount() local
1482 p = _swap_info_get(entry); in swp_swapcount()
1483 if (!p) in swp_swapcount()
1488 ci = lock_cluster_or_swap_info(p, offset); in swp_swapcount()
1490 count = swap_count(p->swap_map[offset]); in swp_swapcount()
1497 page = vmalloc_to_page(p->swap_map + offset); in swp_swapcount()
1511 unlock_cluster_or_swap_info(p, ci); in swp_swapcount()
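
swp_swapcount() reassembles a full reference count whose low "digit" lives in the swap_map byte and whose higher digits live in continuation pages (hence the vmalloc_to_page() walk above). A sketch of the arithmetic with the continuations flattened into an array; note the kernel's first multiplier is actually SWAP_MAP_MAX + 1 rather than SWAP_CONT_MAX + 1:

```c
#define COUNT_CONTINUED 0x80
#define SWAP_CONT_MAX   0x7f

static unsigned long long total_count(unsigned char first,
				      const unsigned char *cont, int levels)
{
	unsigned long long count = first & ~COUNT_CONTINUED;
	unsigned long long base = SWAP_CONT_MAX + 1;

	if (!(first & COUNT_CONTINUED))
		return count;                 /* no continuation pages */
	for (int i = 0; i < levels; i++) {    /* add higher digits, base n */
		count += (unsigned long long)(cont[i] & ~COUNT_CONTINUED) * base;
		base *= SWAP_CONT_MAX + 1;
		if (!(cont[i] & COUNT_CONTINUED))
			break;
	}
	return count;
}
```
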
1605 struct swap_info_struct *p; in free_swap_and_cache() local
1611 p = get_swap_device(entry); in free_swap_and_cache()
1612 if (p) { in free_swap_and_cache()
1613 if (WARN_ON(data_race(!p->swap_map[swp_offset(entry)]))) { in free_swap_and_cache()
1614 put_swap_device(p); in free_swap_and_cache()
1618 count = __swap_entry_free(p, entry); in free_swap_and_cache()
1620 !swap_page_trans_huge_swapped(p, entry)) in free_swap_and_cache()
1621 __try_to_reclaim_swap(p, swp_offset(entry), in free_swap_and_cache()
1623 put_swap_device(p); in free_swap_and_cache()
1625 return p != NULL; in free_swap_and_cache()
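
free_swap_and_cache() brackets its work with get_swap_device()/put_swap_device(), which pin the device against a concurrent swapoff. A model using a plain atomic counter and flag in place of the kernel's percpu_ref (the real percpu_ref_tryget_live() closes atomically the small window this version handles with an undo):

```c
#include <stdatomic.h>
#include <stddef.h>

struct swap_dev {
	atomic_long users;
	atomic_bool dying;                 /* set once swapoff starts */
};

static struct swap_dev *get_swap_device(struct swap_dev *p)
{
	if (!p)
		return NULL;
	atomic_fetch_add(&p->users, 1);
	if (atomic_load(&p->dying)) {      /* lost the race with swapoff */
		atomic_fetch_sub(&p->users, 1);
		return NULL;
	}
	return p;
}

static void put_swap_device(struct swap_dev *p)
{
	atomic_fetch_sub(&p->users, 1);    /* swapoff waits for this to drain */
}
```
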
2053 struct list_head *p; in try_to_unuse() local
2072 p = &init_mm.mmlist; in try_to_unuse()
2075 (p = p->next) != &init_mm.mmlist) { in try_to_unuse()
2077 mm = list_entry(p, struct mm_struct, mmlist); in try_to_unuse()
2153 struct list_head *p, *next; in drain_mmlist() local
2160 list_for_each_safe(p, next, &init_mm.mmlist) in drain_mmlist()
2161 list_del_init(p); in drain_mmlist()
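
Here p is a plain struct list_head * rather than a swap_info_struct *: try_to_unuse() steps through init_mm.mmlist node by node, while drain_mmlist() empties it with list_for_each_safe() because each node is unlinked mid-walk. A hand-rolled model of the safe variant (not <linux/list.h>):

```c
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

static void list_del_init(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
	e->next = e->prev = e;       /* node now points at itself */
}

static void drain_list(struct list_head *head)
{
	struct list_head *p, *next;

	/* _safe walk: grab next before list_del_init() unlinks p */
	for (p = head->next, next = p->next; p != head;
	     p = next, next = p->next)
		list_del_init(p);
}
```
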
2291 static int swap_node(struct swap_info_struct *p) in swap_node() argument
2295 if (p->bdev) in swap_node()
2296 bdev = p->bdev; in swap_node()
2298 bdev = p->swap_file->f_inode->i_sb->s_bdev; in swap_node()
2303 static void setup_swap_info(struct swap_info_struct *p, int prio, in setup_swap_info() argument
2310 p->prio = prio; in setup_swap_info()
2312 p->prio = --least_priority; in setup_swap_info()
2317 p->list.prio = -p->prio; in setup_swap_info()
2319 if (p->prio >= 0) in setup_swap_info()
2320 p->avail_lists[i].prio = -p->prio; in setup_swap_info()
2322 if (swap_node(p) == i) in setup_swap_info()
2323 p->avail_lists[i].prio = 1; in setup_swap_info()
2325 p->avail_lists[i].prio = -p->prio; in setup_swap_info()
2328 p->swap_map = swap_map; in setup_swap_info()
2329 p->cluster_info = cluster_info; in setup_swap_info()
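
setup_swap_info() resolves priorities: an explicit priority is used as given, otherwise the device gets the next negative "auto" priority; plist keys store the negated value so higher priorities sort first, and an auto-priority device is bumped to priority 1 on its own node (swap_node()). A sketch with illustrative NR_NODES and fields:

```c
#define NR_NODES 4

struct swap_dev {
	int prio;
	int list_prio;
	int avail_prio[NR_NODES];
	int node;                     /* node the backing device sits on */
};

static int least_priority = -1;

static void setup_prio(struct swap_dev *p, int prio)
{
	p->prio = prio >= 0 ? prio : --least_priority;
	p->list_prio = -p->prio;      /* plist sorts ascending */
	for (int i = 0; i < NR_NODES; i++) {
		if (p->prio >= 0)
			p->avail_prio[i] = -p->prio;
		else if (p->node == i)
			p->avail_prio[i] = 1;   /* prefer the local device */
		else
			p->avail_prio[i] = -p->prio;
	}
}
```
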
2332 static void _enable_swap_info(struct swap_info_struct *p) in _enable_swap_info() argument
2334 p->flags |= SWP_WRITEOK; in _enable_swap_info()
2335 atomic_long_add(p->pages, &nr_swap_pages); in _enable_swap_info()
2336 total_swap_pages += p->pages; in _enable_swap_info()
2349 plist_add(&p->list, &swap_active_head); in _enable_swap_info()
2352 if (p->highest_bit) in _enable_swap_info()
2353 add_to_avail_list(p); in _enable_swap_info()
2356 static void enable_swap_info(struct swap_info_struct *p, int prio, in enable_swap_info() argument
2360 zswap_swapon(p->type); in enable_swap_info()
2363 spin_lock(&p->lock); in enable_swap_info()
2364 setup_swap_info(p, prio, swap_map, cluster_info); in enable_swap_info()
2365 spin_unlock(&p->lock); in enable_swap_info()
2370 percpu_ref_resurrect(&p->users); in enable_swap_info()
2372 spin_lock(&p->lock); in enable_swap_info()
2373 _enable_swap_info(p); in enable_swap_info()
2374 spin_unlock(&p->lock); in enable_swap_info()
2378 static void reinsert_swap_info(struct swap_info_struct *p) in reinsert_swap_info() argument
2381 spin_lock(&p->lock); in reinsert_swap_info()
2382 setup_swap_info(p, p->prio, p->swap_map, p->cluster_info); in reinsert_swap_info()
2383 _enable_swap_info(p); in reinsert_swap_info()
2384 spin_unlock(&p->lock); in reinsert_swap_info()
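
enable_swap_info() is a two-phase bring-up: initialize under the lock, make the device visible to lockless readers (percpu_ref_resurrect()), then mark it writable and queue it on the lists; reinsert_swap_info() reuses the second phase when swapoff fails. A compilable model with pthread/stdatomic stand-ins for the kernel primitives:

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

#define SWP_WRITEOK 0x2

struct swap_dev {
	pthread_mutex_t lock;
	unsigned int flags;
	long pages;
	atomic_bool live;             /* stand-in for the percpu_ref state */
};

static atomic_long nr_swap_pages;

static void enable_swap_dev(struct swap_dev *p, long pages)
{
	pthread_mutex_lock(&p->lock);
	p->pages = pages;             /* setup_swap_info() stage */
	pthread_mutex_unlock(&p->lock);

	atomic_store(&p->live, true); /* readers may now take references */

	pthread_mutex_lock(&p->lock);
	p->flags |= SWP_WRITEOK;      /* _enable_swap_info() stage */
	atomic_fetch_add(&nr_swap_pages, p->pages);
	/* plist_add() to the active and avail lists would follow */
	pthread_mutex_unlock(&p->lock);
}
```
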
2401 struct swap_info_struct *p = NULL; in SYSCALL_DEFINE1() local
2427 plist_for_each_entry(p, &swap_active_head, list) { in SYSCALL_DEFINE1()
2428 if (p->flags & SWP_WRITEOK) { in SYSCALL_DEFINE1()
2429 if (p->swap_file->f_mapping == mapping) { in SYSCALL_DEFINE1()
2440 if (!security_vm_enough_memory_mm(current->mm, p->pages)) in SYSCALL_DEFINE1()
2441 vm_unacct_memory(p->pages); in SYSCALL_DEFINE1()
2447 spin_lock(&p->lock); in SYSCALL_DEFINE1()
2448 del_from_avail_list(p); in SYSCALL_DEFINE1()
2449 if (p->prio < 0) { in SYSCALL_DEFINE1()
2450 struct swap_info_struct *si = p; in SYSCALL_DEFINE1()
2463 plist_del(&p->list, &swap_active_head); in SYSCALL_DEFINE1()
2464 atomic_long_sub(p->pages, &nr_swap_pages); in SYSCALL_DEFINE1()
2465 total_swap_pages -= p->pages; in SYSCALL_DEFINE1()
2466 p->flags &= ~SWP_WRITEOK; in SYSCALL_DEFINE1()
2467 spin_unlock(&p->lock); in SYSCALL_DEFINE1()
2473 err = try_to_unuse(p->type); in SYSCALL_DEFINE1()
2478 reinsert_swap_info(p); in SYSCALL_DEFINE1()
2492 percpu_ref_kill(&p->users); in SYSCALL_DEFINE1()
2494 wait_for_completion(&p->comp); in SYSCALL_DEFINE1()
2496 flush_work(&p->discard_work); in SYSCALL_DEFINE1()
2498 destroy_swap_extents(p); in SYSCALL_DEFINE1()
2499 if (p->flags & SWP_CONTINUED) in SYSCALL_DEFINE1()
2500 free_swap_count_continuations(p); in SYSCALL_DEFINE1()
2502 if (!p->bdev || !bdev_nonrot(p->bdev)) in SYSCALL_DEFINE1()
2507 spin_lock(&p->lock); in SYSCALL_DEFINE1()
2511 p->highest_bit = 0; /* cuts scans short */ in SYSCALL_DEFINE1()
2512 while (p->flags >= SWP_SCANNING) { in SYSCALL_DEFINE1()
2513 spin_unlock(&p->lock); in SYSCALL_DEFINE1()
2517 spin_lock(&p->lock); in SYSCALL_DEFINE1()
2520 swap_file = p->swap_file; in SYSCALL_DEFINE1()
2521 old_block_size = p->old_block_size; in SYSCALL_DEFINE1()
2522 p->swap_file = NULL; in SYSCALL_DEFINE1()
2523 p->max = 0; in SYSCALL_DEFINE1()
2524 swap_map = p->swap_map; in SYSCALL_DEFINE1()
2525 p->swap_map = NULL; in SYSCALL_DEFINE1()
2526 cluster_info = p->cluster_info; in SYSCALL_DEFINE1()
2527 p->cluster_info = NULL; in SYSCALL_DEFINE1()
2528 spin_unlock(&p->lock); in SYSCALL_DEFINE1()
2530 arch_swap_invalidate_area(p->type); in SYSCALL_DEFINE1()
2531 zswap_swapoff(p->type); in SYSCALL_DEFINE1()
2533 free_percpu(p->percpu_cluster); in SYSCALL_DEFINE1()
2534 p->percpu_cluster = NULL; in SYSCALL_DEFINE1()
2535 free_percpu(p->cluster_next_cpu); in SYSCALL_DEFINE1()
2536 p->cluster_next_cpu = NULL; in SYSCALL_DEFINE1()
2540 swap_cgroup_swapoff(p->type); in SYSCALL_DEFINE1()
2541 exit_swap_address_space(p->type); in SYSCALL_DEFINE1()
2548 blkdev_put(bdev, p); in SYSCALL_DEFINE1()
2562 p->flags = 0; in SYSCALL_DEFINE1()
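
The swapoff path above has a strict ordering: stop new allocations, page everything back in, wait out lockless readers, and only then tear resources down. A condensed model where every stub names the kernel stage it stands for; the point is the sequence, not the bodies:

```c
struct swap_dev { int unused; };

static void stop_allocations(struct swap_dev *p)  { (void)p; } /* del_from_avail_list + ~SWP_WRITEOK */
static int  try_to_unuse_all(struct swap_dev *p)  { (void)p; return 0; }
static void wait_for_readers(struct swap_dev *p)  { (void)p; } /* percpu_ref_kill + wait_for_completion */
static void release_resources(struct swap_dev *p) { (void)p; } /* extents, maps, percpu data, bdev */

static int swapoff_sketch(struct swap_dev *p)
{
	stop_allocations(p);          /* no new slots handed out */
	if (try_to_unuse_all(p))
		return -1;            /* kernel reinserts the device on failure */
	wait_for_readers(p);          /* all get_swap_device() users gone */
	release_resources(p);         /* now safe to free everything */
	return 0;
}
```
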
2714 struct swap_info_struct *p; in alloc_swap_info() local
2719 p = kvzalloc(struct_size(p, avail_lists, nr_node_ids), GFP_KERNEL); in alloc_swap_info()
2720 if (!p) in alloc_swap_info()
2723 if (percpu_ref_init(&p->users, swap_users_ref_free, in alloc_swap_info()
2725 kvfree(p); in alloc_swap_info()
2736 percpu_ref_exit(&p->users); in alloc_swap_info()
2737 kvfree(p); in alloc_swap_info()
2741 p->type = type; in alloc_swap_info()
2746 smp_store_release(&swap_info[type], p); /* rcu_assign_pointer() */ in alloc_swap_info()
2749 defer = p; in alloc_swap_info()
2750 p = swap_info[type]; in alloc_swap_info()
2756 p->swap_extent_root = RB_ROOT; in alloc_swap_info()
2757 plist_node_init(&p->list, 0); in alloc_swap_info()
2759 plist_node_init(&p->avail_lists[i], 0); in alloc_swap_info()
2760 p->flags = SWP_USED; in alloc_swap_info()
2766 spin_lock_init(&p->lock); in alloc_swap_info()
2767 spin_lock_init(&p->cont_lock); in alloc_swap_info()
2768 init_completion(&p->comp); in alloc_swap_info()
2770 return p; in alloc_swap_info()
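
alloc_swap_info() sizes the allocation for a flexible array of per-node plist entries and initializes the refcount before publishing the struct into swap_info[]. A userspace sketch with calloc()/free() for kvzalloc()/kvfree() and a plain flag for percpu_ref_init():

```c
#include <stdlib.h>

#define NR_NODES 4

struct swap_dev {
	int users_initialized;
	int type;
	int avail_prio[];             /* flexible array, one slot per node */
};

static struct swap_dev *alloc_swap_dev(int type)
{
	struct swap_dev *p;

	p = calloc(1, sizeof(*p) + NR_NODES * sizeof(p->avail_prio[0]));
	if (!p)
		return NULL;
	p->users_initialized = 1;     /* percpu_ref_init() in the kernel */
	p->type = type;
	/* publish into swap_info[type] only after this point */
	return p;
}
```
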
2773 static int claim_swapfile(struct swap_info_struct *p, struct inode *inode) in claim_swapfile() argument
2778 p->bdev = blkdev_get_by_dev(inode->i_rdev, in claim_swapfile()
2779 BLK_OPEN_READ | BLK_OPEN_WRITE, p, NULL); in claim_swapfile()
2780 if (IS_ERR(p->bdev)) { in claim_swapfile()
2781 error = PTR_ERR(p->bdev); in claim_swapfile()
2782 p->bdev = NULL; in claim_swapfile()
2785 p->old_block_size = block_size(p->bdev); in claim_swapfile()
2786 error = set_blocksize(p->bdev, PAGE_SIZE); in claim_swapfile()
2794 if (bdev_is_zoned(p->bdev)) in claim_swapfile()
2796 p->flags |= SWP_BLKDEV; in claim_swapfile()
2798 p->bdev = inode->i_sb->s_bdev; in claim_swapfile()
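
claim_swapfile() takes an exclusive claim on the backing block device, records the old block size so swapoff can restore it, and switches the device to page-sized blocks. A rough userspace analogue; opening a block device with O_EXCL gives Linux's exclusive-claim behaviour:

```c
#include <fcntl.h>
#include <unistd.h>

struct swap_dev {
	int fd;
	unsigned int old_block_size;
};

static int claim_blockdev(struct swap_dev *p, const char *path)
{
	p->fd = open(path, O_RDWR | O_EXCL);  /* claim, like blkdev_get_by_dev */
	if (p->fd < 0)
		return -1;
	/* kernel: p->old_block_size = block_size(p->bdev);
	 *         set_blocksize(p->bdev, PAGE_SIZE);
	 * the old size is restored at swapoff before blkdev_put(). */
	return 0;
}
```
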
2833 static unsigned long read_swap_header(struct swap_info_struct *p, in read_swap_header() argument
2864 p->lowest_bit = 1; in read_swap_header()
2865 p->cluster_next = 1; in read_swap_header()
2866 p->cluster_nr = 0; in read_swap_header()
2884 p->highest_bit = maxpages - 1; in read_swap_header()
2908 static int setup_swap_map_and_extents(struct swap_info_struct *p, in setup_swap_map_and_extents() argument
2919 unsigned long col = p->cluster_next / SWAPFILE_CLUSTER % SWAP_CLUSTER_COLS; in setup_swap_map_and_extents()
2924 cluster_list_init(&p->free_clusters); in setup_swap_map_and_extents()
2925 cluster_list_init(&p->discard_clusters); in setup_swap_map_and_extents()
2938 inc_cluster_info_page(p, cluster_info, page_nr); in setup_swap_map_and_extents()
2944 inc_cluster_info_page(p, cluster_info, i); in setup_swap_map_and_extents()
2952 inc_cluster_info_page(p, cluster_info, 0); in setup_swap_map_and_extents()
2953 p->max = maxpages; in setup_swap_map_and_extents()
2954 p->pages = nr_good_pages; in setup_swap_map_and_extents()
2955 nr_extents = setup_swap_extents(p, span); in setup_swap_map_and_extents()
2958 nr_good_pages = p->pages; in setup_swap_map_and_extents()
2982 cluster_list_add_tail(&p->free_clusters, cluster_info, in setup_swap_map_and_extents()
2991 struct swap_info_struct *p; in SYSCALL_DEFINE2() local
3017 p = alloc_swap_info(); in SYSCALL_DEFINE2()
3018 if (IS_ERR(p)) in SYSCALL_DEFINE2()
3019 return PTR_ERR(p); in SYSCALL_DEFINE2()
3021 INIT_WORK(&p->discard_work, swap_discard_work); in SYSCALL_DEFINE2()
3036 p->swap_file = swap_file; in SYSCALL_DEFINE2()
3041 error = claim_swapfile(p, inode); in SYSCALL_DEFINE2()
3069 maxpages = read_swap_header(p, swap_header, inode); in SYSCALL_DEFINE2()
3082 if (p->bdev && bdev_stable_writes(p->bdev)) in SYSCALL_DEFINE2()
3083 p->flags |= SWP_STABLE_WRITES; in SYSCALL_DEFINE2()
3085 if (p->bdev && bdev_synchronous(p->bdev)) in SYSCALL_DEFINE2()
3086 p->flags |= SWP_SYNCHRONOUS_IO; in SYSCALL_DEFINE2()
3088 if (p->bdev && bdev_nonrot(p->bdev)) { in SYSCALL_DEFINE2()
3092 p->flags |= SWP_SOLIDSTATE; in SYSCALL_DEFINE2()
3093 p->cluster_next_cpu = alloc_percpu(unsigned int); in SYSCALL_DEFINE2()
3094 if (!p->cluster_next_cpu) { in SYSCALL_DEFINE2()
3103 per_cpu(*p->cluster_next_cpu, cpu) = in SYSCALL_DEFINE2()
3104 get_random_u32_inclusive(1, p->highest_bit); in SYSCALL_DEFINE2()
3118 p->percpu_cluster = alloc_percpu(struct percpu_cluster); in SYSCALL_DEFINE2()
3119 if (!p->percpu_cluster) { in SYSCALL_DEFINE2()
3125 cluster = per_cpu_ptr(p->percpu_cluster, cpu); in SYSCALL_DEFINE2()
3133 error = swap_cgroup_swapon(p->type, maxpages); in SYSCALL_DEFINE2()
3137 nr_extents = setup_swap_map_and_extents(p, swap_header, swap_map, in SYSCALL_DEFINE2()
3145 p->bdev && bdev_max_discard_sectors(p->bdev)) { in SYSCALL_DEFINE2()
3152 p->flags |= (SWP_DISCARDABLE | SWP_AREA_DISCARD | in SYSCALL_DEFINE2()
3162 p->flags &= ~SWP_PAGE_DISCARD; in SYSCALL_DEFINE2()
3164 p->flags &= ~SWP_AREA_DISCARD; in SYSCALL_DEFINE2()
3167 if (p->flags & SWP_AREA_DISCARD) { in SYSCALL_DEFINE2()
3168 int err = discard_swap(p); in SYSCALL_DEFINE2()
3171 p, err); in SYSCALL_DEFINE2()
3175 error = init_swap_address_space(p->type, maxpages); in SYSCALL_DEFINE2()
3195 enable_swap_info(p, prio, swap_map, cluster_info); in SYSCALL_DEFINE2()
3198 K(p->pages), name->name, p->prio, nr_extents, in SYSCALL_DEFINE2()
3200 (p->flags & SWP_SOLIDSTATE) ? "SS" : "", in SYSCALL_DEFINE2()
3201 (p->flags & SWP_DISCARDABLE) ? "D" : "", in SYSCALL_DEFINE2()
3202 (p->flags & SWP_AREA_DISCARD) ? "s" : "", in SYSCALL_DEFINE2()
3203 (p->flags & SWP_PAGE_DISCARD) ? "c" : ""); in SYSCALL_DEFINE2()
3212 exit_swap_address_space(p->type); in SYSCALL_DEFINE2()
3216 free_percpu(p->percpu_cluster); in SYSCALL_DEFINE2()
3217 p->percpu_cluster = NULL; in SYSCALL_DEFINE2()
3218 free_percpu(p->cluster_next_cpu); in SYSCALL_DEFINE2()
3219 p->cluster_next_cpu = NULL; in SYSCALL_DEFINE2()
3220 if (inode && S_ISBLK(inode->i_mode) && p->bdev) { in SYSCALL_DEFINE2()
3221 set_blocksize(p->bdev, p->old_block_size); in SYSCALL_DEFINE2()
3222 blkdev_put(p->bdev, p); in SYSCALL_DEFINE2()
3225 destroy_swap_extents(p); in SYSCALL_DEFINE2()
3226 swap_cgroup_swapoff(p->type); in SYSCALL_DEFINE2()
3228 p->swap_file = NULL; in SYSCALL_DEFINE2()
3229 p->flags = 0; in SYSCALL_DEFINE2()
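
The swapon path mirrors swapoff in reverse: allocate, claim the backing store, parse the header, build the maps and extents, and enable the device last; on any failure everything initialized so far is unwound, ending with p->flags = 0. A condensed model with stubs named for the kernel stages:

```c
struct swap_dev { int flags; };

static struct swap_dev dev;

static struct swap_dev *alloc_dev(void)       { return &dev; }       /* alloc_swap_info() */
static int  claim_backing(struct swap_dev *p) { (void)p; return 0; } /* claim_swapfile() */
static int  parse_header(struct swap_dev *p)  { (void)p; return 0; } /* read_swap_header() */
static int  build_maps(struct swap_dev *p)    { (void)p; return 0; } /* setup_swap_map_and_extents() */
static void enable_dev(struct swap_dev *p)    { (void)p; }           /* enable_swap_info() */
static void unwind(struct swap_dev *p)        { p->flags = 0; }      /* release in reverse */

static int swapon_sketch(void)
{
	struct swap_dev *p = alloc_dev();

	if (!p)
		return -1;
	if (claim_backing(p) || parse_header(p) || build_maps(p)) {
		unwind(p);            /* free maps, restore bdev, flags = 0 */
		return -1;
	}
	enable_dev(p);                /* device goes live only at the end */
	return 0;
}
```
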
3281 struct swap_info_struct *p; in __swap_duplicate() local
3288 p = swp_swap_info(entry); in __swap_duplicate()
3291 ci = lock_cluster_or_swap_info(p, offset); in __swap_duplicate()
3293 count = p->swap_map[offset]; in __swap_duplicate()
3324 else if (swap_count_continued(p, offset, count)) in __swap_duplicate()
3331 WRITE_ONCE(p->swap_map[offset], count | has_cache); in __swap_duplicate()
3334 unlock_cluster_or_swap_info(p, ci); in __swap_duplicate()
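
__swap_duplicate() is the increment counterpart to the free path: bump the mapping count in the swap_map byte, preserving SWAP_HAS_CACHE, and chain a continuation page once the count hits SWAP_MAP_MAX (swap_count_continued(), elided). The encoding constants below match the kernel's; the body is only a sketch:

```c
#include <errno.h>

#define SWAP_HAS_CACHE 0x40
#define SWAP_MAP_MAX   0x3e
#define COUNT_MASK     0x3f

static int swap_dup_one(unsigned char *slot)
{
	unsigned char count = *slot & COUNT_MASK;
	unsigned char has_cache = *slot & SWAP_HAS_CACHE;

	if (!count && !has_cache)
		return -ENOENT;              /* entry is stale/freed */
	if (count >= SWAP_MAP_MAX)
		return -ENOMEM;              /* kernel chains a continuation */
	*slot = (count + 1) | has_cache;     /* one more pte maps this slot */
	return 0;
}
```
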