/openbmc/linux/mm/

page_counter.c
    56: new, nr_pages)) { in page_counter_cancel()
    98: unsigned long nr_pages, in page_counter_try_charge() argument
    121: atomic_long_sub(nr_pages, &c->usage); in page_counter_try_charge()
    143: page_counter_cancel(c, nr_pages); in page_counter_try_charge()
    158: page_counter_cancel(c, nr_pages); in page_counter_uncharge()
    190: if (usage > nr_pages) in page_counter_set_max()
    193: old = xchg(&counter->max, nr_pages); in page_counter_set_max()
    214: WRITE_ONCE(counter->min, nr_pages); in page_counter_set_min()
    231: WRITE_ONCE(counter->low, nr_pages); in page_counter_set_low()
    247: unsigned long *nr_pages) in page_counter_memparse() argument
    [all …]

memory_hotplug.c
    69: return nr_pages; in memory_block_memmap_on_memory_pages()
    705: unsigned long nr_pages) in resize_zone_range() argument
    887: movable_pages += nr_pages; in auto_movable_can_online_movable()
    897: unsigned long nr_pages) in default_kernel_zone_for_pfn() argument
    981: max_pages = nr_pages; in auto_movable_zone_for_pfn()
    1024: unsigned long nr_pages) in default_zone_for_pfn() argument
    1027: nr_pages); in default_zone_for_pfn()
    1049: unsigned long nr_pages) in zone_for_pfn_range() argument
    1068: long nr_pages) in adjust_present_page_count() argument
    1162: arg.nr_pages = nr_pages; in online_pages()
    [all …]

percpu-km.c
    55: const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT; in pcpu_create_chunk() local
    65: pages = alloc_pages(gfp, order_base_2(nr_pages)); in pcpu_create_chunk()
    71: for (i = 0; i < nr_pages; i++) in pcpu_create_chunk()
    78: pcpu_chunk_populated(chunk, 0, nr_pages); in pcpu_create_chunk()
    89: const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT; in pcpu_destroy_chunk() local
    98: __free_pages(chunk->data, order_base_2(nr_pages)); in pcpu_destroy_chunk()
    109: size_t nr_pages, alloc_pages; in pcpu_verify_alloc_info() local
    117: nr_pages = (ai->groups[0].nr_units * ai->unit_size) >> PAGE_SHIFT; in pcpu_verify_alloc_info()
    118: alloc_pages = roundup_pow_of_two(nr_pages); in pcpu_verify_alloc_info()
    120: if (alloc_pages > nr_pages) in pcpu_verify_alloc_info()
    [all …]

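The pcpu_create_chunk() hits above allocate each chunk as a single 2^order block, so a group size that is not a power of two over-allocates, which is what pcpu_verify_alloc_info() checks with roundup_pow_of_two(). A standalone sketch of that rounding arithmetic (the group size of 12 pages is made up, not taken from the tree):

#include <stdio.h>

/* ceil(log2(n)), mirroring what the kernel's order_base_2() returns for n >= 1 */
static unsigned int order_base_2(unsigned long n)
{
        unsigned int order = 0;

        while ((1UL << order) < n)
                order++;
        return order;
}

int main(void)
{
        unsigned long nr_pages = 12;            /* hypothetical group size in pages */
        unsigned long alloc_pages = 1UL << order_base_2(nr_pages);

        /* prints "order 4: 16 pages allocated for 12 requested" */
        printf("order %u: %lu pages allocated for %lu requested\n",
               order_base_2(nr_pages), alloc_pages, nr_pages);
        return 0;
}
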
sparse.c
    185: unsigned long nr_pages) in subsection_mask_set() argument
    198: if (!nr_pages) in subsection_map_init()
    205: pfns = min(nr_pages, PAGES_PER_SECTION in subsection_map_init()
    215: nr_pages -= pfns; in subsection_map_init()
    662: subsection_mask_set(map, pfn, nr_pages); in clear_subsection_map()
    668: pfn, nr_pages)) in clear_subsection_map()
    688: subsection_mask_set(map, pfn, nr_pages); in fill_subsection_map()
    720: unsigned long magic, nr_pages; in free_map_bootmem() local
    726: for (i = 0; i < nr_pages; i++, page++) { in free_map_bootmem()
    787: if (clear_subsection_map(pfn, nr_pages)) in section_deactivate()
    [all …]

gup_test.c
    18: for (i = 0; i < nr_pages; i++) in put_back_pages()
    25: unpin_user_pages(pages, nr_pages); in put_back_pages()
    29: unpin_user_pages(pages, nr_pages); in put_back_pages()
    31: for (i = 0; i < nr_pages; i++) in put_back_pages()
    40: unsigned long nr_pages) in verify_dma_pinned() argument
    49: for (i = 0; i < nr_pages; i++) { in verify_dma_pinned()
    70: unsigned long nr_pages) in dump_pages_test() argument
    114: nr_pages = gup->size / PAGE_SIZE; in __gup_test_ioctl()
    177: nr_pages = i; in __gup_test_ioctl()
    244: nr_pages = args.size / PAGE_SIZE; in pin_longterm_test_start()
    [all …]

hugetlb_cgroup.c
    190: unsigned int nr_pages; in hugetlb_cgroup_move_parent() local
    205: nr_pages = compound_nr(page); in hugetlb_cgroup_move_parent()
    282: nr_pages, &counter)) { in __hugetlb_cgroup_charge_cgroup()
    328: usage + nr_pages); in __hugetlb_cgroup_commit_charge()
    364: nr_pages); in __hugetlb_cgroup_uncharge_folio()
    377: usage - nr_pages); in __hugetlb_cgroup_uncharge_folio()
    405: nr_pages); in __hugetlb_cgroup_uncharge_cgroup()
    437: unsigned long nr_pages, in hugetlb_cgroup_uncharge_file_region() argument
    599: unsigned long nr_pages; in hugetlb_cgroup_write() local
    612: nr_pages = round_down(nr_pages, pages_per_huge_page(&hstates[idx])); in hugetlb_cgroup_write()
    [all …]

process_vm_access.c
    80: unsigned long nr_pages; in process_vm_rw_single_vec() local
    89: nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1; in process_vm_rw_single_vec()
    94: while (!rc && nr_pages && iov_iter_count(iter)) { in process_vm_rw_single_vec()
    95: int pinned_pages = min(nr_pages, max_pages_per_loop); in process_vm_rw_single_vec()
    122: nr_pages -= pinned_pages; in process_vm_rw_single_vec()
    161: unsigned long nr_pages = 0; in process_vm_rw_core() local
    177: nr_pages = max(nr_pages, nr_pages_iov); in process_vm_rw_core()
    181: if (nr_pages == 0) in process_vm_rw_core()
    184: if (nr_pages > PVM_MAX_PP_ARRAY_COUNT) { in process_vm_rw_core()
    188: sizeof(struct pages *)*nr_pages), in process_vm_rw_core()

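The hit at line 89 computes how many pages the user range [addr, addr + len) touches. A small standalone check of that formula with made-up values (the addresses are hypothetical and PAGE_SIZE is assumed to be 4 KiB):

#include <stdio.h>

int main(void)
{
        const unsigned long page_size = 4096;           /* assumed PAGE_SIZE */
        unsigned long addr = 0x1ff0, len = 0x20;        /* 32 bytes straddling a page boundary */
        unsigned long nr_pages =
                (addr + len - 1) / page_size - addr / page_size + 1;

        printf("%lu\n", nr_pages);                      /* prints 2 */
        return 0;
}
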
memcontrol.c
    294: if (nr_pages) in obj_cgroup_release()
    942: if (nr_pages > 0) in mem_cgroup_charge_statistics()
    946: nr_pages = -nr_pages; /* for event */ in mem_cgroup_charge_statistics()
    1397: if (nr_pages < 0) in mem_cgroup_update_lru_size()
    1408: if (nr_pages > 0) in mem_cgroup_update_lru_size()
    2246: stock->nr_pages -= nr_pages; in consume_stock()
    2313: stock->nr_pages += nr_pages; in __refill_stock()
    3259: if (nr_pages) { in drain_obj_stock()
    3356: if (nr_pages) in refill_obj_stock()
    3395: nr_pages += 1; in obj_cgroup_charge()
    [all …]

/openbmc/linux/include/linux/

hugetlb_cgroup.h
    138: extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
    142: extern void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
    148: extern void hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
    153: extern void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
    163: unsigned long nr_pages,
    173: unsigned long nr_pages, in hugetlb_cgroup_uncharge_file_region() argument
    225: unsigned long nr_pages, in hugetlb_cgroup_charge_cgroup_rsvd() argument
    238: hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages, in hugetlb_cgroup_commit_charge_rsvd() argument
    250: unsigned long nr_pages, in hugetlb_cgroup_uncharge_folio_rsvd() argument
    255: unsigned long nr_pages, in hugetlb_cgroup_uncharge_cgroup() argument
    [all …]

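A minimal sketch of how a caller might pair the charge/uncharge declarations above. The demo_* wrapper names are hypothetical; only the hugetlb_cgroup_* calls and their nr_pages arguments come from the header:

/* Charge one huge page of hstate "h" to the current task's cgroup. */
static int demo_charge_one_hugepage(struct hstate *h,
                                    struct hugetlb_cgroup **h_cg)
{
        return hugetlb_cgroup_charge_cgroup(hstate_index(h),
                                            pages_per_huge_page(h), h_cg);
}

/* Give the charge back if the allocation that followed it failed. */
static void demo_undo_charge(struct hstate *h, struct hugetlb_cgroup *h_cg)
{
        hugetlb_cgroup_uncharge_cgroup(hstate_index(h),
                                       pages_per_huge_page(h), h_cg);
}
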
page_counter.h
    60: void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages);
    61: void page_counter_charge(struct page_counter *counter, unsigned long nr_pages);
    63: unsigned long nr_pages,
    65: void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages);
    66: void page_counter_set_min(struct page_counter *counter, unsigned long nr_pages);
    67: void page_counter_set_low(struct page_counter *counter, unsigned long nr_pages);
    70: unsigned long nr_pages) in page_counter_set_high() argument
    72: WRITE_ONCE(counter->high, nr_pages); in page_counter_set_high()
    75: int page_counter_set_max(struct page_counter *counter, unsigned long nr_pages);
    77: unsigned long *nr_pages);

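A minimal sketch of the charge/uncharge pairing implied by the declarations above; the demo_* wrapper names are hypothetical, while the page_counter calls match the listed prototypes:

static int demo_charge(struct page_counter *counter, unsigned long nr_pages)
{
        struct page_counter *fail;

        /* Fails without overshooting; "fail" names the counter that hit its max. */
        if (!page_counter_try_charge(counter, nr_pages, &fail))
                return -ENOMEM;
        return 0;
}

static void demo_uncharge(struct page_counter *counter, unsigned long nr_pages)
{
        page_counter_uncharge(counter, nr_pages);
}
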
memory_hotplug.h
    154: long nr_pages);
    159: extern int online_pages(unsigned long pfn, unsigned long nr_pages,
    188: extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages,
    197: unsigned long nr_pages, struct mhp_params *params) in add_pages() argument
    199: return __add_pages(nid, start_pfn, nr_pages, params); in add_pages()
    202: int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
    308: extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
    341: unsigned long nr_pages,
    345: unsigned long nr_pages);
    347: unsigned long nr_pages, struct vmem_altmap *altmap,
    [all …]

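An illustrative caller of the add_pages() declaration shown above, as a memory-hotplug backend might use it. The wrapper name and the choice of PAGE_KERNEL are assumptions for the sketch, not code from the tree:

static int demo_add_pages(int nid, u64 start, u64 size)
{
        struct mhp_params params = { .pgprot = PAGE_KERNEL };

        /* start/size are byte quantities; add_pages() wants a pfn and a page count */
        return add_pages(nid, PHYS_PFN(start), PHYS_PFN(size), &params);
}
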
/openbmc/linux/fs/iomap/

swapfile.c
    33: unsigned long nr_pages; in iomap_swapfile_add_extent() local
    40: if (unlikely(isi->nr_pages >= isi->sis->max)) in iomap_swapfile_add_extent()
    42: max_pages = isi->sis->max - isi->nr_pages; in iomap_swapfile_add_extent()
    55: nr_pages = next_ppage - first_ppage; in iomap_swapfile_add_extent()
    56: nr_pages = min(nr_pages, max_pages); in iomap_swapfile_add_extent()
    72: error = add_swap_extent(isi->sis, isi->nr_pages, nr_pages, first_ppage); in iomap_swapfile_add_extent()
    76: isi->nr_pages += nr_pages; in iomap_swapfile_add_extent()
    184: if (isi.nr_pages == 0) { in iomap_swapfile_activate()
    190: sis->max = isi.nr_pages; in iomap_swapfile_activate()
    191: sis->pages = isi.nr_pages - 1; in iomap_swapfile_activate()
    [all …]

/openbmc/linux/drivers/media/v4l2-core/

videobuf-dma-sg.c
    63: int nr_pages) in videobuf_vmalloc_to_sg() argument
    72: sg_init_table(sglist, nr_pages); in videobuf_vmalloc_to_sg()
    173: dma->nr_pages = last-first+1; in videobuf_dma_init_user_locked()
    180: data, size, dma->nr_pages); in videobuf_dma_init_user_locked()
    185: if (err != dma->nr_pages) { in videobuf_dma_init_user_locked()
    188: dma->nr_pages); in videobuf_dma_init_user_locked()
    207: unsigned long nr_pages) in videobuf_dma_init_kernel() argument
    245: dma->nr_pages = nr_pages; in videobuf_dma_init_kernel()
    276: dma->nr_pages = nr_pages; in videobuf_dma_init_overlay()
    284: BUG_ON(0 == dma->nr_pages); in videobuf_dma_map()
    [all …]

/openbmc/linux/drivers/xen/

balloon.c
    392: if (nr_pages > ARRAY_SIZE(frame_list)) in increase_reservation()
    393: nr_pages = ARRAY_SIZE(frame_list); in increase_reservation()
    396: for (i = 0; i < nr_pages; i++) { in increase_reservation()
    398: nr_pages = i; in increase_reservation()
    434: nr_pages = ARRAY_SIZE(frame_list); in decrease_reservation()
    436: for (i = 0; i < nr_pages; i++) { in decrease_reservation()
    439: nr_pages = i; in decrease_reservation()
    475: BUG_ON(ret != nr_pages); in decrease_reservation()
    584: if (si_mem_available() < nr_pages) in add_ballooned_pages()
    610: while (pgno < nr_pages) { in xen_alloc_ballooned_pages()
    [all …]

unpopulated-alloc.c
    34: static int fill_list(unsigned int nr_pages) in fill_list() argument
    39: unsigned int i, alloc_pages = round_up(nr_pages, PAGES_PER_SECTION); in fill_list()
    158: int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages) in xen_alloc_unpopulated_pages() argument
    169: return xen_alloc_ballooned_pages(nr_pages, pages); in xen_alloc_unpopulated_pages()
    172: if (list_count < nr_pages) { in xen_alloc_unpopulated_pages()
    173: ret = fill_list(nr_pages - list_count); in xen_alloc_unpopulated_pages()
    178: for (i = 0; i < nr_pages; i++) { in xen_alloc_unpopulated_pages()
    214: void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages) in xen_free_unpopulated_pages() argument
    219: xen_free_ballooned_pages(nr_pages, pages); in xen_free_unpopulated_pages()
    224: for (i = 0; i < nr_pages; i++) { in xen_free_unpopulated_pages()

/openbmc/linux/arch/arm64/kvm/hyp/nvhe/

setup.c
    38: unsigned long nr_pages; in divide_memory_pool() local
    43: vmemmap_base = hyp_early_alloc_contig(nr_pages); in divide_memory_pool()
    47: nr_pages = hyp_vm_table_pages(); in divide_memory_pool()
    48: vm_table_base = hyp_early_alloc_contig(nr_pages); in divide_memory_pool()
    52: nr_pages = hyp_s1_pgtable_pages(); in divide_memory_pool()
    53: hyp_pgt_base = hyp_early_alloc_contig(nr_pages); in divide_memory_pool()
    57: nr_pages = host_s2_pgtable_pages(); in divide_memory_pool()
    62: nr_pages = hyp_ffa_proxy_pages(); in divide_memory_pool()
    63: ffa_proxy_pages = hyp_early_alloc_contig(nr_pages); in divide_memory_pool()
    262: unsigned long nr_pages, reserved_pages, pfn; in __pkvm_init_finalise() local
    [all …]

mem_protect.c
    101: unsigned long nr_pages, pfn; in prepare_s2_pool() local
    105: nr_pages = host_s2_pgtable_pages(); in prepare_s2_pool()
    235: unsigned long nr_pages; in kvm_guest_prepare_stage2() local
    544: u64 nr_pages; member
    635: u64 size = tx->nr_pages * PAGE_SIZE; in host_request_owned_transition()
    1093: .nr_pages = 1, in __pkvm_host_share_hyp()
    1126: .nr_pages = 1, in __pkvm_host_unshare_hyp()
    1159: .nr_pages = nr_pages, in __pkvm_host_donate_hyp()
    1191: .nr_pages = nr_pages, in __pkvm_hyp_donate_host()
    1266: .nr_pages = nr_pages, in __pkvm_host_share_ffa()
    [all …]

/openbmc/linux/include/xen/

xen.h
    65: int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages);
    66: void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages);
    71: static inline int xen_alloc_unpopulated_pages(unsigned int nr_pages, in xen_alloc_unpopulated_pages() argument
    74: return xen_alloc_ballooned_pages(nr_pages, pages); in xen_alloc_unpopulated_pages()
    76: static inline void xen_free_unpopulated_pages(unsigned int nr_pages, in xen_free_unpopulated_pages() argument
    79: xen_free_ballooned_pages(nr_pages, pages); in xen_free_unpopulated_pages()

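A minimal sketch of allocating and releasing unpopulated pages with the two entry points declared above; the wrapper function and its error handling are illustrative only:

static int demo_with_unpopulated_pages(unsigned int nr_pages)
{
        struct page **pages;
        int ret;

        pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        ret = xen_alloc_unpopulated_pages(nr_pages, pages);    /* 0 on success */
        if (!ret) {
                /* ... use the pages, e.g. as grant-mapping targets ... */
                xen_free_unpopulated_pages(nr_pages, pages);
        }

        kfree(pages);
        return ret;
}
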
/openbmc/linux/drivers/firmware/efi/libstub/

relocate.c
    28: unsigned long nr_pages; in efi_low_alloc_above() local
    45: nr_pages = size / EFI_PAGE_SIZE; in efi_low_alloc_above()
    60: if (desc->num_pages < nr_pages) in efi_low_alloc_above()
    74: EFI_LOADER_DATA, nr_pages, &start); in efi_low_alloc_above()
    120: unsigned long nr_pages; in efi_relocate_kernel() local
    138: nr_pages = round_up(alloc_size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE; in efi_relocate_kernel()
    140: EFI_LOADER_DATA, nr_pages, &efi_addr); in efi_relocate_kernel()

/openbmc/linux/net/rds/

info.c
    163: unsigned long nr_pages = 0; in rds_info_getsockopt() local
    187: nr_pages = (PAGE_ALIGN(start + len) - (start & PAGE_MASK)) in rds_info_getsockopt()
    190: pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL); in rds_info_getsockopt()
    195: ret = pin_user_pages_fast(start, nr_pages, FOLL_WRITE, pages); in rds_info_getsockopt()
    196: if (ret != nr_pages) { in rds_info_getsockopt()
    198: nr_pages = ret; in rds_info_getsockopt()
    200: nr_pages = 0; in rds_info_getsockopt()
    205: rdsdebug("len %d nr_pages %lu\n", len, nr_pages); in rds_info_getsockopt()
    238: unpin_user_pages(pages, nr_pages); in rds_info_getsockopt()

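The hits above follow the usual pin_user_pages_fast() pattern: size the page array from the user range, pin, use, unpin. A condensed sketch of that pattern (the function and its minimal error handling are hypothetical; the pinning calls are the ones shown):

static int demo_pin_user_range(unsigned long start, unsigned long len)
{
        unsigned long nr_pages;
        struct page **pages;
        int pinned;

        /* whole pages covered by [start, start + len) */
        nr_pages = (PAGE_ALIGN(start + len) - (start & PAGE_MASK)) >> PAGE_SHIFT;

        pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        pinned = pin_user_pages_fast(start, nr_pages, FOLL_WRITE, pages);
        if (pinned > 0) {
                /* ... fill the pinned pages; "pinned" may be fewer than requested ... */
                unpin_user_pages(pages, pinned);
        }

        kfree(pages);
        return pinned < 0 ? pinned : 0;
}
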
/openbmc/linux/arch/arm64/include/asm/

kvm_pkvm.h
    29: unsigned long nr_pages = reg->size >> PAGE_SHIFT; in hyp_vmemmap_memblock_size() local
    33: end = start + nr_pages * vmemmap_entry_size; in hyp_vmemmap_memblock_size()
    57: static inline unsigned long __hyp_pgtable_max_pages(unsigned long nr_pages) in __hyp_pgtable_max_pages() argument
    63: nr_pages = DIV_ROUND_UP(nr_pages, PTRS_PER_PTE); in __hyp_pgtable_max_pages()
    64: total += nr_pages; in __hyp_pgtable_max_pages()

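The __hyp_pgtable_max_pages() hits show the per-level bound: each page-table level needs at most DIV_ROUND_UP(entries, PTRS_PER_PTE) table pages. A simplified, standalone version of that arithmetic, assuming 4 KiB pages (512 entries per table) and a made-up 4 GiB mapping; the real helper iterates a fixed number of levels rather than this loop:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned long nr_pages = 1UL << 20;     /* 4 GiB of 4 KiB pages to map */
        unsigned long total = 0;

        while (nr_pages > 1) {
                nr_pages = DIV_ROUND_UP(nr_pages, 512);
                total += nr_pages;              /* 2048 + 4 + 1 */
        }
        printf("%lu table pages\n", total);     /* prints 2053 */
        return 0;
}
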
/openbmc/linux/fs/crypto/

bio.c
    119: unsigned int nr_pages; in fscrypt_zeroout_range() local
    133: nr_pages = min_t(unsigned int, ARRAY_SIZE(pages), in fscrypt_zeroout_range()
    143: for (i = 0; i < nr_pages; i++) { in fscrypt_zeroout_range()
    149: nr_pages = i; in fscrypt_zeroout_range()
    150: if (WARN_ON_ONCE(nr_pages <= 0)) in fscrypt_zeroout_range()
    154: bio = bio_alloc(inode->i_sb->s_bdev, nr_pages, REQ_OP_WRITE, GFP_NOFS); in fscrypt_zeroout_range()
    179: } while (i != nr_pages && len != 0); in fscrypt_zeroout_range()
    189: for (i = 0; i < nr_pages; i++) in fscrypt_zeroout_range()

/openbmc/linux/kernel/events/

ring_buffer.c
    175: if (rb->nr_pages) { in __perf_output_begin()
    334: if (!rb->nr_pages) in ring_buffer_init()
    703: max_order = ilog2(nr_pages); in rb_alloc_aux()
    789: if (pgoff > rb->nr_pages) in __perf_mmap_to_page()
    826: size += nr_pages * sizeof(void *); in rb_alloc()
    840: for (i = 0; i < nr_pages; i++) { in rb_alloc()
    846: rb->nr_pages = nr_pages; in rb_alloc()
    870: for (i = 0; i < rb->nr_pages; i++) in rb_free()
    939: if (nr_pages) { in rb_alloc()
    940: rb->nr_pages = 1; in rb_alloc()
    [all …]

/openbmc/linux/arch/powerpc/platforms/powernv/

memtrace.c
    92: unsigned long nr_pages) in memtrace_clear_range() argument
    97: for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) { in memtrace_clear_range()
    107: (unsigned long)pfn_to_kaddr(start_pfn + nr_pages), in memtrace_clear_range()
    113: const unsigned long nr_pages = PHYS_PFN(size); in memtrace_alloc_node() local
    121: page = alloc_contig_pages(nr_pages, GFP_KERNEL | __GFP_THISNODE | in memtrace_alloc_node()
    132: memtrace_clear_range(start_pfn, nr_pages); in memtrace_alloc_node()
    138: for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) in memtrace_alloc_node()
    216: const unsigned long nr_pages = PHYS_PFN(size); in memtrace_free() local
    225: for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) in memtrace_free()
    228: free_contig_range(start_pfn, nr_pages); in memtrace_free()

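A sketch of the contiguous-allocation pairing visible in the memtrace_alloc_node()/memtrace_free() hits above; the demo_* wrappers and the reduced GFP mask are assumptions for the sketch:

static struct page *demo_alloc_contig(u64 size, int nid)
{
        /* size in bytes -> number of pages */
        return alloc_contig_pages(PHYS_PFN(size),
                                  GFP_KERNEL | __GFP_THISNODE,
                                  nid, NULL);
}

static void demo_free_contig(struct page *page, u64 size)
{
        free_contig_range(page_to_pfn(page), PHYS_PFN(size));
}
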
/openbmc/linux/drivers/misc/bcm-vk/

bcm_vk_sg.c
    60: dma->nr_pages = last - first + 1; in bcm_vk_dma_alloc()
    63: dma->pages = kmalloc_array(dma->nr_pages, in bcm_vk_dma_alloc()
    70: data, vkdata->size, dma->nr_pages); in bcm_vk_dma_alloc()
    76: dma->nr_pages, in bcm_vk_dma_alloc()
    79: if (err != dma->nr_pages) { in bcm_vk_dma_alloc()
    80: dma->nr_pages = (err >= 0) ? err : 0; in bcm_vk_dma_alloc()
    82: err, dma->nr_pages); in bcm_vk_dma_alloc()
    87: dma->sglen = (dma->nr_pages * sizeof(*sgdata)) + in bcm_vk_dma_alloc()
    117: for (i = 1; i < dma->nr_pages; i++) { in bcm_vk_dma_alloc()
    250: for (i = 0; i < dma->nr_pages; i++) in bcm_vk_dma_free()