
Searched refs:nr_pages (Results 26 – 50 of 402) sorted by relevance


/openbmc/linux/mm/
page_ext.c
180 unsigned long nr_pages; in alloc_node_page_ext() local
182 nr_pages = NODE_DATA(nid)->node_spanned_pages; in alloc_node_page_ext()
183 if (!nr_pages) in alloc_node_page_ext()
193 nr_pages += MAX_ORDER_NR_PAGES; in alloc_node_page_ext()
195 table_size = page_ext_size * nr_pages; in alloc_node_page_ext()
355 unsigned long nr_pages, in online_page_ext() argument
362 end = SECTION_ALIGN_UP(start_pfn + nr_pages); in online_page_ext()
388 unsigned long nr_pages) in offline_page_ext() argument
422 mn->nr_pages, mn->status_change_nid); in page_ext_callback()
426 mn->nr_pages); in page_ext_callback()
[all …]
page_isolation.c
181 unsigned long nr_pages; in set_migratetype_isolate() local
189 __mod_zone_freepage_state(zone, -nr_pages, mt); in set_migratetype_isolate()
209 unsigned long flags, nr_pages; in unset_migratetype_isolate() local
256 __mod_zone_freepage_state(zone, nr_pages, migratetype); in unset_migratetype_isolate()
267 __first_valid_page(unsigned long pfn, unsigned long nr_pages) in __first_valid_page() argument
271 for (i = 0; i < nr_pages; i++) { in __first_valid_page()
393 unsigned long nr_pages = compound_nr(head); in isolate_single_pageblock() local
395 if (head_pfn + nr_pages <= boundary_pfn) { in isolate_single_pageblock()
396 pfn = head_pfn + nr_pages; in isolate_single_pageblock()
430 flags, head_pfn, head_pfn + nr_pages); in isolate_single_pageblock()
[all …]
cma.h
45 void cma_sysfs_account_success_pages(struct cma *cma, unsigned long nr_pages);
46 void cma_sysfs_account_fail_pages(struct cma *cma, unsigned long nr_pages);
49 unsigned long nr_pages) {}; in cma_sysfs_account_success_pages() argument
51 unsigned long nr_pages) {}; in cma_sysfs_account_fail_pages() argument
swap.c
94 long nr_pages = folio_nr_pages(folio); in __page_cache_release() local
166 long nr_pages = folio_nr_pages(folio); in lru_add_fn() local
330 long nr_pages = folio_nr_pages(folio); in folio_activate_fn() local
339 nr_pages); in folio_activate_fn()
564 long nr_pages = folio_nr_pages(folio); in lru_deactivate_file_fn() local
592 __count_vm_events(PGROTATED, nr_pages); in lru_deactivate_file_fn()
598 nr_pages); in lru_deactivate_file_fn()
605 long nr_pages = folio_nr_pages(folio); in lru_deactivate_fn() local
614 nr_pages); in lru_deactivate_fn()
622 long nr_pages = folio_nr_pages(folio); in lru_lazyfree_fn() local
[all …]
gup.c
1193 if (!nr_pages) in __get_user_pages()
1288 page_increm = nr_pages; in __get_user_pages()
1335 nr_pages -= page_increm; in __get_user_pages()
1336 } while (nr_pages); in __get_user_pages()
1535 nr_pages -= ret; in __get_user_pages_locked()
1537 if (!nr_pages) in __get_user_pages_locked()
1597 nr_pages--; in __get_user_pages_locked()
1599 if (!nr_pages) in __get_user_pages_locked()
1657 return nr_pages; in populate_vma_page_range()
1810 if (!nr_pages) in __get_user_pages_locked()
[all …]
migrate.c
1529 int rc, nr_pages; in migrate_hugetlbs() local
1578 nr_retry_pages += nr_pages; in migrate_hugetlbs()
1724 nr_retry_pages += nr_pages; in migrate_pages_batch()
1782 nr_retry_pages += nr_pages; in migrate_pages_batch()
1900 int nr_pages; in migrate_pages() local
1917 nr_pages = 0; in migrate_pages()
2159 err += nr_pages - i; in move_pages_and_store_status()
2170 unsigned long nr_pages, in do_pages_move() argument
2346 while (nr_pages) { in do_pages_stat()
2366 nr_pages -= chunk_nr; in do_pages_stat()
[all …]
mlock.c
124 int nr_pages = folio_nr_pages(folio); in __munlock_folio() local
144 __zone_stat_mod_folio(folio, NR_MLOCK, -nr_pages); in __munlock_folio()
251 int nr_pages = folio_nr_pages(folio); in mlock_folio() local
253 zone_stat_mod_folio(folio, NR_MLOCK, nr_pages); in mlock_folio()
271 int nr_pages = folio_nr_pages(folio); in mlock_new_folio() local
277 zone_stat_mod_folio(folio, NR_MLOCK, nr_pages); in mlock_new_folio()
418 int nr_pages; in mlock_fixup() local
453 nr_pages = (end - start) >> PAGE_SHIFT; in mlock_fixup()
455 nr_pages = -nr_pages; in mlock_fixup()
457 nr_pages = 0; in mlock_fixup()
[all …]
/openbmc/linux/net/rds/
rdma.c
178 unsigned int nr_pages; in __rds_rdma_map() local
214 if (nr_pages == 0) { in __rds_rdma_map()
535 unsigned int nr_pages; in rds_rdma_pages() local
541 if (nr_pages == 0) in rds_rdma_pages()
544 tot_pages += nr_pages; in rds_rdma_pages()
563 unsigned int nr_pages; in rds_rdma_extra_size() local
591 if (nr_pages == 0) in rds_rdma_extra_size()
594 tot_pages += nr_pages; in rds_rdma_extra_size()
617 int nr_pages; in rds_cmsg_rdma_args() local
652 if (nr_pages < 0) { in rds_cmsg_rdma_args()
[all …]
/openbmc/linux/drivers/virt/acrn/
mm.c
158 int nr_pages, i, order, nr_regions = 0; in acrn_vm_ram_map() local
171 nr_pages = memmap->len >> PAGE_SHIFT; in acrn_vm_ram_map()
172 if (!nr_pages) in acrn_vm_ram_map()
188 for (i = 0; i < nr_pages; i++) { in acrn_vm_ram_map()
235 pages = vzalloc(array_size(nr_pages, sizeof(*pages))); in acrn_vm_ram_map()
241 nr_pages, FOLL_WRITE | FOLL_LONGTERM, in acrn_vm_ram_map()
246 } else if (pinned != nr_pages) { in acrn_vm_ram_map()
252 remap_vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL); in acrn_vm_ram_map()
263 region_mapping->npages = nr_pages; in acrn_vm_ram_map()
278 for (i = 0; i < nr_pages; i += 1 << order) { in acrn_vm_ram_map()
[all …]
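The acrn_vm_ram_map() hits above follow the common pin-then-map pattern: pin the userspace backing pages long-term, then give the kernel a contiguous view with vmap(). Below is a minimal sketch of that pattern as a hypothetical helper, not the driver's actual code; it assumes len is page-aligned and uses pin_user_pages_fast(), which may differ from the exact call the driver makes.

    static void *pin_and_map_user_buffer(unsigned long uaddr, size_t len,
                                         struct page ***pages_out, int *nr_out)
    {
            int nr_pages = len >> PAGE_SHIFT;
            struct page **pages;
            void *vaddr;
            int pinned;

            pages = vzalloc(array_size(nr_pages, sizeof(*pages)));
            if (!pages)
                    return NULL;

            /* Long-term pin: the pages must stay resident while mapped. */
            pinned = pin_user_pages_fast(uaddr, nr_pages,
                                         FOLL_WRITE | FOLL_LONGTERM, pages);
            if (pinned != nr_pages) {
                    if (pinned > 0)
                            unpin_user_pages(pages, pinned);
                    vfree(pages);
                    return NULL;
            }

            /* Contiguous kernel-virtual view of the pinned pages. */
            vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
            if (!vaddr) {
                    unpin_user_pages(pages, nr_pages);
                    vfree(pages);
                    return NULL;
            }

            *pages_out = pages;
            *nr_out = nr_pages;
            return vaddr;
    }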
/openbmc/linux/io_uring/
rsrc.c
45 if (!nr_pages) in __io_account_mem()
53 new_pages = cur_pages + nr_pages; in __io_account_mem()
822 for (i = 0; i < nr_pages; i++) { in headpage_already_acct()
884 nr_pages = end - start; in io_pin_pages()
894 if (pret == nr_pages) in io_pin_pages()
895 *npages = nr_pages; in io_pin_pages()
923 int ret, nr_pages, i; in io_sqe_buffer_register() local
932 &nr_pages); in io_sqe_buffer_register()
940 if (nr_pages > 1) { in io_sqe_buffer_register()
961 nr_pages = 1; in io_sqe_buffer_register()
[all …]
/openbmc/linux/kernel/events/
internal.h
20 int nr_pages; /* nr of data pages */ member
73 if (!pause && rb->nr_pages) in rb_toggle_paused()
80 rb_alloc(int nr_pages, long watermark, int cpu, int flags);
83 pgoff_t pgoff, int nr_pages, long watermark, int flags);
121 return rb->nr_pages << page_order(rb); in data_page_nr()
126 return rb->nr_pages << (PAGE_SHIFT + page_order(rb)); in perf_data_size()
152 handle->page &= rb->nr_pages - 1; \
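The masking in the last hit (handle->page &= rb->nr_pages - 1) works only because a perf ring buffer always holds a power-of-two number of data pages, so the modulo collapses to a cheap AND. The same wrap-around trick in isolation, with illustrative values only:

    unsigned int nr_pages = 8;      /* must be a power of two */
    unsigned int idx = 13;

    idx &= nr_pages - 1;            /* same as idx % nr_pages, i.e. 5 */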
/openbmc/linux/drivers/hwtracing/coresight/
coresight-tmc-etr.c
43 int nr_pages; member
121 tmc_etr_sg_table_entries(int nr_pages) in tmc_etr_sg_table_entries() argument
178 tmc_pages->nr_pages = 0; in tmc_pages_free()
193 int i, nr_pages; in tmc_pages_alloc() local
198 nr_pages = tmc_pages->nr_pages; in tmc_pages_alloc()
211 for (i = 0; i < nr_pages; i++) { in tmc_pages_alloc()
277 table_pages->nr_pages, in tmc_alloc_table_pages()
361 index = i % data->nr_pages; in tmc_sg_table_sync_data_range()
1421 int nr_pages, bool snapshot) in tmc_alloc_etr_buffer() argument
1427 nr_pages, pages, snapshot); in tmc_alloc_etr_buffer()
[all …]
/openbmc/linux/arch/arm/mach-rpc/include/mach/
uncompress.h
20 unsigned long nr_pages; member
116 unsigned int nr_pages = 0, page_size = PAGE_SIZE; in arch_decomp_setup() local
130 nr_pages += (t->u.mem.size / PAGE_SIZE); in arch_decomp_setup()
134 nr_pages = params->nr_pages; in arch_decomp_setup()
179 if (nr_pages * page_size < 4096*1024) error("<4M of mem\n"); in arch_decomp_setup()
/openbmc/linux/include/linux/
gfp.h
183 nodemask_t *nodemask, int nr_pages,
188 unsigned long nr_pages,
193 alloc_pages_bulk_list(gfp_t gfp, unsigned long nr_pages, struct list_head *list) in alloc_pages_bulk_list() argument
195 return __alloc_pages_bulk(gfp, numa_mem_id(), NULL, nr_pages, list, NULL); in alloc_pages_bulk_list()
199 alloc_pages_bulk_array(gfp_t gfp, unsigned long nr_pages, struct page **page_array) in alloc_pages_bulk_array() argument
201 return __alloc_pages_bulk(gfp, numa_mem_id(), NULL, nr_pages, NULL, page_array); in alloc_pages_bulk_array()
205 alloc_pages_bulk_array_node(gfp_t gfp, int nid, unsigned long nr_pages, struct page **page_array) in alloc_pages_bulk_array_node() argument
210 return __alloc_pages_bulk(gfp, nid, NULL, nr_pages, NULL, page_array); in alloc_pages_bulk_array_node()
361 extern struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask,
364 void free_contig_range(unsigned long pfn, unsigned long nr_pages);
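The bulk allocator declared above fills an array of struct page pointers in one call and may return fewer pages than requested, so callers must check the return value. A minimal usage sketch (assumes a GFP_KERNEL context; error handling reduced to freeing whatever was populated):

    struct page *pages[16] = { NULL };
    unsigned long nr, i;

    nr = alloc_pages_bulk_array(GFP_KERNEL, ARRAY_SIZE(pages), pages);
    /* Only the first nr slots are guaranteed to be populated. */
    for (i = 0; i < nr; i++)
            __free_page(pages[i]);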
mm_inline.h
39 long nr_pages) in __update_lru_size() argument
44 WARN_ON_ONCE(nr_pages != (int)nr_pages); in __update_lru_size()
46 __mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages); in __update_lru_size()
48 NR_ZONE_LRU_BASE + lru, nr_pages); in __update_lru_size()
53 long nr_pages) in update_lru_size() argument
55 __update_lru_size(lruvec, lru, zid, nr_pages); in update_lru_size()
57 mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages); in update_lru_size()
188 WRITE_ONCE(lrugen->nr_pages[old_gen][type][zone], in lru_gen_update_size()
189 lrugen->nr_pages[old_gen][type][zone] - delta); in lru_gen_update_size()
191 WRITE_ONCE(lrugen->nr_pages[new_gen][type][zone], in lru_gen_update_size()
[all …]
swap.h
196 pgoff_t nr_pages; member
347 void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages);
410 unsigned long nr_pages,
417 extern unsigned long shrink_all_memory(unsigned long nr_pages);
443 unsigned long nr_pages, sector_t start_block);
495 extern int init_swap_address_space(unsigned int type, unsigned long nr_pages);
597 unsigned long nr_pages, sector_t start_block) in add_swap_extent() argument
657 extern void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages);
658 static inline void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages) in mem_cgroup_uncharge_swap() argument
662 __mem_cgroup_uncharge_swap(entry, nr_pages); in mem_cgroup_uncharge_swap()
[all …]
/openbmc/linux/drivers/net/ethernet/ibm/ehea/
ehea_qmr.c
93 int i, nr_pages; in hw_queue_dtor() local
100 nr_pages = queue->queue_length / queue->pagesize; in hw_queue_dtor()
102 for (i = 0; i < nr_pages; i += pages_per_kpage) in hw_queue_dtor()
152 cq, hret, counter, cq->attr.nr_pages); in ehea_create_cq()
156 if (counter == (cq->attr.nr_pages - 1)) { in ehea_create_cq()
262 for (i = 0; i < eq->attr.nr_pages; i++) { in ehea_create_eq()
276 if (i == (eq->attr.nr_pages - 1)) { in ehea_create_eq()
369 for (cnt = 0; cnt < nr_pages; cnt++) { in ehea_qp_alloc_register()
612 if (!nr_pages) in ehea_update_busmap()
700 nr_pages = pfn - start_pfn; in ehea_create_busmap_callback()
[all …]
/openbmc/linux/fs/
fs-writeback.c
43 long nr_pages; member
987 nr_pages = wb_split_bdi_pages(wb, base_work->nr_pages); in bdi_split_work_to_wbs()
992 work->nr_pages = nr_pages; in bdi_split_work_to_wbs()
1011 work->nr_pages = nr_pages; in bdi_split_work_to_wbs()
2006 .nr_pages = nr_pages, in writeback_inodes_wb()
2021 return nr_pages - work.nr_pages; in writeback_inodes_wb()
2042 long nr_pages = work->nr_pages; in wb_writeback() local
2135 return nr_pages - work->nr_pages; in wb_writeback()
2176 long nr_pages; in wb_check_old_data_flush() local
2194 .nr_pages = nr_pages, in wb_check_old_data_flush()
[all …]
/openbmc/linux/drivers/xen/xenbus/
xenbus_client.c
380 unsigned int nr_pages, grant_ref_t *grefs) in xenbus_setup_ring() argument
382 unsigned long ring_size = nr_pages * XEN_PAGE_SIZE; in xenbus_setup_ring()
397 nr_pages); in xenbus_setup_ring()
401 for (i = 0; i < nr_pages; i++) { in xenbus_setup_ring()
421 for (i = 0; i < nr_pages; i++) in xenbus_setup_ring()
443 for (i = 0; i < nr_pages; i++) { in xenbus_teardown_ring()
451 free_pages_exact(*vaddr, nr_pages * XEN_PAGE_SIZE); in xenbus_teardown_ring()
666 unsigned int nr_pages = XENBUS_PAGES(nr_grefs); in xenbus_map_ring_hvm() local
706 addr, nr_pages); in xenbus_map_ring_hvm()
879 unsigned int nr_pages; in xenbus_unmap_ring_hvm() local
[all …]
/openbmc/linux/tools/testing/selftests/mm/
uffd-unit-tests.c
544 for (i = 0; i < nr_pages; ++i) { in check_memory_contents()
576 for (p = 0; p < nr_pages; ++p) in uffd_minor_test_common()
606 nr_pages * page_size / in uffd_minor_test_common()
677 split_nr_pages = (nr_pages + 1) / 2; in faulting_process()
734 area_dst = mremap(area_dst, nr_pages * page_size, nr_pages * page_size, in faulting_process()
741 for (; nr < nr_pages; nr++) { in faulting_process()
756 for (nr = 0; nr < nr_pages; nr++) in faulting_process()
861 if (args.missing_faults != nr_pages) in uffd_events_test_common()
1041 for (nr = 0; nr < nr_pages; ++nr) { in uffd_poison_test()
1068 if (nr_sigbus != nr_pages / 2) in uffd_poison_test()
[all …]
/openbmc/linux/drivers/media/pci/cx23885/
cx23885-alsa.c
72 unsigned long nr_pages) in cx23885_alsa_dma_init() argument
78 buf->vaddr = vmalloc_32(nr_pages << PAGE_SHIFT); in cx23885_alsa_dma_init()
80 dprintk(1, "vmalloc_32(%lu pages) failed\n", nr_pages); in cx23885_alsa_dma_init()
85 buf->vaddr, nr_pages << PAGE_SHIFT); in cx23885_alsa_dma_init()
87 memset(buf->vaddr, 0, nr_pages << PAGE_SHIFT); in cx23885_alsa_dma_init()
88 buf->nr_pages = nr_pages; in cx23885_alsa_dma_init()
90 buf->sglist = vzalloc(array_size(sizeof(*buf->sglist), buf->nr_pages)); in cx23885_alsa_dma_init()
94 sg_init_table(buf->sglist, buf->nr_pages); in cx23885_alsa_dma_init()
95 for (i = 0; i < buf->nr_pages; i++) { in cx23885_alsa_dma_init()
117 buf->nr_pages, DMA_FROM_DEVICE); in cx23885_alsa_dma_map()
[all …]
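cx23885_alsa_dma_init() shows a common capture-driver pattern: allocate a 32-bit-DMA-safe buffer with vmalloc_32(), then describe it page by page in a scatterlist. A condensed sketch of just the scatterlist construction, with error handling omitted and nr_pages as in the snippet:

    void *vaddr = vmalloc_32(nr_pages << PAGE_SHIFT);
    struct scatterlist *sglist;
    int i;

    sglist = vzalloc(array_size(sizeof(*sglist), nr_pages));
    sg_init_table(sglist, nr_pages);
    for (i = 0; i < nr_pages; i++) {
            /* vmalloc memory is not physically contiguous: look up each page. */
            struct page *pg = vmalloc_to_page(vaddr + i * PAGE_SIZE);

            sg_set_page(&sglist[i], pg, PAGE_SIZE, 0);
    }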
/openbmc/linux/drivers/xen/
xlate_mmu.c
222 unsigned long nr_pages; in xen_xlate_map_ballooned_pages() local
225 nr_pages = DIV_ROUND_UP(nr_grant_frames, XEN_PFN_PER_PAGE); in xen_xlate_map_ballooned_pages()
226 pages = kcalloc(nr_pages, sizeof(pages[0]), GFP_KERNEL); in xen_xlate_map_ballooned_pages()
235 rc = xen_alloc_unpopulated_pages(nr_pages, pages); in xen_xlate_map_ballooned_pages()
238 nr_pages, rc); in xen_xlate_map_ballooned_pages()
248 vaddr = vmap(pages, nr_pages, 0, PAGE_KERNEL); in xen_xlate_map_ballooned_pages()
251 nr_pages, rc); in xen_xlate_map_ballooned_pages()
252 xen_free_unpopulated_pages(nr_pages, pages); in xen_xlate_map_ballooned_pages()
/openbmc/linux/arch/arm64/kvm/hyp/include/nvhe/
mem_protect.h
68 int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages);
69 int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages);
70 int __pkvm_host_share_ffa(u64 pfn, u64 nr_pages);
71 int __pkvm_host_unshare_ffa(u64 pfn, u64 nr_pages);
/openbmc/linux/drivers/edac/
pasemi_edac.c
145 dimm->nr_pages = 128 << (20 - PAGE_SHIFT); in pasemi_edac_init_csrows()
148 dimm->nr_pages = 256 << (20 - PAGE_SHIFT); in pasemi_edac_init_csrows()
152 dimm->nr_pages = 512 << (20 - PAGE_SHIFT); in pasemi_edac_init_csrows()
155 dimm->nr_pages = 1024 << (20 - PAGE_SHIFT); in pasemi_edac_init_csrows()
158 dimm->nr_pages = 2048 << (20 - PAGE_SHIFT); in pasemi_edac_init_csrows()
168 csrow->last_page = csrow->first_page + dimm->nr_pages - 1; in pasemi_edac_init_csrows()
169 last_page_in_mmc += dimm->nr_pages; in pasemi_edac_init_csrows()
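The N << (20 - PAGE_SHIFT) expressions above convert a DIMM size in MiB into a page count: with 4 KiB pages (PAGE_SHIFT = 12), a 128 MiB rank gives 128 << 8 = 32768 pages, and the csrow bounds then follow directly as first_page + nr_pages - 1.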
/openbmc/linux/arch/arm/xen/
p2m.c
25 unsigned long nr_pages; member
75 entry->pfn + entry->nr_pages > pfn) { in __pfn_to_mfn()
151 unsigned long mfn, unsigned long nr_pages) in __set_phys_to_machine_multi() argument
164 p2m_entry->pfn + p2m_entry->nr_pages > pfn) { in __set_phys_to_machine_multi()
184 p2m_entry->nr_pages = nr_pages; in __set_phys_to_machine_multi()
