Search hits for the identifier "npages" across the openbmc/linux tree; each hit shows the source line number, the matching line, and the enclosing function.

/openbmc/linux/tools/testing/selftests/mm/hmm-tests.c
    189: cmd.npages = npages;    in hmm_dmirror_cmd()
    295: unsigned long npages;    in TEST_F() (local)
    303: ASSERT_NE(npages, 0);    in TEST_F()
    1159: npages = 6;    in TEST_F()
    1335: npages);    in TEST_F()
    1404: npages);    in TEST_F()
    1432: npages = 1;    in TEST_F()
    1477: npages = 7;    in TEST_F()
    1652: npages = 6;    in TEST_F()
    1913: npages = 4;    in TEST_F()
    [all …]

/openbmc/linux/drivers/gpu/drm/i915/selftests/scatterlist.c
    70: pfn += npages;    in expect_pfn_sg()
    209: unsigned long npages)    in page_contiguous() (argument)
    211: return first + npages == last;    in page_contiguous()
    246: pfn_to_page(pfn + npages),    in alloc_table()
    247: npages)) {    in alloc_table()
    260: pfn += npages;    in alloc_table()
    292: const npages_fn_t *npages;    in igt_sg_alloc() (local)
    296: for (npages = npages_funcs; *npages; npages++) {    in igt_sg_alloc()
    334: const npages_fn_t *npages;    in igt_sg_trim() (local)
    337: for (npages = npages_funcs; *npages; npages++) {    in igt_sg_trim()
    [all …]

/openbmc/linux/drivers/infiniband/hw/hfi1/user_pages.c
    30: u32 nlocked, u32 npages)    in hfi1_can_pin_pages() (argument)
    47: if (atomic64_read(&mm->pinned_vm) + npages > ulimit_pages)    in hfi1_can_pin_pages()
    66: if (nlocked + npages > (ulimit_pages / usr_ctxts / 4))    in hfi1_can_pin_pages()
    74: if (nlocked + npages > cache_limit_pages)    in hfi1_can_pin_pages()
    80: int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr, size_t npages,    in hfi1_acquire_user_pages() (argument)
    86: ret = pin_user_pages_fast(vaddr, npages, gup_flags, pages);    in hfi1_acquire_user_pages()
    96: size_t npages, bool dirty)    in hfi1_release_user_pages() (argument)
    98: unpin_user_pages_dirty_lock(p, npages, dirty);    in hfi1_release_user_pages()
    101: atomic64_sub(npages, &mm->pinned_vm);    in hfi1_release_user_pages()

/openbmc/linux/drivers/infiniband/hw/hfi1/pin_system.c
    20: unsigned int npages;    (member)
    60: evict_data.target = npages;    in sdma_cache_evict()
    79: if (node->npages) {    in free_system_node()
    81: node->npages);    in free_system_node()
    128: npages)) {    in pin_system_pages()
    132: if (cleared >= npages)    in pin_system_pages()
    137: start_address, node->npages, npages);    in pin_system_pages()
    146: if (pinned != npages) {    in pin_system_pages()
    154: node->npages = npages;    in pin_system_pages()
    306: page_index, cache_entry->npages);    in add_mapping_to_sdma_packet()
    [all …]

/openbmc/linux/drivers/infiniband/hw/hfi1/user_exp_rcv.c
    20: u16 pageidx, unsigned int npages);
    136: unsigned int npages,    in unpin_rcv_pages() (argument)
    153: fd->tid_n_pinned -= npages;    in unpin_rcv_pages()
    162: unsigned int npages = tidbuf->npages;    in pin_rcv_pages() (local)
    572: if (!npages)    in find_phys_blocks()
    699: npages = tbuf->psets[setidx].count;    in program_rcvarray()
    704: npages);    in program_rcvarray()
    707: mapped += npages;    in program_rcvarray()
    755: node->npages = npages;    in set_rcvarray_entry()
    829: node->npages,    in __clear_tid_node()
    [all …]

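The hfi1 user_pages.c hits above show the usual long-term pinning pattern: charge the pages to mm->pinned_vm against the memlock limit, pin with pin_user_pages_fast(), and undo with unpin_user_pages_dirty_lock() plus atomic64_sub(). A minimal sketch of that pattern follows; the demo_* helpers and the exact limit policy are illustrative assumptions, not the hfi1 code.

#include <linux/mm.h>
#include <linux/atomic.h>
#include <linux/sched/signal.h>
#include <linux/capability.h>

/* Pin npages of user memory at vaddr for long-term device access. */
static int demo_pin_user_range(struct mm_struct *mm, unsigned long vaddr,
			       unsigned long npages, struct page **pages)
{
	unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	int pinned;

	/* Charge the pages to mm->pinned_vm before pinning anything. */
	if (atomic64_add_return(npages, &mm->pinned_vm) > limit &&
	    !capable(CAP_IPC_LOCK)) {
		atomic64_sub(npages, &mm->pinned_vm);
		return -ENOMEM;
	}

	pinned = pin_user_pages_fast(vaddr, npages,
				     FOLL_WRITE | FOLL_LONGTERM, pages);
	if (pinned < 0) {
		atomic64_sub(npages, &mm->pinned_vm);
		return pinned;
	}
	if (pinned != npages) {
		/* Partial pin: release what was pinned and fail. */
		unpin_user_pages(pages, pinned);
		atomic64_sub(npages, &mm->pinned_vm);
		return -EFAULT;
	}
	return 0;
}

/* Undo the pin, marking pages dirty if the device may have written them. */
static void demo_unpin_user_range(struct mm_struct *mm, struct page **pages,
				  unsigned long npages, bool dirty)
{
	unpin_user_pages_dirty_lock(pages, npages, dirty);
	atomic64_sub(npages, &mm->pinned_vm);
}

The dfl-afu-dma-region.c and umem.c entries further down implement the same accounting via account_locked_vm() and via a chunked pinning loop, respectively.
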
/openbmc/linux/lib/kunit_iov_iter.c
    49: size_t npages)    in iov_kunit_create_buffer() (argument)
    60: if (got != npages) {    in iov_kunit_create_buffer()
    109: npages = bufsize / PAGE_SIZE;    in iov_kunit_copy_to_kvec()
    159: npages = bufsize / PAGE_SIZE;    in iov_kunit_copy_from_kvec()
    268: npages = bufsize / PAGE_SIZE;    in iov_kunit_copy_to_bvec()
    322: npages = bufsize / PAGE_SIZE;    in iov_kunit_copy_from_bvec()
    415: npages = bufsize / PAGE_SIZE;    in iov_kunit_copy_to_xarray()
    473: npages = bufsize / PAGE_SIZE;    in iov_kunit_copy_from_xarray()
    537: npages = bufsize / PAGE_SIZE;    in iov_kunit_extract_pages_kvec()
    616: npages = bufsize / PAGE_SIZE;    in iov_kunit_extract_pages_bvec()
    [all …]

/openbmc/linux/arch/sparc/kernel/iommu.c
    158: unsigned long npages)    in alloc_npages() (argument)
    204: int npages, nid;    in dma_4u_alloc_coherent() (local)
    235: while (npages--) {    in dma_4u_alloc_coherent()
    251: unsigned long order, npages;    in dma_4u_free_coherent() (local)
    284: npages >>= IO_PAGE_SHIFT;    in dma_4u_map_page()
    383: vaddr, ctx, npages);    in strbuf_flush()
    405: npages >>= IO_PAGE_SHIFT;    in dma_4u_unmap_page()
    420: npages, direction);    in dma_4u_unmap_page()
    510: while (npages--) {    in dma_4u_map_sg()
    642: npages, direction);    in dma_4u_unmap_sg()
    [all …]

/openbmc/linux/arch/sparc/kernel/pci_sun4v.c
    75: p->npages = 0;    in iommu_batch_start()
    92: unsigned long npages = p->npages;    in iommu_batch_flush() (local)
    101: while (npages != 0) {    in iommu_batch_flush()
    105: npages,    in iommu_batch_flush()
    136: npages -= num;    in iommu_batch_flush()
    141: p->npages = 0;    in iommu_batch_flush()
    308: npages);    in dma_4v_iommu_demap()
    318: npages -= num;    in dma_4v_iommu_demap()
    319: } while (npages != 0);    in dma_4v_iommu_demap()
    437: unsigned long npages;    in dma_4v_unmap_page() (local)
    [all …]

/openbmc/linux/fs/netfs/iterator.c
    44: unsigned int npages = 0;    in netfs_extract_user_iter() (local)
    66: while (count && npages < max_pages) {    in netfs_extract_user_iter()
    68: max_pages - npages, extraction_flags,    in netfs_extract_user_iter()
    84: if (npages + cur_npages > max_pages) {    in netfs_extract_user_iter()
    86: npages + cur_npages, max_pages);    in netfs_extract_user_iter()
    92: bvec_set_page(bv + npages + i, *pages++, len - offset, offset);    in netfs_extract_user_iter()
    97: npages += cur_npages;    in netfs_extract_user_iter()
    100: iov_iter_bvec(new, orig->data_source, bv, npages, orig_len - count);    in netfs_extract_user_iter()
    101: return npages;    in netfs_extract_user_iter()

/openbmc/linux/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
    52: s32 npages;    (member)
    199: s32 *npages, int boot)    in mlx5_cmd_query_pages() (argument)
    440: int npages = 0;    in release_all_pages() (local)
    465: u32 npages)    in fwp_fill_manage_pages_out() (argument)
    475: if (!--npages)    in fwp_fill_manage_pages_out()
    490: u32 npages;    in reclaim_pages_cmd() (local)
    506: while (p && i < npages) {    in reclaim_pages_cmd()
    593: else if (req->npages < 0)    in pages_work_handler()
    621: s32 npages;    in req_pages_handler() (local)
    642: req->npages = npages;    in req_pages_handler()
    [all …]

/openbmc/linux/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
    21: err = sg_alloc_table(st, mock->npages, GFP_KERNEL);    in mock_map_dma_buf()
    26: for (i = 0; i < mock->npages; i++) {    in mock_map_dma_buf()
    58: for (i = 0; i < mock->npages; i++)    in mock_dmabuf_release()
    69: vaddr = vm_map_ram(mock->pages, mock->npages, 0);    in mock_dmabuf_vmap()
    81: vm_unmap_ram(map->vaddr, mock->npages);    in mock_dmabuf_vunmap()
    98: static struct dma_buf *mock_dmabuf(int npages)    in mock_dmabuf() (argument)
    105: mock = kmalloc(sizeof(*mock) + npages * sizeof(struct page *),    in mock_dmabuf()
    110: mock->npages = npages;    in mock_dmabuf()
    111: for (i = 0; i < npages; i++) {    in mock_dmabuf()
    118: exp_info.size = npages * PAGE_SIZE;    in mock_dmabuf()

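mock_dmabuf.c builds its scatterlist with one entry per page: sg_alloc_table(st, npages, ...) followed by a per-page fill loop. A self-contained sketch of that idiom, with demo_pages_to_sgt() as a hypothetical helper rather than an i915 function:

#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/mm.h>

/* Build an sg_table with one scatterlist entry per page. */
static struct sg_table *demo_pages_to_sgt(struct page **pages, unsigned int npages)
{
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int i;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return ERR_PTR(-ENOMEM);

	if (sg_alloc_table(st, npages, GFP_KERNEL)) {
		kfree(st);
		return ERR_PTR(-ENOMEM);
	}

	sg = st->sgl;
	for (i = 0; i < npages; i++) {
		/* Each entry covers one full page at offset 0. */
		sg_set_page(sg, pages[i], PAGE_SIZE, 0);
		sg = sg_next(sg);
	}
	return st;
}

The i915_gem_internal.c entry further down uses the same sg_alloc_table() sizing but coalesces higher-order allocations, so a single entry may cover 1 << order pages.
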
/openbmc/linux/drivers/vfio/iova_bitmap.c
    45: unsigned long npages;    (member)
    164: unsigned long npages;    in iova_bitmap_get() (local)
    174: npages = DIV_ROUND_UP((bitmap->mapped_total_index -    in iova_bitmap_get()
    188: npages = min(npages + !!offset_in_page(addr),    in iova_bitmap_get()
    191: ret = pin_user_pages_fast((unsigned long)addr, npages,    in iova_bitmap_get()
    196: mapped->npages = (unsigned long)ret;    in iova_bitmap_get()
    218: if (mapped->npages) {    in iova_bitmap_put()
    219: unpin_user_pages(mapped->pages, mapped->npages);    in iova_bitmap_put()
    220: mapped->npages = 0;    in iova_bitmap_put()
    302: bytes = (bitmap->mapped.npages << PAGE_SHIFT) - bitmap->mapped.pgoff;    in iova_bitmap_mapped_remaining()
    [all …]

/openbmc/linux/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
    65: num_bytes = npages * 8;    in svm_migrate_gart_map()
    138: while (npages) {    in svm_migrate_copy_memory_gart()
    164: npages -= size;    in svm_migrate_copy_memory_gart()
    165: if (npages) {    in svm_migrate_copy_memory_gart()
    416: buf = kvcalloc(npages,    in svm_migrate_vma_to_vram()
    444: if (cpages != npages)    in svm_migrate_vma_to_vram()
    446: cpages, npages);    in svm_migrate_vma_to_vram()
    711: buf = kvcalloc(npages,    in svm_migrate_vma_to_ram()
    741: if (cpages != npages)    in svm_migrate_vma_to_ram()
    743: cpages, npages);    in svm_migrate_vma_to_ram()
    [all …]

/openbmc/linux/mm/migrate_device.c
    28: migrate->dst[migrate->npages] = 0;    in migrate_vma_collect_skip()
    29: migrate->src[migrate->npages++] = 0;    in migrate_vma_collect_skip()
    49: migrate->dst[migrate->npages] = 0;    in migrate_vma_collect_hole()
    50: migrate->npages++;    in migrate_vma_collect_hole()
    358: unsigned long npages,    in migrate_device_unmap() (argument)
    367: for (i = 0; i < npages; i++) {    in migrate_device_unmap()
    537: args->npages = 0;    in migrate_vma_setup()
    689: for (i = 0; i < npages; i++) {    in __migrate_device_pages()
    781: unsigned long npages)    in migrate_device_pages() (argument)
    816: for (i = 0; i < npages; i++) {    in migrate_device_finalize()
    [all …]

/openbmc/linux/drivers/fpga/dfl-afu-dma-region.c
    37: int npages = region->length >> PAGE_SHIFT;    in afu_dma_pin_pages() (local)
    41: ret = account_locked_vm(current->mm, npages, true);    in afu_dma_pin_pages()
    45: region->pages = kcalloc(npages, sizeof(struct page *), GFP_KERNEL);    in afu_dma_pin_pages()
    56: } else if (pinned != npages) {    in afu_dma_pin_pages()
    70: account_locked_vm(current->mm, npages, false);    in afu_dma_pin_pages()
    85: long npages = region->length >> PAGE_SHIFT;    in afu_dma_unpin_pages() (local)
    88: unpin_user_pages(region->pages, npages);    in afu_dma_unpin_pages()
    90: account_locked_vm(current->mm, npages, false);    in afu_dma_unpin_pages()
    92: dev_dbg(dev, "%ld pages unpinned\n", npages);    in afu_dma_unpin_pages()
    104: int npages = region->length >> PAGE_SHIFT;    in afu_dma_check_continuous_pages() (local)
    [all …]

/openbmc/linux/arch/x86/mm/cpu_entry_area.c
    108: unsigned int npages;    in percpu_setup_debug_store() (local)
    115: npages = sizeof(struct debug_store) / PAGE_SIZE;    in percpu_setup_debug_store()
    117: cea_map_percpu_pages(cea, &per_cpu(cpu_debug_store, cpu), npages,    in percpu_setup_debug_store()
    125: npages = sizeof(struct debug_store_buffers) / PAGE_SIZE;    in percpu_setup_debug_store()
    126: for (; npages; npages--, cea += PAGE_SIZE)    in percpu_setup_debug_store()
    134: npages = sizeof(estacks->name## _stack) / PAGE_SIZE; \
    136: estacks->name## _stack, npages, PAGE_KERNEL); \
    143: unsigned int npages;    in percpu_setup_exception_stacks() (local)

/openbmc/linux/drivers/infiniband/core/ib_core_uverbs.c
    141: pgoff, entry->npages);    in rdma_user_mmap_entry_get_pgoff()
    171: if (entry->npages * PAGE_SIZE != vma->vm_end - vma->vm_start) {    in rdma_user_mmap_entry_get()
    191: for (i = 0; i < entry->npages; i++)    in rdma_user_mmap_entry_free()
    196: entry->start_pgoff, entry->npages);    in rdma_user_mmap_entry_free()
    269: u32 xa_first, xa_last, npages;    in rdma_user_mmap_entry_insert_range() (local)
    290: npages = (u32)DIV_ROUND_UP(length, PAGE_SIZE);    in rdma_user_mmap_entry_insert_range()
    291: entry->npages = npages;    in rdma_user_mmap_entry_insert_range()
    301: if (check_add_overflow(xa_first, npages, &xa_last))    in rdma_user_mmap_entry_insert_range()
    328: entry->start_pgoff, npages);    in rdma_user_mmap_entry_insert_range()

/openbmc/linux/drivers/infiniband/core/umem.c
    152: unsigned long npages;    in ib_umem_get() (local)
    191: npages = ib_umem_num_pages(umem);    in ib_umem_get()
    192: if (npages == 0 || npages > UINT_MAX) {    in ib_umem_get()
    199: new_pinned = atomic64_add_return(npages, &mm->pinned_vm);    in ib_umem_get()
    201: atomic64_sub(npages, &mm->pinned_vm);    in ib_umem_get()
    211: while (npages) {    in ib_umem_get()
    214: min_t(unsigned long, npages,    in ib_umem_get()
    224: npages -= pinned;    in ib_umem_get()
    228: npages, GFP_KERNEL);    in ib_umem_get()

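ib_core_uverbs.c derives npages from a byte length with DIV_ROUND_UP(length, PAGE_SIZE) and guards the page-offset arithmetic with check_add_overflow(). The same two steps in isolation; demo_range_npages() is a hypothetical helper, not part of the RDMA core API:

#include <linux/kernel.h>
#include <linux/overflow.h>
#include <linux/mm.h>

/*
 * Turn a byte length into a page count and compute the first page offset
 * past a mapping that starts at first_pgoff, failing if the 32-bit pgoff
 * space would wrap.
 */
static int demo_range_npages(size_t length, u32 first_pgoff, u32 *end_pgoff)
{
	u32 npages;

	if (!length)
		return -EINVAL;

	/* Round the length up to whole pages. */
	npages = (u32)DIV_ROUND_UP(length, PAGE_SIZE);

	if (check_add_overflow(first_pgoff, npages, end_pgoff))
		return -EOVERFLOW;

	return 0;
}

The iova_bitmap.c entry above uses the same DIV_ROUND_UP idiom and then clamps npages with min() before handing the count to pin_user_pages_fast().
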
/openbmc/linux/drivers/infiniband/hw/mthca/mthca_memfree.c
    72: for (i = 0; i < chunk->npages; ++i)    in mthca_free_icm_pages()
    81: for (i = 0; i < chunk->npages; ++i) {    in mthca_free_icm_coherent()
    157: while (npages > 0) {    in mthca_alloc_icm()
    165: chunk->npages = 0;    in mthca_alloc_icm()
    170: while (1 << cur_order > npages)    in mthca_alloc_icm()
    182: ++chunk->npages;    in mthca_alloc_icm()
    189: chunk->npages,    in mthca_alloc_icm()
    199: npages -= 1 << cur_order;    in mthca_alloc_icm()
    528: int npages;    in mthca_init_user_db_tab() (local)
    540: for (i = 0; i < npages; ++i) {    in mthca_init_user_db_tab()
    [all …]

/openbmc/linux/drivers/infiniband/hw/mthca/mthca_allocator.c
    195: int npages, shift;    in mthca_buf_alloc() (local)
    202: npages = 1;    in mthca_buf_alloc()
    214: npages *= 2;    in mthca_buf_alloc()
    217: dma_list = kmalloc_array(npages, sizeof(*dma_list),    in mthca_buf_alloc()
    222: for (i = 0; i < npages; ++i)    in mthca_buf_alloc()
    226: npages = (size + PAGE_SIZE - 1) / PAGE_SIZE;    in mthca_buf_alloc()
    229: dma_list = kmalloc_array(npages, sizeof(*dma_list),    in mthca_buf_alloc()
    234: buf->page_list = kmalloc_array(npages,    in mthca_buf_alloc()
    240: for (i = 0; i < npages; ++i)    in mthca_buf_alloc()
    243: for (i = 0; i < npages; ++i) {    in mthca_buf_alloc()
    [all …]

/openbmc/linux/arch/powerpc/kernel/iommu.c
    222: int largealloc = npages > 15;    in iommu_range_alloc()
    234: if (unlikely(npages == 0)) {    in iommu_range_alloc()
    324: end = n + npages;    in iommu_range_alloc()
    389: unsigned int npages)    in iommu_free_check() (argument)
    436: unsigned int npages)    in __iommu_free() (argument)
    458: unsigned int npages)    in iommu_free() (argument)
    525: npages);    in ppc_iommu_map_sg()
    626: unsigned int npages;    in ppc_iommu_unmap_sg() (local)
    860: unsigned int npages, align;    in iommu_map_page() (local)
    882: npages);    in iommu_map_page()
    [all …]

/openbmc/linux/arch/x86/include/asm/sev.h
    203: unsigned long npages);
    205: unsigned long npages);
    206: void snp_set_memory_shared(unsigned long vaddr, unsigned long npages);
    207: void snp_set_memory_private(unsigned long vaddr, unsigned long npages);
    227: early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr, unsigned long npages) { }    in early_snp_set_memory_private() (argument)
    229: early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr, unsigned long npages) { }    in early_snp_set_memory_shared() (argument)
    230: static inline void snp_set_memory_shared(unsigned long vaddr, unsigned long npages) { }    in snp_set_memory_shared() (argument)
    231: static inline void snp_set_memory_private(unsigned long vaddr, unsigned long npages) { }    in snp_set_memory_private() (argument)

/openbmc/linux/tools/testing/selftests/kvm/memslot_perf_test.c
    88: uint64_t npages;    (member)
    264: mempages = data->npages;    in get_max_slots()
    297: data->npages = mempages;    in prepare_vm()
    316: uint64_t npages;    in prepare_vm() (local)
    318: npages = data->pages_per_slot;    in prepare_vm()
    320: npages += rempages;    in prepare_vm()
    330: uint64_t npages;    in prepare_vm() (local)
    335: npages += rempages;    in prepare_vm()
    636: uint64_t npages;    in test_memslot_do_unmap() (local)
    642: npages = min(npages, count - ctr);    in test_memslot_do_unmap()
    [all …]

/openbmc/linux/drivers/gpu/drm/i915/gem/i915_gem_internal.c
    38: unsigned int npages; /* restricted by sg_alloc_table */    in i915_gem_object_get_pages_internal() (local)
    43: if (overflows_type(obj->base.size >> PAGE_SHIFT, npages))    in i915_gem_object_get_pages_internal()
    46: npages = obj->base.size >> PAGE_SHIFT;    in i915_gem_object_get_pages_internal()
    62: if (sg_alloc_table(st, npages, GFP_KERNEL)) {    in i915_gem_object_get_pages_internal()
    71: int order = min(fls(npages) - 1, max_order);    in i915_gem_object_get_pages_internal()
    89: npages -= 1 << order;    in i915_gem_object_get_pages_internal()
    90: if (!npages) {    in i915_gem_object_get_pages_internal()

/openbmc/linux/drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c
    53: u64 npages, bool alloc_pages)    in pvrdma_page_dir_init() (argument)
    57: if (npages > PVRDMA_PAGE_DIR_MAX_PAGES)    in pvrdma_page_dir_init()
    67: pdir->ntables = PVRDMA_PAGE_DIR_TABLE(npages - 1) + 1;    in pvrdma_page_dir_init()
    81: pdir->npages = npages;    in pvrdma_page_dir_init()
    84: pdir->pages = kcalloc(npages, sizeof(*pdir->pages),    in pvrdma_page_dir_init()
    89: for (i = 0; i < pdir->npages; i++) {    in pvrdma_page_dir_init()
    127: for (i = 0; i < pdir->npages && pdir->pages[i]; i++) {    in pvrdma_page_dir_cleanup_pages()
    173: if (idx >= pdir->npages)    in pvrdma_page_dir_insert_dma()
    189: if (offset >= pdir->npages)    in pvrdma_page_dir_insert_umem()
    212: if (num_pages > pdir->npages)    in pvrdma_page_dir_insert_page_list()