Searched refs:vma (Results 1 – 25 of 1043) sorted by relevance


/openbmc/linux/drivers/gpu/drm/i915/
i915_vma.h
131 return vma->node.size - 2 * vma->guard; in __i915_vma_size()
154 return vma->node.start + vma->guard; in __i915_vma_offset()
191 return vma; in i915_vma_get()
197 return vma; in i915_vma_tryget()
221 cmp = vma->gtt_view.type; in i915_vma_compare()
276 #define assert_vma_held(vma) dma_resv_assert_held((vma)->obj->base.resv) argument
328 atomic_inc(&vma->flags); in __i915_vma_pin()
335 atomic_dec(&vma->flags); in __i915_vma_unpin()
341 __i915_vma_unpin(vma); in i915_vma_unpin()
417 if (vma->fence) in i915_vma_unpin_fence()
[all …]
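
Note: the i915_vma.h hits above outline two conventions worth calling out: __i915_vma_pin()/__i915_vma_unpin() appear to keep a pin count in the low bits of the atomic vma->flags (hence the bare atomic_inc/atomic_dec), and __i915_vma_size()/__i915_vma_offset() carve a guard area off both ends of the allocated node. A minimal worked example of the guard arithmetic, with hypothetical values not taken from the driver:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            /* the node is padded with a guard at each end, so the usable
             * span is node.size - 2 * guard, starting guard bytes in */
            uint64_t node_start = 0x10000, node_size = 0x6000, guard = 0x1000;

            assert(node_size - 2 * guard == 0x4000);    /* usable size   */
            assert(node_start + guard == 0x11000);      /* usable offset */
            return 0;
    }
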
i915_vma.c
81 vma->node.start, vma->node.size, reason); in vma_print_allocator()
88 vma->node.start, vma->node.size, reason, buf); in vma_print_allocator()
257 list_add(&vma->obj_link, &obj->vma.list); in vma_create()
443 i915_vma_resource_init(vma_res, vma->vm, vma->pages, &vma->page_sizes, in i915_vma_resource_init_from_vma()
446 vma->ops, vma->private, __i915_vma_offset(vma), in i915_vma_resource_init_from_vma()
447 __i915_vma_size(vma), vma->size, vma->guard); in i915_vma_resource_init_from_vma()
475 GEM_BUG_ON(vma->size > i915_vma_size(vma)); in i915_vma_bind()
564 vma->ops->bind_vma(vma->vm, NULL, vma->resource, pat_index, in i915_vma_bind()
1353 vma->page_sizes = vma->obj->mm.page_sizes; in i915_vma_get_pages()
1391 if (vma->pages != vma->obj->mm.pages) { in __vma_put_pages()
[all …]
i915_gem_evict.c
88 if (dying_vma(vma)) in ungrab_vma()
98 struct i915_vma *vma, in mark_free() argument
105 if (!grab_vma(vma, ww)) in mark_free()
224 active = vma; in i915_gem_evict_something()
238 ungrab_vma(vma); in i915_gem_evict_something()
287 ungrab_vma(vma); in i915_gem_evict_something()
297 ungrab_vma(vma); in i915_gem_evict_something()
307 ungrab_vma(vma); in i915_gem_evict_something()
380 vma = container_of(node, typeof(*vma), node); in i915_gem_evict_for_node()
424 __i915_vma_pin(vma); in i915_gem_evict_for_node()
[all …]
/openbmc/linux/drivers/gpu/drm/
drm_vm.c
112 struct vm_area_struct *vma = vmf->vma; in drm_vm_fault() local
201 struct vm_area_struct *vma = vmf->vma; in drm_vm_shm_fault() local
240 vma->vm_start, vma->vm_end - vma->vm_start); in drm_vm_shm_close()
248 if (pt->vma == vma) { in drm_vm_shm_close()
395 vma->vm_start, vma->vm_end - vma->vm_start); in drm_vm_open_locked()
399 vma_entry->vma = vma; in drm_vm_open_locked()
421 vma->vm_start, vma->vm_end - vma->vm_start); in drm_vm_close_locked()
424 if (pt->vma == vma) { in drm_vm_close_locked()
470 vma->vm_start, vma->vm_end, vma->vm_pgoff); in drm_mmap_dma()
532 vma->vm_start, vma->vm_end, vma->vm_pgoff); in drm_mmap_locked()
[all …]
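
Note: the drm_vm.c hits follow the modern fault-handler shape, where the handler receives only struct vm_fault and recovers the VMA from vmf->vma. A minimal sketch of that pattern (illustrative only, not the DRM implementation):

    #include <linux/mm.h>

    /* sketch: since the vm_fault_t conversion, fault handlers take only
     * struct vm_fault; the faulting VMA and address ride along in vmf */
    static vm_fault_t demo_fault(struct vm_fault *vmf)
    {
            struct vm_area_struct *vma = vmf->vma;

            /* ... look up the backing page for vmf->address, which lies
             * in [vma->vm_start, vma->vm_end) ... */
            return VM_FAULT_SIGBUS;     /* no backing store in this sketch */
    }

    static const struct vm_operations_struct demo_vm_ops = {
            .fault = demo_fault,
    };
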
/openbmc/linux/mm/
mmap.c
140 if (vma->vm_ops && vma->vm_ops->close) in remove_vma()
141 vma->vm_ops->close(vma); in remove_vma()
433 vp->vma = vma; in init_multi_vma_prep()
466 uprobe_munmap(vp->vma, vp->vma->vm_start, vp->vma->vm_end); in vma_prepare()
1114 MA_STATE(mas, &vma->vm_mm->mm_mt, vma->vm_end, vma->vm_end); in find_mergeable_anon_vma()
1127 VM_BUG_ON_VMA(prev != vma, vma); in find_mergeable_anon_vma()
2177 if (vma && (vma->vm_start <= addr)) in find_extend_vma_locked()
2282 if (vma && vma->vm_start <= addr) in expand_stack()
2903 vma->vm_ops->close(vma); in mmap_region()
3101 if (vma && vma->vm_end == addr && !vma_policy(vma) && in do_brk_flags()
[all …]
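
Note: the find_extend_vma_locked() and expand_stack() hits both pair find_vma() with a vm_start check. That is the documented find_vma() contract: it returns the first VMA whose vm_end is above the address, which may sit entirely above it. A small sketch of the idiom (demo_addr_mapped is a hypothetical helper):

    #include <linux/mm.h>

    static bool demo_addr_mapped(struct mm_struct *mm, unsigned long addr)
    {
            struct vm_area_struct *vma;
            bool mapped;

            mmap_read_lock(mm);             /* find_vma() needs the mmap lock */
            vma = find_vma(mm, addr);
            /* vm_end > addr is guaranteed; vm_start <= addr is not */
            mapped = vma && vma->vm_start <= addr;
            mmap_read_unlock(mm);
            return mapped;
    }
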
mremap.c
527 new_pud = alloc_new_pud(vma->vm_mm, vma, new_addr); in move_page_tables()
548 new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr); in move_page_tables()
612 if (vma->vm_ops && vma->vm_ops->may_split) { in move_vma()
614 err = vma->vm_ops->may_split(vma, old_addr); in move_vma()
652 } else if (vma->vm_ops && vma->vm_ops->mremap) { in move_vma()
709 if (new_vma != vma && vma->vm_start == old_addr && in move_vma()
754 if (!vma) in vma_to_resize()
1056 vma = vma_merge(&vmi, mm, vma, extension_start, in SYSCALL_DEFINE5()
1057 extension_end, vma->vm_flags, vma->anon_vma, in SYSCALL_DEFINE5()
1058 vma->vm_file, extension_pgoff, vma_policy(vma), in SYSCALL_DEFINE5()
[all …]
nommu.c
102 if (vma) in kobjsize()
103 return vma->vm_end - vma->vm_start; in kobjsize()
584 VMA_ITERATOR(vmi, vma->vm_mm, vma->vm_start); in delete_vma_from_mm()
603 if (vma->vm_ops && vma->vm_ops->close) in delete_vma()
604 vma->vm_ops->close(vma); in delete_vma()
899 ret = call_mmap(vma->vm_file, vma); in do_mmap_shared_file()
932 ret = call_mmap(vma->vm_file, vma); in do_mmap_private()
1571 if (vma->vm_end != vma->vm_start + old_len) in do_mremap()
1581 vma->vm_end = vma->vm_start + new_len; in do_mremap()
1620 return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot); in vm_iomap_memory()
[all …]
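
Note: the hit at nommu.c:1620 shows vm_iomap_memory() reduced to a single io_remap_pfn_range() call on !MMU. On the driver side the helper is the usual way to expose an MMIO region from an mmap hook; a sketch, with DEMO_PHYS_BASE and DEMO_REGION_LEN as hypothetical device constants:

    #include <linux/fs.h>
    #include <linux/mm.h>

    #define DEMO_PHYS_BASE  0xfe000000UL
    #define DEMO_REGION_LEN 0x10000UL

    static int demo_mmap(struct file *file, struct vm_area_struct *vma)
    {
            /* validates vma->vm_pgoff and the requested length against
             * the region before remapping it */
            return vm_iomap_memory(vma, DEMO_PHYS_BASE, DEMO_REGION_LEN);
    }
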
madvise.c
148 *prev = vma; in madvise_update_vma()
154 vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma), in madvise_update_vma()
161 *prev = vma; in madvise_update_vma()
179 if (!vma->vm_file || vma_is_anon_shmem(vma)) { in madvise_update_vma()
287 *prev = vma; in madvise_willneed()
350 struct vm_area_struct *vma = walk->vma; in madvise_cold_or_pageout_pte_range() local
631 struct vm_area_struct *vma = walk->vma; in madvise_free_pte_range() local
878 if (!vma) in madvise_dontneed_free()
1055 if (vma->vm_file || vma->vm_flags & VM_SHARED) in madvise_vma_behavior()
1233 if (vma && start > vma->vm_start) in madvise_walk_vmas()
[all …]
mprotect.c
588 *pprev = vma; in mprotect_fixup()
631 pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); in mprotect_fixup()
633 vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma), in mprotect_fixup()
634 vma->vm_userfaultfd_ctx, anon_vma_name(vma)); in mprotect_fixup()
636 vma = *pprev; in mprotect_fixup()
641 *pprev = vma; in mprotect_fixup()
735 if (!vma) in do_mprotect_pkey()
758 prev = vma; in do_mprotect_pkey()
809 tmp = vma->vm_end; in do_mprotect_pkey()
813 if (vma->vm_ops && vma->vm_ops->mprotect) { in do_mprotect_pkey()
[all …]
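
Note: the pgoff line at mprotect.c:631 recurs almost verbatim in the mlock.c results below and feeds the vma_merge() calls seen in madvise and mremap: before merging or splitting a sub-range, the file page offset that sub-range would have is recomputed from vm_pgoff, which names the file page mapped at vm_start. A worked example with hypothetical numbers (PAGE_SHIFT assumed to be 12):

    #include <assert.h>

    int main(void)
    {
            unsigned long vm_start = 0x7f0000001000UL;  /* VMA start             */
            unsigned long vm_pgoff = 0x10;              /* file page at vm_start */
            unsigned long start    = 0x7f0000003000UL;  /* sub-range start       */

            /* start is two pages past vm_start, so it maps file page 0x12 */
            assert(vm_pgoff + ((start - vm_start) >> 12) == 0x12);
            return 0;
    }
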
memory.c
532 vma->vm_ops ? vma->vm_ops->fault : NULL, in print_bad_pte()
2552 vm_len = vma->vm_end - vma->vm_start; in vm_iomap_memory()
2811 struct vm_area_struct *vma = vmf->vma; in __wp_page_copy_user() local
2958 struct vm_area_struct *vma = vmf->vma; in fault_dirty_shared_page() local
3012 struct vm_area_struct *vma = vmf->vma; in wp_page_reuse() local
3056 struct vm_area_struct *vma = vmf->vma; in wp_page_copy() local
3255 struct vm_area_struct *vma = vmf->vma; in wp_pfn_shared() local
5055 .vma = vma, in __handle_mm_fault()
5897 if (vma && vma->vm_file) { in print_vma_addr()
5908 vma->vm_end - vma->vm_start); in print_vma_addr()
[all …]
huge_memory.c
649 struct vm_area_struct *vma = vmf->vma; in __do_huge_pmd_anonymous_page() local
776 struct vm_area_struct *vma = vmf->vma; in do_huge_pmd_anonymous_page() local
785 khugepaged_enter_vma(vma, vma->vm_flags); in do_huge_pmd_anonymous_page()
895 struct vm_area_struct *vma = vmf->vma; in vmf_insert_pfn_pmd() local
984 struct vm_area_struct *vma = vmf->vma; in vmf_insert_pfn_pud() local
1292 struct vm_area_struct *vma = vmf->vma; in do_huge_pmd_wp_page() local
1299 VM_BUG_ON_VMA(!vma->anon_vma, vma); in do_huge_pmd_wp_page()
1490 struct vm_area_struct *vma = vmf->vma; in do_huge_pmd_numa_page() local
1995 VM_BUG_ON_VMA(vma->vm_start > haddr, vma); in __split_huge_pud_locked()
3210 struct vm_area_struct *vma = pvmw->vma; in set_pmd_migration_entry() local
[all …]
rmap.c
154 avc->vma = vma; in anon_vma_chain_link()
346 vma->anon_vma = NULL; in anon_vma_fork()
357 if (vma->anon_vma) in anon_vma_fork()
393 unlink_anon_vmas(vma); in anon_vma_fork()
424 if (vma->anon_vma) { in unlink_anon_vmas()
938 struct vm_area_struct *vma = pvmw->vma; in page_vma_mkclean_one() local
1060 .vma = vma, in pfn_mkclean_range()
1277 VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma); in folio_add_new_anon_rmap()
2197 vma->vm_mm, address, min(vma->vm_end, in page_make_device_exclusive_one()
2433 struct vm_area_struct *vma = avc->vma; in rmap_walk_anon() local
[all …]
pagewalk.c
159 if (walk->vma) in walk_pmd_range()
312 struct vm_area_struct *vma = walk->vma; in walk_hugetlb_range() local
355 struct vm_area_struct *vma = walk->vma; in walk_page_test() local
382 struct vm_area_struct *vma = walk->vma; in __walk_page_range() local
505 walk.vma = vma; in walk_page_range()
507 vma = find_vma(mm, vma->vm_end); in walk_page_range()
571 .vma = vma, in walk_page_range_vma()
577 if (start < vma->vm_start || end > vma->vm_end) in walk_page_range_vma()
591 .vma = vma, in walk_page_vma()
662 walk.vma = vma; in walk_page_mapping()
[all …]
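
Note: the pagewalk.c hits show struct mm_walk carrying the current VMA into every callback (walk->vma). A minimal sketch of the caller side, assuming the mm_walk_ops API and a hypothetical present-PTE counter:

    #include <linux/mm.h>
    #include <linux/pagewalk.h>

    static int demo_pte_entry(pte_t *pte, unsigned long addr,
                              unsigned long next, struct mm_walk *walk)
    {
            unsigned long *count = walk->private;

            if (pte_present(ptep_get(pte)))
                    (*count)++;
            return 0;       /* non-zero would abort the walk */
    }

    static const struct mm_walk_ops demo_walk_ops = {
            .pte_entry = demo_pte_entry,
    };

    static unsigned long demo_count_present(struct mm_struct *mm,
                                            unsigned long start,
                                            unsigned long end)
    {
            unsigned long count = 0;

            mmap_read_lock(mm);     /* walk_page_range() asserts this lock */
            walk_page_range(mm, start, end, &demo_walk_ops, &count);
            mmap_read_unlock(mm);
            return count;
    }
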
mlock.c
312 struct vm_area_struct *vma = walk->vma; in mlock_pte_range() local
424 vma_is_dax(vma) || vma_is_secretmem(vma)) in mlock_fixup()
428 pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); in mlock_fixup()
430 vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma), in mlock_fixup()
431 vma->vm_userfaultfd_ctx, anon_vma_name(vma)); in mlock_fixup()
433 vma = *prev; in mlock_fixup()
473 *prev = vma; in mlock_fixup()
492 if (!vma) in apply_vma_lock_flags()
497 prev = vma; in apply_vma_lock_flags()
556 count += vma->vm_end - vma->vm_start; in count_mm_mlocked_page_nr()
[all …]
/openbmc/linux/include/linux/
userfaultfd_k.h
133 return vma->vm_flags & VM_UFFD_MISSING; in userfaultfd_missing()
138 return vma->vm_flags & VM_UFFD_WP; in userfaultfd_wp()
143 return vma->vm_flags & VM_UFFD_MINOR; in userfaultfd_minor()
149 return userfaultfd_wp(vma) && pte_uffd_wp(pte); in userfaultfd_pte_wp()
160 return vma->vm_flags & __VM_UFFD_FLAGS; in userfaultfd_armed()
167 (!is_vm_hugetlb_page(vma) && !vma_is_shmem(vma))) in vma_can_userfault()
178 return vma_is_anonymous(vma) || is_vm_hugetlb_page(vma) || in vma_can_userfault()
179 vma_is_shmem(vma); in vma_can_userfault()
305 if (!userfaultfd_wp(vma)) in userfaultfd_wp_use_markers()
309 if (!vma_is_anonymous(vma)) in userfaultfd_wp_use_markers()
[all …]
huge_mm.h
17 struct vm_area_struct *vma);
106 if (!vma_is_anonymous(vma)) { in transhuge_vma_suitable()
107 if (!IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff, in transhuge_vma_suitable()
114 if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end) in transhuge_vma_suitable()
123 if (!vma->vm_file) in file_thp_enabled()
126 inode = vma->vm_file->f_inode; in file_thp_enabled()
129 (vma->vm_flags & VM_EXEC) && in file_thp_enabled()
196 struct vm_area_struct *vma) in pmd_trans_huge_lock() argument
204 struct vm_area_struct *vma) in pud_trans_huge_lock() argument
339 struct vm_area_struct *vma) in pmd_trans_huge_lock() argument
[all …]
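
Note: the transhuge_vma_suitable() hit at huge_mm.h:107 encodes the THP placement rule for file mappings: the virtual page number and the file page offset must agree modulo HPAGE_PMD_NR, or a single PMD cannot map the hugepage. A worked example, assuming 4 KiB pages and HPAGE_PMD_NR == 512 (x86-64):

    #include <assert.h>

    int main(void)
    {
            unsigned long vpn   = 0x7f0000200000UL >> 12;  /* virtual page number */
            unsigned long pgoff = 0x200;                   /* file page offset    */

            /* congruent mod 512, so one PMD can cover the span cleanly */
            assert(((vpn - pgoff) % 512) == 0);
            return 0;
    }
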
/openbmc/linux/drivers/gpu/drm/nouveau/
nouveau_vmm.c
31 if (vma->mem) { in nouveau_vma_unmap()
32 nvif_vmm_unmap(&vma->vmm->vmm, vma->addr); in nouveau_vma_unmap()
33 vma->mem = NULL; in nouveau_vma_unmap()
44 vma->mem = mem; in nouveau_vma_map()
55 return vma; in nouveau_vma_find()
65 if (vma && --vma->refs <= 0) { in nouveau_vma_del()
86 vma->refs++; in nouveau_vma_new()
90 if (!(vma = *pvma = kmalloc(sizeof(*vma), GFP_KERNEL))) in nouveau_vma_new()
92 vma->vmm = vmm; in nouveau_vma_new()
93 vma->refs = 1; in nouveau_vma_new()
[all …]
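
Note: the nouveau_vmm.c hits trace a plain manual refcount: nouveau_vma_new() either bumps refs on an existing mapping or kmallocs a fresh one with refs = 1, and nouveau_vma_del() tears down on the last put. A simplified sketch with lookup and locking elided (the demo_* names are hypothetical):

    #include <linux/slab.h>

    struct demo_vma {
            int refs;
            /* ... mapping state ... */
    };

    static struct demo_vma *demo_get(struct demo_vma *vma)
    {
            vma->refs++;            /* share the existing mapping */
            return vma;
    }

    static void demo_put(struct demo_vma *vma)
    {
            if (--vma->refs <= 0)
                    kfree(vma);     /* last user: free the mapping */
    }
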
/openbmc/linux/drivers/gpu/drm/i915/display/
intel_fb_pin.c
30 struct i915_vma *vma; in intel_pin_fb_obj_dpt() local
74 if (IS_ERR(vma)) { in intel_pin_fb_obj_dpt()
94 vma->display_alignment = max(vma->display_alignment, alignment); in intel_pin_fb_obj_dpt()
98 i915_vma_get(vma); in intel_pin_fb_obj_dpt()
102 return vma; in intel_pin_fb_obj_dpt()
207 if (vma->fence) in intel_pin_and_fence_fb_obj()
227 return vma; in intel_pin_and_fence_fb_obj()
253 if (IS_ERR(vma)) in intel_plane_pin_fb()
299 if (vma) in intel_plane_unpin_fb()
305 if (vma) in intel_plane_unpin_fb()
[all …]
/openbmc/linux/drivers/gpu/drm/msm/
msm_gem_vma.c
48 if (!vma->mapped) in msm_gem_vma_purge()
53 vma->mapped = false; in msm_gem_vma_purge()
67 if (vma->mapped) in msm_gem_vma_map()
70 vma->mapped = true; in msm_gem_vma_map()
101 if (vma->iova) in msm_gem_vma_close()
105 vma->iova = 0; in msm_gem_vma_close()
114 vma = kzalloc(sizeof(*vma), GFP_KERNEL); in msm_gem_vma_new()
115 if (!vma) in msm_gem_vma_new()
120 return vma; in msm_gem_vma_new()
145 vma->iova = vma->node.start; in msm_gem_vma_init()
[all …]
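
Note: msm_gem_vma_init()'s "vma->iova = vma->node.start" at line 145 reads as the tail end of a drm_mm range allocation: the allocator hands back a node and its start becomes the GPU virtual address. A sketch under that assumption (demo_vma is hypothetical, error paths trimmed):

    #include <drm/drm_mm.h>

    struct demo_vma {
            struct drm_mm_node node;
            u64 iova;
    };

    static int demo_vma_alloc(struct drm_mm *mm, struct demo_vma *vma, u64 size)
    {
            int ret = drm_mm_insert_node(mm, &vma->node, size);

            if (ret)
                    return ret;
            vma->iova = vma->node.start;    /* GPU VA = start of the range */
            return 0;
    }
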
/openbmc/linux/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
vmm.c
922 if (addr >= vma->addr + vma->size) in nvkm_vmm_node_search()
983 if (!(vma = nvkm_vma_tail(vma, vma->size + vma->addr - addr))) in nvkm_vmm_node_split()
1006 vma->addr, (u64)vma->size, in nvkm_vma_dump()
1214 if (vma->addr == addr && vma->part && (prev = node(vma, prev))) { in nvkm_vmm_pfn_split_merge()
1242 if (!vma->mapped || vma->memory) in nvkm_vmm_pfn_unmap()
1256 } while ((vma = node(vma, next)) && (start = vma->addr) < limit); in nvkm_vmm_pfn_unmap()
1314 if (!vma->mapref || vma->memory) { in nvkm_vmm_pfn_map()
1370 vma = node(vma, next); in nvkm_vmm_pfn_map()
1611 if (vma->mapref || !vma->sparse) { in nvkm_vmm_put_locked()
1652 if (vma->sparse && !vma->mapref) { in nvkm_vmm_put_locked()
[all …]
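
Note: the test at vmm.c:922 is the half-open interval check driving nvkm_vmm_node_search(): a node covers addr iff addr lies in [node->addr, node->addr + node->size). A sketch of the same check, with a linear scan standing in for the real tree walk:

    #include <stddef.h>
    #include <stdint.h>

    struct demo_node {
            uint64_t addr, size;
            struct demo_node *next;
    };

    static struct demo_node *demo_search(struct demo_node *n, uint64_t addr)
    {
            for (; n; n = n->next) {
                    if (addr < n->addr)
                            continue;       /* below this node     */
                    if (addr >= n->addr + n->size)
                            continue;       /* at or past its end  */
                    return n;               /* inside the interval */
            }
            return NULL;
    }
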
/openbmc/linux/drivers/gpu/drm/i915/selftests/
i915_gem_gtt.c
481 __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node), in fill_hole()
514 __func__, p->name, vma->node.start, vma->node.size, in fill_hole()
523 __func__, p->name, vma->node.start, vma->node.size, in fill_hole()
560 __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node), in fill_hole()
593 __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node), in fill_hole()
602 __func__, p->name, vma->node.start, vma->node.size, in fill_hole()
1446 vma->resource->bi.pages = vma->pages; in track_vma_bind()
1449 list_move_tail(&vma->vm_link, &vma->vm->bound_list); in track_vma_bind()
1583 vma->node.start, vma->node.size, in igt_gtt_reserve()
1629 vma->node.start, vma->node.size, in igt_gtt_reserve()
[all …]
i915_vma.c
73 return vma; in checked_vma_instance()
93 if (i915_vma_compare(vma, vma->vm, in checked_vma_instance()
94 i915_vma_is_ggtt(vma) ? &vma->gtt_view : NULL)) { in checked_vma_instance()
104 return vma; in checked_vma_instance()
656 if (vma->node.size < vma->size) { in igt_vma_rotate_remap()
658 vma->size, vma->node.size); in igt_vma_rotate_remap()
756 if (vma->node.size < vma->size) { in assert_pin()
758 name, vma->size, vma->node.size); in assert_pin()
769 if (vma->pages == vma->obj->mm.pages) { in assert_pin()
781 if (vma->pages != vma->obj->mm.pages) { in assert_pin()
[all …]
/openbmc/linux/drivers/pci/
mmap.c
23 struct vm_area_struct *vma, in pci_mmap_resource_range() argument
30 if (vma->vm_pgoff + vma_pages(vma) > size) in pci_mmap_resource_range()
34 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); in pci_mmap_resource_range()
36 vma->vm_page_prot = pgprot_device(vma->vm_page_prot); in pci_mmap_resource_range()
39 ret = pci_iobar_pfn(pdev, bar, vma); in pci_mmap_resource_range()
43 vma->vm_pgoff += (pci_resource_start(pdev, bar) >> PAGE_SHIFT); in pci_mmap_resource_range()
45 vma->vm_ops = &pci_phys_vm_ops; in pci_mmap_resource_range()
47 return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, in pci_mmap_resource_range()
48 vma->vm_end - vma->vm_start, in pci_mmap_resource_range()
49 vma->vm_page_prot); in pci_mmap_resource_range()
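
Note: the hits above cover pci_mmap_resource_range() nearly end to end: it validates the requested length against the BAR, applies write-combine or device protections, and finishes with io_remap_pfn_range(). A sketch of a caller, assuming the pdev was stashed in file->private_data at open():

    #include <linux/fs.h>
    #include <linux/pci.h>

    static int demo_pci_mmap(struct file *file, struct vm_area_struct *vma)
    {
            struct pci_dev *pdev = file->private_data;

            /* map BAR 0 as uncached device memory (no write-combine) */
            return pci_mmap_resource_range(pdev, 0, vma, pci_mmap_mem, 0);
    }
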
/openbmc/linux/arch/powerpc/mm/book3s64/
radix_hugetlbpage.c
13 struct hstate *hstate = hstate_file(vma->vm_file); in radix__flush_hugetlb_page()
16 radix__flush_tlb_page_psize(vma->vm_mm, vmaddr, psize); in radix__flush_hugetlb_page()
22 struct hstate *hstate = hstate_file(vma->vm_file); in radix__local_flush_hugetlb_page()
25 radix__local_flush_tlb_page_psize(vma->vm_mm, vmaddr, psize); in radix__local_flush_hugetlb_page()
32 struct hstate *hstate = hstate_file(vma->vm_file); in radix__flush_hugetlb_tlb_range()
39 radix__flush_tlb_pwc_range_psize(vma->vm_mm, start, end, psize); in radix__flush_hugetlb_tlb_range()
41 radix__flush_tlb_range_psize(vma->vm_mm, start, end, psize); in radix__flush_hugetlb_tlb_range()
49 struct mm_struct *mm = vma->vm_mm; in radix__huge_ptep_modify_prot_commit()
50 unsigned long psize = huge_page_size(hstate_vma(vma)); in radix__huge_ptep_modify_prot_commit()
60 radix__flush_hugetlb_page(vma, addr); in radix__huge_ptep_modify_prot_commit()
[all …]
/openbmc/linux/drivers/gpu/drm/i915/gt/
intel_ring.c
37 struct i915_vma *vma = ring->vma; in intel_ring_pin() local
57 if (i915_vma_is_map_and_fenceable(vma) && !HAS_LLC(vma->vm->i915)) { in intel_ring_pin()
60 int type = intel_gt_coherent_map_type(vma->vm->gt, vma->obj, false); in intel_ring_pin()
79 i915_vma_unpin(vma); in intel_ring_pin()
96 struct i915_vma *vma = ring->vma; in intel_ring_unpin() local
102 if (i915_vma_is_map_and_fenceable(vma) && !HAS_LLC(vma->vm->i915)) in intel_ring_unpin()
135 if (IS_ERR(vma)) in create_ring_vma()
138 return vma; in create_ring_vma()
142 return vma; in create_ring_vma()
175 if (IS_ERR(vma)) { in intel_engine_create_ring()
[all …]
