
Searched refs:vma (Results 176 – 200 of 1042) sorted by relevance


/openbmc/linux/arch/loongarch/include/asm/
hugetlb.h
45 static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma, in huge_ptep_clear_flush() argument
50 pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep); in huge_ptep_clear_flush()
51 flush_tlb_page(vma, addr); in huge_ptep_clear_flush()
63 static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, in huge_ptep_set_access_flags() argument
71 set_pte_at(vma->vm_mm, addr, ptep, pte); in huge_ptep_set_access_flags()
76 flush_tlb_range(vma, addr, addr + HPAGE_SIZE); in huge_ptep_set_access_flags()
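
The loongarch helpers above follow the usual contract for huge-PTE updates: modify the entry through the mm, then flush the TLB for the affected range. A minimal sketch of that shape, using the generic set_pte_at()/flush_tlb_range() API (this mirrors the pattern only; it is not the loongarch code):

	#include <linux/mm.h>
	#include <linux/hugetlb.h>
	#include <asm/tlbflush.h>

	/* Update-then-flush contract for a huge PTE; illustrative only. */
	static void set_huge_pte_and_flush(struct vm_area_struct *vma,
					   unsigned long addr, pte_t *ptep,
					   pte_t pte)
	{
		set_pte_at(vma->vm_mm, addr, ptep, pte);       /* install entry */
		flush_tlb_range(vma, addr, addr + HPAGE_SIZE); /* drop stale TLB */
	}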
/openbmc/linux/fs/ocfs2/
mmap.c
33 struct vm_area_struct *vma = vmf->vma; in ocfs2_fault() local
41 trace_ocfs2_fault(OCFS2_I(vma->vm_file->f_mapping->host)->ip_blkno, in ocfs2_fault()
42 vma, vmf->page, vmf->pgoff); in ocfs2_fault()
116 struct inode *inode = file_inode(vmf->vma->vm_file); in ocfs2_page_mkwrite()
144 ret = __ocfs2_page_mkwrite(vmf->vma->vm_file, di_bh, page); in ocfs2_page_mkwrite()
162 int ocfs2_mmap(struct file *file, struct vm_area_struct *vma) in ocfs2_mmap() argument
174 vma->vm_ops = &ocfs2_file_vm_ops; in ocfs2_mmap()
/openbmc/linux/arch/hexagon/mm/
vm_fault.c
38 struct vm_area_struct *vma; in do_page_fault() local
60 vma = lock_mm_and_find_vma(mm, address, regs); in do_page_fault()
61 if (unlikely(!vma)) in do_page_fault()
69 if (!(vma->vm_flags & VM_EXEC)) in do_page_fault()
73 if (!(vma->vm_flags & VM_READ)) in do_page_fault()
77 if (!(vma->vm_flags & VM_WRITE)) in do_page_fault()
83 fault = handle_mm_fault(vma, address, flags, regs); in do_page_fault()
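
The hexagon handler above shows the generic arch fault path: find the VMA, check its vm_flags against the access type, then call handle_mm_fault(). A hedged sketch of just the permission check; the cause enum is an illustrative placeholder, not hexagon's actual cause codes:

	#include <linux/mm.h>

	enum fault_cause { FLT_IFETCH, FLT_READ, FLT_WRITE }; /* placeholder */

	/* Does the faulting VMA permit this kind of access? */
	static bool access_permitted(const struct vm_area_struct *vma,
				     enum fault_cause cause)
	{
		switch (cause) {
		case FLT_IFETCH:
			return vma->vm_flags & VM_EXEC;  /* instruction fetch */
		case FLT_READ:
			return vma->vm_flags & VM_READ;  /* data load */
		case FLT_WRITE:
			return vma->vm_flags & VM_WRITE; /* data store */
		}
		return false;
	}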
/openbmc/linux/drivers/gpu/drm/i915/gt/
intel_ring_submission.c
531 struct i915_vma *vma; in alloc_context_vma() local
557 if (IS_ERR(vma)) { in alloc_context_vma()
558 err = PTR_ERR(vma); in alloc_context_vma()
562 return vma; in alloc_context_vma()
580 struct i915_vma *vma; in ring_context_alloc() local
583 if (IS_ERR(vma)) in ring_context_alloc()
586 ce->state = vma; in ring_context_alloc()
1267 engine->wa_ctx.vma = vma; in gen7_ctx_switch_bb_init()
1271 i915_vma_unpin(vma); in gen7_ctx_switch_bb_init()
1297 if (IS_ERR(vma)) { in gen7_ctx_vma()
[all …]
gen7_renderclear.c
28 struct i915_vma *vma; member
92 struct i915_vma *vma, in batch_init() argument
95 bc->vma = vma; in batch_init()
109 return i915_vma_offset(bc->vma); in batch_addr()
380 struct drm_i915_private *i915 = vma->vm->i915; in emit_batch()
432 struct i915_vma * const vma) in gen7_setup_clear_gpr_bb() argument
438 if (!vma) in gen7_setup_clear_gpr_bb()
441 GEM_BUG_ON(vma->obj->base.size < bv.size); in gen7_setup_clear_gpr_bb()
447 emit_batch(vma, memset(batch, 0, bv.size), &bv); in gen7_setup_clear_gpr_bb()
449 i915_gem_object_flush_map(vma->obj); in gen7_setup_clear_gpr_bb()
[all …]
selftest_tlb.c
25 GEM_BUG_ON(addr < i915_vma_offset(vma)); in vma_set_qw()
26 GEM_BUG_ON(addr >= i915_vma_offset(vma) + i915_vma_size(vma) + sizeof(val)); in vma_set_qw()
27 memset64(page_mask_bits(vma->obj->mm.mapping) + in vma_set_qw()
28 (addr - i915_vma_offset(vma)), val, 1); in vma_set_qw()
45 struct i915_vma *vma; in pte_tlbinv() local
54 vma = i915_vma_instance(batch, ce->vm, NULL); in pte_tlbinv()
55 if (IS_ERR(vma)) { in pte_tlbinv()
56 err = PTR_ERR(vma); in pte_tlbinv()
60 err = i915_vma_pin(vma, 0, 0, PIN_USER); in pte_tlbinv()
65 addr = round_up(vma->node.start + vma->node.size, align); in pte_tlbinv()
[all …]
/openbmc/linux/include/asm-generic/
tlb.h
273 extern void tlb_flush_rmaps(struct mmu_gather *tlb, struct vm_area_struct *vma);
416 struct vm_area_struct vma = { in tlb_flush() local
422 flush_tlb_range(&vma, tlb->start, tlb->end); in tlb_flush()
430 tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) in tlb_update_vma_flags() argument
443 tlb->vma_huge = is_vm_hugetlb_page(vma); in tlb_update_vma_flags()
444 tlb->vma_exec = !!(vma->vm_flags & VM_EXEC); in tlb_update_vma_flags()
445 tlb->vma_pfn = !!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)); in tlb_update_vma_flags()
531 static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) in tlb_start_vma() argument
536 tlb_update_vma_flags(tlb, vma); in tlb_start_vma()
538 flush_cache_range(vma, vma->vm_start, vma->vm_end); in tlb_start_vma()
[all …]
/openbmc/linux/drivers/soc/qcom/
rmtfs_mem.c
132 static int qcom_rmtfs_mem_mmap(struct file *filep, struct vm_area_struct *vma) in qcom_rmtfs_mem_mmap() argument
136 if (vma->vm_end - vma->vm_start > rmtfs_mem->size) { in qcom_rmtfs_mem_mmap()
139 vma->vm_end, vma->vm_start, in qcom_rmtfs_mem_mmap()
140 (vma->vm_end - vma->vm_start), &rmtfs_mem->size); in qcom_rmtfs_mem_mmap()
144 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); in qcom_rmtfs_mem_mmap()
145 return remap_pfn_range(vma, in qcom_rmtfs_mem_mmap()
146 vma->vm_start, in qcom_rmtfs_mem_mmap()
148 vma->vm_end - vma->vm_start, in qcom_rmtfs_mem_mmap()
149 vma->vm_page_prot); in qcom_rmtfs_mem_mmap()
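
qcom_rmtfs_mem_mmap() above, like the uio.c, provider.c, and qib_file_ops.c results further down, uses the standard pattern for mmap of a physically contiguous region: bound the requested span by the region size, choose a cacheability via vm_page_prot, and hand the PFNs to remap_pfn_range(). A minimal sketch, assuming a hypothetical region with physical base addr and length size (the struct and names are illustrative, not from the driver above):

	#include <linux/mm.h>

	struct my_region {          /* hypothetical device-memory region */
		phys_addr_t addr;
		size_t size;
	};

	static int my_region_mmap(struct my_region *r, struct vm_area_struct *vma)
	{
		unsigned long len = vma->vm_end - vma->vm_start;

		if (len > r->size)  /* refuse mappings larger than the region */
			return -EINVAL;

		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
		return remap_pfn_range(vma, vma->vm_start,
				       r->addr >> PAGE_SHIFT, len,
				       vma->vm_page_prot);
	}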
/openbmc/linux/drivers/gpu/drm/i915/selftests/
igt_spinner.c
45 unsigned int mode, struct i915_vma **vma) in igt_spinner_pin_obj() argument
50 *vma = i915_vma_instance(obj, ce->vm, NULL); in igt_spinner_pin_obj()
51 if (IS_ERR(*vma)) in igt_spinner_pin_obj()
52 return ERR_CAST(*vma); in igt_spinner_pin_obj()
67 ret = i915_vma_pin_ww(*vma, ww, 0, 0, PIN_USER); in igt_spinner_pin_obj()
69 ret = i915_vma_pin(*vma, 0, 0, PIN_USER); in igt_spinner_pin_obj()
129 struct i915_vma *hws, *vma; in igt_spinner_create_request() local
146 vma = spin->batch_vma; in igt_spinner_create_request()
152 err = igt_vma_move_to_active_unlocked(vma, rq, 0); in igt_spinner_create_request()
190 *batch++ = lower_32_bits(i915_vma_offset(vma)); in igt_spinner_create_request()
[all …]
/openbmc/linux/drivers/gpu/drm/i915/gem/selftests/
i915_gem_mman.c
99 struct i915_vma *vma; in check_partial_mapping() local
130 if (IS_ERR(vma)) { in check_partial_mapping()
133 return PTR_ERR(vma); in check_partial_mapping()
140 i915_vma_unpin(vma); in check_partial_mapping()
167 vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride, in check_partial_mapping()
192 struct i915_vma *vma; in check_partial_mappings() local
227 if (IS_ERR(vma)) { in check_partial_mappings()
263 vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride, in check_partial_mappings()
552 if (IS_ERR(vma)) in make_obj_busy()
786 if (IS_ERR(vma)) in gtt_set()
[all …]
/openbmc/linux/drivers/infiniband/hw/qib/
qib_file_ops.c
715 if ((vma->vm_end - vma->vm_start) > len) { in qib_mmap_mem()
718 vma->vm_end - vma->vm_start, len); in qib_mmap_mem()
763 if ((vma->vm_end - vma->vm_start) > sz) { in mmap_ureg()
766 vma->vm_end - vma->vm_start); in mmap_ureg()
773 ret = io_remap_pfn_range(vma, vma->vm_start, in mmap_ureg()
775 vma->vm_end - vma->vm_start, in mmap_ureg()
798 vma->vm_end - vma->vm_start); in mmap_piobufs()
820 vma->vm_end - vma->vm_start, in mmap_piobufs()
840 vma->vm_end - vma->vm_start, in mmap_rcvegrbufs()
949 len = vma->vm_end - vma->vm_start; in mmap_kvaddr()
[all …]
/openbmc/linux/drivers/vfio/fsl-mc/
vfio_fsl_mc.c
360 struct vm_area_struct *vma) in vfio_fsl_mc_mmap_mmio() argument
362 u64 size = vma->vm_end - vma->vm_start; in vfio_fsl_mc_mmap_mmio()
366 pgoff = vma->vm_pgoff & in vfio_fsl_mc_mmap_mmio()
376 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); in vfio_fsl_mc_mmap_mmio()
380 return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, in vfio_fsl_mc_mmap_mmio()
394 if (vma->vm_end < vma->vm_start) in vfio_fsl_mc_mmap()
396 if (vma->vm_start & ~PAGE_MASK) in vfio_fsl_mc_mmap()
398 if (vma->vm_end & ~PAGE_MASK) in vfio_fsl_mc_mmap()
409 && (vma->vm_flags & VM_READ)) in vfio_fsl_mc_mmap()
413 && (vma->vm_flags & VM_WRITE)) in vfio_fsl_mc_mmap()
[all …]
/openbmc/linux/drivers/infiniband/hw/cxgb4/
provider.c
127 int len = vma->vm_end - vma->vm_start; in c4iw_mmap()
157 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); in c4iw_mmap()
158 ret = io_remap_pfn_range(vma, vma->vm_start, in c4iw_mmap()
160 len, vma->vm_page_prot); in c4iw_mmap()
169 vma->vm_page_prot = t4_pgprot_wc(vma->vm_page_prot); in c4iw_mmap()
172 vma->vm_page_prot = in c4iw_mmap()
175 vma->vm_page_prot = in c4iw_mmap()
178 ret = io_remap_pfn_range(vma, vma->vm_start, in c4iw_mmap()
180 len, vma->vm_page_prot); in c4iw_mmap()
186 ret = remap_pfn_range(vma, vma->vm_start, in c4iw_mmap()
[all …]
/openbmc/linux/arch/sh/mm/
tlbflush_32.c
15 void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) in local_flush_tlb_page() argument
19 if (vma->vm_mm && cpu_context(cpu, vma->vm_mm) != NO_CONTEXT) { in local_flush_tlb_page()
24 asid = cpu_asid(cpu, vma->vm_mm); in local_flush_tlb_page()
28 if (vma->vm_mm != current->mm) { in local_flush_tlb_page()
39 void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, in local_flush_tlb_range() argument
42 struct mm_struct *mm = vma->vm_mm; in local_flush_tlb_range()
/openbmc/linux/arch/powerpc/mm/book3s64/
pkeys.c
378 static inline bool vma_is_pkey_exec_only(struct vm_area_struct *vma) in vma_is_pkey_exec_only() argument
381 if ((vma->vm_flags & VM_ACCESS_FLAGS) != VM_EXEC) in vma_is_pkey_exec_only()
384 return (vma_pkey(vma) == vma->vm_mm->context.execute_only_pkey); in vma_is_pkey_exec_only()
390 int __arch_override_mprotect_pkey(struct vm_area_struct *vma, int prot, in __arch_override_mprotect_pkey() argument
397 if (vma_is_pkey_exec_only(vma) && (prot != PROT_EXEC)) in __arch_override_mprotect_pkey()
405 pkey = execute_only_pkey(vma->vm_mm); in __arch_override_mprotect_pkey()
411 return vma_pkey(vma); in __arch_override_mprotect_pkey()
446 bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write, in arch_vma_access_permitted() argument
454 if (foreign || vma_is_foreign(vma)) in arch_vma_access_permitted()
457 return pkey_access_permitted(vma_pkey(vma), write, execute); in arch_vma_access_permitted()
/openbmc/linux/arch/um/kernel/
trap.c
28 struct vm_area_struct *vma; in handle_page_fault() local
47 vma = find_vma(mm, address); in handle_page_fault()
48 if (!vma) in handle_page_fault()
50 if (vma->vm_start <= address) in handle_page_fault()
52 if (!(vma->vm_flags & VM_GROWSDOWN)) in handle_page_fault()
56 vma = expand_stack(mm, address); in handle_page_fault()
57 if (!vma) in handle_page_fault()
63 if (!(vma->vm_flags & VM_WRITE)) in handle_page_fault()
68 if (!(vma->vm_flags & (VM_READ | VM_EXEC))) in handle_page_fault()
75 fault = handle_mm_fault(vma, address, flags, NULL); in handle_page_fault()
[all …]
/openbmc/linux/drivers/misc/sgi-gru/
grufault.c
50 struct vm_area_struct *vma; in gru_find_vma() local
53 if (vma && vma->vm_ops == &gru_vm_ops) in gru_find_vma()
54 return vma; in gru_find_vma()
69 struct vm_area_struct *vma; in gru_find_lock_gts() local
73 vma = gru_find_vma(vaddr); in gru_find_lock_gts()
74 if (vma) in gru_find_lock_gts()
75 gts = gru_find_thread_state(vma, TSID(vaddr, vma)); in gru_find_lock_gts()
90 vma = gru_find_vma(vaddr); in gru_alloc_locked_gts()
91 if (!vma) in gru_alloc_locked_gts()
94 gts = gru_alloc_thread_state(vma, TSID(vaddr, vma)); in gru_alloc_locked_gts()
[all …]
/openbmc/linux/mm/
swap_state.c
58 #define GET_SWAP_RA_VAL(vma) \ argument
349 if (vma && vma_ra) { in swap_cache_get_folio()
353 ra_val = GET_SWAP_RA_VAL(vma); in swap_cache_get_folio()
364 if (!vma || !vma_ra) in swap_cache_get_folio()
528 struct vm_area_struct *vma, in read_swap_cache_async() argument
533 vma, addr, &page_was_allocated); in read_swap_cache_async()
632 struct vm_area_struct *vma = vmf->vma; in swap_cluster_readahead() local
719 struct vm_area_struct *vma = vmf->vma; in swap_ra_info() local
734 ra_val = GET_SWAP_RA_VAL(vma); in swap_ra_info()
759 end = min3(rpfn, PFN_DOWN(vma->vm_end), in swap_ra_info()
[all …]
/openbmc/linux/drivers/uio/
uio.c
664 return (int)vma->vm_pgoff; in uio_find_mem_index()
739 if (vma->vm_end - vma->vm_start > mem->size) in uio_mmap_physical()
744 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); in uio_mmap_physical()
755 return remap_pfn_range(vma, in uio_mmap_physical()
756 vma->vm_start, in uio_mmap_physical()
758 vma->vm_end - vma->vm_start, in uio_mmap_physical()
759 vma->vm_page_prot); in uio_mmap_physical()
770 if (vma->vm_end < vma->vm_start) in uio_mmap()
773 vma->vm_private_data = idev; in uio_mmap()
781 mi = uio_find_mem_index(vma); in uio_mmap()
[all …]
/openbmc/linux/arch/csky/abiv1/
cacheflush.c
44 void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma, in update_mmu_cache_range() argument
50 flush_tlb_page(vma, addr); in update_mmu_cache_range()
63 if (vma->vm_flags & VM_EXEC) in update_mmu_cache_range()
68 void flush_cache_range(struct vm_area_struct *vma, unsigned long start, in flush_cache_range() argument
73 if (vma->vm_flags & VM_EXEC) in flush_cache_range()
/openbmc/linux/arch/parisc/include/asm/
cacheflush.h
61 void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
70 void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
72 void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
74 void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
76 void flush_cache_range(struct vm_area_struct *vma,
80 void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr);
/openbmc/linux/ipc/
shm.c
309 sfd->vm_ops->open(vma); in shm_open()
586 pol = vma->vm_policy; in shm_get_policy()
1779 (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) { in ksys_shmdt()
1787 file = vma->vm_file; in ksys_shmdt()
1789 do_vma_munmap(&vmi, vma, vma->vm_start, vma->vm_end, in ksys_shmdt()
1809 while (vma && (loff_t)(vma->vm_end - addr) <= size) { in ksys_shmdt()
1812 ((vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) && in ksys_shmdt()
1814 do_vma_munmap(&vmi, vma, vma->vm_start, vma->vm_end, in ksys_shmdt()
1818 vma = vma_next(&vmi); in ksys_shmdt()
1826 if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) { in ksys_shmdt()
[all …]
/openbmc/linux/arch/arm64/mm/
hugetlbpage.c
214 struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0); in get_clear_contig_flush() local
216 flush_tlb_range(&vma, addr, addr + (pgsize * ncontig)); in get_clear_contig_flush()
235 struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0); in clear_flush() local
241 flush_tlb_range(&vma, saddr, addr); in clear_flush()
302 ptep = huge_pmd_share(mm, vma, addr, pudp); in huge_pte_alloc()
457 struct mm_struct *mm = vma->vm_mm; in huge_ptep_set_access_flags()
513 pte_t huge_ptep_clear_flush(struct vm_area_struct *vma, in huge_ptep_clear_flush() argument
516 struct mm_struct *mm = vma->vm_mm; in huge_ptep_clear_flush()
521 return ptep_clear_flush(vma, addr, ptep); in huge_ptep_clear_flush()
555 return huge_ptep_clear_flush(vma, addr, ptep); in huge_ptep_modify_prot_start()
[all …]
/openbmc/linux/arch/s390/mm/
fault.c
364 struct vm_area_struct *vma; in do_exception() local
413 if (!vma) in do_exception()
415 if (!(vma->vm_flags & access)) { in do_exception()
416 vma_end_read(vma); in do_exception()
421 vma_end_read(vma); in do_exception()
454 vma = find_vma(mm, address); in do_exception()
455 if (!vma) in do_exception()
462 if (!vma) in do_exception()
590 struct vm_area_struct *vma; in do_secure_storage_access() local
637 vma = find_vma(mm, addr); in do_secure_storage_access()
[all …]
/openbmc/linux/arch/x86/kernel/cpu/sgx/
driver.c
84 static int sgx_mmap(struct file *file, struct vm_area_struct *vma) in sgx_mmap() argument
89 ret = sgx_encl_may_map(encl, vma->vm_start, vma->vm_end, vma->vm_flags); in sgx_mmap()
93 ret = sgx_encl_mm_add(encl, vma->vm_mm); in sgx_mmap()
97 vma->vm_ops = &sgx_vm_ops; in sgx_mmap()
98 vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO); in sgx_mmap()
99 vma->vm_private_data = encl; in sgx_mmap()
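
sgx_mmap() above, like ocfs2_mmap() near the top of these results, shows the other common mmap style: nothing is remapped at mmap time; the driver installs a vm_operations_struct and populates pages lazily from the .fault callback. A hedged sketch with illustrative names (the page lookup in my_fault() is elided):

	#include <linux/fs.h>
	#include <linux/mm.h>

	static vm_fault_t my_fault(struct vm_fault *vmf)
	{
		struct vm_area_struct *vma = vmf->vma;  /* faulting VMA */

		/* ... look up or allocate the page backing vmf->pgoff ... */
		(void)vma;
		return VM_FAULT_SIGBUS;  /* placeholder result */
	}

	static const struct vm_operations_struct my_vm_ops = {
		.fault = my_fault,
	};

	static int my_mmap(struct file *file, struct vm_area_struct *vma)
	{
		vma->vm_ops = &my_vm_ops;  /* pages arrive on first fault */
		vma->vm_private_data = file->private_data;
		return 0;
	}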
