
Searched refs:mmu (Results 1 – 25 of 393) sorted by relevance

/openbmc/linux/drivers/staging/media/ipu3/
ipu3-mmu.c
89 func(mmu); in call_if_imgu_is_powered()
217 if (!mmu) in __imgu_mmu_map()
346 if (!mmu) in __imgu_mmu_unmap()
432 mmu = kzalloc(sizeof(*mmu), GFP_KERNEL); in imgu_mmu_init()
433 if (!mmu) in imgu_mmu_init()
467 mmu->l2pts = vzalloc(IPU3_PT_PTES * sizeof(*mmu->l2pts)); in imgu_mmu_init()
468 if (!mmu->l2pts) in imgu_mmu_init()
472 mmu->l1pt = imgu_mmu_alloc_page_table(mmu->dummy_l2pt_pteval); in imgu_mmu_init()
473 if (!mmu->l1pt) in imgu_mmu_init()
493 kfree(mmu); in imgu_mmu_init()
[all …]
/openbmc/linux/drivers/gpu/drm/nouveau/nvif/
mmu.c
33 kfree(mmu->kind); in nvif_mmu_dtor()
34 kfree(mmu->type); in nvif_mmu_dtor()
35 kfree(mmu->heap); in nvif_mmu_dtor()
53 mmu->heap = NULL; in nvif_mmu_ctor()
54 mmu->type = NULL; in nvif_mmu_ctor()
55 mmu->kind = NULL; in nvif_mmu_ctor()
72 mmu->heap = kmalloc_array(mmu->heap_nr, sizeof(*mmu->heap), in nvif_mmu_ctor()
74 mmu->type = kmalloc_array(mmu->type_nr, sizeof(*mmu->type), in nvif_mmu_ctor()
76 if (ret = -ENOMEM, !mmu->heap || !mmu->type) in nvif_mmu_ctor()
79 mmu->kind = kmalloc_array(mmu->kind_nr, sizeof(*mmu->kind), in nvif_mmu_ctor()
[all …]
mem.c
28 nvif_mem_ctor_map(struct nvif_mmu *mmu, const char *name, u8 type, u64 size, in nvif_mem_ctor_map() argument
31 int ret = nvif_mem_ctor(mmu, name, mmu->mem, NVIF_MEM_MAPPABLE | type, in nvif_mem_ctor_map()
48 nvif_mem_ctor_type(struct nvif_mmu *mmu, const char *name, s32 oclass, in nvif_mem_ctor_type() argument
72 ret = nvif_object_ctor(&mmu->object, name ? name : "nvifMem", 0, oclass, in nvif_mem_ctor_type()
75 mem->type = mmu->type[type].type; in nvif_mem_ctor_type()
88 nvif_mem_ctor(struct nvif_mmu *mmu, const char *name, s32 oclass, u8 type, in nvif_mem_ctor() argument
95 for (i = 0; ret && i < mmu->type_nr; i++) { in nvif_mem_ctor()
96 if ((mmu->type[i].type & type) == type) { in nvif_mem_ctor()
97 ret = nvif_mem_ctor_type(mmu, name, oclass, i, page, in nvif_mem_ctor()
/openbmc/linux/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
base.c
258 mmu->type[mmu->type_nr].type = type | mmu->heap[heap].type; in nvkm_mmu_type()
259 mmu->type[mmu->type_nr].heap = heap; in nvkm_mmu_type()
260 mmu->type_nr++; in nvkm_mmu_type()
268 if (!WARN_ON(mmu->heap_nr == ARRAY_SIZE(mmu->heap))) { in nvkm_mmu_heap()
269 mmu->heap[mmu->heap_nr].type = type; in nvkm_mmu_heap()
270 mmu->heap[mmu->heap_nr].size = size; in nvkm_mmu_heap()
373 if (mmu->subdev.device->fb && mmu->subdev.device->fb->ram) in nvkm_mmu_oneinit()
393 mmu->func->init(mmu); in nvkm_mmu_init()
406 return mmu; in nvkm_mmu_dtor()
421 mmu->func = func; in nvkm_mmu_ctor()
[all …]
Kbuild
2 nvkm-y += nvkm/subdev/mmu/base.o
3 nvkm-y += nvkm/subdev/mmu/nv04.o
4 nvkm-y += nvkm/subdev/mmu/nv41.o
5 nvkm-y += nvkm/subdev/mmu/nv44.o
6 nvkm-y += nvkm/subdev/mmu/nv50.o
7 nvkm-y += nvkm/subdev/mmu/g84.o
8 nvkm-y += nvkm/subdev/mmu/mcp77.o
9 nvkm-y += nvkm/subdev/mmu/gf100.o
10 nvkm-y += nvkm/subdev/mmu/gk104.o
11 nvkm-y += nvkm/subdev/mmu/gk20a.o
[all …]
ummu.c
35 struct nvkm_mmu *mmu = nvkm_ummu(object)->mmu; in nvkm_ummu_sclass() local
59 struct nvkm_mmu *mmu = ummu->mmu; in nvkm_ummu_heap() local
79 struct nvkm_mmu *mmu = ummu->mmu; in nvkm_ummu_type() local
89 type = mmu->type[index].type; in nvkm_ummu_type()
108 struct nvkm_mmu *mmu = ummu->mmu; in nvkm_ummu_kind() local
116 if (mmu->func->kind) in nvkm_ummu_kind()
117 kind = mmu->func->kind(mmu, &count, &kind_inv); in nvkm_ummu_kind()
159 struct nvkm_mmu *mmu = device->mmu; in nvkm_ummu_new() local
164 if (mmu->func->kind) in nvkm_ummu_new()
165 mmu->func->kind(mmu, &kinds, &unused); in nvkm_ummu_new()
[all …]
umem.c
72 struct nvkm_device *device = umem->mmu->subdev.device; in nvkm_umem_unmap()
90 struct nvkm_mmu *mmu = umem->mmu; in nvkm_umem_map() local
109 int ret = mmu->func->mem.umap(mmu, umem->memory, argv, argc, in nvkm_umem_map()
145 struct nvkm_mmu *mmu = nvkm_ummu(oclass->parent)->mmu; in nvkm_umem_new() local
161 if (type >= mmu->type_nr) in nvkm_umem_new()
167 umem->mmu = mmu; in nvkm_umem_new()
168 umem->type = mmu->type[type].type; in nvkm_umem_new()
172 if (mmu->type[type].type & NVKM_MEM_MAPPABLE) { in nvkm_umem_new()
177 ret = nvkm_mem_new_type(mmu, type, page, size, argv, argc, in nvkm_umem_new()
/openbmc/linux/drivers/staging/media/atomisp/pci/mmu/
isp_mmu.c
82 return mmu->driver->pte_to_phys(mmu, pte); in isp_pte_to_pgaddr()
88 unsigned int pte = mmu->driver->phys_to_pte(mmu, phys); in isp_pgaddr_to_pte_valid()
325 if (!ISP_PTE_VALID(mmu, mmu->l1_pte)) { in mmu_map()
340 mmu->l1_pte = isp_pgaddr_to_pte_valid(mmu, l1_pt); in mmu_map()
344 l1_pt = isp_pte_to_pgaddr(mmu, mmu->l1_pte); in mmu_map()
455 if (!ISP_PTE_VALID(mmu, mmu->l1_pte)) { in mmu_unmap()
456 mmu_unmap_l1_pt_error(mmu, mmu->l1_pte); in mmu_unmap()
461 l1_pt = isp_pte_to_pgaddr(mmu, mmu->l1_pte); in mmu_unmap()
545 if (!mmu) in isp_mmu_exit()
548 if (!ISP_PTE_VALID(mmu, mmu->l1_pte)) { in isp_mmu_exit()
[all …]
/openbmc/linux/drivers/iommu/
ipmmu-vmsa.c
152 return mmu->root == mmu; in ipmmu_is_root()
227 if (domain->mmu != domain->mmu->root) in ipmmu_ctx_write_all()
283 struct ipmmu_vmsa_device *mmu = domain->mmu; in ipmmu_utlb_enable() local
328 ret = find_first_zero_bit(mmu->ctx, mmu->num_ctx); in ipmmu_domain_allocate_context()
479 struct ipmmu_vmsa_device *mmu = domain->mmu; in ipmmu_domain_irq() local
592 if (!mmu) { in ipmmu_attach_device()
601 domain->mmu = mmu; in ipmmu_attach_device()
610 } else if (domain->mmu != mmu) { in ipmmu_attach_device()
803 if (!mmu) in ipmmu_probe_device()
985 if (!mmu) { in ipmmu_probe()
[all …]
/openbmc/linux/arch/x86/kernel/
paravirt.c
268 .mmu.pmd_val = PTE_IDENT,
269 .mmu.make_pmd = PTE_IDENT,
271 .mmu.pud_val = PTE_IDENT,
272 .mmu.make_pud = PTE_IDENT,
277 .mmu.p4d_val = PTE_IDENT,
278 .mmu.make_p4d = PTE_IDENT,
283 .mmu.pte_val = PTE_IDENT,
284 .mmu.pgd_val = PTE_IDENT,
286 .mmu.make_pte = PTE_IDENT,
287 .mmu.make_pgd = PTE_IDENT,
[all …]
/openbmc/linux/drivers/staging/media/atomisp/include/mmu/
isp_mmu.h
100 void (*tlb_flush_range)(struct isp_mmu *mmu,
102 void (*tlb_flush_all)(struct isp_mmu *mmu);
120 #define ISP_PTE_VALID_MASK(mmu) \ argument
121 ((mmu)->driver->pte_valid_mask)
123 #define ISP_PTE_VALID(mmu, pte) \ argument
124 ((pte) & ISP_PTE_VALID_MASK(mmu))
136 void isp_mmu_exit(struct isp_mmu *mmu);
156 if (mmu->driver && mmu->driver->tlb_flush_all) in isp_mmu_flush_tlb_all()
157 mmu->driver->tlb_flush_all(mmu); in isp_mmu_flush_tlb_all()
165 if (mmu->driver && mmu->driver->tlb_flush_range) in isp_mmu_flush_tlb_range()
[all …]
/openbmc/qemu/target/microblaze/
mmu.c
39 MicroBlazeMMU *mmu = &env->mmu; in mmu_flush_idx() local
59 MicroBlazeMMU *mmu = &env->mmu; in mmu_change_pid() local
70 if (mmu->tids[i] && ((mmu->regs[MMU_R_PID] & 0xff) == mmu->tids[i])) in mmu_change_pid()
80 MicroBlazeMMU *mmu = &cpu->env.mmu; in mmu_translate() local
104 if (mmu->tids[i] in mmu_translate()
105 && ((mmu->regs[MMU_R_PID] & 0xff) != mmu->tids[i])) { in mmu_translate()
206 env->mmu.regs[MMU_R_PID] = env->mmu.tids[i]; in mmu_read()
239 rn < 3 ? env->mmu.regs[rn] : env->mmu.regs[MMU_R_TLBX]); in mmu_write()
260 env->mmu.tids[i] = env->mmu.regs[MMU_R_PID] & 0xff; in mmu_write()
294 env->mmu.regs[rn] = deposit32(env->mmu.regs[rn], 0, 31, v); in mmu_write()
[all …]
/openbmc/linux/drivers/gpu/drm/panfrost/
panfrost_mmu.c
164 as = mmu->as; in panfrost_mmu_as_get()
210 mmu->as = as; in panfrost_mmu_as_get()
240 mmu->as = -1; in panfrost_mmu_reset()
276 if (mmu->as < 0) in panfrost_mmu_flush_range()
572 kfree(mmu); in panfrost_mmu_release_ctx()
584 return mmu; in panfrost_mmu_ctx_get()
617 mmu = kzalloc(sizeof(*mmu), GFP_KERNEL); in panfrost_mmu_ctx_create()
618 if (!mmu) in panfrost_mmu_ctx_create()
629 mmu->as = -1; in panfrost_mmu_ctx_create()
643 kfree(mmu); in panfrost_mmu_ctx_create()
[all …]
/openbmc/linux/drivers/gpu/drm/msm/
msm_mmu.h
13 void (*detach)(struct msm_mmu *mmu);
14 int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
16 int (*unmap)(struct msm_mmu *mmu, uint64_t iova, size_t len);
17 void (*destroy)(struct msm_mmu *mmu);
18 void (*resume_translation)(struct msm_mmu *mmu);
38 mmu->dev = dev; in msm_mmu_init()
39 mmu->funcs = funcs; in msm_mmu_init()
40 mmu->type = type; in msm_mmu_init()
50 mmu->arg = arg; in msm_mmu_set_fault_handler()
51 mmu->handler = handler; in msm_mmu_set_fault_handler()
[all …]
msm_iommu.c
177 if (mmu->type != MSM_MMU_IOMMU_PAGETABLE) in msm_iommu_pagetable_params()
180 pagetable = to_pagetable(mmu); in msm_iommu_pagetable_params()
326 struct msm_mmu *mmu = &iommu->base; in msm_fault_handler() local
340 if (mmu->funcs->resume_translation) in msm_fault_handler()
341 mmu->funcs->resume_translation(mmu); in msm_fault_handler()
441 struct msm_mmu *mmu; in msm_iommu_gpu_new() local
443 mmu = msm_iommu_new(dev, quirks); in msm_iommu_gpu_new()
444 if (IS_ERR_OR_NULL(mmu)) in msm_iommu_gpu_new()
445 return mmu; in msm_iommu_gpu_new()
447 iommu = to_msm_iommu(mmu); in msm_iommu_gpu_new()
[all …]
msm_gem_vma.c
19 if (aspace->mmu) in msm_gem_address_space_destroy()
20 aspace->mmu->funcs->destroy(aspace->mmu); in msm_gem_address_space_destroy()
51 aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size); in msm_gem_vma_purge()
84 ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt, size, prot); in msm_gem_vma_map()
154 msm_gem_address_space_create(struct msm_mmu *mmu, const char *name, in msm_gem_address_space_create() argument
159 if (IS_ERR(mmu)) in msm_gem_address_space_create()
160 return ERR_CAST(mmu); in msm_gem_address_space_create()
168 aspace->mmu = mmu; in msm_gem_address_space_create()
msm_gpummu.c
24 static void msm_gpummu_detach(struct msm_mmu *mmu) in msm_gpummu_detach() argument
28 static int msm_gpummu_map(struct msm_mmu *mmu, uint64_t iova, in msm_gpummu_map() argument
31 struct msm_gpummu *gpummu = to_msm_gpummu(mmu); in msm_gpummu_map()
56 static int msm_gpummu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len) in msm_gpummu_unmap() argument
58 struct msm_gpummu *gpummu = to_msm_gpummu(mmu); in msm_gpummu_unmap()
71 static void msm_gpummu_resume_translation(struct msm_mmu *mmu) in msm_gpummu_resume_translation() argument
75 static void msm_gpummu_destroy(struct msm_mmu *mmu) in msm_gpummu_destroy() argument
77 struct msm_gpummu *gpummu = to_msm_gpummu(mmu); in msm_gpummu_destroy()
79 dma_free_attrs(mmu->dev, TABLE_SIZE, gpummu->table, gpummu->pt_base, in msm_gpummu_destroy()
114 void msm_gpummu_params(struct msm_mmu *mmu, dma_addr_t *pt_base, in msm_gpummu_params() argument
[all …]
/openbmc/linux/arch/arc/mm/
tlb.c
139 int num_tlb = mmu->sets * mmu->ways; in local_flush_tlb_all()
579 mmu->ver = (bcr >> 24); in arc_mmu_mumbojumbo()
584 mmu->sets = 1 << mmu3->sets; in arc_mmu_mumbojumbo()
598 mmu->pae = mmu4->pae; in arc_mmu_mumbojumbo()
601 if (mmu->s_pg_sz_m) in arc_mmu_mumbojumbo()
603 mmu->s_pg_sz_m, in arc_mmu_mumbojumbo()
608 mmu->ver, mmu->pg_sz_k, super_pg, CONFIG_PGTABLE_LEVELS, in arc_mmu_mumbojumbo()
609 mmu->sets, mmu->ways, in arc_mmu_mumbojumbo()
688 #define SET_WAY_TO_IDX(mmu, set, way) ((set) * mmu->ways + (way)) argument
705 int set, n_ways = mmu->ways; in do_tlb_overlap_fault()
[all …]
/openbmc/linux/drivers/accel/ivpu/
ivpu_mmu.c
279 struct ivpu_mmu_info *mmu = vdev->mmu; in ivpu_mmu_cdtab_alloc() local
294 struct ivpu_mmu_info *mmu = vdev->mmu; in ivpu_mmu_strtab_alloc() local
314 struct ivpu_mmu_info *mmu = vdev->mmu; in ivpu_mmu_cmdq_alloc() local
333 struct ivpu_mmu_info *mmu = vdev->mmu; in ivpu_mmu_evtq_alloc() local
484 struct ivpu_mmu_info *mmu = vdev->mmu; in ivpu_mmu_reset() local
557 struct ivpu_mmu_info *mmu = vdev->mmu; in ivpu_mmu_strtab_link_cd() local
598 struct ivpu_mmu_info *mmu = vdev->mmu; in ivpu_mmu_invalidate_tlb() local
617 struct ivpu_mmu_info *mmu = vdev->mmu; in ivpu_mmu_cd_add() local
705 struct ivpu_mmu_info *mmu = vdev->mmu; in ivpu_mmu_init() local
742 struct ivpu_mmu_info *mmu = vdev->mmu; in ivpu_mmu_enable() local
[all …]
/openbmc/linux/arch/arm64/kvm/hyp/nvhe/
tlb.c
17 static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu, in __tlb_switch_to_guest() argument
65 __load_stage2(mmu, kern_hyp_va(mmu->arch)); in __tlb_switch_to_guest()
81 void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, in __kvm_tlb_flush_vmid_ipa() argument
87 __tlb_switch_to_guest(mmu, &cxt, false); in __kvm_tlb_flush_vmid_ipa()
139 __tlb_switch_to_guest(mmu, &cxt, true); in __kvm_tlb_flush_vmid_ipa_nsh()
185 void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu, in __kvm_tlb_flush_vmid_range() argument
199 __tlb_switch_to_guest(mmu, &cxt, false); in __kvm_tlb_flush_vmid_range()
215 void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu) in __kvm_tlb_flush_vmid() argument
220 __tlb_switch_to_guest(mmu, &cxt, false); in __kvm_tlb_flush_vmid()
229 void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu) in __kvm_flush_cpu_context() argument
[all …]
/openbmc/linux/arch/x86/include/asm/
paravirt.h
71 PVOP_VCALL0(mmu.flush_tlb_user); in __flush_tlb_local()
76 PVOP_VCALL0(mmu.flush_tlb_kernel); in __flush_tlb_global()
97 PVOP_VCALL1(mmu.exit_mmap, mm); in paravirt_arch_exit_mmap()
151 PVOP_VCALL1(mmu.write_cr2, x); in write_cr2()
162 PVOP_ALT_VCALL1(mmu.write_cr3, x, in write_cr3()
339 PVOP_VCALL1(mmu.enter_mmap, next); in paravirt_enter_mmap()
358 PVOP_VCALL1(mmu.release_pte, pfn); in paravirt_release_pte()
368 PVOP_VCALL1(mmu.release_pmd, pfn); in paravirt_release_pmd()
561 PVOP_VCALL0(mmu.lazy_mode.enter); in arch_enter_lazy_mmu_mode()
566 PVOP_VCALL0(mmu.lazy_mode.leave); in arch_leave_lazy_mmu_mode()
[all …]
/openbmc/linux/arch/arm64/kvm/hyp/vhe/
tlb.c
19 static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu, in __tlb_switch_to_guest() argument
56 __load_stage2(mmu, mmu->arch); in __tlb_switch_to_guest()
82 void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, in __kvm_tlb_flush_vmid_ipa() argument
90 __tlb_switch_to_guest(mmu, &cxt); in __kvm_tlb_flush_vmid_ipa()
122 __tlb_switch_to_guest(mmu, &cxt); in __kvm_tlb_flush_vmid_ipa_nsh()
146 void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu, in __kvm_tlb_flush_vmid_range() argument
162 __tlb_switch_to_guest(mmu, &cxt); in __kvm_tlb_flush_vmid_range()
174 void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu) in __kvm_tlb_flush_vmid() argument
181 __tlb_switch_to_guest(mmu, &cxt); in __kvm_tlb_flush_vmid()
190 void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu) in __kvm_flush_cpu_context() argument
[all …]
/openbmc/linux/drivers/xen/
grant-dma-iommu.c
36 struct grant_dma_iommu_device *mmu; in grant_dma_iommu_probe() local
39 mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL); in grant_dma_iommu_probe()
40 if (!mmu) in grant_dma_iommu_probe()
43 mmu->dev = &pdev->dev; in grant_dma_iommu_probe()
45 ret = iommu_device_register(&mmu->iommu, &grant_dma_iommu_ops, &pdev->dev); in grant_dma_iommu_probe()
49 platform_set_drvdata(pdev, mmu); in grant_dma_iommu_probe()
56 struct grant_dma_iommu_device *mmu = platform_get_drvdata(pdev); in grant_dma_iommu_remove() local
59 iommu_device_unregister(&mmu->iommu); in grant_dma_iommu_remove()
/openbmc/qemu/target/nios2/
mmu.c
36 int pid = FIELD_EX32(env->mmu.tlbmisc_wr, CR_TLBMISC, PID); in mmu_translate()
42 Nios2TLBEntry *entry = &env->mmu.tlb[index]; in mmu_translate()
70 Nios2TLBEntry *entry = &env->mmu.tlb[idx]; in mmu_flush_pid()
105 &env->mmu.tlb[(way * cpu->tlb_num_ways) + in helper_mmu_write_tlbacc()
106 (vpn & env->mmu.tlb_entry_mask)]; in helper_mmu_write_tlbacc()
126 env->mmu.tlbacc_wr = v; in helper_mmu_write_tlbacc()
153 &env->mmu.tlb[(way * cpu->tlb_num_ways) + in helper_mmu_write_tlbmisc()
169 env->mmu.tlbmisc_wr = v; in helper_mmu_write_tlbmisc()
180 env->mmu.pteaddr_wr = v; in helper_mmu_write_pteaddr()
186 Nios2MMU *mmu = &env->mmu; in mmu_init() local
[all …]
/openbmc/linux/drivers/gpu/drm/nouveau/include/nvif/
mmu.h
39 nvif_mmu_kind_valid(struct nvif_mmu *mmu, u8 kind) in nvif_mmu_kind_valid() argument
42 if (kind >= mmu->kind_nr || mmu->kind[kind] == mmu->kind_inv) in nvif_mmu_kind_valid()
49 nvif_mmu_type(struct nvif_mmu *mmu, u8 mask) in nvif_mmu_type() argument
52 for (i = 0; i < mmu->type_nr; i++) { in nvif_mmu_type()
53 if ((mmu->type[i].type & mask) == mask) in nvif_mmu_type()
