/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

#ifndef __IVPU_MMU_CONTEXT_H__
#define __IVPU_MMU_CONTEXT_H__

#include <drm/drm_mm.h>

struct ivpu_device;
struct ivpu_file_priv;
struct ivpu_addr_range;

/*
 * Number of entries in one page-table level. 512 * 8-byte descriptors fills
 * one 4 KiB page — presumably one table occupies exactly one page; confirm
 * against the allocator in ivpu_mmu_context.c.
 */
#define IVPU_MMU_PGTABLE_ENTRIES 512

/**
 * struct ivpu_mmu_pgtable - CPU-side bookkeeping for the VPU MMU page tables
 * @pgd_cpu_entries: per-PGD-slot arrays of CPU virtual pointers to the
 *                   lower-level tables, sized IVPU_MMU_PGTABLE_ENTRIES per
 *                   slot (NOTE(review): exact level naming — PMD vs PTE —
 *                   is not visible here; verify in the .c implementation)
 * @pgd_entries: CPU virtual addresses of the tables referenced from the PGD
 * @pgd: CPU virtual address of the page global directory (top-level table)
 * @pgd_dma: DMA (device-visible) address corresponding to @pgd
 */
struct ivpu_mmu_pgtable {
	u64 **pgd_cpu_entries[IVPU_MMU_PGTABLE_ENTRIES];
	u64 *pgd_entries[IVPU_MMU_PGTABLE_ENTRIES];
	u64 *pgd;
	dma_addr_t pgd_dma;
};

/**
 * struct ivpu_mmu_context - One VPU MMU address space
 * @lock: serializes access to @mm, @pgtable and @bo_list
 * @mm: DRM range allocator handing out address ranges within this context
 * @pgtable: page tables backing this address space
 * @bo_list: objects tracked in this context — presumably buffer objects
 *           linked via a node in the BO struct; node type not visible here
 * @id: numeric identifier of this context (appears related to the SSID used
 *      by ivpu_mmu_user_context_mark_invalid() — confirm against callers)
 */
struct ivpu_mmu_context {
	struct mutex lock; /* protects: mm, pgtable, bo_list */
	struct drm_mm mm;
	struct ivpu_mmu_pgtable pgtable;
	struct list_head bo_list;
	u32 id;
};

/* Set up / tear down the device-wide (global) MMU context. */
int ivpu_mmu_global_context_init(struct ivpu_device *vdev);
void ivpu_mmu_global_context_fini(struct ivpu_device *vdev);

/*
 * Set up / tear down a per-user MMU context identified by @ctx_id.
 * ivpu_mmu_user_context_mark_invalid() flags the context addressed by @ssid
 * as invalid — presumably after an MMU fault; verify in the implementation.
 */
int ivpu_mmu_user_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 ctx_id);
void ivpu_mmu_user_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx);
void ivpu_mmu_user_context_mark_invalid(struct ivpu_device *vdev, u32 ssid);

/*
 * Reserve / release a VPU address range for @node from the context's
 * drm_mm allocator. The "_locked" suffix suggests the caller must hold
 * ctx->lock — NOTE(review): confirm against the .c implementation.
 */
int ivpu_mmu_context_insert_node_locked(struct ivpu_mmu_context *ctx,
					const struct ivpu_addr_range *range,
					u64 size, struct drm_mm_node *node);
void ivpu_mmu_context_remove_node_locked(struct ivpu_mmu_context *ctx,
					 struct drm_mm_node *node);

/*
 * Map / unmap the pages of @sgt at @vpu_addr in the context's page tables.
 * @llc_coherent selects LLC-coherent mapping attributes on map.
 */
int ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
			     u64 vpu_addr, struct sg_table *sgt, bool llc_coherent);
void ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
				u64 vpu_addr, struct sg_table *sgt);

#endif /* __IVPU_MMU_CONTEXT_H__ */