/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */
5647371a6SJacek Lawrynowicz #ifndef __IVPU_GEM_H__
6647371a6SJacek Lawrynowicz #define __IVPU_GEM_H__
7647371a6SJacek Lawrynowicz
8647371a6SJacek Lawrynowicz #include <drm/drm_gem.h>
9647371a6SJacek Lawrynowicz #include <drm/drm_mm.h>
10647371a6SJacek Lawrynowicz
11647371a6SJacek Lawrynowicz struct dma_buf;
12647371a6SJacek Lawrynowicz struct ivpu_bo_ops;
13647371a6SJacek Lawrynowicz struct ivpu_file_priv;
14647371a6SJacek Lawrynowicz
/**
 * struct ivpu_bo - ivpu GEM buffer object
 * @base:       embedded DRM GEM object; recovered with to_ivpu_bo()
 * @ops:        backing-store callbacks for this BO's type (&struct ivpu_bo_ops)
 * @ctx:        MMU context -- NOTE(review): presumably the context the BO is
 *              bound/mapped into; confirm against ivpu_gem.c
 * @ctx_node:   list node -- presumably links the BO into a per-context list
 *              (see ivpu_bo_remove_all_bos_from_context()); verify
 * @mm_node:    drm_mm node for the BO's VPU address-space allocation
 * @lock:       protects @pages, @sgt and @mmu_mapped
 * @sgt:        scatter-gather table of the backing storage
 * @pages:      CPU page array indexed by page offset (see ivpu_bo_get_page())
 * @mmu_mapped: set when the BO is mapped in the VPU MMU -- TODO confirm exact
 *              transition points in ivpu_gem.c
 * @kvaddr:     kernel virtual address of the BO, used by the
 *              ivpu_to_cpu_addr()/cpu_to_vpu_addr() translators
 * @vpu_addr:   VPU-visible address of the BO
 * @handle:     GEM handle
 * @flags:      DRM_IVPU_BO_* uAPI flags; cache mode is extracted with
 *              ivpu_bo_cache_mode()
 * @user_ptr:   userspace address -- NOTE(review): likely only meaningful for
 *              userptr-style BOs; confirm
 * @job_status: per-BO status value -- TODO confirm semantics (set by job code)
 */
struct ivpu_bo {
	struct drm_gem_object base;
	const struct ivpu_bo_ops *ops;

	struct ivpu_mmu_context *ctx;
	struct list_head ctx_node;
	struct drm_mm_node mm_node;

	struct mutex lock; /* Protects: pages, sgt, mmu_mapped */
	struct sg_table *sgt;
	struct page **pages;
	bool mmu_mapped;

	void *kvaddr;
	u64 vpu_addr;
	u32 handle;
	u32 flags;
	uintptr_t user_ptr;
	u32 job_status;
};
35647371a6SJacek Lawrynowicz
/* Backing-store flavour of an ivpu_bo; each value has a matching &struct ivpu_bo_ops. */
enum ivpu_bo_type {
	IVPU_BO_TYPE_SHMEM = 1,	/* shmem-backed BO (starts at 1, so 0 means "unset") */
	IVPU_BO_TYPE_INTERNAL,	/* kernel-internal BO (ivpu_bo_alloc_internal()) */
	IVPU_BO_TYPE_PRIME,	/* BO imported via dma-buf (ivpu_gem_prime_import()) */
};
41647371a6SJacek Lawrynowicz
/*
 * Per-type backing-store operations for an ivpu_bo.
 * NOTE(review): the alloc/free and map/unmap pairs presumably manage
 * bo->pages and bo->sgt respectively (both guarded by bo->lock) -- confirm
 * against the ops implementations in ivpu_gem.c.
 */
struct ivpu_bo_ops {
	enum ivpu_bo_type type;			/* BO flavour these ops implement */
	const char *name;			/* human-readable type name */
	int (*alloc_pages)(struct ivpu_bo *bo);	/* allocate backing pages */
	void (*free_pages)(struct ivpu_bo *bo);	/* release backing pages */
	int (*map_pages)(struct ivpu_bo *bo);	/* map the backing pages -- TODO confirm (DMA/sgt?) */
	void (*unmap_pages)(struct ivpu_bo *bo);	/* undo map_pages */
};
50647371a6SJacek Lawrynowicz
/* Pin @bo's backing storage (implemented out of line). */
int ivpu_bo_pin(struct ivpu_bo *bo);
/* Drop every BO still attached to @ctx -- used on context teardown. */
void ivpu_bo_remove_all_bos_from_context(struct ivpu_mmu_context *ctx);
/* Debug helpers that dump the allocated BOs to a printer / the log. */
void ivpu_bo_list(struct drm_device *dev, struct drm_printer *p);
void ivpu_bo_list_print(struct drm_device *dev);

/* Kernel-internal BO allocation at a fixed @vpu_addr, and dma-buf import. */
struct ivpu_bo *
ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 flags);
void ivpu_bo_free_internal(struct ivpu_bo *bo);
struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf);
void ivpu_bo_unmap_sgt_and_remove_from_context(struct ivpu_bo *bo);

/* DRM ioctl handlers (BO_CREATE / BO_INFO / BO_WAIT). */
int ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
int ivpu_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
int ivpu_bo_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
65647371a6SJacek Lawrynowicz
to_ivpu_bo(struct drm_gem_object * obj)66647371a6SJacek Lawrynowicz static inline struct ivpu_bo *to_ivpu_bo(struct drm_gem_object *obj)
67647371a6SJacek Lawrynowicz {
68647371a6SJacek Lawrynowicz return container_of(obj, struct ivpu_bo, base);
69647371a6SJacek Lawrynowicz }
70647371a6SJacek Lawrynowicz
/*
 * Return the CPU page backing byte @offset within @bo, or NULL when the
 * offset is out of range or the backing pages are not allocated.
 *
 * Note: the bound check must reject offset == bo->base.size as well --
 * with the previous "offset > size" test, an offset equal to the BO size
 * passed the check and indexed pages[size / PAGE_SIZE], one entry past
 * the last backing page.
 */
static inline struct page *ivpu_bo_get_page(struct ivpu_bo *bo, u64 offset)
{
	if (offset >= bo->base.size || !bo->pages)
		return NULL;

	return bo->pages[offset / PAGE_SIZE];
}
78647371a6SJacek Lawrynowicz
ivpu_bo_cache_mode(struct ivpu_bo * bo)79647371a6SJacek Lawrynowicz static inline u32 ivpu_bo_cache_mode(struct ivpu_bo *bo)
80647371a6SJacek Lawrynowicz {
81647371a6SJacek Lawrynowicz return bo->flags & DRM_IVPU_BO_CACHE_MASK;
82647371a6SJacek Lawrynowicz }
83647371a6SJacek Lawrynowicz
ivpu_bo_is_snooped(struct ivpu_bo * bo)84647371a6SJacek Lawrynowicz static inline bool ivpu_bo_is_snooped(struct ivpu_bo *bo)
85647371a6SJacek Lawrynowicz {
86647371a6SJacek Lawrynowicz return ivpu_bo_cache_mode(bo) == DRM_IVPU_BO_CACHED;
87647371a6SJacek Lawrynowicz }
88647371a6SJacek Lawrynowicz
/*
 * Adjust @prot according to the BO's caching flags.
 * DRM_IVPU_BO_WC takes precedence over DRM_IVPU_BO_UNCACHED; with neither
 * flag set, @prot is returned unchanged.
 */
static inline pgprot_t ivpu_bo_pgprot(struct ivpu_bo *bo, pgprot_t prot)
{
	u32 flags = bo->flags;

	if (flags & DRM_IVPU_BO_WC)
		prot = pgprot_writecombine(prot);
	else if (flags & DRM_IVPU_BO_UNCACHED)
		prot = pgprot_noncached(prot);

	return prot;
}
99647371a6SJacek Lawrynowicz
ivpu_bo_to_vdev(struct ivpu_bo * bo)100647371a6SJacek Lawrynowicz static inline struct ivpu_device *ivpu_bo_to_vdev(struct ivpu_bo *bo)
101647371a6SJacek Lawrynowicz {
102647371a6SJacek Lawrynowicz return to_ivpu_device(bo->base.dev);
103647371a6SJacek Lawrynowicz }
104647371a6SJacek Lawrynowicz
/*
 * Translate a VPU address inside @bo to the matching kernel virtual
 * address, or return NULL when @vpu_addr falls outside the BO's
 * [vpu_addr, vpu_addr + size) range.
 */
static inline void *ivpu_to_cpu_addr(struct ivpu_bo *bo, u32 vpu_addr)
{
	u64 start = bo->vpu_addr;
	u64 end = start + bo->base.size;

	if (vpu_addr < start || vpu_addr >= end)
		return NULL;

	return bo->kvaddr + (vpu_addr - start);
}
115647371a6SJacek Lawrynowicz
cpu_to_vpu_addr(struct ivpu_bo * bo,void * cpu_addr)116647371a6SJacek Lawrynowicz static inline u32 cpu_to_vpu_addr(struct ivpu_bo *bo, void *cpu_addr)
117647371a6SJacek Lawrynowicz {
118647371a6SJacek Lawrynowicz if (cpu_addr < bo->kvaddr)
119647371a6SJacek Lawrynowicz return 0;
120647371a6SJacek Lawrynowicz
121647371a6SJacek Lawrynowicz if (cpu_addr >= (bo->kvaddr + bo->base.size))
122647371a6SJacek Lawrynowicz return 0;
123647371a6SJacek Lawrynowicz
124647371a6SJacek Lawrynowicz return bo->vpu_addr + (cpu_addr - bo->kvaddr);
125647371a6SJacek Lawrynowicz }
126647371a6SJacek Lawrynowicz
127647371a6SJacek Lawrynowicz #endif /* __IVPU_GEM_H__ */
128