/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#ifndef __MSM_GEM_H__
#define __MSM_GEM_H__

#include <linux/kref.h>
#include <linux/dma-resv.h>
#include "drm/gpu_scheduler.h"
#include "msm_drv.h"

/* Make all GEM related WARN_ON()s ratelimited: when things go wrong they
 * tend to go wrong thousands of times in a short timespan.
 */
#define GEM_WARN_ON(x)  WARN_RATELIMIT(x, "%s", __stringify(x))

/* Additional internal-use only BO flags: */
#define MSM_BO_STOLEN        0x10000000    /* try to use stolen/splash memory */
#define MSM_BO_MAP_PRIV      0x20000000    /* use IOMMU_PRIV when mapping */

struct msm_gem_address_space {
	const char *name;
	/* NOTE: mm managed at the page level, size is in # of pages
	 * and position mm_node->start is in # of pages:
	 */
	struct drm_mm mm;
	spinlock_t lock; /* Protects drm_mm node allocation/removal */
	struct msm_mmu *mmu;
	struct kref kref;

	/* For address spaces associated with a specific process, this
	 * will be non-NULL:
	 */
	struct pid *pid;

	/* @faults: the number of GPU hangs associated with this address space */
	int faults;

	/** @va_start: lowest possible address to allocate */
	uint64_t va_start;

	/** @va_size: the size of the address space (in bytes) */
	uint64_t va_size;
};

struct msm_gem_address_space *
msm_gem_address_space_get(struct msm_gem_address_space *aspace);

void msm_gem_address_space_put(struct msm_gem_address_space *aspace);

struct msm_gem_address_space *
msm_gem_address_space_create(struct msm_mmu *mmu, const char *name,
		u64 va_start, u64 size);
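
/*
 * Illustrative sketch (not part of the API above): a typical address
 * space lifecycle.  The 'mmu' pointer and the va range values are
 * stand-ins for this example:
 *
 *	struct msm_gem_address_space *aspace;
 *
 *	aspace = msm_gem_address_space_create(mmu, "gpu",
 *			SZ_16M, 0xffffffff - SZ_16M);
 *	if (IS_ERR(aspace))
 *		return PTR_ERR(aspace);
 *
 *	... each additional user takes a reference with
 *	msm_gem_address_space_get(), dropped again with _put() ...
 *
 *	msm_gem_address_space_put(aspace);
 */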

struct msm_fence_context;

struct msm_gem_vma {
	struct drm_mm_node node;
	uint64_t iova;
	struct msm_gem_address_space *aspace;
	struct list_head list;    /* node in msm_gem_object::vmas */
	bool mapped;
	int inuse;
	uint32_t fence_mask;
	uint32_t fence[MSM_GPU_MAX_RINGS];
	struct msm_fence_context *fctx[MSM_GPU_MAX_RINGS];
};

int msm_gem_init_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, int size,
		u64 range_start, u64 range_end);
bool msm_gem_vma_inuse(struct msm_gem_vma *vma);
void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma);
void msm_gem_unpin_vma(struct msm_gem_vma *vma);
void msm_gem_unpin_vma_fenced(struct msm_gem_vma *vma, struct msm_fence_context *fctx);
int msm_gem_map_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, int prot,
		struct sg_table *sgt, int size);
void msm_gem_close_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma);
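
/*
 * Illustrative sketch of the vma flow ('size', 'prot' and 'sgt' are
 * stand-ins, not defined here): reserve an iova range with
 * msm_gem_init_vma(), install the pagetable mapping with
 * msm_gem_map_vma(), and tear the vma down with msm_gem_close_vma()
 * once it is no longer in use:
 *
 *	ret = msm_gem_init_vma(aspace, vma, size, 0, U64_MAX);
 *	if (ret)
 *		return ret;
 *
 *	ret = msm_gem_map_vma(aspace, vma, prot, sgt, size);
 *	...
 *	msm_gem_close_vma(aspace, vma);
 */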

struct msm_gem_object {
	struct drm_gem_object base;

	uint32_t flags;

	/**
	 * Advice: are the backing pages purgeable?
	 */
	uint8_t madv;

	/**
	 * count of active vmap'ing
	 */
	uint8_t vmap_count;

	/**
	 * Node in list of all objects (mainly for debugfs), protected by
	 * priv->obj_lock
	 */
	struct list_head node;

	struct page **pages;
	struct sg_table *sgt;
	void *vaddr;

	struct list_head vmas;    /* list of msm_gem_vma */

	/* For physically contiguous buffers.  Used when we don't have
	 * an IOMMU.  Also used for stolen/splashscreen buffers.
	 */
	struct drm_mm_node *vram_node;

	char name[32]; /* Identifier to print for the debugfs files */

	int pin_count;
};
#define to_msm_bo(x) container_of(x, struct msm_gem_object, base)

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma);
void msm_gem_unpin_locked(struct drm_gem_object *obj);
struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
					   struct msm_gem_address_space *aspace);
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova);
int msm_gem_set_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t iova);
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end);
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova);
void msm_gem_unpin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace);
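
/*
 * Illustrative pairing (sketch only): callers needing a device address
 * pin the iova, and unpin it once the hw no longer references the
 * buffer:
 *
 *	uint64_t iova;
 *
 *	ret = msm_gem_get_and_pin_iova(obj, aspace, &iova);
 *	if (ret)
 *		return ret;
 *	... program 'iova' into the hw ...
 *	msm_gem_unpin_iova(obj, aspace);
 */
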
struct page **msm_gem_pin_pages(struct drm_gem_object *obj);
void msm_gem_unpin_pages(struct drm_gem_object *obj);
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args);
int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset);
void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj);
void *msm_gem_get_vaddr(struct drm_gem_object *obj);
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj);
void msm_gem_put_vaddr_locked(struct drm_gem_object *obj);
void msm_gem_put_vaddr(struct drm_gem_object *obj);
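
/*
 * Illustrative pairing (sketch only): CPU access through the kernel
 * mapping is bracketed by get/put ('data' and 'len' are stand-ins):
 *
 *	void *vaddr = msm_gem_get_vaddr(obj);
 *
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memcpy(vaddr, data, len);
 *	msm_gem_put_vaddr(obj);
 */
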
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv);
bool msm_gem_active(struct drm_gem_object *obj);
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout);
int msm_gem_cpu_fini(struct drm_gem_object *obj);
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle, char *name);
struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags);
void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova);
void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace);
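
/*
 * Illustrative sketch: msm_gem_kernel_new() bundles allocate + pin +
 * vmap for kernel-internal buffers (ringbuffers and the like),
 * returning the CPU address and filling in the GEM object and iova
 * ('size' is a stand-in):
 *
 *	struct drm_gem_object *bo;
 *	uint64_t iova;
 *	void *ptr;
 *
 *	ptr = msm_gem_kernel_new(dev, size, MSM_BO_WC, aspace, &bo, &iova);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	...
 *	msm_gem_kernel_put(bo, aspace);
 */
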
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt);
__printf(2, 3)
void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...);
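
/*
 * Illustrative: the name is printf-formatted and shows up in debugfs
 * and devcoredump output, e.g. ('id' is a stand-in):
 *
 *	msm_gem_object_set_name(bo, "ring%d", id);
 */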

#ifdef CONFIG_DEBUG_FS
struct msm_gem_stats {
	struct {
		unsigned count;
		size_t size;
	} all, active, resident, purgeable, purged;
};

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
		struct msm_gem_stats *stats);
void msm_gem_describe_objects(struct list_head *list, struct seq_file *m);
#endif

static inline void
msm_gem_lock(struct drm_gem_object *obj)
{
	dma_resv_lock(obj->resv, NULL);
}

static inline int
msm_gem_lock_interruptible(struct drm_gem_object *obj)
{
	return dma_resv_lock_interruptible(obj->resv, NULL);
}

static inline void
msm_gem_unlock(struct drm_gem_object *obj)
{
	dma_resv_unlock(obj->resv);
}
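
/*
 * Illustrative sketch: the *_locked() helpers in this header expect the
 * caller to hold the object lock, e.g.:
 *
 *	msm_gem_lock(obj);
 *	vaddr = msm_gem_get_vaddr_locked(obj);
 *	...
 *	msm_gem_put_vaddr_locked(obj);
 *	msm_gem_unlock(obj);
 */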

static inline bool
msm_gem_is_locked(struct drm_gem_object *obj)
{
	/*
	 * Destroying the object is a special case: msm_gem_free_object()
	 * calls many things that WARN_ON if the obj lock is not held.  But
	 * acquiring the obj lock in msm_gem_free_object() can cause a
	 * locking order inversion between reservation_ww_class_mutex and
	 * fs_reclaim.
	 *
	 * This deadlock is not actually possible, because no one should
	 * already be holding the lock when msm_gem_free_object() is called.
	 * Unfortunately lockdep is not aware of this detail.  So when the
	 * refcount drops to zero, we pretend it is already locked.
	 */
	return dma_resv_is_locked(obj->resv) || (kref_read(&obj->refcount) == 0);
}

static inline void
msm_gem_assert_locked(struct drm_gem_object *obj)
{
	GEM_WARN_ON(!msm_gem_is_locked(obj));
}

/* imported/exported objects are not purgeable, nor are pinned objects: */
static inline bool is_unpurgeable(struct msm_gem_object *msm_obj)
{
	return msm_obj->base.import_attach || msm_obj->pin_count;
}

static inline bool is_purgeable(struct msm_gem_object *msm_obj)
{
	return (msm_obj->madv == MSM_MADV_DONTNEED) && msm_obj->sgt &&
			!is_unpurgeable(msm_obj);
}

static inline bool is_vunmapable(struct msm_gem_object *msm_obj)
{
	msm_gem_assert_locked(&msm_obj->base);
	return (msm_obj->vmap_count == 0) && msm_obj->vaddr;
}

static inline bool is_unevictable(struct msm_gem_object *msm_obj)
{
	return is_unpurgeable(msm_obj) || msm_obj->vaddr;
}

void msm_gem_purge(struct drm_gem_object *obj);
void msm_gem_evict(struct drm_gem_object *obj);
void msm_gem_vunmap(struct drm_gem_object *obj);
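
/*
 * Illustrative sketch of how a shrinker-style caller might combine the
 * helpers above (locking and list iteration elided for brevity):
 *
 *	if (is_purgeable(msm_obj))
 *		msm_gem_purge(&msm_obj->base);
 *	else if (is_vunmapable(msm_obj))
 *		msm_gem_vunmap(&msm_obj->base);
 */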

/* Created per submit-ioctl, to track BOs and cmdstream buffers, etc,
 * associated with the cmdstream submission for synchronization (and
 * to make it easier to unwind when things go wrong, etc).
 */
struct msm_gem_submit {
	struct drm_sched_job base;
	struct kref ref;
	struct drm_device *dev;
	struct msm_gpu *gpu;
	struct msm_gem_address_space *aspace;
	struct list_head node;   /* node in ring submit list */
	struct ww_acquire_ctx ticket;
	uint32_t seqno;		/* Sequence number of the submit on the ring */

	/* Hw fence, which is created when the scheduler executes the job, and
	 * is signaled when the hw finishes (via seqno write from cmdstream)
	 */
	struct dma_fence *hw_fence;

	/* Userspace visible fence, which is signaled by the scheduler after
	 * the hw_fence is signaled.
	 */
	struct dma_fence *user_fence;

	int fence_id;       /* key into queue->fence_idr */
	struct msm_gpu_submitqueue *queue;
	struct pid *pid;    /* submitting process */
	bool fault_dumped;  /* Limit devcoredump dumping to one per submit */
	bool valid;         /* true if no cmdstream patching needed */
	bool in_rb;         /* "sudo" mode, copy cmds into RB */
	struct msm_ringbuffer *ring;
	unsigned int nr_cmds;
	unsigned int nr_bos;
	u32 ident;	   /* An identifier for the submit, used for logging */
	struct {
		uint32_t type;
		uint32_t size;  /* in dwords */
		uint64_t iova;
		uint32_t offset;/* in dwords */
		uint32_t idx;   /* cmdstream buffer idx in bos[] */
		uint32_t nr_relocs;
		struct drm_msm_gem_submit_reloc *relocs;
	} *cmd;  /* array of size nr_cmds */
	struct {
/* make sure these don't conflict w/ MSM_SUBMIT_BO_x */
#define BO_VALID	0x8000	/* is current addr in cmdstream correct/valid? */
#define BO_LOCKED	0x4000	/* obj lock is held */
#define BO_OBJ_PINNED	0x2000	/* obj (pages) is pinned and on active list */
#define BO_VMA_PINNED	0x1000	/* vma (virtual address) is pinned */
		uint32_t flags;
		union {
			struct msm_gem_object *obj;
			uint32_t handle;
		};
		uint64_t iova;
		struct msm_gem_vma *vma;
	} bos[];
};

static inline struct msm_gem_submit *to_msm_submit(struct drm_sched_job *job)
{
	return container_of(job, struct msm_gem_submit, base);
}

void __msm_gem_submit_destroy(struct kref *kref);

static inline void msm_gem_submit_get(struct msm_gem_submit *submit)
{
	kref_get(&submit->ref);
}

static inline void msm_gem_submit_put(struct msm_gem_submit *submit)
{
	kref_put(&submit->ref, __msm_gem_submit_destroy);
}
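
/*
 * Illustrative sketch: any context that keeps a submit pointer beyond
 * the current call chain holds its own reference:
 *
 *	msm_gem_submit_get(submit);
 *	... stash 'submit' for async processing ...
 *	msm_gem_submit_put(submit);
 */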

void msm_submit_retire(struct msm_gem_submit *submit);

/* helper to determine whether a buffer in a submit should be dumped,
 * used for both devcoredump and debugfs cmdstream dumping:
 */
static inline bool
should_dump(struct msm_gem_submit *submit, int idx)
{
	extern bool rd_full;
	return rd_full || (submit->bos[idx].flags & MSM_SUBMIT_BO_DUMP);
}
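
/*
 * Illustrative sketch: a dumper would walk the submit's BOs and
 * snapshot the ones that qualify (loop body is a stand-in):
 *
 *	for (i = 0; i < submit->nr_bos; i++) {
 *		if (should_dump(submit, i))
 *			... snapshot submit->bos[i].obj ...
 *	}
 */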

#endif /* __MSM_GEM_H__ */