/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#ifndef __MSM_GEM_H__
#define __MSM_GEM_H__

#include <linux/kref.h>
#include <linux/dma-resv.h>
#include "drm/gpu_scheduler.h"
#include "msm_drv.h"

/* Make all GEM related WARN_ON()s ratelimited.. when things go wrong they
 * tend to go wrong 1000s of times in a short timespan.
 */
#define GEM_WARN_ON(x)  WARN_RATELIMIT(x, "%s", __stringify(x))

/* Additional internal-use only BO flags: */
#define MSM_BO_STOLEN        0x10000000    /* try to use stolen/splash memory */
#define MSM_BO_MAP_PRIV      0x20000000    /* use IOMMU_PRIV when mapping */

struct msm_gem_address_space {
	const char *name;
	/* NOTE: mm managed at the page level, size is in # of pages
	 * and position mm_node->start is in # of pages:
	 */
	struct drm_mm mm;
	spinlock_t lock; /* Protects drm_mm node allocation/removal */
	struct msm_mmu *mmu;
	struct kref kref;

	/* For address spaces associated with a specific process, this
	 * will be non-NULL:
	 */
	struct pid *pid;

	/* @faults: the number of GPU hangs associated with this address space */
	int faults;
};

struct msm_gem_address_space *
msm_gem_address_space_get(struct msm_gem_address_space *aspace);

void msm_gem_address_space_put(struct msm_gem_address_space *aspace);

struct msm_gem_address_space *
msm_gem_address_space_create(struct msm_mmu *mmu, const char *name,
		u64 va_start, u64 size);

struct msm_gem_vma {
	struct drm_mm_node node;
	uint64_t iova;
	struct msm_gem_address_space *aspace;
	struct list_head list;    /* node in msm_gem_object::vmas */
	bool mapped;
	int inuse;
};

int msm_gem_init_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, int npages,
		u64 range_start, u64 range_end);
void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma);
void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma);
int msm_gem_map_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, int prot,
		struct sg_table *sgt, int npages);
void msm_gem_close_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma);
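
/* A rough sketch of the typical vma lifecycle built on the helpers above
 * (hypothetical caller; error handling trimmed, and the prot flags and
 * address range are illustrative rather than prescriptive):
 *
 *	struct msm_gem_vma vma = {};
 *	int npages = obj->size >> PAGE_SHIFT;
 *
 *	ret = msm_gem_init_vma(aspace, &vma, npages, 0, U64_MAX);
 *	if (!ret)
 *		ret = msm_gem_map_vma(aspace, &vma, IOMMU_READ, sgt, npages);
 *	...
 *	msm_gem_unmap_vma(aspace, &vma);
 *	msm_gem_close_vma(aspace, &vma);
 */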

struct msm_gem_object {
	struct drm_gem_object base;

	uint32_t flags;

	/**
	 * Advice: are the backing pages purgeable?
	 */
	uint8_t madv;

	/**
	 * Is object on inactive_dontneed list (ie. counted in priv->shrinkable_count)?
	 */
	bool dontneed : 1;

	/**
	 * Is object evictable (ie. counted in priv->evictable_count)?
	 */
	bool evictable : 1;

	/**
	 * count of active vmap'ing
	 */
	uint8_t vmap_count;

	/**
	 * Node in list of all objects (mainly for debugfs, protected by
	 * priv->obj_lock)
	 */
	struct list_head node;

	/**
	 * An object is either:
	 *  inactive - on priv->inactive_dontneed or priv->inactive_willneed
	 *     (depending on purgeability status)
	 *  active   - on one of the gpu's active_list..  well, at least for
	 *     now we don't have (I don't think) hw sync between 2d and 3d on
	 *     devices which have both, meaning we need to block on submit if
	 *     a bo is already on the other ring
	 */
	struct list_head mm_list;

	struct page **pages;
	struct sg_table *sgt;
	void *vaddr;

	struct list_head vmas;    /* list of msm_gem_vma */

	/* For physically contiguous buffers.  Used when we don't have
	 * an IOMMU.  Also used for stolen/splashscreen buffer.
	 */
	struct drm_mm_node *vram_node;

	char name[32]; /* Identifier to print for the debugfs files */

	int active_count;
	int pin_count;
};
#define to_msm_bo(x) container_of(x, struct msm_gem_object, base)

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova);
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end);
int msm_gem_get_and_pin_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova);
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova);
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace);
void msm_gem_unpin_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace);
void msm_gem_unpin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace);
struct page **msm_gem_get_pages(struct drm_gem_object *obj);
void msm_gem_put_pages(struct drm_gem_object *obj);
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args);
int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset);
void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj);
void *msm_gem_get_vaddr(struct drm_gem_object *obj);
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj);
void msm_gem_put_vaddr_locked(struct drm_gem_object *obj);
void msm_gem_put_vaddr(struct drm_gem_object *obj);
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv);
void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu);
void msm_gem_active_put(struct drm_gem_object *obj);
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout);
int msm_gem_cpu_fini(struct drm_gem_object *obj);
void msm_gem_free_object(struct drm_gem_object *obj);
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle, char *name);
struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags);
void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova);
void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace);
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt);
__printf(2, 3)
void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...);
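
/* Rough usage sketch for a kernel-internal BO using the helpers above
 * (hypothetical caller and buffer name; error handling beyond the
 * allocation check is trimmed):
 *
 *	struct drm_gem_object *bo;
 *	uint64_t iova;
 *	void *vaddr;
 *
 *	vaddr = msm_gem_kernel_new(dev, size, MSM_BO_WC, aspace, &bo, &iova);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	msm_gem_object_set_name(bo, "example-bo");
 *	...
 *	msm_gem_kernel_put(bo, aspace);
 */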

#ifdef CONFIG_DEBUG_FS
struct msm_gem_stats {
	struct {
		unsigned count;
		size_t size;
	} all, active, resident, purgeable, purged;
};

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
		struct msm_gem_stats *stats);
void msm_gem_describe_objects(struct list_head *list, struct seq_file *m);
#endif

static inline void
msm_gem_lock(struct drm_gem_object *obj)
{
	dma_resv_lock(obj->resv, NULL);
}

static inline bool __must_check
msm_gem_trylock(struct drm_gem_object *obj)
{
	return dma_resv_trylock(obj->resv);
}

static inline int
msm_gem_lock_interruptible(struct drm_gem_object *obj)
{
	return dma_resv_lock_interruptible(obj->resv, NULL);
}

static inline void
msm_gem_unlock(struct drm_gem_object *obj)
{
	dma_resv_unlock(obj->resv);
}

static inline bool
msm_gem_is_locked(struct drm_gem_object *obj)
{
	return dma_resv_is_locked(obj->resv);
}

static inline bool is_active(struct msm_gem_object *msm_obj)
{
	GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base));
	return msm_obj->active_count;
}

/* imported/exported objects are not purgeable: */
static inline bool is_unpurgeable(struct msm_gem_object *msm_obj)
{
	return msm_obj->base.import_attach || msm_obj->pin_count;
}

static inline bool is_purgeable(struct msm_gem_object *msm_obj)
{
	return (msm_obj->madv == MSM_MADV_DONTNEED) && msm_obj->sgt &&
			!is_unpurgeable(msm_obj);
}

static inline bool is_vunmapable(struct msm_gem_object *msm_obj)
{
	GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base));
	return (msm_obj->vmap_count == 0) && msm_obj->vaddr;
}

static inline void mark_purgeable(struct msm_gem_object *msm_obj)
{
	struct msm_drm_private *priv = msm_obj->base.dev->dev_private;

	GEM_WARN_ON(!mutex_is_locked(&priv->mm_lock));

	if (is_unpurgeable(msm_obj))
		return;

	if (GEM_WARN_ON(msm_obj->dontneed))
		return;

	priv->shrinkable_count += msm_obj->base.size >> PAGE_SHIFT;
	msm_obj->dontneed = true;
}

static inline void mark_unpurgeable(struct msm_gem_object *msm_obj)
{
	struct msm_drm_private *priv = msm_obj->base.dev->dev_private;

	GEM_WARN_ON(!mutex_is_locked(&priv->mm_lock));

	if (is_unpurgeable(msm_obj))
		return;

	if (GEM_WARN_ON(!msm_obj->dontneed))
		return;

	priv->shrinkable_count -= msm_obj->base.size >> PAGE_SHIFT;
	GEM_WARN_ON(priv->shrinkable_count < 0);
	msm_obj->dontneed = false;
}

static inline bool is_unevictable(struct msm_gem_object *msm_obj)
{
	return is_unpurgeable(msm_obj) || msm_obj->vaddr;
}

static inline void mark_evictable(struct msm_gem_object *msm_obj)
{
	struct msm_drm_private *priv = msm_obj->base.dev->dev_private;

	WARN_ON(!mutex_is_locked(&priv->mm_lock));

	if (is_unevictable(msm_obj))
		return;

	if (WARN_ON(msm_obj->evictable))
		return;

	priv->evictable_count += msm_obj->base.size >> PAGE_SHIFT;
	msm_obj->evictable = true;
}

static inline void mark_unevictable(struct msm_gem_object *msm_obj)
{
	struct msm_drm_private *priv = msm_obj->base.dev->dev_private;

	WARN_ON(!mutex_is_locked(&priv->mm_lock));

	if (is_unevictable(msm_obj))
		return;

	if (WARN_ON(!msm_obj->evictable))
		return;

	priv->evictable_count -= msm_obj->base.size >> PAGE_SHIFT;
	WARN_ON(priv->evictable_count < 0);
	msm_obj->evictable = false;
}

void msm_gem_purge(struct drm_gem_object *obj);
void msm_gem_evict(struct drm_gem_object *obj);
void msm_gem_vunmap(struct drm_gem_object *obj);
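
/* Sketch of how a shrinker-style caller might combine the lock helpers and
 * the purge/vunmap entry points above (simplified and hypothetical, not the
 * actual shrinker implementation):
 *
 *	if (msm_gem_trylock(&msm_obj->base)) {
 *		if (is_purgeable(msm_obj))
 *			msm_gem_purge(&msm_obj->base);
 *		else if (is_vunmapable(msm_obj))
 *			msm_gem_vunmap(&msm_obj->base);
 *		msm_gem_unlock(&msm_obj->base);
 *	}
 */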

/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
 * associated with the cmdstream submission for synchronization (and
 * make it easier to unwind when things go wrong, etc).
 */
struct msm_gem_submit {
	struct drm_sched_job base;
	struct kref ref;
	struct drm_device *dev;
	struct msm_gpu *gpu;
	struct msm_gem_address_space *aspace;
	struct list_head node;   /* node in ring submit list */
	struct ww_acquire_ctx ticket;
	uint32_t seqno;          /* Sequence number of the submit on the ring */

	/* Hw fence, which is created when the scheduler executes the job, and
	 * is signaled when the hw finishes (via seqno write from cmdstream)
	 */
	struct dma_fence *hw_fence;

	/* Userspace visible fence, which is signaled by the scheduler after
	 * the hw_fence is signaled.
	 */
	struct dma_fence *user_fence;

	int fence_id;       /* key into queue->fence_idr */
	struct msm_gpu_submitqueue *queue;
	struct pid *pid;    /* submitting process */
	bool fault_dumped;  /* Limit devcoredump dumping to one per submit */
	bool valid;         /* true if no cmdstream patching needed */
	bool in_rb;         /* "sudo" mode, copy cmds into RB */
	struct msm_ringbuffer *ring;
	unsigned int nr_cmds;
	unsigned int nr_bos;
	u32 ident;          /* An "identifier" for the submit, used for logging */
	struct {
		uint32_t type;
		uint32_t size;  /* in dwords */
		uint64_t iova;
		uint32_t offset;/* in dwords */
		uint32_t idx;   /* cmdstream buffer idx in bos[] */
		uint32_t nr_relocs;
		struct drm_msm_gem_submit_reloc *relocs;
	} *cmd;  /* array of size nr_cmds */
	struct {
		uint32_t flags;
		union {
			struct msm_gem_object *obj;
			uint32_t handle;
		};
		uint64_t iova;
	} bos[];
};

static inline struct msm_gem_submit *to_msm_submit(struct drm_sched_job *job)
{
	return container_of(job, struct msm_gem_submit, base);
}

void __msm_gem_submit_destroy(struct kref *kref);

static inline void msm_gem_submit_get(struct msm_gem_submit *submit)
{
	kref_get(&submit->ref);
}

static inline void msm_gem_submit_put(struct msm_gem_submit *submit)
{
	kref_put(&submit->ref, __msm_gem_submit_destroy);
}

void msm_submit_retire(struct msm_gem_submit *submit);

/* helper to determine if a buffer in a submit should be dumped, used for both
 * devcoredump and debugfs cmdstream dumping:
 */
static inline bool
should_dump(struct msm_gem_submit *submit, int idx)
{
	extern bool rd_full;
	return rd_full || (submit->bos[idx].flags & MSM_SUBMIT_BO_DUMP);
}

#endif /* __MSM_GEM_H__ */