/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#ifndef __MSM_GEM_H__
#define __MSM_GEM_H__

#include <linux/kref.h>
#include <linux/dma-resv.h>
#include "drm/gpu_scheduler.h"
#include "msm_drv.h"

/* Make all GEM related WARN_ON()s ratelimited.. when things go wrong they
 * tend to go wrong 1000s of times in a short timespan.
 */
#define GEM_WARN_ON(x)  WARN_RATELIMIT(x, "%s", __stringify(x))

/* Additional internal-use only BO flags: */
#define MSM_BO_STOLEN        0x10000000    /* try to use stolen/splash memory */
#define MSM_BO_MAP_PRIV      0x20000000    /* use IOMMU_PRIV when mapping */

/* A GPU address space: pairs a page-granular drm_mm range allocator with
 * the msm_mmu that backs it.  Reference counted via @kref.
 */
struct msm_gem_address_space {
	const char *name;
	/* NOTE: mm managed at the page level, size is in # of pages
	 * and position mm_node->start is in # of pages:
	 */
	struct drm_mm mm;
	spinlock_t lock; /* Protects drm_mm node allocation/removal */
	struct msm_mmu *mmu;
	struct kref kref;

	/* For address spaces associated with a specific process, this
	 * will be non-NULL:
	 */
	struct pid *pid;

	/* @faults: the number of GPU hangs associated with this address space */
	int faults;

	/** @va_start: lowest possible address to allocate */
	uint64_t va_start;

	/** @va_size: the size of the address space (in bytes) */
	uint64_t va_size;
};

struct msm_gem_address_space *
msm_gem_address_space_get(struct msm_gem_address_space *aspace);

void msm_gem_address_space_put(struct msm_gem_address_space *aspace);

struct msm_gem_address_space *
msm_gem_address_space_create(struct msm_mmu *mmu, const char *name,
		u64 va_start, u64 size);

struct msm_fence_context;

/* A mapping of a GEM object into one address space.  Lives on the
 * object's ::vmas list; @fence[]/@fctx[] track per-ring fences that
 * must signal before the vma can be torn down (see msm_gem_vma_inuse()).
 */
struct msm_gem_vma {
	struct drm_mm_node node;
	uint64_t iova;
	struct msm_gem_address_space *aspace;
	struct list_head list;    /* node in msm_gem_object::vmas */
	bool mapped;
	int inuse;
	uint32_t fence_mask;
	uint32_t fence[MSM_GPU_MAX_RINGS];
	struct msm_fence_context *fctx[MSM_GPU_MAX_RINGS];
};

int msm_gem_init_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, int size,
		u64 range_start, u64 range_end);
bool msm_gem_vma_inuse(struct msm_gem_vma *vma);
void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma);
void msm_gem_unpin_vma(struct msm_gem_vma *vma);
void msm_gem_unpin_vma_fenced(struct msm_gem_vma *vma, struct msm_fence_context *fctx);
80695383a1SRob Clark int msm_gem_map_vma(struct msm_gem_address_space *aspace, 81695383a1SRob Clark struct msm_gem_vma *vma, int prot, 822ee4b5d2SRob Clark struct sg_table *sgt, int size); 83695383a1SRob Clark void msm_gem_close_vma(struct msm_gem_address_space *aspace, 84695383a1SRob Clark struct msm_gem_vma *vma); 85695383a1SRob Clark 86c8afe684SRob Clark struct msm_gem_object { 87c8afe684SRob Clark struct drm_gem_object base; 88c8afe684SRob Clark 89c8afe684SRob Clark uint32_t flags; 90c8afe684SRob Clark 914cd33c48SRob Clark /** 924cd33c48SRob Clark * Advice: are the backing pages purgeable? 934cd33c48SRob Clark */ 944cd33c48SRob Clark uint8_t madv; 954cd33c48SRob Clark 96e1e9db2cSRob Clark /** 97cc8a4d5aSRob Clark * Is object on inactive_dontneed list (ie. counted in priv->shrinkable_count)? 98cc8a4d5aSRob Clark */ 99cc8a4d5aSRob Clark bool dontneed : 1; 100cc8a4d5aSRob Clark 101cc8a4d5aSRob Clark /** 10264fcbde7SRob Clark * Is object evictable (ie. counted in priv->evictable_count)? 10364fcbde7SRob Clark */ 10464fcbde7SRob Clark bool evictable : 1; 10564fcbde7SRob Clark 10664fcbde7SRob Clark /** 107e1e9db2cSRob Clark * count of active vmap'ing 108e1e9db2cSRob Clark */ 109e1e9db2cSRob Clark uint8_t vmap_count; 110e1e9db2cSRob Clark 111cc8a4d5aSRob Clark /** 1126ed0897cSRob Clark * Node in list of all objects (mainly for debugfs, protected by 1136ed0897cSRob Clark * priv->obj_lock 1146ed0897cSRob Clark */ 1156ed0897cSRob Clark struct list_head node; 1166ed0897cSRob Clark 1176ed0897cSRob Clark /** 118cc8a4d5aSRob Clark * An object is either: 1196ed0897cSRob Clark * inactive - on priv->inactive_dontneed or priv->inactive_willneed 12064fcbde7SRob Clark * (depending on purgeability status) 1217198e6b0SRob Clark * active - on one one of the gpu's active_list.. 
well, at 1227198e6b0SRob Clark * least for now we don't have (I don't think) hw sync between 1237198e6b0SRob Clark * 2d and 3d one devices which have both, meaning we need to 1247198e6b0SRob Clark * block on submit if a bo is already on other ring 1257198e6b0SRob Clark */ 126c8afe684SRob Clark struct list_head mm_list; 1277198e6b0SRob Clark 128c8afe684SRob Clark struct page **pages; 129c8afe684SRob Clark struct sg_table *sgt; 130c8afe684SRob Clark void *vaddr; 131c8afe684SRob Clark 1324b85f7f5SRob Clark struct list_head vmas; /* list of msm_gem_vma */ 1337198e6b0SRob Clark 134871d812aSRob Clark /* For physically contiguous buffers. Used when we don't have 135072f1f91SRob Clark * an IOMMU. Also used for stolen/splashscreen buffer. 136871d812aSRob Clark */ 137871d812aSRob Clark struct drm_mm_node *vram_node; 1380815d774SJordan Crouse 1390815d774SJordan Crouse char name[32]; /* Identifier to print for the debugfs files */ 1409d8baa2bSAkhil P Oommen 141ab5c54cbSRob Clark int active_count; 14264fcbde7SRob Clark int pin_count; 143c8afe684SRob Clark }; 144c8afe684SRob Clark #define to_msm_bo(x) container_of(x, struct msm_gem_object, base) 145c8afe684SRob Clark 1468f642378SRob Clark uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj); 14727674c66SRob Clark int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma); 148311e03c2SRob Clark void msm_gem_unpin_locked(struct drm_gem_object *obj); 14927674c66SRob Clark struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj, 15027674c66SRob Clark struct msm_gem_address_space *aspace); 1518f642378SRob Clark int msm_gem_get_iova(struct drm_gem_object *obj, 1528f642378SRob Clark struct msm_gem_address_space *aspace, uint64_t *iova); 153a636a0ffSRob Clark int msm_gem_set_iova(struct drm_gem_object *obj, 154a636a0ffSRob Clark struct msm_gem_address_space *aspace, uint64_t iova); 1558f642378SRob Clark int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj, 1568f642378SRob Clark struct 
msm_gem_address_space *aspace, uint64_t *iova, 1578f642378SRob Clark u64 range_start, u64 range_end); 1588f642378SRob Clark int msm_gem_get_and_pin_iova(struct drm_gem_object *obj, 1598f642378SRob Clark struct msm_gem_address_space *aspace, uint64_t *iova); 1608f642378SRob Clark void msm_gem_unpin_iova(struct drm_gem_object *obj, 1618f642378SRob Clark struct msm_gem_address_space *aspace); 1628f642378SRob Clark struct page **msm_gem_get_pages(struct drm_gem_object *obj); 1638f642378SRob Clark void msm_gem_put_pages(struct drm_gem_object *obj); 1648f642378SRob Clark int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev, 1658f642378SRob Clark struct drm_mode_create_dumb *args); 1668f642378SRob Clark int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, 1678f642378SRob Clark uint32_t handle, uint64_t *offset); 168e4b87d22SRob Clark void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj); 1698f642378SRob Clark void *msm_gem_get_vaddr(struct drm_gem_object *obj); 1708f642378SRob Clark void *msm_gem_get_vaddr_active(struct drm_gem_object *obj); 171e4b87d22SRob Clark void msm_gem_put_vaddr_locked(struct drm_gem_object *obj); 1728f642378SRob Clark void msm_gem_put_vaddr(struct drm_gem_object *obj); 1738f642378SRob Clark int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv); 1748f642378SRob Clark void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu); 1758f642378SRob Clark void msm_gem_active_put(struct drm_gem_object *obj); 176*01780d02SRob Clark bool msm_gem_active(struct drm_gem_object *obj); 1778f642378SRob Clark int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout); 1788f642378SRob Clark int msm_gem_cpu_fini(struct drm_gem_object *obj); 1798f642378SRob Clark int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file, 1808f642378SRob Clark uint32_t size, uint32_t flags, uint32_t *handle, char *name); 1818f642378SRob Clark struct drm_gem_object *msm_gem_new(struct 
drm_device *dev, 1828f642378SRob Clark uint32_t size, uint32_t flags); 1838f642378SRob Clark void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size, 1848f642378SRob Clark uint32_t flags, struct msm_gem_address_space *aspace, 1858f642378SRob Clark struct drm_gem_object **bo, uint64_t *iova); 1868f642378SRob Clark void msm_gem_kernel_put(struct drm_gem_object *bo, 187030af2b0SRob Clark struct msm_gem_address_space *aspace); 1888f642378SRob Clark struct drm_gem_object *msm_gem_import(struct drm_device *dev, 1898f642378SRob Clark struct dma_buf *dmabuf, struct sg_table *sgt); 1908f642378SRob Clark __printf(2, 3) 1918f642378SRob Clark void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...); 192528107c8SRob Clark 193f48f3563SRob Clark #ifdef CONFIG_DEBUG_FS 194528107c8SRob Clark struct msm_gem_stats { 195528107c8SRob Clark struct { 196528107c8SRob Clark unsigned count; 197528107c8SRob Clark size_t size; 198f48f3563SRob Clark } all, active, resident, purgeable, purged; 199528107c8SRob Clark }; 200528107c8SRob Clark 201528107c8SRob Clark void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m, 202528107c8SRob Clark struct msm_gem_stats *stats); 2038f642378SRob Clark void msm_gem_describe_objects(struct list_head *list, struct seq_file *m); 2048f642378SRob Clark #endif 2058f642378SRob Clark 206a6ae74c9SRob Clark static inline void 207a6ae74c9SRob Clark msm_gem_lock(struct drm_gem_object *obj) 208a6ae74c9SRob Clark { 2096c0e3ea2SRob Clark dma_resv_lock(obj->resv, NULL); 210a6ae74c9SRob Clark } 211a6ae74c9SRob Clark 212599089c6SRob Clark static inline bool __must_check 213599089c6SRob Clark msm_gem_trylock(struct drm_gem_object *obj) 214599089c6SRob Clark { 2156c0e3ea2SRob Clark return dma_resv_trylock(obj->resv); 216599089c6SRob Clark } 217599089c6SRob Clark 218a6ae74c9SRob Clark static inline int 219a6ae74c9SRob Clark msm_gem_lock_interruptible(struct drm_gem_object *obj) 220a6ae74c9SRob Clark { 2216c0e3ea2SRob Clark return 
dma_resv_lock_interruptible(obj->resv, NULL); 222a6ae74c9SRob Clark } 223a6ae74c9SRob Clark 224a6ae74c9SRob Clark static inline void 225a6ae74c9SRob Clark msm_gem_unlock(struct drm_gem_object *obj) 226a6ae74c9SRob Clark { 2276c0e3ea2SRob Clark dma_resv_unlock(obj->resv); 228a6ae74c9SRob Clark } 229a6ae74c9SRob Clark 230a6ae74c9SRob Clark static inline bool 231a6ae74c9SRob Clark msm_gem_is_locked(struct drm_gem_object *obj) 232a6ae74c9SRob Clark { 233a414fe3aSRob Clark /* 234a414fe3aSRob Clark * Destroying the object is a special case.. msm_gem_free_object() 235a414fe3aSRob Clark * calls many things that WARN_ON if the obj lock is not held. But 236a414fe3aSRob Clark * acquiring the obj lock in msm_gem_free_object() can cause a 237a414fe3aSRob Clark * locking order inversion between reservation_ww_class_mutex and 238a414fe3aSRob Clark * fs_reclaim. 239a414fe3aSRob Clark * 240a414fe3aSRob Clark * This deadlock is not actually possible, because no one should 241a414fe3aSRob Clark * be already holding the lock when msm_gem_free_object() is called. 242a414fe3aSRob Clark * Unfortunately lockdep is not aware of this detail. So when the 243a414fe3aSRob Clark * refcount drops to zero, we pretend it is already locked. 
244a414fe3aSRob Clark */ 245a414fe3aSRob Clark return dma_resv_is_locked(obj->resv) || (kref_read(&obj->refcount) == 0); 246a6ae74c9SRob Clark } 247a6ae74c9SRob Clark 2487198e6b0SRob Clark static inline bool is_active(struct msm_gem_object *msm_obj) 2497198e6b0SRob Clark { 25090643a24SRob Clark GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base)); 251ab5c54cbSRob Clark return msm_obj->active_count; 2527198e6b0SRob Clark } 2537198e6b0SRob Clark 2540054eeb7SRob Clark /* imported/exported objects are not purgeable: */ 2550054eeb7SRob Clark static inline bool is_unpurgeable(struct msm_gem_object *msm_obj) 256cc8a4d5aSRob Clark { 25710f76165SRob Clark return msm_obj->base.import_attach || msm_obj->pin_count; 258cc8a4d5aSRob Clark } 259cc8a4d5aSRob Clark 26068209390SRob Clark static inline bool is_purgeable(struct msm_gem_object *msm_obj) 26168209390SRob Clark { 26268209390SRob Clark return (msm_obj->madv == MSM_MADV_DONTNEED) && msm_obj->sgt && 2630054eeb7SRob Clark !is_unpurgeable(msm_obj); 26468209390SRob Clark } 26568209390SRob Clark 266e1e9db2cSRob Clark static inline bool is_vunmapable(struct msm_gem_object *msm_obj) 267e1e9db2cSRob Clark { 26890643a24SRob Clark GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base)); 269e1e9db2cSRob Clark return (msm_obj->vmap_count == 0) && msm_obj->vaddr; 270e1e9db2cSRob Clark } 271e1e9db2cSRob Clark 2720054eeb7SRob Clark static inline void mark_purgeable(struct msm_gem_object *msm_obj) 273cc8a4d5aSRob Clark { 274cc8a4d5aSRob Clark struct msm_drm_private *priv = msm_obj->base.dev->dev_private; 275cc8a4d5aSRob Clark 27690643a24SRob Clark GEM_WARN_ON(!mutex_is_locked(&priv->mm_lock)); 277cc8a4d5aSRob Clark 2780054eeb7SRob Clark if (is_unpurgeable(msm_obj)) 279cc8a4d5aSRob Clark return; 280cc8a4d5aSRob Clark 28190643a24SRob Clark if (GEM_WARN_ON(msm_obj->dontneed)) 282cc8a4d5aSRob Clark return; 283cc8a4d5aSRob Clark 284cc8a4d5aSRob Clark priv->shrinkable_count += msm_obj->base.size >> PAGE_SHIFT; 285cc8a4d5aSRob Clark msm_obj->dontneed = true; 
286cc8a4d5aSRob Clark } 287cc8a4d5aSRob Clark 2880054eeb7SRob Clark static inline void mark_unpurgeable(struct msm_gem_object *msm_obj) 289cc8a4d5aSRob Clark { 290cc8a4d5aSRob Clark struct msm_drm_private *priv = msm_obj->base.dev->dev_private; 291cc8a4d5aSRob Clark 29290643a24SRob Clark GEM_WARN_ON(!mutex_is_locked(&priv->mm_lock)); 293cc8a4d5aSRob Clark 2940054eeb7SRob Clark if (is_unpurgeable(msm_obj)) 295cc8a4d5aSRob Clark return; 296cc8a4d5aSRob Clark 29790643a24SRob Clark if (GEM_WARN_ON(!msm_obj->dontneed)) 298cc8a4d5aSRob Clark return; 299cc8a4d5aSRob Clark 300cc8a4d5aSRob Clark priv->shrinkable_count -= msm_obj->base.size >> PAGE_SHIFT; 30190643a24SRob Clark GEM_WARN_ON(priv->shrinkable_count < 0); 302cc8a4d5aSRob Clark msm_obj->dontneed = false; 303cc8a4d5aSRob Clark } 304cc8a4d5aSRob Clark 30564fcbde7SRob Clark static inline bool is_unevictable(struct msm_gem_object *msm_obj) 30664fcbde7SRob Clark { 30710f76165SRob Clark return is_unpurgeable(msm_obj) || msm_obj->vaddr; 30864fcbde7SRob Clark } 30964fcbde7SRob Clark 31064fcbde7SRob Clark static inline void mark_evictable(struct msm_gem_object *msm_obj) 31164fcbde7SRob Clark { 31264fcbde7SRob Clark struct msm_drm_private *priv = msm_obj->base.dev->dev_private; 31364fcbde7SRob Clark 31464fcbde7SRob Clark WARN_ON(!mutex_is_locked(&priv->mm_lock)); 31564fcbde7SRob Clark 31664fcbde7SRob Clark if (is_unevictable(msm_obj)) 31764fcbde7SRob Clark return; 31864fcbde7SRob Clark 31964fcbde7SRob Clark if (WARN_ON(msm_obj->evictable)) 32064fcbde7SRob Clark return; 32164fcbde7SRob Clark 32264fcbde7SRob Clark priv->evictable_count += msm_obj->base.size >> PAGE_SHIFT; 32364fcbde7SRob Clark msm_obj->evictable = true; 32464fcbde7SRob Clark } 32564fcbde7SRob Clark 32664fcbde7SRob Clark static inline void mark_unevictable(struct msm_gem_object *msm_obj) 32764fcbde7SRob Clark { 32864fcbde7SRob Clark struct msm_drm_private *priv = msm_obj->base.dev->dev_private; 32964fcbde7SRob Clark 33064fcbde7SRob Clark 
WARN_ON(!mutex_is_locked(&priv->mm_lock)); 33164fcbde7SRob Clark 33264fcbde7SRob Clark if (is_unevictable(msm_obj)) 33364fcbde7SRob Clark return; 33464fcbde7SRob Clark 33564fcbde7SRob Clark if (WARN_ON(!msm_obj->evictable)) 33664fcbde7SRob Clark return; 33764fcbde7SRob Clark 33864fcbde7SRob Clark priv->evictable_count -= msm_obj->base.size >> PAGE_SHIFT; 33964fcbde7SRob Clark WARN_ON(priv->evictable_count < 0); 34064fcbde7SRob Clark msm_obj->evictable = false; 34164fcbde7SRob Clark } 34264fcbde7SRob Clark 343599089c6SRob Clark void msm_gem_purge(struct drm_gem_object *obj); 34464fcbde7SRob Clark void msm_gem_evict(struct drm_gem_object *obj); 345599089c6SRob Clark void msm_gem_vunmap(struct drm_gem_object *obj); 3460e08270aSSushmita Susheelendra 3477198e6b0SRob Clark /* Created per submit-ioctl, to track bo's and cmdstream bufs, etc, 3487198e6b0SRob Clark * associated with the cmdstream submission for synchronization (and 349375f9a63SRob Clark * make it easier to unwind when things go wrong, etc). 3507198e6b0SRob Clark */ 3517198e6b0SRob Clark struct msm_gem_submit { 3521d8a5ca4SRob Clark struct drm_sched_job base; 353964d2f97SRob Clark struct kref ref; 3547198e6b0SRob Clark struct drm_device *dev; 3557198e6b0SRob Clark struct msm_gpu *gpu; 356295b22aeSJordan Crouse struct msm_gem_address_space *aspace; 357f97decacSJordan Crouse struct list_head node; /* node in ring submit list */ 3587198e6b0SRob Clark struct ww_acquire_ctx ticket; 359f97decacSJordan Crouse uint32_t seqno; /* Sequence number of the submit on the ring */ 3601d8a5ca4SRob Clark 3611d8a5ca4SRob Clark /* Hw fence, which is created when the scheduler executes the job, and 3621d8a5ca4SRob Clark * is signaled when the hw finishes (via seqno write from cmdstream) 3631d8a5ca4SRob Clark */ 3641d8a5ca4SRob Clark struct dma_fence *hw_fence; 3651d8a5ca4SRob Clark 3661d8a5ca4SRob Clark /* Userspace visible fence, which is signaled by the scheduler after 3671d8a5ca4SRob Clark * the hw_fence is signaled. 
3681d8a5ca4SRob Clark */ 3691d8a5ca4SRob Clark struct dma_fence *user_fence; 3701d8a5ca4SRob Clark 371a61acbbeSRob Clark int fence_id; /* key into queue->fence_idr */ 372f7de1545SJordan Crouse struct msm_gpu_submitqueue *queue; 3734816b626SRob Clark struct pid *pid; /* submitting process */ 374e25e92e0SRob Clark bool fault_dumped; /* Limit devcoredump dumping to one per submit */ 375340faef2SRob Clark bool valid; /* true if no cmdstream patching needed */ 3766a8bd08dSRob Clark bool in_rb; /* "sudo" mode, copy cmds into RB */ 377f97decacSJordan Crouse struct msm_ringbuffer *ring; 3787198e6b0SRob Clark unsigned int nr_cmds; 3797198e6b0SRob Clark unsigned int nr_bos; 3804241db42SJordan Crouse u32 ident; /* A "identifier" for the submit for logging */ 3817198e6b0SRob Clark struct { 3827198e6b0SRob Clark uint32_t type; 3837198e6b0SRob Clark uint32_t size; /* in dwords */ 38478babc16SRob Clark uint64_t iova; 38520224d71SRob Clark uint32_t offset;/* in dwords */ 386a7d3c950SRob Clark uint32_t idx; /* cmdstream buffer idx in bos[] */ 38720224d71SRob Clark uint32_t nr_relocs; 38820224d71SRob Clark struct drm_msm_gem_submit_reloc *relocs; 3896b597ce2SRob Clark } *cmd; /* array of size nr_cmds */ 3907198e6b0SRob Clark struct { 39195d1deb0SRob Clark /* make sure these don't conflict w/ MSM_SUBMIT_BO_x */ 39295d1deb0SRob Clark #define BO_VALID 0x8000 /* is current addr in cmdstream correct/valid? */ 39395d1deb0SRob Clark #define BO_LOCKED 0x4000 /* obj lock is held */ 39495d1deb0SRob Clark #define BO_ACTIVE 0x2000 /* active refcnt is held */ 395311e03c2SRob Clark #define BO_OBJ_PINNED 0x1000 /* obj (pages) is pinned and on active list */ 396311e03c2SRob Clark #define BO_VMA_PINNED 0x0800 /* vma (virtual address) is pinned */ 3977198e6b0SRob Clark uint32_t flags; 398b673499aSKristian H. Kristensen union { 3997198e6b0SRob Clark struct msm_gem_object *obj; 400b673499aSKristian H. Kristensen uint32_t handle; 401b673499aSKristian H. 
Kristensen }; 40278babc16SRob Clark uint64_t iova; 40327674c66SRob Clark struct msm_gem_vma *vma; 4044c145df1SGustavo A. R. Silva } bos[]; 4057198e6b0SRob Clark }; 4067198e6b0SRob Clark 4071d8a5ca4SRob Clark static inline struct msm_gem_submit *to_msm_submit(struct drm_sched_job *job) 4081d8a5ca4SRob Clark { 4091d8a5ca4SRob Clark return container_of(job, struct msm_gem_submit, base); 4101d8a5ca4SRob Clark } 4111d8a5ca4SRob Clark 412964d2f97SRob Clark void __msm_gem_submit_destroy(struct kref *kref); 413964d2f97SRob Clark 414964d2f97SRob Clark static inline void msm_gem_submit_get(struct msm_gem_submit *submit) 415964d2f97SRob Clark { 416964d2f97SRob Clark kref_get(&submit->ref); 417964d2f97SRob Clark } 418964d2f97SRob Clark 419964d2f97SRob Clark static inline void msm_gem_submit_put(struct msm_gem_submit *submit) 420964d2f97SRob Clark { 421964d2f97SRob Clark kref_put(&submit->ref, __msm_gem_submit_destroy); 422964d2f97SRob Clark } 423964d2f97SRob Clark 424be40596bSRob Clark void msm_submit_retire(struct msm_gem_submit *submit); 425be40596bSRob Clark 426e515af8dSRob Clark /* helper to determine of a buffer in submit should be dumped, used for both 427e515af8dSRob Clark * devcoredump and debugfs cmdstream dumping: 428e515af8dSRob Clark */ 429e515af8dSRob Clark static inline bool 430e515af8dSRob Clark should_dump(struct msm_gem_submit *submit, int idx) 431e515af8dSRob Clark { 432e515af8dSRob Clark extern bool rd_full; 433e515af8dSRob Clark return rd_full || (submit->bos[idx].flags & MSM_SUBMIT_BO_DUMP); 434e515af8dSRob Clark } 435e515af8dSRob Clark 436c8afe684SRob Clark #endif /* __MSM_GEM_H__ */ 437