/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __MSM_GEM_H__
#define __MSM_GEM_H__

#include <linux/kref.h>
#include <linux/reservation.h>
#include "msm_drv.h"

/* Additional internal-use only BO flags: */
#define MSM_BO_STOLEN        0x10000000    /* try to use stolen/splash memory */

/*
 * A GPU address space: a drm_mm range allocator layered on top of an MMU
 * backend.  Lifetime is managed by 'kref'.
 */
struct msm_gem_address_space {
	const char *name;
	/* NOTE: mm managed at the page level, size is in # of pages
	 * and position mm_node->start is in # of pages:
	 */
	struct drm_mm mm;
	spinlock_t lock; /* Protects drm_mm node allocation/removal */
	struct msm_mmu *mmu;	/* MMU backend doing the actual map/unmap */
	struct kref kref;	/* refcount on the address space */
};

/* Per-(object, address-space) mapping: one node in msm_gem_object::vmas. */
struct msm_gem_vma {
	struct drm_mm_node node;
	uint64_t iova;
	struct msm_gem_address_space *aspace;
	struct list_head list;	/* node in msm_gem_object::vmas */
	bool mapped;		/* presumably set once pages are mapped at 'iova' — confirm in vma code */
	int inuse;		/* pin count — NOTE(review): semantics not visible here, verify */
};

struct msm_gem_object {
	struct drm_gem_object base;

	uint32_t flags;

	/**
	 * Advice: are the backing pages purgeable?
	 */
	uint8_t madv;

	/**
	 * count of active vmap'ing
	 */
	uint8_t vmap_count;

	/* And object is either:
	 *  inactive - on priv->inactive_list
	 *  active   - on one of the gpu's active_list..  well, at
	 *     least for now we don't have (I don't think) hw sync between
	 *     2d and 3d one devices which have both, meaning we need to
	 *     block on submit if a bo is already on other ring
	 *
	 */
	struct list_head mm_list;
	struct msm_gpu *gpu;     /* non-null if active */

	/* Transiently in the process of submit ioctl, objects associated
	 * with the submit are on submit->bo_list.. this only lasts for
	 * the duration of the ioctl, so one bo can never be on multiple
	 * submit lists.
	 */
	struct list_head submit_entry;

	struct page **pages;
	struct sg_table *sgt;
	void *vaddr;		/* kernel mapping, valid while vmap_count > 0 */

	struct list_head vmas;    /* list of msm_gem_vma */

	/* normally (resv == &_resv) except for imported bo's */
	struct reservation_object *resv;
	struct reservation_object _resv;

	/* For physically contiguous buffers.  Used when we don't have
	 * an IOMMU.  Also used for stolen/splashscreen buffer.
	 */
	struct drm_mm_node *vram_node;
	struct mutex lock; /* Protects resources associated with bo */
};
/* Convert an embedded drm_gem_object pointer back to its msm_gem_object. */
#define to_msm_bo(x) container_of(x, struct msm_gem_object, base)

/* An object is active while it is on some gpu's active list (gpu non-null). */
static inline bool is_active(struct msm_gem_object *msm_obj)
{
	return msm_obj->gpu != NULL;
}

/* Purgeable: userspace marked it DONTNEED, pages are present (sgt), and it
 * is not shared via dma-buf (we can't discard pages someone else may use).
 */
static inline bool is_purgeable(struct msm_gem_object *msm_obj)
{
	WARN_ON(!mutex_is_locked(&msm_obj->base.dev->struct_mutex));
	return (msm_obj->madv == MSM_MADV_DONTNEED) && msm_obj->sgt &&
			!msm_obj->base.dma_buf && !msm_obj->base.import_attach;
}

/* A kernel mapping exists but nobody is using it, so it can be torn down. */
static inline bool is_vunmapable(struct msm_gem_object *msm_obj)
{
	return (msm_obj->vmap_count == 0) && msm_obj->vaddr;
}

/* The shrinker can be triggered while we hold objA->lock, and need
 * to grab
objB->lock to purge it. Lockdep just sees these as a single 1180e08270aSSushmita Susheelendra * class of lock, so we use subclasses to teach it the difference. 1190e08270aSSushmita Susheelendra * 1200e08270aSSushmita Susheelendra * OBJ_LOCK_NORMAL is implicit (ie. normal mutex_lock() call), and 1210e08270aSSushmita Susheelendra * OBJ_LOCK_SHRINKER is used by shrinker. 1220e08270aSSushmita Susheelendra * 1230e08270aSSushmita Susheelendra * It is *essential* that we never go down paths that could trigger the 1240e08270aSSushmita Susheelendra * shrinker for a purgable object. This is ensured by checking that 1250e08270aSSushmita Susheelendra * msm_obj->madv == MSM_MADV_WILLNEED. 1260e08270aSSushmita Susheelendra */ 1270e08270aSSushmita Susheelendra enum msm_gem_lock { 1280e08270aSSushmita Susheelendra OBJ_LOCK_NORMAL, 1290e08270aSSushmita Susheelendra OBJ_LOCK_SHRINKER, 1300e08270aSSushmita Susheelendra }; 1310e08270aSSushmita Susheelendra 1320e08270aSSushmita Susheelendra void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass); 1330e08270aSSushmita Susheelendra void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass); 1340e08270aSSushmita Susheelendra 1357198e6b0SRob Clark /* Created per submit-ioctl, to track bo's and cmdstream bufs, etc, 1367198e6b0SRob Clark * associated with the cmdstream submission for synchronization (and 1377198e6b0SRob Clark * make it easier to unwind when things go wrong, etc). This only 1387198e6b0SRob Clark * lasts for the duration of the submit-ioctl. 
1397198e6b0SRob Clark */ 1407198e6b0SRob Clark struct msm_gem_submit { 1417198e6b0SRob Clark struct drm_device *dev; 1427198e6b0SRob Clark struct msm_gpu *gpu; 143f97decacSJordan Crouse struct list_head node; /* node in ring submit list */ 1447198e6b0SRob Clark struct list_head bo_list; 1457198e6b0SRob Clark struct ww_acquire_ctx ticket; 146f97decacSJordan Crouse uint32_t seqno; /* Sequence number of the submit on the ring */ 147f54d1867SChris Wilson struct dma_fence *fence; 148f7de1545SJordan Crouse struct msm_gpu_submitqueue *queue; 1494816b626SRob Clark struct pid *pid; /* submitting process */ 150340faef2SRob Clark bool valid; /* true if no cmdstream patching needed */ 1516a8bd08dSRob Clark bool in_rb; /* "sudo" mode, copy cmds into RB */ 152f97decacSJordan Crouse struct msm_ringbuffer *ring; 1537198e6b0SRob Clark unsigned int nr_cmds; 1547198e6b0SRob Clark unsigned int nr_bos; 1554241db42SJordan Crouse u32 ident; /* A "identifier" for the submit for logging */ 1567198e6b0SRob Clark struct { 1577198e6b0SRob Clark uint32_t type; 1587198e6b0SRob Clark uint32_t size; /* in dwords */ 15978babc16SRob Clark uint64_t iova; 160a7d3c950SRob Clark uint32_t idx; /* cmdstream buffer idx in bos[] */ 1616b597ce2SRob Clark } *cmd; /* array of size nr_cmds */ 1627198e6b0SRob Clark struct { 1637198e6b0SRob Clark uint32_t flags; 1647198e6b0SRob Clark struct msm_gem_object *obj; 16578babc16SRob Clark uint64_t iova; 1667198e6b0SRob Clark } bos[0]; 1677198e6b0SRob Clark }; 1687198e6b0SRob Clark 169c8afe684SRob Clark #endif /* __MSM_GEM_H__ */ 170