xref: /openbmc/linux/drivers/gpu/drm/msm/msm_gem.h (revision 4bb1eb3c)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * Copyright (C) 2013 Red Hat
4  * Author: Rob Clark <robdclark@gmail.com>
5  */
6 
7 #ifndef __MSM_GEM_H__
8 #define __MSM_GEM_H__
9 
10 #include <linux/kref.h>
11 #include <linux/dma-resv.h>
12 #include "msm_drv.h"
13 
14 /* Additional internal-use only BO flags: */
15 #define MSM_BO_STOLEN        0x10000000    /* try to use stolen/splash memory */
16 #define MSM_BO_MAP_PRIV      0x20000000    /* use IOMMU_PRIV when mapping */
17 
/*
 * A GPU address space that GEM objects can be mapped into, backed by an
 * msm_mmu.  Reference counted via kref (see aspace get/put helpers
 * elsewhere in the driver).
 */
struct msm_gem_address_space {
	const char *name;
	/* NOTE: mm managed at the page level, size is in # of pages
	 * and position mm_node->start is in # of pages:
	 */
	struct drm_mm mm;
	spinlock_t lock; /* Protects drm_mm node allocation/removal */
	struct msm_mmu *mmu;	/* backing MMU (IOMMU or GPU MMU) doing the actual mapping */
	struct kref kref;	/* refcount on the aspace itself */
};
28 
/*
 * One mapping of a GEM object into one address space.  An object may have
 * several of these, chained on msm_gem_object::vmas.
 */
struct msm_gem_vma {
	struct drm_mm_node node;	/* range allocated from aspace->mm (page granularity, per aspace note) */
	uint64_t iova;			/* GPU virtual address of the mapping */
	struct msm_gem_address_space *aspace;	/* the aspace this vma belongs to */
	struct list_head list;    /* node in msm_gem_object::vmas */
	bool mapped;		/* true once pages are actually mapped in the MMU */
	int inuse;		/* NOTE(review): appears to be a use/pin count — confirm against msm_gem_vma.c */
};
37 
/*
 * Driver-private state wrapped around each drm_gem_object.
 */
struct msm_gem_object {
	struct drm_gem_object base;

	uint32_t flags;		/* MSM_BO_x flags, incl. the internal ones above */

	/**
	 * Advice: are the backing pages purgeable?
	 */
	uint8_t madv;

	/**
	 * count of active vmap'ing
	 */
	uint8_t vmap_count;

	/* An object is either:
	 *  inactive - on priv->inactive_list
	 *  active   - on one of the gpu's active_list..  well, at
	 *     least for now we don't have (I don't think) hw sync between
	 *     2d and 3d devices which have both, meaning we need to
	 *     block on submit if a bo is already on other ring
	 *
	 */
	struct list_head mm_list;
	struct msm_gpu *gpu;     /* non-null if active */

	/* Transiently in the process of submit ioctl, objects associated
	 * with the submit are on submit->bo_list.. this only lasts for
	 * the duration of the ioctl, so one bo can never be on multiple
	 * submit lists.
	 */
	struct list_head submit_entry;

	struct page **pages;	/* backing pages, when attached */
	struct sg_table *sgt;	/* sg table for the backing pages */
	void *vaddr;		/* kernel virtual mapping, non-NULL while vmap'd */

	struct list_head vmas;    /* list of msm_gem_vma */

	struct llist_node freed;	/* node for deferred free, presumably drained by msm_gem_free_work() */

	/* For physically contiguous buffers.  Used when we don't have
	 * an IOMMU.  Also used for stolen/splashscreen buffer.
	 */
	struct drm_mm_node *vram_node;
	struct mutex lock; /* Protects resources associated with bo */

	char name[32]; /* Identifier to print for the debugfs files */
};
#define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
88 
89 static inline bool is_active(struct msm_gem_object *msm_obj)
90 {
91 	return msm_obj->gpu != NULL;
92 }
93 
94 static inline bool is_purgeable(struct msm_gem_object *msm_obj)
95 {
96 	WARN_ON(!mutex_is_locked(&msm_obj->base.dev->struct_mutex));
97 	return (msm_obj->madv == MSM_MADV_DONTNEED) && msm_obj->sgt &&
98 			!msm_obj->base.dma_buf && !msm_obj->base.import_attach;
99 }
100 
101 static inline bool is_vunmapable(struct msm_gem_object *msm_obj)
102 {
103 	return (msm_obj->vmap_count == 0) && msm_obj->vaddr;
104 }
105 
/* The shrinker can be triggered while we hold objA->lock, and need
 * to grab objB->lock to purge it.  Lockdep just sees these as a single
 * class of lock, so we use subclasses to teach it the difference.
 *
 * OBJ_LOCK_NORMAL is implicit (ie. normal mutex_lock() call), and
 * OBJ_LOCK_SHRINKER is used by shrinker.
 *
 * It is *essential* that we never go down paths that could trigger the
 * shrinker for a purgeable object.  This is ensured by checking that
 * msm_obj->madv == MSM_MADV_WILLNEED.
 */
enum msm_gem_lock {
	OBJ_LOCK_NORMAL,	/* default subclass for mutex_lock(&msm_obj->lock) */
	OBJ_LOCK_SHRINKER,	/* subclass used when locking from the shrinker */
};
121 
/* Reclaim a purgeable object's resources; subclass selects the lockdep
 * subclass (see enum msm_gem_lock) used when taking the object lock: */
void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass);
/* Tear down the kernel vmap of an object (see is_vunmapable()): */
void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass);
/* Deferred-free worker; presumably drains msm_gem_object::freed llist nodes: */
void msm_gem_free_work(struct work_struct *work);
125 
/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
 * associated with the cmdstream submission for synchronization (and
 * make it easier to unwind when things go wrong, etc).  This only
 * lasts for the duration of the submit-ioctl.
 */
struct msm_gem_submit {
	struct drm_device *dev;
	struct msm_gpu *gpu;
	struct msm_gem_address_space *aspace;	/* aspace the bos are mapped into */
	struct list_head node;   /* node in ring submit list */
	struct list_head bo_list;	/* list of bos in this submit (see submit_entry above) */
	struct ww_acquire_ctx ticket;	/* ww ctx for locking the bo set */
	uint32_t seqno;		/* Sequence number of the submit on the ring */
	struct dma_fence *fence;	/* fence signalled when the submit retires */
	struct msm_gpu_submitqueue *queue;
	struct pid *pid;    /* submitting process */
	bool valid;         /* true if no cmdstream patching needed */
	bool in_rb;         /* "sudo" mode, copy cmds into RB */
	struct msm_ringbuffer *ring;
	unsigned int nr_cmds;	/* number of entries in cmd[] */
	unsigned int nr_bos;	/* number of entries in bos[] */
	u32 ident;	   /* An "identifier" for the submit for logging */
	struct {
		uint32_t type;	/* MSM_SUBMIT_CMD_x type of this cmdstream buf */
		uint32_t size;  /* in dwords */
		uint64_t iova;
		uint32_t idx;   /* cmdstream buffer idx in bos[] */
	} *cmd;  /* array of size nr_cmds */
	struct {
		uint32_t flags;	/* MSM_SUBMIT_BO_x flags for this bo */
		union {
			struct msm_gem_object *obj;	/* resolved object (after lookup) */
			uint32_t handle;		/* userspace handle (before lookup) */
		};
		uint64_t iova;
	} bos[];	/* flexible array of size nr_bos */
};
163 
164 /* helper to determine of a buffer in submit should be dumped, used for both
165  * devcoredump and debugfs cmdstream dumping:
166  */
167 static inline bool
168 should_dump(struct msm_gem_submit *submit, int idx)
169 {
170 	extern bool rd_full;
171 	return rd_full || (submit->bos[idx].flags & MSM_SUBMIT_BO_DUMP);
172 }
173 
174 #endif /* __MSM_GEM_H__ */
175