/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright 2023 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef VMWGFX_BO_H
#define VMWGFX_BO_H

#include "device_include/svga_reg.h"

#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>

#include <linux/rbtree_types.h>
#include <linux/types.h>

struct vmw_bo_dirty;
struct vmw_fence_obj;
struct vmw_private;
struct vmw_resource;

enum vmw_bo_domain {
	VMW_BO_DOMAIN_SYS           = BIT(0),
	VMW_BO_DOMAIN_WAITABLE_SYS  = BIT(1),
	VMW_BO_DOMAIN_VRAM          = BIT(2),
	VMW_BO_DOMAIN_GMR           = BIT(3),
	VMW_BO_DOMAIN_MOB           = BIT(4),
};

struct vmw_bo_params {
	u32 domain;
	u32 busy_domain;
	enum ttm_bo_type bo_type;
	bool pin;
	bool keep_resv;
	size_t size;
	struct dma_resv *resv;
	struct sg_table *sg;
};

/**
 * struct vmw_bo - TTM buffer object with vmwgfx additions
 * @tbo: The TTM buffer object
 * @placement: The preferred placement for this buffer object
 * @places: The chosen places for the preferred placement.
 * @busy_places: Chosen busy places for the preferred placement
 * @map: Kmap object for semi-persistent mappings
 * @res_tree: RB tree of resources using this buffer object as a backing MOB
 * @res_prios: Eviction priority counts for attached resources
 * @map_count: The number of currently active maps. Will differ from the
 * cpu_writers because it includes kernel maps.
 * @cpu_writers: Number of synccpu write grabs. Protected by reservation when
 * increased. May be decreased without reservation.
 * @dx_query_ctx: DX context if this buffer object is used as a DX query MOB
 * @dirty: structure for user-space dirty-tracking
 */
struct vmw_bo {
	struct ttm_buffer_object tbo;

	struct ttm_placement placement;
	struct ttm_place places[5];
	struct ttm_place busy_places[5];

	/* Protected by reservation */
	struct ttm_bo_kmap_obj map;

	struct rb_root res_tree;
	u32 res_prios[TTM_MAX_BO_PRIORITY];

	atomic_t map_count;
	atomic_t cpu_writers;
	/* Not ref-counted.  Protected by binding_mutex */
	struct vmw_resource *dx_query_ctx;
	struct vmw_bo_dirty *dirty;
};

void vmw_bo_placement_set(struct vmw_bo *bo, u32 domain, u32 busy_domain);
void vmw_bo_placement_set_default_accelerated(struct vmw_bo *bo);
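
/*
 * Example (illustrative sketch only, not part of the driver API): the domain
 * arguments are VMW_BO_DOMAIN_* bits and may be OR'ed together, with the busy
 * domain describing the fallback placement used under memory pressure. The
 * names "vbo" and "ctx" below are hypothetical caller variables.
 *
 *	vmw_bo_placement_set(vbo,
 *			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
 *			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
 *	ret = ttm_bo_validate(&vbo->tbo, &vbo->placement, &ctx);
 */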

int vmw_bo_create(struct vmw_private *dev_priv,
		  struct vmw_bo_params *params,
		  struct vmw_bo **p_bo);
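
/*
 * Example (illustrative sketch only): a kernel-internal, system-memory backed
 * buffer could be created roughly as below. The size, domains and pinning are
 * hypothetical values chosen for the example, and error handling is elided.
 *
 *	struct vmw_bo_params params = {
 *		.domain = VMW_BO_DOMAIN_SYS,
 *		.busy_domain = VMW_BO_DOMAIN_SYS,
 *		.bo_type = ttm_bo_type_kernel,
 *		.size = PAGE_SIZE,
 *		.pin = true,
 *	};
 *	struct vmw_bo *vbo;
 *	int ret = vmw_bo_create(dev_priv, &params, &vbo);
 *
 *	if (!ret)
 *		... use vbo, then drop the reference when done ...
 */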

int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv);

int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
		       struct vmw_bo *buf,
		       bool interruptible);
int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
			      struct vmw_bo *buf,
			      bool interruptible);
int vmw_bo_pin_in_start_of_vram(struct vmw_private *vmw_priv,
				struct vmw_bo *bo,
				bool interruptible);
void vmw_bo_pin_reserved(struct vmw_bo *bo, bool pin);
int vmw_bo_unpin(struct vmw_private *vmw_priv,
		 struct vmw_bo *bo,
		 bool interruptible);
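
/*
 * Example (illustrative sketch only): a pin is typically paired with a later
 * unpin once the buffer no longer needs a fixed placement. "buf" and the
 * error handling below are hypothetical.
 *
 *	ret = vmw_bo_pin_in_vram(dev_priv, buf, true);
 *	if (ret)
 *		return ret;
 *	... issue work that requires the buffer to stay in VRAM ...
 *	vmw_bo_unpin(dev_priv, buf, false);
 */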

void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
			  SVGAGuestPtr *ptr);
int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
void vmw_bo_fence_single(struct ttm_buffer_object *bo,
			 struct vmw_fence_obj *fence);

void *vmw_bo_map_and_cache(struct vmw_bo *vbo);
void vmw_bo_unmap(struct vmw_bo *vbo);
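
/*
 * Example (illustrative sketch only): @map is protected by the buffer
 * reservation, so a CPU access might look roughly like the following. The
 * names and error handling are hypothetical, and the cached map may also be
 * left in place for later reuse instead of being unmapped immediately.
 *
 *	ret = ttm_bo_reserve(&vbo->tbo, true, false, NULL);
 *	if (!ret) {
 *		void *ptr = vmw_bo_map_and_cache(vbo);
 *
 *		if (ptr)
 *			memcpy(ptr, data, size);
 *		vmw_bo_unmap(vbo);
 *		ttm_bo_unreserve(&vbo->tbo);
 *	}
 */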

void vmw_bo_move_notify(struct ttm_buffer_object *bo,
			struct ttm_resource *mem);
void vmw_bo_swap_notify(struct ttm_buffer_object *bo);

int vmw_user_bo_lookup(struct drm_file *filp,
		       u32 handle,
		       struct vmw_bo **out);
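
/*
 * Example (illustrative sketch only): assuming the lookup returns a GEM
 * reference on success, the caller pairs it with vmw_user_bo_unref().
 * "file_priv" and "arg->handle" are hypothetical caller variables.
 *
 *	struct vmw_bo *vbo;
 *	int ret = vmw_user_bo_lookup(file_priv, arg->handle, &vbo);
 *
 *	if (ret)
 *		return ret;
 *	... use vbo ...
 *	vmw_user_bo_unref(&vbo);
 */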

/**
 * vmw_bo_prio_adjust - Adjust the buffer object eviction priority
 * according to attached resources
 * @vbo: The struct vmw_bo
 */
static inline void vmw_bo_prio_adjust(struct vmw_bo *vbo)
{
	int i = ARRAY_SIZE(vbo->res_prios);

	while (i--) {
		if (vbo->res_prios[i]) {
			vbo->tbo.priority = i;
			return;
		}
	}

	/* No attached resources: fall back to the default priority. */
	vbo->tbo.priority = 3;
}

/**
 * vmw_bo_prio_add - Notify a buffer object of a newly attached resource
 * eviction priority
 * @vbo: The struct vmw_bo
 * @prio: The resource priority
 *
 * After being notified, the code assigns the highest resource eviction priority
 * to the backing buffer object (mob).
 */
static inline void vmw_bo_prio_add(struct vmw_bo *vbo, int prio)
{
	if (vbo->res_prios[prio]++ == 0)
		vmw_bo_prio_adjust(vbo);
}

/**
 * vmw_bo_prio_del - Notify a buffer object of a resource with a certain
 * priority being removed
 * @vbo: The struct vmw_bo
 * @prio: The resource priority
 *
 * After being notified, the code assigns the highest remaining resource
 * eviction priority to the backing buffer object (mob).
 */
static inline void vmw_bo_prio_del(struct vmw_bo *vbo, int prio)
{
	if (--vbo->res_prios[prio] == 0)
		vmw_bo_prio_adjust(vbo);
}
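
/*
 * Example (illustrative sketch only): a resource using this buffer object as
 * its backing MOB would typically bump the eviction priority when it attaches
 * and drop it again when it detaches. "res" and "used_prio" are hypothetical.
 *
 *	... on attach:
 *	vmw_bo_prio_add(vbo, res->used_prio);
 *	... on detach:
 *	vmw_bo_prio_del(vbo, res->used_prio);
 */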

static inline void vmw_bo_unreference(struct vmw_bo **buf)
{
	struct vmw_bo *tmp_buf = *buf;

	*buf = NULL;
	if (tmp_buf)
		ttm_bo_put(&tmp_buf->tbo);
}

static inline struct vmw_bo *vmw_bo_reference(struct vmw_bo *buf)
{
	ttm_bo_get(&buf->tbo);
	return buf;
}

static inline struct vmw_bo *vmw_user_bo_ref(struct vmw_bo *vbo)
{
	drm_gem_object_get(&vbo->tbo.base);
	return vbo;
}

static inline void vmw_user_bo_unref(struct vmw_bo **buf)
{
	struct vmw_bo *tmp_buf = *buf;

	*buf = NULL;
	if (tmp_buf)
		drm_gem_object_put(&tmp_buf->tbo.base);
}
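
/*
 * Note (illustrative, based only on the bodies above):
 * vmw_bo_reference()/vmw_bo_unreference() take and drop the embedded TTM
 * object reference, while vmw_user_bo_ref()/vmw_user_bo_unref() take and drop
 * the GEM reference of the same object. A simple pairing might look like:
 *
 *	struct vmw_bo *tmp = vmw_user_bo_ref(vbo);
 *	...
 *	vmw_user_bo_unref(&tmp);
 */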

static inline struct vmw_bo *to_vmw_bo(struct drm_gem_object *gobj)
{
	return container_of((gobj), struct vmw_bo, tbo.base);
}

#endif // VMWGFX_BO_H