/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright 2023 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef VMWGFX_BO_H
#define VMWGFX_BO_H

#include "device_include/svga_reg.h"

#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>

#include <linux/rbtree_types.h>
#include <linux/types.h>

struct vmw_bo_dirty;
struct vmw_fence_obj;
struct vmw_private;
struct vmw_resource;

enum vmw_bo_domain {
	VMW_BO_DOMAIN_SYS          = BIT(0),
	VMW_BO_DOMAIN_WAITABLE_SYS = BIT(1),
	VMW_BO_DOMAIN_VRAM         = BIT(2),
	VMW_BO_DOMAIN_GMR          = BIT(3),
	VMW_BO_DOMAIN_MOB          = BIT(4),
};

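/**
 * struct vmw_bo_params - Parameters used by vmw_bo_create()
 * @domain: Bitmask of &enum vmw_bo_domain describing the initial placement
 * @busy_domain: Bitmask of &enum vmw_bo_domain to fall back to when the
 * preferred @domain placement cannot be satisfied
 * @bo_type: The TTM buffer object type
 * @size: Size of the buffer object in bytes
 * @pin: Whether the buffer object should be created pinned
 * @resv: Optional externally supplied reservation object for the new
 * buffer object
 * @sg: Optional scatter-gather table describing externally provided
 * backing pages (for example for imported buffers)
 */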
struct vmw_bo_params {
	u32 domain;
	u32 busy_domain;
	enum ttm_bo_type bo_type;
	size_t size;
	bool pin;
	struct dma_resv *resv;
	struct sg_table *sg;
};

/**
 * struct vmw_bo - TTM buffer object with vmwgfx additions
 * @tbo: The TTM buffer object
 * @placement: The preferred placement for this buffer object
 * @places: The chosen places for the preferred placement.
 * @busy_places: Chosen busy places for the preferred placement
 * @map: Kmap object for semi-persistent mappings
 * @res_tree: RB tree of resources using this buffer object as a backing MOB
 * @res_prios: Eviction priority counts for attached resources
 * @map_count: The number of currently active maps. Will differ from the
 * cpu_writers because it includes kernel maps.
 * @cpu_writers: Number of synccpu write grabs. Protected by reservation when
 * increased. May be decreased without reservation.
 * @dx_query_ctx: DX context if this buffer object is used as a DX query MOB
 * @dirty: structure for user-space dirty-tracking
 */
struct vmw_bo {
	struct ttm_buffer_object tbo;

	struct ttm_placement placement;
	struct ttm_place places[5];
	struct ttm_place busy_places[5];

	/* Protected by reservation */
	struct ttm_bo_kmap_obj map;

	struct rb_root res_tree;
	u32 res_prios[TTM_MAX_BO_PRIORITY];

	atomic_t map_count;
	atomic_t cpu_writers;
	/* Not ref-counted. Protected by binding_mutex */
	struct vmw_resource *dx_query_ctx;
	struct vmw_bo_dirty *dirty;
};

void vmw_bo_placement_set(struct vmw_bo *bo, u32 domain, u32 busy_domain);
void vmw_bo_placement_set_default_accelerated(struct vmw_bo *bo);

int vmw_bo_create(struct vmw_private *dev_priv,
		  struct vmw_bo_params *params,
		  struct vmw_bo **p_bo);

int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv);

int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
		       struct vmw_bo *buf,
		       bool interruptible);
int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
			      struct vmw_bo *buf,
			      bool interruptible);
int vmw_bo_pin_in_start_of_vram(struct vmw_private *vmw_priv,
				struct vmw_bo *bo,
				bool interruptible);
void vmw_bo_pin_reserved(struct vmw_bo *bo, bool pin);
int vmw_bo_unpin(struct vmw_private *vmw_priv,
		 struct vmw_bo *bo,
		 bool interruptible);

void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
			  SVGAGuestPtr *ptr);
int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
void vmw_bo_fence_single(struct ttm_buffer_object *bo,
			 struct vmw_fence_obj *fence);

void *vmw_bo_map_and_cache(struct vmw_bo *vbo);
void vmw_bo_unmap(struct vmw_bo *vbo);

void vmw_bo_move_notify(struct ttm_buffer_object *bo,
			struct ttm_resource *mem);
void vmw_bo_swap_notify(struct ttm_buffer_object *bo);

int vmw_user_bo_lookup(struct drm_file *filp,
		       u32 handle,
		       struct vmw_bo **out);

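/*
 * Typical kernel-internal usage, sketched for illustration only (not lifted
 * from an in-tree caller): fill a struct vmw_bo_params, create the buffer
 * object with vmw_bo_create(), optionally pin it, and drop the reference
 * with vmw_bo_unreference() when done.  "dev_priv" stands for the caller's
 * struct vmw_private; the domains and size below are arbitrary examples.
 *
 *	struct vmw_bo_params params = {
 *		.domain      = VMW_BO_DOMAIN_VRAM | VMW_BO_DOMAIN_SYS,
 *		.busy_domain = VMW_BO_DOMAIN_SYS,
 *		.bo_type     = ttm_bo_type_kernel,
 *		.size        = PAGE_SIZE,
 *		.pin         = false,
 *	};
 *	struct vmw_bo *vbo;
 *	int ret;
 *
 *	ret = vmw_bo_create(dev_priv, &params, &vbo);
 *	if (ret)
 *		return ret;
 *
 *	ret = vmw_bo_pin_in_vram(dev_priv, vbo, true);
 *	...
 *	vmw_bo_unreference(&vbo);
 */
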
/**
 * vmw_bo_prio_adjust - Adjust the buffer object eviction priority
 * according to attached resources
 * @vbo: The struct vmw_bo
 */
static inline void vmw_bo_prio_adjust(struct vmw_bo *vbo)
{
	int i = ARRAY_SIZE(vbo->res_prios);

	while (i--) {
		if (vbo->res_prios[i]) {
			vbo->tbo.priority = i;
			return;
		}
	}

	vbo->tbo.priority = 3;
}

/**
 * vmw_bo_prio_add - Notify a buffer object of a newly attached resource
 * eviction priority
 * @vbo: The struct vmw_bo
 * @prio: The resource priority
 *
 * After being notified, the code assigns the highest resource eviction priority
 * to the backing buffer object (mob).
 */
static inline void vmw_bo_prio_add(struct vmw_bo *vbo, int prio)
{
	if (vbo->res_prios[prio]++ == 0)
		vmw_bo_prio_adjust(vbo);
}

/**
 * vmw_bo_prio_del - Notify a buffer object of a resource with a certain
 * priority being removed
 * @vbo: The struct vmw_bo
 * @prio: The resource priority
 *
 * After being notified, the code assigns the highest resource eviction priority
 * to the backing buffer object (mob).
 */
static inline void vmw_bo_prio_del(struct vmw_bo *vbo, int prio)
{
	if (--vbo->res_prios[prio] == 0)
		vmw_bo_prio_adjust(vbo);
}

static inline void vmw_bo_unreference(struct vmw_bo **buf)
{
	struct vmw_bo *tmp_buf = *buf;

	*buf = NULL;
	if (tmp_buf)
		ttm_bo_put(&tmp_buf->tbo);
}

static inline struct vmw_bo *vmw_bo_reference(struct vmw_bo *buf)
{
	ttm_bo_get(&buf->tbo);
	return buf;
}

static inline struct vmw_bo *vmw_user_bo_ref(struct vmw_bo *vbo)
{
	drm_gem_object_get(&vbo->tbo.base);
	return vbo;
}

static inline void vmw_user_bo_unref(struct vmw_bo **buf)
{
	struct vmw_bo *tmp_buf = *buf;

	*buf = NULL;
	if (tmp_buf)
		drm_gem_object_put(&tmp_buf->tbo.base);
}

static inline struct vmw_bo *to_vmw_bo(struct drm_gem_object *gobj)
{
	return container_of(gobj, struct vmw_bo, tbo.base);
}

#endif // VMWGFX_BO_H