/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright 2023 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef VMWGFX_BO_H
#define VMWGFX_BO_H

#include "device_include/svga_reg.h"

#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>

#include <linux/rbtree_types.h>
#include <linux/types.h>

struct vmw_bo_dirty;
struct vmw_fence_obj;
struct vmw_private;
struct vmw_resource;

enum vmw_bo_domain {
	VMW_BO_DOMAIN_SYS = BIT(0),
	VMW_BO_DOMAIN_WAITABLE_SYS = BIT(1),
	VMW_BO_DOMAIN_VRAM = BIT(2),
	VMW_BO_DOMAIN_GMR = BIT(3),
	VMW_BO_DOMAIN_MOB = BIT(4),
};

struct vmw_bo_params {
	u32 domain;
	u32 busy_domain;
	enum ttm_bo_type bo_type;
	size_t size;
	bool pin;
	struct dma_resv *resv;
	struct sg_table *sg;
};

/**
 * struct vmw_bo - TTM buffer object with vmwgfx additions
 * @tbo: The TTM buffer object
 * @placement: The preferred placement for this buffer object
 * @places: The chosen places for the preferred placement.
 * @busy_places: Chosen busy places for the preferred placement
 * @map: Kmap object for semi-persistent mappings
 * @res_tree: RB tree of resources using this buffer object as a backing MOB
 * @res_prios: Eviction priority counts for attached resources
 * @cpu_writers: Number of synccpu write grabs. Protected by reservation when
 * increased. May be decreased without reservation.
 * @dx_query_ctx: DX context if this buffer object is used as a DX query MOB
 * @dirty: structure for user-space dirty-tracking
 */
struct vmw_bo {
	struct ttm_buffer_object tbo;

	struct ttm_placement placement;
	struct ttm_place places[5];
	struct ttm_place busy_places[5];

	/* Protected by reservation */
	struct ttm_bo_kmap_obj map;

	struct rb_root res_tree;
	u32 res_prios[TTM_MAX_BO_PRIORITY];

	atomic_t cpu_writers;
	/* Not ref-counted. Protected by binding_mutex */
	struct vmw_resource *dx_query_ctx;
	struct vmw_bo_dirty *dirty;
};

void vmw_bo_placement_set(struct vmw_bo *bo, u32 domain, u32 busy_domain);
void vmw_bo_placement_set_default_accelerated(struct vmw_bo *bo);

int vmw_bo_create(struct vmw_private *dev_priv,
		  struct vmw_bo_params *params,
		  struct vmw_bo **p_bo);

int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv);

int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
		       struct vmw_bo *buf,
		       bool interruptible);
int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
			      struct vmw_bo *buf,
			      bool interruptible);
int vmw_bo_pin_in_start_of_vram(struct vmw_private *vmw_priv,
				struct vmw_bo *bo,
				bool interruptible);
void vmw_bo_pin_reserved(struct vmw_bo *bo, bool pin);
int vmw_bo_unpin(struct vmw_private *vmw_priv,
		 struct vmw_bo *bo,
		 bool interruptible);

void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
			  SVGAGuestPtr *ptr);
int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
void vmw_bo_fence_single(struct ttm_buffer_object *bo,
			 struct vmw_fence_obj *fence);

void *vmw_bo_map_and_cache(struct vmw_bo *vbo);
void vmw_bo_unmap(struct vmw_bo *vbo);

void vmw_bo_move_notify(struct ttm_buffer_object *bo,
			struct ttm_resource *mem);
void vmw_bo_swap_notify(struct ttm_buffer_object *bo);

int vmw_user_bo_lookup(struct drm_file *filp,
		       u32 handle,
		       struct vmw_bo **out);

/**
 * vmw_bo_prio_adjust - Adjust the buffer object eviction priority
 * according to attached resources
 * @vbo: The struct vmw_bo
 */
static inline void vmw_bo_prio_adjust(struct vmw_bo *vbo)
{
	int i = ARRAY_SIZE(vbo->res_prios);

	while (i--) {
		if (vbo->res_prios[i]) {
			vbo->tbo.priority = i;
			return;
		}
	}

	vbo->tbo.priority = 3;
}

/**
 * vmw_bo_prio_add - Notify a buffer object of a newly attached resource
 * eviction priority
 * @vbo: The struct vmw_bo
 * @prio: The resource priority
 *
 * After being notified, the code assigns the highest resource eviction priority
 * to the backing buffer object (mob).
 */
static inline void vmw_bo_prio_add(struct vmw_bo *vbo, int prio)
{
	if (vbo->res_prios[prio]++ == 0)
		vmw_bo_prio_adjust(vbo);
}

/**
 * vmw_bo_prio_del - Notify a buffer object of a resource with a certain
 * priority being removed
 * @vbo: The struct vmw_bo
 * @prio: The resource priority
 *
 * After being notified, the code assigns the highest resource eviction priority
 * to the backing buffer object (mob).
 */
static inline void vmw_bo_prio_del(struct vmw_bo *vbo, int prio)
{
	if (--vbo->res_prios[prio] == 0)
		vmw_bo_prio_adjust(vbo);
}

/* Drop a TTM reference to @*buf and clear the pointer. NULL-safe. */
static inline void vmw_bo_unreference(struct vmw_bo **buf)
{
	struct vmw_bo *tmp_buf = *buf;

	*buf = NULL;
	if (tmp_buf)
		ttm_bo_put(&tmp_buf->tbo);
}

/* Take a TTM reference on @buf and return it for convenient chaining. */
static inline struct vmw_bo *vmw_bo_reference(struct vmw_bo *buf)
{
	ttm_bo_get(&buf->tbo);
	return buf;
}

/* Take a GEM reference on @vbo and return it for convenient chaining. */
static inline struct vmw_bo *vmw_user_bo_ref(struct vmw_bo *vbo)
{
	drm_gem_object_get(&vbo->tbo.base);
	return vbo;
}

/* Drop a GEM reference to @*buf and clear the pointer. NULL-safe. */
static inline void vmw_user_bo_unref(struct vmw_bo **buf)
{
	struct vmw_bo *tmp_buf = *buf;

	*buf = NULL;
	if (tmp_buf)
		drm_gem_object_put(&tmp_buf->tbo.base);
}

/* Convert an embedded GEM object pointer back to its containing vmw_bo. */
static inline struct vmw_bo *to_vmw_bo(struct drm_gem_object *gobj)
{
	return container_of((gobj), struct vmw_bo, tbo.base);
}

#endif // VMWGFX_BO_H
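
/*
 * Illustrative only: a minimal sketch of how the eviction-priority helpers
 * above are meant to be paired by a resource attach/detach path.  The
 * functions vmw_example_mob_attach()/vmw_example_mob_detach() and the
 * res->used_prio field are assumptions made for this sketch and are not part
 * of this header; the driver's real callers live elsewhere and hold whatever
 * reservation/locking they require.  The block is excluded from compilation.
 */
#if 0
static void vmw_example_mob_attach(struct vmw_resource *res,
				   struct vmw_bo *vbo)
{
	/*
	 * Count one more attached resource at this priority; on the 0 -> 1
	 * transition the helper recomputes vbo->tbo.priority so it tracks
	 * the highest priority currently in use.
	 */
	vmw_bo_prio_add(vbo, res->used_prio);
}

static void vmw_example_mob_detach(struct vmw_resource *res,
				   struct vmw_bo *vbo)
{
	/*
	 * Drop the count; when the last resource at this priority goes away,
	 * the BO falls back to the next highest priority still in use, or to
	 * the default priority (3) when none remain.
	 */
	vmw_bo_prio_del(vbo, res->used_prio);
}
#endif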