/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#ifndef __I915_GEM_OBJECT_TYPES_H__
#define __I915_GEM_OBJECT_TYPES_H__

#include <drm/drm_gem.h>
#include <uapi/drm/i915_drm.h>

#include "i915_active.h"
#include "i915_selftest.h"

struct drm_i915_gem_object;
struct intel_frontbuffer;

/*
 * struct i915_lut_handle tracks the fast lookups from handle to vma used
 * for execbuf. Although we use a radixtree for that mapping, in order to
 * remove the entries as the object or context is closed, we need a secondary
 * list and a translation entry (struct i915_lut_handle).
 */
struct i915_lut_handle {
	struct list_head obj_link;
	struct i915_gem_context *ctx;
	u32 handle;
};

struct drm_i915_gem_object_ops {
	unsigned int flags;
#define I915_GEM_OBJECT_HAS_STRUCT_PAGE BIT(0)
#define I915_GEM_OBJECT_HAS_IOMEM BIT(1)
#define I915_GEM_OBJECT_IS_SHRINKABLE BIT(2)
#define I915_GEM_OBJECT_IS_PROXY BIT(3)
#define I915_GEM_OBJECT_NO_MMAP BIT(4)
#define I915_GEM_OBJECT_ASYNC_CANCEL BIT(5)

	/* Interface between the GEM object and its backing storage.
	 * get_pages() is called once prior to the use of the associated set
	 * of pages, before binding them into the GTT, and put_pages() is
	 * called after we no longer need them. As we expect there to be an
	 * associated cost with migrating pages between the backing storage
	 * and making them available for the GPU (e.g. clflush), we may hold
	 * onto the pages after they are no longer referenced by the GPU
	 * in case they may be used again shortly (for example migrating the
	 * pages to a different memory domain within the GTT). put_pages()
	 * will therefore most likely be called when the object itself is
	 * being released or under memory pressure (where we attempt to
	 * reap pages for the shrinker).
	 */
	int (*get_pages)(struct drm_i915_gem_object *obj);
	void (*put_pages)(struct drm_i915_gem_object *obj,
			  struct sg_table *pages);
	void (*truncate)(struct drm_i915_gem_object *obj);
	void (*writeback)(struct drm_i915_gem_object *obj);

	int (*pread)(struct drm_i915_gem_object *obj,
		     const struct drm_i915_gem_pread *arg);
	int (*pwrite)(struct drm_i915_gem_object *obj,
		      const struct drm_i915_gem_pwrite *arg);

	int (*dmabuf_export)(struct drm_i915_gem_object *obj);
	void (*release)(struct drm_i915_gem_object *obj);

	const char *name; /* friendly name for debug, e.g. lockdep classes */
};
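
/*
 * Informative sketch only (not a definition from this header): a backing
 * store would typically provide its ops table along these lines, where
 * my_get_pages()/my_put_pages() are hypothetical callbacks implemented by
 * that backend with the signatures declared above:
 *
 *	static const struct drm_i915_gem_object_ops my_object_ops = {
 *		.name = "my-object",
 *		.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
 *			 I915_GEM_OBJECT_IS_SHRINKABLE,
 *		.get_pages = my_get_pages,
 *		.put_pages = my_put_pages,
 *	};
 */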

enum i915_map_type {
	I915_MAP_WB = 0,
	I915_MAP_WC,
#define I915_MAP_OVERRIDE BIT(31)
	I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE,
	I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
};

enum i915_mmap_type {
	I915_MMAP_TYPE_GTT = 0,
	I915_MMAP_TYPE_WC,
	I915_MMAP_TYPE_WB,
	I915_MMAP_TYPE_UC,
};

struct i915_mmap_offset {
	struct drm_vma_offset_node vma_node;
	struct drm_i915_gem_object *obj;
	enum i915_mmap_type mmap_type;

	struct rb_node offset;
};

struct i915_gem_object_page_iter {
	struct scatterlist *sg_pos;
	unsigned int sg_idx; /* in pages, but 32bit eek! */

	struct radix_tree_root radix;
	struct mutex lock; /* protects this cache */
};

struct drm_i915_gem_object {
	struct drm_gem_object base;

	const struct drm_i915_gem_object_ops *ops;

	struct {
		/**
		 * @vma.lock: protect the list/tree of vmas
		 */
		spinlock_t lock;

		/**
		 * @vma.list: List of VMAs backed by this object
		 *
		 * The VMAs on this list are ordered by type: all GGTT VMAs
		 * are placed at the head and all ppGTT VMAs at the tail. The
		 * different types of GGTT VMAs are unordered between
		 * themselves; use @vma.tree (which has a defined order
		 * between all VMAs) to quickly find an exact match.
		 */
		struct list_head list;

		/**
		 * @vma.tree: Ordered tree of VMAs backed by this object
		 *
		 * All VMAs created for this object are placed in @vma.tree
		 * for fast retrieval via a binary search in
		 * i915_vma_instance(). They are also added to @vma.list for
		 * easy iteration.
		 */
		struct rb_root tree;
	} vma;

	/**
	 * @lut_list: List of vma lookup entries in use for this object.
	 *
	 * If this object is closed, we need to remove all of its VMAs from
	 * the fast lookup index in the associated contexts; @lut_list
	 * provides this translation from object to context->handles_vma.
	 */
	struct list_head lut_list;
	spinlock_t lut_lock; /* guards lut_list */

	/**
	 * @obj_link: Link into @i915_gem_ww_ctx.obj_list
	 *
	 * When we lock this object through i915_gem_object_lock() with a
	 * context, we add it to the list to ensure we can unlock everything
	 * when i915_gem_ww_ctx_backoff() or i915_gem_ww_ctx_fini() are called.
	 */
	struct list_head obj_link;

	union {
		struct rcu_head rcu;
		struct llist_node freed;
	};

	/**
	 * Whether the object is currently in the GGTT mmap.
	 */
	unsigned int userfault_count;
	struct list_head userfault_link;

	struct {
		spinlock_t lock; /* Protects access to mmo offsets */
		struct rb_root offsets;
	} mmo;

	I915_SELFTEST_DECLARE(struct list_head st_link);

	unsigned long flags;
#define I915_BO_ALLOC_CONTIGUOUS BIT(0)
#define I915_BO_ALLOC_VOLATILE BIT(1)
#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | I915_BO_ALLOC_VOLATILE)
/*
 * I915_BO_READONLY: the object is to be mapped as read-only to the GPU.
 * Only honoured if the hardware has the relevant PTE bit.
 */
#define I915_BO_READONLY BIT(2)
#define I915_TILING_QUIRK_BIT 3 /* unknown swizzling; do not release! */

	unsigned int cache_level:3;
	unsigned int cache_coherent:2;
#define I915_BO_CACHE_COHERENT_FOR_READ BIT(0)
#define I915_BO_CACHE_COHERENT_FOR_WRITE BIT(1)
	unsigned int cache_dirty:1;

	/**
	 * @read_domains: Read memory domains.
	 *
	 * These monitor which caches contain read/write data related to the
	 * object. When transitioning from one set of domains to another,
	 * the driver is called to ensure that caches are suitably flushed and
	 * invalidated.
	 */
	u16 read_domains;

	/**
	 * @write_domain: Corresponding unique write memory domain.
	 */
	u16 write_domain;

	struct intel_frontbuffer __rcu *frontbuffer;

	/** Current tiling stride for the object, if it's tiled. */
	unsigned int tiling_and_stride;
#define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */
#define TILING_MASK (FENCE_MINIMUM_STRIDE - 1)
#define STRIDE_MASK (~TILING_MASK)
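
	/*
	 * Informative example (not a definition from this file): the tiling
	 * mode and the stride are packed into the single word above, so a
	 * caller would decode them roughly as
	 *
	 *	tiling = obj->tiling_and_stride & TILING_MASK;
	 *	stride = obj->tiling_and_stride & STRIDE_MASK;
	 *
	 * which is what the i915_gem_object_get_tiling()/get_stride()
	 * helpers elsewhere in the driver effectively do.
	 */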

	struct {
		/*
		 * Protects the pages and their use. Do not use directly, but
		 * instead go through the pin/unpin interfaces.
		 */
		struct mutex lock;
		atomic_t pages_pin_count;
		atomic_t shrink_pin;

		/**
		 * Memory region for this object.
		 */
		struct intel_memory_region *region;
		/**
		 * List of memory region blocks allocated for this object.
		 */
		struct list_head blocks;
		/**
		 * Element within memory_region->objects or region->purgeable
		 * if the object is marked as DONTNEED. Access is protected by
		 * region->obj_lock.
		 */
		struct list_head region_link;

		struct sg_table *pages;
		void *mapping;

		struct i915_page_sizes {
			/**
			 * The sg mask of the pages sg_table, i.e. the mask
			 * of the lengths for each sg entry.
			 */
			unsigned int phys;

			/**
			 * The gtt page sizes we are allowed to use given the
			 * sg mask and the supported page sizes. This will
			 * express the smallest unit we can use for the whole
			 * object, as well as the larger sizes we may be able
			 * to use opportunistically.
			 */
			unsigned int sg;

			/**
			 * The actual gtt page size usage. Since we can have
			 * multiple vma associated with this object we need to
			 * prevent any trampling of state, hence a copy of
			 * this struct also lives in each vma. The gtt value
			 * here should therefore only be read/written through
			 * the vma.
			 */
			unsigned int gtt;
		} page_sizes;

		I915_SELFTEST_DECLARE(unsigned int page_mask);

		struct i915_gem_object_page_iter get_page;
		struct i915_gem_object_page_iter get_dma_page;

		/**
		 * Element within i915->mm.unbound_list or i915->mm.bound_list,
		 * locked by i915->mm.obj_lock.
		 */
		struct list_head link;

		/**
		 * Advice: are the backing pages purgeable?
		 */
		unsigned int madv:2;

		/**
		 * This is set if the object has been written to since the
		 * pages were last acquired.
		 */
		bool dirty:1;
	} mm;

	/** Record of address bit 17 of each page at last unbind. */
	unsigned long *bit_17;

	union {
		struct i915_gem_userptr {
			uintptr_t ptr;

			struct i915_mm_struct *mm;
			struct i915_mmu_object *mmu_object;
			struct work_struct *work;
		} userptr;

		struct drm_mm_node *stolen;

		unsigned long scratch;
		u64 encode;

		void *gvt_info;
	};
};

static inline struct drm_i915_gem_object *
to_intel_bo(struct drm_gem_object *gem)
{
	/* Assert that to_intel_bo(NULL) == NULL */
	BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base));

	return container_of(gem, struct drm_i915_gem_object, base);
}

#endif