/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#ifndef __I915_GEM_OBJECT_H__
#define __I915_GEM_OBJECT_H__

#include <drm/drm_gem.h>
#include <drm/drm_file.h>
#include <drm/drm_device.h>

#include "display/intel_frontbuffer.h"
#include "i915_gem_object_types.h"
#include "i915_gem_gtt.h"
#include "i915_vma_types.h"

void i915_gem_init__objects(struct drm_i915_private *i915);

struct drm_i915_gem_object *i915_gem_object_alloc(void);
void i915_gem_object_free(struct drm_i915_gem_object *obj);

void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops,
			  struct lock_class_key *key);
struct drm_i915_gem_object *
i915_gem_object_create_shmem(struct drm_i915_private *i915,
			     resource_size_t size);
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
				       const void *data, resource_size_t size);

extern const struct drm_i915_gem_object_ops i915_gem_shmem_ops;
void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
				     struct sg_table *pages,
				     bool needs_clflush);

int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align);

void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
void i915_gem_free_object(struct drm_gem_object *obj);

void i915_gem_flush_free_objects(struct drm_i915_private *i915);

struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj);
void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
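/*
 * Usage sketch (illustrative only, not part of the API contract):
 * creating a shmem-backed object and dropping the reference again.
 * The device pointer and the SZ_4K size are assumptions of this
 * example.
 *
 *	struct drm_i915_gem_object *obj;
 *
 *	obj = i915_gem_object_create_shmem(i915, SZ_4K);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *
 *	... use the object ...
 *
 *	i915_gem_object_put(obj);
 */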
/**
 * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
 * @file: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 * A pointer to the object named by the handle if such exists on @file,
 * NULL otherwise. This object is only valid whilst under the RCU read
 * lock, and note carefully that the object may be in the process of
 * being destroyed.
 */
static inline struct drm_i915_gem_object *
i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
#endif
	return idr_find(&file->object_idr, handle);
}

static inline struct drm_i915_gem_object *
i915_gem_object_get_rcu(struct drm_i915_gem_object *obj)
{
	if (obj && !kref_get_unless_zero(&obj->base.refcount))
		obj = NULL;

	return obj;
}

static inline struct drm_i915_gem_object *
i915_gem_object_lookup(struct drm_file *file, u32 handle)
{
	struct drm_i915_gem_object *obj;

	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, handle);
	obj = i915_gem_object_get_rcu(obj);
	rcu_read_unlock();

	return obj;
}

__deprecated
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *file, u32 handle);

__attribute__((nonnull))
static inline struct drm_i915_gem_object *
i915_gem_object_get(struct drm_i915_gem_object *obj)
{
	drm_gem_object_get(&obj->base);
	return obj;
}

__attribute__((nonnull))
static inline void
i915_gem_object_put(struct drm_i915_gem_object *obj)
{
	__drm_gem_object_put(&obj->base);
}

#define assert_object_held(obj) dma_resv_assert_held((obj)->base.resv)

static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj)
{
	dma_resv_lock(obj->base.resv, NULL);
}

static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj)
{
	return dma_resv_trylock(obj->base.resv);
}

static inline int
i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj)
{
	return dma_resv_lock_interruptible(obj->base.resv, NULL);
}

static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
{
	dma_resv_unlock(obj->base.resv);
}
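/*
 * Usage sketch (illustrative only): serialising access to the object
 * with its reservation lock. A reference to the object is assumed to
 * be held by the caller.
 *
 *	i915_gem_object_lock(obj);
 *	assert_object_held(obj);
 *	... manipulate state protected by the object lock ...
 *	i915_gem_object_unlock(obj);
 *
 * or, when the wait for the lock must be interruptible:
 *
 *	err = i915_gem_object_lock_interruptible(obj);
 *	if (err)
 *		return err;
 *	...
 *	i915_gem_object_unlock(obj);
 */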
struct dma_fence *
i915_gem_object_lock_fence(struct drm_i915_gem_object *obj);
void i915_gem_object_unlock_fence(struct drm_i915_gem_object *obj,
				  struct dma_fence *fence);

static inline void
i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
{
	obj->flags |= I915_BO_READONLY;
}

static inline bool
i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_READONLY;
}

static inline bool
i915_gem_object_is_contiguous(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_ALLOC_CONTIGUOUS;
}

static inline bool
i915_gem_object_is_volatile(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_ALLOC_VOLATILE;
}

static inline void
i915_gem_object_set_volatile(struct drm_i915_gem_object *obj)
{
	obj->flags |= I915_BO_ALLOC_VOLATILE;
}

static inline bool
i915_gem_object_type_has(const struct drm_i915_gem_object *obj,
			 unsigned long flags)
{
	return obj->ops->flags & flags;
}

static inline bool
i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_STRUCT_PAGE);
}

static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE);
}

static inline bool
i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_PROXY);
}

static inline bool
i915_gem_object_never_mmap(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_NO_MMAP);
}

static inline bool
i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_ASYNC_CANCEL);
}

static inline bool
i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
{
	return READ_ONCE(obj->frontbuffer);
}

static inline unsigned int
i915_gem_object_get_tiling(const struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & TILING_MASK;
}

static inline bool
i915_gem_object_is_tiled(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
}

static inline unsigned int
i915_gem_object_get_stride(const struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & STRIDE_MASK;
}

static inline unsigned int
i915_gem_tile_height(unsigned int tiling)
{
	GEM_BUG_ON(!tiling);
	return tiling == I915_TILING_Y ? 32 : 8;
}

static inline unsigned int
i915_gem_object_get_tile_height(const struct drm_i915_gem_object *obj)
{
	return i915_gem_tile_height(i915_gem_object_get_tiling(obj));
}

static inline unsigned int
i915_gem_object_get_tile_row_size(const struct drm_i915_gem_object *obj)
{
	return (i915_gem_object_get_stride(obj) *
		i915_gem_object_get_tile_height(obj));
}

int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
			       unsigned int tiling, unsigned int stride);

struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
		       unsigned int n, unsigned int *offset);

struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj,
			 unsigned int n);

dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
				    unsigned long n,
				    unsigned int *len);

dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
				unsigned long n);

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages,
				 unsigned int sg_page_sizes);

int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
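/*
 * Usage sketch (illustrative only): pinning the backing store before
 * using the sg/page lookup helpers above; the lookups are only valid
 * while the pages remain pinned.
 *
 *	unsigned long i;
 *	int err;
 *
 *	err = i915_gem_object_pin_pages(obj);
 *	if (err)
 *		return err;
 *
 *	for (i = 0; i < obj->base.size >> PAGE_SHIFT; i++) {
 *		struct page *page = i915_gem_object_get_page(obj, i);
 *
 *		... inspect each page of the backing store ...
 *	}
 *
 *	i915_gem_object_unpin_pages(obj);
 */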
enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */
	I915_MM_NORMAL = 0,
	/*
	 * Only used by struct_mutex, when called "recursively" from a
	 * direct-reclaim-esque context. Safe because there is only ever
	 * one struct_mutex in the entire system.
	 */
	I915_MM_SHRINKER = 1,
	/*
	 * Used for obj->mm.lock when allocating pages. Safe because the object
	 * isn't yet on any LRU, and therefore the shrinker can't deadlock on
	 * it. As soon as the object has pages, obj->mm.lock nests within
	 * fs_reclaim.
	 */
	I915_MM_GET_PAGES = 1,
};

static inline int __must_check
i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	might_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES);

	if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
		return 0;

	return __i915_gem_object_get_pages(obj);
}

static inline bool
i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
{
	return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
}

static inline void
__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));

	atomic_inc(&obj->mm.pages_pin_count);
}

static inline bool
i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
{
	return atomic_read(&obj->mm.pages_pin_count);
}

static inline void
__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	atomic_dec(&obj->mm.pages_pin_count);
}

static inline void
i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_unpin_pages(obj);
}

int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_object_writeback(struct drm_i915_gem_object *obj);

enum i915_map_type {
	I915_MAP_WB = 0,
	I915_MAP_WC,
#define I915_MAP_OVERRIDE BIT(31)
	I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE,
	I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
};

/**
 * i915_gem_object_pin_map - return a contiguous mapping of the entire object
 * @obj: the object to map into kernel address space
 * @type: the type of mapping, used to select pgprot_t
 *
 * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
 * pages and then returns a contiguous mapping of the backing storage into
 * the kernel address space. Based on the @type of mapping, the PTE will be
 * set to either WriteBack or WriteCombine (via pgprot_t).
 *
 * The caller is responsible for calling i915_gem_object_unpin_map() when the
 * mapping is no longer required.
 *
 * Returns the pointer through which to access the mapped object, or an
 * ERR_PTR() on error.
 */
void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
					   enum i915_map_type type);

void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
				 unsigned long offset,
				 unsigned long size);
static inline void i915_gem_object_flush_map(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_flush_map(obj, 0, obj->base.size);
}
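/*
 * Usage sketch (illustrative only): filling an object through a
 * kernel-space mapping. The data pointer and size are assumptions of
 * this example; a WB mapping is chosen for plain CPU writes.
 *
 *	void *vaddr;
 *
 *	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *
 *	memcpy(vaddr, data, size);
 *	i915_gem_object_flush_map(obj);
 *	i915_gem_object_unpin_map(obj);
 */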
/**
 * i915_gem_object_unpin_map - releases an earlier mapping
 * @obj: the object to unmap
 *
 * After pinning the object and mapping its pages, once you are finished
 * with your access, call i915_gem_object_unpin_map() to release the pin
 * on the mapping. Once the pin count reaches zero, that mapping may be
 * removed.
 */
static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}

void __i915_gem_object_release_map(struct drm_i915_gem_object *obj);

void
i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
				   unsigned int flush_domains);

int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
				 unsigned int *needs_clflush);
int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
				  unsigned int *needs_clflush);
#define CLFLUSH_BEFORE	BIT(0)
#define CLFLUSH_AFTER	BIT(1)
#define CLFLUSH_FLAGS	(CLFLUSH_BEFORE | CLFLUSH_AFTER)

static inline void
i915_gem_object_finish_access(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
	i915_gem_object_unlock(obj);
}
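/*
 * Usage sketch (illustrative only): a CPU read of the backing pages.
 * On success, prepare_read leaves the object locked with its pages
 * pinned, both of which finish_access drops again; the returned flags
 * tell the caller whether it must clflush around the access (e.g.
 * with drm_clflush_virt_range()).
 *
 *	unsigned int needs_clflush;
 *	int err;
 *
 *	err = i915_gem_object_prepare_read(obj, &needs_clflush);
 *	if (err)
 *		return err;
 *
 *	... if needs_clflush & CLFLUSH_BEFORE, flush the CPU cache,
 *	    then read from the backing pages ...
 *
 *	i915_gem_object_finish_access(obj);
 */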
static inline struct intel_engine_cs *
i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
{
	struct intel_engine_cs *engine = NULL;
	struct dma_fence *fence;

	rcu_read_lock();
	fence = dma_resv_get_excl_rcu(obj->base.resv);
	rcu_read_unlock();

	if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
		engine = to_request(fence)->engine;
	dma_fence_put(fence);

	return engine;
}

void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
					 unsigned int cache_level);
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);

int __must_check
i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
struct i915_vma * __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     u32 alignment,
				     const struct i915_ggtt_view *view,
				     unsigned int flags);
void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);

void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj);

static inline bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	if (obj->cache_dirty)
		return false;

	if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
		return true;

	/* Currently in use by HW (display engine)? Keep flushed. */
	return i915_gem_object_is_framebuffer(obj);
}

static inline void __start_cpu_write(struct drm_i915_gem_object *obj)
{
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	if (cpu_write_needs_clflush(obj))
		obj->cache_dirty = true;
}

int i915_gem_object_wait(struct drm_i915_gem_object *obj,
			 unsigned int flags,
			 long timeout);
int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
				  unsigned int flags,
				  const struct i915_sched_attr *attr);

void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
					 enum fb_op_origin origin);
void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
					      enum fb_op_origin origin);

static inline void
i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
				  enum fb_op_origin origin)
{
	if (unlikely(rcu_access_pointer(obj->frontbuffer)))
		__i915_gem_object_flush_frontbuffer(obj, origin);
}

static inline void
i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
				       enum fb_op_origin origin)
{
	if (unlikely(rcu_access_pointer(obj->frontbuffer)))
		__i915_gem_object_invalidate_frontbuffer(obj, origin);
}

#endif