/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#ifndef __I915_GEM_OBJECT_H__
#define __I915_GEM_OBJECT_H__

#include <drm/drm_gem.h>
#include <drm/drm_file.h>
#include <drm/drm_device.h>

#include "display/intel_frontbuffer.h"
#include "i915_gem_object_types.h"
#include "i915_gem_gtt.h"
#include "i915_vma_types.h"

void i915_gem_init__objects(struct drm_i915_private *i915);

struct drm_i915_gem_object *i915_gem_object_alloc(void);
void i915_gem_object_free(struct drm_i915_gem_object *obj);

void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops,
			  struct lock_class_key *key);
struct drm_i915_gem_object *
i915_gem_object_create_shmem(struct drm_i915_private *i915,
			     resource_size_t size);
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
				       const void *data, resource_size_t size);

extern const struct drm_i915_gem_object_ops i915_gem_shmem_ops;
void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
				     struct sg_table *pages,
				     bool needs_clflush);

int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align);

void i915_gem_flush_free_objects(struct drm_i915_private *i915);

struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj);
void i915_gem_object_truncate(struct drm_i915_gem_object *obj);

/**
 * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
 * @file: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A pointer to the object named by the handle if such exists on @file, NULL
 * otherwise. This object is only valid whilst under the RCU read lock, and
 * note carefully that the object may be in the process of being destroyed.
 */
static inline struct drm_i915_gem_object *
i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
#endif
	return idr_find(&file->object_idr, handle);
}

static inline struct drm_i915_gem_object *
i915_gem_object_get_rcu(struct drm_i915_gem_object *obj)
{
	if (obj && !kref_get_unless_zero(&obj->base.refcount))
		obj = NULL;

	return obj;
}

static inline struct drm_i915_gem_object *
i915_gem_object_lookup(struct drm_file *file, u32 handle)
{
	struct drm_i915_gem_object *obj;

	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, handle);
	obj = i915_gem_object_get_rcu(obj);
	rcu_read_unlock();

	return obj;
}

__deprecated
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *file, u32 handle);

__attribute__((nonnull))
static inline struct drm_i915_gem_object *
i915_gem_object_get(struct drm_i915_gem_object *obj)
{
	drm_gem_object_get(&obj->base);
	return obj;
}

__attribute__((nonnull))
static inline void
i915_gem_object_put(struct drm_i915_gem_object *obj)
{
	__drm_gem_object_put(&obj->base);
}
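
/*
 * Example (illustrative sketch, not part of the API): the usual pattern
 * for resolving a userspace handle in an ioctl. i915_gem_object_lookup()
 * returns its own reference, which the caller must drop with
 * i915_gem_object_put(); returning -ENOENT on a stale handle is a
 * convention of this sketch, not something the helpers mandate.
 *
 *	struct drm_i915_gem_object *obj;
 *
 *	obj = i915_gem_object_lookup(file, args->handle);
 *	if (!obj)
 *		return -ENOENT;
 *
 *	... operate on obj ...
 *
 *	i915_gem_object_put(obj);
 */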

#define assert_object_held(obj) dma_resv_assert_held((obj)->base.resv)

static inline int __i915_gem_object_lock(struct drm_i915_gem_object *obj,
					 struct i915_gem_ww_ctx *ww,
					 bool intr)
{
	int ret;

	if (intr)
		ret = dma_resv_lock_interruptible(obj->base.resv, ww ? &ww->ctx : NULL);
	else
		ret = dma_resv_lock(obj->base.resv, ww ? &ww->ctx : NULL);

	if (!ret && ww)
		list_add_tail(&obj->obj_link, &ww->obj_list);
	if (ret == -EALREADY)
		ret = 0;

	if (ret == -EDEADLK)
		ww->contended = obj;

	return ret;
}

static inline int i915_gem_object_lock(struct drm_i915_gem_object *obj,
				       struct i915_gem_ww_ctx *ww)
{
	return __i915_gem_object_lock(obj, ww, ww && ww->intr);
}

static inline int i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj,
						     struct i915_gem_ww_ctx *ww)
{
	WARN_ON(ww && !ww->intr);
	return __i915_gem_object_lock(obj, ww, true);
}

static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj)
{
	return dma_resv_trylock(obj->base.resv);
}

static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
{
	dma_resv_unlock(obj->base.resv);
}
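
/*
 * Example (illustrative sketch): taking the object lock under a ww
 * acquire context with the usual -EDEADLK backoff dance. The
 * i915_gem_ww_ctx_init()/backoff()/fini() helpers are assumed from the
 * surrounding driver, and do_work() is a placeholder for the locked
 * section; objects locked through the ww context are unlocked when it
 * is finalised.
 *
 *	struct i915_gem_ww_ctx ww;
 *	int err;
 *
 *	i915_gem_ww_ctx_init(&ww, true);
 * retry:
 *	err = i915_gem_object_lock(obj, &ww);
 *	if (!err)
 *		err = do_work(obj);
 *	if (err == -EDEADLK) {
 *		err = i915_gem_ww_ctx_backoff(&ww);
 *		if (!err)
 *			goto retry;
 *	}
 *	i915_gem_ww_ctx_fini(&ww);
 */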

struct dma_fence *
i915_gem_object_lock_fence(struct drm_i915_gem_object *obj);
void i915_gem_object_unlock_fence(struct drm_i915_gem_object *obj,
				  struct dma_fence *fence);

static inline void
i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
{
	obj->flags |= I915_BO_READONLY;
}

static inline bool
i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_READONLY;
}

static inline bool
i915_gem_object_is_contiguous(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_ALLOC_CONTIGUOUS;
}

static inline bool
i915_gem_object_is_volatile(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_ALLOC_VOLATILE;
}

static inline void
i915_gem_object_set_volatile(struct drm_i915_gem_object *obj)
{
	obj->flags |= I915_BO_ALLOC_VOLATILE;
}

static inline bool
i915_gem_object_type_has(const struct drm_i915_gem_object *obj,
			 unsigned long flags)
{
	return obj->ops->flags & flags;
}

static inline bool
i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_STRUCT_PAGE);
}

static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE);
}

static inline bool
i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_PROXY);
}

static inline bool
i915_gem_object_never_mmap(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_NO_MMAP);
}

static inline bool
i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_ASYNC_CANCEL);
}

static inline bool
i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
{
	return READ_ONCE(obj->frontbuffer);
}

static inline unsigned int
i915_gem_object_get_tiling(const struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & TILING_MASK;
}

static inline bool
i915_gem_object_is_tiled(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
}

static inline unsigned int
i915_gem_object_get_stride(const struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & STRIDE_MASK;
}

static inline unsigned int
i915_gem_tile_height(unsigned int tiling)
{
	GEM_BUG_ON(!tiling);
	return tiling == I915_TILING_Y ? 32 : 8;
}

static inline unsigned int
i915_gem_object_get_tile_height(const struct drm_i915_gem_object *obj)
{
	return i915_gem_tile_height(i915_gem_object_get_tiling(obj));
}

static inline unsigned int
i915_gem_object_get_tile_row_size(const struct drm_i915_gem_object *obj)
{
	return (i915_gem_object_get_stride(obj) *
		i915_gem_object_get_tile_height(obj));
}

int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
			       unsigned int tiling, unsigned int stride);

struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
		       unsigned int n, unsigned int *offset);

struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj,
			 unsigned int n);

struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
			       unsigned int n);

dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
				    unsigned long n,
				    unsigned int *len);

dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
				unsigned long n);
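
/*
 * Example (illustrative sketch): walking the backing store page by page.
 * The pages must be held (see i915_gem_object_pin_pages() below) for the
 * lookups to stay valid; computing npages from obj->base.size is an
 * assumption of this sketch.
 *
 *	unsigned int n, npages = obj->base.size >> PAGE_SHIFT;
 *	int err;
 *
 *	err = i915_gem_object_pin_pages(obj);
 *	if (err)
 *		return err;
 *
 *	for (n = 0; n < npages; n++) {
 *		struct page *page = i915_gem_object_get_page(obj, n);
 *
 *		... inspect page ...
 *	}
 *
 *	i915_gem_object_unpin_pages(obj);
 */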

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages,
				 unsigned int sg_page_sizes);

int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);

enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */
	I915_MM_NORMAL = 0,
	/*
	 * Only used by struct_mutex, when called "recursively" from a
	 * direct-reclaim-esque context. Safe because there is only ever
	 * one struct_mutex in the entire system.
	 */
	I915_MM_SHRINKER = 1,
	/*
	 * Used for obj->mm.lock when allocating pages. Safe because the object
	 * isn't yet on any LRU, and therefore the shrinker can't deadlock on
	 * it. As soon as the object has pages, obj->mm.lock nests within
	 * fs_reclaim.
	 */
	I915_MM_GET_PAGES = 1,
};

static inline int __must_check
i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	might_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES);

	if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
		return 0;

	return __i915_gem_object_get_pages(obj);
}

static inline bool
i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
{
	return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
}

static inline void
__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));

	atomic_inc(&obj->mm.pages_pin_count);
}

static inline bool
i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
{
	return atomic_read(&obj->mm.pages_pin_count);
}

static inline void
__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	atomic_dec(&obj->mm.pages_pin_count);
}

static inline void
i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_unpin_pages(obj);
}

int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_object_writeback(struct drm_i915_gem_object *obj);

enum i915_map_type {
	I915_MAP_WB = 0,
	I915_MAP_WC,
#define I915_MAP_OVERRIDE BIT(31)
	I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE,
	I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
};

/**
 * i915_gem_object_pin_map - return a contiguous mapping of the entire object
 * @obj: the object to map into kernel address space
 * @type: the type of mapping, used to select pgprot_t
 *
 * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
 * pages and then returns a contiguous mapping of the backing storage into
 * the kernel address space. Based on the @type of mapping, the PTE will be
 * set to either WriteBack or WriteCombine (via pgprot_t).
 *
 * The caller is responsible for calling i915_gem_object_unpin_map() when the
 * mapping is no longer required.
 *
 * Returns the pointer through which to access the mapped object, or an
 * ERR_PTR() on error.
 */
void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
					   enum i915_map_type type);

void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
				 unsigned long offset,
				 unsigned long size);
static inline void i915_gem_object_flush_map(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_flush_map(obj, 0, obj->base.size);
}
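
/*
 * Example (illustrative sketch): filling an object through a pinned
 * kernel mapping. data and size are placeholders; whether the flush is
 * required depends on the coherency of the chosen mapping type, so it
 * is shown here for the cautious write-back case.
 *
 *	void *vaddr;
 *
 *	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *
 *	memcpy(vaddr, data, size);
 *	i915_gem_object_flush_map(obj);
 *	i915_gem_object_unpin_map(obj);
 */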

/**
 * i915_gem_object_unpin_map - releases an earlier mapping
 * @obj: the object to unmap
 *
 * After pinning the object and mapping its pages, once you are finished
 * with your access, call i915_gem_object_unpin_map() to release the pin
 * on the mapping. Once the pin count reaches zero, that mapping may be
 * removed.
 */
static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}

void __i915_gem_object_release_map(struct drm_i915_gem_object *obj);

void
i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
				   unsigned int flush_domains);

int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
				 unsigned int *needs_clflush);
int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
				  unsigned int *needs_clflush);
#define CLFLUSH_BEFORE	BIT(0)
#define CLFLUSH_AFTER	BIT(1)
#define CLFLUSH_FLAGS	(CLFLUSH_BEFORE | CLFLUSH_AFTER)

static inline void
i915_gem_object_finish_access(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}
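
/*
 * Example (illustrative sketch): a pwrite-style CPU access bracketed by
 * the prepare/finish helpers. On success the pages are pinned and
 * *needs_clflush says which cache maintenance the caller owes around the
 * access; drm_clflush_virt_range() is one way to perform it. vaddr,
 * user_data and len are placeholders, and any locking the helpers expect
 * is elided here.
 *
 *	unsigned int needs_clflush;
 *	int err;
 *
 *	err = i915_gem_object_prepare_write(obj, &needs_clflush);
 *	if (err)
 *		return err;
 *
 *	if (needs_clflush & CLFLUSH_BEFORE)
 *		drm_clflush_virt_range(vaddr, len);
 *	memcpy(vaddr, user_data, len);
 *	if (needs_clflush & CLFLUSH_AFTER)
 *		drm_clflush_virt_range(vaddr, len);
 *
 *	i915_gem_object_finish_access(obj);
 */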

static inline struct intel_engine_cs *
i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
{
	struct intel_engine_cs *engine = NULL;
	struct dma_fence *fence;

	rcu_read_lock();
	fence = dma_resv_get_excl_rcu(obj->base.resv);
	rcu_read_unlock();

	if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
		engine = to_request(fence)->engine;
	dma_fence_put(fence);

	return engine;
}

void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
					 unsigned int cache_level);
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);
void i915_gem_object_flush_if_display_locked(struct drm_i915_gem_object *obj);

int __must_check
i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
struct i915_vma * __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     u32 alignment,
				     const struct i915_ggtt_view *view,
				     unsigned int flags);
void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);

void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj);

static inline bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	if (obj->cache_dirty)
		return false;

	if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
		return true;

	/* Currently in use by HW (display engine)? Keep flushed. */
	return i915_gem_object_is_framebuffer(obj);
}

static inline void __start_cpu_write(struct drm_i915_gem_object *obj)
{
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	if (cpu_write_needs_clflush(obj))
		obj->cache_dirty = true;
}

int i915_gem_object_wait(struct drm_i915_gem_object *obj,
			 unsigned int flags,
			 long timeout);
int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
				  unsigned int flags,
				  const struct i915_sched_attr *attr);

void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
					 enum fb_op_origin origin);
void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
					      enum fb_op_origin origin);

static inline void
i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
				  enum fb_op_origin origin)
{
	if (unlikely(rcu_access_pointer(obj->frontbuffer)))
		__i915_gem_object_flush_frontbuffer(obj, origin);
}

static inline void
i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
				       enum fb_op_origin origin)
{
	if (unlikely(rcu_access_pointer(obj->frontbuffer)))
		__i915_gem_object_invalidate_frontbuffer(obj, origin);
}

#endif