/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#ifndef __I915_GEM_OBJECT_H__
#define __I915_GEM_OBJECT_H__

#include <drm/drm_gem.h>
#include <drm/drm_file.h>
#include <drm/drm_device.h>

#include "display/intel_frontbuffer.h"
#include "intel_memory_region.h"
#include "i915_gem_object_types.h"
#include "i915_gem_gtt.h"
#include "i915_gem_ww.h"
#include "i915_vma_types.h"

enum intel_region_id;

/*
 * XXX: There is a prevalence of the assumption that we fit the
 * object's page count inside a 32bit _signed_ variable. Let's document
 * this and catch it if we ever need to fix it. In the meantime, if you do
 * spot such a local variable, please consider fixing!
 *
 * Aside from our own locals (for which we have no excuse!):
 * - sg_table embeds unsigned int for num_pages
 * - get_user_pages*() mixes ints with longs
 */
#define GEM_CHECK_SIZE_OVERFLOW(sz) \
	GEM_WARN_ON((sz) >> PAGE_SHIFT > INT_MAX)

static inline bool i915_gem_object_size_2big(u64 size)
{
	struct drm_i915_gem_object *obj;

	if (GEM_CHECK_SIZE_OVERFLOW(size))
		return true;

	/* obj is never dereferenced: only its type feeds overflows_type() */
	if (overflows_type(size, obj->base.size))
		return true;

	return false;
}
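
/*
 * Worked example (illustrative only): with the common 4KiB PAGE_SIZE
 * (PAGE_SHIFT == 12), GEM_CHECK_SIZE_OVERFLOW() fires once an object
 * would need more than INT_MAX pages, i.e. for any size above
 * (u64)INT_MAX << 12, just under 8TiB:
 *
 *	i915_gem_object_size_2big(SZ_1G);	// false: ~256K pages
 *	i915_gem_object_size_2big(9ull << 40);	// true: 9TiB > INT_MAX pages
 */
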
void i915_gem_init__objects(struct drm_i915_private *i915);

void i915_objects_module_exit(void);
int i915_objects_module_init(void);

struct drm_i915_gem_object *i915_gem_object_alloc(void);
void i915_gem_object_free(struct drm_i915_gem_object *obj);

void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops,
			  struct lock_class_key *key,
			  unsigned int alloc_flags);
struct drm_i915_gem_object *
i915_gem_object_create_shmem(struct drm_i915_private *i915,
			     resource_size_t size);
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
				       const void *data, resource_size_t size);
struct drm_i915_gem_object *
__i915_gem_object_create_user(struct drm_i915_private *i915, u64 size,
			      struct intel_memory_region **placements,
			      unsigned int n_placements);

extern const struct drm_i915_gem_object_ops i915_gem_shmem_ops;

void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
				     struct sg_table *pages,
				     bool needs_clflush);

int i915_gem_object_pwrite_phys(struct drm_i915_gem_object *obj,
				const struct drm_i915_gem_pwrite *args);
int i915_gem_object_pread_phys(struct drm_i915_gem_object *obj,
			       const struct drm_i915_gem_pread *args);

int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align);
void i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj,
				     struct sg_table *pages);
void i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
				    struct sg_table *pages);

void i915_gem_flush_free_objects(struct drm_i915_private *i915);

struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj);

/**
 * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
 * @file: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A pointer to the object named by the handle if such exists on @file, NULL
 * otherwise. This object is only valid whilst under the RCU read lock, and
 * note carefully the object may be in the process of being destroyed.
 */
static inline struct drm_i915_gem_object *
i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
#endif
	return idr_find(&file->object_idr, handle);
}

static inline struct drm_i915_gem_object *
i915_gem_object_get_rcu(struct drm_i915_gem_object *obj)
{
	if (obj && !kref_get_unless_zero(&obj->base.refcount))
		obj = NULL;

	return obj;
}

static inline struct drm_i915_gem_object *
i915_gem_object_lookup(struct drm_file *file, u32 handle)
{
	struct drm_i915_gem_object *obj;

	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, handle);
	obj = i915_gem_object_get_rcu(obj);
	rcu_read_unlock();

	return obj;
}

__deprecated
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *file, u32 handle);

__attribute__((nonnull))
static inline struct drm_i915_gem_object *
i915_gem_object_get(struct drm_i915_gem_object *obj)
{
	drm_gem_object_get(&obj->base);
	return obj;
}

__attribute__((nonnull))
static inline void
i915_gem_object_put(struct drm_i915_gem_object *obj)
{
	__drm_gem_object_put(&obj->base);
}

#define assert_object_held(obj) dma_resv_assert_held((obj)->base.resv)

/*
 * If there is more than one potential simultaneous locker, assert that
 * the object is held.
 */
static inline void assert_object_held_shared(const struct drm_i915_gem_object *obj)
{
	/*
	 * Note mm list lookup is protected by
	 * kref_get_unless_zero().
	 */
	if (IS_ENABLED(CONFIG_LOCKDEP) &&
	    kref_read(&obj->base.refcount) > 0)
		assert_object_held(obj);
}

static inline int __i915_gem_object_lock(struct drm_i915_gem_object *obj,
					 struct i915_gem_ww_ctx *ww,
					 bool intr)
{
	int ret;

	if (intr)
		ret = dma_resv_lock_interruptible(obj->base.resv, ww ? &ww->ctx : NULL);
	else
		ret = dma_resv_lock(obj->base.resv, ww ? &ww->ctx : NULL);

	if (!ret && ww) {
		i915_gem_object_get(obj);
		list_add_tail(&obj->obj_link, &ww->obj_list);
	}
	if (ret == -EALREADY)
		ret = 0;

	if (ret == -EDEADLK) {
		i915_gem_object_get(obj);
		ww->contended = obj;
	}

	return ret;
}

static inline int i915_gem_object_lock(struct drm_i915_gem_object *obj,
				       struct i915_gem_ww_ctx *ww)
{
	return __i915_gem_object_lock(obj, ww, ww && ww->intr);
}

static inline int i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj,
						     struct i915_gem_ww_ctx *ww)
{
	WARN_ON(ww && !ww->intr);
	return __i915_gem_object_lock(obj, ww, true);
}

static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj)
{
	return dma_resv_trylock(obj->base.resv);
}

static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
{
	if (obj->ops->adjust_lru)
		obj->ops->adjust_lru(obj);

	dma_resv_unlock(obj->base.resv);
}
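
/*
 * Sketch of the intended ww transaction pattern (illustrative only): a
 * -EDEADLK return asks the caller to drop all locks in the context, back
 * off and retry the whole sequence. In-tree callers typically express
 * this with the for_i915_gem_ww() helper from i915_gem_ww.h, which loops
 * until the transaction completes without deadlock; work_under_lock() is
 * a hypothetical placeholder for the caller's own work:
 *
 *	struct i915_gem_ww_ctx ww;
 *	int err;
 *
 *	for_i915_gem_ww(&ww, err, true) {
 *		err = i915_gem_object_lock(obj, &ww);
 *		if (err)
 *			continue;	// backs off and retries on -EDEADLK
 *
 *		err = work_under_lock(obj);
 *	}
 */
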
static inline void
i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
{
	obj->flags |= I915_BO_READONLY;
}

static inline bool
i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_READONLY;
}

static inline bool
i915_gem_object_is_contiguous(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_ALLOC_CONTIGUOUS;
}

static inline bool
i915_gem_object_is_volatile(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_ALLOC_VOLATILE;
}

static inline void
i915_gem_object_set_volatile(struct drm_i915_gem_object *obj)
{
	obj->flags |= I915_BO_ALLOC_VOLATILE;
}

static inline bool
i915_gem_object_has_tiling_quirk(struct drm_i915_gem_object *obj)
{
	return test_bit(I915_TILING_QUIRK_BIT, &obj->flags);
}

static inline void
i915_gem_object_set_tiling_quirk(struct drm_i915_gem_object *obj)
{
	set_bit(I915_TILING_QUIRK_BIT, &obj->flags);
}

static inline void
i915_gem_object_clear_tiling_quirk(struct drm_i915_gem_object *obj)
{
	clear_bit(I915_TILING_QUIRK_BIT, &obj->flags);
}

static inline bool
i915_gem_object_type_has(const struct drm_i915_gem_object *obj,
			 unsigned long flags)
{
	return obj->ops->flags & flags;
}

bool i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj);

bool i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj);

static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE);
}

static inline bool
i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_PROXY);
}

static inline bool
i915_gem_object_never_mmap(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_NO_MMAP);
}

static inline bool
i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
{
	return READ_ONCE(obj->frontbuffer);
}

static inline unsigned int
i915_gem_object_get_tiling(const struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & TILING_MASK;
}

static inline bool
i915_gem_object_is_tiled(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
}

static inline unsigned int
i915_gem_object_get_stride(const struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & STRIDE_MASK;
}

static inline unsigned int
i915_gem_tile_height(unsigned int tiling)
{
	GEM_BUG_ON(!tiling);
	return tiling == I915_TILING_Y ? 32 : 8;
}

static inline unsigned int
i915_gem_object_get_tile_height(const struct drm_i915_gem_object *obj)
{
	return i915_gem_tile_height(i915_gem_object_get_tiling(obj));
}

static inline unsigned int
i915_gem_object_get_tile_row_size(const struct drm_i915_gem_object *obj)
{
	return (i915_gem_object_get_stride(obj) *
		i915_gem_object_get_tile_height(obj));
}
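
/*
 * Worked example (illustrative only): the tile geometry encoded above
 * gives a tile height of 8 rows for X tiling and 32 rows for Y tiling,
 * so an object with a 4096 byte stride has a tile row size of
 * 4096 * 8 = 32KiB when X-tiled and 4096 * 32 = 128KiB when Y-tiled.
 */
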
int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
			       unsigned int tiling, unsigned int stride);

struct scatterlist *
__i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
			 struct i915_gem_object_page_iter *iter,
			 unsigned int n,
			 unsigned int *offset, bool dma);

static inline struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
		       unsigned int n,
		       unsigned int *offset)
{
	return __i915_gem_object_get_sg(obj, &obj->mm.get_page, n, offset, false);
}

static inline struct scatterlist *
i915_gem_object_get_sg_dma(struct drm_i915_gem_object *obj,
			   unsigned int n,
			   unsigned int *offset)
{
	return __i915_gem_object_get_sg(obj, &obj->mm.get_dma_page, n, offset, true);
}

struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj,
			 unsigned int n);

struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
			       unsigned int n);

dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
				    unsigned long n,
				    unsigned int *len);

dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
				unsigned long n);

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages,
				 unsigned int sg_page_sizes);

int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);

static inline int __must_check
i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	assert_object_held(obj);

	if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
		return 0;

	return __i915_gem_object_get_pages(obj);
}

int i915_gem_object_pin_pages_unlocked(struct drm_i915_gem_object *obj);

static inline bool
i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
{
	return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
}

static inline void
__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));

	atomic_inc(&obj->mm.pages_pin_count);
}

static inline bool
i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
{
	return atomic_read(&obj->mm.pages_pin_count);
}

static inline void
__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	atomic_dec(&obj->mm.pages_pin_count);
}

static inline void
i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_unpin_pages(obj);
}
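
/*
 * Usage sketch (illustrative only): the unlocked variant takes the
 * object lock internally; every successful pin must be balanced by an
 * unpin before the shrinker may reap the backing store again:
 *
 *	err = i915_gem_object_pin_pages_unlocked(obj);
 *	if (err)
 *		return err;
 *
 *	// obj->mm.pages is now populated and stable
 *
 *	i915_gem_object_unpin_pages(obj);
 */
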
int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
void i915_gem_object_writeback(struct drm_i915_gem_object *obj);

/**
 * i915_gem_object_pin_map - return a contiguous mapping of the entire object
 * @obj: the object to map into kernel address space
 * @type: the type of mapping, used to select pgprot_t
 *
 * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
 * pages and then returns a contiguous mapping of the backing storage into
 * the kernel address space. Based on the @type of mapping, the PTE will be
 * set to either WriteBack or WriteCombine (via pgprot_t).
 *
 * The caller is responsible for calling i915_gem_object_unpin_map() when the
 * mapping is no longer required.
 *
 * Returns the pointer through which to access the mapped object, or an
 * ERR_PTR() on error.
 */
void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
					   enum i915_map_type type);

void *__must_check i915_gem_object_pin_map_unlocked(struct drm_i915_gem_object *obj,
						    enum i915_map_type type);

void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
				 unsigned long offset,
				 unsigned long size);
static inline void i915_gem_object_flush_map(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_flush_map(obj, 0, obj->base.size);
}

/**
 * i915_gem_object_unpin_map - releases an earlier mapping
 * @obj: the object to unmap
 *
 * After pinning the object and mapping its pages, once you are finished
 * with your access, call i915_gem_object_unpin_map() to release the pin
 * upon the mapping. Once the pin count reaches zero, that mapping may be
 * removed.
 */
static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}
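
/*
 * Usage sketch (illustrative only; "data" and "len" are placeholders):
 * map the whole object, write through the CPU mapping, flush, then
 * release the mapping pin:
 *
 *	void *vaddr;
 *
 *	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *
 *	memcpy(vaddr, data, len);
 *	i915_gem_object_flush_map(obj);	// flush CPU writes before GPU use
 *	i915_gem_object_unpin_map(obj);
 */
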
void __i915_gem_object_release_map(struct drm_i915_gem_object *obj);

int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
				 unsigned int *needs_clflush);
int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
				  unsigned int *needs_clflush);
#define CLFLUSH_BEFORE	BIT(0)
#define CLFLUSH_AFTER	BIT(1)
#define CLFLUSH_FLAGS	(CLFLUSH_BEFORE | CLFLUSH_AFTER)

static inline void
i915_gem_object_finish_access(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}
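
/*
 * Sketch of the prepare/finish bracket (illustrative only; assumes the
 * object lock is held and that "vaddr"/"src"/"len" address the object's
 * backing pages, e.g. via a kmap of i915_gem_object_get_page()): the
 * returned needs_clflush mask says which cache maintenance must bracket
 * the CPU access:
 *
 *	unsigned int needs_clflush;
 *	int err;
 *
 *	err = i915_gem_object_prepare_write(obj, &needs_clflush);
 *	if (err)
 *		return err;
 *
 *	if (needs_clflush & CLFLUSH_BEFORE)
 *		drm_clflush_virt_range(vaddr, len);
 *	memcpy(vaddr, src, len);
 *	if (needs_clflush & CLFLUSH_AFTER)
 *		drm_clflush_virt_range(vaddr, len);
 *
 *	i915_gem_object_finish_access(obj);
 */
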
static inline struct intel_engine_cs *
i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
{
	struct intel_engine_cs *engine = NULL;
	struct dma_fence *fence;

	rcu_read_lock();
	fence = dma_resv_get_excl_unlocked(obj->base.resv);
	rcu_read_unlock();

	if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
		engine = to_request(fence)->engine;
	dma_fence_put(fence);

	return engine;
}

void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
					 unsigned int cache_level);
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);
void i915_gem_object_flush_if_display_locked(struct drm_i915_gem_object *obj);

int __must_check
i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
struct i915_vma * __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     struct i915_gem_ww_ctx *ww,
				     u32 alignment,
				     const struct i915_ggtt_view *view,
				     unsigned int flags);

void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj);

static inline bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	if (obj->cache_dirty)
		return false;

	if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
		return true;

	/* Currently in use by HW (display engine)? Keep flushed. */
	return i915_gem_object_is_framebuffer(obj);
}

static inline void __start_cpu_write(struct drm_i915_gem_object *obj)
{
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	if (cpu_write_needs_clflush(obj))
		obj->cache_dirty = true;
}

void i915_gem_fence_wait_priority(struct dma_fence *fence,
				  const struct i915_sched_attr *attr);

int i915_gem_object_wait(struct drm_i915_gem_object *obj,
			 unsigned int flags,
			 long timeout);
int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
				  unsigned int flags,
				  const struct i915_sched_attr *attr);

void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
					 enum fb_op_origin origin);
void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
					      enum fb_op_origin origin);

static inline void
i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
				  enum fb_op_origin origin)
{
	if (unlikely(rcu_access_pointer(obj->frontbuffer)))
		__i915_gem_object_flush_frontbuffer(obj, origin);
}

static inline void
i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
				       enum fb_op_origin origin)
{
	if (unlikely(rcu_access_pointer(obj->frontbuffer)))
		__i915_gem_object_invalidate_frontbuffer(obj, origin);
}

int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size);

bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj);

void __i915_gem_free_object_rcu(struct rcu_head *head);

void __i915_gem_free_object(struct drm_i915_gem_object *obj);

bool i915_gem_object_evictable(struct drm_i915_gem_object *obj);

bool i915_gem_object_migratable(struct drm_i915_gem_object *obj);

int i915_gem_object_migrate(struct drm_i915_gem_object *obj,
			    struct i915_gem_ww_ctx *ww,
			    enum intel_region_id id);

bool i915_gem_object_can_migrate(struct drm_i915_gem_object *obj,
				 enum intel_region_id id);

int i915_gem_object_wait_migration(struct drm_i915_gem_object *obj,
				   unsigned int flags);

bool i915_gem_object_placement_possible(struct drm_i915_gem_object *obj,
					enum intel_memory_type type);

#ifdef CONFIG_MMU_NOTIFIER
static inline bool
i915_gem_object_is_userptr(struct drm_i915_gem_object *obj)
{
	return obj->userptr.notifier.mm;
}

int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj);
int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj);
int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj);
#else
static inline bool i915_gem_object_is_userptr(struct drm_i915_gem_object *obj) { return false; }

static inline int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }
static inline int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }
static inline int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }

#endif

#endif /* __I915_GEM_OBJECT_H__ */