/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Please try to maintain the following order within this file unless it makes
 * sense to do otherwise. From top to bottom:
 * 1. typedefs
 * 2. #defines and macros
 * 3. structure definitions
 * 4. function prototypes
 *
 * Within each section, please try to order by generation in ascending order,
 * from top to bottom (i.e. gen6 on the top, gen8 on the bottom).
 */

#ifndef __I915_GEM_GTT_H__
#define __I915_GEM_GTT_H__

#include <linux/io-mapping.h>
#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/pagevec.h>
#include <linux/workqueue.h>

#include <drm/drm_mm.h>

#include "gt/intel_reset.h"
#include "i915_gem_fence_reg.h"
#include "i915_request.h"
#include "i915_scatterlist.h"
#include "i915_selftest.h"
#include "gt/intel_timeline.h"

#define I915_GTT_PAGE_SIZE_4K	BIT_ULL(12)
#define I915_GTT_PAGE_SIZE_64K	BIT_ULL(16)
#define I915_GTT_PAGE_SIZE_2M	BIT_ULL(21)

#define I915_GTT_PAGE_SIZE	I915_GTT_PAGE_SIZE_4K
#define I915_GTT_MAX_PAGE_SIZE	I915_GTT_PAGE_SIZE_2M

#define I915_GTT_PAGE_MASK	-I915_GTT_PAGE_SIZE

#define I915_GTT_MIN_ALIGNMENT	I915_GTT_PAGE_SIZE

#define I915_FENCE_REG_NONE	-1
#define I915_MAX_NUM_FENCES	32
/* 32 fences + sign bit for FENCE_REG_NONE */
#define I915_MAX_NUM_FENCE_BITS	6

struct drm_i915_file_private;
struct drm_i915_gem_object;
struct i915_vma;
struct intel_gt;

typedef u32 gen6_pte_t;
typedef u64 gen8_pte_t;

#define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT)

/* gen6-hsw has bit 11-4 for physical addr bit 39-32 */
#define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
#define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PDE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PTE_CACHE_LLC		(2 << 1)
#define GEN6_PTE_UNCACHED		(1 << 1)
#define GEN6_PTE_VALID			(1 << 0)
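
/*
 * Illustrative sketch only (hypothetical helper, not part of the driver
 * interface): the gen6/hsw GTT entry is a 32b word, so GEN6_PTE_ADDR_ENCODE()
 * folds physical address bits 39:32 into PTE bits 11:4 alongside the caching
 * and valid bits. The real encoders live in i915_gem_gtt.c and also honour
 * caller flags; this merely shows how the macros above compose.
 */
static inline gen6_pte_t __example_gen6_pte_encode(dma_addr_t addr)
{
	/* Physical address bits 39:32 land in PTE bits 11:4. */
	return GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_CACHE_LLC | GEN6_PTE_VALID;
}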
#define I915_PTES(pte_len)		((unsigned int)(PAGE_SIZE / (pte_len)))
#define I915_PTE_MASK(pte_len)		(I915_PTES(pte_len) - 1)
#define I915_PDES			512
#define I915_PDE_MASK			(I915_PDES - 1)
#define NUM_PTE(pde_shift)		(1 << (pde_shift - PAGE_SHIFT))

#define GEN6_PTES			I915_PTES(sizeof(gen6_pte_t))
#define GEN6_PD_SIZE			(I915_PDES * PAGE_SIZE)
#define GEN6_PD_ALIGN			(PAGE_SIZE * 16)
#define GEN6_PDE_SHIFT			22
#define GEN6_PDE_VALID			(1 << 0)

#define GEN7_PTE_CACHE_L3_LLC		(3 << 1)

#define BYT_PTE_SNOOPED_BY_CPU_CACHES	(1 << 2)
#define BYT_PTE_WRITEABLE		(1 << 1)

/* Cacheability Control is a 4-bit value. The low three bits are stored in bits
 * 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
 */
#define HSW_CACHEABILITY_CONTROL(bits)	((((bits) & 0x7) << 1) | \
					 (((bits) & 0x8) << (11 - 3)))
#define HSW_WB_LLC_AGE3			HSW_CACHEABILITY_CONTROL(0x2)
#define HSW_WB_LLC_AGE0			HSW_CACHEABILITY_CONTROL(0x3)
#define HSW_WB_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x8)
#define HSW_WB_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0xb)
#define HSW_WT_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x7)
#define HSW_WT_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0x6)
#define HSW_PTE_UNCACHED		(0)
#define HSW_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0x7f0))
#define HSW_PTE_ADDR_ENCODE(addr)	HSW_GTT_ADDR_ENCODE(addr)

/*
 * GEN8 32b style address is defined as a 3 level page table:
 * 31:30 | 29:21 | 20:12 |  11:0
 * PDPE  |  PDE  |  PTE  | offset
 * The difference as compared to normal x86 3 level page table is the PDPEs are
 * programmed via register.
 *
 * GEN8 48b style address is defined as a 4 level page table:
 * 47:39 | 38:30 | 29:21 | 20:12 |  11:0
 * PML4E | PDPE  |  PDE  |  PTE  | offset
 */
#define GEN8_3LVL_PDPES			4
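
/*
 * Illustrative sketch only (hypothetical helper, never called by the driver):
 * decompose a 48b gen8 GPU virtual address into the four page-table indices
 * laid out in the comment above. The real page walkers derive these shifts
 * from vm->top rather than hard-coding them.
 */
static inline void __example_gen8_split_48b(u64 addr,
					    unsigned int *pml4e,
					    unsigned int *pdpe,
					    unsigned int *pde,
					    unsigned int *pte)
{
	*pte   = (addr >> 12) & 0x1ff;	/* bits 20:12 */
	*pde   = (addr >> 21) & 0x1ff;	/* bits 29:21 */
	*pdpe  = (addr >> 30) & 0x1ff;	/* bits 38:30 */
	*pml4e = (addr >> 39) & 0x1ff;	/* bits 47:39 */
}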

#define PPAT_UNCACHED			(_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE			0 /* WB LLC */
#define PPAT_CACHED			_PAGE_PAT /* WB LLCeLLC */
#define PPAT_DISPLAY_ELLC		_PAGE_PCD /* WT eLLC */

#define CHV_PPAT_SNOOP			(1<<6)
#define GEN8_PPAT_AGE(x)		((x)<<4)
#define GEN8_PPAT_LLCeLLC		(3<<2)
#define GEN8_PPAT_LLCELLC		(2<<2)
#define GEN8_PPAT_LLC			(1<<2)
#define GEN8_PPAT_WB			(3<<0)
#define GEN8_PPAT_WT			(2<<0)
#define GEN8_PPAT_WC			(1<<0)
#define GEN8_PPAT_UC			(0<<0)
#define GEN8_PPAT_ELLC_OVERRIDE		(0<<2)
#define GEN8_PPAT(i, x)			((u64)(x) << ((i) * 8))

#define GEN8_PDE_IPS_64K		BIT(11)
#define GEN8_PDE_PS_2M			BIT(7)

#define for_each_sgt_dma(__dmap, __iter, __sgt) \
	__for_each_sgt_dma(__dmap, __iter, __sgt, I915_GTT_PAGE_SIZE)

struct intel_remapped_plane_info {
	/* in gtt pages */
	unsigned int width, height, stride, offset;
} __packed;

struct intel_remapped_info {
	struct intel_remapped_plane_info plane[2];
	unsigned int unused_mbz;
} __packed;

struct intel_rotation_info {
	struct intel_remapped_plane_info plane[2];
} __packed;

struct intel_partial_info {
	u64 offset;
	unsigned int size;
} __packed;

enum i915_ggtt_view_type {
	I915_GGTT_VIEW_NORMAL = 0,
	I915_GGTT_VIEW_ROTATED = sizeof(struct intel_rotation_info),
	I915_GGTT_VIEW_PARTIAL = sizeof(struct intel_partial_info),
	I915_GGTT_VIEW_REMAPPED = sizeof(struct intel_remapped_info),
};

static inline void assert_i915_gem_gtt_types(void)
{
	BUILD_BUG_ON(sizeof(struct intel_rotation_info) != 8*sizeof(unsigned int));
	BUILD_BUG_ON(sizeof(struct intel_partial_info) != sizeof(u64) + sizeof(unsigned int));
	BUILD_BUG_ON(sizeof(struct intel_remapped_info) != 9*sizeof(unsigned int));

	/* Check that rotation/remapped shares offsets for simplicity */
	BUILD_BUG_ON(offsetof(struct intel_remapped_info, plane[0]) !=
		     offsetof(struct intel_rotation_info, plane[0]));
	BUILD_BUG_ON(offsetofend(struct intel_remapped_info, plane[1]) !=
		     offsetofend(struct intel_rotation_info, plane[1]));

	/* As we encode the size of each branch inside the union into its type,
	 * we have to be careful that each branch has a unique size.
	 */
	switch ((enum i915_ggtt_view_type)0) {
	case I915_GGTT_VIEW_NORMAL:
	case I915_GGTT_VIEW_PARTIAL:
	case I915_GGTT_VIEW_ROTATED:
	case I915_GGTT_VIEW_REMAPPED:
		/* gcc complains if these are identical cases */
		break;
	}
}

struct i915_ggtt_view {
	enum i915_ggtt_view_type type;
	union {
		/* Members need to contain no holes/padding */
		struct intel_partial_info partial;
		struct intel_rotation_info rotated;
		struct intel_remapped_info remapped;
	};
};

enum i915_cache_level;

struct i915_vma;

struct i915_page_dma {
	struct page *page;
	union {
		dma_addr_t daddr;

		/* For gen6/gen7 only. This is the offset in the GGTT
		 * where the page directory entries for PPGTT begin
		 */
		u32 ggtt_offset;
	};
};

struct i915_page_scratch {
	struct i915_page_dma base;
	u64 encode;
};

struct i915_page_table {
	struct i915_page_dma base;
	atomic_t used;
};

struct i915_page_directory {
	struct i915_page_table pt;
	spinlock_t lock;
	void *entry[512];
};

#define __px_choose_expr(x, type, expr, other) \
	__builtin_choose_expr( \
	__builtin_types_compatible_p(typeof(x), type) || \
	__builtin_types_compatible_p(typeof(x), const type), \
	({ type __x = (type)(x); expr; }), \
	other)

#define px_base(px) \
	__px_choose_expr(px, struct i915_page_dma *, __x, \
	__px_choose_expr(px, struct i915_page_scratch *, &__x->base, \
	__px_choose_expr(px, struct i915_page_table *, &__x->base, \
	__px_choose_expr(px, struct i915_page_directory *, &__x->pt.base, \
	(void)0))))
#define px_dma(px) (px_base(px)->daddr)

#define px_pt(px) \
	__px_choose_expr(px, struct i915_page_table *, __x, \
	__px_choose_expr(px, struct i915_page_directory *, &__x->pt, \
	(void)0))
#define px_used(px) (&px_pt(px)->used)
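
/*
 * Illustrative sketch only (hypothetical helper, never called by the driver):
 * px_base()/px_dma()/px_used() behave like a compile-time _Generic, picking
 * the right member regardless of whether they are handed a page table or a
 * page directory, so common code does not need per-level accessors.
 */
static inline dma_addr_t __example_px_dma(struct i915_page_table *pt,
					  struct i915_page_directory *pd)
{
	/* The same macro resolves to &pt->base and &pd->pt.base respectively. */
	return px_dma(pt) ?: px_dma(pd);
}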

struct i915_vma_ops {
	/* Map an object into an address space with the given cache flags. */
	int (*bind_vma)(struct i915_vma *vma,
			enum i915_cache_level cache_level,
			u32 flags);
	/*
	 * Unmap an object from an address space. This usually consists of
	 * setting the valid PTE entries to a reserved scratch page.
	 */
	void (*unbind_vma)(struct i915_vma *vma);

	int (*set_pages)(struct i915_vma *vma);
	void (*clear_pages)(struct i915_vma *vma);
};

struct pagestash {
	spinlock_t lock;
	struct pagevec pvec;
};

struct i915_address_space {
	struct kref ref;
	struct rcu_work rcu;

	struct drm_mm mm;
	struct intel_gt *gt;
	struct drm_i915_private *i915;
	struct device *dma;
	/* Every address space belongs to a struct file - except for the global
	 * GTT that is owned by the driver (and so @file is set to NULL). In
	 * principle, no information should leak from one context to another
	 * (or between files/processes etc) unless explicitly shared by the
	 * owner. Tracking the owner is important in order to free up per-file
	 * objects along with the file, to aid resource tracking, and to
	 * assign blame.
	 */
	struct drm_i915_file_private *file;
	u64 total;		/* size addr space maps (e.g. 2GB for ggtt) */
	u64 reserved;		/* size addr space reserved */

	bool closed;

	struct mutex mutex; /* protects vma and our lists */
#define VM_CLASS_GGTT 0
#define VM_CLASS_PPGTT 1

	struct i915_page_scratch scratch[4];
	unsigned int scratch_order;
	unsigned int top;

	/**
	 * List of vma currently bound.
	 */
	struct list_head bound_list;

	/**
	 * List of vma that are not bound.
	 */
	struct list_head unbound_list;

	struct pagestash free_pages;

	/* Global GTT */
	bool is_ggtt:1;

	/* Some systems require uncached updates of the page directories */
	bool pt_kmap_wc:1;

	/* Some systems support read-only mappings for GGTT and/or PPGTT */
	bool has_read_only:1;

	u64 (*pte_encode)(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags); /* Create a valid PTE */
#define PTE_READ_ONLY	(1<<0)

	int (*allocate_va_range)(struct i915_address_space *vm,
				 u64 start, u64 length);
	void (*clear_range)(struct i915_address_space *vm,
			    u64 start, u64 length);
	void (*insert_page)(struct i915_address_space *vm,
			    dma_addr_t addr,
			    u64 offset,
			    enum i915_cache_level cache_level,
			    u32 flags);
	void (*insert_entries)(struct i915_address_space *vm,
			       struct i915_vma *vma,
			       enum i915_cache_level cache_level,
			       u32 flags);
	void (*cleanup)(struct i915_address_space *vm);

	struct i915_vma_ops vma_ops;

	I915_SELFTEST_DECLARE(struct fault_attr fault_attr);
	I915_SELFTEST_DECLARE(bool scrub_64K);
};

#define i915_is_ggtt(vm) ((vm)->is_ggtt)

static inline bool
i915_vm_is_4lvl(const struct i915_address_space *vm)
{
	return (vm->total - 1) >> 32;
}

static inline bool
i915_vm_has_scratch_64K(struct i915_address_space *vm)
{
	return vm->scratch_order == get_order(I915_GTT_PAGE_SIZE_64K);
}
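
/*
 * Illustrative compile-time check only (not used by the driver): a vm whose
 * total size needs more than 32 bits of address - e.g. a full 48b ppgtt -
 * is reported as 4-level by i915_vm_is_4lvl(), while a 32b (4GiB) or smaller
 * address space is not.
 */
static inline void __example_assert_vm_levels(void)
{
	BUILD_BUG_ON(!((BIT_ULL(48) - 1) >> 32));	/* 48b vm -> 4 level */
	BUILD_BUG_ON((BIT_ULL(32) - 1) >> 32);		/* 32b vm -> 3 level */
}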

/* The Graphics Translation Table is the way in which GEN hardware translates a
 * Graphics Virtual Address into a Physical Address. In addition to the normal
 * collateral associated with any va->pa translations GEN hardware also has a
 * portion of the GTT which can be mapped by the CPU and remain both coherent
 * and correct (in cases like swizzling). That region is referred to as GMADR in
 * the spec.
 */
struct i915_ggtt {
	struct i915_address_space vm;

	struct io_mapping iomap;	/* Mapping to our CPU mappable region */
	struct resource gmadr;		/* GMADR resource */
	resource_size_t mappable_end;	/* End offset that we can CPU map */

	/** "Graphics Stolen Memory" holds the global PTEs */
	void __iomem *gsm;
	void (*invalidate)(struct i915_ggtt *ggtt);

	/** PPGTT used for aliasing the PPGTT with the GTT */
	struct i915_ppgtt *alias;

	bool do_idle_maps;

	int mtrr;

	u32 pin_bias;

	unsigned int num_fences;
	struct i915_fence_reg fence_regs[I915_MAX_NUM_FENCES];
	struct list_head fence_list;

	/** List of all objects in gtt_space, currently mmapped by userspace.
	 * All objects within this list must also be on bound_list.
	 */
	struct list_head userfault_list;

	/* Manual runtime pm autosuspend delay for user GGTT mmaps */
	struct intel_wakeref_auto userfault_wakeref;

	struct drm_mm_node error_capture;
	struct drm_mm_node uc_fw;
};

struct i915_ppgtt {
	struct i915_address_space vm;

	intel_engine_mask_t pd_dirty_engines;
	struct i915_page_directory *pd;
};

struct gen6_ppgtt {
	struct i915_ppgtt base;

	struct i915_vma *vma;
	gen6_pte_t __iomem *pd_addr;

	unsigned int pin_count;
	bool scan_for_unused_pt;
};

#define __to_gen6_ppgtt(base) container_of(base, struct gen6_ppgtt, base)

static inline struct gen6_ppgtt *to_gen6_ppgtt(struct i915_ppgtt *base)
{
	BUILD_BUG_ON(offsetof(struct gen6_ppgtt, base));
	return __to_gen6_ppgtt(base);
}

/*
 * gen6_for_each_pde() iterates over every pde from start until start+length.
 * If start and start+length are not perfectly divisible, the macro will round
 * down and up as needed. Start=0 and length=2G effectively iterates over
 * every PDE in the system. The macro modifies ALL its parameters except 'pd',
 * so each of the other parameters should preferably be a simple variable, or
 * at most an lvalue with no side-effects!
 */
#define gen6_for_each_pde(pt, pd, start, length, iter)			\
	for (iter = gen6_pde_index(start);				\
	     length > 0 && iter < I915_PDES &&				\
		(pt = i915_pt_entry(pd, iter), true);			\
	     ({ u32 temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT);	\
		    temp = min(temp - start, length);			\
		    start += temp, length -= temp; }), ++iter)

#define gen6_for_all_pdes(pt, pd, iter)					\
	for (iter = 0;							\
	     iter < I915_PDES &&					\
		(pt = i915_pt_entry(pd, iter), true);			\
	     ++iter)

static inline u32 i915_pte_index(u64 address, unsigned int pde_shift)
{
	const u32 mask = NUM_PTE(pde_shift) - 1;

	return (address >> PAGE_SHIFT) & mask;
}
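
/*
 * Worked example (illustrative only, never called): with the gen6 PDE shift
 * of 22 each page directory entry spans 4MiB of GPU address space, so the
 * address 0x00403000 falls in PDE 1 (0x00403000 >> 22) and PTE 3 within
 * that page table.
 */
static inline void __example_gen6_pte_index(void)
{
	GEM_BUG_ON(i915_pte_index(0x00403000, GEN6_PDE_SHIFT) != 3);
	GEM_BUG_ON((0x00403000 >> GEN6_PDE_SHIFT) != 1);
}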

/* Helper to count the number of PTEs within the given length. This count
 * does not cross a page table boundary, so the max value would be
 * GEN6_PTES for GEN6, and GEN8_PTES for GEN8.
 */
static inline u32 i915_pte_count(u64 addr, u64 length, unsigned int pde_shift)
{
	const u64 mask = ~((1ULL << pde_shift) - 1);
	u64 end;

	GEM_BUG_ON(length == 0);
	GEM_BUG_ON(offset_in_page(addr | length));

	end = addr + length;

	if ((addr & mask) != (end & mask))
		return NUM_PTE(pde_shift) - i915_pte_index(addr, pde_shift);

	return i915_pte_index(end, pde_shift) - i915_pte_index(addr, pde_shift);
}

static inline u32 i915_pde_index(u64 addr, u32 shift)
{
	return (addr >> shift) & I915_PDE_MASK;
}

static inline u32 gen6_pte_index(u32 addr)
{
	return i915_pte_index(addr, GEN6_PDE_SHIFT);
}

static inline u32 gen6_pte_count(u32 addr, u32 length)
{
	return i915_pte_count(addr, length, GEN6_PDE_SHIFT);
}

static inline u32 gen6_pde_index(u32 addr)
{
	return i915_pde_index(addr, GEN6_PDE_SHIFT);
}

static inline struct i915_page_table *
i915_pt_entry(const struct i915_page_directory * const pd,
	      const unsigned short n)
{
	return pd->entry[n];
}

static inline struct i915_page_directory *
i915_pd_entry(const struct i915_page_directory * const pdp,
	      const unsigned short n)
{
	return pdp->entry[n];
}
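
/*
 * Illustrative sketch only (hypothetical helper, not driver code): count how
 * many page tables are populated for [start, start + length) by walking the
 * gen6 page directory with gen6_for_each_pde(). The real code compares each
 * entry against the vm scratch page table; here we simply skip NULL slots.
 */
static inline unsigned int
__example_count_gen6_pts(struct i915_page_directory *pd, u64 start, u64 length)
{
	struct i915_page_table *pt;
	unsigned int count = 0;
	u32 pde;

	gen6_for_each_pde(pt, pd, start, length, pde)
		if (pt)
			count++;

	return count;
}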

static inline dma_addr_t
i915_page_dir_dma_addr(const struct i915_ppgtt *ppgtt, const unsigned int n)
{
	struct i915_page_dma *pt = ppgtt->pd->entry[n];

	return px_dma(pt ?: px_base(&ppgtt->vm.scratch[ppgtt->vm.top]));
}

static inline struct i915_ggtt *
i915_vm_to_ggtt(struct i915_address_space *vm)
{
	BUILD_BUG_ON(offsetof(struct i915_ggtt, vm));
	GEM_BUG_ON(!i915_is_ggtt(vm));
	return container_of(vm, struct i915_ggtt, vm);
}

static inline struct i915_ppgtt *
i915_vm_to_ppgtt(struct i915_address_space *vm)
{
	BUILD_BUG_ON(offsetof(struct i915_ppgtt, vm));
	GEM_BUG_ON(i915_is_ggtt(vm));
	return container_of(vm, struct i915_ppgtt, vm);
}

int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv);
int i915_ggtt_init_hw(struct drm_i915_private *dev_priv);
int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv);
void i915_ggtt_enable_guc(struct i915_ggtt *ggtt);
void i915_ggtt_disable_guc(struct i915_ggtt *ggtt);
int i915_init_ggtt(struct drm_i915_private *dev_priv);
void i915_ggtt_driver_release(struct drm_i915_private *dev_priv);

int i915_ppgtt_init_hw(struct intel_gt *gt);

struct i915_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv);

static inline struct i915_address_space *
i915_vm_get(struct i915_address_space *vm)
{
	kref_get(&vm->ref);
	return vm;
}

void i915_vm_release(struct kref *kref);

static inline void i915_vm_put(struct i915_address_space *vm)
{
	kref_put(&vm->ref, i915_vm_release);
}

int gen6_ppgtt_pin(struct i915_ppgtt *base);
void gen6_ppgtt_unpin(struct i915_ppgtt *base);
void gen6_ppgtt_unpin_all(struct i915_ppgtt *base);

void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv);
void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv);

int __must_check i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
					    struct sg_table *pages);
void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
			       struct sg_table *pages);

int i915_gem_gtt_reserve(struct i915_address_space *vm,
			 struct drm_mm_node *node,
			 u64 size, u64 offset, unsigned long color,
			 unsigned int flags);

int i915_gem_gtt_insert(struct i915_address_space *vm,
			struct drm_mm_node *node,
			u64 size, u64 alignment, unsigned long color,
			u64 start, u64 end, unsigned int flags);

/* Flags used by pin/bind & friends. */
#define PIN_NOEVICT		BIT_ULL(0)
#define PIN_NOSEARCH		BIT_ULL(1)
#define PIN_NONBLOCK		BIT_ULL(2)
#define PIN_MAPPABLE		BIT_ULL(3)
#define PIN_ZONE_4G		BIT_ULL(4)
#define PIN_HIGH		BIT_ULL(5)
#define PIN_OFFSET_BIAS		BIT_ULL(6)
#define PIN_OFFSET_FIXED	BIT_ULL(7)

#define PIN_MBZ			BIT_ULL(8) /* I915_VMA_PIN_OVERFLOW */
#define PIN_GLOBAL		BIT_ULL(9) /* I915_VMA_GLOBAL_BIND */
#define PIN_USER		BIT_ULL(10) /* I915_VMA_LOCAL_BIND */
#define PIN_UPDATE		BIT_ULL(11)

#define PIN_OFFSET_MASK		(-I915_GTT_PAGE_SIZE)

#endif