/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2008-2012 Intel Corporation
 */

#include <linux/errno.h>
#include <linux/mutex.h>

#include <drm/drm_mm.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"

/*
 * The BIOS typically reserves some of the system's memory for the exclusive
 * use of the integrated graphics. This memory is no longer available for
 * use by the OS, so the user finds that their system has less memory
 * available than they installed. We refer to this memory as stolen.
 *
 * The BIOS will allocate its framebuffer from the stolen memory. Our
 * goal is to try to reuse that object for our own fbcon, which must always
 * be available for panics. Anything else we can reuse the stolen memory
 * for is a boon.
 */

int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
					 struct drm_mm_node *node, u64 size,
					 unsigned alignment, u64 start, u64 end)
{
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return -ENODEV;

	/* WaSkipStolenMemoryFirstPage:bdw+ */
	if (INTEL_GEN(dev_priv) >= 8 && start < 4096)
		start = 4096;

	mutex_lock(&dev_priv->mm.stolen_lock);
	ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node,
					  size, alignment, 0,
					  start, end, DRM_MM_INSERT_BEST);
	mutex_unlock(&dev_priv->mm.stolen_lock);

	return ret;
}

int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
				struct drm_mm_node *node, u64 size,
				unsigned alignment)
{
	return i915_gem_stolen_insert_node_in_range(dev_priv, node, size,
						    alignment, 0, U64_MAX);
}

void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
				 struct drm_mm_node *node)
{
	mutex_lock(&dev_priv->mm.stolen_lock);
	drm_mm_remove_node(node);
	mutex_unlock(&dev_priv->mm.stolen_lock);
}
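/*
 * Illustrative only (not a caller in this file): users of the helpers
 * above treat node->start as an offset from the beginning of stolen
 * memory, not as a CPU or DMA address. A hypothetical 64K, 4K-aligned
 * allocation might look like:
 *
 *	struct drm_mm_node node = {};
 *	int err;
 *
 *	err = i915_gem_stolen_insert_node(i915, &node, SZ_64K, SZ_4K);
 *	if (err == 0) {
 *		... program hardware with dev_priv->dsm.start + node.start ...
 *		i915_gem_stolen_remove_node(i915, &node);
 *	}
 */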
static int i915_adjust_stolen(struct drm_i915_private *dev_priv,
			      struct resource *dsm)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct resource *r;

	if (dsm->start == 0 || dsm->end <= dsm->start)
		return -EINVAL;

	/*
	 * TODO: We have yet to encounter the case where the GTT wasn't at the
	 * end of stolen. With that assumption we could simplify this.
	 */

	/* Make sure we don't clobber the GTT if it's within stolen memory */
	if (INTEL_GEN(dev_priv) <= 4 &&
	    !IS_G33(dev_priv) && !IS_PINEVIEW(dev_priv) && !IS_G4X(dev_priv)) {
		struct resource stolen[2] = {*dsm, *dsm};
		struct resource ggtt_res;
		resource_size_t ggtt_start;

		ggtt_start = I915_READ(PGTBL_CTL);
		if (IS_GEN(dev_priv, 4))
			ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
				     (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
		else
			ggtt_start &= PGTBL_ADDRESS_LO_MASK;

		ggtt_res =
			(struct resource) DEFINE_RES_MEM(ggtt_start,
							 ggtt_total_entries(ggtt) * 4);

		if (ggtt_res.start >= stolen[0].start && ggtt_res.start < stolen[0].end)
			stolen[0].end = ggtt_res.start;
		if (ggtt_res.end > stolen[1].start && ggtt_res.end <= stolen[1].end)
			stolen[1].start = ggtt_res.end;

		/* Pick the larger of the two chunks */
		if (resource_size(&stolen[0]) > resource_size(&stolen[1]))
			*dsm = stolen[0];
		else
			*dsm = stolen[1];

		if (stolen[0].start != stolen[1].start ||
		    stolen[0].end != stolen[1].end) {
			DRM_DEBUG_DRIVER("GTT within stolen memory at %pR\n", &ggtt_res);
			DRM_DEBUG_DRIVER("Stolen memory adjusted to %pR\n", dsm);
		}
	}

	/*
	 * Verify that nothing else uses this physical address. Stolen
	 * memory should be reserved by the BIOS and hidden from the
	 * kernel. So if the region is already marked as busy, something
	 * is seriously wrong.
	 */
	r = devm_request_mem_region(dev_priv->drm.dev, dsm->start,
				    resource_size(dsm),
				    "Graphics Stolen Memory");
	if (r == NULL) {
		/*
		 * One more attempt but this time requesting region from
		 * start + 1, as we have seen that this resolves the region
		 * conflict with the PCI Bus.
		 * This is a BIOS w/a: Some BIOS wrap stolen in the root
		 * PCI bus, but have an off-by-one error. Hence retry the
		 * reservation starting from 1 instead of 0.
		 * There's also BIOS with off-by-one on the other end.
		 */
		r = devm_request_mem_region(dev_priv->drm.dev, dsm->start + 1,
					    resource_size(dsm) - 2,
					    "Graphics Stolen Memory");
		/*
		 * GEN3 firmware likes to smash PCI bridges into the stolen
		 * range. Apparently this works.
		 */
		if (r == NULL && !IS_GEN(dev_priv, 3)) {
			DRM_ERROR("conflict detected with stolen region: %pR\n",
				  dsm);

			return -EBUSY;
		}
	}

	return 0;
}
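/*
 * Worked example for the off-by-one retry in i915_adjust_stolen():
 * with stolen at [0x80000000, 0x83ffffff] (64MiB), the second request
 * covers [0x80000001, 0x83fffffe], i.e. the same region trimmed by one
 * byte at each end, which sidesteps a BIOS that is off by one on
 * either boundary.
 */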
void i915_gem_cleanup_stolen(struct drm_i915_private *dev_priv)
{
	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return;

	drm_mm_takedown(&dev_priv->mm.stolen);
}

static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = I915_READ(IS_GM45(dev_priv) ?
				CTG_STOLEN_RESERVED :
				ELK_STOLEN_RESERVED);
	resource_size_t stolen_top = dev_priv->dsm.end + 1;

	DRM_DEBUG_DRIVER("%s_STOLEN_RESERVED = %08x\n",
			 IS_GM45(dev_priv) ? "CTG" : "ELK", reg_val);

	if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0)
		return;

	/*
	 * Whether ILK really reuses the ELK register for this is unclear.
	 * Let's see if we catch anyone with this supposedly enabled on ILK.
	 */
	WARN(IS_GEN(dev_priv, 5), "ILK stolen reserved found? 0x%08x\n",
	     reg_val);

	if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK))
		return;

	*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
	WARN_ON((reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);

	*size = stolen_top - *base;
}

static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
				     resource_size_t *base,
				     resource_size_t *size)
{
	u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);

	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) {
	case GEN6_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_512K:
		*size = 512 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_128K:
		*size = 128 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void vlv_get_stolen_reserved(struct drm_i915_private *dev_priv,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);
	resource_size_t stolen_top = dev_priv->dsm.end + 1;

	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	default:
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
		/* fall through */
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	}

	/*
	 * On vlv, the ADDR_MASK portion is left as 0 and HW deduces the
	 * reserved location as (top - size).
	 */
	*base = stolen_top - *size;
}
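/*
 * Worked example for the vlv derivation above: with stolen ending at
 * stolen_top = 0x90000000 and a decoded 1MiB reservation, the hardware
 * places the reserved range at 0x90000000 - 0x100000 = 0x8ff00000.
 */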
static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
				     resource_size_t *base,
				     resource_size_t *size)
{
	u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);

	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN7_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void chv_get_stolen_reserved(struct drm_i915_private *dev_priv,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);

	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);
	resource_size_t stolen_top = dev_priv->dsm.end + 1;

	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	if (!(reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
	*size = stolen_top - *base;
}

static void icl_get_stolen_reserved(struct drm_i915_private *i915,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u64 reg_val = intel_uncore_read64(&i915->uncore, GEN6_STOLEN_RESERVED);

	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val);

	*base = reg_val & GEN11_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}
}
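/*
 * Note (summary, derived from the switch in i915_gem_init_stolen()
 * below): g4x_get_stolen_reserved() serves G4X and ILK, gen6 serves
 * SNB, vlv/gen7 serve gen7 parts, chv serves the LP gen8+ parts
 * (e.g. CHV/BXT), bdw serves the remaining gen8-10, and icl serves
 * gen11 onwards.
 */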
int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
{
	resource_size_t reserved_base, stolen_top;
	resource_size_t reserved_total, reserved_size;

	mutex_init(&dev_priv->mm.stolen_lock);

	if (intel_vgpu_active(dev_priv)) {
		DRM_INFO("iGVT-g active, disabling use of stolen memory\n");
		return 0;
	}

	if (intel_vtd_active() && INTEL_GEN(dev_priv) < 8) {
		DRM_INFO("DMAR active, disabling use of stolen memory\n");
		return 0;
	}

	if (resource_size(&intel_graphics_stolen_res) == 0)
		return 0;

	dev_priv->dsm = intel_graphics_stolen_res;

	if (i915_adjust_stolen(dev_priv, &dev_priv->dsm))
		return 0;

	GEM_BUG_ON(dev_priv->dsm.start == 0);
	GEM_BUG_ON(dev_priv->dsm.end <= dev_priv->dsm.start);

	stolen_top = dev_priv->dsm.end + 1;
	reserved_base = stolen_top;
	reserved_size = 0;

	switch (INTEL_GEN(dev_priv)) {
	case 2:
	case 3:
		break;
	case 4:
		if (!IS_G4X(dev_priv))
			break;
		/* fall through */
	case 5:
		g4x_get_stolen_reserved(dev_priv,
					&reserved_base, &reserved_size);
		break;
	case 6:
		gen6_get_stolen_reserved(dev_priv,
					 &reserved_base, &reserved_size);
		break;
	case 7:
		if (IS_VALLEYVIEW(dev_priv))
			vlv_get_stolen_reserved(dev_priv,
						&reserved_base, &reserved_size);
		else
			gen7_get_stolen_reserved(dev_priv,
						 &reserved_base, &reserved_size);
		break;
	case 8:
	case 9:
	case 10:
		if (IS_LP(dev_priv))
			chv_get_stolen_reserved(dev_priv,
						&reserved_base, &reserved_size);
		else
			bdw_get_stolen_reserved(dev_priv,
						&reserved_base, &reserved_size);
		break;
	case 11:
	default:
		icl_get_stolen_reserved(dev_priv, &reserved_base,
					&reserved_size);
		break;
	}

	/*
	 * Our expectation is that the reserved space is at the top of the
	 * stolen region and *never* at the bottom. If we see !reserved_base,
	 * it likely means we failed to read the registers correctly.
	 */
	if (!reserved_base) {
		DRM_ERROR("inconsistent reservation %pa + %pa; ignoring\n",
			  &reserved_base, &reserved_size);
		reserved_base = stolen_top;
		reserved_size = 0;
	}

	dev_priv->dsm_reserved =
		(struct resource) DEFINE_RES_MEM(reserved_base, reserved_size);

	if (!resource_contains(&dev_priv->dsm, &dev_priv->dsm_reserved)) {
		DRM_ERROR("Stolen reserved area %pR outside stolen memory %pR\n",
			  &dev_priv->dsm_reserved, &dev_priv->dsm);
		return 0;
	}

	/*
	 * It is possible for the reserved area to end before the end of
	 * stolen memory, so just consider the start.
	 */
	reserved_total = stolen_top - reserved_base;

	DRM_DEBUG_DRIVER("Memory reserved for graphics device: %lluK, usable: %lluK\n",
			 (u64)resource_size(&dev_priv->dsm) >> 10,
			 ((u64)resource_size(&dev_priv->dsm) - reserved_total) >> 10);

	dev_priv->stolen_usable_size =
		resource_size(&dev_priv->dsm) - reserved_total;

	/* Basic memrange allocator for stolen space. */
	drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->stolen_usable_size);

	return 0;
}
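/*
 * Resulting layout (illustrative; the reserved block sits at the top,
 * and drm_mm offsets are relative to dsm.start):
 *
 *	dsm.start                                               dsm.end
 *	|<------ usable, managed by mm.stolen ------>|<- reserved_total ->|
 *	0                          stolen_usable_size             stolen_top
 */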
static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
			     resource_size_t offset, resource_size_t size)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct sg_table *st;
	struct scatterlist *sg;

	GEM_BUG_ON(range_overflows(offset, size, resource_size(&dev_priv->dsm)));

	/*
	 * We hide that we have no struct page backing our stolen object
	 * by wrapping the contiguous physical allocation with a fake
	 * dma mapping in a single scatterlist.
	 */

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return ERR_PTR(-ENOMEM);

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return ERR_PTR(-ENOMEM);
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = size;

	sg_dma_address(sg) = (dma_addr_t)dev_priv->dsm.start + offset;
	sg_dma_len(sg) = size;

	return st;
}

static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages =
		i915_pages_create_for_stolen(obj->base.dev,
					     obj->stolen->start,
					     obj->stolen->size);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	__i915_gem_object_set_pages(obj, pages, obj->stolen->size);

	return 0;
}

static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
					     struct sg_table *pages)
{
	/* Should only be called from i915_gem_object_release_stolen() */
	sg_free_table(pages);
	kfree(pages);
}

static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen);

	GEM_BUG_ON(!stolen);

	__i915_gem_object_unpin_pages(obj);

	i915_gem_stolen_remove_node(dev_priv, stolen);
	kfree(stolen);
}

static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
	.get_pages = i915_gem_object_get_pages_stolen,
	.put_pages = i915_gem_object_put_pages_stolen,
	.release = i915_gem_object_release_stolen,
};
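/*
 * Worked example for i915_pages_create_for_stolen(): an object at
 * stolen offset 0x10000 with size 0x2000 yields a single scatterlist
 * entry with sg_dma_address = dsm.start + 0x10000 and sg_dma_len =
 * 0x2000. No struct pages back the entry, so callers must not expect
 * sg_page() to return anything meaningful for this table.
 */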
static struct drm_i915_gem_object *
_i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
			       struct drm_mm_node *stolen)
{
	struct drm_i915_gem_object *obj;
	unsigned int cache_level;

	obj = i915_gem_object_alloc();
	if (obj == NULL)
		return NULL;

	drm_gem_private_object_init(&dev_priv->drm, &obj->base, stolen->size);
	i915_gem_object_init(obj, &i915_gem_object_stolen_ops);

	obj->stolen = stolen;
	obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
	cache_level = HAS_LLC(dev_priv) ? I915_CACHE_LLC : I915_CACHE_NONE;
	i915_gem_object_set_cache_coherency(obj, cache_level);

	if (i915_gem_object_pin_pages(obj))
		goto cleanup;

	return obj;

cleanup:
	i915_gem_object_free(obj);
	return NULL;
}

struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
			      resource_size_t size)
{
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return NULL;

	if (size == 0)
		return NULL;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return NULL;

	ret = i915_gem_stolen_insert_node(dev_priv, stolen, size, 4096);
	if (ret) {
		kfree(stolen);
		return NULL;
	}

	obj = _i915_gem_object_create_stolen(dev_priv, stolen);
	if (obj)
		return obj;

	i915_gem_stolen_remove_node(dev_priv, stolen);
	kfree(stolen);
	return NULL;
}
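/*
 * Illustrative only: callers allocate anonymous stolen-backed objects
 * like so, and must cope with a NULL return since stolen may be
 * disabled (e.g. under a vGPU) or exhausted:
 *
 *	obj = i915_gem_object_create_stolen(i915, SZ_128K);
 *	if (!obj)
 *		... fall back to a regular shmem-backed object ...
 */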
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv,
					       resource_size_t stolen_offset,
					       resource_size_t gtt_offset,
					       resource_size_t size)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	struct i915_vma *vma;
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return NULL;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	DRM_DEBUG_DRIVER("creating preallocated stolen object: stolen_offset=%pa, gtt_offset=%pa, size=%pa\n",
			 &stolen_offset, &gtt_offset, &size);

	/* KISS and expect everything to be page-aligned */
	if (WARN_ON(size == 0) ||
	    WARN_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)) ||
	    WARN_ON(!IS_ALIGNED(stolen_offset, I915_GTT_MIN_ALIGNMENT)))
		return NULL;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return NULL;

	stolen->start = stolen_offset;
	stolen->size = size;
	mutex_lock(&dev_priv->mm.stolen_lock);
	ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
	mutex_unlock(&dev_priv->mm.stolen_lock);
	if (ret) {
		DRM_DEBUG_DRIVER("failed to allocate stolen space\n");
		kfree(stolen);
		return NULL;
	}

	obj = _i915_gem_object_create_stolen(dev_priv, stolen);
	if (obj == NULL) {
		DRM_DEBUG_DRIVER("failed to allocate stolen object\n");
		i915_gem_stolen_remove_node(dev_priv, stolen);
		kfree(stolen);
		return NULL;
	}

	/* Some objects just need physical mem from stolen space */
	if (gtt_offset == I915_GTT_OFFSET_NONE)
		return obj;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	vma = i915_vma_instance(obj, &ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_pages;
	}

	/*
	 * To simplify the initialisation sequence between KMS and GTT,
	 * we allow construction of the stolen object prior to
	 * setting up the GTT space. The actual reservation will occur
	 * later.
	 */
	ret = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
				   size, gtt_offset, obj->cache_level,
				   0);
	if (ret) {
		DRM_DEBUG_DRIVER("failed to allocate stolen GTT space\n");
		goto err_pages;
	}

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

	vma->pages = obj->mm.pages;
	vma->flags |= I915_VMA_GLOBAL_BIND;
	__i915_vma_set_map_and_fenceable(vma);

	mutex_lock(&ggtt->vm.mutex);
	list_move_tail(&vma->vm_link, &ggtt->vm.bound_list);
	mutex_unlock(&ggtt->vm.mutex);

	GEM_BUG_ON(i915_gem_object_is_shrinkable(obj));
	atomic_inc(&obj->bind_count);

	return obj;

err_pages:
	i915_gem_object_unpin_pages(obj);
err:
	i915_gem_object_put(obj);
	return NULL;
}
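/*
 * Illustrative only: the preallocated variant exists so the driver can
 * wrap a region the firmware has already programmed, e.g. taking over
 * the BIOS framebuffer for fbcon (see the comment at the top of this
 * file). A hypothetical takeover of a scanout buffer at stolen offset
 * `base` that must also appear at GGTT offset `base` might be:
 *
 *	obj = i915_gem_object_create_stolen_for_preallocated(i915,
 *							     base, base,
 *							     size);
 */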