/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2008-2012 Intel Corporation
 */

#include <linux/errno.h>
#include <linux/mutex.h>

#include <drm/drm_mm.h>
#include <drm/i915_drm.h>

#include "gem/i915_gem_region.h"
#include "i915_drv.h"
#include "i915_gem_stolen.h"

/*
 * The BIOS typically reserves some of the system's memory for the exclusive
 * use of the integrated graphics. This memory is no longer available for
 * use by the OS, so the user finds that their system has less memory
 * available than they put in. We refer to this memory as stolen.
 *
 * The BIOS will allocate its framebuffer from the stolen memory. Our
 * goal is to try to reuse that object for our own fbcon, which must always
 * be available for panics. Anything else we can reuse the stolen memory
 * for is a boon.
 */

int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
					 struct drm_mm_node *node, u64 size,
					 unsigned alignment, u64 start, u64 end)
{
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return -ENODEV;

	/* WaSkipStolenMemoryFirstPage:bdw+ */
	if (INTEL_GEN(dev_priv) >= 8 && start < 4096)
		start = 4096;

	mutex_lock(&dev_priv->mm.stolen_lock);
	ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node,
					  size, alignment, 0,
					  start, end, DRM_MM_INSERT_BEST);
	mutex_unlock(&dev_priv->mm.stolen_lock);

	return ret;
}

int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
				struct drm_mm_node *node, u64 size,
				unsigned alignment)
{
	return i915_gem_stolen_insert_node_in_range(dev_priv, node, size,
						    alignment, 0, U64_MAX);
}

void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
				 struct drm_mm_node *node)
{
	mutex_lock(&dev_priv->mm.stolen_lock);
	drm_mm_remove_node(node);
	mutex_unlock(&dev_priv->mm.stolen_lock);
}
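
/*
 * Usage sketch (illustrative only, not part of the driver): stolen space
 * is managed as a plain drm_mm range allocator, so a hypothetical caller
 * allocates and releases space roughly as follows:
 *
 *	struct drm_mm_node node = {};
 *	int err;
 *
 *	err = i915_gem_stolen_insert_node(i915, &node, SZ_64K, 4096);
 *	if (err)
 *		return err;	// -ENODEV if stolen was never initialized
 *	...
 *	i915_gem_stolen_remove_node(i915, &node);
 *
 * The 4096-byte alignment mirrors what _i915_gem_object_create_stolen()
 * below passes for object allocations.
 */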

static int i915_adjust_stolen(struct drm_i915_private *dev_priv,
			      struct resource *dsm)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct resource *r;

	if (dsm->start == 0 || dsm->end <= dsm->start)
		return -EINVAL;

	/*
	 * TODO: We have yet to encounter the case where the GTT wasn't at the
	 * end of stolen. With that assumption we could simplify this.
	 */

	/* Make sure we don't clobber the GTT if it's within stolen memory */
	if (INTEL_GEN(dev_priv) <= 4 &&
	    !IS_G33(dev_priv) && !IS_PINEVIEW(dev_priv) && !IS_G4X(dev_priv)) {
		struct resource stolen[2] = {*dsm, *dsm};
		struct resource ggtt_res;
		resource_size_t ggtt_start;

		ggtt_start = I915_READ(PGTBL_CTL);
		if (IS_GEN(dev_priv, 4))
			ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
				     (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
		else
			ggtt_start &= PGTBL_ADDRESS_LO_MASK;

		ggtt_res =
			(struct resource) DEFINE_RES_MEM(ggtt_start,
							 ggtt_total_entries(ggtt) * 4);

		if (ggtt_res.start >= stolen[0].start && ggtt_res.start < stolen[0].end)
			stolen[0].end = ggtt_res.start;
		if (ggtt_res.end > stolen[1].start && ggtt_res.end <= stolen[1].end)
			stolen[1].start = ggtt_res.end;

		/* Pick the larger of the two chunks */
		if (resource_size(&stolen[0]) > resource_size(&stolen[1]))
			*dsm = stolen[0];
		else
			*dsm = stolen[1];

		if (stolen[0].start != stolen[1].start ||
		    stolen[0].end != stolen[1].end) {
			DRM_DEBUG_DRIVER("GTT within stolen memory at %pR\n", &ggtt_res);
			DRM_DEBUG_DRIVER("Stolen memory adjusted to %pR\n", dsm);
		}
	}

	/*
	 * Verify that nothing else uses this physical address. Stolen
	 * memory should be reserved by the BIOS and hidden from the
	 * kernel. So if the region is already marked as busy, something
	 * is seriously wrong.
	 */
	r = devm_request_mem_region(dev_priv->drm.dev, dsm->start,
				    resource_size(dsm),
				    "Graphics Stolen Memory");
	if (r == NULL) {
		/*
		 * One more attempt, but this time requesting the region from
		 * start + 1, as we have seen that this resolves the region
		 * conflict with the PCI Bus.
		 * This is a BIOS w/a: some BIOSes wrap stolen in the root
		 * PCI bus, but have an off-by-one error. Hence retry the
		 * reservation starting from 1 instead of 0. There are also
		 * BIOSes with an off-by-one on the other end.
		 */
		r = devm_request_mem_region(dev_priv->drm.dev, dsm->start + 1,
					    resource_size(dsm) - 2,
					    "Graphics Stolen Memory");
		/*
		 * GEN3 firmware likes to smash pci bridges into the stolen
		 * range. Apparently this works.
		 */
		if (r == NULL && !IS_GEN(dev_priv, 3)) {
			DRM_ERROR("conflict detected with stolen region: %pR\n",
				  dsm);

			return -EBUSY;
		}
	}

	return 0;
}

static void i915_gem_cleanup_stolen(struct drm_i915_private *dev_priv)
{
	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return;

	drm_mm_takedown(&dev_priv->mm.stolen);
}
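
/*
 * Worked example for i915_adjust_stolen() above (hypothetical numbers):
 * with stolen at [0x20000000, 0x23ffffff] and the GTT read back at
 * [0x23f80000, 0x23ffffff], the candidate chunks become
 * [0x20000000, 0x23f80000] (everything below the GTT) and a degenerate
 * single-byte remainder above it; the larger, GTT-free chunk below is
 * what *dsm is trimmed to.
 */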

static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = I915_READ(IS_GM45(dev_priv) ?
				CTG_STOLEN_RESERVED :
				ELK_STOLEN_RESERVED);
	resource_size_t stolen_top = dev_priv->dsm.end + 1;

	DRM_DEBUG_DRIVER("%s_STOLEN_RESERVED = %08x\n",
			 IS_GM45(dev_priv) ? "CTG" : "ELK", reg_val);

	if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0)
		return;

	/*
	 * Whether ILK really reuses the ELK register for this is unclear.
	 * Let's see if we catch anyone with this supposedly enabled on ILK.
	 */
	WARN(IS_GEN(dev_priv, 5), "ILK stolen reserved found? 0x%08x\n",
	     reg_val);

	if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK))
		return;

	*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
	WARN_ON((reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);

	*size = stolen_top - *base;
}

static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
				     resource_size_t *base,
				     resource_size_t *size)
{
	u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);

	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) {
	case GEN6_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_512K:
		*size = 512 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_128K:
		*size = 128 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void vlv_get_stolen_reserved(struct drm_i915_private *dev_priv,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);
	resource_size_t stolen_top = dev_priv->dsm.end + 1;

	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	default:
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
		/* fall through */
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	}

	/*
	 * On vlv, the ADDR_MASK portion is left as 0 and HW deduces the
	 * reserved location as (top - size).
	 */
	*base = stolen_top - *size;
}
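
/*
 * Example of the vlv derivation above (hypothetical numbers): with
 * dsm.end = 0x7fffffff, stolen_top is 0x80000000, so a decoded reserved
 * size of 1M places the block at *base = 0x80000000 - 0x100000 =
 * 0x7ff00000, flush against the top of stolen memory.
 */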

static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
				     resource_size_t *base,
				     resource_size_t *size)
{
	u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);

	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN7_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void chv_get_stolen_reserved(struct drm_i915_private *dev_priv,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);

	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);
	resource_size_t stolen_top = dev_priv->dsm.end + 1;

	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	if (!(reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
	*size = stolen_top - *base;
}

static void icl_get_stolen_reserved(struct drm_i915_private *i915,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u64 reg_val = intel_uncore_read64(&i915->uncore, GEN6_STOLEN_RESERVED);

	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val);

	*base = reg_val & GEN11_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}
}
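
/*
 * Note on the decoders above (a summary, not new behaviour): each helper
 * reads its platform's STOLEN_RESERVED register (checking the enable bit
 * where one exists) and produces a (base, size) pair. Most decode a
 * discrete size field, e.g. a gen7 value whose size field reads
 * GEN7_STOLEN_RESERVED_256K yields *size = 256 * 1024; g4x and bdw
 * instead infer the size as everything from the decoded base up to the
 * top of stolen memory.
 */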

static int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
{
	resource_size_t reserved_base, stolen_top;
	resource_size_t reserved_total, reserved_size;

	mutex_init(&dev_priv->mm.stolen_lock);

	if (intel_vgpu_active(dev_priv)) {
		dev_notice(dev_priv->drm.dev,
			   "%s, disabling use of stolen memory\n",
			   "iGVT-g active");
		return 0;
	}

	if (intel_vtd_active() && INTEL_GEN(dev_priv) < 8) {
		dev_notice(dev_priv->drm.dev,
			   "%s, disabling use of stolen memory\n",
			   "DMAR active");
		return 0;
	}

	if (resource_size(&intel_graphics_stolen_res) == 0)
		return 0;

	dev_priv->dsm = intel_graphics_stolen_res;

	if (i915_adjust_stolen(dev_priv, &dev_priv->dsm))
		return 0;

	GEM_BUG_ON(dev_priv->dsm.start == 0);
	GEM_BUG_ON(dev_priv->dsm.end <= dev_priv->dsm.start);

	stolen_top = dev_priv->dsm.end + 1;
	reserved_base = stolen_top;
	reserved_size = 0;

	switch (INTEL_GEN(dev_priv)) {
	case 2:
	case 3:
		break;
	case 4:
		if (!IS_G4X(dev_priv))
			break;
		/* fall through */
	case 5:
		g4x_get_stolen_reserved(dev_priv,
					&reserved_base, &reserved_size);
		break;
	case 6:
		gen6_get_stolen_reserved(dev_priv,
					 &reserved_base, &reserved_size);
		break;
	case 7:
		if (IS_VALLEYVIEW(dev_priv))
			vlv_get_stolen_reserved(dev_priv,
						&reserved_base, &reserved_size);
		else
			gen7_get_stolen_reserved(dev_priv,
						 &reserved_base, &reserved_size);
		break;
	case 8:
	case 9:
	case 10:
		if (IS_LP(dev_priv))
			chv_get_stolen_reserved(dev_priv,
						&reserved_base, &reserved_size);
		else
			bdw_get_stolen_reserved(dev_priv,
						&reserved_base, &reserved_size);
		break;
	default:
		MISSING_CASE(INTEL_GEN(dev_priv));
		/* fall through */
	case 11:
	case 12:
		icl_get_stolen_reserved(dev_priv, &reserved_base,
					&reserved_size);
		break;
	}

	/*
	 * Our expectation is that the reserved space is at the top of the
	 * stolen region and *never* at the bottom. If we see !reserved_base,
	 * it likely means we failed to read the registers correctly.
	 */
	if (!reserved_base) {
		DRM_ERROR("inconsistent reservation %pa + %pa; ignoring\n",
			  &reserved_base, &reserved_size);
		reserved_base = stolen_top;
		reserved_size = 0;
	}

	dev_priv->dsm_reserved =
		(struct resource) DEFINE_RES_MEM(reserved_base, reserved_size);

	if (!resource_contains(&dev_priv->dsm, &dev_priv->dsm_reserved)) {
		DRM_ERROR("Stolen reserved area %pR outside stolen memory %pR\n",
			  &dev_priv->dsm_reserved, &dev_priv->dsm);
		return 0;
	}

	/*
	 * It is possible for the reserved area to end before the end of stolen
	 * memory, so just consider the start.
	 */
	reserved_total = stolen_top - reserved_base;

	DRM_DEBUG_DRIVER("Memory reserved for graphics device: %lluK, usable: %lluK\n",
			 (u64)resource_size(&dev_priv->dsm) >> 10,
			 ((u64)resource_size(&dev_priv->dsm) - reserved_total) >> 10);

	dev_priv->stolen_usable_size =
		resource_size(&dev_priv->dsm) - reserved_total;

	/* Basic memrange allocator for stolen space. */
	drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->stolen_usable_size);

	return 0;
}
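
/*
 * Worked example for the accounting above (hypothetical numbers): with a
 * 64M DSM and 8M decoded as reserved at the top of stolen memory,
 * reserved_total = stolen_top - reserved_base = 8M and
 * stolen_usable_size = 64M - 8M = 56M. Note that the drm_mm allocator is
 * initialised over [0, stolen_usable_size), i.e. it hands out offsets
 * relative to dsm.start rather than physical addresses;
 * i915_pages_create_for_stolen() below adds dsm.start back in.
 */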

static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
			     resource_size_t offset, resource_size_t size)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct sg_table *st;
	struct scatterlist *sg;

	GEM_BUG_ON(range_overflows(offset, size, resource_size(&dev_priv->dsm)));

	/*
	 * We hide that we have no struct page backing our stolen object
	 * by wrapping the contiguous physical allocation with a fake
	 * dma mapping in a single scatterlist.
	 */

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return ERR_PTR(-ENOMEM);

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return ERR_PTR(-ENOMEM);
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = size;

	sg_dma_address(sg) = (dma_addr_t)dev_priv->dsm.start + offset;
	sg_dma_len(sg) = size;

	return st;
}
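
/*
 * The table built above holds exactly one entry: sg_dma_address() is a
 * physical address inside the DSM (e.g. a hypothetical object at stolen
 * offset 0x10000 maps to dsm.start + 0x10000) and sg_dma_len() covers the
 * whole object. There is no backing struct page, so sg_page() is not
 * meaningful for these tables.
 */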

static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages =
		i915_pages_create_for_stolen(obj->base.dev,
					     obj->stolen->start,
					     obj->stolen->size);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	__i915_gem_object_set_pages(obj, pages, obj->stolen->size);

	return 0;
}

static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
					     struct sg_table *pages)
{
	/* Should only be called from i915_gem_object_release_stolen() */
	sg_free_table(pages);
	kfree(pages);
}

static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen);

	GEM_BUG_ON(!stolen);

	i915_gem_stolen_remove_node(dev_priv, stolen);
	kfree(stolen);

	if (obj->mm.region)
		i915_gem_object_release_memory_region(obj);
}

static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
	.get_pages = i915_gem_object_get_pages_stolen,
	.put_pages = i915_gem_object_put_pages_stolen,
	.release = i915_gem_object_release_stolen,
};

static struct drm_i915_gem_object *
__i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
				struct drm_mm_node *stolen,
				struct intel_memory_region *mem)
{
	static struct lock_class_key lock_class;
	struct drm_i915_gem_object *obj;
	unsigned int cache_level;
	int err = -ENOMEM;

	obj = i915_gem_object_alloc();
	if (!obj)
		goto err;

	drm_gem_private_object_init(&dev_priv->drm, &obj->base, stolen->size);
	i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class);

	obj->stolen = stolen;
	obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
	cache_level = HAS_LLC(dev_priv) ? I915_CACHE_LLC : I915_CACHE_NONE;
	i915_gem_object_set_cache_coherency(obj, cache_level);

	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto cleanup;

	if (mem)
		i915_gem_object_init_memory_region(obj, mem, 0);

	return obj;

cleanup:
	i915_gem_object_free(obj);
err:
	return ERR_PTR(err);
}

static struct drm_i915_gem_object *
_i915_gem_object_create_stolen(struct intel_memory_region *mem,
			       resource_size_t size,
			       unsigned int flags)
{
	struct drm_i915_private *dev_priv = mem->i915;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return ERR_PTR(-ENODEV);

	if (size == 0)
		return ERR_PTR(-EINVAL);

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return ERR_PTR(-ENOMEM);

	ret = i915_gem_stolen_insert_node(dev_priv, stolen, size, 4096);
	if (ret) {
		obj = ERR_PTR(ret);
		goto err_free;
	}

	obj = __i915_gem_object_create_stolen(dev_priv, stolen, mem);
	if (IS_ERR(obj))
		goto err_remove;

	return obj;

err_remove:
	i915_gem_stolen_remove_node(dev_priv, stolen);
err_free:
	kfree(stolen);
	return obj;
}

struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
			      resource_size_t size)
{
	return i915_gem_object_create_region(dev_priv->mm.regions[INTEL_REGION_STOLEN],
					     size, I915_BO_ALLOC_CONTIGUOUS);
}

static int init_stolen(struct intel_memory_region *mem)
{
	/*
	 * Initialise stolen early so that we may reserve preallocated
	 * objects for the BIOS to KMS transition.
	 */
	return i915_gem_init_stolen(mem->i915);
}

static void release_stolen(struct intel_memory_region *mem)
{
	i915_gem_cleanup_stolen(mem->i915);
}

static const struct intel_memory_region_ops i915_region_stolen_ops = {
	.init = init_stolen,
	.release = release_stolen,
	.create_object = _i915_gem_object_create_stolen,
};

struct intel_memory_region *i915_gem_stolen_setup(struct drm_i915_private *i915)
{
	return intel_memory_region_create(i915,
					  intel_graphics_stolen_res.start,
					  resource_size(&intel_graphics_stolen_res),
					  PAGE_SIZE, 0,
					  &i915_region_stolen_ops);
}
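
/*
 * Usage sketch (illustrative): once the stolen region has been set up,
 * ordinary stolen-backed objects are created through the region API, e.g.
 *
 *	obj = i915_gem_object_create_stolen(i915, SZ_1M);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *
 * which routes through i915_gem_object_create_region() and ultimately
 * _i915_gem_object_create_stolen() above, with I915_BO_ALLOC_CONTIGUOUS
 * set since stolen allocations are carved out of one physically
 * contiguous range.
 */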

struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv,
					       resource_size_t stolen_offset,
					       resource_size_t gtt_offset,
					       resource_size_t size)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	struct i915_vma *vma;
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return ERR_PTR(-ENODEV);

	DRM_DEBUG_DRIVER("creating preallocated stolen object: stolen_offset=%pa, gtt_offset=%pa, size=%pa\n",
			 &stolen_offset, &gtt_offset, &size);

	/* KISS and expect everything to be page-aligned */
	if (WARN_ON(size == 0) ||
	    WARN_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)) ||
	    WARN_ON(!IS_ALIGNED(stolen_offset, I915_GTT_MIN_ALIGNMENT)))
		return ERR_PTR(-EINVAL);

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return ERR_PTR(-ENOMEM);

	stolen->start = stolen_offset;
	stolen->size = size;
	mutex_lock(&dev_priv->mm.stolen_lock);
	ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
	mutex_unlock(&dev_priv->mm.stolen_lock);
	if (ret) {
		DRM_DEBUG_DRIVER("failed to allocate stolen space\n");
		kfree(stolen);
		return ERR_PTR(ret);
	}

	obj = __i915_gem_object_create_stolen(dev_priv, stolen, NULL);
	if (IS_ERR(obj)) {
		DRM_DEBUG_DRIVER("failed to allocate stolen object\n");
		i915_gem_stolen_remove_node(dev_priv, stolen);
		kfree(stolen);
		return obj;
	}

	/* Some objects just need physical mem from stolen space */
	if (gtt_offset == I915_GTT_OFFSET_NONE)
		return obj;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	vma = i915_vma_instance(obj, &ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_pages;
	}

	/*
	 * To simplify the initialisation sequence between KMS and GTT,
	 * we allow construction of the stolen object prior to
	 * setting up the GTT space. The actual reservation will occur
	 * later.
	 */
	mutex_lock(&ggtt->vm.mutex);
	ret = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
				   size, gtt_offset, obj->cache_level,
				   0);
	if (ret) {
		DRM_DEBUG_DRIVER("failed to allocate stolen GTT space\n");
		mutex_unlock(&ggtt->vm.mutex);
		goto err_pages;
	}

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

	GEM_BUG_ON(vma->pages);
	vma->pages = obj->mm.pages;
	atomic_set(&vma->pages_count, I915_VMA_PAGES_ACTIVE);

	set_bit(I915_VMA_GLOBAL_BIND_BIT, __i915_vma_flags(vma));
	__i915_vma_set_map_and_fenceable(vma);

	list_add_tail(&vma->vm_link, &ggtt->vm.bound_list);
	mutex_unlock(&ggtt->vm.mutex);

	GEM_BUG_ON(i915_gem_object_is_shrinkable(obj));
	atomic_inc(&obj->bind_count);

	return obj;

err_pages:
	i915_gem_object_unpin_pages(obj);
err:
	i915_gem_object_put(obj);
	return ERR_PTR(ret);
}
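
/*
 * A typical caller of the preallocated path above is the fbdev/KMS
 * takeover code: the BIOS framebuffer's stolen offset, GGTT offset and
 * size are read back from the hardware state and passed in here, so that
 * fbcon can reuse the BIOS allocation rather than reallocating it (see
 * the intro comment at the top of this file). Callers that only need the
 * physical range, not a GGTT binding, pass gtt_offset ==
 * I915_GTT_OFFSET_NONE.
 */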