/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2008-2012 Intel Corporation
 */

#include <linux/errno.h>
#include <linux/mutex.h>

#include <drm/drm_mm.h>
#include <drm/i915_drm.h>

#include "gem/i915_gem_region.h"
#include "i915_drv.h"
#include "i915_gem_stolen.h"

/*
 * The BIOS typically reserves some of the system's memory for the exclusive
 * use of the integrated graphics. This memory is no longer available for
 * use by the OS, so the user finds that the system has less memory
 * available than was physically installed. We refer to this memory as
 * stolen.
 *
 * The BIOS will allocate its framebuffer from the stolen memory. Our
 * goal is to try to reuse that object for our own fbcon, which must
 * always be available for panics. Anything else we can reuse the stolen
 * memory for is a boon.
 */

int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915,
					 struct drm_mm_node *node, u64 size,
					 unsigned alignment, u64 start, u64 end)
{
	int ret;

	if (!drm_mm_initialized(&i915->mm.stolen))
		return -ENODEV;

	/* WaSkipStolenMemoryFirstPage:bdw+ */
	if (INTEL_GEN(i915) >= 8 && start < 4096)
		start = 4096;

	mutex_lock(&i915->mm.stolen_lock);
	ret = drm_mm_insert_node_in_range(&i915->mm.stolen, node,
					  size, alignment, 0,
					  start, end, DRM_MM_INSERT_BEST);
	mutex_unlock(&i915->mm.stolen_lock);

	return ret;
}

int i915_gem_stolen_insert_node(struct drm_i915_private *i915,
				struct drm_mm_node *node, u64 size,
				unsigned alignment)
{
	return i915_gem_stolen_insert_node_in_range(i915, node, size,
						    alignment, 0, U64_MAX);
}

void i915_gem_stolen_remove_node(struct drm_i915_private *i915,
				 struct drm_mm_node *node)
{
	mutex_lock(&i915->mm.stolen_lock);
	drm_mm_remove_node(node);
	mutex_unlock(&i915->mm.stolen_lock);
}
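/*
 * A sketch of the layout this file manages (a mental model rather than an
 * authoritative map for every platform):
 *
 *	dsm.start                      reserved_base       stolen_top
 *	   |-- usable stolen (drm_mm) --|--- dsm_reserved ---|
 *
 * i915->dsm covers the whole BIOS-stolen range, dsm_reserved is the
 * (optional) portion at the top that must be left untouched, and the
 * drm_mm allocator hands out offsets relative to dsm.start within the
 * usable remainder.
 */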
static int i915_adjust_stolen(struct drm_i915_private *i915,
			      struct resource *dsm)
{
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct intel_uncore *uncore = ggtt->vm.gt->uncore;
	struct resource *r;

	if (dsm->start == 0 || dsm->end <= dsm->start)
		return -EINVAL;

	/*
	 * TODO: We have yet to encounter the case where the GTT wasn't at the
	 * end of stolen. With that assumption we could simplify this.
	 */

	/* Make sure we don't clobber the GTT if it's within stolen memory */
	if (INTEL_GEN(i915) <= 4 &&
	    !IS_G33(i915) && !IS_PINEVIEW(i915) && !IS_G4X(i915)) {
		struct resource stolen[2] = {*dsm, *dsm};
		struct resource ggtt_res;
		resource_size_t ggtt_start;

		ggtt_start = intel_uncore_read(uncore, PGTBL_CTL);
		if (IS_GEN(i915, 4))
			ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
				     (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
		else
			ggtt_start &= PGTBL_ADDRESS_LO_MASK;

		ggtt_res =
			(struct resource)DEFINE_RES_MEM(ggtt_start,
							ggtt_total_entries(ggtt) * 4);

		if (ggtt_res.start >= stolen[0].start && ggtt_res.start < stolen[0].end)
			stolen[0].end = ggtt_res.start;
		if (ggtt_res.end > stolen[1].start && ggtt_res.end <= stolen[1].end)
			stolen[1].start = ggtt_res.end;

		/* Pick the larger of the two chunks */
		if (resource_size(&stolen[0]) > resource_size(&stolen[1]))
			*dsm = stolen[0];
		else
			*dsm = stolen[1];

		if (stolen[0].start != stolen[1].start ||
		    stolen[0].end != stolen[1].end) {
			DRM_DEBUG_DRIVER("GTT within stolen memory at %pR\n", &ggtt_res);
			DRM_DEBUG_DRIVER("Stolen memory adjusted to %pR\n", dsm);
		}
	}

	/*
	 * Verify that nothing else uses this physical address. Stolen
	 * memory should be reserved by the BIOS and hidden from the
	 * kernel. So if the region is already marked as busy, something
	 * is seriously wrong.
	 */
	r = devm_request_mem_region(i915->drm.dev, dsm->start,
				    resource_size(dsm),
				    "Graphics Stolen Memory");
	if (r == NULL) {
		/*
		 * One more attempt, but this time requesting the region
		 * from start + 1, as we have seen that this resolves the
		 * region conflict with the PCI Bus.
		 * This is a BIOS w/a: some BIOSes wrap stolen in the root
		 * PCI bus, but have an off-by-one error. Hence retry the
		 * reservation starting from 1 instead of 0.
		 * There are also BIOSes with an off-by-one on the other end.
		 */
		r = devm_request_mem_region(i915->drm.dev, dsm->start + 1,
					    resource_size(dsm) - 2,
					    "Graphics Stolen Memory");
		/*
		 * GEN3 firmware likes to smash pci bridges into the stolen
		 * range. Apparently this works.
		 */
		if (!r && !IS_GEN(i915, 3)) {
			DRM_ERROR("conflict detected with stolen region: %pR\n",
				  dsm);

			return -EBUSY;
		}
	}

	return 0;
}

static void i915_gem_cleanup_stolen(struct drm_i915_private *i915)
{
	if (!drm_mm_initialized(&i915->mm.stolen))
		return;

	drm_mm_takedown(&i915->mm.stolen);
}

static void g4x_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore,
					IS_GM45(i915) ?
					CTG_STOLEN_RESERVED :
					ELK_STOLEN_RESERVED);
	resource_size_t stolen_top = i915->dsm.end + 1;

	DRM_DEBUG_DRIVER("%s_STOLEN_RESERVED = %08x\n",
			 IS_GM45(i915) ? "CTG" : "ELK", reg_val);

	if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0)
		return;

	/*
	 * Whether ILK really reuses the ELK register for this is unclear.
	 * Let's see if we catch anyone with this supposedly enabled on ILK.
	 */
	WARN(IS_GEN(i915, 5), "ILK stolen reserved found? 0x%08x\n",
	     reg_val);

	if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK))
		return;

	*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
	WARN_ON((reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);

	*size = stolen_top - *base;
}
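/*
 * All of the *_get_stolen_reserved() helpers share one contract: decode the
 * platform's STOLEN_RESERVED register into a base/size pair, and leave
 * *base and *size untouched when the reservation is disabled or cannot be
 * trusted. The caller pre-loads base = stolen_top and size = 0, which
 * stands for "no reserved portion".
 */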
static void gen6_get_stolen_reserved(struct drm_i915_private *i915,
				     struct intel_uncore *uncore,
				     resource_size_t *base,
				     resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) {
	case GEN6_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_512K:
		*size = 512 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_128K:
		*size = 128 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void vlv_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
	resource_size_t stolen_top = i915->dsm.end + 1;

	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	default:
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
		/* fall through */
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	}

	/*
	 * On vlv, the ADDR_MASK portion is left as 0 and HW deduces the
	 * reserved location as (top - size).
	 */
	*base = stolen_top - *size;
}
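/*
 * Note the vlv/gen7 split over the same GEN6_STOLEN_RESERVED register:
 * vlv above leaves the address bits zeroed, with the hardware placing the
 * reserved window implicitly at the top of stolen, whereas gen7 proper
 * below reports an explicit base via GEN7_STOLEN_RESERVED_ADDR_MASK.
 */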
static void gen7_get_stolen_reserved(struct drm_i915_private *i915,
				     struct intel_uncore *uncore,
				     resource_size_t *base,
				     resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN7_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void chv_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void bdw_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
	resource_size_t stolen_top = i915->dsm.end + 1;

	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	if (!(reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
	*size = stolen_top - *base;
}

static void icl_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u64 reg_val = intel_uncore_read64(uncore, GEN6_STOLEN_RESERVED);

	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val);

	*base = reg_val & GEN11_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}
}
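/*
 * Note that i915_gem_init_stolen() below deliberately returns 0 on almost
 * every failure path: stolen memory is an optimisation, not a requirement,
 * so driver load simply continues with the stolen allocator left
 * uninitialised, and later i915_gem_stolen_insert_node() callers get
 * -ENODEV instead.
 */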
static int i915_gem_init_stolen(struct drm_i915_private *i915)
{
	struct intel_uncore *uncore = &i915->uncore;
	resource_size_t reserved_base, stolen_top;
	resource_size_t reserved_total, reserved_size;

	mutex_init(&i915->mm.stolen_lock);

	if (intel_vgpu_active(i915)) {
		dev_notice(i915->drm.dev,
			   "%s, disabling use of stolen memory\n",
			   "iGVT-g active");
		return 0;
	}

	if (intel_vtd_active() && INTEL_GEN(i915) < 8) {
		dev_notice(i915->drm.dev,
			   "%s, disabling use of stolen memory\n",
			   "DMAR active");
		return 0;
	}

	if (resource_size(&intel_graphics_stolen_res) == 0)
		return 0;

	i915->dsm = intel_graphics_stolen_res;

	if (i915_adjust_stolen(i915, &i915->dsm))
		return 0;

	GEM_BUG_ON(i915->dsm.start == 0);
	GEM_BUG_ON(i915->dsm.end <= i915->dsm.start);

	stolen_top = i915->dsm.end + 1;
	reserved_base = stolen_top;
	reserved_size = 0;

	switch (INTEL_GEN(i915)) {
	case 2:
	case 3:
		break;
	case 4:
		if (!IS_G4X(i915))
			break;
		/* fall through */
	case 5:
		g4x_get_stolen_reserved(i915, uncore,
					&reserved_base, &reserved_size);
		break;
	case 6:
		gen6_get_stolen_reserved(i915, uncore,
					 &reserved_base, &reserved_size);
		break;
	case 7:
		if (IS_VALLEYVIEW(i915))
			vlv_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
		else
			gen7_get_stolen_reserved(i915, uncore,
						 &reserved_base, &reserved_size);
		break;
	case 8:
	case 9:
	case 10:
		if (IS_LP(i915))
			chv_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
		else
			bdw_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
		break;
	default:
		MISSING_CASE(INTEL_GEN(i915));
		/* fall through */
	case 11:
	case 12:
		icl_get_stolen_reserved(i915, uncore,
					&reserved_base,
					&reserved_size);
		break;
	}

	/*
	 * Our expectation is that the reserved space is at the top of the
	 * stolen region and *never* at the bottom. If we see !reserved_base,
	 * it likely means we failed to read the registers correctly.
	 */
	if (!reserved_base) {
		DRM_ERROR("inconsistent reservation %pa + %pa; ignoring\n",
			  &reserved_base, &reserved_size);
		reserved_base = stolen_top;
		reserved_size = 0;
	}

	i915->dsm_reserved =
		(struct resource)DEFINE_RES_MEM(reserved_base, reserved_size);

	if (!resource_contains(&i915->dsm, &i915->dsm_reserved)) {
		DRM_ERROR("Stolen reserved area %pR outside stolen memory %pR\n",
			  &i915->dsm_reserved, &i915->dsm);
		return 0;
	}

	/* It is possible for the reserved area to end before the end of stolen
	 * memory, so just consider the start. */
	reserved_total = stolen_top - reserved_base;

	DRM_DEBUG_DRIVER("Memory reserved for graphics device: %lluK, usable: %lluK\n",
			 (u64)resource_size(&i915->dsm) >> 10,
			 ((u64)resource_size(&i915->dsm) - reserved_total) >> 10);

	i915->stolen_usable_size =
		resource_size(&i915->dsm) - reserved_total;

	/* Basic memrange allocator for stolen space. */
	drm_mm_init(&i915->mm.stolen, 0, i915->stolen_usable_size);

	return 0;
}
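/*
 * Stolen memory has no struct page backing, so none of the usual shmem
 * paths apply. The helpers below instead describe the contiguous physical
 * range with a single-entry scatterlist carrying only a dma address;
 * consumers must stick to sg_dma_address()/sg_dma_len() and never expect
 * sg_page() to yield anything meaningful.
 */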
static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
			     resource_size_t offset, resource_size_t size)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct sg_table *st;
	struct scatterlist *sg;

	GEM_BUG_ON(range_overflows(offset, size, resource_size(&i915->dsm)));

	/* We hide that we have no struct page backing our stolen object
	 * by wrapping the contiguous physical allocation with a fake
	 * dma mapping in a single scatterlist.
	 */

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return ERR_PTR(-ENOMEM);

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return ERR_PTR(-ENOMEM);
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = size;

	sg_dma_address(sg) = (dma_addr_t)i915->dsm.start + offset;
	sg_dma_len(sg) = size;

	return st;
}

static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages =
		i915_pages_create_for_stolen(obj->base.dev,
					     obj->stolen->start,
					     obj->stolen->size);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	__i915_gem_object_set_pages(obj, pages, obj->stolen->size);

	return 0;
}

static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
					     struct sg_table *pages)
{
	/* Should only be called from i915_gem_object_release_stolen() */
	sg_free_table(pages);
	kfree(pages);
}

static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen);

	GEM_BUG_ON(!stolen);

	i915_gem_object_release_memory_region(obj);

	i915_gem_stolen_remove_node(i915, stolen);
	kfree(stolen);
}

static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
	.get_pages = i915_gem_object_get_pages_stolen,
	.put_pages = i915_gem_object_put_pages_stolen,
	.release = i915_gem_object_release_stolen,
};
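/*
 * Object creation below comes in two layers: __i915_gem_object_create_stolen()
 * wraps an already-reserved drm_mm node in a GEM object, while
 * _i915_gem_object_create_stolen() (the memory region's ->create_object hook)
 * first carves a fresh node out of the stolen allocator and unwinds it on
 * failure. A typical caller elsewhere in the driver looks roughly like this
 * (a sketch, with an illustrative size):
 *
 *	obj = i915_gem_object_create_stolen(i915, SZ_1M);
 *	if (IS_ERR(obj))
 *		... fall back to a regular shmem-backed object ...
 */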
static struct drm_i915_gem_object *
__i915_gem_object_create_stolen(struct intel_memory_region *mem,
				struct drm_mm_node *stolen)
{
	static struct lock_class_key lock_class;
	struct drm_i915_gem_object *obj;
	unsigned int cache_level;
	int err = -ENOMEM;

	obj = i915_gem_object_alloc();
	if (!obj)
		goto err;

	drm_gem_private_object_init(&mem->i915->drm, &obj->base, stolen->size);
	i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class);

	obj->stolen = stolen;
	obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
	cache_level = HAS_LLC(mem->i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
	i915_gem_object_set_cache_coherency(obj, cache_level);

	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto cleanup;

	i915_gem_object_init_memory_region(obj, mem, 0);

	return obj;

cleanup:
	i915_gem_object_free(obj);
err:
	return ERR_PTR(err);
}

static struct drm_i915_gem_object *
_i915_gem_object_create_stolen(struct intel_memory_region *mem,
			       resource_size_t size,
			       unsigned int flags)
{
	struct drm_i915_private *i915 = mem->i915;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	int ret;

	if (!drm_mm_initialized(&i915->mm.stolen))
		return ERR_PTR(-ENODEV);

	if (size == 0)
		return ERR_PTR(-EINVAL);

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return ERR_PTR(-ENOMEM);

	ret = i915_gem_stolen_insert_node(i915, stolen, size, 4096);
	if (ret) {
		obj = ERR_PTR(ret);
		goto err_free;
	}

	obj = __i915_gem_object_create_stolen(mem, stolen);
	if (IS_ERR(obj))
		goto err_remove;

	return obj;

err_remove:
	i915_gem_stolen_remove_node(i915, stolen);
err_free:
	kfree(stolen);
	return obj;
}

struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_i915_private *i915,
			      resource_size_t size)
{
	return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_STOLEN],
					     size, I915_BO_ALLOC_CONTIGUOUS);
}

static int init_stolen(struct intel_memory_region *mem)
{
	intel_memory_region_set_name(mem, "stolen");

	/*
	 * Initialise stolen early so that we may reserve preallocated
	 * objects for the BIOS to KMS transition.
	 */
	return i915_gem_init_stolen(mem->i915);
}

static void release_stolen(struct intel_memory_region *mem)
{
	i915_gem_cleanup_stolen(mem->i915);
}

static const struct intel_memory_region_ops i915_region_stolen_ops = {
	.init = init_stolen,
	.release = release_stolen,
	.create_object = _i915_gem_object_create_stolen,
};

struct intel_memory_region *i915_gem_stolen_setup(struct drm_i915_private *i915)
{
	return intel_memory_region_create(i915,
					  intel_graphics_stolen_res.start,
					  resource_size(&intel_graphics_stolen_res),
					  PAGE_SIZE, 0,
					  &i915_region_stolen_ops);
}
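/*
 * The preallocated path below exists for the BIOS -> KMS handover: the
 * firmware framebuffer already lives at a fixed stolen offset (and usually
 * a fixed GTT offset), so instead of allocating anew we reserve those exact
 * ranges and wrap them in a GEM object that fbcon can then inherit.
 */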
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *i915,
					       resource_size_t stolen_offset,
					       resource_size_t gtt_offset,
					       resource_size_t size)
{
	struct intel_memory_region *mem = i915->mm.regions[INTEL_REGION_STOLEN];
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	struct i915_vma *vma;
	int ret;

	if (!drm_mm_initialized(&i915->mm.stolen))
		return ERR_PTR(-ENODEV);

	DRM_DEBUG_DRIVER("creating preallocated stolen object: stolen_offset=%pa, gtt_offset=%pa, size=%pa\n",
			 &stolen_offset, &gtt_offset, &size);

	/* KISS and expect everything to be page-aligned */
	if (WARN_ON(size == 0) ||
	    WARN_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)) ||
	    WARN_ON(!IS_ALIGNED(stolen_offset, I915_GTT_MIN_ALIGNMENT)))
		return ERR_PTR(-EINVAL);

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return ERR_PTR(-ENOMEM);

	stolen->start = stolen_offset;
	stolen->size = size;
	mutex_lock(&i915->mm.stolen_lock);
	ret = drm_mm_reserve_node(&i915->mm.stolen, stolen);
	mutex_unlock(&i915->mm.stolen_lock);
	if (ret) {
		DRM_DEBUG_DRIVER("failed to allocate stolen space\n");
		kfree(stolen);
		return ERR_PTR(ret);
	}

	obj = __i915_gem_object_create_stolen(mem, stolen);
	if (IS_ERR(obj)) {
		DRM_DEBUG_DRIVER("failed to allocate stolen object\n");
		i915_gem_stolen_remove_node(i915, stolen);
		kfree(stolen);
		return obj;
	}

	/* Some objects just need physical mem from stolen space */
	if (gtt_offset == I915_GTT_OFFSET_NONE)
		return obj;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	vma = i915_vma_instance(obj, &ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_pages;
	}

	/* To simplify the initialisation sequence between KMS and GTT,
	 * we allow construction of the stolen object prior to
	 * setting up the GTT space. The actual reservation will occur
	 * later.
	 */
	mutex_lock(&ggtt->vm.mutex);
	ret = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
				   size, gtt_offset, obj->cache_level,
				   0);
	if (ret) {
		DRM_DEBUG_DRIVER("failed to allocate stolen GTT space\n");
		mutex_unlock(&ggtt->vm.mutex);
		goto err_pages;
	}

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

	GEM_BUG_ON(vma->pages);
	vma->pages = obj->mm.pages;
	atomic_set(&vma->pages_count, I915_VMA_PAGES_ACTIVE);

	set_bit(I915_VMA_GLOBAL_BIND_BIT, __i915_vma_flags(vma));
	__i915_vma_set_map_and_fenceable(vma);

	list_add_tail(&vma->vm_link, &ggtt->vm.bound_list);
	mutex_unlock(&ggtt->vm.mutex);

	GEM_BUG_ON(i915_gem_object_is_shrinkable(obj));
	atomic_inc(&obj->bind_count);

	return obj;

err_pages:
	i915_gem_object_unpin_pages(obj);
err:
	i915_gem_object_put(obj);
	return ERR_PTR(ret);
}