1 /* 2 * SPDX-License-Identifier: MIT 3 * 4 * Copyright © 2008-2012 Intel Corporation 5 */ 6 7 #include <linux/errno.h> 8 #include <linux/mutex.h> 9 10 #include <drm/drm_mm.h> 11 #include <drm/i915_drm.h> 12 13 #include "gem/i915_gem_lmem.h" 14 #include "gem/i915_gem_region.h" 15 #include "i915_drv.h" 16 #include "i915_gem_stolen.h" 17 #include "i915_vgpu.h" 18 19 /* 20 * The BIOS typically reserves some of the system's memory for the exclusive 21 * use of the integrated graphics. This memory is no longer available for 22 * use by the OS and so the user finds that his system has less memory 23 * available than he put in. We refer to this memory as stolen. 24 * 25 * The BIOS will allocate its framebuffer from the stolen memory. Our 26 * goal is try to reuse that object for our own fbcon which must always 27 * be available for panics. Anything else we can reuse the stolen memory 28 * for is a boon. 29 */ 30 31 int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915, 32 struct drm_mm_node *node, u64 size, 33 unsigned alignment, u64 start, u64 end) 34 { 35 int ret; 36 37 if (!drm_mm_initialized(&i915->mm.stolen)) 38 return -ENODEV; 39 40 /* WaSkipStolenMemoryFirstPage:bdw+ */ 41 if (GRAPHICS_VER(i915) >= 8 && start < 4096) 42 start = 4096; 43 44 mutex_lock(&i915->mm.stolen_lock); 45 ret = drm_mm_insert_node_in_range(&i915->mm.stolen, node, 46 size, alignment, 0, 47 start, end, DRM_MM_INSERT_BEST); 48 mutex_unlock(&i915->mm.stolen_lock); 49 50 return ret; 51 } 52 53 int i915_gem_stolen_insert_node(struct drm_i915_private *i915, 54 struct drm_mm_node *node, u64 size, 55 unsigned alignment) 56 { 57 return i915_gem_stolen_insert_node_in_range(i915, node, 58 size, alignment, 59 I915_GEM_STOLEN_BIAS, 60 U64_MAX); 61 } 62 63 void i915_gem_stolen_remove_node(struct drm_i915_private *i915, 64 struct drm_mm_node *node) 65 { 66 mutex_lock(&i915->mm.stolen_lock); 67 drm_mm_remove_node(node); 68 mutex_unlock(&i915->mm.stolen_lock); 69 } 70 71 static int i915_adjust_stolen(struct drm_i915_private *i915, 72 struct resource *dsm) 73 { 74 struct i915_ggtt *ggtt = &i915->ggtt; 75 struct intel_uncore *uncore = ggtt->vm.gt->uncore; 76 struct resource *r; 77 78 if (dsm->start == 0 || dsm->end <= dsm->start) 79 return -EINVAL; 80 81 /* 82 * TODO: We have yet too encounter the case where the GTT wasn't at the 83 * end of stolen. With that assumption we could simplify this. 84 */ 85 86 /* Make sure we don't clobber the GTT if it's within stolen memory */ 87 if (GRAPHICS_VER(i915) <= 4 && 88 !IS_G33(i915) && !IS_PINEVIEW(i915) && !IS_G4X(i915)) { 89 struct resource stolen[2] = {*dsm, *dsm}; 90 struct resource ggtt_res; 91 resource_size_t ggtt_start; 92 93 ggtt_start = intel_uncore_read(uncore, PGTBL_CTL); 94 if (GRAPHICS_VER(i915) == 4) 95 ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) | 96 (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28; 97 else 98 ggtt_start &= PGTBL_ADDRESS_LO_MASK; 99 100 ggtt_res = 101 (struct resource) DEFINE_RES_MEM(ggtt_start, 102 ggtt_total_entries(ggtt) * 4); 103 104 if (ggtt_res.start >= stolen[0].start && ggtt_res.start < stolen[0].end) 105 stolen[0].end = ggtt_res.start; 106 if (ggtt_res.end > stolen[1].start && ggtt_res.end <= stolen[1].end) 107 stolen[1].start = ggtt_res.end; 108 109 /* Pick the larger of the two chunks */ 110 if (resource_size(&stolen[0]) > resource_size(&stolen[1])) 111 *dsm = stolen[0]; 112 else 113 *dsm = stolen[1]; 114 115 if (stolen[0].start != stolen[1].start || 116 stolen[0].end != stolen[1].end) { 117 drm_dbg(&i915->drm, 118 "GTT within stolen memory at %pR\n", 119 &ggtt_res); 120 drm_dbg(&i915->drm, "Stolen memory adjusted to %pR\n", 121 dsm); 122 } 123 } 124 125 /* 126 * With stolen lmem, we don't need to check if the address range 127 * overlaps with the non-stolen system memory range, since lmem is local 128 * to the gpu. 129 */ 130 if (HAS_LMEM(i915)) 131 return 0; 132 133 /* 134 * Verify that nothing else uses this physical address. Stolen 135 * memory should be reserved by the BIOS and hidden from the 136 * kernel. So if the region is already marked as busy, something 137 * is seriously wrong. 138 */ 139 r = devm_request_mem_region(i915->drm.dev, dsm->start, 140 resource_size(dsm), 141 "Graphics Stolen Memory"); 142 if (r == NULL) { 143 /* 144 * One more attempt but this time requesting region from 145 * start + 1, as we have seen that this resolves the region 146 * conflict with the PCI Bus. 147 * This is a BIOS w/a: Some BIOS wrap stolen in the root 148 * PCI bus, but have an off-by-one error. Hence retry the 149 * reservation starting from 1 instead of 0. 150 * There's also BIOS with off-by-one on the other end. 151 */ 152 r = devm_request_mem_region(i915->drm.dev, dsm->start + 1, 153 resource_size(dsm) - 2, 154 "Graphics Stolen Memory"); 155 /* 156 * GEN3 firmware likes to smash pci bridges into the stolen 157 * range. Apparently this works. 158 */ 159 if (!r && GRAPHICS_VER(i915) != 3) { 160 drm_err(&i915->drm, 161 "conflict detected with stolen region: %pR\n", 162 dsm); 163 164 return -EBUSY; 165 } 166 } 167 168 return 0; 169 } 170 171 static void i915_gem_cleanup_stolen(struct drm_i915_private *i915) 172 { 173 if (!drm_mm_initialized(&i915->mm.stolen)) 174 return; 175 176 drm_mm_takedown(&i915->mm.stolen); 177 } 178 179 static void g4x_get_stolen_reserved(struct drm_i915_private *i915, 180 struct intel_uncore *uncore, 181 resource_size_t *base, 182 resource_size_t *size) 183 { 184 u32 reg_val = intel_uncore_read(uncore, 185 IS_GM45(i915) ? 186 CTG_STOLEN_RESERVED : 187 ELK_STOLEN_RESERVED); 188 resource_size_t stolen_top = i915->dsm.end + 1; 189 190 drm_dbg(&i915->drm, "%s_STOLEN_RESERVED = %08x\n", 191 IS_GM45(i915) ? "CTG" : "ELK", reg_val); 192 193 if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0) 194 return; 195 196 /* 197 * Whether ILK really reuses the ELK register for this is unclear. 198 * Let's see if we catch anyone with this supposedly enabled on ILK. 199 */ 200 drm_WARN(&i915->drm, GRAPHICS_VER(i915) == 5, 201 "ILK stolen reserved found? 0x%08x\n", 202 reg_val); 203 204 if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK)) 205 return; 206 207 *base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16; 208 drm_WARN_ON(&i915->drm, 209 (reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base); 210 211 *size = stolen_top - *base; 212 } 213 214 static void gen6_get_stolen_reserved(struct drm_i915_private *i915, 215 struct intel_uncore *uncore, 216 resource_size_t *base, 217 resource_size_t *size) 218 { 219 u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED); 220 221 drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val); 222 223 if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE)) 224 return; 225 226 *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK; 227 228 switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) { 229 case GEN6_STOLEN_RESERVED_1M: 230 *size = 1024 * 1024; 231 break; 232 case GEN6_STOLEN_RESERVED_512K: 233 *size = 512 * 1024; 234 break; 235 case GEN6_STOLEN_RESERVED_256K: 236 *size = 256 * 1024; 237 break; 238 case GEN6_STOLEN_RESERVED_128K: 239 *size = 128 * 1024; 240 break; 241 default: 242 *size = 1024 * 1024; 243 MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK); 244 } 245 } 246 247 static void vlv_get_stolen_reserved(struct drm_i915_private *i915, 248 struct intel_uncore *uncore, 249 resource_size_t *base, 250 resource_size_t *size) 251 { 252 u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED); 253 resource_size_t stolen_top = i915->dsm.end + 1; 254 255 drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val); 256 257 if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE)) 258 return; 259 260 switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) { 261 default: 262 MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK); 263 fallthrough; 264 case GEN7_STOLEN_RESERVED_1M: 265 *size = 1024 * 1024; 266 break; 267 } 268 269 /* 270 * On vlv, the ADDR_MASK portion is left as 0 and HW deduces the 271 * reserved location as (top - size). 272 */ 273 *base = stolen_top - *size; 274 } 275 276 static void gen7_get_stolen_reserved(struct drm_i915_private *i915, 277 struct intel_uncore *uncore, 278 resource_size_t *base, 279 resource_size_t *size) 280 { 281 u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED); 282 283 drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val); 284 285 if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE)) 286 return; 287 288 *base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK; 289 290 switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) { 291 case GEN7_STOLEN_RESERVED_1M: 292 *size = 1024 * 1024; 293 break; 294 case GEN7_STOLEN_RESERVED_256K: 295 *size = 256 * 1024; 296 break; 297 default: 298 *size = 1024 * 1024; 299 MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK); 300 } 301 } 302 303 static void chv_get_stolen_reserved(struct drm_i915_private *i915, 304 struct intel_uncore *uncore, 305 resource_size_t *base, 306 resource_size_t *size) 307 { 308 u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED); 309 310 drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val); 311 312 if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE)) 313 return; 314 315 *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK; 316 317 switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) { 318 case GEN8_STOLEN_RESERVED_1M: 319 *size = 1024 * 1024; 320 break; 321 case GEN8_STOLEN_RESERVED_2M: 322 *size = 2 * 1024 * 1024; 323 break; 324 case GEN8_STOLEN_RESERVED_4M: 325 *size = 4 * 1024 * 1024; 326 break; 327 case GEN8_STOLEN_RESERVED_8M: 328 *size = 8 * 1024 * 1024; 329 break; 330 default: 331 *size = 8 * 1024 * 1024; 332 MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK); 333 } 334 } 335 336 static void bdw_get_stolen_reserved(struct drm_i915_private *i915, 337 struct intel_uncore *uncore, 338 resource_size_t *base, 339 resource_size_t *size) 340 { 341 u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED); 342 resource_size_t stolen_top = i915->dsm.end + 1; 343 344 drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val); 345 346 if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE)) 347 return; 348 349 if (!(reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK)) 350 return; 351 352 *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK; 353 *size = stolen_top - *base; 354 } 355 356 static void icl_get_stolen_reserved(struct drm_i915_private *i915, 357 struct intel_uncore *uncore, 358 resource_size_t *base, 359 resource_size_t *size) 360 { 361 u64 reg_val = intel_uncore_read64(uncore, GEN6_STOLEN_RESERVED); 362 363 drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val); 364 365 *base = reg_val & GEN11_STOLEN_RESERVED_ADDR_MASK; 366 367 switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) { 368 case GEN8_STOLEN_RESERVED_1M: 369 *size = 1024 * 1024; 370 break; 371 case GEN8_STOLEN_RESERVED_2M: 372 *size = 2 * 1024 * 1024; 373 break; 374 case GEN8_STOLEN_RESERVED_4M: 375 *size = 4 * 1024 * 1024; 376 break; 377 case GEN8_STOLEN_RESERVED_8M: 378 *size = 8 * 1024 * 1024; 379 break; 380 default: 381 *size = 8 * 1024 * 1024; 382 MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK); 383 } 384 } 385 386 static int i915_gem_init_stolen(struct intel_memory_region *mem) 387 { 388 struct drm_i915_private *i915 = mem->i915; 389 struct intel_uncore *uncore = &i915->uncore; 390 resource_size_t reserved_base, stolen_top; 391 resource_size_t reserved_total, reserved_size; 392 393 mutex_init(&i915->mm.stolen_lock); 394 395 if (intel_vgpu_active(i915)) { 396 drm_notice(&i915->drm, 397 "%s, disabling use of stolen memory\n", 398 "iGVT-g active"); 399 return 0; 400 } 401 402 if (intel_vtd_active(i915) && GRAPHICS_VER(i915) < 8) { 403 drm_notice(&i915->drm, 404 "%s, disabling use of stolen memory\n", 405 "DMAR active"); 406 return 0; 407 } 408 409 if (resource_size(&mem->region) == 0) 410 return 0; 411 412 i915->dsm = mem->region; 413 414 if (i915_adjust_stolen(i915, &i915->dsm)) 415 return 0; 416 417 GEM_BUG_ON(i915->dsm.start == 0); 418 GEM_BUG_ON(i915->dsm.end <= i915->dsm.start); 419 420 stolen_top = i915->dsm.end + 1; 421 reserved_base = stolen_top; 422 reserved_size = 0; 423 424 switch (GRAPHICS_VER(i915)) { 425 case 2: 426 case 3: 427 break; 428 case 4: 429 if (!IS_G4X(i915)) 430 break; 431 fallthrough; 432 case 5: 433 g4x_get_stolen_reserved(i915, uncore, 434 &reserved_base, &reserved_size); 435 break; 436 case 6: 437 gen6_get_stolen_reserved(i915, uncore, 438 &reserved_base, &reserved_size); 439 break; 440 case 7: 441 if (IS_VALLEYVIEW(i915)) 442 vlv_get_stolen_reserved(i915, uncore, 443 &reserved_base, &reserved_size); 444 else 445 gen7_get_stolen_reserved(i915, uncore, 446 &reserved_base, &reserved_size); 447 break; 448 case 8: 449 case 9: 450 if (IS_LP(i915)) 451 chv_get_stolen_reserved(i915, uncore, 452 &reserved_base, &reserved_size); 453 else 454 bdw_get_stolen_reserved(i915, uncore, 455 &reserved_base, &reserved_size); 456 break; 457 default: 458 MISSING_CASE(GRAPHICS_VER(i915)); 459 fallthrough; 460 case 11: 461 case 12: 462 icl_get_stolen_reserved(i915, uncore, 463 &reserved_base, 464 &reserved_size); 465 break; 466 } 467 468 /* 469 * Our expectation is that the reserved space is at the top of the 470 * stolen region and *never* at the bottom. If we see !reserved_base, 471 * it likely means we failed to read the registers correctly. 472 */ 473 if (!reserved_base) { 474 drm_err(&i915->drm, 475 "inconsistent reservation %pa + %pa; ignoring\n", 476 &reserved_base, &reserved_size); 477 reserved_base = stolen_top; 478 reserved_size = 0; 479 } 480 481 i915->dsm_reserved = 482 (struct resource)DEFINE_RES_MEM(reserved_base, reserved_size); 483 484 if (!resource_contains(&i915->dsm, &i915->dsm_reserved)) { 485 drm_err(&i915->drm, 486 "Stolen reserved area %pR outside stolen memory %pR\n", 487 &i915->dsm_reserved, &i915->dsm); 488 return 0; 489 } 490 491 /* Exclude the reserved region from driver use */ 492 mem->region.end = reserved_base - 1; 493 494 /* It is possible for the reserved area to end before the end of stolen 495 * memory, so just consider the start. */ 496 reserved_total = stolen_top - reserved_base; 497 498 drm_dbg(&i915->drm, 499 "Memory reserved for graphics device: %lluK, usable: %lluK\n", 500 (u64)resource_size(&i915->dsm) >> 10, 501 ((u64)resource_size(&i915->dsm) - reserved_total) >> 10); 502 503 i915->stolen_usable_size = 504 resource_size(&i915->dsm) - reserved_total; 505 506 /* Basic memrange allocator for stolen space. */ 507 drm_mm_init(&i915->mm.stolen, 0, i915->stolen_usable_size); 508 509 return 0; 510 } 511 512 static void dbg_poison(struct i915_ggtt *ggtt, 513 dma_addr_t addr, resource_size_t size, 514 u8 x) 515 { 516 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) 517 if (!drm_mm_node_allocated(&ggtt->error_capture)) 518 return; 519 520 if (ggtt->vm.bind_async_flags & I915_VMA_GLOBAL_BIND) 521 return; /* beware stop_machine() inversion */ 522 523 GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE)); 524 525 mutex_lock(&ggtt->error_mutex); 526 while (size) { 527 void __iomem *s; 528 529 ggtt->vm.insert_page(&ggtt->vm, addr, 530 ggtt->error_capture.start, 531 I915_CACHE_NONE, 0); 532 mb(); 533 534 s = io_mapping_map_wc(&ggtt->iomap, 535 ggtt->error_capture.start, 536 PAGE_SIZE); 537 memset_io(s, x, PAGE_SIZE); 538 io_mapping_unmap(s); 539 540 addr += PAGE_SIZE; 541 size -= PAGE_SIZE; 542 } 543 mb(); 544 ggtt->vm.clear_range(&ggtt->vm, ggtt->error_capture.start, PAGE_SIZE); 545 mutex_unlock(&ggtt->error_mutex); 546 #endif 547 } 548 549 static struct sg_table * 550 i915_pages_create_for_stolen(struct drm_device *dev, 551 resource_size_t offset, resource_size_t size) 552 { 553 struct drm_i915_private *i915 = to_i915(dev); 554 struct sg_table *st; 555 struct scatterlist *sg; 556 557 GEM_BUG_ON(range_overflows(offset, size, resource_size(&i915->dsm))); 558 559 /* We hide that we have no struct page backing our stolen object 560 * by wrapping the contiguous physical allocation with a fake 561 * dma mapping in a single scatterlist. 562 */ 563 564 st = kmalloc(sizeof(*st), GFP_KERNEL); 565 if (st == NULL) 566 return ERR_PTR(-ENOMEM); 567 568 if (sg_alloc_table(st, 1, GFP_KERNEL)) { 569 kfree(st); 570 return ERR_PTR(-ENOMEM); 571 } 572 573 sg = st->sgl; 574 sg->offset = 0; 575 sg->length = size; 576 577 sg_dma_address(sg) = (dma_addr_t)i915->dsm.start + offset; 578 sg_dma_len(sg) = size; 579 580 return st; 581 } 582 583 static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj) 584 { 585 struct sg_table *pages = 586 i915_pages_create_for_stolen(obj->base.dev, 587 obj->stolen->start, 588 obj->stolen->size); 589 if (IS_ERR(pages)) 590 return PTR_ERR(pages); 591 592 dbg_poison(&to_i915(obj->base.dev)->ggtt, 593 sg_dma_address(pages->sgl), 594 sg_dma_len(pages->sgl), 595 POISON_INUSE); 596 597 __i915_gem_object_set_pages(obj, pages, obj->stolen->size); 598 599 return 0; 600 } 601 602 static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj, 603 struct sg_table *pages) 604 { 605 /* Should only be called from i915_gem_object_release_stolen() */ 606 607 dbg_poison(&to_i915(obj->base.dev)->ggtt, 608 sg_dma_address(pages->sgl), 609 sg_dma_len(pages->sgl), 610 POISON_FREE); 611 612 sg_free_table(pages); 613 kfree(pages); 614 } 615 616 static void 617 i915_gem_object_release_stolen(struct drm_i915_gem_object *obj) 618 { 619 struct drm_i915_private *i915 = to_i915(obj->base.dev); 620 struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen); 621 622 GEM_BUG_ON(!stolen); 623 i915_gem_stolen_remove_node(i915, stolen); 624 kfree(stolen); 625 626 i915_gem_object_release_memory_region(obj); 627 } 628 629 static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = { 630 .name = "i915_gem_object_stolen", 631 .get_pages = i915_gem_object_get_pages_stolen, 632 .put_pages = i915_gem_object_put_pages_stolen, 633 .release = i915_gem_object_release_stolen, 634 }; 635 636 static int __i915_gem_object_create_stolen(struct intel_memory_region *mem, 637 struct drm_i915_gem_object *obj, 638 struct drm_mm_node *stolen) 639 { 640 static struct lock_class_key lock_class; 641 unsigned int cache_level; 642 unsigned int flags; 643 int err; 644 645 /* 646 * Stolen objects are always physically contiguous since we just 647 * allocate one big block underneath using the drm_mm range allocator. 648 */ 649 flags = I915_BO_ALLOC_CONTIGUOUS; 650 651 drm_gem_private_object_init(&mem->i915->drm, &obj->base, stolen->size); 652 i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class, flags); 653 654 obj->stolen = stolen; 655 obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT; 656 cache_level = HAS_LLC(mem->i915) ? I915_CACHE_LLC : I915_CACHE_NONE; 657 i915_gem_object_set_cache_coherency(obj, cache_level); 658 659 if (WARN_ON(!i915_gem_object_trylock(obj, NULL))) 660 return -EBUSY; 661 662 i915_gem_object_init_memory_region(obj, mem); 663 664 err = i915_gem_object_pin_pages(obj); 665 if (err) 666 i915_gem_object_release_memory_region(obj); 667 i915_gem_object_unlock(obj); 668 669 return err; 670 } 671 672 static int _i915_gem_object_stolen_init(struct intel_memory_region *mem, 673 struct drm_i915_gem_object *obj, 674 resource_size_t size, 675 resource_size_t page_size, 676 unsigned int flags) 677 { 678 struct drm_i915_private *i915 = mem->i915; 679 struct drm_mm_node *stolen; 680 int ret; 681 682 if (!drm_mm_initialized(&i915->mm.stolen)) 683 return -ENODEV; 684 685 if (size == 0) 686 return -EINVAL; 687 688 stolen = kzalloc(sizeof(*stolen), GFP_KERNEL); 689 if (!stolen) 690 return -ENOMEM; 691 692 ret = i915_gem_stolen_insert_node(i915, stolen, size, 693 mem->min_page_size); 694 if (ret) 695 goto err_free; 696 697 ret = __i915_gem_object_create_stolen(mem, obj, stolen); 698 if (ret) 699 goto err_remove; 700 701 return 0; 702 703 err_remove: 704 i915_gem_stolen_remove_node(i915, stolen); 705 err_free: 706 kfree(stolen); 707 return ret; 708 } 709 710 struct drm_i915_gem_object * 711 i915_gem_object_create_stolen(struct drm_i915_private *i915, 712 resource_size_t size) 713 { 714 return i915_gem_object_create_region(i915->mm.stolen_region, size, 0, 0); 715 } 716 717 static int init_stolen_smem(struct intel_memory_region *mem) 718 { 719 /* 720 * Initialise stolen early so that we may reserve preallocated 721 * objects for the BIOS to KMS transition. 722 */ 723 return i915_gem_init_stolen(mem); 724 } 725 726 static int release_stolen_smem(struct intel_memory_region *mem) 727 { 728 i915_gem_cleanup_stolen(mem->i915); 729 return 0; 730 } 731 732 static const struct intel_memory_region_ops i915_region_stolen_smem_ops = { 733 .init = init_stolen_smem, 734 .release = release_stolen_smem, 735 .init_object = _i915_gem_object_stolen_init, 736 }; 737 738 static int init_stolen_lmem(struct intel_memory_region *mem) 739 { 740 int err; 741 742 if (GEM_WARN_ON(resource_size(&mem->region) == 0)) 743 return -ENODEV; 744 745 if (!io_mapping_init_wc(&mem->iomap, 746 mem->io_start, 747 resource_size(&mem->region))) 748 return -EIO; 749 750 /* 751 * TODO: For stolen lmem we mostly just care about populating the dsm 752 * related bits and setting up the drm_mm allocator for the range. 753 * Perhaps split up i915_gem_init_stolen() for this. 754 */ 755 err = i915_gem_init_stolen(mem); 756 if (err) 757 goto err_fini; 758 759 return 0; 760 761 err_fini: 762 io_mapping_fini(&mem->iomap); 763 return err; 764 } 765 766 static int release_stolen_lmem(struct intel_memory_region *mem) 767 { 768 io_mapping_fini(&mem->iomap); 769 i915_gem_cleanup_stolen(mem->i915); 770 return 0; 771 } 772 773 static const struct intel_memory_region_ops i915_region_stolen_lmem_ops = { 774 .init = init_stolen_lmem, 775 .release = release_stolen_lmem, 776 .init_object = _i915_gem_object_stolen_init, 777 }; 778 779 struct intel_memory_region * 780 i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type, 781 u16 instance) 782 { 783 struct intel_uncore *uncore = &i915->uncore; 784 struct pci_dev *pdev = to_pci_dev(i915->drm.dev); 785 struct intel_memory_region *mem; 786 resource_size_t min_page_size; 787 resource_size_t io_start; 788 resource_size_t lmem_size; 789 u64 lmem_base; 790 791 lmem_base = intel_uncore_read64(uncore, GEN12_DSMBASE); 792 if (GEM_WARN_ON(lmem_base >= pci_resource_len(pdev, 2))) 793 return ERR_PTR(-ENODEV); 794 795 lmem_size = pci_resource_len(pdev, 2) - lmem_base; 796 io_start = pci_resource_start(pdev, 2) + lmem_base; 797 798 min_page_size = HAS_64K_PAGES(i915) ? I915_GTT_PAGE_SIZE_64K : 799 I915_GTT_PAGE_SIZE_4K; 800 801 mem = intel_memory_region_create(i915, lmem_base, lmem_size, 802 min_page_size, io_start, 803 type, instance, 804 &i915_region_stolen_lmem_ops); 805 if (IS_ERR(mem)) 806 return mem; 807 808 /* 809 * TODO: consider creating common helper to just print all the 810 * interesting stuff from intel_memory_region, which we can use for all 811 * our probed regions. 812 */ 813 814 drm_dbg(&i915->drm, "Stolen Local memory IO start: %pa\n", 815 &mem->io_start); 816 817 intel_memory_region_set_name(mem, "stolen-local"); 818 819 mem->private = true; 820 821 return mem; 822 } 823 824 struct intel_memory_region* 825 i915_gem_stolen_smem_setup(struct drm_i915_private *i915, u16 type, 826 u16 instance) 827 { 828 struct intel_memory_region *mem; 829 830 mem = intel_memory_region_create(i915, 831 intel_graphics_stolen_res.start, 832 resource_size(&intel_graphics_stolen_res), 833 PAGE_SIZE, 0, type, instance, 834 &i915_region_stolen_smem_ops); 835 if (IS_ERR(mem)) 836 return mem; 837 838 intel_memory_region_set_name(mem, "stolen-system"); 839 840 mem->private = true; 841 return mem; 842 } 843 844 struct drm_i915_gem_object * 845 i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *i915, 846 resource_size_t stolen_offset, 847 resource_size_t size) 848 { 849 struct intel_memory_region *mem = i915->mm.stolen_region; 850 struct drm_i915_gem_object *obj; 851 struct drm_mm_node *stolen; 852 int ret; 853 854 if (!drm_mm_initialized(&i915->mm.stolen)) 855 return ERR_PTR(-ENODEV); 856 857 drm_dbg(&i915->drm, 858 "creating preallocated stolen object: stolen_offset=%pa, size=%pa\n", 859 &stolen_offset, &size); 860 861 /* KISS and expect everything to be page-aligned */ 862 if (GEM_WARN_ON(size == 0) || 863 GEM_WARN_ON(!IS_ALIGNED(size, mem->min_page_size)) || 864 GEM_WARN_ON(!IS_ALIGNED(stolen_offset, mem->min_page_size))) 865 return ERR_PTR(-EINVAL); 866 867 stolen = kzalloc(sizeof(*stolen), GFP_KERNEL); 868 if (!stolen) 869 return ERR_PTR(-ENOMEM); 870 871 stolen->start = stolen_offset; 872 stolen->size = size; 873 mutex_lock(&i915->mm.stolen_lock); 874 ret = drm_mm_reserve_node(&i915->mm.stolen, stolen); 875 mutex_unlock(&i915->mm.stolen_lock); 876 if (ret) 877 goto err_free; 878 879 obj = i915_gem_object_alloc(); 880 if (!obj) { 881 ret = -ENOMEM; 882 goto err_stolen; 883 } 884 885 ret = __i915_gem_object_create_stolen(mem, obj, stolen); 886 if (ret) 887 goto err_object_free; 888 889 i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE); 890 return obj; 891 892 err_object_free: 893 i915_gem_object_free(obj); 894 err_stolen: 895 i915_gem_stolen_remove_node(i915, stolen); 896 err_free: 897 kfree(stolen); 898 return ERR_PTR(ret); 899 } 900 901 bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj) 902 { 903 return obj->ops == &i915_gem_object_stolen_ops; 904 } 905