1 /* 2 * SPDX-License-Identifier: MIT 3 * 4 * Copyright © 2008-2012 Intel Corporation 5 */ 6 7 #include <linux/errno.h> 8 #include <linux/mutex.h> 9 10 #include <drm/drm_mm.h> 11 #include <drm/i915_drm.h> 12 13 #include "gem/i915_gem_lmem.h" 14 #include "gem/i915_gem_region.h" 15 #include "gt/intel_gt.h" 16 #include "gt/intel_gt_mcr.h" 17 #include "gt/intel_gt_regs.h" 18 #include "gt/intel_region_lmem.h" 19 #include "i915_drv.h" 20 #include "i915_gem_stolen.h" 21 #include "i915_reg.h" 22 #include "i915_utils.h" 23 #include "i915_vgpu.h" 24 #include "intel_mchbar_regs.h" 25 26 /* 27 * The BIOS typically reserves some of the system's memory for the exclusive 28 * use of the integrated graphics. This memory is no longer available for 29 * use by the OS and so the user finds that his system has less memory 30 * available than he put in. We refer to this memory as stolen. 31 * 32 * The BIOS will allocate its framebuffer from the stolen memory. Our 33 * goal is try to reuse that object for our own fbcon which must always 34 * be available for panics. Anything else we can reuse the stolen memory 35 * for is a boon. 36 */ 37 38 int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915, 39 struct drm_mm_node *node, u64 size, 40 unsigned alignment, u64 start, u64 end) 41 { 42 int ret; 43 44 if (!drm_mm_initialized(&i915->mm.stolen)) 45 return -ENODEV; 46 47 /* WaSkipStolenMemoryFirstPage:bdw+ */ 48 if (GRAPHICS_VER(i915) >= 8 && start < 4096) 49 start = 4096; 50 51 mutex_lock(&i915->mm.stolen_lock); 52 ret = drm_mm_insert_node_in_range(&i915->mm.stolen, node, 53 size, alignment, 0, 54 start, end, DRM_MM_INSERT_BEST); 55 mutex_unlock(&i915->mm.stolen_lock); 56 57 return ret; 58 } 59 60 int i915_gem_stolen_insert_node(struct drm_i915_private *i915, 61 struct drm_mm_node *node, u64 size, 62 unsigned alignment) 63 { 64 return i915_gem_stolen_insert_node_in_range(i915, node, 65 size, alignment, 66 I915_GEM_STOLEN_BIAS, 67 U64_MAX); 68 } 69 70 void i915_gem_stolen_remove_node(struct drm_i915_private *i915, 71 struct drm_mm_node *node) 72 { 73 mutex_lock(&i915->mm.stolen_lock); 74 drm_mm_remove_node(node); 75 mutex_unlock(&i915->mm.stolen_lock); 76 } 77 78 static int i915_adjust_stolen(struct drm_i915_private *i915, 79 struct resource *dsm) 80 { 81 struct i915_ggtt *ggtt = to_gt(i915)->ggtt; 82 struct intel_uncore *uncore = ggtt->vm.gt->uncore; 83 struct resource *r; 84 85 if (dsm->start == 0 || dsm->end <= dsm->start) 86 return -EINVAL; 87 88 /* 89 * TODO: We have yet too encounter the case where the GTT wasn't at the 90 * end of stolen. With that assumption we could simplify this. 91 */ 92 93 /* Make sure we don't clobber the GTT if it's within stolen memory */ 94 if (GRAPHICS_VER(i915) <= 4 && 95 !IS_G33(i915) && !IS_PINEVIEW(i915) && !IS_G4X(i915)) { 96 struct resource stolen[2] = {*dsm, *dsm}; 97 struct resource ggtt_res; 98 resource_size_t ggtt_start; 99 100 ggtt_start = intel_uncore_read(uncore, PGTBL_CTL); 101 if (GRAPHICS_VER(i915) == 4) 102 ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) | 103 (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28; 104 else 105 ggtt_start &= PGTBL_ADDRESS_LO_MASK; 106 107 ggtt_res = 108 (struct resource) DEFINE_RES_MEM(ggtt_start, 109 ggtt_total_entries(ggtt) * 4); 110 111 if (ggtt_res.start >= stolen[0].start && ggtt_res.start < stolen[0].end) 112 stolen[0].end = ggtt_res.start; 113 if (ggtt_res.end > stolen[1].start && ggtt_res.end <= stolen[1].end) 114 stolen[1].start = ggtt_res.end; 115 116 /* Pick the larger of the two chunks */ 117 if (resource_size(&stolen[0]) > resource_size(&stolen[1])) 118 *dsm = stolen[0]; 119 else 120 *dsm = stolen[1]; 121 122 if (stolen[0].start != stolen[1].start || 123 stolen[0].end != stolen[1].end) { 124 drm_dbg(&i915->drm, 125 "GTT within stolen memory at %pR\n", 126 &ggtt_res); 127 drm_dbg(&i915->drm, "Stolen memory adjusted to %pR\n", 128 dsm); 129 } 130 } 131 132 /* 133 * With stolen lmem, we don't need to check if the address range 134 * overlaps with the non-stolen system memory range, since lmem is local 135 * to the gpu. 136 */ 137 if (HAS_LMEM(i915)) 138 return 0; 139 140 /* 141 * Verify that nothing else uses this physical address. Stolen 142 * memory should be reserved by the BIOS and hidden from the 143 * kernel. So if the region is already marked as busy, something 144 * is seriously wrong. 145 */ 146 r = devm_request_mem_region(i915->drm.dev, dsm->start, 147 resource_size(dsm), 148 "Graphics Stolen Memory"); 149 if (r == NULL) { 150 /* 151 * One more attempt but this time requesting region from 152 * start + 1, as we have seen that this resolves the region 153 * conflict with the PCI Bus. 154 * This is a BIOS w/a: Some BIOS wrap stolen in the root 155 * PCI bus, but have an off-by-one error. Hence retry the 156 * reservation starting from 1 instead of 0. 157 * There's also BIOS with off-by-one on the other end. 158 */ 159 r = devm_request_mem_region(i915->drm.dev, dsm->start + 1, 160 resource_size(dsm) - 2, 161 "Graphics Stolen Memory"); 162 /* 163 * GEN3 firmware likes to smash pci bridges into the stolen 164 * range. Apparently this works. 165 */ 166 if (!r && GRAPHICS_VER(i915) != 3) { 167 drm_err(&i915->drm, 168 "conflict detected with stolen region: %pR\n", 169 dsm); 170 171 return -EBUSY; 172 } 173 } 174 175 return 0; 176 } 177 178 static void i915_gem_cleanup_stolen(struct drm_i915_private *i915) 179 { 180 if (!drm_mm_initialized(&i915->mm.stolen)) 181 return; 182 183 drm_mm_takedown(&i915->mm.stolen); 184 } 185 186 static void g4x_get_stolen_reserved(struct drm_i915_private *i915, 187 struct intel_uncore *uncore, 188 resource_size_t *base, 189 resource_size_t *size) 190 { 191 u32 reg_val = intel_uncore_read(uncore, 192 IS_GM45(i915) ? 193 CTG_STOLEN_RESERVED : 194 ELK_STOLEN_RESERVED); 195 resource_size_t stolen_top = i915->dsm.end + 1; 196 197 drm_dbg(&i915->drm, "%s_STOLEN_RESERVED = %08x\n", 198 IS_GM45(i915) ? "CTG" : "ELK", reg_val); 199 200 if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0) 201 return; 202 203 /* 204 * Whether ILK really reuses the ELK register for this is unclear. 205 * Let's see if we catch anyone with this supposedly enabled on ILK. 206 */ 207 drm_WARN(&i915->drm, GRAPHICS_VER(i915) == 5, 208 "ILK stolen reserved found? 0x%08x\n", 209 reg_val); 210 211 if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK)) 212 return; 213 214 *base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16; 215 drm_WARN_ON(&i915->drm, 216 (reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base); 217 218 *size = stolen_top - *base; 219 } 220 221 static void gen6_get_stolen_reserved(struct drm_i915_private *i915, 222 struct intel_uncore *uncore, 223 resource_size_t *base, 224 resource_size_t *size) 225 { 226 u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED); 227 228 drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val); 229 230 if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE)) 231 return; 232 233 *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK; 234 235 switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) { 236 case GEN6_STOLEN_RESERVED_1M: 237 *size = 1024 * 1024; 238 break; 239 case GEN6_STOLEN_RESERVED_512K: 240 *size = 512 * 1024; 241 break; 242 case GEN6_STOLEN_RESERVED_256K: 243 *size = 256 * 1024; 244 break; 245 case GEN6_STOLEN_RESERVED_128K: 246 *size = 128 * 1024; 247 break; 248 default: 249 *size = 1024 * 1024; 250 MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK); 251 } 252 } 253 254 static void vlv_get_stolen_reserved(struct drm_i915_private *i915, 255 struct intel_uncore *uncore, 256 resource_size_t *base, 257 resource_size_t *size) 258 { 259 u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED); 260 resource_size_t stolen_top = i915->dsm.end + 1; 261 262 drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val); 263 264 if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE)) 265 return; 266 267 switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) { 268 default: 269 MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK); 270 fallthrough; 271 case GEN7_STOLEN_RESERVED_1M: 272 *size = 1024 * 1024; 273 break; 274 } 275 276 /* 277 * On vlv, the ADDR_MASK portion is left as 0 and HW deduces the 278 * reserved location as (top - size). 279 */ 280 *base = stolen_top - *size; 281 } 282 283 static void gen7_get_stolen_reserved(struct drm_i915_private *i915, 284 struct intel_uncore *uncore, 285 resource_size_t *base, 286 resource_size_t *size) 287 { 288 u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED); 289 290 drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val); 291 292 if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE)) 293 return; 294 295 *base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK; 296 297 switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) { 298 case GEN7_STOLEN_RESERVED_1M: 299 *size = 1024 * 1024; 300 break; 301 case GEN7_STOLEN_RESERVED_256K: 302 *size = 256 * 1024; 303 break; 304 default: 305 *size = 1024 * 1024; 306 MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK); 307 } 308 } 309 310 static void chv_get_stolen_reserved(struct drm_i915_private *i915, 311 struct intel_uncore *uncore, 312 resource_size_t *base, 313 resource_size_t *size) 314 { 315 u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED); 316 317 drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val); 318 319 if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE)) 320 return; 321 322 *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK; 323 324 switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) { 325 case GEN8_STOLEN_RESERVED_1M: 326 *size = 1024 * 1024; 327 break; 328 case GEN8_STOLEN_RESERVED_2M: 329 *size = 2 * 1024 * 1024; 330 break; 331 case GEN8_STOLEN_RESERVED_4M: 332 *size = 4 * 1024 * 1024; 333 break; 334 case GEN8_STOLEN_RESERVED_8M: 335 *size = 8 * 1024 * 1024; 336 break; 337 default: 338 *size = 8 * 1024 * 1024; 339 MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK); 340 } 341 } 342 343 static void bdw_get_stolen_reserved(struct drm_i915_private *i915, 344 struct intel_uncore *uncore, 345 resource_size_t *base, 346 resource_size_t *size) 347 { 348 u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED); 349 resource_size_t stolen_top = i915->dsm.end + 1; 350 351 drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val); 352 353 if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE)) 354 return; 355 356 if (!(reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK)) 357 return; 358 359 *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK; 360 *size = stolen_top - *base; 361 } 362 363 static void icl_get_stolen_reserved(struct drm_i915_private *i915, 364 struct intel_uncore *uncore, 365 resource_size_t *base, 366 resource_size_t *size) 367 { 368 u64 reg_val = intel_uncore_read64(uncore, GEN6_STOLEN_RESERVED); 369 370 drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val); 371 372 *base = reg_val & GEN11_STOLEN_RESERVED_ADDR_MASK; 373 374 switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) { 375 case GEN8_STOLEN_RESERVED_1M: 376 *size = 1024 * 1024; 377 break; 378 case GEN8_STOLEN_RESERVED_2M: 379 *size = 2 * 1024 * 1024; 380 break; 381 case GEN8_STOLEN_RESERVED_4M: 382 *size = 4 * 1024 * 1024; 383 break; 384 case GEN8_STOLEN_RESERVED_8M: 385 *size = 8 * 1024 * 1024; 386 break; 387 default: 388 *size = 8 * 1024 * 1024; 389 MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK); 390 } 391 } 392 393 static int i915_gem_init_stolen(struct intel_memory_region *mem) 394 { 395 struct drm_i915_private *i915 = mem->i915; 396 struct intel_uncore *uncore = &i915->uncore; 397 resource_size_t reserved_base, stolen_top; 398 resource_size_t reserved_total, reserved_size; 399 400 mutex_init(&i915->mm.stolen_lock); 401 402 if (intel_vgpu_active(i915)) { 403 drm_notice(&i915->drm, 404 "%s, disabling use of stolen memory\n", 405 "iGVT-g active"); 406 return 0; 407 } 408 409 if (i915_vtd_active(i915) && GRAPHICS_VER(i915) < 8) { 410 drm_notice(&i915->drm, 411 "%s, disabling use of stolen memory\n", 412 "DMAR active"); 413 return 0; 414 } 415 416 if (resource_size(&mem->region) == 0) 417 return 0; 418 419 i915->dsm = mem->region; 420 421 if (i915_adjust_stolen(i915, &i915->dsm)) 422 return 0; 423 424 GEM_BUG_ON(i915->dsm.start == 0); 425 GEM_BUG_ON(i915->dsm.end <= i915->dsm.start); 426 427 stolen_top = i915->dsm.end + 1; 428 reserved_base = stolen_top; 429 reserved_size = 0; 430 431 switch (GRAPHICS_VER(i915)) { 432 case 2: 433 case 3: 434 break; 435 case 4: 436 if (!IS_G4X(i915)) 437 break; 438 fallthrough; 439 case 5: 440 g4x_get_stolen_reserved(i915, uncore, 441 &reserved_base, &reserved_size); 442 break; 443 case 6: 444 gen6_get_stolen_reserved(i915, uncore, 445 &reserved_base, &reserved_size); 446 break; 447 case 7: 448 if (IS_VALLEYVIEW(i915)) 449 vlv_get_stolen_reserved(i915, uncore, 450 &reserved_base, &reserved_size); 451 else 452 gen7_get_stolen_reserved(i915, uncore, 453 &reserved_base, &reserved_size); 454 break; 455 case 8: 456 case 9: 457 if (IS_LP(i915)) 458 chv_get_stolen_reserved(i915, uncore, 459 &reserved_base, &reserved_size); 460 else 461 bdw_get_stolen_reserved(i915, uncore, 462 &reserved_base, &reserved_size); 463 break; 464 default: 465 MISSING_CASE(GRAPHICS_VER(i915)); 466 fallthrough; 467 case 11: 468 case 12: 469 icl_get_stolen_reserved(i915, uncore, 470 &reserved_base, 471 &reserved_size); 472 break; 473 } 474 475 /* 476 * Our expectation is that the reserved space is at the top of the 477 * stolen region and *never* at the bottom. If we see !reserved_base, 478 * it likely means we failed to read the registers correctly. 479 */ 480 if (!reserved_base) { 481 drm_err(&i915->drm, 482 "inconsistent reservation %pa + %pa; ignoring\n", 483 &reserved_base, &reserved_size); 484 reserved_base = stolen_top; 485 reserved_size = 0; 486 } 487 488 i915->dsm_reserved = 489 (struct resource)DEFINE_RES_MEM(reserved_base, reserved_size); 490 491 if (!resource_contains(&i915->dsm, &i915->dsm_reserved)) { 492 drm_err(&i915->drm, 493 "Stolen reserved area %pR outside stolen memory %pR\n", 494 &i915->dsm_reserved, &i915->dsm); 495 return 0; 496 } 497 498 /* Exclude the reserved region from driver use */ 499 mem->region.end = reserved_base - 1; 500 mem->io_size = min(mem->io_size, resource_size(&mem->region)); 501 502 /* It is possible for the reserved area to end before the end of stolen 503 * memory, so just consider the start. */ 504 reserved_total = stolen_top - reserved_base; 505 506 i915->stolen_usable_size = 507 resource_size(&i915->dsm) - reserved_total; 508 509 drm_dbg(&i915->drm, 510 "Memory reserved for graphics device: %lluK, usable: %lluK\n", 511 (u64)resource_size(&i915->dsm) >> 10, 512 (u64)i915->stolen_usable_size >> 10); 513 514 if (i915->stolen_usable_size == 0) 515 return 0; 516 517 /* Basic memrange allocator for stolen space. */ 518 drm_mm_init(&i915->mm.stolen, 0, i915->stolen_usable_size); 519 520 return 0; 521 } 522 523 static void dbg_poison(struct i915_ggtt *ggtt, 524 dma_addr_t addr, resource_size_t size, 525 u8 x) 526 { 527 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) 528 if (!drm_mm_node_allocated(&ggtt->error_capture)) 529 return; 530 531 if (ggtt->vm.bind_async_flags & I915_VMA_GLOBAL_BIND) 532 return; /* beware stop_machine() inversion */ 533 534 GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE)); 535 536 mutex_lock(&ggtt->error_mutex); 537 while (size) { 538 void __iomem *s; 539 540 ggtt->vm.insert_page(&ggtt->vm, addr, 541 ggtt->error_capture.start, 542 I915_CACHE_NONE, 0); 543 mb(); 544 545 s = io_mapping_map_wc(&ggtt->iomap, 546 ggtt->error_capture.start, 547 PAGE_SIZE); 548 memset_io(s, x, PAGE_SIZE); 549 io_mapping_unmap(s); 550 551 addr += PAGE_SIZE; 552 size -= PAGE_SIZE; 553 } 554 mb(); 555 ggtt->vm.clear_range(&ggtt->vm, ggtt->error_capture.start, PAGE_SIZE); 556 mutex_unlock(&ggtt->error_mutex); 557 #endif 558 } 559 560 static struct sg_table * 561 i915_pages_create_for_stolen(struct drm_device *dev, 562 resource_size_t offset, resource_size_t size) 563 { 564 struct drm_i915_private *i915 = to_i915(dev); 565 struct sg_table *st; 566 struct scatterlist *sg; 567 568 GEM_BUG_ON(range_overflows(offset, size, resource_size(&i915->dsm))); 569 570 /* We hide that we have no struct page backing our stolen object 571 * by wrapping the contiguous physical allocation with a fake 572 * dma mapping in a single scatterlist. 573 */ 574 575 st = kmalloc(sizeof(*st), GFP_KERNEL); 576 if (st == NULL) 577 return ERR_PTR(-ENOMEM); 578 579 if (sg_alloc_table(st, 1, GFP_KERNEL)) { 580 kfree(st); 581 return ERR_PTR(-ENOMEM); 582 } 583 584 sg = st->sgl; 585 sg->offset = 0; 586 sg->length = size; 587 588 sg_dma_address(sg) = (dma_addr_t)i915->dsm.start + offset; 589 sg_dma_len(sg) = size; 590 591 return st; 592 } 593 594 static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj) 595 { 596 struct drm_i915_private *i915 = to_i915(obj->base.dev); 597 struct sg_table *pages = 598 i915_pages_create_for_stolen(obj->base.dev, 599 obj->stolen->start, 600 obj->stolen->size); 601 if (IS_ERR(pages)) 602 return PTR_ERR(pages); 603 604 dbg_poison(to_gt(i915)->ggtt, 605 sg_dma_address(pages->sgl), 606 sg_dma_len(pages->sgl), 607 POISON_INUSE); 608 609 __i915_gem_object_set_pages(obj, pages, obj->stolen->size); 610 611 return 0; 612 } 613 614 static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj, 615 struct sg_table *pages) 616 { 617 struct drm_i915_private *i915 = to_i915(obj->base.dev); 618 /* Should only be called from i915_gem_object_release_stolen() */ 619 620 dbg_poison(to_gt(i915)->ggtt, 621 sg_dma_address(pages->sgl), 622 sg_dma_len(pages->sgl), 623 POISON_FREE); 624 625 sg_free_table(pages); 626 kfree(pages); 627 } 628 629 static void 630 i915_gem_object_release_stolen(struct drm_i915_gem_object *obj) 631 { 632 struct drm_i915_private *i915 = to_i915(obj->base.dev); 633 struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen); 634 635 GEM_BUG_ON(!stolen); 636 i915_gem_stolen_remove_node(i915, stolen); 637 kfree(stolen); 638 639 i915_gem_object_release_memory_region(obj); 640 } 641 642 static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = { 643 .name = "i915_gem_object_stolen", 644 .get_pages = i915_gem_object_get_pages_stolen, 645 .put_pages = i915_gem_object_put_pages_stolen, 646 .release = i915_gem_object_release_stolen, 647 }; 648 649 static int __i915_gem_object_create_stolen(struct intel_memory_region *mem, 650 struct drm_i915_gem_object *obj, 651 struct drm_mm_node *stolen) 652 { 653 static struct lock_class_key lock_class; 654 unsigned int cache_level; 655 unsigned int flags; 656 int err; 657 658 /* 659 * Stolen objects are always physically contiguous since we just 660 * allocate one big block underneath using the drm_mm range allocator. 661 */ 662 flags = I915_BO_ALLOC_CONTIGUOUS; 663 664 drm_gem_private_object_init(&mem->i915->drm, &obj->base, stolen->size); 665 i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class, flags); 666 667 obj->stolen = stolen; 668 obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT; 669 cache_level = HAS_LLC(mem->i915) ? I915_CACHE_LLC : I915_CACHE_NONE; 670 i915_gem_object_set_cache_coherency(obj, cache_level); 671 672 if (WARN_ON(!i915_gem_object_trylock(obj, NULL))) 673 return -EBUSY; 674 675 i915_gem_object_init_memory_region(obj, mem); 676 677 err = i915_gem_object_pin_pages(obj); 678 if (err) 679 i915_gem_object_release_memory_region(obj); 680 i915_gem_object_unlock(obj); 681 682 return err; 683 } 684 685 static int _i915_gem_object_stolen_init(struct intel_memory_region *mem, 686 struct drm_i915_gem_object *obj, 687 resource_size_t offset, 688 resource_size_t size, 689 resource_size_t page_size, 690 unsigned int flags) 691 { 692 struct drm_i915_private *i915 = mem->i915; 693 struct drm_mm_node *stolen; 694 int ret; 695 696 if (!drm_mm_initialized(&i915->mm.stolen)) 697 return -ENODEV; 698 699 if (size == 0) 700 return -EINVAL; 701 702 /* 703 * With discrete devices, where we lack a mappable aperture there is no 704 * possible way to ever access this memory on the CPU side. 705 */ 706 if (mem->type == INTEL_MEMORY_STOLEN_LOCAL && !mem->io_size && 707 !(flags & I915_BO_ALLOC_GPU_ONLY)) 708 return -ENOSPC; 709 710 stolen = kzalloc(sizeof(*stolen), GFP_KERNEL); 711 if (!stolen) 712 return -ENOMEM; 713 714 if (offset != I915_BO_INVALID_OFFSET) { 715 drm_dbg(&i915->drm, 716 "creating preallocated stolen object: stolen_offset=%pa, size=%pa\n", 717 &offset, &size); 718 719 stolen->start = offset; 720 stolen->size = size; 721 mutex_lock(&i915->mm.stolen_lock); 722 ret = drm_mm_reserve_node(&i915->mm.stolen, stolen); 723 mutex_unlock(&i915->mm.stolen_lock); 724 } else { 725 ret = i915_gem_stolen_insert_node(i915, stolen, size, 726 mem->min_page_size); 727 } 728 if (ret) 729 goto err_free; 730 731 ret = __i915_gem_object_create_stolen(mem, obj, stolen); 732 if (ret) 733 goto err_remove; 734 735 return 0; 736 737 err_remove: 738 i915_gem_stolen_remove_node(i915, stolen); 739 err_free: 740 kfree(stolen); 741 return ret; 742 } 743 744 struct drm_i915_gem_object * 745 i915_gem_object_create_stolen(struct drm_i915_private *i915, 746 resource_size_t size) 747 { 748 return i915_gem_object_create_region(i915->mm.stolen_region, size, 0, 0); 749 } 750 751 static int init_stolen_smem(struct intel_memory_region *mem) 752 { 753 /* 754 * Initialise stolen early so that we may reserve preallocated 755 * objects for the BIOS to KMS transition. 756 */ 757 return i915_gem_init_stolen(mem); 758 } 759 760 static int release_stolen_smem(struct intel_memory_region *mem) 761 { 762 i915_gem_cleanup_stolen(mem->i915); 763 return 0; 764 } 765 766 static const struct intel_memory_region_ops i915_region_stolen_smem_ops = { 767 .init = init_stolen_smem, 768 .release = release_stolen_smem, 769 .init_object = _i915_gem_object_stolen_init, 770 }; 771 772 static int init_stolen_lmem(struct intel_memory_region *mem) 773 { 774 int err; 775 776 if (GEM_WARN_ON(resource_size(&mem->region) == 0)) 777 return -ENODEV; 778 779 /* 780 * TODO: For stolen lmem we mostly just care about populating the dsm 781 * related bits and setting up the drm_mm allocator for the range. 782 * Perhaps split up i915_gem_init_stolen() for this. 783 */ 784 err = i915_gem_init_stolen(mem); 785 if (err) 786 return err; 787 788 if (mem->io_size && !io_mapping_init_wc(&mem->iomap, 789 mem->io_start, 790 mem->io_size)) { 791 err = -EIO; 792 goto err_cleanup; 793 } 794 795 return 0; 796 797 err_cleanup: 798 i915_gem_cleanup_stolen(mem->i915); 799 return err; 800 } 801 802 static int release_stolen_lmem(struct intel_memory_region *mem) 803 { 804 if (mem->io_size) 805 io_mapping_fini(&mem->iomap); 806 i915_gem_cleanup_stolen(mem->i915); 807 return 0; 808 } 809 810 static const struct intel_memory_region_ops i915_region_stolen_lmem_ops = { 811 .init = init_stolen_lmem, 812 .release = release_stolen_lmem, 813 .init_object = _i915_gem_object_stolen_init, 814 }; 815 816 struct intel_memory_region * 817 i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type, 818 u16 instance) 819 { 820 struct intel_uncore *uncore = &i915->uncore; 821 struct pci_dev *pdev = to_pci_dev(i915->drm.dev); 822 resource_size_t dsm_size, dsm_base, lmem_size; 823 struct intel_memory_region *mem; 824 resource_size_t io_start, io_size; 825 resource_size_t min_page_size; 826 827 if (WARN_ON_ONCE(instance)) 828 return ERR_PTR(-ENODEV); 829 830 /* Use DSM base address instead for stolen memory */ 831 dsm_base = intel_uncore_read64(uncore, GEN12_DSMBASE); 832 if (IS_DG1(uncore->i915)) { 833 lmem_size = pci_resource_len(pdev, 2); 834 if (WARN_ON(lmem_size < dsm_base)) 835 return ERR_PTR(-ENODEV); 836 } else { 837 resource_size_t lmem_range; 838 839 lmem_range = intel_gt_mcr_read_any(&i915->gt0, XEHP_TILE0_ADDR_RANGE) & 0xFFFF; 840 lmem_size = lmem_range >> XEHP_TILE_LMEM_RANGE_SHIFT; 841 lmem_size *= SZ_1G; 842 } 843 844 dsm_size = lmem_size - dsm_base; 845 if (pci_resource_len(pdev, 2) < lmem_size) { 846 io_start = 0; 847 io_size = 0; 848 } else { 849 io_start = pci_resource_start(pdev, 2) + dsm_base; 850 io_size = dsm_size; 851 } 852 853 min_page_size = HAS_64K_PAGES(i915) ? I915_GTT_PAGE_SIZE_64K : 854 I915_GTT_PAGE_SIZE_4K; 855 856 mem = intel_memory_region_create(i915, dsm_base, dsm_size, 857 min_page_size, 858 io_start, io_size, 859 type, instance, 860 &i915_region_stolen_lmem_ops); 861 if (IS_ERR(mem)) 862 return mem; 863 864 /* 865 * TODO: consider creating common helper to just print all the 866 * interesting stuff from intel_memory_region, which we can use for all 867 * our probed regions. 868 */ 869 870 drm_dbg(&i915->drm, "Stolen Local memory IO start: %pa\n", 871 &mem->io_start); 872 drm_dbg(&i915->drm, "Stolen Local DSM base: %pa\n", &dsm_base); 873 874 intel_memory_region_set_name(mem, "stolen-local"); 875 876 mem->private = true; 877 878 return mem; 879 } 880 881 struct intel_memory_region* 882 i915_gem_stolen_smem_setup(struct drm_i915_private *i915, u16 type, 883 u16 instance) 884 { 885 struct intel_memory_region *mem; 886 887 mem = intel_memory_region_create(i915, 888 intel_graphics_stolen_res.start, 889 resource_size(&intel_graphics_stolen_res), 890 PAGE_SIZE, 0, 0, type, instance, 891 &i915_region_stolen_smem_ops); 892 if (IS_ERR(mem)) 893 return mem; 894 895 intel_memory_region_set_name(mem, "stolen-system"); 896 897 mem->private = true; 898 return mem; 899 } 900 901 bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj) 902 { 903 return obj->ops == &i915_gem_object_stolen_ops; 904 } 905