1 /* 2 * SPDX-License-Identifier: MIT 3 * 4 * Copyright © 2008-2012 Intel Corporation 5 */ 6 7 #include <linux/errno.h> 8 #include <linux/mutex.h> 9 10 #include <drm/drm_mm.h> 11 #include <drm/i915_drm.h> 12 13 #include "gem/i915_gem_lmem.h" 14 #include "gem/i915_gem_region.h" 15 #include "gt/intel_gt.h" 16 #include "gt/intel_gt_mcr.h" 17 #include "gt/intel_gt_regs.h" 18 #include "gt/intel_region_lmem.h" 19 #include "i915_drv.h" 20 #include "i915_gem_stolen.h" 21 #include "i915_pci.h" 22 #include "i915_reg.h" 23 #include "i915_utils.h" 24 #include "i915_vgpu.h" 25 #include "intel_mchbar_regs.h" 26 #include "intel_pci_config.h" 27 28 /* 29 * The BIOS typically reserves some of the system's memory for the exclusive 30 * use of the integrated graphics. This memory is no longer available for 31 * use by the OS and so the user finds that his system has less memory 32 * available than he put in. We refer to this memory as stolen. 33 * 34 * The BIOS will allocate its framebuffer from the stolen memory. Our 35 * goal is try to reuse that object for our own fbcon which must always 36 * be available for panics. Anything else we can reuse the stolen memory 37 * for is a boon. 38 */ 39 40 int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915, 41 struct drm_mm_node *node, u64 size, 42 unsigned alignment, u64 start, u64 end) 43 { 44 int ret; 45 46 if (!drm_mm_initialized(&i915->mm.stolen)) 47 return -ENODEV; 48 49 /* WaSkipStolenMemoryFirstPage:bdw+ */ 50 if (GRAPHICS_VER(i915) >= 8 && start < 4096) 51 start = 4096; 52 53 mutex_lock(&i915->mm.stolen_lock); 54 ret = drm_mm_insert_node_in_range(&i915->mm.stolen, node, 55 size, alignment, 0, 56 start, end, DRM_MM_INSERT_BEST); 57 mutex_unlock(&i915->mm.stolen_lock); 58 59 return ret; 60 } 61 62 int i915_gem_stolen_insert_node(struct drm_i915_private *i915, 63 struct drm_mm_node *node, u64 size, 64 unsigned alignment) 65 { 66 return i915_gem_stolen_insert_node_in_range(i915, node, 67 size, alignment, 68 I915_GEM_STOLEN_BIAS, 69 U64_MAX); 70 } 71 72 void i915_gem_stolen_remove_node(struct drm_i915_private *i915, 73 struct drm_mm_node *node) 74 { 75 mutex_lock(&i915->mm.stolen_lock); 76 drm_mm_remove_node(node); 77 mutex_unlock(&i915->mm.stolen_lock); 78 } 79 80 static int i915_adjust_stolen(struct drm_i915_private *i915, 81 struct resource *dsm) 82 { 83 struct i915_ggtt *ggtt = to_gt(i915)->ggtt; 84 struct intel_uncore *uncore = ggtt->vm.gt->uncore; 85 struct resource *r; 86 87 if (dsm->start == 0 || dsm->end <= dsm->start) 88 return -EINVAL; 89 90 /* 91 * TODO: We have yet too encounter the case where the GTT wasn't at the 92 * end of stolen. With that assumption we could simplify this. 93 */ 94 95 /* Make sure we don't clobber the GTT if it's within stolen memory */ 96 if (GRAPHICS_VER(i915) <= 4 && 97 !IS_G33(i915) && !IS_PINEVIEW(i915) && !IS_G4X(i915)) { 98 struct resource stolen[2] = {*dsm, *dsm}; 99 struct resource ggtt_res; 100 resource_size_t ggtt_start; 101 102 ggtt_start = intel_uncore_read(uncore, PGTBL_CTL); 103 if (GRAPHICS_VER(i915) == 4) 104 ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) | 105 (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28; 106 else 107 ggtt_start &= PGTBL_ADDRESS_LO_MASK; 108 109 ggtt_res = 110 (struct resource) DEFINE_RES_MEM(ggtt_start, 111 ggtt_total_entries(ggtt) * 4); 112 113 if (ggtt_res.start >= stolen[0].start && ggtt_res.start < stolen[0].end) 114 stolen[0].end = ggtt_res.start; 115 if (ggtt_res.end > stolen[1].start && ggtt_res.end <= stolen[1].end) 116 stolen[1].start = ggtt_res.end; 117 118 /* Pick the larger of the two chunks */ 119 if (resource_size(&stolen[0]) > resource_size(&stolen[1])) 120 *dsm = stolen[0]; 121 else 122 *dsm = stolen[1]; 123 124 if (stolen[0].start != stolen[1].start || 125 stolen[0].end != stolen[1].end) { 126 drm_dbg(&i915->drm, 127 "GTT within stolen memory at %pR\n", 128 &ggtt_res); 129 drm_dbg(&i915->drm, "Stolen memory adjusted to %pR\n", 130 dsm); 131 } 132 } 133 134 /* 135 * With stolen lmem, we don't need to check if the address range 136 * overlaps with the non-stolen system memory range, since lmem is local 137 * to the gpu. 138 */ 139 if (HAS_LMEM(i915)) 140 return 0; 141 142 /* 143 * Verify that nothing else uses this physical address. Stolen 144 * memory should be reserved by the BIOS and hidden from the 145 * kernel. So if the region is already marked as busy, something 146 * is seriously wrong. 147 */ 148 r = devm_request_mem_region(i915->drm.dev, dsm->start, 149 resource_size(dsm), 150 "Graphics Stolen Memory"); 151 if (r == NULL) { 152 /* 153 * One more attempt but this time requesting region from 154 * start + 1, as we have seen that this resolves the region 155 * conflict with the PCI Bus. 156 * This is a BIOS w/a: Some BIOS wrap stolen in the root 157 * PCI bus, but have an off-by-one error. Hence retry the 158 * reservation starting from 1 instead of 0. 159 * There's also BIOS with off-by-one on the other end. 160 */ 161 r = devm_request_mem_region(i915->drm.dev, dsm->start + 1, 162 resource_size(dsm) - 2, 163 "Graphics Stolen Memory"); 164 /* 165 * GEN3 firmware likes to smash pci bridges into the stolen 166 * range. Apparently this works. 167 */ 168 if (!r && GRAPHICS_VER(i915) != 3) { 169 drm_err(&i915->drm, 170 "conflict detected with stolen region: %pR\n", 171 dsm); 172 173 return -EBUSY; 174 } 175 } 176 177 return 0; 178 } 179 180 static void i915_gem_cleanup_stolen(struct drm_i915_private *i915) 181 { 182 if (!drm_mm_initialized(&i915->mm.stolen)) 183 return; 184 185 drm_mm_takedown(&i915->mm.stolen); 186 } 187 188 static void g4x_get_stolen_reserved(struct drm_i915_private *i915, 189 struct intel_uncore *uncore, 190 resource_size_t *base, 191 resource_size_t *size) 192 { 193 u32 reg_val = intel_uncore_read(uncore, 194 IS_GM45(i915) ? 195 CTG_STOLEN_RESERVED : 196 ELK_STOLEN_RESERVED); 197 resource_size_t stolen_top = i915->dsm.end + 1; 198 199 drm_dbg(&i915->drm, "%s_STOLEN_RESERVED = %08x\n", 200 IS_GM45(i915) ? "CTG" : "ELK", reg_val); 201 202 if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0) 203 return; 204 205 /* 206 * Whether ILK really reuses the ELK register for this is unclear. 207 * Let's see if we catch anyone with this supposedly enabled on ILK. 208 */ 209 drm_WARN(&i915->drm, GRAPHICS_VER(i915) == 5, 210 "ILK stolen reserved found? 0x%08x\n", 211 reg_val); 212 213 if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK)) 214 return; 215 216 *base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16; 217 drm_WARN_ON(&i915->drm, 218 (reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base); 219 220 *size = stolen_top - *base; 221 } 222 223 static void gen6_get_stolen_reserved(struct drm_i915_private *i915, 224 struct intel_uncore *uncore, 225 resource_size_t *base, 226 resource_size_t *size) 227 { 228 u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED); 229 230 drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val); 231 232 if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE)) 233 return; 234 235 *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK; 236 237 switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) { 238 case GEN6_STOLEN_RESERVED_1M: 239 *size = 1024 * 1024; 240 break; 241 case GEN6_STOLEN_RESERVED_512K: 242 *size = 512 * 1024; 243 break; 244 case GEN6_STOLEN_RESERVED_256K: 245 *size = 256 * 1024; 246 break; 247 case GEN6_STOLEN_RESERVED_128K: 248 *size = 128 * 1024; 249 break; 250 default: 251 *size = 1024 * 1024; 252 MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK); 253 } 254 } 255 256 static void vlv_get_stolen_reserved(struct drm_i915_private *i915, 257 struct intel_uncore *uncore, 258 resource_size_t *base, 259 resource_size_t *size) 260 { 261 u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED); 262 resource_size_t stolen_top = i915->dsm.end + 1; 263 264 drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val); 265 266 if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE)) 267 return; 268 269 switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) { 270 default: 271 MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK); 272 fallthrough; 273 case GEN7_STOLEN_RESERVED_1M: 274 *size = 1024 * 1024; 275 break; 276 } 277 278 /* 279 * On vlv, the ADDR_MASK portion is left as 0 and HW deduces the 280 * reserved location as (top - size). 281 */ 282 *base = stolen_top - *size; 283 } 284 285 static void gen7_get_stolen_reserved(struct drm_i915_private *i915, 286 struct intel_uncore *uncore, 287 resource_size_t *base, 288 resource_size_t *size) 289 { 290 u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED); 291 292 drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val); 293 294 if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE)) 295 return; 296 297 *base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK; 298 299 switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) { 300 case GEN7_STOLEN_RESERVED_1M: 301 *size = 1024 * 1024; 302 break; 303 case GEN7_STOLEN_RESERVED_256K: 304 *size = 256 * 1024; 305 break; 306 default: 307 *size = 1024 * 1024; 308 MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK); 309 } 310 } 311 312 static void chv_get_stolen_reserved(struct drm_i915_private *i915, 313 struct intel_uncore *uncore, 314 resource_size_t *base, 315 resource_size_t *size) 316 { 317 u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED); 318 319 drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val); 320 321 if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE)) 322 return; 323 324 *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK; 325 326 switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) { 327 case GEN8_STOLEN_RESERVED_1M: 328 *size = 1024 * 1024; 329 break; 330 case GEN8_STOLEN_RESERVED_2M: 331 *size = 2 * 1024 * 1024; 332 break; 333 case GEN8_STOLEN_RESERVED_4M: 334 *size = 4 * 1024 * 1024; 335 break; 336 case GEN8_STOLEN_RESERVED_8M: 337 *size = 8 * 1024 * 1024; 338 break; 339 default: 340 *size = 8 * 1024 * 1024; 341 MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK); 342 } 343 } 344 345 static void bdw_get_stolen_reserved(struct drm_i915_private *i915, 346 struct intel_uncore *uncore, 347 resource_size_t *base, 348 resource_size_t *size) 349 { 350 u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED); 351 resource_size_t stolen_top = i915->dsm.end + 1; 352 353 drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val); 354 355 if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE)) 356 return; 357 358 if (!(reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK)) 359 return; 360 361 *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK; 362 *size = stolen_top - *base; 363 } 364 365 static void icl_get_stolen_reserved(struct drm_i915_private *i915, 366 struct intel_uncore *uncore, 367 resource_size_t *base, 368 resource_size_t *size) 369 { 370 u64 reg_val = intel_uncore_read64(uncore, GEN6_STOLEN_RESERVED); 371 372 drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val); 373 374 *base = reg_val & GEN11_STOLEN_RESERVED_ADDR_MASK; 375 376 switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) { 377 case GEN8_STOLEN_RESERVED_1M: 378 *size = 1024 * 1024; 379 break; 380 case GEN8_STOLEN_RESERVED_2M: 381 *size = 2 * 1024 * 1024; 382 break; 383 case GEN8_STOLEN_RESERVED_4M: 384 *size = 4 * 1024 * 1024; 385 break; 386 case GEN8_STOLEN_RESERVED_8M: 387 *size = 8 * 1024 * 1024; 388 break; 389 default: 390 *size = 8 * 1024 * 1024; 391 MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK); 392 } 393 } 394 395 static int i915_gem_init_stolen(struct intel_memory_region *mem) 396 { 397 struct drm_i915_private *i915 = mem->i915; 398 struct intel_uncore *uncore = &i915->uncore; 399 resource_size_t reserved_base, stolen_top; 400 resource_size_t reserved_total, reserved_size; 401 402 mutex_init(&i915->mm.stolen_lock); 403 404 if (intel_vgpu_active(i915)) { 405 drm_notice(&i915->drm, 406 "%s, disabling use of stolen memory\n", 407 "iGVT-g active"); 408 return 0; 409 } 410 411 if (i915_vtd_active(i915) && GRAPHICS_VER(i915) < 8) { 412 drm_notice(&i915->drm, 413 "%s, disabling use of stolen memory\n", 414 "DMAR active"); 415 return 0; 416 } 417 418 if (resource_size(&mem->region) == 0) 419 return 0; 420 421 i915->dsm = mem->region; 422 423 if (i915_adjust_stolen(i915, &i915->dsm)) 424 return 0; 425 426 GEM_BUG_ON(i915->dsm.start == 0); 427 GEM_BUG_ON(i915->dsm.end <= i915->dsm.start); 428 429 stolen_top = i915->dsm.end + 1; 430 reserved_base = stolen_top; 431 reserved_size = 0; 432 433 if (GRAPHICS_VER(i915) >= 11) { 434 icl_get_stolen_reserved(i915, uncore, 435 &reserved_base, &reserved_size); 436 } else if (GRAPHICS_VER(i915) >= 8) { 437 if (IS_LP(i915)) 438 chv_get_stolen_reserved(i915, uncore, 439 &reserved_base, &reserved_size); 440 else 441 bdw_get_stolen_reserved(i915, uncore, 442 &reserved_base, &reserved_size); 443 } else if (GRAPHICS_VER(i915) >= 7) { 444 if (IS_VALLEYVIEW(i915)) 445 vlv_get_stolen_reserved(i915, uncore, 446 &reserved_base, &reserved_size); 447 else 448 gen7_get_stolen_reserved(i915, uncore, 449 &reserved_base, &reserved_size); 450 } else if (GRAPHICS_VER(i915) >= 6) { 451 gen6_get_stolen_reserved(i915, uncore, 452 &reserved_base, &reserved_size); 453 } else if (GRAPHICS_VER(i915) >= 5 || IS_G4X(i915)) { 454 g4x_get_stolen_reserved(i915, uncore, 455 &reserved_base, &reserved_size); 456 } 457 458 /* 459 * Our expectation is that the reserved space is at the top of the 460 * stolen region and *never* at the bottom. If we see !reserved_base, 461 * it likely means we failed to read the registers correctly. 462 */ 463 if (!reserved_base) { 464 drm_err(&i915->drm, 465 "inconsistent reservation %pa + %pa; ignoring\n", 466 &reserved_base, &reserved_size); 467 reserved_base = stolen_top; 468 reserved_size = 0; 469 } 470 471 i915->dsm_reserved = 472 (struct resource)DEFINE_RES_MEM(reserved_base, reserved_size); 473 474 if (!resource_contains(&i915->dsm, &i915->dsm_reserved)) { 475 drm_err(&i915->drm, 476 "Stolen reserved area %pR outside stolen memory %pR\n", 477 &i915->dsm_reserved, &i915->dsm); 478 return 0; 479 } 480 481 /* Exclude the reserved region from driver use */ 482 mem->region.end = reserved_base - 1; 483 mem->io_size = min(mem->io_size, resource_size(&mem->region)); 484 485 /* It is possible for the reserved area to end before the end of stolen 486 * memory, so just consider the start. */ 487 reserved_total = stolen_top - reserved_base; 488 489 i915->stolen_usable_size = 490 resource_size(&i915->dsm) - reserved_total; 491 492 drm_dbg(&i915->drm, 493 "Memory reserved for graphics device: %lluK, usable: %lluK\n", 494 (u64)resource_size(&i915->dsm) >> 10, 495 (u64)i915->stolen_usable_size >> 10); 496 497 if (i915->stolen_usable_size == 0) 498 return 0; 499 500 /* Basic memrange allocator for stolen space. */ 501 drm_mm_init(&i915->mm.stolen, 0, i915->stolen_usable_size); 502 503 return 0; 504 } 505 506 static void dbg_poison(struct i915_ggtt *ggtt, 507 dma_addr_t addr, resource_size_t size, 508 u8 x) 509 { 510 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) 511 if (!drm_mm_node_allocated(&ggtt->error_capture)) 512 return; 513 514 if (ggtt->vm.bind_async_flags & I915_VMA_GLOBAL_BIND) 515 return; /* beware stop_machine() inversion */ 516 517 GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE)); 518 519 mutex_lock(&ggtt->error_mutex); 520 while (size) { 521 void __iomem *s; 522 523 ggtt->vm.insert_page(&ggtt->vm, addr, 524 ggtt->error_capture.start, 525 I915_CACHE_NONE, 0); 526 mb(); 527 528 s = io_mapping_map_wc(&ggtt->iomap, 529 ggtt->error_capture.start, 530 PAGE_SIZE); 531 memset_io(s, x, PAGE_SIZE); 532 io_mapping_unmap(s); 533 534 addr += PAGE_SIZE; 535 size -= PAGE_SIZE; 536 } 537 mb(); 538 ggtt->vm.clear_range(&ggtt->vm, ggtt->error_capture.start, PAGE_SIZE); 539 mutex_unlock(&ggtt->error_mutex); 540 #endif 541 } 542 543 static struct sg_table * 544 i915_pages_create_for_stolen(struct drm_device *dev, 545 resource_size_t offset, resource_size_t size) 546 { 547 struct drm_i915_private *i915 = to_i915(dev); 548 struct sg_table *st; 549 struct scatterlist *sg; 550 551 GEM_BUG_ON(range_overflows(offset, size, resource_size(&i915->dsm))); 552 553 /* We hide that we have no struct page backing our stolen object 554 * by wrapping the contiguous physical allocation with a fake 555 * dma mapping in a single scatterlist. 556 */ 557 558 st = kmalloc(sizeof(*st), GFP_KERNEL); 559 if (st == NULL) 560 return ERR_PTR(-ENOMEM); 561 562 if (sg_alloc_table(st, 1, GFP_KERNEL)) { 563 kfree(st); 564 return ERR_PTR(-ENOMEM); 565 } 566 567 sg = st->sgl; 568 sg->offset = 0; 569 sg->length = size; 570 571 sg_dma_address(sg) = (dma_addr_t)i915->dsm.start + offset; 572 sg_dma_len(sg) = size; 573 574 return st; 575 } 576 577 static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj) 578 { 579 struct drm_i915_private *i915 = to_i915(obj->base.dev); 580 struct sg_table *pages = 581 i915_pages_create_for_stolen(obj->base.dev, 582 obj->stolen->start, 583 obj->stolen->size); 584 if (IS_ERR(pages)) 585 return PTR_ERR(pages); 586 587 dbg_poison(to_gt(i915)->ggtt, 588 sg_dma_address(pages->sgl), 589 sg_dma_len(pages->sgl), 590 POISON_INUSE); 591 592 __i915_gem_object_set_pages(obj, pages, obj->stolen->size); 593 594 return 0; 595 } 596 597 static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj, 598 struct sg_table *pages) 599 { 600 struct drm_i915_private *i915 = to_i915(obj->base.dev); 601 /* Should only be called from i915_gem_object_release_stolen() */ 602 603 dbg_poison(to_gt(i915)->ggtt, 604 sg_dma_address(pages->sgl), 605 sg_dma_len(pages->sgl), 606 POISON_FREE); 607 608 sg_free_table(pages); 609 kfree(pages); 610 } 611 612 static void 613 i915_gem_object_release_stolen(struct drm_i915_gem_object *obj) 614 { 615 struct drm_i915_private *i915 = to_i915(obj->base.dev); 616 struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen); 617 618 GEM_BUG_ON(!stolen); 619 i915_gem_stolen_remove_node(i915, stolen); 620 kfree(stolen); 621 622 i915_gem_object_release_memory_region(obj); 623 } 624 625 static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = { 626 .name = "i915_gem_object_stolen", 627 .get_pages = i915_gem_object_get_pages_stolen, 628 .put_pages = i915_gem_object_put_pages_stolen, 629 .release = i915_gem_object_release_stolen, 630 }; 631 632 static int __i915_gem_object_create_stolen(struct intel_memory_region *mem, 633 struct drm_i915_gem_object *obj, 634 struct drm_mm_node *stolen) 635 { 636 static struct lock_class_key lock_class; 637 unsigned int cache_level; 638 unsigned int flags; 639 int err; 640 641 /* 642 * Stolen objects are always physically contiguous since we just 643 * allocate one big block underneath using the drm_mm range allocator. 644 */ 645 flags = I915_BO_ALLOC_CONTIGUOUS; 646 647 drm_gem_private_object_init(&mem->i915->drm, &obj->base, stolen->size); 648 i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class, flags); 649 650 obj->stolen = stolen; 651 obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT; 652 cache_level = HAS_LLC(mem->i915) ? I915_CACHE_LLC : I915_CACHE_NONE; 653 i915_gem_object_set_cache_coherency(obj, cache_level); 654 655 if (WARN_ON(!i915_gem_object_trylock(obj, NULL))) 656 return -EBUSY; 657 658 i915_gem_object_init_memory_region(obj, mem); 659 660 err = i915_gem_object_pin_pages(obj); 661 if (err) 662 i915_gem_object_release_memory_region(obj); 663 i915_gem_object_unlock(obj); 664 665 return err; 666 } 667 668 static int _i915_gem_object_stolen_init(struct intel_memory_region *mem, 669 struct drm_i915_gem_object *obj, 670 resource_size_t offset, 671 resource_size_t size, 672 resource_size_t page_size, 673 unsigned int flags) 674 { 675 struct drm_i915_private *i915 = mem->i915; 676 struct drm_mm_node *stolen; 677 int ret; 678 679 if (!drm_mm_initialized(&i915->mm.stolen)) 680 return -ENODEV; 681 682 if (size == 0) 683 return -EINVAL; 684 685 /* 686 * With discrete devices, where we lack a mappable aperture there is no 687 * possible way to ever access this memory on the CPU side. 688 */ 689 if (mem->type == INTEL_MEMORY_STOLEN_LOCAL && !mem->io_size && 690 !(flags & I915_BO_ALLOC_GPU_ONLY)) 691 return -ENOSPC; 692 693 stolen = kzalloc(sizeof(*stolen), GFP_KERNEL); 694 if (!stolen) 695 return -ENOMEM; 696 697 if (offset != I915_BO_INVALID_OFFSET) { 698 drm_dbg(&i915->drm, 699 "creating preallocated stolen object: stolen_offset=%pa, size=%pa\n", 700 &offset, &size); 701 702 stolen->start = offset; 703 stolen->size = size; 704 mutex_lock(&i915->mm.stolen_lock); 705 ret = drm_mm_reserve_node(&i915->mm.stolen, stolen); 706 mutex_unlock(&i915->mm.stolen_lock); 707 } else { 708 ret = i915_gem_stolen_insert_node(i915, stolen, size, 709 mem->min_page_size); 710 } 711 if (ret) 712 goto err_free; 713 714 ret = __i915_gem_object_create_stolen(mem, obj, stolen); 715 if (ret) 716 goto err_remove; 717 718 return 0; 719 720 err_remove: 721 i915_gem_stolen_remove_node(i915, stolen); 722 err_free: 723 kfree(stolen); 724 return ret; 725 } 726 727 struct drm_i915_gem_object * 728 i915_gem_object_create_stolen(struct drm_i915_private *i915, 729 resource_size_t size) 730 { 731 return i915_gem_object_create_region(i915->mm.stolen_region, size, 0, 0); 732 } 733 734 static int init_stolen_smem(struct intel_memory_region *mem) 735 { 736 /* 737 * Initialise stolen early so that we may reserve preallocated 738 * objects for the BIOS to KMS transition. 739 */ 740 return i915_gem_init_stolen(mem); 741 } 742 743 static int release_stolen_smem(struct intel_memory_region *mem) 744 { 745 i915_gem_cleanup_stolen(mem->i915); 746 return 0; 747 } 748 749 static const struct intel_memory_region_ops i915_region_stolen_smem_ops = { 750 .init = init_stolen_smem, 751 .release = release_stolen_smem, 752 .init_object = _i915_gem_object_stolen_init, 753 }; 754 755 static int init_stolen_lmem(struct intel_memory_region *mem) 756 { 757 int err; 758 759 if (GEM_WARN_ON(resource_size(&mem->region) == 0)) 760 return -ENODEV; 761 762 /* 763 * TODO: For stolen lmem we mostly just care about populating the dsm 764 * related bits and setting up the drm_mm allocator for the range. 765 * Perhaps split up i915_gem_init_stolen() for this. 766 */ 767 err = i915_gem_init_stolen(mem); 768 if (err) 769 return err; 770 771 if (mem->io_size && !io_mapping_init_wc(&mem->iomap, 772 mem->io_start, 773 mem->io_size)) { 774 err = -EIO; 775 goto err_cleanup; 776 } 777 778 return 0; 779 780 err_cleanup: 781 i915_gem_cleanup_stolen(mem->i915); 782 return err; 783 } 784 785 static int release_stolen_lmem(struct intel_memory_region *mem) 786 { 787 if (mem->io_size) 788 io_mapping_fini(&mem->iomap); 789 i915_gem_cleanup_stolen(mem->i915); 790 return 0; 791 } 792 793 static const struct intel_memory_region_ops i915_region_stolen_lmem_ops = { 794 .init = init_stolen_lmem, 795 .release = release_stolen_lmem, 796 .init_object = _i915_gem_object_stolen_init, 797 }; 798 799 struct intel_memory_region * 800 i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type, 801 u16 instance) 802 { 803 struct intel_uncore *uncore = &i915->uncore; 804 struct pci_dev *pdev = to_pci_dev(i915->drm.dev); 805 resource_size_t dsm_size, dsm_base, lmem_size; 806 struct intel_memory_region *mem; 807 resource_size_t io_start, io_size; 808 resource_size_t min_page_size; 809 810 if (WARN_ON_ONCE(instance)) 811 return ERR_PTR(-ENODEV); 812 813 if (!i915_pci_resource_valid(pdev, GEN12_LMEM_BAR)) 814 return ERR_PTR(-ENXIO); 815 816 /* Use DSM base address instead for stolen memory */ 817 dsm_base = intel_uncore_read64(uncore, GEN12_DSMBASE); 818 if (IS_DG1(uncore->i915)) { 819 lmem_size = pci_resource_len(pdev, GEN12_LMEM_BAR); 820 if (WARN_ON(lmem_size < dsm_base)) 821 return ERR_PTR(-ENODEV); 822 } else { 823 resource_size_t lmem_range; 824 825 lmem_range = intel_gt_mcr_read_any(&i915->gt0, XEHP_TILE0_ADDR_RANGE) & 0xFFFF; 826 lmem_size = lmem_range >> XEHP_TILE_LMEM_RANGE_SHIFT; 827 lmem_size *= SZ_1G; 828 } 829 830 dsm_size = lmem_size - dsm_base; 831 if (pci_resource_len(pdev, GEN12_LMEM_BAR) < lmem_size) { 832 io_start = 0; 833 io_size = 0; 834 } else { 835 io_start = pci_resource_start(pdev, GEN12_LMEM_BAR) + dsm_base; 836 io_size = dsm_size; 837 } 838 839 min_page_size = HAS_64K_PAGES(i915) ? I915_GTT_PAGE_SIZE_64K : 840 I915_GTT_PAGE_SIZE_4K; 841 842 mem = intel_memory_region_create(i915, dsm_base, dsm_size, 843 min_page_size, 844 io_start, io_size, 845 type, instance, 846 &i915_region_stolen_lmem_ops); 847 if (IS_ERR(mem)) 848 return mem; 849 850 /* 851 * TODO: consider creating common helper to just print all the 852 * interesting stuff from intel_memory_region, which we can use for all 853 * our probed regions. 854 */ 855 856 drm_dbg(&i915->drm, "Stolen Local memory IO start: %pa\n", 857 &mem->io_start); 858 drm_dbg(&i915->drm, "Stolen Local DSM base: %pa\n", &dsm_base); 859 860 intel_memory_region_set_name(mem, "stolen-local"); 861 862 mem->private = true; 863 864 return mem; 865 } 866 867 struct intel_memory_region* 868 i915_gem_stolen_smem_setup(struct drm_i915_private *i915, u16 type, 869 u16 instance) 870 { 871 struct intel_memory_region *mem; 872 873 mem = intel_memory_region_create(i915, 874 intel_graphics_stolen_res.start, 875 resource_size(&intel_graphics_stolen_res), 876 PAGE_SIZE, 0, 0, type, instance, 877 &i915_region_stolen_smem_ops); 878 if (IS_ERR(mem)) 879 return mem; 880 881 intel_memory_region_set_name(mem, "stolen-system"); 882 883 mem->private = true; 884 return mem; 885 } 886 887 bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj) 888 { 889 return obj->ops == &i915_gem_object_stolen_ops; 890 } 891