1 /* 2 * SPDX-License-Identifier: MIT 3 * 4 * Copyright © 2008-2012 Intel Corporation 5 */ 6 7 #include <linux/errno.h> 8 #include <linux/mutex.h> 9 10 #include <drm/drm_mm.h> 11 #include <drm/i915_drm.h> 12 13 #include "gem/i915_gem_lmem.h" 14 #include "gem/i915_gem_region.h" 15 #include "i915_drv.h" 16 #include "i915_gem_stolen.h" 17 #include "i915_reg.h" 18 #include "i915_vgpu.h" 19 #include "intel_mchbar_regs.h" 20 21 /* 22 * The BIOS typically reserves some of the system's memory for the exclusive 23 * use of the integrated graphics. This memory is no longer available for 24 * use by the OS and so the user finds that his system has less memory 25 * available than he put in. We refer to this memory as stolen. 26 * 27 * The BIOS will allocate its framebuffer from the stolen memory. Our 28 * goal is try to reuse that object for our own fbcon which must always 29 * be available for panics. Anything else we can reuse the stolen memory 30 * for is a boon. 31 */ 32 33 int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915, 34 struct drm_mm_node *node, u64 size, 35 unsigned alignment, u64 start, u64 end) 36 { 37 int ret; 38 39 if (!drm_mm_initialized(&i915->mm.stolen)) 40 return -ENODEV; 41 42 /* WaSkipStolenMemoryFirstPage:bdw+ */ 43 if (GRAPHICS_VER(i915) >= 8 && start < 4096) 44 start = 4096; 45 46 mutex_lock(&i915->mm.stolen_lock); 47 ret = drm_mm_insert_node_in_range(&i915->mm.stolen, node, 48 size, alignment, 0, 49 start, end, DRM_MM_INSERT_BEST); 50 mutex_unlock(&i915->mm.stolen_lock); 51 52 return ret; 53 } 54 55 int i915_gem_stolen_insert_node(struct drm_i915_private *i915, 56 struct drm_mm_node *node, u64 size, 57 unsigned alignment) 58 { 59 return i915_gem_stolen_insert_node_in_range(i915, node, 60 size, alignment, 61 I915_GEM_STOLEN_BIAS, 62 U64_MAX); 63 } 64 65 void i915_gem_stolen_remove_node(struct drm_i915_private *i915, 66 struct drm_mm_node *node) 67 { 68 mutex_lock(&i915->mm.stolen_lock); 69 drm_mm_remove_node(node); 70 mutex_unlock(&i915->mm.stolen_lock); 71 } 72 73 static int i915_adjust_stolen(struct drm_i915_private *i915, 74 struct resource *dsm) 75 { 76 struct i915_ggtt *ggtt = to_gt(i915)->ggtt; 77 struct intel_uncore *uncore = ggtt->vm.gt->uncore; 78 struct resource *r; 79 80 if (dsm->start == 0 || dsm->end <= dsm->start) 81 return -EINVAL; 82 83 /* 84 * TODO: We have yet too encounter the case where the GTT wasn't at the 85 * end of stolen. With that assumption we could simplify this. 86 */ 87 88 /* Make sure we don't clobber the GTT if it's within stolen memory */ 89 if (GRAPHICS_VER(i915) <= 4 && 90 !IS_G33(i915) && !IS_PINEVIEW(i915) && !IS_G4X(i915)) { 91 struct resource stolen[2] = {*dsm, *dsm}; 92 struct resource ggtt_res; 93 resource_size_t ggtt_start; 94 95 ggtt_start = intel_uncore_read(uncore, PGTBL_CTL); 96 if (GRAPHICS_VER(i915) == 4) 97 ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) | 98 (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28; 99 else 100 ggtt_start &= PGTBL_ADDRESS_LO_MASK; 101 102 ggtt_res = 103 (struct resource) DEFINE_RES_MEM(ggtt_start, 104 ggtt_total_entries(ggtt) * 4); 105 106 if (ggtt_res.start >= stolen[0].start && ggtt_res.start < stolen[0].end) 107 stolen[0].end = ggtt_res.start; 108 if (ggtt_res.end > stolen[1].start && ggtt_res.end <= stolen[1].end) 109 stolen[1].start = ggtt_res.end; 110 111 /* Pick the larger of the two chunks */ 112 if (resource_size(&stolen[0]) > resource_size(&stolen[1])) 113 *dsm = stolen[0]; 114 else 115 *dsm = stolen[1]; 116 117 if (stolen[0].start != stolen[1].start || 118 stolen[0].end != stolen[1].end) { 119 drm_dbg(&i915->drm, 120 "GTT within stolen memory at %pR\n", 121 &ggtt_res); 122 drm_dbg(&i915->drm, "Stolen memory adjusted to %pR\n", 123 dsm); 124 } 125 } 126 127 /* 128 * With stolen lmem, we don't need to check if the address range 129 * overlaps with the non-stolen system memory range, since lmem is local 130 * to the gpu. 131 */ 132 if (HAS_LMEM(i915)) 133 return 0; 134 135 /* 136 * Verify that nothing else uses this physical address. Stolen 137 * memory should be reserved by the BIOS and hidden from the 138 * kernel. So if the region is already marked as busy, something 139 * is seriously wrong. 140 */ 141 r = devm_request_mem_region(i915->drm.dev, dsm->start, 142 resource_size(dsm), 143 "Graphics Stolen Memory"); 144 if (r == NULL) { 145 /* 146 * One more attempt but this time requesting region from 147 * start + 1, as we have seen that this resolves the region 148 * conflict with the PCI Bus. 149 * This is a BIOS w/a: Some BIOS wrap stolen in the root 150 * PCI bus, but have an off-by-one error. Hence retry the 151 * reservation starting from 1 instead of 0. 152 * There's also BIOS with off-by-one on the other end. 153 */ 154 r = devm_request_mem_region(i915->drm.dev, dsm->start + 1, 155 resource_size(dsm) - 2, 156 "Graphics Stolen Memory"); 157 /* 158 * GEN3 firmware likes to smash pci bridges into the stolen 159 * range. Apparently this works. 160 */ 161 if (!r && GRAPHICS_VER(i915) != 3) { 162 drm_err(&i915->drm, 163 "conflict detected with stolen region: %pR\n", 164 dsm); 165 166 return -EBUSY; 167 } 168 } 169 170 return 0; 171 } 172 173 static void i915_gem_cleanup_stolen(struct drm_i915_private *i915) 174 { 175 if (!drm_mm_initialized(&i915->mm.stolen)) 176 return; 177 178 drm_mm_takedown(&i915->mm.stolen); 179 } 180 181 static void g4x_get_stolen_reserved(struct drm_i915_private *i915, 182 struct intel_uncore *uncore, 183 resource_size_t *base, 184 resource_size_t *size) 185 { 186 u32 reg_val = intel_uncore_read(uncore, 187 IS_GM45(i915) ? 188 CTG_STOLEN_RESERVED : 189 ELK_STOLEN_RESERVED); 190 resource_size_t stolen_top = i915->dsm.end + 1; 191 192 drm_dbg(&i915->drm, "%s_STOLEN_RESERVED = %08x\n", 193 IS_GM45(i915) ? "CTG" : "ELK", reg_val); 194 195 if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0) 196 return; 197 198 /* 199 * Whether ILK really reuses the ELK register for this is unclear. 200 * Let's see if we catch anyone with this supposedly enabled on ILK. 201 */ 202 drm_WARN(&i915->drm, GRAPHICS_VER(i915) == 5, 203 "ILK stolen reserved found? 0x%08x\n", 204 reg_val); 205 206 if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK)) 207 return; 208 209 *base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16; 210 drm_WARN_ON(&i915->drm, 211 (reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base); 212 213 *size = stolen_top - *base; 214 } 215 216 static void gen6_get_stolen_reserved(struct drm_i915_private *i915, 217 struct intel_uncore *uncore, 218 resource_size_t *base, 219 resource_size_t *size) 220 { 221 u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED); 222 223 drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val); 224 225 if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE)) 226 return; 227 228 *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK; 229 230 switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) { 231 case GEN6_STOLEN_RESERVED_1M: 232 *size = 1024 * 1024; 233 break; 234 case GEN6_STOLEN_RESERVED_512K: 235 *size = 512 * 1024; 236 break; 237 case GEN6_STOLEN_RESERVED_256K: 238 *size = 256 * 1024; 239 break; 240 case GEN6_STOLEN_RESERVED_128K: 241 *size = 128 * 1024; 242 break; 243 default: 244 *size = 1024 * 1024; 245 MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK); 246 } 247 } 248 249 static void vlv_get_stolen_reserved(struct drm_i915_private *i915, 250 struct intel_uncore *uncore, 251 resource_size_t *base, 252 resource_size_t *size) 253 { 254 u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED); 255 resource_size_t stolen_top = i915->dsm.end + 1; 256 257 drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val); 258 259 if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE)) 260 return; 261 262 switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) { 263 default: 264 MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK); 265 fallthrough; 266 case GEN7_STOLEN_RESERVED_1M: 267 *size = 1024 * 1024; 268 break; 269 } 270 271 /* 272 * On vlv, the ADDR_MASK portion is left as 0 and HW deduces the 273 * reserved location as (top - size). 274 */ 275 *base = stolen_top - *size; 276 } 277 278 static void gen7_get_stolen_reserved(struct drm_i915_private *i915, 279 struct intel_uncore *uncore, 280 resource_size_t *base, 281 resource_size_t *size) 282 { 283 u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED); 284 285 drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val); 286 287 if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE)) 288 return; 289 290 *base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK; 291 292 switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) { 293 case GEN7_STOLEN_RESERVED_1M: 294 *size = 1024 * 1024; 295 break; 296 case GEN7_STOLEN_RESERVED_256K: 297 *size = 256 * 1024; 298 break; 299 default: 300 *size = 1024 * 1024; 301 MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK); 302 } 303 } 304 305 static void chv_get_stolen_reserved(struct drm_i915_private *i915, 306 struct intel_uncore *uncore, 307 resource_size_t *base, 308 resource_size_t *size) 309 { 310 u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED); 311 312 drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val); 313 314 if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE)) 315 return; 316 317 *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK; 318 319 switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) { 320 case GEN8_STOLEN_RESERVED_1M: 321 *size = 1024 * 1024; 322 break; 323 case GEN8_STOLEN_RESERVED_2M: 324 *size = 2 * 1024 * 1024; 325 break; 326 case GEN8_STOLEN_RESERVED_4M: 327 *size = 4 * 1024 * 1024; 328 break; 329 case GEN8_STOLEN_RESERVED_8M: 330 *size = 8 * 1024 * 1024; 331 break; 332 default: 333 *size = 8 * 1024 * 1024; 334 MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK); 335 } 336 } 337 338 static void bdw_get_stolen_reserved(struct drm_i915_private *i915, 339 struct intel_uncore *uncore, 340 resource_size_t *base, 341 resource_size_t *size) 342 { 343 u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED); 344 resource_size_t stolen_top = i915->dsm.end + 1; 345 346 drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val); 347 348 if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE)) 349 return; 350 351 if (!(reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK)) 352 return; 353 354 *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK; 355 *size = stolen_top - *base; 356 } 357 358 static void icl_get_stolen_reserved(struct drm_i915_private *i915, 359 struct intel_uncore *uncore, 360 resource_size_t *base, 361 resource_size_t *size) 362 { 363 u64 reg_val = intel_uncore_read64(uncore, GEN6_STOLEN_RESERVED); 364 365 drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val); 366 367 *base = reg_val & GEN11_STOLEN_RESERVED_ADDR_MASK; 368 369 switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) { 370 case GEN8_STOLEN_RESERVED_1M: 371 *size = 1024 * 1024; 372 break; 373 case GEN8_STOLEN_RESERVED_2M: 374 *size = 2 * 1024 * 1024; 375 break; 376 case GEN8_STOLEN_RESERVED_4M: 377 *size = 4 * 1024 * 1024; 378 break; 379 case GEN8_STOLEN_RESERVED_8M: 380 *size = 8 * 1024 * 1024; 381 break; 382 default: 383 *size = 8 * 1024 * 1024; 384 MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK); 385 } 386 } 387 388 static int i915_gem_init_stolen(struct intel_memory_region *mem) 389 { 390 struct drm_i915_private *i915 = mem->i915; 391 struct intel_uncore *uncore = &i915->uncore; 392 resource_size_t reserved_base, stolen_top; 393 resource_size_t reserved_total, reserved_size; 394 395 mutex_init(&i915->mm.stolen_lock); 396 397 if (intel_vgpu_active(i915)) { 398 drm_notice(&i915->drm, 399 "%s, disabling use of stolen memory\n", 400 "iGVT-g active"); 401 return 0; 402 } 403 404 if (intel_vtd_active(i915) && GRAPHICS_VER(i915) < 8) { 405 drm_notice(&i915->drm, 406 "%s, disabling use of stolen memory\n", 407 "DMAR active"); 408 return 0; 409 } 410 411 if (resource_size(&mem->region) == 0) 412 return 0; 413 414 i915->dsm = mem->region; 415 416 if (i915_adjust_stolen(i915, &i915->dsm)) 417 return 0; 418 419 GEM_BUG_ON(i915->dsm.start == 0); 420 GEM_BUG_ON(i915->dsm.end <= i915->dsm.start); 421 422 stolen_top = i915->dsm.end + 1; 423 reserved_base = stolen_top; 424 reserved_size = 0; 425 426 switch (GRAPHICS_VER(i915)) { 427 case 2: 428 case 3: 429 break; 430 case 4: 431 if (!IS_G4X(i915)) 432 break; 433 fallthrough; 434 case 5: 435 g4x_get_stolen_reserved(i915, uncore, 436 &reserved_base, &reserved_size); 437 break; 438 case 6: 439 gen6_get_stolen_reserved(i915, uncore, 440 &reserved_base, &reserved_size); 441 break; 442 case 7: 443 if (IS_VALLEYVIEW(i915)) 444 vlv_get_stolen_reserved(i915, uncore, 445 &reserved_base, &reserved_size); 446 else 447 gen7_get_stolen_reserved(i915, uncore, 448 &reserved_base, &reserved_size); 449 break; 450 case 8: 451 case 9: 452 if (IS_LP(i915)) 453 chv_get_stolen_reserved(i915, uncore, 454 &reserved_base, &reserved_size); 455 else 456 bdw_get_stolen_reserved(i915, uncore, 457 &reserved_base, &reserved_size); 458 break; 459 default: 460 MISSING_CASE(GRAPHICS_VER(i915)); 461 fallthrough; 462 case 11: 463 case 12: 464 icl_get_stolen_reserved(i915, uncore, 465 &reserved_base, 466 &reserved_size); 467 break; 468 } 469 470 /* 471 * Our expectation is that the reserved space is at the top of the 472 * stolen region and *never* at the bottom. If we see !reserved_base, 473 * it likely means we failed to read the registers correctly. 474 */ 475 if (!reserved_base) { 476 drm_err(&i915->drm, 477 "inconsistent reservation %pa + %pa; ignoring\n", 478 &reserved_base, &reserved_size); 479 reserved_base = stolen_top; 480 reserved_size = 0; 481 } 482 483 i915->dsm_reserved = 484 (struct resource)DEFINE_RES_MEM(reserved_base, reserved_size); 485 486 if (!resource_contains(&i915->dsm, &i915->dsm_reserved)) { 487 drm_err(&i915->drm, 488 "Stolen reserved area %pR outside stolen memory %pR\n", 489 &i915->dsm_reserved, &i915->dsm); 490 return 0; 491 } 492 493 /* Exclude the reserved region from driver use */ 494 mem->region.end = reserved_base - 1; 495 496 /* It is possible for the reserved area to end before the end of stolen 497 * memory, so just consider the start. */ 498 reserved_total = stolen_top - reserved_base; 499 500 drm_dbg(&i915->drm, 501 "Memory reserved for graphics device: %lluK, usable: %lluK\n", 502 (u64)resource_size(&i915->dsm) >> 10, 503 ((u64)resource_size(&i915->dsm) - reserved_total) >> 10); 504 505 i915->stolen_usable_size = 506 resource_size(&i915->dsm) - reserved_total; 507 508 /* Basic memrange allocator for stolen space. */ 509 drm_mm_init(&i915->mm.stolen, 0, i915->stolen_usable_size); 510 511 return 0; 512 } 513 514 static void dbg_poison(struct i915_ggtt *ggtt, 515 dma_addr_t addr, resource_size_t size, 516 u8 x) 517 { 518 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) 519 if (!drm_mm_node_allocated(&ggtt->error_capture)) 520 return; 521 522 if (ggtt->vm.bind_async_flags & I915_VMA_GLOBAL_BIND) 523 return; /* beware stop_machine() inversion */ 524 525 GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE)); 526 527 mutex_lock(&ggtt->error_mutex); 528 while (size) { 529 void __iomem *s; 530 531 ggtt->vm.insert_page(&ggtt->vm, addr, 532 ggtt->error_capture.start, 533 I915_CACHE_NONE, 0); 534 mb(); 535 536 s = io_mapping_map_wc(&ggtt->iomap, 537 ggtt->error_capture.start, 538 PAGE_SIZE); 539 memset_io(s, x, PAGE_SIZE); 540 io_mapping_unmap(s); 541 542 addr += PAGE_SIZE; 543 size -= PAGE_SIZE; 544 } 545 mb(); 546 ggtt->vm.clear_range(&ggtt->vm, ggtt->error_capture.start, PAGE_SIZE); 547 mutex_unlock(&ggtt->error_mutex); 548 #endif 549 } 550 551 static struct sg_table * 552 i915_pages_create_for_stolen(struct drm_device *dev, 553 resource_size_t offset, resource_size_t size) 554 { 555 struct drm_i915_private *i915 = to_i915(dev); 556 struct sg_table *st; 557 struct scatterlist *sg; 558 559 GEM_BUG_ON(range_overflows(offset, size, resource_size(&i915->dsm))); 560 561 /* We hide that we have no struct page backing our stolen object 562 * by wrapping the contiguous physical allocation with a fake 563 * dma mapping in a single scatterlist. 564 */ 565 566 st = kmalloc(sizeof(*st), GFP_KERNEL); 567 if (st == NULL) 568 return ERR_PTR(-ENOMEM); 569 570 if (sg_alloc_table(st, 1, GFP_KERNEL)) { 571 kfree(st); 572 return ERR_PTR(-ENOMEM); 573 } 574 575 sg = st->sgl; 576 sg->offset = 0; 577 sg->length = size; 578 579 sg_dma_address(sg) = (dma_addr_t)i915->dsm.start + offset; 580 sg_dma_len(sg) = size; 581 582 return st; 583 } 584 585 static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj) 586 { 587 struct drm_i915_private *i915 = to_i915(obj->base.dev); 588 struct sg_table *pages = 589 i915_pages_create_for_stolen(obj->base.dev, 590 obj->stolen->start, 591 obj->stolen->size); 592 if (IS_ERR(pages)) 593 return PTR_ERR(pages); 594 595 dbg_poison(to_gt(i915)->ggtt, 596 sg_dma_address(pages->sgl), 597 sg_dma_len(pages->sgl), 598 POISON_INUSE); 599 600 __i915_gem_object_set_pages(obj, pages, obj->stolen->size); 601 602 return 0; 603 } 604 605 static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj, 606 struct sg_table *pages) 607 { 608 struct drm_i915_private *i915 = to_i915(obj->base.dev); 609 /* Should only be called from i915_gem_object_release_stolen() */ 610 611 dbg_poison(to_gt(i915)->ggtt, 612 sg_dma_address(pages->sgl), 613 sg_dma_len(pages->sgl), 614 POISON_FREE); 615 616 sg_free_table(pages); 617 kfree(pages); 618 } 619 620 static void 621 i915_gem_object_release_stolen(struct drm_i915_gem_object *obj) 622 { 623 struct drm_i915_private *i915 = to_i915(obj->base.dev); 624 struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen); 625 626 GEM_BUG_ON(!stolen); 627 i915_gem_stolen_remove_node(i915, stolen); 628 kfree(stolen); 629 630 i915_gem_object_release_memory_region(obj); 631 } 632 633 static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = { 634 .name = "i915_gem_object_stolen", 635 .get_pages = i915_gem_object_get_pages_stolen, 636 .put_pages = i915_gem_object_put_pages_stolen, 637 .release = i915_gem_object_release_stolen, 638 }; 639 640 static int __i915_gem_object_create_stolen(struct intel_memory_region *mem, 641 struct drm_i915_gem_object *obj, 642 struct drm_mm_node *stolen) 643 { 644 static struct lock_class_key lock_class; 645 unsigned int cache_level; 646 unsigned int flags; 647 int err; 648 649 /* 650 * Stolen objects are always physically contiguous since we just 651 * allocate one big block underneath using the drm_mm range allocator. 652 */ 653 flags = I915_BO_ALLOC_CONTIGUOUS; 654 655 drm_gem_private_object_init(&mem->i915->drm, &obj->base, stolen->size); 656 i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class, flags); 657 658 obj->stolen = stolen; 659 obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT; 660 cache_level = HAS_LLC(mem->i915) ? I915_CACHE_LLC : I915_CACHE_NONE; 661 i915_gem_object_set_cache_coherency(obj, cache_level); 662 663 if (WARN_ON(!i915_gem_object_trylock(obj, NULL))) 664 return -EBUSY; 665 666 i915_gem_object_init_memory_region(obj, mem); 667 668 err = i915_gem_object_pin_pages(obj); 669 if (err) 670 i915_gem_object_release_memory_region(obj); 671 i915_gem_object_unlock(obj); 672 673 return err; 674 } 675 676 static int _i915_gem_object_stolen_init(struct intel_memory_region *mem, 677 struct drm_i915_gem_object *obj, 678 resource_size_t size, 679 resource_size_t page_size, 680 unsigned int flags) 681 { 682 struct drm_i915_private *i915 = mem->i915; 683 struct drm_mm_node *stolen; 684 int ret; 685 686 if (!drm_mm_initialized(&i915->mm.stolen)) 687 return -ENODEV; 688 689 if (size == 0) 690 return -EINVAL; 691 692 stolen = kzalloc(sizeof(*stolen), GFP_KERNEL); 693 if (!stolen) 694 return -ENOMEM; 695 696 ret = i915_gem_stolen_insert_node(i915, stolen, size, 697 mem->min_page_size); 698 if (ret) 699 goto err_free; 700 701 ret = __i915_gem_object_create_stolen(mem, obj, stolen); 702 if (ret) 703 goto err_remove; 704 705 return 0; 706 707 err_remove: 708 i915_gem_stolen_remove_node(i915, stolen); 709 err_free: 710 kfree(stolen); 711 return ret; 712 } 713 714 struct drm_i915_gem_object * 715 i915_gem_object_create_stolen(struct drm_i915_private *i915, 716 resource_size_t size) 717 { 718 return i915_gem_object_create_region(i915->mm.stolen_region, size, 0, 0); 719 } 720 721 static int init_stolen_smem(struct intel_memory_region *mem) 722 { 723 /* 724 * Initialise stolen early so that we may reserve preallocated 725 * objects for the BIOS to KMS transition. 726 */ 727 return i915_gem_init_stolen(mem); 728 } 729 730 static int release_stolen_smem(struct intel_memory_region *mem) 731 { 732 i915_gem_cleanup_stolen(mem->i915); 733 return 0; 734 } 735 736 static const struct intel_memory_region_ops i915_region_stolen_smem_ops = { 737 .init = init_stolen_smem, 738 .release = release_stolen_smem, 739 .init_object = _i915_gem_object_stolen_init, 740 }; 741 742 static int init_stolen_lmem(struct intel_memory_region *mem) 743 { 744 int err; 745 746 if (GEM_WARN_ON(resource_size(&mem->region) == 0)) 747 return -ENODEV; 748 749 if (!io_mapping_init_wc(&mem->iomap, 750 mem->io_start, 751 resource_size(&mem->region))) 752 return -EIO; 753 754 /* 755 * TODO: For stolen lmem we mostly just care about populating the dsm 756 * related bits and setting up the drm_mm allocator for the range. 757 * Perhaps split up i915_gem_init_stolen() for this. 758 */ 759 err = i915_gem_init_stolen(mem); 760 if (err) 761 goto err_fini; 762 763 return 0; 764 765 err_fini: 766 io_mapping_fini(&mem->iomap); 767 return err; 768 } 769 770 static int release_stolen_lmem(struct intel_memory_region *mem) 771 { 772 io_mapping_fini(&mem->iomap); 773 i915_gem_cleanup_stolen(mem->i915); 774 return 0; 775 } 776 777 static const struct intel_memory_region_ops i915_region_stolen_lmem_ops = { 778 .init = init_stolen_lmem, 779 .release = release_stolen_lmem, 780 .init_object = _i915_gem_object_stolen_init, 781 }; 782 783 struct intel_memory_region * 784 i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type, 785 u16 instance) 786 { 787 struct intel_uncore *uncore = &i915->uncore; 788 struct pci_dev *pdev = to_pci_dev(i915->drm.dev); 789 struct intel_memory_region *mem; 790 resource_size_t min_page_size; 791 resource_size_t io_start; 792 resource_size_t lmem_size; 793 u64 lmem_base; 794 795 lmem_base = intel_uncore_read64(uncore, GEN12_DSMBASE); 796 if (GEM_WARN_ON(lmem_base >= pci_resource_len(pdev, 2))) 797 return ERR_PTR(-ENODEV); 798 799 lmem_size = pci_resource_len(pdev, 2) - lmem_base; 800 io_start = pci_resource_start(pdev, 2) + lmem_base; 801 802 min_page_size = HAS_64K_PAGES(i915) ? I915_GTT_PAGE_SIZE_64K : 803 I915_GTT_PAGE_SIZE_4K; 804 805 mem = intel_memory_region_create(i915, lmem_base, lmem_size, 806 min_page_size, io_start, 807 type, instance, 808 &i915_region_stolen_lmem_ops); 809 if (IS_ERR(mem)) 810 return mem; 811 812 /* 813 * TODO: consider creating common helper to just print all the 814 * interesting stuff from intel_memory_region, which we can use for all 815 * our probed regions. 816 */ 817 818 drm_dbg(&i915->drm, "Stolen Local memory IO start: %pa\n", 819 &mem->io_start); 820 821 intel_memory_region_set_name(mem, "stolen-local"); 822 823 mem->private = true; 824 825 return mem; 826 } 827 828 struct intel_memory_region* 829 i915_gem_stolen_smem_setup(struct drm_i915_private *i915, u16 type, 830 u16 instance) 831 { 832 struct intel_memory_region *mem; 833 834 mem = intel_memory_region_create(i915, 835 intel_graphics_stolen_res.start, 836 resource_size(&intel_graphics_stolen_res), 837 PAGE_SIZE, 0, type, instance, 838 &i915_region_stolen_smem_ops); 839 if (IS_ERR(mem)) 840 return mem; 841 842 intel_memory_region_set_name(mem, "stolen-system"); 843 844 mem->private = true; 845 return mem; 846 } 847 848 struct drm_i915_gem_object * 849 i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *i915, 850 resource_size_t stolen_offset, 851 resource_size_t size) 852 { 853 struct intel_memory_region *mem = i915->mm.stolen_region; 854 struct drm_i915_gem_object *obj; 855 struct drm_mm_node *stolen; 856 int ret; 857 858 if (!drm_mm_initialized(&i915->mm.stolen)) 859 return ERR_PTR(-ENODEV); 860 861 drm_dbg(&i915->drm, 862 "creating preallocated stolen object: stolen_offset=%pa, size=%pa\n", 863 &stolen_offset, &size); 864 865 /* KISS and expect everything to be page-aligned */ 866 if (GEM_WARN_ON(size == 0) || 867 GEM_WARN_ON(!IS_ALIGNED(size, mem->min_page_size)) || 868 GEM_WARN_ON(!IS_ALIGNED(stolen_offset, mem->min_page_size))) 869 return ERR_PTR(-EINVAL); 870 871 stolen = kzalloc(sizeof(*stolen), GFP_KERNEL); 872 if (!stolen) 873 return ERR_PTR(-ENOMEM); 874 875 stolen->start = stolen_offset; 876 stolen->size = size; 877 mutex_lock(&i915->mm.stolen_lock); 878 ret = drm_mm_reserve_node(&i915->mm.stolen, stolen); 879 mutex_unlock(&i915->mm.stolen_lock); 880 if (ret) 881 goto err_free; 882 883 obj = i915_gem_object_alloc(); 884 if (!obj) { 885 ret = -ENOMEM; 886 goto err_stolen; 887 } 888 889 ret = __i915_gem_object_create_stolen(mem, obj, stolen); 890 if (ret) 891 goto err_object_free; 892 893 i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE); 894 return obj; 895 896 err_object_free: 897 i915_gem_object_free(obj); 898 err_stolen: 899 i915_gem_stolen_remove_node(i915, stolen); 900 err_free: 901 kfree(stolen); 902 return ERR_PTR(ret); 903 } 904 905 bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj) 906 { 907 return obj->ops == &i915_gem_object_stolen_ops; 908 } 909