1 /* 2 * SPDX-License-Identifier: MIT 3 * 4 * Copyright © 2008-2012 Intel Corporation 5 */ 6 7 #include <linux/errno.h> 8 #include <linux/mutex.h> 9 10 #include <drm/drm_mm.h> 11 #include <drm/i915_drm.h> 12 13 #include "gem/i915_gem_lmem.h" 14 #include "gem/i915_gem_region.h" 15 #include "i915_drv.h" 16 #include "i915_gem_stolen.h" 17 #include "i915_reg.h" 18 #include "i915_utils.h" 19 #include "i915_vgpu.h" 20 #include "intel_mchbar_regs.h" 21 22 /* 23 * The BIOS typically reserves some of the system's memory for the exclusive 24 * use of the integrated graphics. This memory is no longer available for 25 * use by the OS and so the user finds that his system has less memory 26 * available than he put in. We refer to this memory as stolen. 27 * 28 * The BIOS will allocate its framebuffer from the stolen memory. Our 29 * goal is try to reuse that object for our own fbcon which must always 30 * be available for panics. Anything else we can reuse the stolen memory 31 * for is a boon. 32 */ 33 34 int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915, 35 struct drm_mm_node *node, u64 size, 36 unsigned alignment, u64 start, u64 end) 37 { 38 int ret; 39 40 if (!drm_mm_initialized(&i915->mm.stolen)) 41 return -ENODEV; 42 43 /* WaSkipStolenMemoryFirstPage:bdw+ */ 44 if (GRAPHICS_VER(i915) >= 8 && start < 4096) 45 start = 4096; 46 47 mutex_lock(&i915->mm.stolen_lock); 48 ret = drm_mm_insert_node_in_range(&i915->mm.stolen, node, 49 size, alignment, 0, 50 start, end, DRM_MM_INSERT_BEST); 51 mutex_unlock(&i915->mm.stolen_lock); 52 53 return ret; 54 } 55 56 int i915_gem_stolen_insert_node(struct drm_i915_private *i915, 57 struct drm_mm_node *node, u64 size, 58 unsigned alignment) 59 { 60 return i915_gem_stolen_insert_node_in_range(i915, node, 61 size, alignment, 62 I915_GEM_STOLEN_BIAS, 63 U64_MAX); 64 } 65 66 void i915_gem_stolen_remove_node(struct drm_i915_private *i915, 67 struct drm_mm_node *node) 68 { 69 mutex_lock(&i915->mm.stolen_lock); 70 drm_mm_remove_node(node); 71 mutex_unlock(&i915->mm.stolen_lock); 72 } 73 74 static int i915_adjust_stolen(struct drm_i915_private *i915, 75 struct resource *dsm) 76 { 77 struct i915_ggtt *ggtt = to_gt(i915)->ggtt; 78 struct intel_uncore *uncore = ggtt->vm.gt->uncore; 79 struct resource *r; 80 81 if (dsm->start == 0 || dsm->end <= dsm->start) 82 return -EINVAL; 83 84 /* 85 * TODO: We have yet too encounter the case where the GTT wasn't at the 86 * end of stolen. With that assumption we could simplify this. 87 */ 88 89 /* Make sure we don't clobber the GTT if it's within stolen memory */ 90 if (GRAPHICS_VER(i915) <= 4 && 91 !IS_G33(i915) && !IS_PINEVIEW(i915) && !IS_G4X(i915)) { 92 struct resource stolen[2] = {*dsm, *dsm}; 93 struct resource ggtt_res; 94 resource_size_t ggtt_start; 95 96 ggtt_start = intel_uncore_read(uncore, PGTBL_CTL); 97 if (GRAPHICS_VER(i915) == 4) 98 ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) | 99 (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28; 100 else 101 ggtt_start &= PGTBL_ADDRESS_LO_MASK; 102 103 ggtt_res = 104 (struct resource) DEFINE_RES_MEM(ggtt_start, 105 ggtt_total_entries(ggtt) * 4); 106 107 if (ggtt_res.start >= stolen[0].start && ggtt_res.start < stolen[0].end) 108 stolen[0].end = ggtt_res.start; 109 if (ggtt_res.end > stolen[1].start && ggtt_res.end <= stolen[1].end) 110 stolen[1].start = ggtt_res.end; 111 112 /* Pick the larger of the two chunks */ 113 if (resource_size(&stolen[0]) > resource_size(&stolen[1])) 114 *dsm = stolen[0]; 115 else 116 *dsm = stolen[1]; 117 118 if (stolen[0].start != stolen[1].start || 119 stolen[0].end != stolen[1].end) { 120 drm_dbg(&i915->drm, 121 "GTT within stolen memory at %pR\n", 122 &ggtt_res); 123 drm_dbg(&i915->drm, "Stolen memory adjusted to %pR\n", 124 dsm); 125 } 126 } 127 128 /* 129 * With stolen lmem, we don't need to check if the address range 130 * overlaps with the non-stolen system memory range, since lmem is local 131 * to the gpu. 132 */ 133 if (HAS_LMEM(i915)) 134 return 0; 135 136 /* 137 * Verify that nothing else uses this physical address. Stolen 138 * memory should be reserved by the BIOS and hidden from the 139 * kernel. So if the region is already marked as busy, something 140 * is seriously wrong. 141 */ 142 r = devm_request_mem_region(i915->drm.dev, dsm->start, 143 resource_size(dsm), 144 "Graphics Stolen Memory"); 145 if (r == NULL) { 146 /* 147 * One more attempt but this time requesting region from 148 * start + 1, as we have seen that this resolves the region 149 * conflict with the PCI Bus. 150 * This is a BIOS w/a: Some BIOS wrap stolen in the root 151 * PCI bus, but have an off-by-one error. Hence retry the 152 * reservation starting from 1 instead of 0. 153 * There's also BIOS with off-by-one on the other end. 154 */ 155 r = devm_request_mem_region(i915->drm.dev, dsm->start + 1, 156 resource_size(dsm) - 2, 157 "Graphics Stolen Memory"); 158 /* 159 * GEN3 firmware likes to smash pci bridges into the stolen 160 * range. Apparently this works. 161 */ 162 if (!r && GRAPHICS_VER(i915) != 3) { 163 drm_err(&i915->drm, 164 "conflict detected with stolen region: %pR\n", 165 dsm); 166 167 return -EBUSY; 168 } 169 } 170 171 return 0; 172 } 173 174 static void i915_gem_cleanup_stolen(struct drm_i915_private *i915) 175 { 176 if (!drm_mm_initialized(&i915->mm.stolen)) 177 return; 178 179 drm_mm_takedown(&i915->mm.stolen); 180 } 181 182 static void g4x_get_stolen_reserved(struct drm_i915_private *i915, 183 struct intel_uncore *uncore, 184 resource_size_t *base, 185 resource_size_t *size) 186 { 187 u32 reg_val = intel_uncore_read(uncore, 188 IS_GM45(i915) ? 189 CTG_STOLEN_RESERVED : 190 ELK_STOLEN_RESERVED); 191 resource_size_t stolen_top = i915->dsm.end + 1; 192 193 drm_dbg(&i915->drm, "%s_STOLEN_RESERVED = %08x\n", 194 IS_GM45(i915) ? "CTG" : "ELK", reg_val); 195 196 if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0) 197 return; 198 199 /* 200 * Whether ILK really reuses the ELK register for this is unclear. 201 * Let's see if we catch anyone with this supposedly enabled on ILK. 202 */ 203 drm_WARN(&i915->drm, GRAPHICS_VER(i915) == 5, 204 "ILK stolen reserved found? 0x%08x\n", 205 reg_val); 206 207 if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK)) 208 return; 209 210 *base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16; 211 drm_WARN_ON(&i915->drm, 212 (reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base); 213 214 *size = stolen_top - *base; 215 } 216 217 static void gen6_get_stolen_reserved(struct drm_i915_private *i915, 218 struct intel_uncore *uncore, 219 resource_size_t *base, 220 resource_size_t *size) 221 { 222 u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED); 223 224 drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val); 225 226 if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE)) 227 return; 228 229 *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK; 230 231 switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) { 232 case GEN6_STOLEN_RESERVED_1M: 233 *size = 1024 * 1024; 234 break; 235 case GEN6_STOLEN_RESERVED_512K: 236 *size = 512 * 1024; 237 break; 238 case GEN6_STOLEN_RESERVED_256K: 239 *size = 256 * 1024; 240 break; 241 case GEN6_STOLEN_RESERVED_128K: 242 *size = 128 * 1024; 243 break; 244 default: 245 *size = 1024 * 1024; 246 MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK); 247 } 248 } 249 250 static void vlv_get_stolen_reserved(struct drm_i915_private *i915, 251 struct intel_uncore *uncore, 252 resource_size_t *base, 253 resource_size_t *size) 254 { 255 u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED); 256 resource_size_t stolen_top = i915->dsm.end + 1; 257 258 drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val); 259 260 if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE)) 261 return; 262 263 switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) { 264 default: 265 MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK); 266 fallthrough; 267 case GEN7_STOLEN_RESERVED_1M: 268 *size = 1024 * 1024; 269 break; 270 } 271 272 /* 273 * On vlv, the ADDR_MASK portion is left as 0 and HW deduces the 274 * reserved location as (top - size). 275 */ 276 *base = stolen_top - *size; 277 } 278 279 static void gen7_get_stolen_reserved(struct drm_i915_private *i915, 280 struct intel_uncore *uncore, 281 resource_size_t *base, 282 resource_size_t *size) 283 { 284 u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED); 285 286 drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val); 287 288 if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE)) 289 return; 290 291 *base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK; 292 293 switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) { 294 case GEN7_STOLEN_RESERVED_1M: 295 *size = 1024 * 1024; 296 break; 297 case GEN7_STOLEN_RESERVED_256K: 298 *size = 256 * 1024; 299 break; 300 default: 301 *size = 1024 * 1024; 302 MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK); 303 } 304 } 305 306 static void chv_get_stolen_reserved(struct drm_i915_private *i915, 307 struct intel_uncore *uncore, 308 resource_size_t *base, 309 resource_size_t *size) 310 { 311 u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED); 312 313 drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val); 314 315 if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE)) 316 return; 317 318 *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK; 319 320 switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) { 321 case GEN8_STOLEN_RESERVED_1M: 322 *size = 1024 * 1024; 323 break; 324 case GEN8_STOLEN_RESERVED_2M: 325 *size = 2 * 1024 * 1024; 326 break; 327 case GEN8_STOLEN_RESERVED_4M: 328 *size = 4 * 1024 * 1024; 329 break; 330 case GEN8_STOLEN_RESERVED_8M: 331 *size = 8 * 1024 * 1024; 332 break; 333 default: 334 *size = 8 * 1024 * 1024; 335 MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK); 336 } 337 } 338 339 static void bdw_get_stolen_reserved(struct drm_i915_private *i915, 340 struct intel_uncore *uncore, 341 resource_size_t *base, 342 resource_size_t *size) 343 { 344 u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED); 345 resource_size_t stolen_top = i915->dsm.end + 1; 346 347 drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val); 348 349 if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE)) 350 return; 351 352 if (!(reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK)) 353 return; 354 355 *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK; 356 *size = stolen_top - *base; 357 } 358 359 static void icl_get_stolen_reserved(struct drm_i915_private *i915, 360 struct intel_uncore *uncore, 361 resource_size_t *base, 362 resource_size_t *size) 363 { 364 u64 reg_val = intel_uncore_read64(uncore, GEN6_STOLEN_RESERVED); 365 366 drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val); 367 368 *base = reg_val & GEN11_STOLEN_RESERVED_ADDR_MASK; 369 370 switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) { 371 case GEN8_STOLEN_RESERVED_1M: 372 *size = 1024 * 1024; 373 break; 374 case GEN8_STOLEN_RESERVED_2M: 375 *size = 2 * 1024 * 1024; 376 break; 377 case GEN8_STOLEN_RESERVED_4M: 378 *size = 4 * 1024 * 1024; 379 break; 380 case GEN8_STOLEN_RESERVED_8M: 381 *size = 8 * 1024 * 1024; 382 break; 383 default: 384 *size = 8 * 1024 * 1024; 385 MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK); 386 } 387 } 388 389 static int i915_gem_init_stolen(struct intel_memory_region *mem) 390 { 391 struct drm_i915_private *i915 = mem->i915; 392 struct intel_uncore *uncore = &i915->uncore; 393 resource_size_t reserved_base, stolen_top; 394 resource_size_t reserved_total, reserved_size; 395 396 mutex_init(&i915->mm.stolen_lock); 397 398 if (intel_vgpu_active(i915)) { 399 drm_notice(&i915->drm, 400 "%s, disabling use of stolen memory\n", 401 "iGVT-g active"); 402 return 0; 403 } 404 405 if (i915_vtd_active(i915) && GRAPHICS_VER(i915) < 8) { 406 drm_notice(&i915->drm, 407 "%s, disabling use of stolen memory\n", 408 "DMAR active"); 409 return 0; 410 } 411 412 if (resource_size(&mem->region) == 0) 413 return 0; 414 415 i915->dsm = mem->region; 416 417 if (i915_adjust_stolen(i915, &i915->dsm)) 418 return 0; 419 420 GEM_BUG_ON(i915->dsm.start == 0); 421 GEM_BUG_ON(i915->dsm.end <= i915->dsm.start); 422 423 stolen_top = i915->dsm.end + 1; 424 reserved_base = stolen_top; 425 reserved_size = 0; 426 427 switch (GRAPHICS_VER(i915)) { 428 case 2: 429 case 3: 430 break; 431 case 4: 432 if (!IS_G4X(i915)) 433 break; 434 fallthrough; 435 case 5: 436 g4x_get_stolen_reserved(i915, uncore, 437 &reserved_base, &reserved_size); 438 break; 439 case 6: 440 gen6_get_stolen_reserved(i915, uncore, 441 &reserved_base, &reserved_size); 442 break; 443 case 7: 444 if (IS_VALLEYVIEW(i915)) 445 vlv_get_stolen_reserved(i915, uncore, 446 &reserved_base, &reserved_size); 447 else 448 gen7_get_stolen_reserved(i915, uncore, 449 &reserved_base, &reserved_size); 450 break; 451 case 8: 452 case 9: 453 if (IS_LP(i915)) 454 chv_get_stolen_reserved(i915, uncore, 455 &reserved_base, &reserved_size); 456 else 457 bdw_get_stolen_reserved(i915, uncore, 458 &reserved_base, &reserved_size); 459 break; 460 default: 461 MISSING_CASE(GRAPHICS_VER(i915)); 462 fallthrough; 463 case 11: 464 case 12: 465 icl_get_stolen_reserved(i915, uncore, 466 &reserved_base, 467 &reserved_size); 468 break; 469 } 470 471 /* 472 * Our expectation is that the reserved space is at the top of the 473 * stolen region and *never* at the bottom. If we see !reserved_base, 474 * it likely means we failed to read the registers correctly. 475 */ 476 if (!reserved_base) { 477 drm_err(&i915->drm, 478 "inconsistent reservation %pa + %pa; ignoring\n", 479 &reserved_base, &reserved_size); 480 reserved_base = stolen_top; 481 reserved_size = 0; 482 } 483 484 i915->dsm_reserved = 485 (struct resource)DEFINE_RES_MEM(reserved_base, reserved_size); 486 487 if (!resource_contains(&i915->dsm, &i915->dsm_reserved)) { 488 drm_err(&i915->drm, 489 "Stolen reserved area %pR outside stolen memory %pR\n", 490 &i915->dsm_reserved, &i915->dsm); 491 return 0; 492 } 493 494 /* Exclude the reserved region from driver use */ 495 mem->region.end = reserved_base - 1; 496 mem->io_size = resource_size(&mem->region); 497 498 /* It is possible for the reserved area to end before the end of stolen 499 * memory, so just consider the start. */ 500 reserved_total = stolen_top - reserved_base; 501 502 i915->stolen_usable_size = 503 resource_size(&i915->dsm) - reserved_total; 504 505 drm_dbg(&i915->drm, 506 "Memory reserved for graphics device: %lluK, usable: %lluK\n", 507 (u64)resource_size(&i915->dsm) >> 10, 508 (u64)i915->stolen_usable_size >> 10); 509 510 if (i915->stolen_usable_size == 0) 511 return 0; 512 513 /* Basic memrange allocator for stolen space. */ 514 drm_mm_init(&i915->mm.stolen, 0, i915->stolen_usable_size); 515 516 return 0; 517 } 518 519 static void dbg_poison(struct i915_ggtt *ggtt, 520 dma_addr_t addr, resource_size_t size, 521 u8 x) 522 { 523 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) 524 if (!drm_mm_node_allocated(&ggtt->error_capture)) 525 return; 526 527 if (ggtt->vm.bind_async_flags & I915_VMA_GLOBAL_BIND) 528 return; /* beware stop_machine() inversion */ 529 530 GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE)); 531 532 mutex_lock(&ggtt->error_mutex); 533 while (size) { 534 void __iomem *s; 535 536 ggtt->vm.insert_page(&ggtt->vm, addr, 537 ggtt->error_capture.start, 538 I915_CACHE_NONE, 0); 539 mb(); 540 541 s = io_mapping_map_wc(&ggtt->iomap, 542 ggtt->error_capture.start, 543 PAGE_SIZE); 544 memset_io(s, x, PAGE_SIZE); 545 io_mapping_unmap(s); 546 547 addr += PAGE_SIZE; 548 size -= PAGE_SIZE; 549 } 550 mb(); 551 ggtt->vm.clear_range(&ggtt->vm, ggtt->error_capture.start, PAGE_SIZE); 552 mutex_unlock(&ggtt->error_mutex); 553 #endif 554 } 555 556 static struct sg_table * 557 i915_pages_create_for_stolen(struct drm_device *dev, 558 resource_size_t offset, resource_size_t size) 559 { 560 struct drm_i915_private *i915 = to_i915(dev); 561 struct sg_table *st; 562 struct scatterlist *sg; 563 564 GEM_BUG_ON(range_overflows(offset, size, resource_size(&i915->dsm))); 565 566 /* We hide that we have no struct page backing our stolen object 567 * by wrapping the contiguous physical allocation with a fake 568 * dma mapping in a single scatterlist. 569 */ 570 571 st = kmalloc(sizeof(*st), GFP_KERNEL); 572 if (st == NULL) 573 return ERR_PTR(-ENOMEM); 574 575 if (sg_alloc_table(st, 1, GFP_KERNEL)) { 576 kfree(st); 577 return ERR_PTR(-ENOMEM); 578 } 579 580 sg = st->sgl; 581 sg->offset = 0; 582 sg->length = size; 583 584 sg_dma_address(sg) = (dma_addr_t)i915->dsm.start + offset; 585 sg_dma_len(sg) = size; 586 587 return st; 588 } 589 590 static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj) 591 { 592 struct drm_i915_private *i915 = to_i915(obj->base.dev); 593 struct sg_table *pages = 594 i915_pages_create_for_stolen(obj->base.dev, 595 obj->stolen->start, 596 obj->stolen->size); 597 if (IS_ERR(pages)) 598 return PTR_ERR(pages); 599 600 dbg_poison(to_gt(i915)->ggtt, 601 sg_dma_address(pages->sgl), 602 sg_dma_len(pages->sgl), 603 POISON_INUSE); 604 605 __i915_gem_object_set_pages(obj, pages, obj->stolen->size); 606 607 return 0; 608 } 609 610 static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj, 611 struct sg_table *pages) 612 { 613 struct drm_i915_private *i915 = to_i915(obj->base.dev); 614 /* Should only be called from i915_gem_object_release_stolen() */ 615 616 dbg_poison(to_gt(i915)->ggtt, 617 sg_dma_address(pages->sgl), 618 sg_dma_len(pages->sgl), 619 POISON_FREE); 620 621 sg_free_table(pages); 622 kfree(pages); 623 } 624 625 static void 626 i915_gem_object_release_stolen(struct drm_i915_gem_object *obj) 627 { 628 struct drm_i915_private *i915 = to_i915(obj->base.dev); 629 struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen); 630 631 GEM_BUG_ON(!stolen); 632 i915_gem_stolen_remove_node(i915, stolen); 633 kfree(stolen); 634 635 i915_gem_object_release_memory_region(obj); 636 } 637 638 static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = { 639 .name = "i915_gem_object_stolen", 640 .get_pages = i915_gem_object_get_pages_stolen, 641 .put_pages = i915_gem_object_put_pages_stolen, 642 .release = i915_gem_object_release_stolen, 643 }; 644 645 static int __i915_gem_object_create_stolen(struct intel_memory_region *mem, 646 struct drm_i915_gem_object *obj, 647 struct drm_mm_node *stolen) 648 { 649 static struct lock_class_key lock_class; 650 unsigned int cache_level; 651 unsigned int flags; 652 int err; 653 654 /* 655 * Stolen objects are always physically contiguous since we just 656 * allocate one big block underneath using the drm_mm range allocator. 657 */ 658 flags = I915_BO_ALLOC_CONTIGUOUS; 659 660 drm_gem_private_object_init(&mem->i915->drm, &obj->base, stolen->size); 661 i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class, flags); 662 663 obj->stolen = stolen; 664 obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT; 665 cache_level = HAS_LLC(mem->i915) ? I915_CACHE_LLC : I915_CACHE_NONE; 666 i915_gem_object_set_cache_coherency(obj, cache_level); 667 668 if (WARN_ON(!i915_gem_object_trylock(obj, NULL))) 669 return -EBUSY; 670 671 i915_gem_object_init_memory_region(obj, mem); 672 673 err = i915_gem_object_pin_pages(obj); 674 if (err) 675 i915_gem_object_release_memory_region(obj); 676 i915_gem_object_unlock(obj); 677 678 return err; 679 } 680 681 static int _i915_gem_object_stolen_init(struct intel_memory_region *mem, 682 struct drm_i915_gem_object *obj, 683 resource_size_t size, 684 resource_size_t page_size, 685 unsigned int flags) 686 { 687 struct drm_i915_private *i915 = mem->i915; 688 struct drm_mm_node *stolen; 689 int ret; 690 691 if (!drm_mm_initialized(&i915->mm.stolen)) 692 return -ENODEV; 693 694 if (size == 0) 695 return -EINVAL; 696 697 stolen = kzalloc(sizeof(*stolen), GFP_KERNEL); 698 if (!stolen) 699 return -ENOMEM; 700 701 ret = i915_gem_stolen_insert_node(i915, stolen, size, 702 mem->min_page_size); 703 if (ret) 704 goto err_free; 705 706 ret = __i915_gem_object_create_stolen(mem, obj, stolen); 707 if (ret) 708 goto err_remove; 709 710 return 0; 711 712 err_remove: 713 i915_gem_stolen_remove_node(i915, stolen); 714 err_free: 715 kfree(stolen); 716 return ret; 717 } 718 719 struct drm_i915_gem_object * 720 i915_gem_object_create_stolen(struct drm_i915_private *i915, 721 resource_size_t size) 722 { 723 return i915_gem_object_create_region(i915->mm.stolen_region, size, 0, 0); 724 } 725 726 static int init_stolen_smem(struct intel_memory_region *mem) 727 { 728 /* 729 * Initialise stolen early so that we may reserve preallocated 730 * objects for the BIOS to KMS transition. 731 */ 732 return i915_gem_init_stolen(mem); 733 } 734 735 static int release_stolen_smem(struct intel_memory_region *mem) 736 { 737 i915_gem_cleanup_stolen(mem->i915); 738 return 0; 739 } 740 741 static const struct intel_memory_region_ops i915_region_stolen_smem_ops = { 742 .init = init_stolen_smem, 743 .release = release_stolen_smem, 744 .init_object = _i915_gem_object_stolen_init, 745 }; 746 747 static int init_stolen_lmem(struct intel_memory_region *mem) 748 { 749 int err; 750 751 if (GEM_WARN_ON(resource_size(&mem->region) == 0)) 752 return -ENODEV; 753 754 if (!io_mapping_init_wc(&mem->iomap, 755 mem->io_start, 756 mem->io_size)) 757 return -EIO; 758 759 /* 760 * TODO: For stolen lmem we mostly just care about populating the dsm 761 * related bits and setting up the drm_mm allocator for the range. 762 * Perhaps split up i915_gem_init_stolen() for this. 763 */ 764 err = i915_gem_init_stolen(mem); 765 if (err) 766 goto err_fini; 767 768 return 0; 769 770 err_fini: 771 io_mapping_fini(&mem->iomap); 772 return err; 773 } 774 775 static int release_stolen_lmem(struct intel_memory_region *mem) 776 { 777 io_mapping_fini(&mem->iomap); 778 i915_gem_cleanup_stolen(mem->i915); 779 return 0; 780 } 781 782 static const struct intel_memory_region_ops i915_region_stolen_lmem_ops = { 783 .init = init_stolen_lmem, 784 .release = release_stolen_lmem, 785 .init_object = _i915_gem_object_stolen_init, 786 }; 787 788 struct intel_memory_region * 789 i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type, 790 u16 instance) 791 { 792 struct intel_uncore *uncore = &i915->uncore; 793 struct pci_dev *pdev = to_pci_dev(i915->drm.dev); 794 struct intel_memory_region *mem; 795 resource_size_t min_page_size; 796 resource_size_t io_start; 797 resource_size_t lmem_size; 798 u64 lmem_base; 799 800 lmem_base = intel_uncore_read64(uncore, GEN12_DSMBASE); 801 if (GEM_WARN_ON(lmem_base >= pci_resource_len(pdev, 2))) 802 return ERR_PTR(-ENODEV); 803 804 lmem_size = pci_resource_len(pdev, 2) - lmem_base; 805 io_start = pci_resource_start(pdev, 2) + lmem_base; 806 807 min_page_size = HAS_64K_PAGES(i915) ? I915_GTT_PAGE_SIZE_64K : 808 I915_GTT_PAGE_SIZE_4K; 809 810 mem = intel_memory_region_create(i915, lmem_base, lmem_size, 811 min_page_size, 812 io_start, lmem_size, 813 type, instance, 814 &i915_region_stolen_lmem_ops); 815 if (IS_ERR(mem)) 816 return mem; 817 818 /* 819 * TODO: consider creating common helper to just print all the 820 * interesting stuff from intel_memory_region, which we can use for all 821 * our probed regions. 822 */ 823 824 drm_dbg(&i915->drm, "Stolen Local memory IO start: %pa\n", 825 &mem->io_start); 826 827 intel_memory_region_set_name(mem, "stolen-local"); 828 829 mem->private = true; 830 831 return mem; 832 } 833 834 struct intel_memory_region* 835 i915_gem_stolen_smem_setup(struct drm_i915_private *i915, u16 type, 836 u16 instance) 837 { 838 struct intel_memory_region *mem; 839 840 mem = intel_memory_region_create(i915, 841 intel_graphics_stolen_res.start, 842 resource_size(&intel_graphics_stolen_res), 843 PAGE_SIZE, 0, 0, type, instance, 844 &i915_region_stolen_smem_ops); 845 if (IS_ERR(mem)) 846 return mem; 847 848 intel_memory_region_set_name(mem, "stolen-system"); 849 850 mem->private = true; 851 return mem; 852 } 853 854 struct drm_i915_gem_object * 855 i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *i915, 856 resource_size_t stolen_offset, 857 resource_size_t size) 858 { 859 struct intel_memory_region *mem = i915->mm.stolen_region; 860 struct drm_i915_gem_object *obj; 861 struct drm_mm_node *stolen; 862 int ret; 863 864 if (!drm_mm_initialized(&i915->mm.stolen)) 865 return ERR_PTR(-ENODEV); 866 867 drm_dbg(&i915->drm, 868 "creating preallocated stolen object: stolen_offset=%pa, size=%pa\n", 869 &stolen_offset, &size); 870 871 /* KISS and expect everything to be page-aligned */ 872 if (GEM_WARN_ON(size == 0) || 873 GEM_WARN_ON(!IS_ALIGNED(size, mem->min_page_size)) || 874 GEM_WARN_ON(!IS_ALIGNED(stolen_offset, mem->min_page_size))) 875 return ERR_PTR(-EINVAL); 876 877 stolen = kzalloc(sizeof(*stolen), GFP_KERNEL); 878 if (!stolen) 879 return ERR_PTR(-ENOMEM); 880 881 stolen->start = stolen_offset; 882 stolen->size = size; 883 mutex_lock(&i915->mm.stolen_lock); 884 ret = drm_mm_reserve_node(&i915->mm.stolen, stolen); 885 mutex_unlock(&i915->mm.stolen_lock); 886 if (ret) 887 goto err_free; 888 889 obj = i915_gem_object_alloc(); 890 if (!obj) { 891 ret = -ENOMEM; 892 goto err_stolen; 893 } 894 895 ret = __i915_gem_object_create_stolen(mem, obj, stolen); 896 if (ret) 897 goto err_object_free; 898 899 i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE); 900 return obj; 901 902 err_object_free: 903 i915_gem_object_free(obj); 904 err_stolen: 905 i915_gem_stolen_remove_node(i915, stolen); 906 err_free: 907 kfree(stolen); 908 return ERR_PTR(ret); 909 } 910 911 bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj) 912 { 913 return obj->ops == &i915_gem_object_stolen_ops; 914 } 915