/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2017 Intel Corporation
 */

#include <linux/prime_numbers.h>

#include "i915_selftest.h"

#include "gem/i915_gem_region.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_pm.h"

#include "gt/intel_gt.h"

#include "igt_gem_utils.h"
#include "mock_context.h"

#include "selftests/mock_drm.h"
#include "selftests/mock_gem_device.h"
#include "selftests/mock_region.h"
#include "selftests/i915_random.h"

static const unsigned int page_sizes[] = {
	I915_GTT_PAGE_SIZE_2M,
	I915_GTT_PAGE_SIZE_64K,
	I915_GTT_PAGE_SIZE_4K,
};

static unsigned int get_largest_page_size(struct drm_i915_private *i915,
					  u64 rem)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(page_sizes); ++i) {
		unsigned int page_size = page_sizes[i];

		if (HAS_PAGE_SIZES(i915, page_size) && rem >= page_size)
			return page_size;
	}

	return 0;
}

static void huge_pages_free_pages(struct sg_table *st)
{
	struct scatterlist *sg;

	for (sg = st->sgl; sg; sg = __sg_next(sg)) {
		if (sg_page(sg))
			__free_pages(sg_page(sg), get_order(sg->length));
	}

	sg_free_table(st);
	kfree(st);
}

static int get_huge_pages(struct drm_i915_gem_object *obj)
{
#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
	unsigned int page_mask = obj->mm.page_mask;
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int sg_page_sizes;
	u64 rem;

	st = kmalloc(sizeof(*st), GFP);
	if (!st)
		return -ENOMEM;

	if (sg_alloc_table(st, obj->base.size >> PAGE_SHIFT, GFP)) {
		kfree(st);
		return -ENOMEM;
	}

	rem = obj->base.size;
	sg = st->sgl;
	st->nents = 0;
	sg_page_sizes = 0;

	/*
	 * Our goal here is simple: we want to greedily fill the object from
	 * largest to smallest page-size, while ensuring that we use *every*
	 * page-size as per the given page-mask.
	 */
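	/*
	 * e.g. for page_mask = 2M | 64K | 4K the inner loop below keeps
	 * carving out 2M chunks while reserving 64K + 4K of the remainder
	 * (one chunk for each smaller bit still set in the mask), then
	 * repeats the same dance for 64K and finally 4K.
	 */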
	do {
		unsigned int bit = ilog2(page_mask);
		unsigned int page_size = BIT(bit);
		int order = get_order(page_size);

		do {
			struct page *page;

			GEM_BUG_ON(order >= MAX_ORDER);
			page = alloc_pages(GFP | __GFP_ZERO, order);
			if (!page)
				goto err;

			sg_set_page(sg, page, page_size, 0);
			sg_page_sizes |= page_size;
			st->nents++;

			rem -= page_size;
			if (!rem) {
				sg_mark_end(sg);
				break;
			}

			sg = __sg_next(sg);
		} while ((rem - ((page_size - 1) & page_mask)) >= page_size);

		page_mask &= (page_size - 1);
	} while (page_mask);

	if (i915_gem_gtt_prepare_pages(obj, st))
		goto err;

	GEM_BUG_ON(sg_page_sizes != obj->mm.page_mask);
	__i915_gem_object_set_pages(obj, st, sg_page_sizes);

	return 0;

err:
	sg_set_page(sg, NULL, 0, 0);
	sg_mark_end(sg);
	huge_pages_free_pages(st);

	return -ENOMEM;
}

static void put_huge_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	i915_gem_gtt_finish_pages(obj, pages);
	huge_pages_free_pages(pages);

	obj->mm.dirty = false;
}

static const struct drm_i915_gem_object_ops huge_page_ops = {
	.name = "huge-gem",
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
		 I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = get_huge_pages,
	.put_pages = put_huge_pages,
};

static struct drm_i915_gem_object *
huge_pages_object(struct drm_i915_private *i915,
		  u64 size,
		  unsigned int page_mask)
{
	static struct lock_class_key lock_class;
	struct drm_i915_gem_object *obj;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, BIT(__ffs(page_mask))));

	if (size >> PAGE_SHIFT > INT_MAX)
		return ERR_PTR(-E2BIG);

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	drm_gem_private_object_init(&i915->drm, &obj->base, size);
	i915_gem_object_init(obj, &huge_page_ops, &lock_class);

	i915_gem_object_set_volatile(obj);

	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->cache_level = I915_CACHE_NONE;

	obj->mm.page_mask = page_mask;

	return obj;
}

static int fake_get_huge_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	const u64 max_len = rounddown_pow_of_two(UINT_MAX);
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int sg_page_sizes;
	u64 rem;

	st = kmalloc(sizeof(*st), GFP);
	if (!st)
		return -ENOMEM;

	if (sg_alloc_table(st, obj->base.size >> PAGE_SHIFT, GFP)) {
		kfree(st);
		return -ENOMEM;
	}

	/* Use optimal page-sized chunks to fill in the sg table */
	rem = obj->base.size;
	sg = st->sgl;
	st->nents = 0;
	sg_page_sizes = 0;
	do {
		unsigned int page_size = get_largest_page_size(i915, rem);
		unsigned int len = min(page_size * div_u64(rem, page_size),
				       max_len);

		GEM_BUG_ON(!page_size);

		sg->offset = 0;
		sg->length = len;
		sg_dma_len(sg) = len;
		sg_dma_address(sg) = page_size;

		sg_page_sizes |= len;

		st->nents++;

		rem -= len;
		if (!rem) {
			sg_mark_end(sg);
			break;
		}

		sg = sg_next(sg);
	} while (1);

	i915_sg_trim(st);

	__i915_gem_object_set_pages(obj, st, sg_page_sizes);

	return 0;
}

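/*
 * Variant of fake_get_huge_pages() that backs the object with a single sg
 * entry, sized to the largest page size the device supports and that fits
 * the object. Note the dma address is faked to equal the page size, which
 * keeps it trivially aligned to that page size.
 */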
static int fake_get_huge_pages_single(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int page_size;

	st = kmalloc(sizeof(*st), GFP);
	if (!st)
		return -ENOMEM;

	if (sg_alloc_table(st, 1, GFP)) {
		kfree(st);
		return -ENOMEM;
	}

	sg = st->sgl;
	st->nents = 1;

	page_size = get_largest_page_size(i915, obj->base.size);
	GEM_BUG_ON(!page_size);

	sg->offset = 0;
	sg->length = obj->base.size;
	sg_dma_len(sg) = obj->base.size;
	sg_dma_address(sg) = page_size;

	__i915_gem_object_set_pages(obj, st, sg->length);

	return 0;
#undef GFP
}

static void fake_free_huge_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages)
{
	sg_free_table(pages);
	kfree(pages);
}

static void fake_put_huge_pages(struct drm_i915_gem_object *obj,
				struct sg_table *pages)
{
	fake_free_huge_pages(obj, pages);
	obj->mm.dirty = false;
}

static const struct drm_i915_gem_object_ops fake_ops = {
	.name = "fake-gem",
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = fake_get_huge_pages,
	.put_pages = fake_put_huge_pages,
};

static const struct drm_i915_gem_object_ops fake_ops_single = {
	.name = "fake-gem",
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = fake_get_huge_pages_single,
	.put_pages = fake_put_huge_pages,
};

static struct drm_i915_gem_object *
fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single)
{
	static struct lock_class_key lock_class;
	struct drm_i915_gem_object *obj;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));

	if (size >> PAGE_SHIFT > UINT_MAX)
		return ERR_PTR(-E2BIG);

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	drm_gem_private_object_init(&i915->drm, &obj->base, size);

	if (single)
		i915_gem_object_init(obj, &fake_ops_single, &lock_class);
	else
		i915_gem_object_init(obj, &fake_ops, &lock_class);

	i915_gem_object_set_volatile(obj);

	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->cache_level = I915_CACHE_NONE;

	return obj;
}

static int igt_check_page_sizes(struct i915_vma *vma)
{
	struct drm_i915_private *i915 = vma->vm->i915;
	unsigned int supported = INTEL_INFO(i915)->page_sizes;
	struct drm_i915_gem_object *obj = vma->obj;
	int err;

	/* We have to wait for the async bind to complete before our asserts */
	err = i915_vma_sync(vma);
	if (err)
		return err;

	if (!HAS_PAGE_SIZES(i915, vma->page_sizes.sg)) {
		pr_err("unsupported page_sizes.sg=%u, supported=%u\n",
		       vma->page_sizes.sg & ~supported, supported);
		err = -EINVAL;
	}

	if (!HAS_PAGE_SIZES(i915, vma->page_sizes.gtt)) {
		pr_err("unsupported page_sizes.gtt=%u, supported=%u\n",
		       vma->page_sizes.gtt & ~supported, supported);
		err = -EINVAL;
	}

	if (vma->page_sizes.phys != obj->mm.page_sizes.phys) {
		pr_err("vma->page_sizes.phys(%u) != obj->mm.page_sizes.phys(%u)\n",
		       vma->page_sizes.phys, obj->mm.page_sizes.phys);
		err = -EINVAL;
	}

	if (vma->page_sizes.sg != obj->mm.page_sizes.sg) {
		pr_err("vma->page_sizes.sg(%u) != obj->mm.page_sizes.sg(%u)\n",
		       vma->page_sizes.sg, obj->mm.page_sizes.sg);
		err = -EINVAL;
	}

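	/*
	 * The GTT page size depends on how the pages were actually inserted
	 * into the GTT, so it is a per-vma property and should only ever be
	 * tracked on the vma, never on the object itself.
	 */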
	if (obj->mm.page_sizes.gtt) {
		pr_err("obj->page_sizes.gtt(%u) should never be set\n",
		       obj->mm.page_sizes.gtt);
		err = -EINVAL;
	}

	return err;
}

static int igt_mock_exhaust_device_supported_pages(void *arg)
{
	struct i915_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	unsigned int saved_mask = INTEL_INFO(i915)->page_sizes;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int i, j, single;
	int err;

	/*
	 * Sanity check creating objects with every valid page support
	 * combination for our mock device.
	 */

	for (i = 1; i < BIT(ARRAY_SIZE(page_sizes)); i++) {
		unsigned int combination = SZ_4K; /* Required for ppGTT */

		for (j = 0; j < ARRAY_SIZE(page_sizes); j++) {
			if (i & BIT(j))
				combination |= page_sizes[j];
		}

		mkwrite_device_info(i915)->page_sizes = combination;

		for (single = 0; single <= 1; ++single) {
			obj = fake_huge_pages_object(i915, combination, !!single);
			if (IS_ERR(obj)) {
				err = PTR_ERR(obj);
				goto out_device;
			}

			if (obj->base.size != combination) {
				pr_err("obj->base.size=%zu, expected=%u\n",
				       obj->base.size, combination);
				err = -EINVAL;
				goto out_put;
			}

			vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
			if (IS_ERR(vma)) {
				err = PTR_ERR(vma);
				goto out_put;
			}

			err = i915_vma_pin(vma, 0, 0, PIN_USER);
			if (err)
				goto out_put;

			err = igt_check_page_sizes(vma);

			if (vma->page_sizes.sg != combination) {
				pr_err("page_sizes.sg=%u, expected=%u\n",
				       vma->page_sizes.sg, combination);
				err = -EINVAL;
			}

			i915_vma_unpin(vma);
			i915_gem_object_put(obj);

			if (err)
				goto out_device;
		}
	}

	goto out_device;

out_put:
	i915_gem_object_put(obj);
out_device:
	mkwrite_device_info(i915)->page_sizes = saved_mask;

	return err;
}

static int igt_mock_memory_region_huge_pages(void *arg)
{
	const unsigned int flags[] = { 0, I915_BO_ALLOC_CONTIGUOUS };
	struct i915_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	unsigned long supported = INTEL_INFO(i915)->page_sizes;
	struct intel_memory_region *mem;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int bit;
	int err = 0;

	mem = mock_region_create(i915, 0, SZ_2G, I915_GTT_PAGE_SIZE_4K, 0);
	if (IS_ERR(mem)) {
		pr_err("%s failed to create memory region\n", __func__);
		return PTR_ERR(mem);
	}

	for_each_set_bit(bit, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
		unsigned int page_size = BIT(bit);
		resource_size_t phys;
		int i;

		for (i = 0; i < ARRAY_SIZE(flags); ++i) {
			obj = i915_gem_object_create_region(mem, page_size,
							    flags[i]);
			if (IS_ERR(obj)) {
				err = PTR_ERR(obj);
				goto out_region;
			}

			vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
			if (IS_ERR(vma)) {
				err = PTR_ERR(vma);
				goto out_put;
			}

			err = i915_vma_pin(vma, 0, 0, PIN_USER);
			if (err)
				goto out_put;

			err = igt_check_page_sizes(vma);
			if (err)
				goto out_unpin;

			phys = i915_gem_object_get_dma_address(obj, 0);
			if (!IS_ALIGNED(phys, page_size)) {
				pr_err("%s addr misaligned(%pa) page_size=%u\n",
				       __func__, &phys, page_size);
				err = -EINVAL;
				goto out_unpin;
			}

			if (vma->page_sizes.gtt != page_size) {
				pr_err("%s page_sizes.gtt=%u, expected=%u\n",
				       __func__, vma->page_sizes.gtt,
				       page_size);
				err = -EINVAL;
				goto out_unpin;
			}

			i915_vma_unpin(vma);
			__i915_gem_object_put_pages(obj);
			i915_gem_object_put(obj);
		}
	}

	goto out_region;

out_unpin:
	i915_vma_unpin(vma);
out_put:
	i915_gem_object_put(obj);
out_region:
	intel_memory_region_put(mem);
	return err;
}

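/*
 * Only the page sizes above 4K are interesting below: a dma address is by
 * construction at least 4K aligned, so the smallest supported page size
 * can never be misaligned, hence the walk starts from the 64K bit.
 */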
static int igt_mock_ppgtt_misaligned_dma(void *arg)
{
	struct i915_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	unsigned long supported = INTEL_INFO(i915)->page_sizes;
	struct drm_i915_gem_object *obj;
	int bit;
	int err;

	/*
	 * Sanity check dma misalignment for huge pages -- the dma addresses we
	 * insert into the paging structures need to always respect the page
	 * size alignment.
	 */

	bit = ilog2(I915_GTT_PAGE_SIZE_64K);

	for_each_set_bit_from(bit, &supported,
			      ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
		IGT_TIMEOUT(end_time);
		unsigned int page_size = BIT(bit);
		unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
		unsigned int offset;
		unsigned int size =
			round_up(page_size, I915_GTT_PAGE_SIZE_2M) << 1;
		struct i915_vma *vma;

		obj = fake_huge_pages_object(i915, size, true);
		if (IS_ERR(obj))
			return PTR_ERR(obj);

		if (obj->base.size != size) {
			pr_err("obj->base.size=%zu, expected=%u\n",
			       obj->base.size, size);
			err = -EINVAL;
			goto out_put;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err)
			goto out_put;

		/* Force the page size for this object */
		obj->mm.page_sizes.sg = page_size;

		vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out_unpin;
		}

		err = i915_vma_pin(vma, 0, 0, flags);
		if (err)
			goto out_unpin;

		err = igt_check_page_sizes(vma);

		if (vma->page_sizes.gtt != page_size) {
			pr_err("page_sizes.gtt=%u, expected %u\n",
			       vma->page_sizes.gtt, page_size);
			err = -EINVAL;
		}

		i915_vma_unpin(vma);

		if (err)
			goto out_unpin;

		/*
		 * Try all the other valid offsets until the next
		 * boundary -- should always fall back to using 4K
		 * pages.
		 */
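		/*
		 * Note the fixed offset is simply or'ed into the pin flags
		 * below -- since the offset is page aligned, its low bits
		 * are free to carry the PIN_* flag bits, which is the usual
		 * i915_vma_pin() idiom for PIN_OFFSET_FIXED.
		 */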
		for (offset = 4096; offset < page_size; offset += 4096) {
			err = i915_vma_unbind(vma);
			if (err)
				goto out_unpin;

			err = i915_vma_pin(vma, 0, 0, flags | offset);
			if (err)
				goto out_unpin;

			err = igt_check_page_sizes(vma);

			if (vma->page_sizes.gtt != I915_GTT_PAGE_SIZE_4K) {
				pr_err("page_sizes.gtt=%u, expected %llu\n",
				       vma->page_sizes.gtt, I915_GTT_PAGE_SIZE_4K);
				err = -EINVAL;
			}

			i915_vma_unpin(vma);

			if (err)
				goto out_unpin;

			if (igt_timeout(end_time,
					"%s timed out at offset %x with page-size %x\n",
					__func__, offset, page_size))
				break;
		}

		i915_gem_object_unpin_pages(obj);
		__i915_gem_object_put_pages(obj);
		i915_gem_object_put(obj);
	}

	return 0;

out_unpin:
	i915_gem_object_unpin_pages(obj);
out_put:
	i915_gem_object_put(obj);

	return err;
}

static void close_object_list(struct list_head *objects,
			      struct i915_ppgtt *ppgtt)
{
	struct drm_i915_gem_object *obj, *on;

	list_for_each_entry_safe(obj, on, objects, st_link) {
		list_del(&obj->st_link);
		i915_gem_object_unpin_pages(obj);
		__i915_gem_object_put_pages(obj);
		i915_gem_object_put(obj);
	}
}

static int igt_mock_ppgtt_huge_fill(void *arg)
{
	struct i915_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	unsigned long max_pages = ppgtt->vm.total >> PAGE_SHIFT;
	unsigned long page_num;
	bool single = false;
	LIST_HEAD(objects);
	IGT_TIMEOUT(end_time);
	int err = -ENODEV;

	for_each_prime_number_from(page_num, 1, max_pages) {
		struct drm_i915_gem_object *obj;
		u64 size = page_num << PAGE_SHIFT;
		struct i915_vma *vma;
		unsigned int expected_gtt = 0;
		int i;

		obj = fake_huge_pages_object(i915, size, single);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			break;
		}

		if (obj->base.size != size) {
			pr_err("obj->base.size=%zd, expected=%llu\n",
			       obj->base.size, size);
			i915_gem_object_put(obj);
			err = -EINVAL;
			break;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			break;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			break;
		}

		err = i915_vma_pin(vma, 0, 0, PIN_USER);
		if (err)
			break;

		err = igt_check_page_sizes(vma);
		if (err) {
			i915_vma_unpin(vma);
			break;
		}

		/*
		 * Figure out the expected gtt page size knowing that we go from
		 * largest to smallest page size sg chunks, and that we align to
		 * the largest page size.
		 */
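		/*
		 * e.g. for size = 2M + 64K + 4K the greedy walk below gives
		 * expected_gtt = 2M | 64K | 4K, which then loses the 64K bit
		 * since the trailing 4K chunk shares its page-table with the
		 * 64K chunk and the hw cannot mix 64K and 4K PTEs within the
		 * same page-table.
		 */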
		for (i = 0; i < ARRAY_SIZE(page_sizes); ++i) {
			unsigned int page_size = page_sizes[i];

			if (HAS_PAGE_SIZES(i915, page_size) &&
			    size >= page_size) {
				expected_gtt |= page_size;
				size &= page_size - 1;
			}
		}

		GEM_BUG_ON(!expected_gtt);
		GEM_BUG_ON(size);

		if (expected_gtt & I915_GTT_PAGE_SIZE_4K)
			expected_gtt &= ~I915_GTT_PAGE_SIZE_64K;

		i915_vma_unpin(vma);

		if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) {
			if (!IS_ALIGNED(vma->node.start,
					I915_GTT_PAGE_SIZE_2M)) {
				pr_err("node.start(%llx) not aligned to 2M\n",
				       vma->node.start);
				err = -EINVAL;
				break;
			}

			if (!IS_ALIGNED(vma->node.size,
					I915_GTT_PAGE_SIZE_2M)) {
				pr_err("node.size(%llx) not aligned to 2M\n",
				       vma->node.size);
				err = -EINVAL;
				break;
			}
		}

		if (vma->page_sizes.gtt != expected_gtt) {
			pr_err("gtt=%u, expected=%u, size=%zd, single=%s\n",
			       vma->page_sizes.gtt, expected_gtt,
			       obj->base.size, yesno(!!single));
			err = -EINVAL;
			break;
		}

		if (igt_timeout(end_time,
				"%s timed out at size %zd\n",
				__func__, obj->base.size))
			break;

		single = !single;
	}

	close_object_list(&objects, ppgtt);

	if (err == -ENOMEM || err == -ENOSPC)
		err = 0;

	return err;
}

static int igt_mock_ppgtt_64K(void *arg)
{
	struct i915_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	struct drm_i915_gem_object *obj;
	const struct object_info {
		unsigned int size;
		unsigned int gtt;
		unsigned int offset;
	} objects[] = {
		/* Cases with forced padding/alignment */
		{
			.size = SZ_64K,
			.gtt = I915_GTT_PAGE_SIZE_64K,
			.offset = 0,
		},
		{
			.size = SZ_64K + SZ_4K,
			.gtt = I915_GTT_PAGE_SIZE_4K,
			.offset = 0,
		},
		{
			.size = SZ_64K - SZ_4K,
			.gtt = I915_GTT_PAGE_SIZE_4K,
			.offset = 0,
		},
		{
			.size = SZ_2M,
			.gtt = I915_GTT_PAGE_SIZE_64K,
			.offset = 0,
		},
		{
			.size = SZ_2M - SZ_4K,
			.gtt = I915_GTT_PAGE_SIZE_4K,
			.offset = 0,
		},
		{
			.size = SZ_2M + SZ_4K,
			.gtt = I915_GTT_PAGE_SIZE_64K | I915_GTT_PAGE_SIZE_4K,
			.offset = 0,
		},
		{
			.size = SZ_2M + SZ_64K,
			.gtt = I915_GTT_PAGE_SIZE_64K,
			.offset = 0,
		},
		{
			.size = SZ_2M - SZ_64K,
			.gtt = I915_GTT_PAGE_SIZE_64K,
			.offset = 0,
		},
		/* Try without any forced padding/alignment */
		{
			.size = SZ_64K,
			.offset = SZ_2M,
			.gtt = I915_GTT_PAGE_SIZE_4K,
		},
		{
			.size = SZ_128K,
			.offset = SZ_2M - SZ_64K,
			.gtt = I915_GTT_PAGE_SIZE_4K,
		},
	};
	struct i915_vma *vma;
	int i, single;
	int err;

	/*
	 * Sanity check some of the trickiness with 64K pages -- either we can
	 * safely mark the whole page-table(2M block) as 64K, or we have to
	 * always fall back to 4K.
	 */

	if (!HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K))
		return 0;

	for (i = 0; i < ARRAY_SIZE(objects); ++i) {
		unsigned int size = objects[i].size;
		unsigned int expected_gtt = objects[i].gtt;
		unsigned int offset = objects[i].offset;
		unsigned int flags = PIN_USER;

		for (single = 0; single <= 1; single++) {
			obj = fake_huge_pages_object(i915, size, !!single);
			if (IS_ERR(obj))
				return PTR_ERR(obj);

			err = i915_gem_object_pin_pages(obj);
			if (err)
				goto out_object_put;

			/*
			 * Disable 2M pages -- We only want to use 64K/4K pages
			 * for this test.
			 */
			obj->mm.page_sizes.sg &= ~I915_GTT_PAGE_SIZE_2M;

			vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
			if (IS_ERR(vma)) {
				err = PTR_ERR(vma);
				goto out_object_unpin;
			}

			if (offset)
				flags |= PIN_OFFSET_FIXED | offset;

			err = i915_vma_pin(vma, 0, 0, flags);
			if (err)
				goto out_object_unpin;

			err = igt_check_page_sizes(vma);
			if (err)
				goto out_vma_unpin;

			if (!offset && vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) {
				if (!IS_ALIGNED(vma->node.start,
						I915_GTT_PAGE_SIZE_2M)) {
					pr_err("node.start(%llx) not aligned to 2M\n",
					       vma->node.start);
					err = -EINVAL;
					goto out_vma_unpin;
				}

				if (!IS_ALIGNED(vma->node.size,
						I915_GTT_PAGE_SIZE_2M)) {
					pr_err("node.size(%llx) not aligned to 2M\n",
					       vma->node.size);
					err = -EINVAL;
					goto out_vma_unpin;
				}
			}

			if (vma->page_sizes.gtt != expected_gtt) {
				pr_err("gtt=%u, expected=%u, i=%d, single=%s\n",
				       vma->page_sizes.gtt, expected_gtt, i,
				       yesno(!!single));
				err = -EINVAL;
				goto out_vma_unpin;
			}

			i915_vma_unpin(vma);
			i915_gem_object_unpin_pages(obj);
			__i915_gem_object_put_pages(obj);
			i915_gem_object_put(obj);
		}
	}

	return 0;

out_vma_unpin:
	i915_vma_unpin(vma);
out_object_unpin:
	i915_gem_object_unpin_pages(obj);
out_object_put:
	i915_gem_object_put(obj);

	return err;
}

static int gpu_write(struct intel_context *ce,
		     struct i915_vma *vma,
		     u32 dw,
		     u32 val)
{
	int err;

	i915_gem_object_lock(vma->obj, NULL);
	err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
	i915_gem_object_unlock(vma->obj);
	if (err)
		return err;

	return igt_gpu_fill_dw(ce, vma, dw * sizeof(u32),
			       vma->size >> PAGE_SHIFT, val);
}

static int
__cpu_check_shmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
{
	unsigned int needs_flush;
	unsigned long n;
	int err;

	i915_gem_object_lock(obj, NULL);
	err = i915_gem_object_prepare_read(obj, &needs_flush);
	if (err)
		goto err_unlock;

	for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
		u32 *ptr = kmap_atomic(i915_gem_object_get_page(obj, n));

		if (needs_flush & CLFLUSH_BEFORE)
			drm_clflush_virt_range(ptr, PAGE_SIZE);

		if (ptr[dword] != val) {
			pr_err("n=%lu ptr[%u]=%u, val=%u\n",
			       n, dword, ptr[dword], val);
			kunmap_atomic(ptr);
			err = -EINVAL;
			break;
		}

		kunmap_atomic(ptr);
	}

	i915_gem_object_finish_access(obj);
err_unlock:
	i915_gem_object_unlock(obj);

	return err;
}

static int __cpu_check_vmap(struct drm_i915_gem_object *obj, u32 dword, u32 val)
{
	unsigned long n = obj->base.size >> PAGE_SHIFT;
	u32 *ptr;
	int err;

	err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT);
	if (err)
		return err;

	ptr = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	ptr += dword;
	while (n--) {
		if (*ptr != val) {
			pr_err("base[%u]=%08x, val=%08x\n",
			       dword, *ptr, val);
			err = -EINVAL;
			break;
		}

		ptr += PAGE_SIZE / sizeof(*ptr);
	}

	i915_gem_object_unpin_map(obj);
	return err;
}

static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
{
	if (i915_gem_object_has_struct_page(obj))
		return __cpu_check_shmem(obj, dword, val);
	else
		return __cpu_check_vmap(obj, dword, val);
}

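/*
 * Pin the object at the given GTT offset, write a dword to every page via
 * the GPU and read it back from the CPU side -- a page-size handling bug
 * in the GTT setup typically shows up here as the write landing in the
 * wrong page.
 */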
static int __igt_write_huge(struct intel_context *ce,
			    struct drm_i915_gem_object *obj,
			    u64 size, u64 offset,
			    u32 dword, u32 val)
{
	unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
	struct i915_vma *vma;
	int err;

	vma = i915_vma_instance(obj, ce->vm, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	err = i915_vma_unbind(vma);
	if (err)
		return err;

	err = i915_vma_pin(vma, size, 0, flags | offset);
	if (err) {
		/*
		 * The ggtt may have some pages reserved so
		 * refrain from erroring out.
		 */
		if (err == -ENOSPC && i915_is_ggtt(ce->vm))
			err = 0;

		return err;
	}

	err = igt_check_page_sizes(vma);
	if (err)
		goto out_vma_unpin;

	err = gpu_write(ce, vma, dword, val);
	if (err) {
		pr_err("gpu-write failed at offset=%llx\n", offset);
		goto out_vma_unpin;
	}

	err = cpu_check(obj, dword, val);
	if (err) {
		pr_err("cpu-check failed at offset=%llx\n", offset);
		goto out_vma_unpin;
	}

out_vma_unpin:
	i915_vma_unpin(vma);
	return err;
}

static int igt_write_huge(struct i915_gem_context *ctx,
			  struct drm_i915_gem_object *obj)
{
	struct i915_gem_engines *engines;
	struct i915_gem_engines_iter it;
	struct intel_context *ce;
	I915_RND_STATE(prng);
	IGT_TIMEOUT(end_time);
	unsigned int max_page_size;
	unsigned int count;
	u64 max;
	u64 num;
	u64 size;
	int *order;
	int i, n;
	int err = 0;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	size = obj->base.size;
	if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
		size = round_up(size, I915_GTT_PAGE_SIZE_2M);

	n = 0;
	count = 0;
	max = U64_MAX;
	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		count++;
		if (!intel_engine_can_store_dword(ce->engine))
			continue;

		max = min(max, ce->vm->total);
		n++;
	}
	i915_gem_context_unlock_engines(ctx);
	if (!n)
		return 0;

	/*
	 * To keep things interesting when alternating between engines in our
	 * randomized order, let's also make feeding to the same engine a few
	 * times in succession a possibility by enlarging the permutation array.
	 */
	order = i915_random_order(count * count, &prng);
	if (!order)
		return -ENOMEM;

	max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg);
	max = div_u64(max - size, max_page_size);

	/*
	 * Try various offsets in an ascending/descending fashion until we
	 * timeout -- we want to avoid issues hidden by effectively always using
	 * offset = 0.
	 */
	i = 0;
	engines = i915_gem_context_lock_engines(ctx);
	for_each_prime_number_from(num, 0, max) {
		u64 offset_low = num * max_page_size;
		u64 offset_high = (max - num) * max_page_size;
		u32 dword = offset_in_page(num) / 4;
		struct intel_context *ce;

		ce = engines->engines[order[i] % engines->num_engines];
		i = (i + 1) % (count * count);
		if (!ce || !intel_engine_can_store_dword(ce->engine))
			continue;

		/*
		 * In order to utilize 64K pages we need to both pad the vma
		 * size and ensure the vma offset is at the start of the pt
		 * boundary, however to improve coverage we opt for testing both
		 * aligned and unaligned offsets.
		 */
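		/*
		 * Only offset_low is rounded down to the 2M page-table
		 * boundary here; offset_high is deliberately left as-is, so
		 * each iteration exercises one aligned and one unaligned
		 * placement.
		 */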
		if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
			offset_low = round_down(offset_low,
						I915_GTT_PAGE_SIZE_2M);

		err = __igt_write_huge(ce, obj, size, offset_low,
				       dword, num + 1);
		if (err)
			break;

		err = __igt_write_huge(ce, obj, size, offset_high,
				       dword, num + 1);
		if (err)
			break;

		if (igt_timeout(end_time,
				"%s timed out on %s, offset_low=%llx offset_high=%llx, max_page_size=%x\n",
				__func__, ce->engine->name, offset_low, offset_high,
				max_page_size))
			break;
	}
	i915_gem_context_unlock_engines(ctx);

	kfree(order);

	return err;
}

typedef struct drm_i915_gem_object *
(*igt_create_fn)(struct drm_i915_private *i915, u32 size, u32 flags);

static inline bool igt_can_allocate_thp(struct drm_i915_private *i915)
{
	return i915->mm.gemfs && has_transparent_hugepage();
}

static struct drm_i915_gem_object *
igt_create_shmem(struct drm_i915_private *i915, u32 size, u32 flags)
{
	if (!igt_can_allocate_thp(i915)) {
		pr_info("%s missing THP support, skipping\n", __func__);
		return ERR_PTR(-ENODEV);
	}

	return i915_gem_object_create_shmem(i915, size);
}

static struct drm_i915_gem_object *
igt_create_internal(struct drm_i915_private *i915, u32 size, u32 flags)
{
	return i915_gem_object_create_internal(i915, size);
}

static struct drm_i915_gem_object *
igt_create_system(struct drm_i915_private *i915, u32 size, u32 flags)
{
	return huge_pages_object(i915, size, size);
}

static struct drm_i915_gem_object *
igt_create_local(struct drm_i915_private *i915, u32 size, u32 flags)
{
	return i915_gem_object_create_lmem(i915, size, flags);
}

static u32 igt_random_size(struct rnd_state *prng,
			   u32 min_page_size,
			   u32 max_page_size)
{
	u64 mask;
	u32 size;

	GEM_BUG_ON(!is_power_of_2(min_page_size));
	GEM_BUG_ON(!is_power_of_2(max_page_size));
	GEM_BUG_ON(min_page_size < PAGE_SIZE);
	GEM_BUG_ON(min_page_size > max_page_size);

	mask = ((max_page_size << 1ULL) - 1) & PAGE_MASK;
	size = prandom_u32_state(prng) & mask;
	if (size < min_page_size)
		size |= min_page_size;

	return size;
}

static int igt_ppgtt_smoke_huge(void *arg)
{
	struct i915_gem_context *ctx = arg;
	struct drm_i915_private *i915 = ctx->i915;
	struct drm_i915_gem_object *obj;
	I915_RND_STATE(prng);
	struct {
		igt_create_fn fn;
		u32 min;
		u32 max;
	} backends[] = {
		{ igt_create_internal, SZ_64K, SZ_2M, },
		{ igt_create_shmem, SZ_64K, SZ_32M, },
		{ igt_create_local, SZ_64K, SZ_1G, },
	};
	int err;
	int i;

	/*
	 * Sanity check that the HW uses huge pages correctly through our
	 * various backends -- ensure that our writes land in the right place.
	 */
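	/*
	 * Each backend gets a page-aligned random size in [min, max]; if the
	 * backend (or its backing store) cannot satisfy the request, we halve
	 * the size and retry rather than failing the smoke test outright.
	 */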
	for (i = 0; i < ARRAY_SIZE(backends); ++i) {
		u32 min = backends[i].min;
		u32 max = backends[i].max;
		u32 size = max;
try_again:
		size = igt_random_size(&prng, min, rounddown_pow_of_two(size));

		obj = backends[i].fn(i915, size, 0);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			if (err == -E2BIG) {
				size >>= 1;
				goto try_again;
			} else if (err == -ENODEV) {
				err = 0;
				continue;
			}

			return err;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			if (err == -ENXIO || err == -E2BIG) {
				i915_gem_object_put(obj);
				size >>= 1;
				goto try_again;
			}
			goto out_put;
		}

		if (obj->mm.page_sizes.phys < min) {
			pr_info("%s unable to allocate huge-page(s) with size=%u, i=%d\n",
				__func__, size, i);
			err = -ENOMEM;
			goto out_unpin;
		}

		err = igt_write_huge(ctx, obj);
		if (err) {
			pr_err("%s write-huge failed with size=%u, i=%d\n",
			       __func__, size, i);
		}
out_unpin:
		i915_gem_object_unpin_pages(obj);
		__i915_gem_object_put_pages(obj);
out_put:
		i915_gem_object_put(obj);

		if (err == -ENOMEM || err == -ENXIO)
			err = 0;

		if (err)
			break;

		cond_resched();
	}

	return err;
}

static int igt_ppgtt_sanity_check(void *arg)
{
	struct i915_gem_context *ctx = arg;
	struct drm_i915_private *i915 = ctx->i915;
	unsigned int supported = INTEL_INFO(i915)->page_sizes;
	struct {
		igt_create_fn fn;
		unsigned int flags;
	} backends[] = {
		{ igt_create_system, 0, },
		{ igt_create_local, I915_BO_ALLOC_CONTIGUOUS, },
	};
	struct {
		u32 size;
		u32 pages;
	} combos[] = {
		{ SZ_64K, SZ_64K },
		{ SZ_2M, SZ_2M },
		{ SZ_2M, SZ_64K },
		{ SZ_2M - SZ_64K, SZ_64K },
		{ SZ_2M - SZ_4K, SZ_64K | SZ_4K },
		{ SZ_2M + SZ_4K, SZ_64K | SZ_4K },
		{ SZ_2M + SZ_4K, SZ_2M | SZ_4K },
		{ SZ_2M + SZ_64K, SZ_2M | SZ_64K },
	};
	int i, j;
	int err;

	if (supported == I915_GTT_PAGE_SIZE_4K)
		return 0;

	/*
	 * Sanity check that the HW behaves with a limited set of combinations.
	 * We already have a bunch of randomised testing, which should give us
	 * a decent amount of variation between runs, however we should keep
	 * this to limit the chances of introducing a temporary regression, by
	 * testing the most obvious cases that might make something blow up.
	 */
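	/*
	 * For each combo, .size is the object size to allocate and .pages is
	 * the sg page-size mask we force on the object (masked below by what
	 * the device actually supports) before doing the write/readback.
	 */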
	for (i = 0; i < ARRAY_SIZE(backends); ++i) {
		for (j = 0; j < ARRAY_SIZE(combos); ++j) {
			struct drm_i915_gem_object *obj;
			u32 size = combos[j].size;
			u32 pages = combos[j].pages;

			obj = backends[i].fn(i915, size, backends[i].flags);
			if (IS_ERR(obj)) {
				err = PTR_ERR(obj);
				if (err == -ENODEV) {
					pr_info("Device lacks local memory, skipping\n");
					err = 0;
					break;
				}

				return err;
			}

			err = i915_gem_object_pin_pages(obj);
			if (err) {
				i915_gem_object_put(obj);
				goto out;
			}

			GEM_BUG_ON(pages > obj->base.size);
			pages = pages & supported;

			if (pages)
				obj->mm.page_sizes.sg = pages;

			err = igt_write_huge(ctx, obj);

			i915_gem_object_unpin_pages(obj);
			__i915_gem_object_put_pages(obj);
			i915_gem_object_put(obj);

			if (err) {
				pr_err("%s write-huge failed with size=%u pages=%u i=%d, j=%d\n",
				       __func__, size, pages, i, j);
				goto out;
			}
		}

		cond_resched();
	}

out:
	if (err == -ENOMEM)
		err = 0;

	return err;
}

static int igt_tmpfs_fallback(void *arg)
{
	struct i915_gem_context *ctx = arg;
	struct drm_i915_private *i915 = ctx->i915;
	struct vfsmount *gemfs = i915->mm.gemfs;
	struct i915_address_space *vm = i915_gem_context_get_vm_rcu(ctx);
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u32 *vaddr;
	int err = 0;

	/*
	 * Make sure that we don't burst into a ball of flames upon falling back
	 * to tmpfs, which we rely on if on the off-chance we encounter a failure
	 * when setting up gemfs.
	 */

	i915->mm.gemfs = NULL;

	obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_restore;
	}

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out_put;
	}
	*vaddr = 0xdeadbeaf;

	__i915_gem_object_flush_map(obj, 0, 64);
	i915_gem_object_unpin_map(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto out_put;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto out_put;

	err = igt_check_page_sizes(vma);

	i915_vma_unpin(vma);
out_put:
	i915_gem_object_put(obj);
out_restore:
	i915->mm.gemfs = gemfs;

	i915_vm_put(vm);
	return err;
}

static int igt_shrink_thp(void *arg)
{
	struct i915_gem_context *ctx = arg;
	struct drm_i915_private *i915 = ctx->i915;
	struct i915_address_space *vm = i915_gem_context_get_vm_rcu(ctx);
	struct drm_i915_gem_object *obj;
	struct i915_gem_engines_iter it;
	struct intel_context *ce;
	struct i915_vma *vma;
	unsigned int flags = PIN_USER;
	unsigned int n;
	int err = 0;

	/*
	 * Sanity check shrinking huge-paged object -- make sure nothing blows
	 * up.
	 */

	if (!igt_can_allocate_thp(i915)) {
		pr_info("missing THP support, skipping\n");
		goto out_vm;
	}

	obj = i915_gem_object_create_shmem(i915, SZ_2M);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_vm;
	}

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto out_put;
	}

	err = i915_vma_pin(vma, 0, 0, flags);
	if (err)
		goto out_put;

	if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_2M) {
		pr_info("failed to allocate THP, finishing test early\n");
		goto out_unpin;
	}

	err = igt_check_page_sizes(vma);
	if (err)
		goto out_unpin;

	n = 0;

	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		if (!intel_engine_can_store_dword(ce->engine))
			continue;

		err = gpu_write(ce, vma, n++, 0xdeadbeaf);
		if (err)
			break;
	}
	i915_gem_context_unlock_engines(ctx);
	i915_vma_unpin(vma);
	if (err)
		goto out_put;

	/*
	 * Now that the pages are *unpinned* shrink-all should invoke
	 * shmem to truncate our pages.
	 */
	i915_gem_shrink_all(i915);
	if (i915_gem_object_has_pages(obj)) {
		pr_err("shrink-all didn't truncate the pages\n");
		err = -EINVAL;
		goto out_put;
	}

	if (obj->mm.page_sizes.sg || obj->mm.page_sizes.phys) {
		pr_err("residual page-size bits left\n");
		err = -EINVAL;
		goto out_put;
	}

	err = i915_vma_pin(vma, 0, 0, flags);
	if (err)
		goto out_put;

	while (n--) {
		err = cpu_check(obj, n, 0xdeadbeaf);
		if (err)
			break;
	}

out_unpin:
	i915_vma_unpin(vma);
out_put:
	i915_gem_object_put(obj);
out_vm:
	i915_vm_put(vm);

	return err;
}

int i915_gem_huge_page_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_mock_exhaust_device_supported_pages),
		SUBTEST(igt_mock_memory_region_huge_pages),
		SUBTEST(igt_mock_ppgtt_misaligned_dma),
		SUBTEST(igt_mock_ppgtt_huge_fill),
		SUBTEST(igt_mock_ppgtt_64K),
	};
	struct drm_i915_private *dev_priv;
	struct i915_ppgtt *ppgtt;
	int err;

	dev_priv = mock_gem_device();
	if (!dev_priv)
		return -ENOMEM;

	/* Pretend to be a device which supports the 48b PPGTT */
	mkwrite_device_info(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL;
	mkwrite_device_info(dev_priv)->ppgtt_size = 48;

	ppgtt = i915_ppgtt_create(&dev_priv->gt);
	if (IS_ERR(ppgtt)) {
		err = PTR_ERR(ppgtt);
		goto out_unlock;
	}

	if (!i915_vm_is_4lvl(&ppgtt->vm)) {
		pr_err("failed to create 48b PPGTT\n");
		err = -EINVAL;
		goto out_put;
	}

	/* If we ever hit this then it's time to mock the 64K scratch */
	if (!i915_vm_has_scratch_64K(&ppgtt->vm)) {
		pr_err("PPGTT missing 64K scratch page\n");
		err = -EINVAL;
		goto out_put;
	}

	err = i915_subtests(tests, ppgtt);

out_put:
	i915_vm_put(&ppgtt->vm);
out_unlock:
	mock_destroy_device(dev_priv);
	return err;
}

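/*
 * Entry point for the live selftests, which run against real hardware.
 * Note we also flip on scrub_64K for the context's vm below, so the 64K
 * scratch/scrub handling gets exercised by these tests as well.
 */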
int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_shrink_thp),
		SUBTEST(igt_tmpfs_fallback),
		SUBTEST(igt_ppgtt_smoke_huge),
		SUBTEST(igt_ppgtt_sanity_check),
	};
	struct i915_gem_context *ctx;
	struct i915_address_space *vm;
	struct file *file;
	int err;

	if (!HAS_PPGTT(i915)) {
		pr_info("PPGTT not supported, skipping live-selftests\n");
		return 0;
	}

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ctx = live_context(i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out_file;
	}

	mutex_lock(&ctx->mutex);
	vm = i915_gem_context_vm(ctx);
	if (vm)
		WRITE_ONCE(vm->scrub_64K, true);
	mutex_unlock(&ctx->mutex);

	err = i915_subtests(tests, ctx);

out_file:
	fput(file);
	return err;
}