/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2017 Intel Corporation
 */

#include <linux/prime_numbers.h>
#include <linux/string_helpers.h>
#include <linux/swap.h>

#include "i915_selftest.h"

#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_pm.h"
#include "gem/i915_gem_region.h"

#include "gt/intel_gt.h"

#include "igt_gem_utils.h"
#include "mock_context.h"

#include "selftests/mock_drm.h"
#include "selftests/mock_gem_device.h"
#include "selftests/mock_region.h"
#include "selftests/i915_random.h"

static struct i915_gem_context *hugepage_ctx(struct drm_i915_private *i915,
					     struct file *file)
{
	struct i915_gem_context *ctx = live_context(i915, file);
	struct i915_address_space *vm;

	if (IS_ERR(ctx))
		return ctx;

	vm = ctx->vm;
	if (vm)
		WRITE_ONCE(vm->scrub_64K, true);

	return ctx;
}

static const unsigned int page_sizes[] = {
	I915_GTT_PAGE_SIZE_2M,
	I915_GTT_PAGE_SIZE_64K,
	I915_GTT_PAGE_SIZE_4K,
};

static unsigned int get_largest_page_size(struct drm_i915_private *i915,
					  u64 rem)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(page_sizes); ++i) {
		unsigned int page_size = page_sizes[i];

		if (HAS_PAGE_SIZES(i915, page_size) && rem >= page_size)
			return page_size;
	}

	return 0;
}

static void huge_pages_free_pages(struct sg_table *st)
{
	struct scatterlist *sg;

	for (sg = st->sgl; sg; sg = __sg_next(sg)) {
		if (sg_page(sg))
			__free_pages(sg_page(sg), get_order(sg->length));
	}

	sg_free_table(st);
	kfree(st);
}

static int get_huge_pages(struct drm_i915_gem_object *obj)
{
#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
	unsigned int page_mask = obj->mm.page_mask;
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int sg_page_sizes;
	u64 rem;

	st = kmalloc(sizeof(*st), GFP);
	if (!st)
		return -ENOMEM;

	if (sg_alloc_table(st, obj->base.size >> PAGE_SHIFT, GFP)) {
		kfree(st);
		return -ENOMEM;
	}

	rem = obj->base.size;
	sg = st->sgl;
	st->nents = 0;
	sg_page_sizes = 0;

	/*
	 * Our goal here is simple: we want to greedily fill the object from
	 * largest to smallest page-size, while ensuring that we use *every*
	 * page-size as per the given page-mask.
	 */
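	/*
	 * Worked example (illustrative): with page_mask = 2M | 64K | 4K and
	 * size = 2M + 64K + 4K, the reservation term below,
	 * ((page_size - 1) & page_mask), is the sum of the smaller mask bits,
	 * so the inner loop hands out exactly one 2M chunk before dropping
	 * down to 64K, then 4K -- one sg entry per requested page size.
	 */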
	do {
		unsigned int bit = ilog2(page_mask);
		unsigned int page_size = BIT(bit);
		int order = get_order(page_size);

		do {
			struct page *page;

			GEM_BUG_ON(order >= MAX_ORDER);
			page = alloc_pages(GFP | __GFP_ZERO, order);
			if (!page)
				goto err;

			sg_set_page(sg, page, page_size, 0);
			sg_page_sizes |= page_size;
			st->nents++;

			rem -= page_size;
			if (!rem) {
				sg_mark_end(sg);
				break;
			}

			sg = __sg_next(sg);
		} while ((rem - ((page_size - 1) & page_mask)) >= page_size);

		page_mask &= (page_size - 1);
	} while (page_mask);

	if (i915_gem_gtt_prepare_pages(obj, st))
		goto err;

	GEM_BUG_ON(sg_page_sizes != obj->mm.page_mask);
	__i915_gem_object_set_pages(obj, st);

	return 0;

err:
	sg_set_page(sg, NULL, 0, 0);
	sg_mark_end(sg);
	huge_pages_free_pages(st);

	return -ENOMEM;
}

static void put_huge_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	i915_gem_gtt_finish_pages(obj, pages);
	huge_pages_free_pages(pages);

	obj->mm.dirty = false;

	__start_cpu_write(obj);
}

static const struct drm_i915_gem_object_ops huge_page_ops = {
	.name = "huge-gem",
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = get_huge_pages,
	.put_pages = put_huge_pages,
};

static struct drm_i915_gem_object *
huge_pages_object(struct drm_i915_private *i915,
		  u64 size,
		  unsigned int page_mask)
{
	static struct lock_class_key lock_class;
	struct drm_i915_gem_object *obj;
	unsigned int cache_level;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, BIT(__ffs(page_mask))));

	if (size >> PAGE_SHIFT > INT_MAX)
		return ERR_PTR(-E2BIG);

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	drm_gem_private_object_init(&i915->drm, &obj->base, size);
	i915_gem_object_init(obj, &huge_page_ops, &lock_class, 0);
	obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;
	i915_gem_object_set_volatile(obj);

	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;

	cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
	i915_gem_object_set_cache_coherency(obj, cache_level);

	obj->mm.page_mask = page_mask;

	return obj;
}
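
/*
 * The "fake" objects below have no real backing storage; only the sg dma
 * fields are populated, with sg_dma_address() (ab)used to carry the chosen
 * page size so the address is trivially aligned to it. Consequently
 * fake_free_huge_pages() frees no pages, just the table itself.
 */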

static int fake_get_huge_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	const u64 max_len = rounddown_pow_of_two(UINT_MAX);
	struct sg_table *st;
	struct scatterlist *sg;
	u64 rem;

	st = kmalloc(sizeof(*st), GFP);
	if (!st)
		return -ENOMEM;

	if (sg_alloc_table(st, obj->base.size >> PAGE_SHIFT, GFP)) {
		kfree(st);
		return -ENOMEM;
	}

	/* Use optimal page-sized chunks to fill in the sg table */
	rem = obj->base.size;
	sg = st->sgl;
	st->nents = 0;
	do {
		unsigned int page_size = get_largest_page_size(i915, rem);
		unsigned int len = min(page_size * div_u64(rem, page_size),
				       max_len);

		GEM_BUG_ON(!page_size);

		sg->offset = 0;
		sg->length = len;
		sg_dma_len(sg) = len;
		sg_dma_address(sg) = page_size;

		st->nents++;

		rem -= len;
		if (!rem) {
			sg_mark_end(sg);
			break;
		}

		sg = sg_next(sg);
	} while (1);

	i915_sg_trim(st);

	__i915_gem_object_set_pages(obj, st);

	return 0;
}

static int fake_get_huge_pages_single(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int page_size;

	st = kmalloc(sizeof(*st), GFP);
	if (!st)
		return -ENOMEM;

	if (sg_alloc_table(st, 1, GFP)) {
		kfree(st);
		return -ENOMEM;
	}

	sg = st->sgl;
	st->nents = 1;

	page_size = get_largest_page_size(i915, obj->base.size);
	GEM_BUG_ON(!page_size);

	sg->offset = 0;
	sg->length = obj->base.size;
	sg_dma_len(sg) = obj->base.size;
	sg_dma_address(sg) = page_size;

	__i915_gem_object_set_pages(obj, st);

	return 0;
#undef GFP
}

static void fake_free_huge_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages)
{
	sg_free_table(pages);
	kfree(pages);
}

static void fake_put_huge_pages(struct drm_i915_gem_object *obj,
				struct sg_table *pages)
{
	fake_free_huge_pages(obj, pages);
	obj->mm.dirty = false;
}

static const struct drm_i915_gem_object_ops fake_ops = {
	.name = "fake-gem",
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = fake_get_huge_pages,
	.put_pages = fake_put_huge_pages,
};

static const struct drm_i915_gem_object_ops fake_ops_single = {
	.name = "fake-gem",
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = fake_get_huge_pages_single,
	.put_pages = fake_put_huge_pages,
};

static struct drm_i915_gem_object *
fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single)
{
	static struct lock_class_key lock_class;
	struct drm_i915_gem_object *obj;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));

	if (size >> PAGE_SHIFT > UINT_MAX)
		return ERR_PTR(-E2BIG);

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	drm_gem_private_object_init(&i915->drm, &obj->base, size);

	if (single)
		i915_gem_object_init(obj, &fake_ops_single, &lock_class, 0);
	else
		i915_gem_object_init(obj, &fake_ops, &lock_class, 0);

	i915_gem_object_set_volatile(obj);

	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->cache_level = I915_CACHE_NONE;

	return obj;
}
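
/*
 * Common assertion helper for all the subtests below: wait for the async
 * bind, then verify that the vma's sg/phys/GTT page-size tracking is
 * internally consistent and only uses sizes the device supports.
 */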

static int igt_check_page_sizes(struct i915_vma *vma)
{
	struct drm_i915_private *i915 = vma->vm->i915;
	unsigned int supported = RUNTIME_INFO(i915)->page_sizes;
	struct drm_i915_gem_object *obj = vma->obj;
	int err;

	/* We have to wait for the async bind to complete before our asserts */
	err = i915_vma_sync(vma);
	if (err)
		return err;

	if (!HAS_PAGE_SIZES(i915, vma->page_sizes.sg)) {
		pr_err("unsupported page_sizes.sg=%u, supported=%u\n",
		       vma->page_sizes.sg & ~supported, supported);
		err = -EINVAL;
	}

	if (!HAS_PAGE_SIZES(i915, vma->resource->page_sizes_gtt)) {
		pr_err("unsupported page_sizes.gtt=%u, supported=%u\n",
		       vma->resource->page_sizes_gtt & ~supported, supported);
		err = -EINVAL;
	}

	if (vma->page_sizes.phys != obj->mm.page_sizes.phys) {
		pr_err("vma->page_sizes.phys(%u) != obj->mm.page_sizes.phys(%u)\n",
		       vma->page_sizes.phys, obj->mm.page_sizes.phys);
		err = -EINVAL;
	}

	if (vma->page_sizes.sg != obj->mm.page_sizes.sg) {
		pr_err("vma->page_sizes.sg(%u) != obj->mm.page_sizes.sg(%u)\n",
		       vma->page_sizes.sg, obj->mm.page_sizes.sg);
		err = -EINVAL;
	}

	/*
	 * The dma-api is like a box of chocolates when it comes to the
	 * alignment of dma addresses; for LMEM, however, we have total control
	 * and so can guarantee alignment. Likewise, our blocks should appear
	 * in descending order, and since we align to the largest page size for
	 * the GTT address, seeing 2M physical pages should imply 2M GTT pages.
	 * If we don't, then something might be wrong in our construction of
	 * the backing pages.
	 *
	 * Maintaining alignment is required to utilise huge pages in the ppGTT.
	 */
	if (i915_gem_object_is_lmem(obj) &&
	    IS_ALIGNED(vma->node.start, SZ_2M) &&
	    vma->page_sizes.sg & SZ_2M &&
	    vma->resource->page_sizes_gtt < SZ_2M) {
		pr_err("gtt pages mismatch for LMEM, expected 2M GTT pages, sg(%u), gtt(%u)\n",
		       vma->page_sizes.sg, vma->resource->page_sizes_gtt);
		err = -EINVAL;
	}

	return err;
}
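
/*
 * The test below walks every non-empty subset of page_sizes[]: e.g. i = 0b101
 * selects { 2M, 4K }. SZ_4K is always OR'ed into the combination, since the
 * ppGTT itself requires 4K support.
 */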

static int igt_mock_exhaust_device_supported_pages(void *arg)
{
	struct i915_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	unsigned int saved_mask = RUNTIME_INFO(i915)->page_sizes;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int i, j, single;
	int err;

	/*
	 * Sanity check creating objects with every valid page support
	 * combination for our mock device.
	 */

	for (i = 1; i < BIT(ARRAY_SIZE(page_sizes)); i++) {
		unsigned int combination = SZ_4K; /* Required for ppGTT */

		for (j = 0; j < ARRAY_SIZE(page_sizes); j++) {
			if (i & BIT(j))
				combination |= page_sizes[j];
		}

		RUNTIME_INFO(i915)->page_sizes = combination;

		for (single = 0; single <= 1; ++single) {
			obj = fake_huge_pages_object(i915, combination, !!single);
			if (IS_ERR(obj)) {
				err = PTR_ERR(obj);
				goto out_device;
			}

			if (obj->base.size != combination) {
				pr_err("obj->base.size=%zu, expected=%u\n",
				       obj->base.size, combination);
				err = -EINVAL;
				goto out_put;
			}

			vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
			if (IS_ERR(vma)) {
				err = PTR_ERR(vma);
				goto out_put;
			}

			err = i915_vma_pin(vma, 0, 0, PIN_USER);
			if (err)
				goto out_put;

			err = igt_check_page_sizes(vma);

			if (vma->page_sizes.sg != combination) {
				pr_err("page_sizes.sg=%u, expected=%u\n",
				       vma->page_sizes.sg, combination);
				err = -EINVAL;
			}

			i915_vma_unpin(vma);
			i915_gem_object_put(obj);

			if (err)
				goto out_device;
		}
	}

	goto out_device;

out_put:
	i915_gem_object_put(obj);
out_device:
	RUNTIME_INFO(i915)->page_sizes = saved_mask;

	return err;
}

static int igt_mock_memory_region_huge_pages(void *arg)
{
	const unsigned int flags[] = { 0, I915_BO_ALLOC_CONTIGUOUS };
	struct i915_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	unsigned long supported = RUNTIME_INFO(i915)->page_sizes;
	struct intel_memory_region *mem;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int bit;
	int err = 0;

	mem = mock_region_create(i915, 0, SZ_2G, I915_GTT_PAGE_SIZE_4K, 0, 0);
	if (IS_ERR(mem)) {
		pr_err("%s failed to create memory region\n", __func__);
		return PTR_ERR(mem);
	}

	for_each_set_bit(bit, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
		unsigned int page_size = BIT(bit);
		resource_size_t phys;
		int i;

		for (i = 0; i < ARRAY_SIZE(flags); ++i) {
			obj = i915_gem_object_create_region(mem,
							    page_size, page_size,
							    flags[i]);
			if (IS_ERR(obj)) {
				err = PTR_ERR(obj);
				goto out_region;
			}

			vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
			if (IS_ERR(vma)) {
				err = PTR_ERR(vma);
				goto out_put;
			}

			err = i915_vma_pin(vma, 0, 0, PIN_USER);
			if (err)
				goto out_put;

			err = igt_check_page_sizes(vma);
			if (err)
				goto out_unpin;

			phys = i915_gem_object_get_dma_address(obj, 0);
			if (!IS_ALIGNED(phys, page_size)) {
				pr_err("%s addr misaligned(%pa) page_size=%u\n",
				       __func__, &phys, page_size);
				err = -EINVAL;
				goto out_unpin;
			}

			if (vma->resource->page_sizes_gtt != page_size) {
				pr_err("%s page_sizes.gtt=%u, expected=%u\n",
				       __func__, vma->resource->page_sizes_gtt,
				       page_size);
				err = -EINVAL;
				goto out_unpin;
			}

			i915_vma_unpin(vma);
			__i915_gem_object_put_pages(obj);
			i915_gem_object_put(obj);
		}
	}

	goto out_region;

out_unpin:
	i915_vma_unpin(vma);
out_put:
	i915_gem_object_put(obj);
out_region:
	intel_memory_region_destroy(mem);
	return err;
}
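
/*
 * A note on the fixed-offset pins used from here on: as the 'flags | offset'
 * calls below suggest, i915_vma_pin() takes the fixed GTT offset OR'ed
 * directly into the flags word alongside PIN_OFFSET_FIXED -- offsets are
 * page aligned, so they never collide with the low flag bits.
 */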

static int igt_mock_ppgtt_misaligned_dma(void *arg)
{
	struct i915_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	unsigned long supported = RUNTIME_INFO(i915)->page_sizes;
	struct drm_i915_gem_object *obj;
	int bit;
	int err;

	/*
	 * Sanity check dma misalignment for huge pages -- the dma addresses we
	 * insert into the paging structures need to always respect the page
	 * size alignment.
	 */

	bit = ilog2(I915_GTT_PAGE_SIZE_64K);

	for_each_set_bit_from(bit, &supported,
			      ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
		IGT_TIMEOUT(end_time);
		unsigned int page_size = BIT(bit);
		unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
		unsigned int offset;
		unsigned int size =
			round_up(page_size, I915_GTT_PAGE_SIZE_2M) << 1;
		struct i915_vma *vma;

		obj = fake_huge_pages_object(i915, size, true);
		if (IS_ERR(obj))
			return PTR_ERR(obj);

		if (obj->base.size != size) {
			pr_err("obj->base.size=%zu, expected=%u\n",
			       obj->base.size, size);
			err = -EINVAL;
			goto out_put;
		}

		err = i915_gem_object_pin_pages_unlocked(obj);
		if (err)
			goto out_put;

		/* Force the page size for this object */
		obj->mm.page_sizes.sg = page_size;

		vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out_unpin;
		}

		err = i915_vma_pin(vma, 0, 0, flags);
		if (err)
			goto out_unpin;

		err = igt_check_page_sizes(vma);

		if (vma->resource->page_sizes_gtt != page_size) {
			pr_err("page_sizes.gtt=%u, expected %u\n",
			       vma->resource->page_sizes_gtt, page_size);
			err = -EINVAL;
		}

		i915_vma_unpin(vma);

		if (err)
			goto out_unpin;

		/*
		 * Try all the other valid offsets until the next
		 * boundary -- should always fall back to using 4K
		 * pages.
		 */
		for (offset = 4096; offset < page_size; offset += 4096) {
			err = i915_vma_unbind_unlocked(vma);
			if (err)
				goto out_unpin;

			err = i915_vma_pin(vma, 0, 0, flags | offset);
			if (err)
				goto out_unpin;

			err = igt_check_page_sizes(vma);

			if (vma->resource->page_sizes_gtt != I915_GTT_PAGE_SIZE_4K) {
				pr_err("page_sizes.gtt=%u, expected %llu\n",
				       vma->resource->page_sizes_gtt,
				       I915_GTT_PAGE_SIZE_4K);
				err = -EINVAL;
			}

			i915_vma_unpin(vma);

			if (err)
				goto out_unpin;

			if (igt_timeout(end_time,
					"%s timed out at offset %x with page-size %x\n",
					__func__, offset, page_size))
				break;
		}

		i915_gem_object_lock(obj, NULL);
		i915_gem_object_unpin_pages(obj);
		__i915_gem_object_put_pages(obj);
		i915_gem_object_unlock(obj);
		i915_gem_object_put(obj);
	}

	return 0;

out_unpin:
	i915_gem_object_lock(obj, NULL);
	i915_gem_object_unpin_pages(obj);
	i915_gem_object_unlock(obj);
out_put:
	i915_gem_object_put(obj);

	return err;
}

static void close_object_list(struct list_head *objects,
			      struct i915_ppgtt *ppgtt)
{
	struct drm_i915_gem_object *obj, *on;

	list_for_each_entry_safe(obj, on, objects, st_link) {
		list_del(&obj->st_link);
		i915_gem_object_lock(obj, NULL);
		i915_gem_object_unpin_pages(obj);
		__i915_gem_object_put_pages(obj);
		i915_gem_object_unlock(obj);
		i915_gem_object_put(obj);
	}
}
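
/*
 * Illustrative example for the expected_gtt computation in the test below:
 * size = 2M + 64K + 4K gives expected_gtt = 2M | 64K | 4K, which then
 * collapses to 2M | 4K, since a 4K remainder forces the non-2M portion of
 * the object to be inserted with 4K GTT pages.
 */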

static int igt_mock_ppgtt_huge_fill(void *arg)
{
	struct i915_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	unsigned long max_pages = ppgtt->vm.total >> PAGE_SHIFT;
	unsigned long page_num;
	bool single = false;
	LIST_HEAD(objects);
	IGT_TIMEOUT(end_time);
	int err = -ENODEV;

	for_each_prime_number_from(page_num, 1, max_pages) {
		struct drm_i915_gem_object *obj;
		u64 size = page_num << PAGE_SHIFT;
		struct i915_vma *vma;
		unsigned int expected_gtt = 0;
		int i;

		obj = fake_huge_pages_object(i915, size, single);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			break;
		}

		if (obj->base.size != size) {
			pr_err("obj->base.size=%zd, expected=%llu\n",
			       obj->base.size, size);
			i915_gem_object_put(obj);
			err = -EINVAL;
			break;
		}

		err = i915_gem_object_pin_pages_unlocked(obj);
		if (err) {
			i915_gem_object_put(obj);
			break;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			break;
		}

		err = i915_vma_pin(vma, 0, 0, PIN_USER);
		if (err)
			break;

		err = igt_check_page_sizes(vma);
		if (err) {
			i915_vma_unpin(vma);
			break;
		}

		/*
		 * Figure out the expected gtt page size knowing that we go from
		 * largest to smallest page size sg chunks, and that we align to
		 * the largest page size.
		 */
		for (i = 0; i < ARRAY_SIZE(page_sizes); ++i) {
			unsigned int page_size = page_sizes[i];

			if (HAS_PAGE_SIZES(i915, page_size) &&
			    size >= page_size) {
				expected_gtt |= page_size;
				size &= page_size - 1;
			}
		}

		GEM_BUG_ON(!expected_gtt);
		GEM_BUG_ON(size);

		if (expected_gtt & I915_GTT_PAGE_SIZE_4K)
			expected_gtt &= ~I915_GTT_PAGE_SIZE_64K;

		i915_vma_unpin(vma);

		if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) {
			if (!IS_ALIGNED(vma->node.start,
					I915_GTT_PAGE_SIZE_2M)) {
				pr_err("node.start(%llx) not aligned to 2M\n",
				       vma->node.start);
				err = -EINVAL;
				break;
			}

			if (!IS_ALIGNED(vma->node.size,
					I915_GTT_PAGE_SIZE_2M)) {
				pr_err("node.size(%llx) not aligned to 2M\n",
				       vma->node.size);
				err = -EINVAL;
				break;
			}
		}

		if (vma->resource->page_sizes_gtt != expected_gtt) {
			pr_err("gtt=%u, expected=%u, size=%zd, single=%s\n",
			       vma->resource->page_sizes_gtt, expected_gtt,
			       obj->base.size, str_yes_no(!!single));
			err = -EINVAL;
			break;
		}

		if (igt_timeout(end_time,
				"%s timed out at size %zd\n",
				__func__, obj->base.size))
			break;

		single = !single;
	}

	close_object_list(&objects, ppgtt);

	if (err == -ENOMEM || err == -ENOSPC)
		err = 0;

	return err;
}
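
/*
 * The object_info table in the test below encodes the 64K placement rules:
 * an object only keeps 64K GTT pages when it covers (or is padded to cover)
 * a whole 2M page-table block; a 4K-only remainder, or a deliberately
 * misaligned fixed offset, forces the 4K fallback.
 */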

static int igt_mock_ppgtt_64K(void *arg)
{
	struct i915_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	struct drm_i915_gem_object *obj;
	const struct object_info {
		unsigned int size;
		unsigned int gtt;
		unsigned int offset;
	} objects[] = {
		/* Cases with forced padding/alignment */
		{
			.size = SZ_64K,
			.gtt = I915_GTT_PAGE_SIZE_64K,
			.offset = 0,
		},
		{
			.size = SZ_64K + SZ_4K,
			.gtt = I915_GTT_PAGE_SIZE_4K,
			.offset = 0,
		},
		{
			.size = SZ_64K - SZ_4K,
			.gtt = I915_GTT_PAGE_SIZE_4K,
			.offset = 0,
		},
		{
			.size = SZ_2M,
			.gtt = I915_GTT_PAGE_SIZE_64K,
			.offset = 0,
		},
		{
			.size = SZ_2M - SZ_4K,
			.gtt = I915_GTT_PAGE_SIZE_4K,
			.offset = 0,
		},
		{
			.size = SZ_2M + SZ_4K,
			.gtt = I915_GTT_PAGE_SIZE_64K | I915_GTT_PAGE_SIZE_4K,
			.offset = 0,
		},
		{
			.size = SZ_2M + SZ_64K,
			.gtt = I915_GTT_PAGE_SIZE_64K,
			.offset = 0,
		},
		{
			.size = SZ_2M - SZ_64K,
			.gtt = I915_GTT_PAGE_SIZE_64K,
			.offset = 0,
		},
		/* Try without any forced padding/alignment */
		{
			.size = SZ_64K,
			.offset = SZ_2M,
			.gtt = I915_GTT_PAGE_SIZE_4K,
		},
		{
			.size = SZ_128K,
			.offset = SZ_2M - SZ_64K,
			.gtt = I915_GTT_PAGE_SIZE_4K,
		},
	};
	struct i915_vma *vma;
	int i, single;
	int err;

	/*
	 * Sanity check some of the trickiness with 64K pages -- either we can
	 * safely mark the whole page-table (2M block) as 64K, or we have to
	 * always fall back to 4K.
	 */

	if (!HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K))
		return 0;

	for (i = 0; i < ARRAY_SIZE(objects); ++i) {
		unsigned int size = objects[i].size;
		unsigned int expected_gtt = objects[i].gtt;
		unsigned int offset = objects[i].offset;
		unsigned int flags = PIN_USER;

		for (single = 0; single <= 1; single++) {
			obj = fake_huge_pages_object(i915, size, !!single);
			if (IS_ERR(obj))
				return PTR_ERR(obj);

			err = i915_gem_object_pin_pages_unlocked(obj);
			if (err)
				goto out_object_put;

			/*
			 * Disable 2M pages -- We only want to use 64K/4K pages
			 * for this test.
			 */
			obj->mm.page_sizes.sg &= ~I915_GTT_PAGE_SIZE_2M;

			vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
			if (IS_ERR(vma)) {
				err = PTR_ERR(vma);
				goto out_object_unpin;
			}

			if (offset)
				flags |= PIN_OFFSET_FIXED | offset;

			err = i915_vma_pin(vma, 0, 0, flags);
			if (err)
				goto out_object_unpin;

			err = igt_check_page_sizes(vma);
			if (err)
				goto out_vma_unpin;

			if (!offset && vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) {
				if (!IS_ALIGNED(vma->node.start,
						I915_GTT_PAGE_SIZE_2M)) {
					pr_err("node.start(%llx) not aligned to 2M\n",
					       vma->node.start);
					err = -EINVAL;
					goto out_vma_unpin;
				}

				if (!IS_ALIGNED(vma->node.size,
						I915_GTT_PAGE_SIZE_2M)) {
					pr_err("node.size(%llx) not aligned to 2M\n",
					       vma->node.size);
					err = -EINVAL;
					goto out_vma_unpin;
				}
			}

			if (vma->resource->page_sizes_gtt != expected_gtt) {
				pr_err("gtt=%u, expected=%u, i=%d, single=%s\n",
				       vma->resource->page_sizes_gtt,
				       expected_gtt, i, str_yes_no(!!single));
				err = -EINVAL;
				goto out_vma_unpin;
			}

			i915_vma_unpin(vma);
			i915_gem_object_lock(obj, NULL);
			i915_gem_object_unpin_pages(obj);
			__i915_gem_object_put_pages(obj);
			i915_gem_object_unlock(obj);
			i915_gem_object_put(obj);

			i915_gem_drain_freed_objects(i915);
		}
	}

	return 0;

out_vma_unpin:
	i915_vma_unpin(vma);
out_object_unpin:
	i915_gem_object_lock(obj, NULL);
	i915_gem_object_unpin_pages(obj);
	i915_gem_object_unlock(obj);
out_object_put:
	i915_gem_object_put(obj);

	return err;
}

static int gpu_write(struct intel_context *ce,
		     struct i915_vma *vma,
		     u32 dw,
		     u32 val)
{
	int err;

	i915_gem_object_lock(vma->obj, NULL);
	err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
	i915_gem_object_unlock(vma->obj);
	if (err)
		return err;

	return igt_gpu_fill_dw(ce, vma, dw * sizeof(u32),
			       vma->size >> PAGE_SHIFT, val);
}
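
/*
 * gpu_write() above stores one dword per page through the GPU; the helpers
 * below read the values back on the CPU, via kmap for shmem-backed objects
 * or a WC vmap for everything else.
 */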

static int
__cpu_check_shmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
{
	unsigned int needs_flush;
	unsigned long n;
	int err;

	i915_gem_object_lock(obj, NULL);
	err = i915_gem_object_prepare_read(obj, &needs_flush);
	if (err)
		goto err_unlock;

	for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
		u32 *ptr = kmap_atomic(i915_gem_object_get_page(obj, n));

		if (needs_flush & CLFLUSH_BEFORE)
			drm_clflush_virt_range(ptr, PAGE_SIZE);

		if (ptr[dword] != val) {
			pr_err("n=%lu ptr[%u]=%u, val=%u\n",
			       n, dword, ptr[dword], val);
			kunmap_atomic(ptr);
			err = -EINVAL;
			break;
		}

		kunmap_atomic(ptr);
	}

	i915_gem_object_finish_access(obj);
err_unlock:
	i915_gem_object_unlock(obj);

	return err;
}

static int __cpu_check_vmap(struct drm_i915_gem_object *obj, u32 dword, u32 val)
{
	unsigned long n = obj->base.size >> PAGE_SHIFT;
	u32 *ptr;
	int err;

	err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT);
	if (err)
		return err;

	ptr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	ptr += dword;
	while (n--) {
		if (*ptr != val) {
			pr_err("base[%u]=%08x, val=%08x\n",
			       dword, *ptr, val);
			err = -EINVAL;
			break;
		}

		ptr += PAGE_SIZE / sizeof(*ptr);
	}

	i915_gem_object_unpin_map(obj);
	return err;
}

static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
{
	if (i915_gem_object_has_struct_page(obj))
		return __cpu_check_shmem(obj, dword, val);
	else
		return __cpu_check_vmap(obj, dword, val);
}

static int __igt_write_huge(struct intel_context *ce,
			    struct drm_i915_gem_object *obj,
			    u64 size, u64 offset,
			    u32 dword, u32 val)
{
	unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
	struct i915_vma *vma;
	int err;

	vma = i915_vma_instance(obj, ce->vm, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	err = i915_vma_pin(vma, size, 0, flags | offset);
	if (err) {
		/*
		 * The ggtt may have some pages reserved so
		 * refrain from erroring out.
		 */
		if (err == -ENOSPC && i915_is_ggtt(ce->vm))
			err = 0;

		return err;
	}

	err = igt_check_page_sizes(vma);
	if (err)
		goto out_vma_unpin;

	err = gpu_write(ce, vma, dword, val);
	if (err) {
		pr_err("gpu-write failed at offset=%llx\n", offset);
		goto out_vma_unpin;
	}

	err = cpu_check(obj, dword, val);
	if (err) {
		pr_err("cpu-check failed at offset=%llx\n", offset);
		goto out_vma_unpin;
	}

out_vma_unpin:
	i915_vma_unpin(vma);
	return err;
}
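
/*
 * igt_write_huge() below hammers the object with GPU writes at
 * prime-numbered offsets walked from both ends of the address space, across
 * a randomised engine order, so that bugs hidden by always binding at
 * offset = 0 still get caught.
 */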

static int igt_write_huge(struct drm_i915_private *i915,
			  struct drm_i915_gem_object *obj)
{
	struct i915_gem_engines *engines;
	struct i915_gem_engines_iter it;
	struct intel_context *ce;
	I915_RND_STATE(prng);
	IGT_TIMEOUT(end_time);
	unsigned int max_page_size;
	unsigned int count;
	struct i915_gem_context *ctx;
	struct file *file;
	u64 max;
	u64 num;
	u64 size;
	int *order;
	int i, n;
	int err = 0;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ctx = hugepage_ctx(i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out;
	}

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	size = obj->base.size;
	if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
	    !HAS_64K_PAGES(i915))
		size = round_up(size, I915_GTT_PAGE_SIZE_2M);

	n = 0;
	count = 0;
	max = U64_MAX;
	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		count++;
		if (!intel_engine_can_store_dword(ce->engine))
			continue;

		max = min(max, ce->vm->total);
		n++;
	}
	i915_gem_context_unlock_engines(ctx);
	if (!n)
		goto out;

	/*
	 * To keep things interesting when alternating between engines in our
	 * randomized order, let's also make feeding to the same engine a few
	 * times in succession a possibility by enlarging the permutation array.
	 */
	order = i915_random_order(count * count, &prng);
	if (!order) {
		err = -ENOMEM;
		goto out;
	}

	max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg);
	max = div_u64(max - size, max_page_size);

	/*
	 * Try various offsets in an ascending/descending fashion until we
	 * timeout -- we want to avoid issues hidden by effectively always using
	 * offset = 0.
	 */
	i = 0;
	engines = i915_gem_context_lock_engines(ctx);
	for_each_prime_number_from(num, 0, max) {
		u64 offset_low = num * max_page_size;
		u64 offset_high = (max - num) * max_page_size;
		u32 dword = offset_in_page(num) / 4;
		struct intel_context *ce;

		ce = engines->engines[order[i] % engines->num_engines];
		i = (i + 1) % (count * count);
		if (!ce || !intel_engine_can_store_dword(ce->engine))
			continue;

		/*
		 * In order to utilize 64K pages we need to both pad the vma
		 * size and ensure the vma offset is at the start of the pt
		 * boundary, however to improve coverage we opt for testing both
		 * aligned and unaligned offsets.
		 *
		 * With PS64 this is no longer the case, but to ensure we
		 * sometimes get the compact layout for smaller objects, apply
		 * the round_up anyway.
		 */
		if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
			offset_low = round_down(offset_low,
						I915_GTT_PAGE_SIZE_2M);

		err = __igt_write_huge(ce, obj, size, offset_low,
				       dword, num + 1);
		if (err)
			break;

		err = __igt_write_huge(ce, obj, size, offset_high,
				       dword, num + 1);
		if (err)
			break;

		if (igt_timeout(end_time,
				"%s timed out on %s, offset_low=%llx offset_high=%llx, max_page_size=%x\n",
				__func__, ce->engine->name, offset_low, offset_high,
				max_page_size))
			break;
	}
	i915_gem_context_unlock_engines(ctx);

	kfree(order);

out:
	fput(file);
	return err;
}

typedef struct drm_i915_gem_object *
(*igt_create_fn)(struct drm_i915_private *i915, u32 size, u32 flags);

static inline bool igt_can_allocate_thp(struct drm_i915_private *i915)
{
	return i915->mm.gemfs && has_transparent_hugepage();
}

static struct drm_i915_gem_object *
igt_create_shmem(struct drm_i915_private *i915, u32 size, u32 flags)
{
	if (!igt_can_allocate_thp(i915)) {
		pr_info("%s missing THP support, skipping\n", __func__);
		return ERR_PTR(-ENODEV);
	}

	return i915_gem_object_create_shmem(i915, size);
}

static struct drm_i915_gem_object *
igt_create_internal(struct drm_i915_private *i915, u32 size, u32 flags)
{
	return i915_gem_object_create_internal(i915, size);
}

static struct drm_i915_gem_object *
igt_create_system(struct drm_i915_private *i915, u32 size, u32 flags)
{
	return huge_pages_object(i915, size, size);
}

static struct drm_i915_gem_object *
igt_create_local(struct drm_i915_private *i915, u32 size, u32 flags)
{
	return i915_gem_object_create_lmem(i915, size, flags);
}

static u32 igt_random_size(struct rnd_state *prng,
			   u32 min_page_size,
			   u32 max_page_size)
{
	u64 mask;
	u32 size;

	GEM_BUG_ON(!is_power_of_2(min_page_size));
	GEM_BUG_ON(!is_power_of_2(max_page_size));
	GEM_BUG_ON(min_page_size < PAGE_SIZE);
	GEM_BUG_ON(min_page_size > max_page_size);

	mask = ((max_page_size << 1ULL) - 1) & PAGE_MASK;
	size = prandom_u32_state(prng) & mask;
	if (size < min_page_size)
		size |= min_page_size;

	return size;
}
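
/*
 * Illustrative sizing for igt_random_size() above: with min_page_size = 64K
 * and max_page_size = 2M, the mask keeps bits [PAGE_SHIFT, 21], so the raw
 * size is page aligned and below 4M, and OR-ing in min_page_size guarantees
 * a result of at least 64K.
 */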

static int igt_ppgtt_smoke_huge(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	I915_RND_STATE(prng);
	struct {
		igt_create_fn fn;
		u32 min;
		u32 max;
	} backends[] = {
		{ igt_create_internal, SZ_64K, SZ_2M,  },
		{ igt_create_shmem,    SZ_64K, SZ_32M, },
		{ igt_create_local,    SZ_64K, SZ_1G,  },
	};
	int err;
	int i;

	/*
	 * Sanity check that the HW uses huge pages correctly through our
	 * various backends -- ensure that our writes land in the right place.
	 */

	for (i = 0; i < ARRAY_SIZE(backends); ++i) {
		u32 min = backends[i].min;
		u32 max = backends[i].max;
		u32 size = max;

try_again:
		size = igt_random_size(&prng, min, rounddown_pow_of_two(size));

		obj = backends[i].fn(i915, size, 0);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			if (err == -E2BIG) {
				size >>= 1;
				goto try_again;
			} else if (err == -ENODEV) {
				err = 0;
				continue;
			}

			return err;
		}

		err = i915_gem_object_pin_pages_unlocked(obj);
		if (err) {
			if (err == -ENXIO || err == -E2BIG || err == -ENOMEM) {
				i915_gem_object_put(obj);
				size >>= 1;
				goto try_again;
			}
			goto out_put;
		}

		if (obj->mm.page_sizes.phys < min) {
			pr_info("%s unable to allocate huge-page(s) with size=%u, i=%d\n",
				__func__, size, i);
			err = -ENOMEM;
			goto out_unpin;
		}

		err = igt_write_huge(i915, obj);
		if (err) {
			pr_err("%s write-huge failed with size=%u, i=%d\n",
			       __func__, size, i);
		}
out_unpin:
		i915_gem_object_lock(obj, NULL);
		i915_gem_object_unpin_pages(obj);
		__i915_gem_object_put_pages(obj);
		i915_gem_object_unlock(obj);
out_put:
		i915_gem_object_put(obj);

		if (err == -ENOMEM || err == -ENXIO)
			err = 0;

		if (err)
			break;

		cond_resched();
	}

	return err;
}
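
/*
 * The size/pages combos in the test below pick the obvious 64K/2M corner
 * cases: exact fits plus one-page overshoots and undershoots. 'pages' is the
 * page-size mask forced onto the object (after masking with what the device
 * supports) before writing.
 */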

static int igt_ppgtt_sanity_check(void *arg)
{
	struct drm_i915_private *i915 = arg;
	unsigned int supported = RUNTIME_INFO(i915)->page_sizes;
	struct {
		igt_create_fn fn;
		unsigned int flags;
	} backends[] = {
		{ igt_create_system, 0,                        },
		{ igt_create_local,  0,                        },
		{ igt_create_local,  I915_BO_ALLOC_CONTIGUOUS, },
	};
	struct {
		u32 size;
		u32 pages;
	} combos[] = {
		{ SZ_64K,		SZ_64K		},
		{ SZ_2M,		SZ_2M		},
		{ SZ_2M,		SZ_64K		},
		{ SZ_2M - SZ_64K,	SZ_64K		},
		{ SZ_2M - SZ_4K,	SZ_64K | SZ_4K	},
		{ SZ_2M + SZ_4K,	SZ_64K | SZ_4K	},
		{ SZ_2M + SZ_4K,	SZ_2M | SZ_4K	},
		{ SZ_2M + SZ_64K,	SZ_2M | SZ_64K	},
		{ SZ_2M + SZ_64K,	SZ_64K		},
	};
	int i, j;
	int err;

	if (supported == I915_GTT_PAGE_SIZE_4K)
		return 0;

	/*
	 * Sanity check that the HW behaves with a limited set of combinations.
	 * We already have a bunch of randomised testing, which should give us
	 * a decent amount of variation between runs, however we should keep
	 * this to limit the chances of introducing a temporary regression, by
	 * testing the most obvious cases that might make something blow up.
	 */

	for (i = 0; i < ARRAY_SIZE(backends); ++i) {
		for (j = 0; j < ARRAY_SIZE(combos); ++j) {
			struct drm_i915_gem_object *obj;
			u32 size = combos[j].size;
			u32 pages = combos[j].pages;

			obj = backends[i].fn(i915, size, backends[i].flags);
			if (IS_ERR(obj)) {
				err = PTR_ERR(obj);
				if (err == -ENODEV) {
					pr_info("Device lacks local memory, skipping\n");
					err = 0;
					break;
				}

				return err;
			}

			err = i915_gem_object_pin_pages_unlocked(obj);
			if (err) {
				i915_gem_object_put(obj);
				goto out;
			}

			GEM_BUG_ON(pages > obj->base.size);
			pages = pages & supported;

			if (pages)
				obj->mm.page_sizes.sg = pages;

			err = igt_write_huge(i915, obj);

			i915_gem_object_lock(obj, NULL);
			i915_gem_object_unpin_pages(obj);
			__i915_gem_object_put_pages(obj);
			i915_gem_object_unlock(obj);
			i915_gem_object_put(obj);

			if (err) {
				pr_err("%s write-huge failed with size=%u pages=%u i=%d, j=%d\n",
				       __func__, size, pages, i, j);
				goto out;
			}
		}

		cond_resched();
	}

out:
	if (err == -ENOMEM)
		err = 0;

	return err;
}

static int igt_ppgtt_compact(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	int err;

	/*
	 * Simple test to catch issues with compact 64K pages -- since the pt is
	 * compacted to 256B that gives us 32 entries per pt, however since the
	 * backing page for the pt is 4K, any extra entries we might incorrectly
	 * write out should be ignored by the HW. If we ever hit such a case,
	 * this test should catch it since some of our writes would land in
	 * scratch.
	 */

	if (!HAS_64K_PAGES(i915)) {
		pr_info("device lacks compact 64K page support, skipping\n");
		return 0;
	}

	if (!HAS_LMEM(i915)) {
		pr_info("device lacks LMEM support, skipping\n");
		return 0;
	}

	/* We want the range to cover multiple page-table boundaries. */
	obj = i915_gem_object_create_lmem(i915, SZ_4M, 0);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	err = i915_gem_object_pin_pages_unlocked(obj);
	if (err)
		goto out_put;

	if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_64K) {
		pr_info("LMEM compact unable to allocate huge-page(s)\n");
		goto out_unpin;
	}

	/*
	 * Disable 2M GTT pages by forcing the page-size to 64K for the GTT
	 * insertion.
	 */
	obj->mm.page_sizes.sg = I915_GTT_PAGE_SIZE_64K;

	err = igt_write_huge(i915, obj);
	if (err)
		pr_err("LMEM compact write-huge failed\n");

out_unpin:
	i915_gem_object_unpin_pages(obj);
out_put:
	i915_gem_object_put(obj);

	if (err == -ENOMEM)
		err = 0;

	return err;
}
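
/*
 * igt_ppgtt_mixed() below packs an address range with alternating LMEM and
 * SMEM objects of random size, so 64K-backed and 4K-backed vmas end up side
 * by side within the same page-tables -- exactly the layout the PS64 hint
 * has to cope with.
 */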

static int igt_ppgtt_mixed(void *arg)
{
	struct drm_i915_private *i915 = arg;
	const unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
	struct drm_i915_gem_object *obj, *on;
	struct i915_gem_engines *engines;
	struct i915_gem_engines_iter it;
	struct i915_address_space *vm;
	struct i915_gem_context *ctx;
	struct intel_context *ce;
	struct file *file;
	I915_RND_STATE(prng);
	LIST_HEAD(objects);
	struct intel_memory_region *mr;
	struct i915_vma *vma;
	unsigned int count;
	u32 i, addr;
	int *order;
	int n, err;

	/*
	 * Sanity check mixing 4K and 64K pages within the same page-table via
	 * the new PS64 TLB hint.
	 */

	if (!HAS_64K_PAGES(i915)) {
		pr_info("device lacks PS64, skipping\n");
		return 0;
	}

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ctx = hugepage_ctx(i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out;
	}
	vm = i915_gem_context_get_eb_vm(ctx);

	i = 0;
	addr = 0;
	do {
		u32 sz;

		sz = i915_prandom_u32_max_state(SZ_4M, &prng);
		sz = max_t(u32, sz, SZ_4K);

		mr = i915->mm.regions[INTEL_REGION_LMEM_0];
		if (i & 1)
			mr = i915->mm.regions[INTEL_REGION_SMEM];

		obj = i915_gem_object_create_region(mr, sz, 0, 0);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out_vm;
		}

		list_add_tail(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_put;
		}

		addr = round_up(addr, mr->min_page_size);
		err = i915_vma_pin(vma, 0, 0, addr | flags);
		if (err)
			goto err_put;

		if (mr->type == INTEL_MEMORY_LOCAL &&
		    (vma->resource->page_sizes_gtt & I915_GTT_PAGE_SIZE_4K)) {
			err = -EINVAL;
			goto err_put;
		}

		addr += obj->base.size;
		i++;
	} while (addr <= SZ_16M);

	n = 0;
	count = 0;
	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		count++;
		if (!intel_engine_can_store_dword(ce->engine))
			continue;

		n++;
	}
	i915_gem_context_unlock_engines(ctx);
	if (!n)
		goto err_put;

	order = i915_random_order(count * count, &prng);
	if (!order) {
		err = -ENOMEM;
		goto err_put;
	}

	i = 0;
	addr = 0;
	engines = i915_gem_context_lock_engines(ctx);
	list_for_each_entry(obj, &objects, st_link) {
		u32 rnd = i915_prandom_u32_max_state(UINT_MAX, &prng);

		addr = round_up(addr, obj->mm.region->min_page_size);

		ce = engines->engines[order[i] % engines->num_engines];
		i = (i + 1) % (count * count);
		if (!ce || !intel_engine_can_store_dword(ce->engine))
			continue;

		err = __igt_write_huge(ce, obj, obj->base.size, addr, 0, rnd);
		if (err)
			break;

		err = __igt_write_huge(ce, obj, obj->base.size, addr,
				       offset_in_page(rnd) / sizeof(u32), rnd + 1);
		if (err)
			break;

		err = __igt_write_huge(ce, obj, obj->base.size, addr,
				       (PAGE_SIZE / sizeof(u32)) - 1,
				       rnd + 2);
		if (err)
			break;

		addr += obj->base.size;

		cond_resched();
	}

	i915_gem_context_unlock_engines(ctx);
	kfree(order);
err_put:
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		list_del(&obj->st_link);
		i915_gem_object_put(obj);
	}
out_vm:
	i915_vm_put(vm);
out:
	fput(file);
	return err;
}

static int igt_tmpfs_fallback(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_address_space *vm;
	struct i915_gem_context *ctx;
	struct vfsmount *gemfs = i915->mm.gemfs;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	struct file *file;
	u32 *vaddr;
	int err = 0;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ctx = hugepage_ctx(i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out;
	}
	vm = i915_gem_context_get_eb_vm(ctx);

	/*
	 * Make sure that we don't burst into a ball of flames upon falling back
	 * to tmpfs, which we rely on if on the off-chance we encounter a failure
	 * when setting up gemfs.
	 */

	i915->mm.gemfs = NULL;

	obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_restore;
	}

	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out_put;
	}
	*vaddr = 0xdeadbeaf;

	__i915_gem_object_flush_map(obj, 0, 64);
	i915_gem_object_unpin_map(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto out_put;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto out_put;

	err = igt_check_page_sizes(vma);

	i915_vma_unpin(vma);
out_put:
	i915_gem_object_put(obj);
out_restore:
	i915->mm.gemfs = gemfs;

	i915_vm_put(vm);
out:
	fput(file);
	return err;
}
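
/*
 * igt_shrink_thp() below keys its expectations off get_nr_swap_pages(): with
 * no swap configured the shrinker must leave the object's pages in place,
 * while with swap available it must drop both the pages and the object's
 * recorded page-size bits.
 */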

static int igt_shrink_thp(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_address_space *vm;
	struct i915_gem_context *ctx;
	struct drm_i915_gem_object *obj;
	struct i915_gem_engines_iter it;
	struct intel_context *ce;
	struct i915_vma *vma;
	struct file *file;
	unsigned int flags = PIN_USER;
	unsigned int n;
	intel_wakeref_t wf;
	bool should_swap;
	int err;

	if (!igt_can_allocate_thp(i915)) {
		pr_info("missing THP support, skipping\n");
		return 0;
	}

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ctx = hugepage_ctx(i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out;
	}
	vm = i915_gem_context_get_eb_vm(ctx);

	/*
	 * Sanity check shrinking huge-paged object -- make sure nothing blows
	 * up.
	 */

	obj = i915_gem_object_create_shmem(i915, SZ_2M);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_vm;
	}

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto out_put;
	}

	wf = intel_runtime_pm_get(&i915->runtime_pm); /* active shrink */

	err = i915_vma_pin(vma, 0, 0, flags);
	if (err)
		goto out_wf;

	if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_2M) {
		pr_info("failed to allocate THP, finishing test early\n");
		goto out_unpin;
	}

	err = igt_check_page_sizes(vma);
	if (err)
		goto out_unpin;

	n = 0;

	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		if (!intel_engine_can_store_dword(ce->engine))
			continue;

		err = gpu_write(ce, vma, n++, 0xdeadbeaf);
		if (err)
			break;
	}
	i915_gem_context_unlock_engines(ctx);
	/*
	 * Nuke everything *before* we unpin the pages so we can be reasonably
	 * sure that when later checking get_nr_swap_pages() that some random
	 * leftover object doesn't steal the remaining swap space.
	 */
	i915_gem_shrink(NULL, i915, -1UL, NULL,
			I915_SHRINK_BOUND |
			I915_SHRINK_UNBOUND |
			I915_SHRINK_ACTIVE);
	i915_vma_unpin(vma);
	if (err)
		goto out_put;

	/*
	 * Now that the pages are *unpinned* shrinking should invoke
	 * shmem to truncate our pages, if we have available swap.
	 */
	should_swap = get_nr_swap_pages() > 0;
	i915_gem_shrink(NULL, i915, -1UL, NULL,
			I915_SHRINK_BOUND |
			I915_SHRINK_UNBOUND |
			I915_SHRINK_ACTIVE |
			I915_SHRINK_WRITEBACK);
	if (should_swap == i915_gem_object_has_pages(obj)) {
		pr_err("unexpected pages mismatch, should_swap=%s\n",
		       str_yes_no(should_swap));
		err = -EINVAL;
		goto out_put;
	}

	if (should_swap == (obj->mm.page_sizes.sg || obj->mm.page_sizes.phys)) {
		pr_err("unexpected residual page-size bits, should_swap=%s\n",
		       str_yes_no(should_swap));
		err = -EINVAL;
		goto out_put;
	}

	err = i915_vma_pin(vma, 0, 0, flags);
	if (err)
		goto out_put;

	while (n--) {
		err = cpu_check(obj, n, 0xdeadbeaf);
		if (err)
			break;
	}

out_unpin:
	i915_vma_unpin(vma);
out_wf:
	intel_runtime_pm_put(&i915->runtime_pm, wf);
out_put:
	i915_gem_object_put(obj);
out_vm:
	i915_vm_put(vm);
out:
	fput(file);
	return err;
}

int i915_gem_huge_page_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_mock_exhaust_device_supported_pages),
		SUBTEST(igt_mock_memory_region_huge_pages),
		SUBTEST(igt_mock_ppgtt_misaligned_dma),
		SUBTEST(igt_mock_ppgtt_huge_fill),
		SUBTEST(igt_mock_ppgtt_64K),
	};
	struct drm_i915_private *dev_priv;
	struct i915_ppgtt *ppgtt;
	int err;

	dev_priv = mock_gem_device();
	if (!dev_priv)
		return -ENOMEM;

	/* Pretend to be a device which supports the 48b PPGTT */
	RUNTIME_INFO(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL;
	RUNTIME_INFO(dev_priv)->ppgtt_size = 48;

	ppgtt = i915_ppgtt_create(to_gt(dev_priv), 0);
	if (IS_ERR(ppgtt)) {
		err = PTR_ERR(ppgtt);
		goto out_unlock;
	}

	if (!i915_vm_is_4lvl(&ppgtt->vm)) {
		pr_err("failed to create 48b PPGTT\n");
		err = -EINVAL;
		goto out_put;
	}

	/* If we ever hit this then it's time to mock the 64K scratch */
	if (!i915_vm_has_scratch_64K(&ppgtt->vm)) {
		pr_err("PPGTT missing 64K scratch page\n");
		err = -EINVAL;
		goto out_put;
	}

	err = i915_subtests(tests, ppgtt);

out_put:
	i915_vm_put(&ppgtt->vm);
out_unlock:
	mock_destroy_device(dev_priv);
	return err;
}

int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_shrink_thp),
		SUBTEST(igt_tmpfs_fallback),
		SUBTEST(igt_ppgtt_smoke_huge),
		SUBTEST(igt_ppgtt_sanity_check),
		SUBTEST(igt_ppgtt_compact),
		SUBTEST(igt_ppgtt_mixed),
	};

	if (!HAS_PPGTT(i915)) {
		pr_info("PPGTT not supported, skipping live-selftests\n");
		return 0;
	}

	if (intel_gt_is_wedged(to_gt(i915)))
		return 0;

	return i915_live_subtests(tests, i915);
}