/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#include <linux/prime_numbers.h>

#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_ttm.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_migrate.h"
#include "i915_ttm_buddy_manager.h"

#include "huge_gem_object.h"
#include "i915_selftest.h"
#include "selftests/i915_random.h"
#include "selftests/igt_flush_test.h"
#include "selftests/igt_mmap.h"

struct tile {
	unsigned int width;
	unsigned int height;
	unsigned int stride;
	unsigned int size;
	unsigned int tiling;
	unsigned int swizzle;
};

static u64 swizzle_bit(unsigned int bit, u64 offset)
{
	return (offset & BIT_ULL(bit)) >> (bit - 6);
}

static u64 tiled_offset(const struct tile *tile, u64 v)
{
	u64 x, y;

	if (tile->tiling == I915_TILING_NONE)
		return v;

	y = div64_u64_rem(v, tile->stride, &x);
	v = div64_u64_rem(y, tile->height, &y) * tile->stride * tile->height;

	if (tile->tiling == I915_TILING_X) {
		v += y * tile->width;
		v += div64_u64_rem(x, tile->width, &x) << tile->size;
		v += x;
	} else if (tile->width == 128) {
		const unsigned int ytile_span = 16;
		const unsigned int ytile_height = 512;

		v += y * ytile_span;
		v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
		v += x;
	} else {
		const unsigned int ytile_span = 32;
		const unsigned int ytile_height = 256;

		v += y * ytile_span;
		v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
		v += x;
	}

	switch (tile->swizzle) {
	case I915_BIT_6_SWIZZLE_9:
		v ^= swizzle_bit(9, v);
		break;
	case I915_BIT_6_SWIZZLE_9_10:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v);
		break;
	case I915_BIT_6_SWIZZLE_9_11:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(11, v);
		break;
	case I915_BIT_6_SWIZZLE_9_10_11:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v) ^ swizzle_bit(11, v);
		break;
	}

	return v;
}

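/*
 * Added commentary (not from the original source), a worked example of the
 * linear->tiled mapping above. Assume X-tiling on gen4+: width=512, height=8,
 * size=12 (4KiB tiles) and a stride of 2048 bytes (four tiles per tile row).
 * For the linear offset v=5000 we get x=904, y=2 within the first tile row,
 * which lands in the second tile (904/512 == 1) at byte 392 of its third
 * line, i.e. tiled_offset() returns 1*4096 + 2*512 + 392 = 5512. With
 * I915_BIT_6_SWIZZLE_9_10, bit 9 of 5512 is 0 and bit 10 is 1, so bit 6 is
 * flipped and the final CPU offset becomes 5512 ^ 64 = 5576.
 *
 * check_partial_mapping() below relies on exactly this mapping: it writes a
 * page's index at its linear offset through a randomly chosen partial GGTT
 * view and then reads the value back through the CPU page that
 * tiled_offset() (plus the detected swizzle) predicts.
 */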
static int check_partial_mapping(struct drm_i915_gem_object *obj,
				 const struct tile *tile,
				 struct rnd_state *prng)
{
	const unsigned long npages = obj->base.size / PAGE_SIZE;
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt_view view;
	struct i915_vma *vma;
	unsigned long page;
	u32 __iomem *io;
	struct page *p;
	unsigned int n;
	u64 offset;
	u32 *cpu;
	int err;

	err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
	if (err) {
		pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
		       tile->tiling, tile->stride, err);
		return err;
	}

	GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
	GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);

	i915_gem_object_lock(obj, NULL);
	err = i915_gem_object_set_to_gtt_domain(obj, true);
	i915_gem_object_unlock(obj);
	if (err) {
		pr_err("Failed to flush to GTT write domain; err=%d\n", err);
		return err;
	}

	page = i915_prandom_u32_max_state(npages, prng);
	view = compute_partial_view(obj, page, MIN_CHUNK_PAGES);

	vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma)) {
		pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
		       page, (int)PTR_ERR(vma));
		return PTR_ERR(vma);
	}

	n = page - view.partial.offset;
	GEM_BUG_ON(n >= view.partial.size);

	io = i915_vma_pin_iomap(vma);
	i915_vma_unpin(vma);
	if (IS_ERR(io)) {
		pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
		       page, (int)PTR_ERR(io));
		err = PTR_ERR(io);
		goto out;
	}

	iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
	i915_vma_unpin_iomap(vma);

	offset = tiled_offset(tile, page << PAGE_SHIFT);
	if (offset >= obj->base.size)
		goto out;

	intel_gt_flush_ggtt_writes(to_gt(i915));

	p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
	cpu = kmap(p) + offset_in_page(offset);
	drm_clflush_virt_range(cpu, sizeof(*cpu));
	if (*cpu != (u32)page) {
		pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
		       page, n,
		       view.partial.offset,
		       view.partial.size,
		       vma->size >> PAGE_SHIFT,
		       tile->tiling ? tile_row_pages(obj) : 0,
		       vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
		       offset >> PAGE_SHIFT,
		       (unsigned int)offset_in_page(offset),
		       offset,
		       (u32)page, *cpu);
		err = -EINVAL;
	}
	*cpu = 0;
	drm_clflush_virt_range(cpu, sizeof(*cpu));
	kunmap(p);

out:
	i915_gem_object_lock(obj, NULL);
	i915_vma_destroy(vma);
	i915_gem_object_unlock(obj);
	return err;
}

static int check_partial_mappings(struct drm_i915_gem_object *obj,
				  const struct tile *tile,
				  unsigned long end_time)
{
	const unsigned int nreal = obj->scratch / PAGE_SIZE;
	const unsigned long npages = obj->base.size / PAGE_SIZE;
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_vma *vma;
	unsigned long page;
	int err;

	err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
	if (err) {
		pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
		       tile->tiling, tile->stride, err);
		return err;
	}

	GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
	GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);

	i915_gem_object_lock(obj, NULL);
	err = i915_gem_object_set_to_gtt_domain(obj, true);
	i915_gem_object_unlock(obj);
	if (err) {
		pr_err("Failed to flush to GTT write domain; err=%d\n", err);
		return err;
	}

	for_each_prime_number_from(page, 1, npages) {
		struct i915_ggtt_view view =
			compute_partial_view(obj, page, MIN_CHUNK_PAGES);
		u32 __iomem *io;
		struct page *p;
		unsigned int n;
		u64 offset;
		u32 *cpu;

		GEM_BUG_ON(view.partial.size > nreal);
		cond_resched();

		vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
		if (IS_ERR(vma)) {
			pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
			       page, (int)PTR_ERR(vma));
			return PTR_ERR(vma);
		}

		n = page - view.partial.offset;
		GEM_BUG_ON(n >= view.partial.size);

		io = i915_vma_pin_iomap(vma);
		i915_vma_unpin(vma);
		if (IS_ERR(io)) {
			pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
			       page, (int)PTR_ERR(io));
			return PTR_ERR(io);
		}

		iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
		i915_vma_unpin_iomap(vma);

		offset = tiled_offset(tile, page << PAGE_SHIFT);
		if (offset >= obj->base.size)
			continue;

		intel_gt_flush_ggtt_writes(to_gt(i915));

		p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
		cpu = kmap(p) + offset_in_page(offset);
		drm_clflush_virt_range(cpu, sizeof(*cpu));
		if (*cpu != (u32)page) {
			pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
			       page, n,
			       view.partial.offset,
			       view.partial.size,
			       vma->size >> PAGE_SHIFT,
			       tile->tiling ? tile_row_pages(obj) : 0,
			       vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
			       offset >> PAGE_SHIFT,
			       (unsigned int)offset_in_page(offset),
			       offset,
			       (u32)page, *cpu);
			err = -EINVAL;
		}
		*cpu = 0;
		drm_clflush_virt_range(cpu, sizeof(*cpu));
		kunmap(p);
		if (err)
			return err;

		i915_gem_object_lock(obj, NULL);
		i915_vma_destroy(vma);
		i915_gem_object_unlock(obj);

		if (igt_timeout(end_time,
				"%s: timed out after tiling=%d stride=%d\n",
				__func__, tile->tiling, tile->stride))
			return -EINTR;
	}

	return 0;
}

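/*
 * Added commentary (not from the original source): setup_tile_size() fills in
 * the per-platform tile geometry and returns the maximum pitch, in tiles,
 * that a fence register can cover. For example, on gen4+ X-tiling a tile is
 * 512 bytes wide by 8 rows (4KiB, hence size=12); igt_partial_tiling() then
 * sweeps tile.stride = tile.width * pitch from that maximum down by powers of
 * two (plus the +/-1 neighbours on gen4+), while igt_smoke_tiling() picks the
 * pitch at random.
 */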
static unsigned int
setup_tile_size(struct tile *tile, struct drm_i915_private *i915)
{
	if (GRAPHICS_VER(i915) <= 2) {
		tile->height = 16;
		tile->width = 128;
		tile->size = 11;
	} else if (tile->tiling == I915_TILING_Y &&
		   HAS_128_BYTE_Y_TILING(i915)) {
		tile->height = 32;
		tile->width = 128;
		tile->size = 12;
	} else {
		tile->height = 8;
		tile->width = 512;
		tile->size = 12;
	}

	if (GRAPHICS_VER(i915) < 4)
		return 8192 / tile->width;
	else if (GRAPHICS_VER(i915) < 7)
		return 128 * I965_FENCE_MAX_PITCH_VAL / tile->width;
	else
		return 128 * GEN7_FENCE_MAX_PITCH_VAL / tile->width;
}

static int igt_partial_tiling(void *arg)
{
	const unsigned int nreal = 1 << 12; /* largest tile row x2 */
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	intel_wakeref_t wakeref;
	int tiling;
	int err;

	if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
		return 0;

	/* We want to check the page mapping and fencing of a large object
	 * mmapped through the GTT. The object we create is larger than can
	 * possibly be mmapped as a whole, and so we must use partial GGTT vma.
	 * We then check that a write through each partial GGTT vma ends up
	 * in the right set of pages within the object, and with the expected
	 * tiling, which we verify by manual swizzling.
	 */

	obj = huge_gem_object(i915,
			      nreal << PAGE_SHIFT,
			      (1 + next_prime_number(to_gt(i915)->ggtt->vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	err = i915_gem_object_pin_pages_unlocked(obj);
	if (err) {
		pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
		       nreal, obj->base.size / PAGE_SIZE, err);
		goto out;
	}

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	if (1) {
		IGT_TIMEOUT(end);
		struct tile tile;

		tile.height = 1;
		tile.width = 1;
		tile.size = 0;
		tile.stride = 0;
		tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
		tile.tiling = I915_TILING_NONE;

		err = check_partial_mappings(obj, &tile, end);
		if (err && err != -EINTR)
			goto out_unlock;
	}

	for (tiling = I915_TILING_X; tiling <= I915_TILING_Y; tiling++) {
		IGT_TIMEOUT(end);
		unsigned int max_pitch;
		unsigned int pitch;
		struct tile tile;

		if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
			/*
			 * The swizzling pattern is actually unknown as it
			 * varies based on physical address of each page.
			 * See i915_gem_detect_bit_6_swizzle().
			 */
			break;

		tile.tiling = tiling;
		switch (tiling) {
		case I915_TILING_X:
			tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_x;
			break;
		case I915_TILING_Y:
			tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_y;
			break;
		}

		GEM_BUG_ON(tile.swizzle == I915_BIT_6_SWIZZLE_UNKNOWN);
		if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
		    tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
			continue;

		max_pitch = setup_tile_size(&tile, i915);

		for (pitch = max_pitch; pitch; pitch >>= 1) {
			tile.stride = tile.width * pitch;
			err = check_partial_mappings(obj, &tile, end);
			if (err == -EINTR)
				goto next_tiling;
			if (err)
				goto out_unlock;

			if (pitch > 2 && GRAPHICS_VER(i915) >= 4) {
				tile.stride = tile.width * (pitch - 1);
				err = check_partial_mappings(obj, &tile, end);
				if (err == -EINTR)
					goto next_tiling;
				if (err)
					goto out_unlock;
			}

			if (pitch < max_pitch && GRAPHICS_VER(i915) >= 4) {
				tile.stride = tile.width * (pitch + 1);
				err = check_partial_mappings(obj, &tile, end);
				if (err == -EINTR)
					goto next_tiling;
				if (err)
					goto out_unlock;
			}
		}

		if (GRAPHICS_VER(i915) >= 4) {
			for_each_prime_number(pitch, max_pitch) {
				tile.stride = tile.width * pitch;
				err = check_partial_mappings(obj, &tile, end);
				if (err == -EINTR)
					goto next_tiling;
				if (err)
					goto out_unlock;
			}
		}

next_tiling: ;
	}

out_unlock:
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return err;
}

static int igt_smoke_tiling(void *arg)
{
	const unsigned int nreal = 1 << 12; /* largest tile row x2 */
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	intel_wakeref_t wakeref;
	I915_RND_STATE(prng);
	unsigned long count;
	IGT_TIMEOUT(end);
	int err;

	if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
		return 0;

	/*
	 * igt_partial_tiling() does an exhaustive check of partial tiling
	 * chunking, but will undoubtedly run out of time. Here, we do a
	 * randomised search and hope over many runs of 1s with different
	 * seeds we will do a thorough check.
	 *
	 * Remember to look at the st_seed if we see a flip-flop in BAT!
	 */

	if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		return 0;

	obj = huge_gem_object(i915,
			      nreal << PAGE_SHIFT,
			      (1 + next_prime_number(to_gt(i915)->ggtt->vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	err = i915_gem_object_pin_pages_unlocked(obj);
	if (err) {
		pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
		       nreal, obj->base.size / PAGE_SIZE, err);
		goto out;
	}

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	count = 0;
	do {
		struct tile tile;

		tile.tiling =
			i915_prandom_u32_max_state(I915_TILING_Y + 1, &prng);
		switch (tile.tiling) {
		case I915_TILING_NONE:
			tile.height = 1;
			tile.width = 1;
			tile.size = 0;
			tile.stride = 0;
			tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
			break;

		case I915_TILING_X:
			tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_x;
			break;
		case I915_TILING_Y:
			tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_y;
			break;
		}

		if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
		    tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
			continue;

		if (tile.tiling != I915_TILING_NONE) {
			unsigned int max_pitch = setup_tile_size(&tile, i915);

			tile.stride =
				i915_prandom_u32_max_state(max_pitch, &prng);
			tile.stride = (1 + tile.stride) * tile.width;
			if (GRAPHICS_VER(i915) < 4)
				tile.stride = rounddown_pow_of_two(tile.stride);
		}

		err = check_partial_mapping(obj, &tile, &prng);
		if (err)
			break;

		count++;
	} while (!__igt_timeout(end, NULL));

	pr_info("%s: Completed %lu trials\n", __func__, count);

	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return err;
}

static int make_obj_busy(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;

	for_each_uabi_engine(engine, i915) {
		struct i915_request *rq;
		struct i915_vma *vma;
		struct i915_gem_ww_ctx ww;
		int err;

		vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		i915_gem_ww_ctx_init(&ww, false);
retry:
		err = i915_gem_object_lock(obj, &ww);
		if (!err)
			err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
		if (err)
			goto err;

		rq = intel_engine_create_kernel_request(engine);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_unpin;
		}

		err = i915_request_await_object(rq, vma->obj, true);
		if (err == 0)
			err = i915_vma_move_to_active(vma, rq,
						      EXEC_OBJECT_WRITE);

		i915_request_add(rq);
err_unpin:
		i915_vma_unpin(vma);
err:
		if (err == -EDEADLK) {
			err = i915_gem_ww_ctx_backoff(&ww);
			if (!err)
				goto retry;
		}
		i915_gem_ww_ctx_fini(&ww);
		if (err)
			return err;
	}

	i915_gem_object_put(obj); /* leave it only alive via its active ref */
	return 0;
}

static enum i915_mmap_type default_mapping(struct drm_i915_private *i915)
{
	if (HAS_LMEM(i915))
		return I915_MMAP_TYPE_FIXED;

	return I915_MMAP_TYPE_GTT;
}

static struct drm_i915_gem_object *
create_sys_or_internal(struct drm_i915_private *i915,
		       unsigned long size)
{
	if (HAS_LMEM(i915)) {
		struct intel_memory_region *sys_region =
			i915->mm.regions[INTEL_REGION_SMEM];

		return __i915_gem_object_create_user(i915, size,
						     &sys_region, 1);
	}

	return i915_gem_object_create_internal(i915, size);
}

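/*
 * Added commentary (not from the original source): assert_mmap_offset()
 * creates a throwaway object of the requested size and checks that reserving
 * a fault offset for it returns the expected value. For example,
 * assert_mmap_offset(i915, PAGE_SIZE, 0) expects success, while passing
 * -ENOSPC (or -ENXIO on LMEM platforms) expects the offset manager to report
 * exhaustion once the hole has been trimmed down to a single page.
 */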
static bool assert_mmap_offset(struct drm_i915_private *i915,
			       unsigned long size,
			       int expected)
{
	struct drm_i915_gem_object *obj;
	u64 offset;
	int ret;

	obj = create_sys_or_internal(i915, size);
	if (IS_ERR(obj))
		return expected && expected == PTR_ERR(obj);

	ret = __assign_mmap_offset(obj, default_mapping(i915), &offset, NULL);
	i915_gem_object_put(obj);

	return ret == expected;
}

static void disable_retire_worker(struct drm_i915_private *i915)
{
	i915_gem_driver_unregister__shrinker(i915);
	intel_gt_pm_get(to_gt(i915));
	cancel_delayed_work_sync(&to_gt(i915)->requests.retire_work);
}

static void restore_retire_worker(struct drm_i915_private *i915)
{
	igt_flush_test(i915);
	intel_gt_pm_put(to_gt(i915));
	i915_gem_driver_register__shrinker(i915);
}

static void mmap_offset_lock(struct drm_i915_private *i915)
	__acquires(&i915->drm.vma_offset_manager->vm_lock)
{
	write_lock(&i915->drm.vma_offset_manager->vm_lock);
}

static void mmap_offset_unlock(struct drm_i915_private *i915)
	__releases(&i915->drm.vma_offset_manager->vm_lock)
{
	write_unlock(&i915->drm.vma_offset_manager->vm_lock);
}

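/*
 * Added commentary (not from the original source): the exhaustion test below
 * shrinks the global mmap offset space down to a single page by reserving
 * every hole in the drm_vma_offset_manager, then checks that exactly one
 * page-sized object can still be given a fault offset, that anything larger
 * fails, and that busy-but-unreferenced objects are reaped to make room.
 */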
static int igt_mmap_offset_exhaustion(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *hole, *next;
	int loop, err = 0;
	u64 offset;
	int enospc = HAS_LMEM(i915) ? -ENXIO : -ENOSPC;

	/* Disable background reaper */
	disable_retire_worker(i915);
	GEM_BUG_ON(!to_gt(i915)->awake);
	intel_gt_retire_requests(to_gt(i915));
	i915_gem_drain_freed_objects(i915);

	/* Trim the device mmap space to only a page */
	mmap_offset_lock(i915);
	loop = 1; /* PAGE_SIZE units */
	list_for_each_entry_safe(hole, next, &mm->hole_stack, hole_stack) {
		struct drm_mm_node *resv;

		resv = kzalloc(sizeof(*resv), GFP_NOWAIT);
		if (!resv) {
			err = -ENOMEM;
			goto out_park;
		}

		resv->start = drm_mm_hole_node_start(hole) + loop;
		resv->size = hole->hole_size - loop;
		resv->color = -1ul;
		loop = 0;

		if (!resv->size) {
			kfree(resv);
			continue;
		}

		pr_debug("Reserving hole [%llx + %llx]\n",
			 resv->start, resv->size);

		err = drm_mm_reserve_node(mm, resv);
		if (err) {
			pr_err("Failed to trim VMA manager, err=%d\n", err);
			kfree(resv);
			goto out_park;
		}
	}
	GEM_BUG_ON(!list_is_singular(&mm->hole_stack));
	mmap_offset_unlock(i915);

	/* Just fits! */
	if (!assert_mmap_offset(i915, PAGE_SIZE, 0)) {
		pr_err("Unable to insert object into single page hole\n");
		err = -EINVAL;
		goto out;
	}

	/* Too large */
	if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, enospc)) {
		pr_err("Unexpectedly succeeded in inserting too large object into single page hole\n");
		err = -EINVAL;
		goto out;
	}

	/* Fill the hole, further allocation attempts should then fail */
	obj = create_sys_or_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		pr_err("Unable to create object for reclaimed hole\n");
		goto out;
	}

	err = __assign_mmap_offset(obj, default_mapping(i915), &offset, NULL);
	if (err) {
		pr_err("Unable to insert object into reclaimed hole\n");
		goto err_obj;
	}

	if (!assert_mmap_offset(i915, PAGE_SIZE, enospc)) {
		pr_err("Unexpectedly succeeded in inserting object into no holes!\n");
		err = -EINVAL;
		goto err_obj;
	}

	i915_gem_object_put(obj);

	/* Now fill with busy dead objects that we expect to reap */
	for (loop = 0; loop < 3; loop++) {
		if (intel_gt_is_wedged(to_gt(i915)))
			break;

		obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = make_obj_busy(obj);
		if (err) {
			pr_err("[loop %d] Failed to busy the object\n", loop);
			goto err_obj;
		}
	}

out:
	mmap_offset_lock(i915);
out_park:
	drm_mm_for_each_node_safe(hole, next, mm) {
		if (hole->color != -1ul)
			continue;

		drm_mm_remove_node(hole);
		kfree(hole);
	}
	mmap_offset_unlock(i915);
	restore_retire_worker(i915);
	return err;
err_obj:
	i915_gem_object_put(obj);
	goto out;
}

static int gtt_set(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	void __iomem *map;
	int err = 0;

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	intel_gt_pm_get(vma->vm->gt);
	map = i915_vma_pin_iomap(vma);
	i915_vma_unpin(vma);
	if (IS_ERR(map)) {
		err = PTR_ERR(map);
		goto out;
	}

	memset_io(map, POISON_INUSE, obj->base.size);
	i915_vma_unpin_iomap(vma);

out:
	intel_gt_pm_put(vma->vm->gt);
	return err;
}

static int gtt_check(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	void __iomem *map;
	int err = 0;

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	intel_gt_pm_get(vma->vm->gt);
	map = i915_vma_pin_iomap(vma);
	i915_vma_unpin(vma);
	if (IS_ERR(map)) {
		err = PTR_ERR(map);
		goto out;
	}

	if (memchr_inv((void __force *)map, POISON_FREE, obj->base.size)) {
		pr_err("%s: Write via mmap did not land in backing store (GTT)\n",
		       obj->mm.region->name);
		err = -EINVAL;
	}
	i915_vma_unpin_iomap(vma);

out:
	intel_gt_pm_put(vma->vm->gt);
	return err;
}

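/*
 * Added commentary (not from the original source): wc_set()/wc_check() mirror
 * gtt_set()/gtt_check() above, but poison and verify the backing store
 * through a CPU write-combining map instead of the GGTT aperture. Callers
 * treat -ENXIO from the WC path as "no CPU-mappable backing" and fall back to
 * the GTT variants, e.g. in __igt_mmap():
 *	err = wc_set(obj);
 *	if (err == -ENXIO)
 *		err = gtt_set(obj);
 */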
static int wc_set(struct drm_i915_gem_object *obj)
{
	void *vaddr;

	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	memset(vaddr, POISON_INUSE, obj->base.size);
	i915_gem_object_flush_map(obj);
	i915_gem_object_unpin_map(obj);

	return 0;
}

static int wc_check(struct drm_i915_gem_object *obj)
{
	void *vaddr;
	int err = 0;

	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	if (memchr_inv(vaddr, POISON_FREE, obj->base.size)) {
		pr_err("%s: Write via mmap did not land in backing store (WC)\n",
		       obj->mm.region->name);
		err = -EINVAL;
	}
	i915_gem_object_unpin_map(obj);

	return err;
}

static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	bool no_map;

	if (obj->ops->mmap_offset)
		return type == I915_MMAP_TYPE_FIXED;
	else if (type == I915_MMAP_TYPE_FIXED)
		return false;

	if (type == I915_MMAP_TYPE_GTT &&
	    !i915_ggtt_has_aperture(to_gt(i915)->ggtt))
		return false;

	i915_gem_object_lock(obj, NULL);
	no_map = (type != I915_MMAP_TYPE_GTT &&
		  !i915_gem_object_has_struct_page(obj) &&
		  !i915_gem_object_has_iomem(obj));
	i915_gem_object_unlock(obj);

	return !no_map;
}

#define expand32(x) (((x) << 0) | ((x) << 8) | ((x) << 16) | ((x) << 24))
static int __igt_mmap(struct drm_i915_private *i915,
		      struct drm_i915_gem_object *obj,
		      enum i915_mmap_type type)
{
	struct vm_area_struct *area;
	unsigned long addr;
	int err, i;
	u64 offset;

	if (!can_mmap(obj, type))
		return 0;

	err = wc_set(obj);
	if (err == -ENXIO)
		err = gtt_set(obj);
	if (err)
		return err;

	err = __assign_mmap_offset(obj, type, &offset, NULL);
	if (err)
		return err;

	addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
	if (IS_ERR_VALUE(addr))
		return addr;

	pr_debug("igt_mmap(%s, %d) @ %lx\n", obj->mm.region->name, type, addr);

	mmap_read_lock(current->mm);
	area = vma_lookup(current->mm, addr);
	mmap_read_unlock(current->mm);
	if (!area) {
		pr_err("%s: Did not create a vm_area_struct for the mmap\n",
		       obj->mm.region->name);
		err = -EINVAL;
		goto out_unmap;
	}

	for (i = 0; i < obj->base.size / sizeof(u32); i++) {
		u32 __user *ux = u64_to_user_ptr((u64)(addr + i * sizeof(*ux)));
		u32 x;

		if (get_user(x, ux)) {
			pr_err("%s: Unable to read from mmap, offset:%zd\n",
			       obj->mm.region->name, i * sizeof(x));
			err = -EFAULT;
			goto out_unmap;
		}

		if (x != expand32(POISON_INUSE)) {
			pr_err("%s: Read incorrect value from mmap, offset:%zd, found:%x, expected:%x\n",
			       obj->mm.region->name,
			       i * sizeof(x), x, expand32(POISON_INUSE));
			err = -EINVAL;
			goto out_unmap;
		}

		x = expand32(POISON_FREE);
		if (put_user(x, ux)) {
			pr_err("%s: Unable to write to mmap, offset:%zd\n",
			       obj->mm.region->name, i * sizeof(x));
			err = -EFAULT;
			goto out_unmap;
		}
	}

	if (type == I915_MMAP_TYPE_GTT)
		intel_gt_flush_ggtt_writes(to_gt(i915));

	err = wc_check(obj);
	if (err == -ENXIO)
		err = gtt_check(obj);
out_unmap:
	vm_munmap(addr, obj->base.size);
	return err;
}

static int igt_mmap(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_memory_region *mr;
	enum intel_region_id id;

	for_each_memory_region(mr, i915, id) {
		unsigned long sizes[] = {
			PAGE_SIZE,
			mr->min_page_size,
			SZ_4M,
		};
		int i;

		for (i = 0; i < ARRAY_SIZE(sizes); i++) {
			struct drm_i915_gem_object *obj;
			int err;

			obj = __i915_gem_object_create_user(i915, sizes[i], &mr, 1);
			if (obj == ERR_PTR(-ENODEV))
				continue;

			if (IS_ERR(obj))
				return PTR_ERR(obj);

			err = __igt_mmap(i915, obj, I915_MMAP_TYPE_GTT);
			if (err == 0)
				err = __igt_mmap(i915, obj, I915_MMAP_TYPE_WC);
			if (err == 0)
				err = __igt_mmap(i915, obj, I915_MMAP_TYPE_FIXED);

			i915_gem_object_put(obj);
			if (err)
				return err;
		}
	}

	return 0;
}

static void igt_close_objects(struct drm_i915_private *i915,
			      struct list_head *objects)
{
	struct drm_i915_gem_object *obj, *on;

	list_for_each_entry_safe(obj, on, objects, st_link) {
		i915_gem_object_lock(obj, NULL);
		if (i915_gem_object_has_pinned_pages(obj))
			i915_gem_object_unpin_pages(obj);
		/* No polluting the memory region between tests */
		__i915_gem_object_put_pages(obj);
		i915_gem_object_unlock(obj);
		list_del(&obj->st_link);
		i915_gem_object_put(obj);
	}

	cond_resched();

	i915_gem_drain_freed_objects(i915);
}

static void igt_make_evictable(struct list_head *objects)
{
	struct drm_i915_gem_object *obj;

	list_for_each_entry(obj, objects, st_link) {
		i915_gem_object_lock(obj, NULL);
		if (i915_gem_object_has_pinned_pages(obj))
			i915_gem_object_unpin_pages(obj);
		i915_gem_object_unlock(obj);
	}

	cond_resched();
}

static int igt_fill_mappable(struct intel_memory_region *mr,
			     struct list_head *objects)
{
	u64 size, total;
	int err;

	total = 0;
	size = mr->io_size;
	do {
		struct drm_i915_gem_object *obj;

		obj = i915_gem_object_create_region(mr, size, 0, 0);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto err_close;
		}

		list_add(&obj->st_link, objects);

		err = i915_gem_object_pin_pages_unlocked(obj);
		if (err) {
			if (err != -ENXIO && err != -ENOMEM)
				goto err_close;

			if (size == mr->min_page_size) {
				err = 0;
				break;
			}

			size >>= 1;
			continue;
		}

		total += obj->base.size;
	} while (1);

	pr_info("%s filled=%lluMiB\n", __func__, total >> 20);
	return 0;

err_close:
	igt_close_objects(mr->i915, objects);
	return err;
}

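/*
 * Added commentary (not from the original source): ___igt_mmap_migrate()
 * faults a CPU mmap of @obj at @addr and checks the outcome. With
 * @unfaultable set, every access is expected to fail (get_user() reporting
 * -EFAULT), which models an object stranded in the non-mappable portion of
 * local memory. Otherwise the poison pattern written by the caller must be
 * visible, is overwritten with POISON_FREE, and is then verified through
 * wc_check().
 */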
static int ___igt_mmap_migrate(struct drm_i915_private *i915,
			       struct drm_i915_gem_object *obj,
			       unsigned long addr,
			       bool unfaultable)
{
	struct vm_area_struct *area;
	int err = 0, i;

	pr_info("igt_mmap(%s, %d) @ %lx\n",
		obj->mm.region->name, I915_MMAP_TYPE_FIXED, addr);

	mmap_read_lock(current->mm);
	area = vma_lookup(current->mm, addr);
	mmap_read_unlock(current->mm);
	if (!area) {
		pr_err("%s: Did not create a vm_area_struct for the mmap\n",
		       obj->mm.region->name);
		err = -EINVAL;
		goto out_unmap;
	}

	for (i = 0; i < obj->base.size / sizeof(u32); i++) {
		u32 __user *ux = u64_to_user_ptr((u64)(addr + i * sizeof(*ux)));
		u32 x;

		if (get_user(x, ux)) {
			err = -EFAULT;
			if (!unfaultable) {
				pr_err("%s: Unable to read from mmap, offset:%zd\n",
				       obj->mm.region->name, i * sizeof(x));
				goto out_unmap;
			}

			continue;
		}

		if (unfaultable) {
			pr_err("%s: Faulted unmappable memory\n",
			       obj->mm.region->name);
			err = -EINVAL;
			goto out_unmap;
		}

		if (x != expand32(POISON_INUSE)) {
			pr_err("%s: Read incorrect value from mmap, offset:%zd, found:%x, expected:%x\n",
			       obj->mm.region->name,
			       i * sizeof(x), x, expand32(POISON_INUSE));
			err = -EINVAL;
			goto out_unmap;
		}

		x = expand32(POISON_FREE);
		if (put_user(x, ux)) {
			pr_err("%s: Unable to write to mmap, offset:%zd\n",
			       obj->mm.region->name, i * sizeof(x));
			err = -EFAULT;
			goto out_unmap;
		}
	}

	if (unfaultable) {
		if (err == -EFAULT)
			err = 0;
	} else {
		obj->flags &= ~I915_BO_ALLOC_GPU_ONLY;
		err = wc_check(obj);
	}
out_unmap:
	vm_munmap(addr, obj->base.size);
	return err;
}

#define IGT_MMAP_MIGRATE_TOPDOWN     (1 << 0)
#define IGT_MMAP_MIGRATE_FILL        (1 << 1)
#define IGT_MMAP_MIGRATE_EVICTABLE   (1 << 2)
#define IGT_MMAP_MIGRATE_UNFAULTABLE (1 << 3)
static int __igt_mmap_migrate(struct intel_memory_region **placements,
			      int n_placements,
			      struct intel_memory_region *expected_mr,
			      unsigned int flags)
{
	struct drm_i915_private *i915 = placements[0]->i915;
	struct drm_i915_gem_object *obj;
	struct i915_request *rq = NULL;
	unsigned long addr;
	LIST_HEAD(objects);
	u64 offset;
	int err;

	obj = __i915_gem_object_create_user(i915, PAGE_SIZE,
					    placements,
					    n_placements);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (flags & IGT_MMAP_MIGRATE_TOPDOWN)
		obj->flags |= I915_BO_ALLOC_GPU_ONLY;

	err = __assign_mmap_offset(obj, I915_MMAP_TYPE_FIXED, &offset, NULL);
	if (err)
		goto out_put;

	/*
	 * This will eventually create a GEM context, due to opening a dummy
	 * drm file, which needs a tiny amount of mappable device memory for
	 * the top level paging structures (and perhaps scratch), so make sure
	 * we allocate early, to avoid tears.
	 */
	addr = igt_mmap_offset(i915, offset, obj->base.size,
			       PROT_WRITE, MAP_SHARED);
	if (IS_ERR_VALUE(addr)) {
		err = addr;
		goto out_put;
	}

	if (flags & IGT_MMAP_MIGRATE_FILL) {
		err = igt_fill_mappable(placements[0], &objects);
		if (err)
			goto out_put;
	}

	err = i915_gem_object_lock(obj, NULL);
	if (err)
		goto out_put;

	err = i915_gem_object_pin_pages(obj);
	if (err) {
		i915_gem_object_unlock(obj);
		goto out_put;
	}

	err = intel_context_migrate_clear(to_gt(i915)->migrate.context, NULL,
					  obj->mm.pages->sgl, obj->cache_level,
					  i915_gem_object_is_lmem(obj),
					  expand32(POISON_INUSE), &rq);
	i915_gem_object_unpin_pages(obj);
	if (rq) {
		dma_resv_add_excl_fence(obj->base.resv, &rq->fence);
		i915_gem_object_set_moving_fence(obj, &rq->fence);
		i915_request_put(rq);
	}
	i915_gem_object_unlock(obj);
	if (err)
		goto out_put;

	if (flags & IGT_MMAP_MIGRATE_EVICTABLE)
		igt_make_evictable(&objects);

	err = ___igt_mmap_migrate(i915, obj, addr,
				  flags & IGT_MMAP_MIGRATE_UNFAULTABLE);
	if (!err && obj->mm.region != expected_mr) {
		pr_err("%s region mismatch %s\n", __func__, expected_mr->name);
		err = -EINVAL;
	}

out_put:
	i915_gem_object_put(obj);
	igt_close_objects(i915, &objects);
	return err;
}

static int igt_mmap_migrate(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_memory_region *system = i915->mm.regions[INTEL_REGION_SMEM];
	struct intel_memory_region *mr;
	enum intel_region_id id;

	for_each_memory_region(mr, i915, id) {
		struct intel_memory_region *mixed[] = { mr, system };
		struct intel_memory_region *single[] = { mr };
		struct ttm_resource_manager *man = mr->region_private;
		resource_size_t saved_io_size;
		int err;

		if (mr->private)
			continue;

		if (!mr->io_size)
			continue;

		/*
		 * For testing purposes let's force small BAR, if not already
		 * present.
		 */
		saved_io_size = mr->io_size;
		if (mr->io_size == mr->total) {
			resource_size_t io_size = mr->io_size;

			io_size = rounddown_pow_of_two(io_size >> 1);
			if (io_size < PAGE_SIZE)
				continue;

			mr->io_size = io_size;
			i915_ttm_buddy_man_force_visible_size(man,
							      io_size >> PAGE_SHIFT);
		}

		/*
		 * Allocate in the mappable portion, should be no surprises here.
		 */
		err = __igt_mmap_migrate(mixed, ARRAY_SIZE(mixed), mr, 0);
		if (err)
			goto out_io_size;

		/*
		 * Allocate in the non-mappable portion, but force migrating to
		 * the mappable portion on fault (LMEM -> LMEM)
		 */
		err = __igt_mmap_migrate(single, ARRAY_SIZE(single), mr,
					 IGT_MMAP_MIGRATE_TOPDOWN |
					 IGT_MMAP_MIGRATE_FILL |
					 IGT_MMAP_MIGRATE_EVICTABLE);
		if (err)
			goto out_io_size;

		/*
		 * Allocate in the non-mappable portion, but force spilling into
		 * system memory on fault (LMEM -> SMEM)
		 */
		err = __igt_mmap_migrate(mixed, ARRAY_SIZE(mixed), system,
					 IGT_MMAP_MIGRATE_TOPDOWN |
					 IGT_MMAP_MIGRATE_FILL);
		if (err)
			goto out_io_size;

		/*
		 * Allocate in the non-mappable portion, but since the mappable
		 * portion is already full, and we can't spill to system memory,
		 * then we should expect the fault to fail.
		 */
		err = __igt_mmap_migrate(single, ARRAY_SIZE(single), mr,
					 IGT_MMAP_MIGRATE_TOPDOWN |
					 IGT_MMAP_MIGRATE_FILL |
					 IGT_MMAP_MIGRATE_UNFAULTABLE);
out_io_size:
		mr->io_size = saved_io_size;
		i915_ttm_buddy_man_force_visible_size(man,
						      mr->io_size >> PAGE_SHIFT);
		if (err)
			return err;
	}

	return 0;
}

static const char *repr_mmap_type(enum i915_mmap_type type)
{
	switch (type) {
	case I915_MMAP_TYPE_GTT: return "gtt";
	case I915_MMAP_TYPE_WB: return "wb";
	case I915_MMAP_TYPE_WC: return "wc";
	case I915_MMAP_TYPE_UC: return "uc";
	case I915_MMAP_TYPE_FIXED: return "fixed";
	default: return "unknown";
	}
}

static bool can_access(struct drm_i915_gem_object *obj)
{
	bool access;

	i915_gem_object_lock(obj, NULL);
	access = i915_gem_object_has_struct_page(obj) ||
		 i915_gem_object_has_iomem(obj);
	i915_gem_object_unlock(obj);

	return access;
}

static int __igt_mmap_access(struct drm_i915_private *i915,
			     struct drm_i915_gem_object *obj,
			     enum i915_mmap_type type)
{
	unsigned long __user *ptr;
	unsigned long A, B;
	unsigned long x, y;
	unsigned long addr;
	int err;
	u64 offset;

	memset(&A, 0xAA, sizeof(A));
	memset(&B, 0xBB, sizeof(B));

	if (!can_mmap(obj, type) || !can_access(obj))
		return 0;

	err = __assign_mmap_offset(obj, type, &offset, NULL);
	if (err)
		return err;

	addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
	if (IS_ERR_VALUE(addr))
		return addr;
	ptr = (unsigned long __user *)addr;

	err = __put_user(A, ptr);
	if (err) {
		pr_err("%s(%s): failed to write into user mmap\n",
		       obj->mm.region->name, repr_mmap_type(type));
		goto out_unmap;
	}

	intel_gt_flush_ggtt_writes(to_gt(i915));

	err = access_process_vm(current, addr, &x, sizeof(x), 0);
	if (err != sizeof(x)) {
		pr_err("%s(%s): access_process_vm() read failed\n",
		       obj->mm.region->name, repr_mmap_type(type));
		goto out_unmap;
	}

	err = access_process_vm(current, addr, &B, sizeof(B), FOLL_WRITE);
	if (err != sizeof(B)) {
		pr_err("%s(%s): access_process_vm() write failed\n",
		       obj->mm.region->name, repr_mmap_type(type));
		goto out_unmap;
	}

	intel_gt_flush_ggtt_writes(to_gt(i915));

	err = __get_user(y, ptr);
	if (err) {
		pr_err("%s(%s): failed to read from user mmap\n",
		       obj->mm.region->name, repr_mmap_type(type));
		goto out_unmap;
	}

	if (x != A || y != B) {
		pr_err("%s(%s): failed to read/write values, found (%lx, %lx)\n",
		       obj->mm.region->name, repr_mmap_type(type),
		       x, y);
		err = -EINVAL;
		goto out_unmap;
	}

out_unmap:
	vm_munmap(addr, obj->base.size);
	return err;
}

static int igt_mmap_access(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_memory_region *mr;
	enum intel_region_id id;

	for_each_memory_region(mr, i915, id) {
		struct drm_i915_gem_object *obj;
		int err;

		obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1);
		if (obj == ERR_PTR(-ENODEV))
			continue;

		if (IS_ERR(obj))
			return PTR_ERR(obj);

		err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_GTT);
		if (err == 0)
			err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WB);
		if (err == 0)
			err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WC);
		if (err == 0)
			err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_UC);
		if (err == 0)
			err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_FIXED);

		i915_gem_object_put(obj);
		if (err)
			return err;
	}

	return 0;
}

static int __igt_mmap_gpu(struct drm_i915_private *i915,
			  struct drm_i915_gem_object *obj,
			  enum i915_mmap_type type)
{
	struct intel_engine_cs *engine;
	unsigned long addr;
	u32 __user *ux;
	u32 bbe;
	int err;
	u64 offset;

	/*
	 * Verify that the mmap access into the backing store aligns with
	 * that of the GPU, i.e. that mmap is indeed writing into the same
	 * page as being read by the GPU.
	 */

	if (!can_mmap(obj, type))
		return 0;

	err = wc_set(obj);
	if (err == -ENXIO)
		err = gtt_set(obj);
	if (err)
		return err;

	err = __assign_mmap_offset(obj, type, &offset, NULL);
	if (err)
		return err;

	addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
	if (IS_ERR_VALUE(addr))
		return addr;

	ux = u64_to_user_ptr((u64)addr);
	bbe = MI_BATCH_BUFFER_END;
	if (put_user(bbe, ux)) {
		pr_err("%s: Unable to write to mmap\n", obj->mm.region->name);
		err = -EFAULT;
		goto out_unmap;
	}

	if (type == I915_MMAP_TYPE_GTT)
		intel_gt_flush_ggtt_writes(to_gt(i915));

	for_each_uabi_engine(engine, i915) {
		struct i915_request *rq;
		struct i915_vma *vma;
		struct i915_gem_ww_ctx ww;

		vma = i915_vma_instance(obj, engine->kernel_context->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out_unmap;
		}

		i915_gem_ww_ctx_init(&ww, false);
retry:
		err = i915_gem_object_lock(obj, &ww);
		if (!err)
			err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
		if (err)
			goto out_ww;

		rq = i915_request_create(engine->kernel_context);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto out_unpin;
		}

		err = i915_request_await_object(rq, vma->obj, false);
		if (err == 0)
			err = i915_vma_move_to_active(vma, rq, 0);

		err = engine->emit_bb_start(rq, vma->node.start, 0, 0);
		i915_request_get(rq);
		i915_request_add(rq);

		if (i915_request_wait(rq, 0, HZ / 5) < 0) {
			struct drm_printer p =
				drm_info_printer(engine->i915->drm.dev);

			pr_err("%s(%s, %s): Failed to execute batch\n",
			       __func__, engine->name, obj->mm.region->name);
			intel_engine_dump(engine, &p,
					  "%s\n", engine->name);

			intel_gt_set_wedged(engine->gt);
			err = -EIO;
		}
		i915_request_put(rq);

out_unpin:
		i915_vma_unpin(vma);
out_ww:
		if (err == -EDEADLK) {
			err = i915_gem_ww_ctx_backoff(&ww);
			if (!err)
				goto retry;
		}
		i915_gem_ww_ctx_fini(&ww);
		if (err)
			goto out_unmap;
	}

out_unmap:
	vm_munmap(addr, obj->base.size);
	return err;
}

static int igt_mmap_gpu(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_memory_region *mr;
	enum intel_region_id id;

	for_each_memory_region(mr, i915, id) {
		struct drm_i915_gem_object *obj;
		int err;

		obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1);
		if (obj == ERR_PTR(-ENODEV))
			continue;

		if (IS_ERR(obj))
			return PTR_ERR(obj);

		err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_GTT);
		if (err == 0)
			err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_WC);
		if (err == 0)
			err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_FIXED);

		i915_gem_object_put(obj);
		if (err)
			return err;
	}

	return 0;
}

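/*
 * Added commentary (not from the original source): the helpers below are the
 * building blocks of igt_mmap_revoke(). check_present()/check_absent() walk
 * the userspace mapping with apply_to_page_range(), asserting that every PTE
 * is populated after prefault_range() and that every PTE has been cleared
 * again once the object is unbound (or its pages have been released).
 */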
static int check_present_pte(pte_t *pte, unsigned long addr, void *data)
{
	if (!pte_present(*pte) || pte_none(*pte)) {
		pr_err("missing PTE:%lx\n",
		       (addr - (unsigned long)data) >> PAGE_SHIFT);
		return -EINVAL;
	}

	return 0;
}

static int check_absent_pte(pte_t *pte, unsigned long addr, void *data)
{
	if (pte_present(*pte) && !pte_none(*pte)) {
		pr_err("present PTE:%lx; expected to be revoked\n",
		       (addr - (unsigned long)data) >> PAGE_SHIFT);
		return -EINVAL;
	}

	return 0;
}

static int check_present(unsigned long addr, unsigned long len)
{
	return apply_to_page_range(current->mm, addr, len,
				   check_present_pte, (void *)addr);
}

static int check_absent(unsigned long addr, unsigned long len)
{
	return apply_to_page_range(current->mm, addr, len,
				   check_absent_pte, (void *)addr);
}

static int prefault_range(u64 start, u64 len)
{
	const char __user *addr, *end;
	char __maybe_unused c;
	int err;

	addr = u64_to_user_ptr(start);
	end = addr + len;

	for (; addr < end; addr += PAGE_SIZE) {
		err = __get_user(c, addr);
		if (err)
			return err;
	}

	return __get_user(c, end - 1);
}

static int __igt_mmap_revoke(struct drm_i915_private *i915,
			     struct drm_i915_gem_object *obj,
			     enum i915_mmap_type type)
{
	unsigned long addr;
	int err;
	u64 offset;

	if (!can_mmap(obj, type))
		return 0;

	err = __assign_mmap_offset(obj, type, &offset, NULL);
	if (err)
		return err;

	addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
	if (IS_ERR_VALUE(addr))
		return addr;

	err = prefault_range(addr, obj->base.size);
	if (err)
		goto out_unmap;

	err = check_present(addr, obj->base.size);
	if (err) {
		pr_err("%s: was not present\n", obj->mm.region->name);
		goto out_unmap;
	}

	/*
	 * After unbinding the object from the GGTT, its address may be reused
	 * for other objects. Ergo we have to revoke the previous mmap PTE
	 * access as it no longer points to the same object.
	 */
	i915_gem_object_lock(obj, NULL);
	err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
	i915_gem_object_unlock(obj);
	if (err) {
		pr_err("Failed to unbind object!\n");
		goto out_unmap;
	}

	if (type != I915_MMAP_TYPE_GTT) {
		i915_gem_object_lock(obj, NULL);
		__i915_gem_object_put_pages(obj);
		i915_gem_object_unlock(obj);
		if (i915_gem_object_has_pages(obj)) {
			pr_err("Failed to put-pages object!\n");
			err = -EINVAL;
			goto out_unmap;
		}
	}

	err = check_absent(addr, obj->base.size);
	if (err) {
		pr_err("%s: was not absent\n", obj->mm.region->name);
		goto out_unmap;
	}

out_unmap:
	vm_munmap(addr, obj->base.size);
	return err;
}

static int igt_mmap_revoke(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_memory_region *mr;
	enum intel_region_id id;

	for_each_memory_region(mr, i915, id) {
		struct drm_i915_gem_object *obj;
		int err;

		obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1);
		if (obj == ERR_PTR(-ENODEV))
			continue;

		if (IS_ERR(obj))
			return PTR_ERR(obj);

		err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_GTT);
		if (err == 0)
			err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_WC);
		if (err == 0)
			err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_FIXED);

		i915_gem_object_put(obj);
		if (err)
			return err;
	}

	return 0;
}

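/*
 * Added commentary (not from the original source): these are live selftests,
 * so they run against real hardware on builds with CONFIG_DRM_I915_SELFTEST,
 * typically driven through the i915 selftest module parameters or an IGT
 * wrapper; the exact invocation is an assumption here and not defined by this
 * file.
 */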
int i915_gem_mman_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_partial_tiling),
		SUBTEST(igt_smoke_tiling),
		SUBTEST(igt_mmap_offset_exhaustion),
		SUBTEST(igt_mmap),
		SUBTEST(igt_mmap_migrate),
		SUBTEST(igt_mmap_access),
		SUBTEST(igt_mmap_revoke),
		SUBTEST(igt_mmap_gpu),
	};

	return i915_subtests(tests, i915);
}