/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#include <linux/prime_numbers.h>

#include "gt/intel_engine_pm.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gem/i915_gem_region.h"
#include "huge_gem_object.h"
#include "i915_selftest.h"
#include "selftests/i915_random.h"
#include "selftests/igt_flush_test.h"
#include "selftests/igt_mmap.h"

struct tile {
	unsigned int width;
	unsigned int height;
	unsigned int stride;
	unsigned int size;
	unsigned int tiling;
	unsigned int swizzle;
};

static u64 swizzle_bit(unsigned int bit, u64 offset)
{
	return (offset & BIT_ULL(bit)) >> (bit - 6);
}

static u64 tiled_offset(const struct tile *tile, u64 v)
{
	u64 x, y;

	if (tile->tiling == I915_TILING_NONE)
		return v;

	y = div64_u64_rem(v, tile->stride, &x);
	v = div64_u64_rem(y, tile->height, &y) * tile->stride * tile->height;

	if (tile->tiling == I915_TILING_X) {
		v += y * tile->width;
		v += div64_u64_rem(x, tile->width, &x) << tile->size;
		v += x;
	} else if (tile->width == 128) {
		const unsigned int ytile_span = 16;
		const unsigned int ytile_height = 512;

		v += y * ytile_span;
		v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
		v += x;
	} else {
		const unsigned int ytile_span = 32;
		const unsigned int ytile_height = 256;

		v += y * ytile_span;
		v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
		v += x;
	}

	switch (tile->swizzle) {
	case I915_BIT_6_SWIZZLE_9:
		v ^= swizzle_bit(9, v);
		break;
	case I915_BIT_6_SWIZZLE_9_10:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v);
		break;
	case I915_BIT_6_SWIZZLE_9_11:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(11, v);
		break;
	case I915_BIT_6_SWIZZLE_9_10_11:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v) ^ swizzle_bit(11, v);
		break;
	}

	return v;
}

static int check_partial_mapping(struct drm_i915_gem_object *obj,
				 const struct tile *tile,
				 struct rnd_state *prng)
{
	const unsigned long npages = obj->base.size / PAGE_SIZE;
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt_view view;
	struct i915_vma *vma;
	unsigned long page;
	u32 __iomem *io;
	struct page *p;
	unsigned int n;
	u64 offset;
	u32 *cpu;
	int err;

	err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
	if (err) {
		pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
		       tile->tiling, tile->stride, err);
		return err;
	}

	GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
	GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);

	i915_gem_object_lock(obj, NULL);
	err = i915_gem_object_set_to_gtt_domain(obj, true);
	i915_gem_object_unlock(obj);
	if (err) {
		pr_err("Failed to flush to GTT write domain; err=%d\n", err);
		return err;
	}

	page = i915_prandom_u32_max_state(npages, prng);
	view = compute_partial_view(obj, page, MIN_CHUNK_PAGES);

	vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma)) {
		pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
		       page, (int)PTR_ERR(vma));
		return PTR_ERR(vma);
	}

	n = page - view.partial.offset;
	GEM_BUG_ON(n >= view.partial.size);

	io = i915_vma_pin_iomap(vma);
	i915_vma_unpin(vma);
	if (IS_ERR(io)) {
		pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
		       page, (int)PTR_ERR(io));
		err = PTR_ERR(io);
		goto out;
	}

	iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
	i915_vma_unpin_iomap(vma);

	offset = tiled_offset(tile, page << PAGE_SHIFT);
	if (offset >= obj->base.size)
		goto out;

	intel_gt_flush_ggtt_writes(to_gt(i915));

	p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
	cpu = kmap(p) + offset_in_page(offset);
	drm_clflush_virt_range(cpu, sizeof(*cpu));
	if (*cpu != (u32)page) {
		pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
		       page, n,
		       view.partial.offset,
		       view.partial.size,
		       vma->size >> PAGE_SHIFT,
		       tile->tiling ? tile_row_pages(obj) : 0,
		       vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
		       offset >> PAGE_SHIFT,
		       (unsigned int)offset_in_page(offset),
		       offset,
		       (u32)page, *cpu);
		err = -EINVAL;
	}
	*cpu = 0;
	drm_clflush_virt_range(cpu, sizeof(*cpu));
	kunmap(p);

out:
	__i915_vma_put(vma);
	return err;
}

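/*
 * Walk a prime-numbered selection of pages in the object; for each one, pin
 * a partial GGTT view around it, write the page index through the GTT
 * mapping and then verify from the CPU side that the write landed at the
 * expected (manually swizzled) offset in the backing store.
 */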
static int check_partial_mappings(struct drm_i915_gem_object *obj,
				  const struct tile *tile,
				  unsigned long end_time)
{
	const unsigned int nreal = obj->scratch / PAGE_SIZE;
	const unsigned long npages = obj->base.size / PAGE_SIZE;
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_vma *vma;
	unsigned long page;
	int err;

	err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
	if (err) {
		pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
		       tile->tiling, tile->stride, err);
		return err;
	}

	GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
	GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);

	i915_gem_object_lock(obj, NULL);
	err = i915_gem_object_set_to_gtt_domain(obj, true);
	i915_gem_object_unlock(obj);
	if (err) {
		pr_err("Failed to flush to GTT write domain; err=%d\n", err);
		return err;
	}

	for_each_prime_number_from(page, 1, npages) {
		struct i915_ggtt_view view =
			compute_partial_view(obj, page, MIN_CHUNK_PAGES);
		u32 __iomem *io;
		struct page *p;
		unsigned int n;
		u64 offset;
		u32 *cpu;

		GEM_BUG_ON(view.partial.size > nreal);
		cond_resched();

		vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
		if (IS_ERR(vma)) {
			pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
			       page, (int)PTR_ERR(vma));
			return PTR_ERR(vma);
		}

		n = page - view.partial.offset;
		GEM_BUG_ON(n >= view.partial.size);

		io = i915_vma_pin_iomap(vma);
		i915_vma_unpin(vma);
		if (IS_ERR(io)) {
			pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
			       page, (int)PTR_ERR(io));
			return PTR_ERR(io);
		}

		iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
		i915_vma_unpin_iomap(vma);

		offset = tiled_offset(tile, page << PAGE_SHIFT);
		if (offset >= obj->base.size)
			continue;

		intel_gt_flush_ggtt_writes(to_gt(i915));

		p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
		cpu = kmap(p) + offset_in_page(offset);
		drm_clflush_virt_range(cpu, sizeof(*cpu));
		if (*cpu != (u32)page) {
			pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
			       page, n,
			       view.partial.offset,
			       view.partial.size,
			       vma->size >> PAGE_SHIFT,
			       tile->tiling ? tile_row_pages(obj) : 0,
			       vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
			       offset >> PAGE_SHIFT,
			       (unsigned int)offset_in_page(offset),
			       offset,
			       (u32)page, *cpu);
			err = -EINVAL;
		}
		*cpu = 0;
		drm_clflush_virt_range(cpu, sizeof(*cpu));
		kunmap(p);
		if (err)
			return err;

		__i915_vma_put(vma);

		if (igt_timeout(end_time,
				"%s: timed out after tiling=%d stride=%d\n",
				__func__, tile->tiling, tile->stride))
			return -EINTR;
	}

	return 0;
}

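/*
 * Fill in the platform-specific tile geometry (width, height and log2 of
 * the tile size in bytes) for the chosen tiling mode, and return the
 * maximum pitch, in tiles, supported by the fence registers.
 */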
static unsigned int
setup_tile_size(struct tile *tile, struct drm_i915_private *i915)
{
	if (GRAPHICS_VER(i915) <= 2) {
		tile->height = 16;
		tile->width = 128;
		tile->size = 11;
	} else if (tile->tiling == I915_TILING_Y &&
		   HAS_128_BYTE_Y_TILING(i915)) {
		tile->height = 32;
		tile->width = 128;
		tile->size = 12;
	} else {
		tile->height = 8;
		tile->width = 512;
		tile->size = 12;
	}

	if (GRAPHICS_VER(i915) < 4)
		return 8192 / tile->width;
	else if (GRAPHICS_VER(i915) < 7)
		return 128 * I965_FENCE_MAX_PITCH_VAL / tile->width;
	else
		return 128 * GEN7_FENCE_MAX_PITCH_VAL / tile->width;
}

static int igt_partial_tiling(void *arg)
{
	const unsigned int nreal = 1 << 12; /* largest tile row x2 */
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	intel_wakeref_t wakeref;
	int tiling;
	int err;

	if (!i915_ggtt_has_aperture(&i915->ggtt))
		return 0;

	/* We want to check the page mapping and fencing of a large object
	 * mmapped through the GTT. The object we create is larger than can
	 * possibly be mmapped as a whole, and so we must use partial GGTT vma.
	 * We then check that a write through each partial GGTT vma ends up
	 * in the right set of pages within the object, and with the expected
	 * tiling, which we verify by manual swizzling.
	 */

	obj = huge_gem_object(i915,
			      nreal << PAGE_SHIFT,
			      (1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	err = i915_gem_object_pin_pages_unlocked(obj);
	if (err) {
		pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
		       nreal, obj->base.size / PAGE_SIZE, err);
		goto out;
	}

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	if (1) {
		IGT_TIMEOUT(end);
		struct tile tile;

		tile.height = 1;
		tile.width = 1;
		tile.size = 0;
		tile.stride = 0;
		tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
		tile.tiling = I915_TILING_NONE;

		err = check_partial_mappings(obj, &tile, end);
		if (err && err != -EINTR)
			goto out_unlock;
	}

	for (tiling = I915_TILING_X; tiling <= I915_TILING_Y; tiling++) {
		IGT_TIMEOUT(end);
		unsigned int max_pitch;
		unsigned int pitch;
		struct tile tile;

		if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
			/*
			 * The swizzling pattern is actually unknown as it
			 * varies based on physical address of each page.
			 * See i915_gem_detect_bit_6_swizzle().
			 */
			break;

		tile.tiling = tiling;
		switch (tiling) {
		case I915_TILING_X:
			tile.swizzle = i915->ggtt.bit_6_swizzle_x;
			break;
		case I915_TILING_Y:
			tile.swizzle = i915->ggtt.bit_6_swizzle_y;
			break;
		}

		GEM_BUG_ON(tile.swizzle == I915_BIT_6_SWIZZLE_UNKNOWN);
		if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
		    tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
			continue;

		max_pitch = setup_tile_size(&tile, i915);

		for (pitch = max_pitch; pitch; pitch >>= 1) {
			tile.stride = tile.width * pitch;
			err = check_partial_mappings(obj, &tile, end);
			if (err == -EINTR)
				goto next_tiling;
			if (err)
				goto out_unlock;

			if (pitch > 2 && GRAPHICS_VER(i915) >= 4) {
				tile.stride = tile.width * (pitch - 1);
				err = check_partial_mappings(obj, &tile, end);
				if (err == -EINTR)
					goto next_tiling;
				if (err)
					goto out_unlock;
			}

			if (pitch < max_pitch && GRAPHICS_VER(i915) >= 4) {
				tile.stride = tile.width * (pitch + 1);
				err = check_partial_mappings(obj, &tile, end);
				if (err == -EINTR)
					goto next_tiling;
				if (err)
					goto out_unlock;
			}
		}

		if (GRAPHICS_VER(i915) >= 4) {
			for_each_prime_number(pitch, max_pitch) {
				tile.stride = tile.width * pitch;
				err = check_partial_mappings(obj, &tile, end);
				if (err == -EINTR)
					goto next_tiling;
				if (err)
					goto out_unlock;
			}
		}

next_tiling: ;
	}

out_unlock:
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return err;
}

static int igt_smoke_tiling(void *arg)
{
	const unsigned int nreal = 1 << 12; /* largest tile row x2 */
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	intel_wakeref_t wakeref;
	I915_RND_STATE(prng);
	unsigned long count;
	IGT_TIMEOUT(end);
	int err;

	if (!i915_ggtt_has_aperture(&i915->ggtt))
		return 0;

	/*
	 * igt_partial_tiling() does an exhaustive check of partial tiling
	 * chunking, but will undoubtedly run out of time. Here, we do a
	 * randomised search and hope that over many runs of 1s with different
	 * seeds we will do a thorough check.
	 *
	 * Remember to look at the st_seed if we see a flip-flop in BAT!
	 */

	if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		return 0;

	obj = huge_gem_object(i915,
			      nreal << PAGE_SHIFT,
			      (1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	err = i915_gem_object_pin_pages_unlocked(obj);
	if (err) {
		pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
		       nreal, obj->base.size / PAGE_SIZE, err);
		goto out;
	}

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	count = 0;
	do {
		struct tile tile;

		tile.tiling =
			i915_prandom_u32_max_state(I915_TILING_Y + 1, &prng);
		switch (tile.tiling) {
		case I915_TILING_NONE:
			tile.height = 1;
			tile.width = 1;
			tile.size = 0;
			tile.stride = 0;
			tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
			break;

		case I915_TILING_X:
			tile.swizzle = i915->ggtt.bit_6_swizzle_x;
			break;
		case I915_TILING_Y:
			tile.swizzle = i915->ggtt.bit_6_swizzle_y;
			break;
		}

		if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
		    tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
			continue;

		if (tile.tiling != I915_TILING_NONE) {
			unsigned int max_pitch = setup_tile_size(&tile, i915);

			tile.stride =
				i915_prandom_u32_max_state(max_pitch, &prng);
			tile.stride = (1 + tile.stride) * tile.width;
			if (GRAPHICS_VER(i915) < 4)
				tile.stride = rounddown_pow_of_two(tile.stride);
		}

		err = check_partial_mapping(obj, &tile, &prng);
		if (err)
			break;

		count++;
	} while (!__igt_timeout(end, NULL));

	pr_info("%s: Completed %lu trials\n", __func__, count);

	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return err;
}

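/*
 * Queue a dummy write to the object on every uabi engine and then drop our
 * reference, leaving the object alive only via its active reference until
 * those requests are retired.
 */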
static int make_obj_busy(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;

	for_each_uabi_engine(engine, i915) {
		struct i915_request *rq;
		struct i915_vma *vma;
		struct i915_gem_ww_ctx ww;
		int err;

		vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		i915_gem_ww_ctx_init(&ww, false);
retry:
		err = i915_gem_object_lock(obj, &ww);
		if (!err)
			err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
		if (err)
			goto err;

		rq = intel_engine_create_kernel_request(engine);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_unpin;
		}

		err = i915_request_await_object(rq, vma->obj, true);
		if (err == 0)
			err = i915_vma_move_to_active(vma, rq,
						      EXEC_OBJECT_WRITE);

		i915_request_add(rq);
err_unpin:
		i915_vma_unpin(vma);
err:
		if (err == -EDEADLK) {
			err = i915_gem_ww_ctx_backoff(&ww);
			if (!err)
				goto retry;
		}
		i915_gem_ww_ctx_fini(&ww);
		if (err)
			return err;
	}

	i915_gem_object_put(obj); /* leave it only alive via its active ref */
	return 0;
}

static enum i915_mmap_type default_mapping(struct drm_i915_private *i915)
{
	if (HAS_LMEM(i915))
		return I915_MMAP_TYPE_FIXED;

	return I915_MMAP_TYPE_GTT;
}

static struct drm_i915_gem_object *
create_sys_or_internal(struct drm_i915_private *i915,
		       unsigned long size)
{
	if (HAS_LMEM(i915)) {
		struct intel_memory_region *sys_region =
			i915->mm.regions[INTEL_REGION_SMEM];

		return __i915_gem_object_create_user(i915, size, &sys_region, 1);
	}

	return i915_gem_object_create_internal(i915, size);
}

static bool assert_mmap_offset(struct drm_i915_private *i915,
			       unsigned long size,
			       int expected)
{
	struct drm_i915_gem_object *obj;
	u64 offset;
	int ret;

	obj = create_sys_or_internal(i915, size);
	if (IS_ERR(obj))
		return expected && expected == PTR_ERR(obj);

	ret = __assign_mmap_offset(obj, default_mapping(i915), &offset, NULL);
	i915_gem_object_put(obj);

	return ret == expected;
}

static void disable_retire_worker(struct drm_i915_private *i915)
{
	i915_gem_driver_unregister__shrinker(i915);
	intel_gt_pm_get(to_gt(i915));
	cancel_delayed_work_sync(&to_gt(i915)->requests.retire_work);
}

static void restore_retire_worker(struct drm_i915_private *i915)
{
	igt_flush_test(i915);
	intel_gt_pm_put(to_gt(i915));
	i915_gem_driver_register__shrinker(i915);
}

static void mmap_offset_lock(struct drm_i915_private *i915)
	__acquires(&i915->drm.vma_offset_manager->vm_lock)
{
	write_lock(&i915->drm.vma_offset_manager->vm_lock);
}

static void mmap_offset_unlock(struct drm_i915_private *i915)
	__releases(&i915->drm.vma_offset_manager->vm_lock)
{
	write_unlock(&i915->drm.vma_offset_manager->vm_lock);
}

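/*
 * Trim the device's mmap offset space down to a single page-sized hole and
 * check that offset allocation succeeds and fails exactly as expected, and
 * finally fill it with busy dead objects that we expect to be reaped.
 */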
static int igt_mmap_offset_exhaustion(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *hole, *next;
	int loop, err = 0;
	u64 offset;
	int enospc = HAS_LMEM(i915) ? -ENXIO : -ENOSPC;

	/* Disable background reaper */
	disable_retire_worker(i915);
	GEM_BUG_ON(!to_gt(i915)->awake);
	intel_gt_retire_requests(to_gt(i915));
	i915_gem_drain_freed_objects(i915);

	/* Trim the device mmap space to only a page */
	mmap_offset_lock(i915);
	loop = 1; /* PAGE_SIZE units */
	list_for_each_entry_safe(hole, next, &mm->hole_stack, hole_stack) {
		struct drm_mm_node *resv;

		resv = kzalloc(sizeof(*resv), GFP_NOWAIT);
		if (!resv) {
			err = -ENOMEM;
			goto out_park;
		}

		resv->start = drm_mm_hole_node_start(hole) + loop;
		resv->size = hole->hole_size - loop;
		resv->color = -1ul;
		loop = 0;

		if (!resv->size) {
			kfree(resv);
			continue;
		}

		pr_debug("Reserving hole [%llx + %llx]\n",
			 resv->start, resv->size);

		err = drm_mm_reserve_node(mm, resv);
		if (err) {
			pr_err("Failed to trim VMA manager, err=%d\n", err);
			kfree(resv);
			goto out_park;
		}
	}
	GEM_BUG_ON(!list_is_singular(&mm->hole_stack));
	mmap_offset_unlock(i915);

	/* Just fits! */
	if (!assert_mmap_offset(i915, PAGE_SIZE, 0)) {
		pr_err("Unable to insert object into single page hole\n");
		err = -EINVAL;
		goto out;
	}

	/* Too large */
	if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, enospc)) {
		pr_err("Unexpectedly succeeded in inserting too large object into single page hole\n");
		err = -EINVAL;
		goto out;
	}

	/* Fill the hole, further allocation attempts should then fail */
	obj = create_sys_or_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		pr_err("Unable to create object for reclaimed hole\n");
		goto out;
	}

	err = __assign_mmap_offset(obj, default_mapping(i915), &offset, NULL);
	if (err) {
		pr_err("Unable to insert object into reclaimed hole\n");
		goto err_obj;
	}

	if (!assert_mmap_offset(i915, PAGE_SIZE, enospc)) {
		pr_err("Unexpectedly succeeded in inserting object into no holes!\n");
		err = -EINVAL;
		goto err_obj;
	}

	i915_gem_object_put(obj);

	/* Now fill with busy dead objects that we expect to reap */
	for (loop = 0; loop < 3; loop++) {
		if (intel_gt_is_wedged(to_gt(i915)))
			break;

		obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = make_obj_busy(obj);
		if (err) {
			pr_err("[loop %d] Failed to busy the object\n", loop);
			goto err_obj;
		}
	}

out:
	mmap_offset_lock(i915);
out_park:
	drm_mm_for_each_node_safe(hole, next, mm) {
		if (hole->color != -1ul)
			continue;

		drm_mm_remove_node(hole);
		kfree(hole);
	}
	mmap_offset_unlock(i915);
	restore_retire_worker(i915);
	return err;
err_obj:
	i915_gem_object_put(obj);
	goto out;
}

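/*
 * gtt_set() and wc_set() poison the object with POISON_INUSE through a GGTT
 * or WC mapping; gtt_check() and wc_check() then verify that POISON_FREE
 * written via the user mmap actually reached the backing store.
 */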
static int gtt_set(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	void __iomem *map;
	int err = 0;

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	intel_gt_pm_get(vma->vm->gt);
	map = i915_vma_pin_iomap(vma);
	i915_vma_unpin(vma);
	if (IS_ERR(map)) {
		err = PTR_ERR(map);
		goto out;
	}

	memset_io(map, POISON_INUSE, obj->base.size);
	i915_vma_unpin_iomap(vma);

out:
	intel_gt_pm_put(vma->vm->gt);
	return err;
}

static int gtt_check(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	void __iomem *map;
	int err = 0;

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	intel_gt_pm_get(vma->vm->gt);
	map = i915_vma_pin_iomap(vma);
	i915_vma_unpin(vma);
	if (IS_ERR(map)) {
		err = PTR_ERR(map);
		goto out;
	}

	if (memchr_inv((void __force *)map, POISON_FREE, obj->base.size)) {
		pr_err("%s: Write via mmap did not land in backing store (GTT)\n",
		       obj->mm.region->name);
		err = -EINVAL;
	}
	i915_vma_unpin_iomap(vma);

out:
	intel_gt_pm_put(vma->vm->gt);
	return err;
}

static int wc_set(struct drm_i915_gem_object *obj)
{
	void *vaddr;

	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	memset(vaddr, POISON_INUSE, obj->base.size);
	i915_gem_object_flush_map(obj);
	i915_gem_object_unpin_map(obj);

	return 0;
}

static int wc_check(struct drm_i915_gem_object *obj)
{
	void *vaddr;
	int err = 0;

	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	if (memchr_inv(vaddr, POISON_FREE, obj->base.size)) {
		pr_err("%s: Write via mmap did not land in backing store (WC)\n",
		       obj->mm.region->name);
		err = -EINVAL;
	}
	i915_gem_object_unpin_map(obj);

	return err;
}

static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type)
{
	bool no_map;

	if (obj->ops->mmap_offset)
		return type == I915_MMAP_TYPE_FIXED;
	else if (type == I915_MMAP_TYPE_FIXED)
		return false;

	if (type == I915_MMAP_TYPE_GTT &&
	    !i915_ggtt_has_aperture(&to_i915(obj->base.dev)->ggtt))
		return false;

	i915_gem_object_lock(obj, NULL);
	no_map = (type != I915_MMAP_TYPE_GTT &&
		  !i915_gem_object_has_struct_page(obj) &&
		  !i915_gem_object_has_iomem(obj));
	i915_gem_object_unlock(obj);

	return !no_map;
}

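/*
 * Poison the object through the kernel mapping, mmap it with the requested
 * mapping type, read the poison back through userspace, overwrite the whole
 * object with POISON_FREE via the mmap and check the writes reached the
 * backing store.
 */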
#define expand32(x) (((x) << 0) | ((x) << 8) | ((x) << 16) | ((x) << 24))
static int __igt_mmap(struct drm_i915_private *i915,
		      struct drm_i915_gem_object *obj,
		      enum i915_mmap_type type)
{
	struct vm_area_struct *area;
	unsigned long addr;
	int err, i;
	u64 offset;

	if (!can_mmap(obj, type))
		return 0;

	err = wc_set(obj);
	if (err == -ENXIO)
		err = gtt_set(obj);
	if (err)
		return err;

	err = __assign_mmap_offset(obj, type, &offset, NULL);
	if (err)
		return err;

	addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
	if (IS_ERR_VALUE(addr))
		return addr;

	pr_debug("igt_mmap(%s, %d) @ %lx\n", obj->mm.region->name, type, addr);

	mmap_read_lock(current->mm);
	area = vma_lookup(current->mm, addr);
	mmap_read_unlock(current->mm);
	if (!area) {
		pr_err("%s: Did not create a vm_area_struct for the mmap\n",
		       obj->mm.region->name);
		err = -EINVAL;
		goto out_unmap;
	}

	for (i = 0; i < obj->base.size / sizeof(u32); i++) {
		u32 __user *ux = u64_to_user_ptr((u64)(addr + i * sizeof(*ux)));
		u32 x;

		if (get_user(x, ux)) {
			pr_err("%s: Unable to read from mmap, offset:%zd\n",
			       obj->mm.region->name, i * sizeof(x));
			err = -EFAULT;
			goto out_unmap;
		}

		if (x != expand32(POISON_INUSE)) {
			pr_err("%s: Read incorrect value from mmap, offset:%zd, found:%x, expected:%x\n",
			       obj->mm.region->name,
			       i * sizeof(x), x, expand32(POISON_INUSE));
			err = -EINVAL;
			goto out_unmap;
		}

		x = expand32(POISON_FREE);
		if (put_user(x, ux)) {
			pr_err("%s: Unable to write to mmap, offset:%zd\n",
			       obj->mm.region->name, i * sizeof(x));
			err = -EFAULT;
			goto out_unmap;
		}
	}

	if (type == I915_MMAP_TYPE_GTT)
		intel_gt_flush_ggtt_writes(to_gt(i915));

	err = wc_check(obj);
	if (err == -ENXIO)
		err = gtt_check(obj);
out_unmap:
	vm_munmap(addr, obj->base.size);
	return err;
}

static int igt_mmap(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_memory_region *mr;
	enum intel_region_id id;

	for_each_memory_region(mr, i915, id) {
		unsigned long sizes[] = {
			PAGE_SIZE,
			mr->min_page_size,
			SZ_4M,
		};
		int i;

		for (i = 0; i < ARRAY_SIZE(sizes); i++) {
			struct drm_i915_gem_object *obj;
			int err;

			obj = __i915_gem_object_create_user(i915, sizes[i], &mr, 1);
			if (obj == ERR_PTR(-ENODEV))
				continue;

			if (IS_ERR(obj))
				return PTR_ERR(obj);

			err = __igt_mmap(i915, obj, I915_MMAP_TYPE_GTT);
			if (err == 0)
				err = __igt_mmap(i915, obj, I915_MMAP_TYPE_WC);
			if (err == 0)
				err = __igt_mmap(i915, obj, I915_MMAP_TYPE_FIXED);

			i915_gem_object_put(obj);
			if (err)
				return err;
		}
	}

	return 0;
}

static const char *repr_mmap_type(enum i915_mmap_type type)
{
	switch (type) {
	case I915_MMAP_TYPE_GTT: return "gtt";
	case I915_MMAP_TYPE_WB: return "wb";
	case I915_MMAP_TYPE_WC: return "wc";
	case I915_MMAP_TYPE_UC: return "uc";
	case I915_MMAP_TYPE_FIXED: return "fixed";
	default: return "unknown";
	}
}

static bool can_access(struct drm_i915_gem_object *obj)
{
	bool access;

	i915_gem_object_lock(obj, NULL);
	access = i915_gem_object_has_struct_page(obj) ||
		 i915_gem_object_has_iomem(obj);
	i915_gem_object_unlock(obj);

	return access;
}

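/*
 * Exercise ptrace-style access to the mmap: write a value through the user
 * pointer, read it back with access_process_vm(), overwrite it the same way
 * and confirm both values are observed where expected.
 */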
static int __igt_mmap_access(struct drm_i915_private *i915,
			     struct drm_i915_gem_object *obj,
			     enum i915_mmap_type type)
{
	unsigned long __user *ptr;
	unsigned long A, B;
	unsigned long x, y;
	unsigned long addr;
	int err;
	u64 offset;

	memset(&A, 0xAA, sizeof(A));
	memset(&B, 0xBB, sizeof(B));

	if (!can_mmap(obj, type) || !can_access(obj))
		return 0;

	err = __assign_mmap_offset(obj, type, &offset, NULL);
	if (err)
		return err;

	addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
	if (IS_ERR_VALUE(addr))
		return addr;
	ptr = (unsigned long __user *)addr;

	err = __put_user(A, ptr);
	if (err) {
		pr_err("%s(%s): failed to write into user mmap\n",
		       obj->mm.region->name, repr_mmap_type(type));
		goto out_unmap;
	}

	intel_gt_flush_ggtt_writes(to_gt(i915));

	err = access_process_vm(current, addr, &x, sizeof(x), 0);
	if (err != sizeof(x)) {
		pr_err("%s(%s): access_process_vm() read failed\n",
		       obj->mm.region->name, repr_mmap_type(type));
		goto out_unmap;
	}

	err = access_process_vm(current, addr, &B, sizeof(B), FOLL_WRITE);
	if (err != sizeof(B)) {
		pr_err("%s(%s): access_process_vm() write failed\n",
		       obj->mm.region->name, repr_mmap_type(type));
		goto out_unmap;
	}

	intel_gt_flush_ggtt_writes(to_gt(i915));

	err = __get_user(y, ptr);
	if (err) {
		pr_err("%s(%s): failed to read from user mmap\n",
		       obj->mm.region->name, repr_mmap_type(type));
		goto out_unmap;
	}

	if (x != A || y != B) {
		pr_err("%s(%s): failed to read/write values, found (%lx, %lx)\n",
		       obj->mm.region->name, repr_mmap_type(type),
		       x, y);
		err = -EINVAL;
		goto out_unmap;
	}

out_unmap:
	vm_munmap(addr, obj->base.size);
	return err;
}

static int igt_mmap_access(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_memory_region *mr;
	enum intel_region_id id;

	for_each_memory_region(mr, i915, id) {
		struct drm_i915_gem_object *obj;
		int err;

		obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1);
		if (obj == ERR_PTR(-ENODEV))
			continue;

		if (IS_ERR(obj))
			return PTR_ERR(obj);

		err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_GTT);
		if (err == 0)
			err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WB);
		if (err == 0)
			err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WC);
		if (err == 0)
			err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_UC);
		if (err == 0)
			err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_FIXED);

		i915_gem_object_put(obj);
		if (err)
			return err;
	}

	return 0;
}

static int __igt_mmap_gpu(struct drm_i915_private *i915,
			  struct drm_i915_gem_object *obj,
			  enum i915_mmap_type type)
{
	struct intel_engine_cs *engine;
	unsigned long addr;
	u32 __user *ux;
	u32 bbe;
	int err;
	u64 offset;

	/*
	 * Verify that the mmap access into the backing store aligns with
	 * that of the GPU, i.e. that mmap is indeed writing into the same
	 * page as being read by the GPU.
	 */

	if (!can_mmap(obj, type))
		return 0;

	err = wc_set(obj);
	if (err == -ENXIO)
		err = gtt_set(obj);
	if (err)
		return err;

	err = __assign_mmap_offset(obj, type, &offset, NULL);
	if (err)
		return err;

	addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
	if (IS_ERR_VALUE(addr))
		return addr;

	ux = u64_to_user_ptr((u64)addr);
	bbe = MI_BATCH_BUFFER_END;
	if (put_user(bbe, ux)) {
		pr_err("%s: Unable to write to mmap\n", obj->mm.region->name);
		err = -EFAULT;
		goto out_unmap;
	}

	if (type == I915_MMAP_TYPE_GTT)
		intel_gt_flush_ggtt_writes(to_gt(i915));

	for_each_uabi_engine(engine, i915) {
		struct i915_request *rq;
		struct i915_vma *vma;
		struct i915_gem_ww_ctx ww;

		vma = i915_vma_instance(obj, engine->kernel_context->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out_unmap;
		}

		i915_gem_ww_ctx_init(&ww, false);
retry:
		err = i915_gem_object_lock(obj, &ww);
		if (!err)
			err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
		if (err)
			goto out_ww;

		rq = i915_request_create(engine->kernel_context);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto out_unpin;
		}

		err = i915_request_await_object(rq, vma->obj, false);
		if (err == 0)
			err = i915_vma_move_to_active(vma, rq, 0);

		err = engine->emit_bb_start(rq, vma->node.start, 0, 0);
		i915_request_get(rq);
		i915_request_add(rq);

		if (i915_request_wait(rq, 0, HZ / 5) < 0) {
			struct drm_printer p =
				drm_info_printer(engine->i915->drm.dev);

			pr_err("%s(%s, %s): Failed to execute batch\n",
			       __func__, engine->name, obj->mm.region->name);
			intel_engine_dump(engine, &p,
					  "%s\n", engine->name);

			intel_gt_set_wedged(engine->gt);
			err = -EIO;
		}
		i915_request_put(rq);

out_unpin:
		i915_vma_unpin(vma);
out_ww:
		if (err == -EDEADLK) {
			err = i915_gem_ww_ctx_backoff(&ww);
			if (!err)
				goto retry;
		}
		i915_gem_ww_ctx_fini(&ww);
		if (err)
			goto out_unmap;
	}

out_unmap:
	vm_munmap(addr, obj->base.size);
	return err;
}

static int igt_mmap_gpu(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_memory_region *mr;
	enum intel_region_id id;

	for_each_memory_region(mr, i915, id) {
		struct drm_i915_gem_object *obj;
		int err;

		obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1);
		if (obj == ERR_PTR(-ENODEV))
			continue;

		if (IS_ERR(obj))
			return PTR_ERR(obj);

		err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_GTT);
		if (err == 0)
			err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_WC);
		if (err == 0)
			err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_FIXED);

		i915_gem_object_put(obj);
		if (err)
			return err;
	}

	return 0;
}

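/*
 * PTE walkers for apply_to_page_range(): after faulting the mmap in, every
 * PTE must be present; after the mmap has been revoked, every PTE must be
 * gone again.
 */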
static int check_present_pte(pte_t *pte, unsigned long addr, void *data)
{
	if (!pte_present(*pte) || pte_none(*pte)) {
		pr_err("missing PTE:%lx\n",
		       (addr - (unsigned long)data) >> PAGE_SHIFT);
		return -EINVAL;
	}

	return 0;
}

static int check_absent_pte(pte_t *pte, unsigned long addr, void *data)
{
	if (pte_present(*pte) && !pte_none(*pte)) {
		pr_err("present PTE:%lx; expected to be revoked\n",
		       (addr - (unsigned long)data) >> PAGE_SHIFT);
		return -EINVAL;
	}

	return 0;
}

static int check_present(unsigned long addr, unsigned long len)
{
	return apply_to_page_range(current->mm, addr, len,
				   check_present_pte, (void *)addr);
}

static int check_absent(unsigned long addr, unsigned long len)
{
	return apply_to_page_range(current->mm, addr, len,
				   check_absent_pte, (void *)addr);
}

static int prefault_range(u64 start, u64 len)
{
	const char __user *addr, *end;
	char __maybe_unused c;
	int err;

	addr = u64_to_user_ptr(start);
	end = addr + len;

	for (; addr < end; addr += PAGE_SIZE) {
		err = __get_user(c, addr);
		if (err)
			return err;
	}

	return __get_user(c, end - 1);
}

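/*
 * Fault in the whole mmap, then unbind the object (and, for non-GTT
 * mappings, release its pages) and verify that all user PTEs have been
 * revoked so the stale mapping can no longer reach the object.
 */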
static int __igt_mmap_revoke(struct drm_i915_private *i915,
			     struct drm_i915_gem_object *obj,
			     enum i915_mmap_type type)
{
	unsigned long addr;
	int err;
	u64 offset;

	if (!can_mmap(obj, type))
		return 0;

	err = __assign_mmap_offset(obj, type, &offset, NULL);
	if (err)
		return err;

	addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
	if (IS_ERR_VALUE(addr))
		return addr;

	err = prefault_range(addr, obj->base.size);
	if (err)
		goto out_unmap;

	err = check_present(addr, obj->base.size);
	if (err) {
		pr_err("%s: was not present\n", obj->mm.region->name);
		goto out_unmap;
	}

	/*
	 * After unbinding the object from the GGTT, its address may be reused
	 * for other objects. Ergo we have to revoke the previous mmap PTE
	 * access as it no longer points to the same object.
	 */
	err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
	if (err) {
		pr_err("Failed to unbind object!\n");
		goto out_unmap;
	}

	if (type != I915_MMAP_TYPE_GTT) {
		i915_gem_object_lock(obj, NULL);
		__i915_gem_object_put_pages(obj);
		i915_gem_object_unlock(obj);
		if (i915_gem_object_has_pages(obj)) {
			pr_err("Failed to put-pages object!\n");
			err = -EINVAL;
			goto out_unmap;
		}
	}

	err = check_absent(addr, obj->base.size);
	if (err) {
		pr_err("%s: was not absent\n", obj->mm.region->name);
		goto out_unmap;
	}

out_unmap:
	vm_munmap(addr, obj->base.size);
	return err;
}

static int igt_mmap_revoke(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_memory_region *mr;
	enum intel_region_id id;

	for_each_memory_region(mr, i915, id) {
		struct drm_i915_gem_object *obj;
		int err;

		obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1);
		if (obj == ERR_PTR(-ENODEV))
			continue;

		if (IS_ERR(obj))
			return PTR_ERR(obj);

		err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_GTT);
		if (err == 0)
			err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_WC);
		if (err == 0)
			err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_FIXED);

		i915_gem_object_put(obj);
		if (err)
			return err;
	}

	return 0;
}

int i915_gem_mman_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_partial_tiling),
		SUBTEST(igt_smoke_tiling),
		SUBTEST(igt_mmap_offset_exhaustion),
		SUBTEST(igt_mmap),
		SUBTEST(igt_mmap_access),
		SUBTEST(igt_mmap_revoke),
		SUBTEST(igt_mmap_gpu),
	};

	return i915_subtests(tests, i915);
}