/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#include <linux/prime_numbers.h>

#include "gt/intel_gt_pm.h"
#include "huge_gem_object.h"
#include "i915_selftest.h"
#include "selftests/igt_flush_test.h"

struct tile {
	unsigned int width;
	unsigned int height;
	unsigned int stride;
	unsigned int size;
	unsigned int tiling;
	unsigned int swizzle;
};

/* Extract the selected address bit and shift it down to bit 6 for swizzling. */
static u64 swizzle_bit(unsigned int bit, u64 offset)
{
	return (offset & BIT_ULL(bit)) >> (bit - 6);
}

/*
 * Convert a linear offset through the tiled view into the offset within the
 * object's backing store, applying the tile layout and the bit-6 swizzle.
 */
static u64 tiled_offset(const struct tile *tile, u64 v)
{
	u64 x, y;

	if (tile->tiling == I915_TILING_NONE)
		return v;

	y = div64_u64_rem(v, tile->stride, &x);
	v = div64_u64_rem(y, tile->height, &y) * tile->stride * tile->height;

	if (tile->tiling == I915_TILING_X) {
		v += y * tile->width;
		v += div64_u64_rem(x, tile->width, &x) << tile->size;
		v += x;
	} else if (tile->width == 128) {
		const unsigned int ytile_span = 16;
		const unsigned int ytile_height = 512;

		v += y * ytile_span;
		v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
		v += x;
	} else {
		const unsigned int ytile_span = 32;
		const unsigned int ytile_height = 256;

		v += y * ytile_span;
		v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
		v += x;
	}

	switch (tile->swizzle) {
	case I915_BIT_6_SWIZZLE_9:
		v ^= swizzle_bit(9, v);
		break;
	case I915_BIT_6_SWIZZLE_9_10:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v);
		break;
	case I915_BIT_6_SWIZZLE_9_11:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(11, v);
		break;
	case I915_BIT_6_SWIZZLE_9_10_11:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v) ^ swizzle_bit(11, v);
		break;
	}

	return v;
}

static int check_partial_mapping(struct drm_i915_gem_object *obj,
				 const struct tile *tile,
				 unsigned long end_time)
{
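	/*
	 * Write a unique value through each partial GGTT mapping of the
	 * object, then read it back through the CPU and check that the write
	 * landed at the location predicted by tiled_offset().
	 */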
	const unsigned int nreal = obj->scratch / PAGE_SIZE;
	const unsigned long npages = obj->base.size / PAGE_SIZE;
	struct i915_vma *vma;
	unsigned long page;
	int err;

	if (igt_timeout(end_time,
			"%s: timed out before tiling=%d stride=%d\n",
			__func__, tile->tiling, tile->stride))
		return -EINTR;

	err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
	if (err) {
		pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
		       tile->tiling, tile->stride, err);
		return err;
	}

	GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
	GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);

	for_each_prime_number_from(page, 1, npages) {
		struct i915_ggtt_view view =
			compute_partial_view(obj, page, MIN_CHUNK_PAGES);
		u32 __iomem *io;
		struct page *p;
		unsigned int n;
		u64 offset;
		u32 *cpu;

		GEM_BUG_ON(view.partial.size > nreal);
		cond_resched();

		err = i915_gem_object_set_to_gtt_domain(obj, true);
		if (err) {
			pr_err("Failed to flush to GTT write domain; err=%d\n",
			       err);
			return err;
		}

		vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
		if (IS_ERR(vma)) {
			pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
			       page, (int)PTR_ERR(vma));
			return PTR_ERR(vma);
		}

		n = page - view.partial.offset;
		GEM_BUG_ON(n >= view.partial.size);

		io = i915_vma_pin_iomap(vma);
		i915_vma_unpin(vma);
		if (IS_ERR(io)) {
			pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
			       page, (int)PTR_ERR(io));
			return PTR_ERR(io);
		}

		iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
		i915_vma_unpin_iomap(vma);

		offset = tiled_offset(tile, page << PAGE_SHIFT);
		if (offset >= obj->base.size)
			continue;

		i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);

		p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
		cpu = kmap(p) + offset_in_page(offset);
		drm_clflush_virt_range(cpu, sizeof(*cpu));
		if (*cpu != (u32)page) {
			pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
			       page, n,
			       view.partial.offset,
			       view.partial.size,
			       vma->size >> PAGE_SHIFT,
			       tile->tiling ? tile_row_pages(obj) : 0,
			       vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
			       offset >> PAGE_SHIFT,
			       (unsigned int)offset_in_page(offset),
			       offset,
			       (u32)page, *cpu);
			err = -EINVAL;
		}
		*cpu = 0;
		drm_clflush_virt_range(cpu, sizeof(*cpu));
		kunmap(p);
		if (err)
			return err;

		i915_vma_destroy(vma);
	}

	return 0;
}

static int igt_partial_tiling(void *arg)
{
	const unsigned int nreal = 1 << 12; /* largest tile row x2 */
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	intel_wakeref_t wakeref;
	int tiling;
	int err;

	/* We want to check the page mapping and fencing of a large object
	 * mmapped through the GTT. The object we create is larger than can
	 * possibly be mmaped as a whole, and so we must use partial GGTT vma.
	 * We then check that a write through each partial GGTT vma ends up
	 * in the right set of pages within the object, and with the expected
	 * tiling, which we verify by manual swizzling.
	 */

	obj = huge_gem_object(i915,
			      nreal << PAGE_SHIFT,
			      (1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	err = i915_gem_object_pin_pages(obj);
	if (err) {
		pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
		       nreal, obj->base.size / PAGE_SIZE, err);
		goto out;
	}

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	if (1) {
		IGT_TIMEOUT(end);
		struct tile tile;

		tile.height = 1;
		tile.width = 1;
		tile.size = 0;
		tile.stride = 0;
		tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
		tile.tiling = I915_TILING_NONE;

		err = check_partial_mapping(obj, &tile, end);
		if (err && err != -EINTR)
			goto out_unlock;
	}

	for (tiling = I915_TILING_X; tiling <= I915_TILING_Y; tiling++) {
		IGT_TIMEOUT(end);
		unsigned int max_pitch;
		unsigned int pitch;
		struct tile tile;

		if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
			/*
			 * The swizzling pattern is actually unknown as it
			 * varies based on physical address of each page.
			 * See i915_gem_detect_bit_6_swizzle().
			 */
			break;

		tile.tiling = tiling;
		switch (tiling) {
		case I915_TILING_X:
			tile.swizzle = i915->mm.bit_6_swizzle_x;
			break;
		case I915_TILING_Y:
			tile.swizzle = i915->mm.bit_6_swizzle_y;
			break;
		}

		GEM_BUG_ON(tile.swizzle == I915_BIT_6_SWIZZLE_UNKNOWN);
		if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
		    tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
			continue;

		if (INTEL_GEN(i915) <= 2) {
			tile.height = 16;
			tile.width = 128;
			tile.size = 11;
		} else if (tile.tiling == I915_TILING_Y &&
			   HAS_128_BYTE_Y_TILING(i915)) {
			tile.height = 32;
			tile.width = 128;
			tile.size = 12;
		} else {
			tile.height = 8;
			tile.width = 512;
			tile.size = 12;
		}

		if (INTEL_GEN(i915) < 4)
			max_pitch = 8192 / tile.width;
		else if (INTEL_GEN(i915) < 7)
			max_pitch = 128 * I965_FENCE_MAX_PITCH_VAL / tile.width;
		else
			max_pitch = 128 * GEN7_FENCE_MAX_PITCH_VAL / tile.width;

		for (pitch = max_pitch; pitch; pitch >>= 1) {
			tile.stride = tile.width * pitch;
			err = check_partial_mapping(obj, &tile, end);
			if (err == -EINTR)
				goto next_tiling;
			if (err)
				goto out_unlock;

			if (pitch > 2 && INTEL_GEN(i915) >= 4) {
				tile.stride = tile.width * (pitch - 1);
				err = check_partial_mapping(obj, &tile, end);
				if (err == -EINTR)
					goto next_tiling;
				if (err)
					goto out_unlock;
			}

			if (pitch < max_pitch && INTEL_GEN(i915) >= 4) {
				tile.stride = tile.width * (pitch + 1);
				err = check_partial_mapping(obj, &tile, end);
				if (err == -EINTR)
					goto next_tiling;
				if (err)
					goto out_unlock;
			}
		}

		if (INTEL_GEN(i915) >= 4) {
			for_each_prime_number(pitch, max_pitch) {
				tile.stride = tile.width * pitch;
				err = check_partial_mapping(obj, &tile, end);
				if (err == -EINTR)
					goto next_tiling;
				if (err)
					goto out_unlock;
			}
		}

next_tiling: ;
	}

out_unlock:
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return err;
}

/*
 * Keep the object busy by submitting a dummy request that writes to it, then
 * drop our local reference so that only the active reference keeps it alive.
 */
static int make_obj_busy(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_request *rq;
	struct i915_vma *vma;
	int err;

	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		return err;

	rq = i915_request_create(i915->engine[RCS0]->kernel_context);
	if (IS_ERR(rq)) {
		i915_vma_unpin(vma);
		return PTR_ERR(rq);
	}

	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);

	i915_request_add(rq);

	__i915_gem_object_release_unless_active(obj);
	i915_vma_unpin(vma);

	return err;
}

static bool assert_mmap_offset(struct drm_i915_private *i915,
			       unsigned long size,
			       int expected)
{
	struct drm_i915_gem_object *obj;
	int err;

	obj = i915_gem_object_create_internal(i915, size);
	if (IS_ERR(obj))
		return false; /* failing to create the object can never match the expected result */

	err = create_mmap_offset(obj);
	i915_gem_object_put(obj);

	return err == expected;
}

static void disable_retire_worker(struct drm_i915_private *i915)
{
	i915_gem_shrinker_unregister(i915);

	intel_gt_pm_get(i915);

	cancel_delayed_work_sync(&i915->gem.retire_work);
	flush_work(&i915->gem.idle_work);
}

static void restore_retire_worker(struct drm_i915_private *i915)
{
	intel_gt_pm_put(i915);

	mutex_lock(&i915->drm.struct_mutex);
	igt_flush_test(i915, I915_WAIT_LOCKED);
	mutex_unlock(&i915->drm.struct_mutex);

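	/* Outstanding work has been flushed; let the shrinker run again. */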
	i915_gem_shrinker_register(i915);
}

static int igt_mmap_offset_exhaustion(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node resv, *hole;
	u64 hole_start, hole_end;
	int loop, err;

	/* Disable background reaper */
	disable_retire_worker(i915);
	GEM_BUG_ON(!i915->gt.awake);

	/* Trim the device mmap space to only a page */
	memset(&resv, 0, sizeof(resv));
	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
		resv.start = hole_start;
		resv.size = hole_end - hole_start - 1; /* PAGE_SIZE units */
		err = drm_mm_reserve_node(mm, &resv);
		if (err) {
			pr_err("Failed to trim VMA manager, err=%d\n", err);
			goto out_park;
		}
		break;
	}

	/* Just fits! */
	if (!assert_mmap_offset(i915, PAGE_SIZE, 0)) {
		pr_err("Unable to insert object into single page hole\n");
		err = -EINVAL;
		goto out;
	}

	/* Too large */
	if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, -ENOSPC)) {
		pr_err("Unexpectedly succeeded in inserting too large object into single page hole\n");
		err = -EINVAL;
		goto out;
	}

	/* Fill the hole, further allocation attempts should then fail */
	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out;
	}

	err = create_mmap_offset(obj);
	if (err) {
		pr_err("Unable to insert object into reclaimed hole\n");
		goto err_obj;
	}

	if (!assert_mmap_offset(i915, PAGE_SIZE, -ENOSPC)) {
		pr_err("Unexpectedly succeeded in inserting object into no holes!\n");
		err = -EINVAL;
		goto err_obj;
	}

	i915_gem_object_put(obj);

	/* Now fill with busy dead objects that we expect to reap */
	for (loop = 0; loop < 3; loop++) {
		if (i915_terminally_wedged(i915))
			break;

		obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		mutex_lock(&i915->drm.struct_mutex);
		err = make_obj_busy(obj);
		mutex_unlock(&i915->drm.struct_mutex);
		if (err) {
			pr_err("[loop %d] Failed to busy the object\n", loop);
			goto err_obj;
		}

		/* NB we rely on the _active_ reference to access obj now */
		GEM_BUG_ON(!i915_gem_object_is_active(obj));
		err = create_mmap_offset(obj);
		if (err) {
			pr_err("[loop %d] create_mmap_offset failed with err=%d\n",
			       loop, err);
			goto out;
		}
	}

out:
	drm_mm_remove_node(&resv);
out_park:
	restore_retire_worker(i915);
	return err;
err_obj:
	i915_gem_object_put(obj);
	goto out;
}

int i915_gem_mman_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_partial_tiling),
		SUBTEST(igt_mmap_offset_exhaustion),
	};

	return i915_subtests(tests, i915);
}