/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#include <linux/prime_numbers.h>

#include "gt/intel_engine_pm.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gem/i915_gem_region.h"
#include "huge_gem_object.h"
#include "i915_selftest.h"
#include "selftests/i915_random.h"
#include "selftests/igt_flush_test.h"
#include "selftests/igt_mmap.h"

/* Description of one tiling/fencing configuration under test. */
struct tile {
	unsigned int width;	/* tile row width in bytes */
	unsigned int height;	/* rows per tile */
	unsigned int stride;	/* object stride in bytes */
	unsigned int size;	/* log2 of the tile size (X-tiling shift) */
	unsigned int tiling;	/* I915_TILING_{NONE,X,Y} */
	unsigned int swizzle;	/* I915_BIT_6_SWIZZLE_* pattern */
};

/*
 * Extract the given address bit and move it down to bit 6, ready to be
 * XORed into an offset to emulate the hardware bit-6 swizzling.
 */
static u64 swizzle_bit(unsigned int bit, u64 offset)
{
	return (offset & BIT_ULL(bit)) >> (bit - 6);
}

/*
 * Translate a linear byte offset @v into the physical byte offset the
 * hardware would use under the tiling/swizzle described by @tile.
 * This is the software model against which GTT writes are checked.
 */
static u64 tiled_offset(const struct tile *tile, u64 v)
{
	u64 x, y;

	if (tile->tiling == I915_TILING_NONE)
		return v;

	/* Split linear offset into (row, column) then whole-tile rows. */
	y = div64_u64_rem(v, tile->stride, &x);
	v = div64_u64_rem(y, tile->height, &y) * tile->stride * tile->height;

	if (tile->tiling == I915_TILING_X) {
		v += y * tile->width;
		v += div64_u64_rem(x, tile->width, &x) << tile->size;
		v += x;
	} else if (tile->width == 128) {
		/* 128-byte Y-tiling layout */
		const unsigned int ytile_span = 16;
		const unsigned int ytile_height = 512;

		v += y * ytile_span;
		v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
		v += x;
	} else {
		/* legacy 512-byte-width Y-tiling layout */
		const unsigned int ytile_span = 32;
		const unsigned int ytile_height = 256;

		v += y * ytile_span;
		v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
		v += x;
	}

	/* Apply the platform's bit-6 address swizzle, if any. */
	switch (tile->swizzle) {
	case I915_BIT_6_SWIZZLE_9:
		v ^= swizzle_bit(9, v);
		break;
	case I915_BIT_6_SWIZZLE_9_10:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v);
		break;
	case I915_BIT_6_SWIZZLE_9_11:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(11, v);
		break;
	case I915_BIT_6_SWIZZLE_9_10_11:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v) ^ swizzle_bit(11, v);
		break;
	}

	return v;
}

/*
 * Write through a single, randomly chosen partial GGTT view of @obj and
 * verify the dword lands at the page/offset predicted by tiled_offset().
 * Returns 0 on success, -EINVAL on a misplaced write, or a setup error.
 */
static int check_partial_mapping(struct drm_i915_gem_object *obj,
				 const struct tile *tile,
				 struct rnd_state *prng)
{
	const unsigned long npages = obj->base.size / PAGE_SIZE;
	struct i915_ggtt_view view;
	struct i915_vma *vma;
	unsigned long page;
	u32 __iomem *io;
	struct page *p;
	unsigned int n;
	u64 offset;
	u32 *cpu;
	int err;

	err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
	if (err) {
		pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
		       tile->tiling, tile->stride, err);
		return err;
	}

	GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
	GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);

	/* Move into the GTT write domain before poking through the aperture. */
	i915_gem_object_lock(obj, NULL);
	err = i915_gem_object_set_to_gtt_domain(obj, true);
	i915_gem_object_unlock(obj);
	if (err) {
		pr_err("Failed to flush to GTT write domain; err=%d\n", err);
		return err;
	}

	/* Pick a random page and build the minimal partial view covering it. */
	page = i915_prandom_u32_max_state(npages, prng);
	view = compute_partial_view(obj, page, MIN_CHUNK_PAGES);

	vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma)) {
		pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
		       page, (int)PTR_ERR(vma));
		return PTR_ERR(vma);
	}

	n = page - view.partial.offset;
	GEM_BUG_ON(n >= view.partial.size);

	/*
	 * NOTE(review): the vma is unpinned before the iomap error check;
	 * presumably i915_vma_pin_iomap() took its own pin on success —
	 * confirm against i915_vma_pin_iomap()'s contract.
	 */
	io = i915_vma_pin_iomap(vma);
	i915_vma_unpin(vma);
	if (IS_ERR(io)) {
		pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
		       page, (int)PTR_ERR(io));
		err = PTR_ERR(io);
		goto out;
	}

	/* Stamp the page index into the chosen page via the GTT mapping. */
	iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
	i915_vma_unpin_iomap(vma);

	offset = tiled_offset(tile, page << PAGE_SHIFT);
	if (offset >= obj->base.size)
		goto out; /* predicted location past the object: skip check */

	intel_gt_flush_ggtt_writes(&to_i915(obj->base.dev)->gt);

	/* Read the backing page back through the CPU and compare. */
	p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
	cpu = kmap(p) + offset_in_page(offset);
	drm_clflush_virt_range(cpu, sizeof(*cpu));
	if (*cpu != (u32)page) {
		pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
		       page, n,
		       view.partial.offset,
		       view.partial.size,
		       vma->size >> PAGE_SHIFT,
		       tile->tiling ? tile_row_pages(obj) : 0,
		       vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
		       offset >> PAGE_SHIFT,
		       (unsigned int)offset_in_page(offset),
		       offset,
		       (u32)page, *cpu);
		err = -EINVAL;
	}
	*cpu = 0; /* scrub so a later trial cannot see a stale match */
	drm_clflush_virt_range(cpu, sizeof(*cpu));
	kunmap(p);

out:
	__i915_vma_put(vma);
	return err;
}

/*
 * Exhaustive variant of check_partial_mapping(): walk every prime page
 * index of @obj, write through its partial GGTT view and verify placement,
 * until done or @end_time expires (-EINTR on timeout).
 */
static int check_partial_mappings(struct drm_i915_gem_object *obj,
				  const struct tile *tile,
				  unsigned long end_time)
{
	const unsigned int nreal = obj->scratch / PAGE_SIZE;
	const unsigned long npages = obj->base.size / PAGE_SIZE;
	struct i915_vma *vma;
	unsigned long page;
	int err;

	err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
	if (err) {
		pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
		       tile->tiling, tile->stride, err);
		return err;
	}

	GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
	GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);

	i915_gem_object_lock(obj, NULL);
	err = i915_gem_object_set_to_gtt_domain(obj, true);
	i915_gem_object_unlock(obj);
	if (err) {
		pr_err("Failed to flush to GTT write domain; err=%d\n", err);
		return err;
	}

	for_each_prime_number_from(page, 1, npages) {
		struct i915_ggtt_view view =
			compute_partial_view(obj, page, MIN_CHUNK_PAGES);
		u32 __iomem *io;
		struct page *p;
		unsigned int n;
		u64 offset;
		u32 *cpu;

		GEM_BUG_ON(view.partial.size > nreal);
		cond_resched();

		vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
		if (IS_ERR(vma)) {
			pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
			       page, (int)PTR_ERR(vma));
			return PTR_ERR(vma);
		}

		n = page - view.partial.offset;
		GEM_BUG_ON(n >= view.partial.size);

		io = i915_vma_pin_iomap(vma);
		i915_vma_unpin(vma);
		if (IS_ERR(io)) {
			pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
			       page, (int)PTR_ERR(io));
			return PTR_ERR(io);
		}

		iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
		i915_vma_unpin_iomap(vma);

		offset = tiled_offset(tile, page << PAGE_SHIFT);
		if (offset >= obj->base.size)
			continue;

		intel_gt_flush_ggtt_writes(&to_i915(obj->base.dev)->gt);

		p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
		cpu = kmap(p) + offset_in_page(offset);
		drm_clflush_virt_range(cpu, sizeof(*cpu));
		if (*cpu != (u32)page) {
			pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
			       page, n,
			       view.partial.offset,
			       view.partial.size,
			       vma->size >> PAGE_SHIFT,
			       tile->tiling ? tile_row_pages(obj) : 0,
			       vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
			       offset >> PAGE_SHIFT,
			       (unsigned int)offset_in_page(offset),
			       offset,
			       (u32)page, *cpu);
			err = -EINVAL;
		}
		*cpu = 0;
		drm_clflush_virt_range(cpu, sizeof(*cpu));
		kunmap(p);
		/*
		 * NOTE(review): this early return skips the __i915_vma_put()
		 * below — looks like the vma reference is leaked on the
		 * mismatch path; verify against the sibling
		 * check_partial_mapping() which puts the vma on all exits.
		 */
		if (err)
			return err;

		__i915_vma_put(vma);

		if (igt_timeout(end_time,
				"%s: timed out after tiling=%d stride=%d\n",
				__func__, tile->tiling, tile->stride))
			return -EINTR;
	}

	return 0;
}

/*
 * Fill in the tile geometry (height/width/size) for the platform and
 * tiling mode already stored in @tile, and return the maximum pitch
 * (in tile widths) supported by the platform's fence registers.
 */
static unsigned int
setup_tile_size(struct tile *tile, struct drm_i915_private *i915)
{
	if (GRAPHICS_VER(i915) <= 2) {
		tile->height = 16;
		tile->width = 128;
		tile->size = 11;
	} else if (tile->tiling == I915_TILING_Y &&
		   HAS_128_BYTE_Y_TILING(i915)) {
		tile->height = 32;
		tile->width = 128;
		tile->size = 12;
	} else {
		tile->height = 8;
		tile->width = 512;
		tile->size = 12;
	}

	if (GRAPHICS_VER(i915) < 4)
		return 8192 / tile->width;
	else if (GRAPHICS_VER(i915) < 7)
		return 128 * I965_FENCE_MAX_PITCH_VAL / tile->width;
	else
		return 128 * GEN7_FENCE_MAX_PITCH_VAL / tile->width;
}

/*
 * Selftest: exhaustively exercise partial GGTT mappings of a huge object
 * across tiling modes and a spread of strides, verifying each write lands
 * where the software tiling model predicts.
 */
static int igt_partial_tiling(void *arg)
{
	const unsigned int nreal = 1 << 12; /* largest tile row x2 */
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	intel_wakeref_t wakeref;
	int tiling;
	int err;

	/* Partial views are only reachable through the mappable aperture. */
	if (!i915_ggtt_has_aperture(&i915->ggtt))
		return 0;

	/* We want to check the page mapping and fencing of a large object
	 * mmapped through the GTT. The object we create is larger than can
	 * possibly be mmaped as a whole, and so we must use partial GGTT vma.
	 * We then check that a write through each partial GGTT vma ends up
	 * in the right set of pages within the object, and with the expected
	 * tiling, which we verify by manual swizzling.
	 */

	obj = huge_gem_object(i915,
			      nreal << PAGE_SHIFT,
			      (1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	err = i915_gem_object_pin_pages_unlocked(obj);
	if (err) {
		pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
		       nreal, obj->base.size / PAGE_SIZE, err);
		goto out;
	}

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	/* Baseline: untiled, unswizzled mapping. */
	if (1) {
		IGT_TIMEOUT(end);
		struct tile tile;

		tile.height = 1;
		tile.width = 1;
		tile.size = 0;
		tile.stride = 0;
		tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
		tile.tiling = I915_TILING_NONE;

		/*
		 * NOTE(review): -EINTR (timeout) is tolerated here but err is
		 * not reset; if a later path falls through to out_unlock with
		 * err still -EINTR the selftest reports failure — confirm.
		 */
		err = check_partial_mappings(obj, &tile, end);
		if (err && err != -EINTR)
			goto out_unlock;
	}

	for (tiling = I915_TILING_X; tiling <= I915_TILING_Y; tiling++) {
		IGT_TIMEOUT(end);
		unsigned int max_pitch;
		unsigned int pitch;
		struct tile tile;

		if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
			/*
			 * The swizzling pattern is actually unknown as it
			 * varies based on physical address of each page.
			 * See i915_gem_detect_bit_6_swizzle().
			 */
			break;

		tile.tiling = tiling;
		switch (tiling) {
		case I915_TILING_X:
			tile.swizzle = i915->ggtt.bit_6_swizzle_x;
			break;
		case I915_TILING_Y:
			tile.swizzle = i915->ggtt.bit_6_swizzle_y;
			break;
		}

		GEM_BUG_ON(tile.swizzle == I915_BIT_6_SWIZZLE_UNKNOWN);
		if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
		    tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
			continue; /* physical-address swizzles: unverifiable */

		max_pitch = setup_tile_size(&tile, i915);

		/* Powers of two, and their off-by-one neighbours on gen4+. */
		for (pitch = max_pitch; pitch; pitch >>= 1) {
			tile.stride = tile.width * pitch;
			err = check_partial_mappings(obj, &tile, end);
			if (err == -EINTR)
				goto next_tiling;
			if (err)
				goto out_unlock;

			if (pitch > 2 && GRAPHICS_VER(i915) >= 4) {
				tile.stride = tile.width * (pitch - 1);
				err = check_partial_mappings(obj, &tile, end);
				if (err == -EINTR)
					goto next_tiling;
				if (err)
					goto out_unlock;
			}

			if (pitch < max_pitch && GRAPHICS_VER(i915) >= 4) {
				tile.stride = tile.width * (pitch + 1);
				err = check_partial_mappings(obj, &tile, end);
				if (err == -EINTR)
					goto next_tiling;
				if (err)
					goto out_unlock;
			}
		}

		/* Gen4+ fences allow arbitrary pitch: also try primes. */
		if (GRAPHICS_VER(i915) >= 4) {
			for_each_prime_number(pitch, max_pitch) {
				tile.stride = tile.width * pitch;
				err = check_partial_mappings(obj, &tile, end);
				if (err == -EINTR)
					goto next_tiling;
				if (err)
					goto out_unlock;
			}
		}

next_tiling: ;
	}

out_unlock:
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return err;
}

/*
 * Selftest: randomised counterpart of igt_partial_tiling(). Runs random
 * (tiling, stride, page) trials for ~1s per invocation; over many CI runs
 * with different seeds this approximates the exhaustive check.
 */
static int igt_smoke_tiling(void *arg)
{
	const unsigned int nreal = 1 << 12; /* largest tile row x2 */
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	intel_wakeref_t wakeref;
	I915_RND_STATE(prng);
	unsigned long count;
	IGT_TIMEOUT(end);
	int err;

	if (!i915_ggtt_has_aperture(&i915->ggtt))
		return 0;

	/*
	 * igt_partial_tiling() does an exhastive check of partial tiling
	 * chunking, but will undoubtably run out of time. Here, we do a
	 * randomised search and hope over many runs of 1s with different
	 * seeds we will do a thorough check.
	 *
	 * Remember to look at the st_seed if we see a flip-flop in BAT!
	 */

	/* Swizzle depends on physical page addresses here: cannot verify. */
	if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		return 0;

	obj = huge_gem_object(i915,
			      nreal << PAGE_SHIFT,
			      (1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	err = i915_gem_object_pin_pages_unlocked(obj);
	if (err) {
		pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
		       nreal, obj->base.size / PAGE_SIZE, err);
		goto out;
	}

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	count = 0;
	do {
		struct tile tile;

		/* Pick a random tiling mode and matching swizzle. */
		tile.tiling =
			i915_prandom_u32_max_state(I915_TILING_Y + 1, &prng);
		switch (tile.tiling) {
		case I915_TILING_NONE:
			tile.height = 1;
			tile.width = 1;
			tile.size = 0;
			tile.stride = 0;
			tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
			break;

		case I915_TILING_X:
			tile.swizzle = i915->ggtt.bit_6_swizzle_x;
			break;
		case I915_TILING_Y:
			tile.swizzle = i915->ggtt.bit_6_swizzle_y;
			break;
		}

		if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
		    tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
			continue;

		if (tile.tiling != I915_TILING_NONE) {
			unsigned int max_pitch = setup_tile_size(&tile, i915);

			/* Random stride in [1, max_pitch] tile widths. */
			tile.stride =
				i915_prandom_u32_max_state(max_pitch, &prng);
			tile.stride = (1 + tile.stride) * tile.width;
			if (GRAPHICS_VER(i915) < 4)
				tile.stride = rounddown_pow_of_two(tile.stride);
		}

		err = check_partial_mapping(obj, &tile, &prng);
		if (err)
			break;

		count++;
	} while (!__igt_timeout(end, NULL));

	pr_info("%s: Completed %lu trials\n", __func__, count);

	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return err;
}

/*
 * Keep @obj busy by emitting a write request against it on every uABI
 * engine, then drop our reference so the object stays alive only via its
 * active references. Returns 0 on success or a negative error code.
 */
static int make_obj_busy(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;

	for_each_uabi_engine(engine, i915) {
		struct i915_request *rq;
		struct i915_vma *vma;
		struct i915_gem_ww_ctx ww;
		int err;

		vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		/* ww transaction: lock the object, pin the vma, with backoff. */
		i915_gem_ww_ctx_init(&ww, false);
retry:
		err = i915_gem_object_lock(obj, &ww);
		if (!err)
			err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
		if (err)
			goto err;

		rq = intel_engine_create_kernel_request(engine);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_unpin;
		}

		/* Track the vma as written by this request. */
		err = i915_request_await_object(rq, vma->obj, true);
		if (err == 0)
			err = i915_vma_move_to_active(vma, rq,
						      EXEC_OBJECT_WRITE);

		i915_request_add(rq);
err_unpin:
		i915_vma_unpin(vma);
err:
		if (err == -EDEADLK) {
			/* ww deadlock: back off and retry the transaction. */
			err = i915_gem_ww_ctx_backoff(&ww);
			if (!err)
				goto retry;
		}
		i915_gem_ww_ctx_fini(&ww);
		if (err)
			return err;
	}

	i915_gem_object_put(obj); /* leave it only alive via its active ref */
	return 0;
}

/*
 * Create a throwaway internal object of @size and check that attaching a
 * GTT mmap offset yields exactly @expected (0 for success, or a negative
 * errno). Returns true when the outcome matches.
 */
static bool assert_mmap_offset(struct drm_i915_private *i915,
			       unsigned long size,
			       int expected)
{
	struct drm_i915_gem_object *obj;
	struct i915_mmap_offset *mmo;

	obj = i915_gem_object_create_internal(i915, size);
	if (IS_ERR(obj))
		return false;

	mmo = mmap_offset_attach(obj, I915_MMAP_OFFSET_GTT, NULL);
	i915_gem_object_put(obj);

	return PTR_ERR_OR_ZERO(mmo) == expected;
}

/* Stop background reclaim (shrinker + retire worker) for the test. */
static void disable_retire_worker(struct drm_i915_private *i915)
{
	i915_gem_driver_unregister__shrinker(i915);
	intel_gt_pm_get(&i915->gt);
	cancel_delayed_work_sync(&i915->gt.requests.retire_work);
}

/* Undo disable_retire_worker(): flush outstanding work, re-enable reclaim. */
static void restore_retire_worker(struct drm_i915_private *i915)
{
	igt_flush_test(i915);
	intel_gt_pm_put(&i915->gt);
	i915_gem_driver_register__shrinker(i915);
}

/* Take the vma-offset manager's address-space lock for manual mm surgery. */
static void mmap_offset_lock(struct drm_i915_private *i915)
	__acquires(&i915->drm.vma_offset_manager->vm_lock)
{
	write_lock(&i915->drm.vma_offset_manager->vm_lock);
}

static void mmap_offset_unlock(struct drm_i915_private *i915)
	__releases(&i915->drm.vma_offset_manager->vm_lock)
{
	write_unlock(&i915->drm.vma_offset_manager->vm_lock);
}

/*
 * Selftest: shrink the mmap-offset address space down to a single page,
 * then verify allocation succeeds when it just fits, fails with -ENOSPC
 * when too large or already full, and that busy-but-dead objects are
 * reaped to make room. Reserved filler nodes are tagged with color -1ul
 * so they can be identified and removed during cleanup.
 */
static int igt_mmap_offset_exhaustion(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *hole, *next;
	struct i915_mmap_offset *mmo;
	int loop, err = 0;

	/* Disable background reaper */
	disable_retire_worker(i915);
	GEM_BUG_ON(!i915->gt.awake);
	intel_gt_retire_requests(&i915->gt);
	i915_gem_drain_freed_objects(i915);

	/* Trim the device mmap space to only a page */
	mmap_offset_lock(i915);
	loop = 1; /* PAGE_SIZE units */
	list_for_each_entry_safe(hole, next, &mm->hole_stack, hole_stack) {
		struct drm_mm_node *resv;

		resv = kzalloc(sizeof(*resv), GFP_NOWAIT);
		if (!resv) {
			err = -ENOMEM;
			goto out_park;
		}

		/* Leave one page free in the first hole only (loop==1). */
		resv->start = drm_mm_hole_node_start(hole) + loop;
		resv->size = hole->hole_size - loop;
		resv->color = -1ul; /* marker for cleanup below */
		loop = 0;

		if (!resv->size) {
			kfree(resv);
			continue;
		}

		pr_debug("Reserving hole [%llx + %llx]\n",
			 resv->start, resv->size);

		err = drm_mm_reserve_node(mm, resv);
		if (err) {
			pr_err("Failed to trim VMA manager, err=%d\n", err);
			kfree(resv);
			goto out_park;
		}
	}
	GEM_BUG_ON(!list_is_singular(&mm->hole_stack));
	mmap_offset_unlock(i915);

	/* Just fits! */
	if (!assert_mmap_offset(i915, PAGE_SIZE, 0)) {
		pr_err("Unable to insert object into single page hole\n");
		err = -EINVAL;
		goto out;
	}

	/* Too large */
	if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, -ENOSPC)) {
		pr_err("Unexpectedly succeeded in inserting too large object into single page hole\n");
		err = -EINVAL;
		goto out;
	}

	/* Fill the hole, further allocation attempts should then fail */
	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out;
	}

	mmo = mmap_offset_attach(obj, I915_MMAP_OFFSET_GTT, NULL);
	if (IS_ERR(mmo)) {
		pr_err("Unable to insert object into reclaimed hole\n");
		err = PTR_ERR(mmo);
		goto err_obj;
	}

	if (!assert_mmap_offset(i915, PAGE_SIZE, -ENOSPC)) {
		pr_err("Unexpectedly succeeded in inserting object into no holes!\n");
		err = -EINVAL;
		goto err_obj;
	}

	i915_gem_object_put(obj);

	/* Now fill with busy dead objects that we expect to reap */
	for (loop = 0; loop < 3; loop++) {
		if (intel_gt_is_wedged(&i915->gt))
			break;

		obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = make_obj_busy(obj);
		if (err) {
			pr_err("[loop %d] Failed to busy the object\n", loop);
			goto err_obj;
		}
	}

out:
	mmap_offset_lock(i915);
out_park:
	/* Remove only our color==-1ul filler reservations. */
	drm_mm_for_each_node_safe(hole, next, mm) {
		if (hole->color != -1ul)
			continue;

		drm_mm_remove_node(hole);
		kfree(hole);
	}
	mmap_offset_unlock(i915);
	restore_retire_worker(i915);
	return err;
err_obj:
	i915_gem_object_put(obj);
	goto out;
}

static int gtt_set(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	void __iomem *map;
	int err = 0;

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	intel_gt_pm_get(vma->vm->gt);
	map = i915_vma_pin_iomap(vma);
	i915_vma_unpin(vma);
	if (IS_ERR(map)) {
		err =
PTR_ERR(map); 7566fedafacSChris Wilson goto out; 7576fedafacSChris Wilson } 7589771d5f7SAbdiel Janulgue 7599771d5f7SAbdiel Janulgue memset_io(map, POISON_INUSE, obj->base.size); 7609771d5f7SAbdiel Janulgue i915_vma_unpin_iomap(vma); 7619771d5f7SAbdiel Janulgue 7629771d5f7SAbdiel Janulgue out: 7639771d5f7SAbdiel Janulgue intel_gt_pm_put(vma->vm->gt); 7649771d5f7SAbdiel Janulgue return err; 7659771d5f7SAbdiel Janulgue } 7669771d5f7SAbdiel Janulgue 7679771d5f7SAbdiel Janulgue static int gtt_check(struct drm_i915_gem_object *obj) 7689771d5f7SAbdiel Janulgue { 7699771d5f7SAbdiel Janulgue struct i915_vma *vma; 7709771d5f7SAbdiel Janulgue void __iomem *map; 7719771d5f7SAbdiel Janulgue int err = 0; 7729771d5f7SAbdiel Janulgue 7739771d5f7SAbdiel Janulgue vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE); 7749771d5f7SAbdiel Janulgue if (IS_ERR(vma)) 7759771d5f7SAbdiel Janulgue return PTR_ERR(vma); 7769771d5f7SAbdiel Janulgue 7779771d5f7SAbdiel Janulgue intel_gt_pm_get(vma->vm->gt); 7789771d5f7SAbdiel Janulgue map = i915_vma_pin_iomap(vma); 7799771d5f7SAbdiel Janulgue i915_vma_unpin(vma); 7809771d5f7SAbdiel Janulgue if (IS_ERR(map)) { 7819771d5f7SAbdiel Janulgue err = PTR_ERR(map); 7829771d5f7SAbdiel Janulgue goto out; 7839771d5f7SAbdiel Janulgue } 7849771d5f7SAbdiel Janulgue 7859771d5f7SAbdiel Janulgue if (memchr_inv((void __force *)map, POISON_FREE, obj->base.size)) { 7869771d5f7SAbdiel Janulgue pr_err("%s: Write via mmap did not land in backing store (GTT)\n", 7879771d5f7SAbdiel Janulgue obj->mm.region->name); 7889771d5f7SAbdiel Janulgue err = -EINVAL; 7899771d5f7SAbdiel Janulgue } 7909771d5f7SAbdiel Janulgue i915_vma_unpin_iomap(vma); 7919771d5f7SAbdiel Janulgue 7929771d5f7SAbdiel Janulgue out: 7939771d5f7SAbdiel Janulgue intel_gt_pm_put(vma->vm->gt); 7949771d5f7SAbdiel Janulgue return err; 7959771d5f7SAbdiel Janulgue } 7969771d5f7SAbdiel Janulgue 7979771d5f7SAbdiel Janulgue static int wc_set(struct drm_i915_gem_object *obj) 7989771d5f7SAbdiel Janulgue { 
7999771d5f7SAbdiel Janulgue void *vaddr; 8009771d5f7SAbdiel Janulgue 8016f791ffeSMaarten Lankhorst vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC); 8029771d5f7SAbdiel Janulgue if (IS_ERR(vaddr)) 8039771d5f7SAbdiel Janulgue return PTR_ERR(vaddr); 8049771d5f7SAbdiel Janulgue 8059771d5f7SAbdiel Janulgue memset(vaddr, POISON_INUSE, obj->base.size); 8066fedafacSChris Wilson i915_gem_object_flush_map(obj); 8076fedafacSChris Wilson i915_gem_object_unpin_map(obj); 8086fedafacSChris Wilson 8099771d5f7SAbdiel Janulgue return 0; 810cc662126SAbdiel Janulgue } 8116fedafacSChris Wilson 8129771d5f7SAbdiel Janulgue static int wc_check(struct drm_i915_gem_object *obj) 8139771d5f7SAbdiel Janulgue { 8149771d5f7SAbdiel Janulgue void *vaddr; 8159771d5f7SAbdiel Janulgue int err = 0; 8169771d5f7SAbdiel Janulgue 8176f791ffeSMaarten Lankhorst vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC); 8189771d5f7SAbdiel Janulgue if (IS_ERR(vaddr)) 8199771d5f7SAbdiel Janulgue return PTR_ERR(vaddr); 8209771d5f7SAbdiel Janulgue 8219771d5f7SAbdiel Janulgue if (memchr_inv(vaddr, POISON_FREE, obj->base.size)) { 8229771d5f7SAbdiel Janulgue pr_err("%s: Write via mmap did not land in backing store (WC)\n", 8239771d5f7SAbdiel Janulgue obj->mm.region->name); 8249771d5f7SAbdiel Janulgue err = -EINVAL; 8259771d5f7SAbdiel Janulgue } 8269771d5f7SAbdiel Janulgue i915_gem_object_unpin_map(obj); 8279771d5f7SAbdiel Janulgue 8289771d5f7SAbdiel Janulgue return err; 8299771d5f7SAbdiel Janulgue } 8309771d5f7SAbdiel Janulgue 8319771d5f7SAbdiel Janulgue static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type) 8329771d5f7SAbdiel Janulgue { 8339771d5f7SAbdiel Janulgue if (type == I915_MMAP_TYPE_GTT && 8349771d5f7SAbdiel Janulgue !i915_ggtt_has_aperture(&to_i915(obj->base.dev)->ggtt)) 8359771d5f7SAbdiel Janulgue return false; 8369771d5f7SAbdiel Janulgue 8379771d5f7SAbdiel Janulgue if (type != I915_MMAP_TYPE_GTT && 838c471748dSMaarten Lankhorst !i915_gem_object_has_struct_page(obj) && 
839c471748dSMaarten Lankhorst !i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM)) 8409771d5f7SAbdiel Janulgue return false; 8419771d5f7SAbdiel Janulgue 8429771d5f7SAbdiel Janulgue return true; 8439771d5f7SAbdiel Janulgue } 8449771d5f7SAbdiel Janulgue 8452459e56fSMatthew Auld static void object_set_placements(struct drm_i915_gem_object *obj, 8462459e56fSMatthew Auld struct intel_memory_region **placements, 8472459e56fSMatthew Auld unsigned int n_placements) 8482459e56fSMatthew Auld { 8492459e56fSMatthew Auld GEM_BUG_ON(!n_placements); 8502459e56fSMatthew Auld 8512459e56fSMatthew Auld if (n_placements == 1) { 8522459e56fSMatthew Auld struct drm_i915_private *i915 = to_i915(obj->base.dev); 8532459e56fSMatthew Auld struct intel_memory_region *mr = placements[0]; 8542459e56fSMatthew Auld 8552459e56fSMatthew Auld obj->mm.placements = &i915->mm.regions[mr->id]; 8562459e56fSMatthew Auld obj->mm.n_placements = 1; 8572459e56fSMatthew Auld } else { 8582459e56fSMatthew Auld obj->mm.placements = placements; 8592459e56fSMatthew Auld obj->mm.n_placements = n_placements; 8602459e56fSMatthew Auld } 8612459e56fSMatthew Auld } 8622459e56fSMatthew Auld 8639771d5f7SAbdiel Janulgue #define expand32(x) (((x) << 0) | ((x) << 8) | ((x) << 16) | ((x) << 24)) 8649771d5f7SAbdiel Janulgue static int __igt_mmap(struct drm_i915_private *i915, 8659771d5f7SAbdiel Janulgue struct drm_i915_gem_object *obj, 8669771d5f7SAbdiel Janulgue enum i915_mmap_type type) 8679771d5f7SAbdiel Janulgue { 8689771d5f7SAbdiel Janulgue struct i915_mmap_offset *mmo; 8699771d5f7SAbdiel Janulgue struct vm_area_struct *area; 8709771d5f7SAbdiel Janulgue unsigned long addr; 8719771d5f7SAbdiel Janulgue int err, i; 8729771d5f7SAbdiel Janulgue 8739771d5f7SAbdiel Janulgue if (!can_mmap(obj, type)) 8749771d5f7SAbdiel Janulgue return 0; 8759771d5f7SAbdiel Janulgue 8769771d5f7SAbdiel Janulgue err = wc_set(obj); 8779771d5f7SAbdiel Janulgue if (err == -ENXIO) 8789771d5f7SAbdiel Janulgue err = gtt_set(obj); 8799771d5f7SAbdiel 
Janulgue if (err) 8809771d5f7SAbdiel Janulgue return err; 8819771d5f7SAbdiel Janulgue 8829771d5f7SAbdiel Janulgue mmo = mmap_offset_attach(obj, type, NULL); 8839771d5f7SAbdiel Janulgue if (IS_ERR(mmo)) 8849771d5f7SAbdiel Janulgue return PTR_ERR(mmo); 8859771d5f7SAbdiel Janulgue 886cc662126SAbdiel Janulgue addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED); 8879771d5f7SAbdiel Janulgue if (IS_ERR_VALUE(addr)) 8889771d5f7SAbdiel Janulgue return addr; 8896fedafacSChris Wilson 8909771d5f7SAbdiel Janulgue pr_debug("igt_mmap(%s, %d) @ %lx\n", obj->mm.region->name, type, addr); 8916fedafacSChris Wilson 8926fedafacSChris Wilson area = find_vma(current->mm, addr); 8936fedafacSChris Wilson if (!area) { 8949771d5f7SAbdiel Janulgue pr_err("%s: Did not create a vm_area_struct for the mmap\n", 8959771d5f7SAbdiel Janulgue obj->mm.region->name); 8966fedafacSChris Wilson err = -EINVAL; 8976fedafacSChris Wilson goto out_unmap; 8986fedafacSChris Wilson } 8996fedafacSChris Wilson 900cc662126SAbdiel Janulgue if (area->vm_private_data != mmo) { 9019771d5f7SAbdiel Janulgue pr_err("%s: vm_area_struct did not point back to our mmap_offset object!\n", 9029771d5f7SAbdiel Janulgue obj->mm.region->name); 9036fedafacSChris Wilson err = -EINVAL; 9046fedafacSChris Wilson goto out_unmap; 9056fedafacSChris Wilson } 9066fedafacSChris Wilson 9079771d5f7SAbdiel Janulgue for (i = 0; i < obj->base.size / sizeof(u32); i++) { 9086fedafacSChris Wilson u32 __user *ux = u64_to_user_ptr((u64)(addr + i * sizeof(*ux))); 9096fedafacSChris Wilson u32 x; 9106fedafacSChris Wilson 9116fedafacSChris Wilson if (get_user(x, ux)) { 9129771d5f7SAbdiel Janulgue pr_err("%s: Unable to read from mmap, offset:%zd\n", 9139771d5f7SAbdiel Janulgue obj->mm.region->name, i * sizeof(x)); 9146fedafacSChris Wilson err = -EFAULT; 9159771d5f7SAbdiel Janulgue goto out_unmap; 9166fedafacSChris Wilson } 9176fedafacSChris Wilson 9186fedafacSChris Wilson if (x != expand32(POISON_INUSE)) { 9199771d5f7SAbdiel Janulgue 
pr_err("%s: Read incorrect value from mmap, offset:%zd, found:%x, expected:%x\n", 9209771d5f7SAbdiel Janulgue obj->mm.region->name, 9216fedafacSChris Wilson i * sizeof(x), x, expand32(POISON_INUSE)); 9226fedafacSChris Wilson err = -EINVAL; 9239771d5f7SAbdiel Janulgue goto out_unmap; 9246fedafacSChris Wilson } 9256fedafacSChris Wilson 9266fedafacSChris Wilson x = expand32(POISON_FREE); 9276fedafacSChris Wilson if (put_user(x, ux)) { 9289771d5f7SAbdiel Janulgue pr_err("%s: Unable to write to mmap, offset:%zd\n", 9299771d5f7SAbdiel Janulgue obj->mm.region->name, i * sizeof(x)); 9306fedafacSChris Wilson err = -EFAULT; 9319771d5f7SAbdiel Janulgue goto out_unmap; 9326fedafacSChris Wilson } 9336fedafacSChris Wilson } 9346fedafacSChris Wilson 9359771d5f7SAbdiel Janulgue if (type == I915_MMAP_TYPE_GTT) 9369771d5f7SAbdiel Janulgue intel_gt_flush_ggtt_writes(&i915->gt); 9379771d5f7SAbdiel Janulgue 9389771d5f7SAbdiel Janulgue err = wc_check(obj); 9399771d5f7SAbdiel Janulgue if (err == -ENXIO) 9409771d5f7SAbdiel Janulgue err = gtt_check(obj); 9416fedafacSChris Wilson out_unmap: 9429771d5f7SAbdiel Janulgue vm_munmap(addr, obj->base.size); 9436fedafacSChris Wilson return err; 9446fedafacSChris Wilson } 9456fedafacSChris Wilson 9469771d5f7SAbdiel Janulgue static int igt_mmap(void *arg) 947cc662126SAbdiel Janulgue { 9489771d5f7SAbdiel Janulgue struct drm_i915_private *i915 = arg; 9499771d5f7SAbdiel Janulgue struct intel_memory_region *mr; 9509771d5f7SAbdiel Janulgue enum intel_region_id id; 9519771d5f7SAbdiel Janulgue 9529771d5f7SAbdiel Janulgue for_each_memory_region(mr, i915, id) { 9539771d5f7SAbdiel Janulgue unsigned long sizes[] = { 9549771d5f7SAbdiel Janulgue PAGE_SIZE, 9559771d5f7SAbdiel Janulgue mr->min_page_size, 9569771d5f7SAbdiel Janulgue SZ_4M, 9579771d5f7SAbdiel Janulgue }; 9589771d5f7SAbdiel Janulgue int i; 9599771d5f7SAbdiel Janulgue 9609771d5f7SAbdiel Janulgue for (i = 0; i < ARRAY_SIZE(sizes); i++) { 9619771d5f7SAbdiel Janulgue struct drm_i915_gem_object *obj; 
9629771d5f7SAbdiel Janulgue int err; 9639771d5f7SAbdiel Janulgue 9649771d5f7SAbdiel Janulgue obj = i915_gem_object_create_region(mr, sizes[i], 0); 9659771d5f7SAbdiel Janulgue if (obj == ERR_PTR(-ENODEV)) 9669771d5f7SAbdiel Janulgue continue; 9679771d5f7SAbdiel Janulgue 9689771d5f7SAbdiel Janulgue if (IS_ERR(obj)) 9699771d5f7SAbdiel Janulgue return PTR_ERR(obj); 9709771d5f7SAbdiel Janulgue 9712459e56fSMatthew Auld object_set_placements(obj, &mr, 1); 9722459e56fSMatthew Auld 9739771d5f7SAbdiel Janulgue err = __igt_mmap(i915, obj, I915_MMAP_TYPE_GTT); 9749771d5f7SAbdiel Janulgue if (err == 0) 9759771d5f7SAbdiel Janulgue err = __igt_mmap(i915, obj, I915_MMAP_TYPE_WC); 9769771d5f7SAbdiel Janulgue 9779771d5f7SAbdiel Janulgue i915_gem_object_put(obj); 9789771d5f7SAbdiel Janulgue if (err) 9799771d5f7SAbdiel Janulgue return err; 9809771d5f7SAbdiel Janulgue } 981cc662126SAbdiel Janulgue } 982cc662126SAbdiel Janulgue 9839771d5f7SAbdiel Janulgue return 0; 984cc662126SAbdiel Janulgue } 985cc662126SAbdiel Janulgue 9869f909e21SChris Wilson static const char *repr_mmap_type(enum i915_mmap_type type) 9879f909e21SChris Wilson { 9889f909e21SChris Wilson switch (type) { 9899f909e21SChris Wilson case I915_MMAP_TYPE_GTT: return "gtt"; 9909f909e21SChris Wilson case I915_MMAP_TYPE_WB: return "wb"; 9919f909e21SChris Wilson case I915_MMAP_TYPE_WC: return "wc"; 9929f909e21SChris Wilson case I915_MMAP_TYPE_UC: return "uc"; 9939f909e21SChris Wilson default: return "unknown"; 9949f909e21SChris Wilson } 9959f909e21SChris Wilson } 9969f909e21SChris Wilson 9979f909e21SChris Wilson static bool can_access(const struct drm_i915_gem_object *obj) 9989f909e21SChris Wilson { 999c471748dSMaarten Lankhorst return i915_gem_object_has_struct_page(obj) || 1000c471748dSMaarten Lankhorst i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM); 10019f909e21SChris Wilson } 10029f909e21SChris Wilson 10039f909e21SChris Wilson static int __igt_mmap_access(struct drm_i915_private *i915, 10049f909e21SChris Wilson 
struct drm_i915_gem_object *obj, 10059f909e21SChris Wilson enum i915_mmap_type type) 10069f909e21SChris Wilson { 10079f909e21SChris Wilson struct i915_mmap_offset *mmo; 10089f909e21SChris Wilson unsigned long __user *ptr; 10099f909e21SChris Wilson unsigned long A, B; 10109f909e21SChris Wilson unsigned long x, y; 10119f909e21SChris Wilson unsigned long addr; 10129f909e21SChris Wilson int err; 10139f909e21SChris Wilson 10149f909e21SChris Wilson memset(&A, 0xAA, sizeof(A)); 10159f909e21SChris Wilson memset(&B, 0xBB, sizeof(B)); 10169f909e21SChris Wilson 10179f909e21SChris Wilson if (!can_mmap(obj, type) || !can_access(obj)) 10189f909e21SChris Wilson return 0; 10199f909e21SChris Wilson 10209f909e21SChris Wilson mmo = mmap_offset_attach(obj, type, NULL); 10219f909e21SChris Wilson if (IS_ERR(mmo)) 10229f909e21SChris Wilson return PTR_ERR(mmo); 10239f909e21SChris Wilson 10249f909e21SChris Wilson addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED); 10259f909e21SChris Wilson if (IS_ERR_VALUE(addr)) 10269f909e21SChris Wilson return addr; 10279f909e21SChris Wilson ptr = (unsigned long __user *)addr; 10289f909e21SChris Wilson 10299f909e21SChris Wilson err = __put_user(A, ptr); 10309f909e21SChris Wilson if (err) { 10319f909e21SChris Wilson pr_err("%s(%s): failed to write into user mmap\n", 10329f909e21SChris Wilson obj->mm.region->name, repr_mmap_type(type)); 10339f909e21SChris Wilson goto out_unmap; 10349f909e21SChris Wilson } 10359f909e21SChris Wilson 10369f909e21SChris Wilson intel_gt_flush_ggtt_writes(&i915->gt); 10379f909e21SChris Wilson 10389f909e21SChris Wilson err = access_process_vm(current, addr, &x, sizeof(x), 0); 10399f909e21SChris Wilson if (err != sizeof(x)) { 10409f909e21SChris Wilson pr_err("%s(%s): access_process_vm() read failed\n", 10419f909e21SChris Wilson obj->mm.region->name, repr_mmap_type(type)); 10429f909e21SChris Wilson goto out_unmap; 10439f909e21SChris Wilson } 10449f909e21SChris Wilson 10459f909e21SChris Wilson err = 
access_process_vm(current, addr, &B, sizeof(B), FOLL_WRITE); 10469f909e21SChris Wilson if (err != sizeof(B)) { 10479f909e21SChris Wilson pr_err("%s(%s): access_process_vm() write failed\n", 10489f909e21SChris Wilson obj->mm.region->name, repr_mmap_type(type)); 10499f909e21SChris Wilson goto out_unmap; 10509f909e21SChris Wilson } 10519f909e21SChris Wilson 10529f909e21SChris Wilson intel_gt_flush_ggtt_writes(&i915->gt); 10539f909e21SChris Wilson 10549f909e21SChris Wilson err = __get_user(y, ptr); 10559f909e21SChris Wilson if (err) { 10569f909e21SChris Wilson pr_err("%s(%s): failed to read from user mmap\n", 10579f909e21SChris Wilson obj->mm.region->name, repr_mmap_type(type)); 10589f909e21SChris Wilson goto out_unmap; 10599f909e21SChris Wilson } 10609f909e21SChris Wilson 10619f909e21SChris Wilson if (x != A || y != B) { 10629f909e21SChris Wilson pr_err("%s(%s): failed to read/write values, found (%lx, %lx)\n", 10639f909e21SChris Wilson obj->mm.region->name, repr_mmap_type(type), 10649f909e21SChris Wilson x, y); 10659f909e21SChris Wilson err = -EINVAL; 10669f909e21SChris Wilson goto out_unmap; 10679f909e21SChris Wilson } 10689f909e21SChris Wilson 10699f909e21SChris Wilson out_unmap: 10709f909e21SChris Wilson vm_munmap(addr, obj->base.size); 10719f909e21SChris Wilson return err; 10729f909e21SChris Wilson } 10739f909e21SChris Wilson 10749f909e21SChris Wilson static int igt_mmap_access(void *arg) 10759f909e21SChris Wilson { 10769f909e21SChris Wilson struct drm_i915_private *i915 = arg; 10779f909e21SChris Wilson struct intel_memory_region *mr; 10789f909e21SChris Wilson enum intel_region_id id; 10799f909e21SChris Wilson 10809f909e21SChris Wilson for_each_memory_region(mr, i915, id) { 10819f909e21SChris Wilson struct drm_i915_gem_object *obj; 10829f909e21SChris Wilson int err; 10839f909e21SChris Wilson 10849f909e21SChris Wilson obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0); 10859f909e21SChris Wilson if (obj == ERR_PTR(-ENODEV)) 10869f909e21SChris Wilson continue; 
10879f909e21SChris Wilson 10889f909e21SChris Wilson if (IS_ERR(obj)) 10899f909e21SChris Wilson return PTR_ERR(obj); 10909f909e21SChris Wilson 10912459e56fSMatthew Auld object_set_placements(obj, &mr, 1); 10922459e56fSMatthew Auld 10939f909e21SChris Wilson err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_GTT); 10949f909e21SChris Wilson if (err == 0) 10959f909e21SChris Wilson err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WB); 10969f909e21SChris Wilson if (err == 0) 10979f909e21SChris Wilson err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WC); 10989f909e21SChris Wilson if (err == 0) 10999f909e21SChris Wilson err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_UC); 11009f909e21SChris Wilson 11019f909e21SChris Wilson i915_gem_object_put(obj); 11029f909e21SChris Wilson if (err) 11039f909e21SChris Wilson return err; 11049f909e21SChris Wilson } 11059f909e21SChris Wilson 11069f909e21SChris Wilson return 0; 11079f909e21SChris Wilson } 11089f909e21SChris Wilson 110906581862SChris Wilson static int __igt_mmap_gpu(struct drm_i915_private *i915, 111006581862SChris Wilson struct drm_i915_gem_object *obj, 111106581862SChris Wilson enum i915_mmap_type type) 111206581862SChris Wilson { 111306581862SChris Wilson struct intel_engine_cs *engine; 111406581862SChris Wilson struct i915_mmap_offset *mmo; 111506581862SChris Wilson unsigned long addr; 1116a5799832SChris Wilson u32 __user *ux; 1117a5799832SChris Wilson u32 bbe; 111806581862SChris Wilson int err; 111906581862SChris Wilson 112006581862SChris Wilson /* 112106581862SChris Wilson * Verify that the mmap access into the backing store aligns with 112206581862SChris Wilson * that of the GPU, i.e. that mmap is indeed writing into the same 112306581862SChris Wilson * page as being read by the GPU. 
112406581862SChris Wilson */ 112506581862SChris Wilson 112606581862SChris Wilson if (!can_mmap(obj, type)) 112706581862SChris Wilson return 0; 112806581862SChris Wilson 112906581862SChris Wilson err = wc_set(obj); 113006581862SChris Wilson if (err == -ENXIO) 113106581862SChris Wilson err = gtt_set(obj); 113206581862SChris Wilson if (err) 113306581862SChris Wilson return err; 113406581862SChris Wilson 113506581862SChris Wilson mmo = mmap_offset_attach(obj, type, NULL); 113606581862SChris Wilson if (IS_ERR(mmo)) 113706581862SChris Wilson return PTR_ERR(mmo); 113806581862SChris Wilson 113906581862SChris Wilson addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED); 114006581862SChris Wilson if (IS_ERR_VALUE(addr)) 114106581862SChris Wilson return addr; 114206581862SChris Wilson 114306581862SChris Wilson ux = u64_to_user_ptr((u64)addr); 114406581862SChris Wilson bbe = MI_BATCH_BUFFER_END; 114506581862SChris Wilson if (put_user(bbe, ux)) { 114606581862SChris Wilson pr_err("%s: Unable to write to mmap\n", obj->mm.region->name); 114706581862SChris Wilson err = -EFAULT; 114806581862SChris Wilson goto out_unmap; 114906581862SChris Wilson } 115006581862SChris Wilson 115106581862SChris Wilson if (type == I915_MMAP_TYPE_GTT) 115206581862SChris Wilson intel_gt_flush_ggtt_writes(&i915->gt); 115306581862SChris Wilson 115406581862SChris Wilson for_each_uabi_engine(engine, i915) { 115506581862SChris Wilson struct i915_request *rq; 115606581862SChris Wilson struct i915_vma *vma; 115715b6c924SMaarten Lankhorst struct i915_gem_ww_ctx ww; 115806581862SChris Wilson 115906581862SChris Wilson vma = i915_vma_instance(obj, engine->kernel_context->vm, NULL); 116006581862SChris Wilson if (IS_ERR(vma)) { 116106581862SChris Wilson err = PTR_ERR(vma); 116206581862SChris Wilson goto out_unmap; 116306581862SChris Wilson } 116406581862SChris Wilson 116515b6c924SMaarten Lankhorst i915_gem_ww_ctx_init(&ww, false); 116615b6c924SMaarten Lankhorst retry: 116715b6c924SMaarten Lankhorst err 
= i915_gem_object_lock(obj, &ww); 116815b6c924SMaarten Lankhorst if (!err) 116915b6c924SMaarten Lankhorst err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER); 117006581862SChris Wilson if (err) 117115b6c924SMaarten Lankhorst goto out_ww; 117206581862SChris Wilson 117306581862SChris Wilson rq = i915_request_create(engine->kernel_context); 117406581862SChris Wilson if (IS_ERR(rq)) { 117506581862SChris Wilson err = PTR_ERR(rq); 117606581862SChris Wilson goto out_unpin; 117706581862SChris Wilson } 117806581862SChris Wilson 117906581862SChris Wilson err = i915_request_await_object(rq, vma->obj, false); 118006581862SChris Wilson if (err == 0) 118106581862SChris Wilson err = i915_vma_move_to_active(vma, rq, 0); 118206581862SChris Wilson 118306581862SChris Wilson err = engine->emit_bb_start(rq, vma->node.start, 0, 0); 118406581862SChris Wilson i915_request_get(rq); 118506581862SChris Wilson i915_request_add(rq); 118606581862SChris Wilson 118706581862SChris Wilson if (i915_request_wait(rq, 0, HZ / 5) < 0) { 118806581862SChris Wilson struct drm_printer p = 118906581862SChris Wilson drm_info_printer(engine->i915->drm.dev); 119006581862SChris Wilson 119106581862SChris Wilson pr_err("%s(%s, %s): Failed to execute batch\n", 119206581862SChris Wilson __func__, engine->name, obj->mm.region->name); 119306581862SChris Wilson intel_engine_dump(engine, &p, 119406581862SChris Wilson "%s\n", engine->name); 119506581862SChris Wilson 119606581862SChris Wilson intel_gt_set_wedged(engine->gt); 119706581862SChris Wilson err = -EIO; 119806581862SChris Wilson } 119906581862SChris Wilson i915_request_put(rq); 120006581862SChris Wilson 120106581862SChris Wilson out_unpin: 120206581862SChris Wilson i915_vma_unpin(vma); 120315b6c924SMaarten Lankhorst out_ww: 120415b6c924SMaarten Lankhorst if (err == -EDEADLK) { 120515b6c924SMaarten Lankhorst err = i915_gem_ww_ctx_backoff(&ww); 120615b6c924SMaarten Lankhorst if (!err) 120715b6c924SMaarten Lankhorst goto retry; 120815b6c924SMaarten Lankhorst } 
120915b6c924SMaarten Lankhorst i915_gem_ww_ctx_fini(&ww); 121006581862SChris Wilson if (err) 121106581862SChris Wilson goto out_unmap; 121206581862SChris Wilson } 121306581862SChris Wilson 121406581862SChris Wilson out_unmap: 121506581862SChris Wilson vm_munmap(addr, obj->base.size); 121606581862SChris Wilson return err; 121706581862SChris Wilson } 121806581862SChris Wilson 121906581862SChris Wilson static int igt_mmap_gpu(void *arg) 122006581862SChris Wilson { 122106581862SChris Wilson struct drm_i915_private *i915 = arg; 122206581862SChris Wilson struct intel_memory_region *mr; 122306581862SChris Wilson enum intel_region_id id; 122406581862SChris Wilson 122506581862SChris Wilson for_each_memory_region(mr, i915, id) { 122606581862SChris Wilson struct drm_i915_gem_object *obj; 122706581862SChris Wilson int err; 122806581862SChris Wilson 122906581862SChris Wilson obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0); 123006581862SChris Wilson if (obj == ERR_PTR(-ENODEV)) 123106581862SChris Wilson continue; 123206581862SChris Wilson 123306581862SChris Wilson if (IS_ERR(obj)) 123406581862SChris Wilson return PTR_ERR(obj); 123506581862SChris Wilson 12362459e56fSMatthew Auld object_set_placements(obj, &mr, 1); 12372459e56fSMatthew Auld 123806581862SChris Wilson err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_GTT); 123906581862SChris Wilson if (err == 0) 124006581862SChris Wilson err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_WC); 124106581862SChris Wilson 124206581862SChris Wilson i915_gem_object_put(obj); 124306581862SChris Wilson if (err) 124406581862SChris Wilson return err; 124506581862SChris Wilson } 124606581862SChris Wilson 124706581862SChris Wilson return 0; 124806581862SChris Wilson } 124906581862SChris Wilson 12501d1d0af6SChris Wilson static int check_present_pte(pte_t *pte, unsigned long addr, void *data) 12511d1d0af6SChris Wilson { 12521d1d0af6SChris Wilson if (!pte_present(*pte) || pte_none(*pte)) { 12531d1d0af6SChris Wilson pr_err("missing PTE:%lx\n", 
12541d1d0af6SChris Wilson (addr - (unsigned long)data) >> PAGE_SHIFT); 12551d1d0af6SChris Wilson return -EINVAL; 12561d1d0af6SChris Wilson } 12571d1d0af6SChris Wilson 12581d1d0af6SChris Wilson return 0; 12591d1d0af6SChris Wilson } 12601d1d0af6SChris Wilson 12611d1d0af6SChris Wilson static int check_absent_pte(pte_t *pte, unsigned long addr, void *data) 12621d1d0af6SChris Wilson { 12631d1d0af6SChris Wilson if (pte_present(*pte) && !pte_none(*pte)) { 12641d1d0af6SChris Wilson pr_err("present PTE:%lx; expected to be revoked\n", 12651d1d0af6SChris Wilson (addr - (unsigned long)data) >> PAGE_SHIFT); 12661d1d0af6SChris Wilson return -EINVAL; 12671d1d0af6SChris Wilson } 12681d1d0af6SChris Wilson 12691d1d0af6SChris Wilson return 0; 12701d1d0af6SChris Wilson } 12711d1d0af6SChris Wilson 12721d1d0af6SChris Wilson static int check_present(unsigned long addr, unsigned long len) 12731d1d0af6SChris Wilson { 12741d1d0af6SChris Wilson return apply_to_page_range(current->mm, addr, len, 12751d1d0af6SChris Wilson check_present_pte, (void *)addr); 12761d1d0af6SChris Wilson } 12771d1d0af6SChris Wilson 12781d1d0af6SChris Wilson static int check_absent(unsigned long addr, unsigned long len) 12791d1d0af6SChris Wilson { 12801d1d0af6SChris Wilson return apply_to_page_range(current->mm, addr, len, 12811d1d0af6SChris Wilson check_absent_pte, (void *)addr); 12821d1d0af6SChris Wilson } 12831d1d0af6SChris Wilson 12841d1d0af6SChris Wilson static int prefault_range(u64 start, u64 len) 12851d1d0af6SChris Wilson { 12861d1d0af6SChris Wilson const char __user *addr, *end; 12871d1d0af6SChris Wilson char __maybe_unused c; 12881d1d0af6SChris Wilson int err; 12891d1d0af6SChris Wilson 12901d1d0af6SChris Wilson addr = u64_to_user_ptr(start); 12911d1d0af6SChris Wilson end = addr + len; 12921d1d0af6SChris Wilson 12931d1d0af6SChris Wilson for (; addr < end; addr += PAGE_SIZE) { 12941d1d0af6SChris Wilson err = __get_user(c, addr); 12951d1d0af6SChris Wilson if (err) 12961d1d0af6SChris Wilson return err; 
12971d1d0af6SChris Wilson } 12981d1d0af6SChris Wilson 12991d1d0af6SChris Wilson return __get_user(c, end - 1); 13001d1d0af6SChris Wilson } 13011d1d0af6SChris Wilson 13029771d5f7SAbdiel Janulgue static int __igt_mmap_revoke(struct drm_i915_private *i915, 13039771d5f7SAbdiel Janulgue struct drm_i915_gem_object *obj, 13049771d5f7SAbdiel Janulgue enum i915_mmap_type type) 13051d1d0af6SChris Wilson { 1306cc662126SAbdiel Janulgue struct i915_mmap_offset *mmo; 13071d1d0af6SChris Wilson unsigned long addr; 13081d1d0af6SChris Wilson int err; 13091d1d0af6SChris Wilson 13109771d5f7SAbdiel Janulgue if (!can_mmap(obj, type)) 13111d1d0af6SChris Wilson return 0; 13121d1d0af6SChris Wilson 1313cc662126SAbdiel Janulgue mmo = mmap_offset_attach(obj, type, NULL); 13149771d5f7SAbdiel Janulgue if (IS_ERR(mmo)) 13159771d5f7SAbdiel Janulgue return PTR_ERR(mmo); 13161d1d0af6SChris Wilson 1317cc662126SAbdiel Janulgue addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED); 13189771d5f7SAbdiel Janulgue if (IS_ERR_VALUE(addr)) 13199771d5f7SAbdiel Janulgue return addr; 13201d1d0af6SChris Wilson 13211d1d0af6SChris Wilson err = prefault_range(addr, obj->base.size); 13221d1d0af6SChris Wilson if (err) 13231d1d0af6SChris Wilson goto out_unmap; 13241d1d0af6SChris Wilson 13251d1d0af6SChris Wilson err = check_present(addr, obj->base.size); 13269771d5f7SAbdiel Janulgue if (err) { 13279771d5f7SAbdiel Janulgue pr_err("%s: was not present\n", obj->mm.region->name); 13281d1d0af6SChris Wilson goto out_unmap; 13299771d5f7SAbdiel Janulgue } 13301d1d0af6SChris Wilson 13311d1d0af6SChris Wilson /* 13321d1d0af6SChris Wilson * After unbinding the object from the GGTT, its address may be reused 13331d1d0af6SChris Wilson * for other objects. Ergo we have to revoke the previous mmap PTE 13341d1d0af6SChris Wilson * access as it no longer points to the same object. 
13351d1d0af6SChris Wilson */ 13361d1d0af6SChris Wilson err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE); 13371d1d0af6SChris Wilson if (err) { 13381d1d0af6SChris Wilson pr_err("Failed to unbind object!\n"); 13391d1d0af6SChris Wilson goto out_unmap; 13401d1d0af6SChris Wilson } 13411d1d0af6SChris Wilson 1342cc662126SAbdiel Janulgue if (type != I915_MMAP_TYPE_GTT) { 13436f791ffeSMaarten Lankhorst i915_gem_object_lock(obj, NULL); 1344cc662126SAbdiel Janulgue __i915_gem_object_put_pages(obj); 13456f791ffeSMaarten Lankhorst i915_gem_object_unlock(obj); 1346cc662126SAbdiel Janulgue if (i915_gem_object_has_pages(obj)) { 1347cc662126SAbdiel Janulgue pr_err("Failed to put-pages object!\n"); 1348cc662126SAbdiel Janulgue err = -EINVAL; 1349cc662126SAbdiel Janulgue goto out_unmap; 1350cc662126SAbdiel Janulgue } 1351cc662126SAbdiel Janulgue } 1352cc662126SAbdiel Janulgue 13531d1d0af6SChris Wilson err = check_absent(addr, obj->base.size); 13549771d5f7SAbdiel Janulgue if (err) { 13559771d5f7SAbdiel Janulgue pr_err("%s: was not absent\n", obj->mm.region->name); 13561d1d0af6SChris Wilson goto out_unmap; 13579771d5f7SAbdiel Janulgue } 13581d1d0af6SChris Wilson 13591d1d0af6SChris Wilson out_unmap: 13601d1d0af6SChris Wilson vm_munmap(addr, obj->base.size); 13611d1d0af6SChris Wilson return err; 13621d1d0af6SChris Wilson } 13631d1d0af6SChris Wilson 13649771d5f7SAbdiel Janulgue static int igt_mmap_revoke(void *arg) 1365cc662126SAbdiel Janulgue { 13669771d5f7SAbdiel Janulgue struct drm_i915_private *i915 = arg; 13679771d5f7SAbdiel Janulgue struct intel_memory_region *mr; 13689771d5f7SAbdiel Janulgue enum intel_region_id id; 13699771d5f7SAbdiel Janulgue 13709771d5f7SAbdiel Janulgue for_each_memory_region(mr, i915, id) { 13719771d5f7SAbdiel Janulgue struct drm_i915_gem_object *obj; 13729771d5f7SAbdiel Janulgue int err; 13739771d5f7SAbdiel Janulgue 13749771d5f7SAbdiel Janulgue obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0); 13759771d5f7SAbdiel Janulgue if (obj == 
ERR_PTR(-ENODEV)) 13769771d5f7SAbdiel Janulgue continue; 13779771d5f7SAbdiel Janulgue 13789771d5f7SAbdiel Janulgue if (IS_ERR(obj)) 13799771d5f7SAbdiel Janulgue return PTR_ERR(obj); 13809771d5f7SAbdiel Janulgue 13812459e56fSMatthew Auld object_set_placements(obj, &mr, 1); 13822459e56fSMatthew Auld 13839771d5f7SAbdiel Janulgue err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_GTT); 13849771d5f7SAbdiel Janulgue if (err == 0) 13859771d5f7SAbdiel Janulgue err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_WC); 13869771d5f7SAbdiel Janulgue 13879771d5f7SAbdiel Janulgue i915_gem_object_put(obj); 13889771d5f7SAbdiel Janulgue if (err) 13899771d5f7SAbdiel Janulgue return err; 1390cc662126SAbdiel Janulgue } 1391cc662126SAbdiel Janulgue 13929771d5f7SAbdiel Janulgue return 0; 1393cc662126SAbdiel Janulgue } 1394cc662126SAbdiel Janulgue 1395b414fcd5SChris Wilson int i915_gem_mman_live_selftests(struct drm_i915_private *i915) 1396b414fcd5SChris Wilson { 1397b414fcd5SChris Wilson static const struct i915_subtest tests[] = { 1398b414fcd5SChris Wilson SUBTEST(igt_partial_tiling), 139907e98eb0SChris Wilson SUBTEST(igt_smoke_tiling), 1400b414fcd5SChris Wilson SUBTEST(igt_mmap_offset_exhaustion), 14019771d5f7SAbdiel Janulgue SUBTEST(igt_mmap), 14029f909e21SChris Wilson SUBTEST(igt_mmap_access), 14039771d5f7SAbdiel Janulgue SUBTEST(igt_mmap_revoke), 140406581862SChris Wilson SUBTEST(igt_mmap_gpu), 1405b414fcd5SChris Wilson }; 1406b414fcd5SChris Wilson 1407b414fcd5SChris Wilson return i915_subtests(tests, i915); 1408b414fcd5SChris Wilson } 1409