/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/random.h>

#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"
#include "gt/intel_gt.h"

#include "i915_selftest.h"

#include "igt_flush_test.h"
#include "mock_drm.h"

static int switch_to_context(struct drm_i915_private *i915,
			     struct i915_gem_context *ctx)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;

		rq = igt_request_alloc(ctx, engine);
		if (IS_ERR(rq))
			return PTR_ERR(rq);

		i915_request_add(rq);
	}

	return 0;
}

static void trash_stolen(struct drm_i915_private *i915)
{
	struct i915_ggtt *ggtt = &i915->ggtt;
	const u64 slot = ggtt->error_capture.start;
	const resource_size_t size = resource_size(&i915->dsm);
	unsigned long page;
	u32 prng = 0x12345678;

	for (page = 0; page < size; page += PAGE_SIZE) {
		const dma_addr_t dma = i915->dsm.start + page;
		u32 __iomem *s;
		int x;

		ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0);

		s = io_mapping_map_atomic_wc(&ggtt->iomap, slot);
		for (x = 0; x < PAGE_SIZE / sizeof(u32); x++) {
			prng = next_pseudo_random32(prng);
			iowrite32(prng, &s[x]);
		}
		io_mapping_unmap_atomic(s);
	}

	ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
}

static void simulate_hibernate(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	/*
	 * As a final sting in the tail, invalidate stolen. Under a real S4,
	 * stolen is lost and needs to be refilled on resume. However, under
	 * CI we merely do S4-device testing (as full S4 is too unreliable
	 * for automated testing across a cluster), so to simulate the effect
	 * of stolen being trashed across S4, we trash it ourselves.
	 */
	trash_stolen(i915);

	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}

static int pm_prepare(struct drm_i915_private *i915)
{
	i915_gem_suspend(i915);

	return 0;
}

static void pm_suspend(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		i915_gem_suspend_gtt_mappings(i915);
		i915_gem_suspend_late(i915);
	}
}

static void pm_hibernate(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		i915_gem_suspend_gtt_mappings(i915);

		i915_gem_freeze(i915);
		i915_gem_freeze_late(i915);
	}
}

static void pm_resume(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	/*
	 * Both suspend and hibernate follow the same wakeup path and assume
	 * that runtime-pm just works.
	 */
	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		intel_gt_sanitize(&i915->gt, false);
		i915_gem_sanitize(i915);

		mutex_lock(&i915->drm.struct_mutex);
		i915_gem_restore_gtt_mappings(i915);
		i915_gem_restore_fences(i915);
		mutex_unlock(&i915->drm.struct_mutex);

		i915_gem_resume(i915);
	}
}

static int igt_gem_suspend(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx;
	struct drm_file *file;
	int err;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	err = -ENOMEM;
	mutex_lock(&i915->drm.struct_mutex);
	ctx = live_context(i915, file);
	if (!IS_ERR(ctx))
		err = switch_to_context(i915, ctx);
	mutex_unlock(&i915->drm.struct_mutex);
	if (err)
		goto out;

	err = pm_prepare(i915);
	if (err)
		goto out;

	pm_suspend(i915);

	/* Here be dragons! Note that with S3RST any S3 may become S4! */
	simulate_hibernate(i915);

	pm_resume(i915);

	mutex_lock(&i915->drm.struct_mutex);
	err = switch_to_context(i915, ctx);
	mutex_unlock(&i915->drm.struct_mutex);
out:
	mock_file_free(i915, file);
	return err;
}

static int igt_gem_hibernate(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx;
	struct drm_file *file;
	int err;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	err = -ENOMEM;
	mutex_lock(&i915->drm.struct_mutex);
	ctx = live_context(i915, file);
	if (!IS_ERR(ctx))
		err = switch_to_context(i915, ctx);
	mutex_unlock(&i915->drm.struct_mutex);
	if (err)
		goto out;

	err = pm_prepare(i915);
	if (err)
		goto out;

	pm_hibernate(i915);

	/* Here be dragons! */
	simulate_hibernate(i915);

	pm_resume(i915);

	mutex_lock(&i915->drm.struct_mutex);
	err = switch_to_context(i915, ctx);
	mutex_unlock(&i915->drm.struct_mutex);
out:
	mock_file_free(i915, file);
	return err;
}

int i915_gem_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_gem_suspend),
		SUBTEST(igt_gem_hibernate),
	};

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	return i915_live_subtests(tests, i915);
}