/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/random.h>

#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_pm.h"
#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"

#include "i915_selftest.h"

#include "igt_flush_test.h"
#include "mock_drm.h"

static int switch_to_context(struct i915_gem_context *ctx)
{
	struct i915_gem_engines_iter it;
	struct intel_context *ce;
	int err = 0;

	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		struct i915_request *rq;

		rq = intel_context_create_request(ce);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			break;
		}

		i915_request_add(rq);
	}
	i915_gem_context_unlock_engines(ctx);

	return err;
}

static void trash_stolen(struct drm_i915_private *i915)
{
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
	const u64 slot = ggtt->error_capture.start;
	const resource_size_t size = resource_size(&i915->dsm);
	unsigned long page;
	u32 prng = 0x12345678;

	/* XXX: fsck. needs some more thought... */
	if (!i915_ggtt_has_aperture(ggtt))
		return;

	for (page = 0; page < size; page += PAGE_SIZE) {
		const dma_addr_t dma = i915->dsm.start + page;
		u32 __iomem *s;
		int x;

		ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0);

		s = io_mapping_map_atomic_wc(&ggtt->iomap, slot);
		for (x = 0; x < PAGE_SIZE / sizeof(u32); x++) {
			prng = next_pseudo_random32(prng);
			iowrite32(prng, &s[x]);
		}
		io_mapping_unmap_atomic(s);
	}

	ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
}

static void simulate_hibernate(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	/*
	 * As a final sting in the tail, invalidate stolen. Under a real S4,
	 * stolen is lost and needs to be refilled on resume. However, under
	 * CI we merely do S4-device testing (as full S4 is too unreliable
	 * for automated testing across a cluster), so to simulate the effect
	 * of stolen being trashed across S4, we trash it ourselves.
	 */
	trash_stolen(i915);

	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}

static int igt_pm_prepare(struct drm_i915_private *i915)
{
	i915_gem_suspend(i915);

	return 0;
}

static void igt_pm_suspend(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		i915_ggtt_suspend(to_gt(i915)->ggtt);
		i915_gem_suspend_late(i915);
	}
}

static void igt_pm_hibernate(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		i915_ggtt_suspend(to_gt(i915)->ggtt);

		i915_gem_freeze(i915);
		i915_gem_freeze_late(i915);
	}
}

static void igt_pm_resume(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	/*
	 * Both suspend and hibernate follow the same wakeup path and assume
	 * that runtime-pm just works.
	 */
	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		i915_ggtt_resume(to_gt(i915)->ggtt);
		i915_gem_resume(i915);
	}
}

static int igt_gem_suspend(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx;
	struct file *file;
	int err;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	err = -ENOMEM;
	ctx = live_context(i915, file);
	if (!IS_ERR(ctx))
		err = switch_to_context(ctx);
	if (err)
		goto out;

	err = igt_pm_prepare(i915);
	if (err)
		goto out;

	igt_pm_suspend(i915);

	/* Here be dragons! Note that with S3RST any S3 may become S4! */
	simulate_hibernate(i915);

	igt_pm_resume(i915);

	err = switch_to_context(ctx);
out:
	fput(file);
	return err;
}

static int igt_gem_hibernate(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx;
	struct file *file;
	int err;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	err = -ENOMEM;
	ctx = live_context(i915, file);
	if (!IS_ERR(ctx))
		err = switch_to_context(ctx);
	if (err)
		goto out;

	err = igt_pm_prepare(i915);
	if (err)
		goto out;

	igt_pm_hibernate(i915);

	/* Here be dragons! */
	simulate_hibernate(i915);

	igt_pm_resume(i915);

	err = switch_to_context(ctx);
out:
	fput(file);
	return err;
}

static int igt_gem_ww_ctx(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj, *obj2;
	struct i915_gem_ww_ctx ww;
	int err = 0;

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	obj2 = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj2)) {
		err = PTR_ERR(obj2);
		goto put1;
	}

	i915_gem_ww_ctx_init(&ww, true);
retry:
	/* Lock the objects, twice for good measure (-EALREADY handling) */
	err = i915_gem_object_lock(obj, &ww);
	if (!err)
		err = i915_gem_object_lock_interruptible(obj, &ww);
	if (!err)
		err = i915_gem_object_lock_interruptible(obj2, &ww);
	if (!err)
		err = i915_gem_object_lock(obj2, &ww);

	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	i915_gem_object_put(obj2);
put1:
	i915_gem_object_put(obj);
	return err;
}

int i915_gem_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_gem_suspend),
		SUBTEST(igt_gem_hibernate),
		SUBTEST(igt_gem_ww_ctx),
	};

	if (intel_gt_is_wedged(to_gt(i915)))
		return 0;

	return i915_live_subtests(tests, i915);
}