// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "selftests/igt_spinner.h"
#include "selftests/intel_scheduler_helpers.h"

/* Submit @rq and, if a spinner is supplied, wait for it to start spinning */
static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
{
	int err = 0;

	i915_request_get(rq);
	i915_request_add(rq);
	if (spin && !igt_wait_for_spinner(spin, rq))
		err = -ETIMEDOUT;

	return err;
}

/* Create and submit a NOP request on @ce, optionally ordered after @from */
static struct i915_request *nop_user_request(struct intel_context *ce,
					     struct i915_request *from)
{
	struct i915_request *rq;
	int ret;

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq))
		return rq;

	if (from) {
		ret = i915_sw_fence_await_dma_fence(&rq->submit,
						    &from->fence, 0,
						    I915_FENCE_GFP);
		if (ret < 0) {
			i915_request_put(rq);
			return ERR_PTR(ret);
		}
	}

	i915_request_get(rq);
	i915_request_add(rq);

	return rq;
}
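/*
 * intel_guc_scrub_ctbs - Test scrubbing of lost G2H messages on reset
 *
 * This test submits three requests on contexts configured to drop a specific
 * G2H message (schedule enable, schedule disable, or deregister), waits for
 * the requests to complete, and then triggers a full GT reset to scrub the
 * missing G2H. If the lost G2H are scrubbed correctly, the GT is able to
 * idle; otherwise the final idle wait times out.
 */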
static int intel_guc_scrub_ctbs(void *arg)
{
	struct intel_gt *gt = arg;
	int ret = 0;
	int i;
	struct i915_request *last[3] = {NULL, NULL, NULL}, *rq;
	intel_wakeref_t wakeref;
	struct intel_engine_cs *engine;
	struct intel_context *ce;

	wakeref = intel_runtime_pm_get(gt->uncore->rpm);
	engine = intel_selftest_find_any_engine(gt);

	/* Submit requests and inject errors forcing G2H to be dropped */
	for (i = 0; i < 3; ++i) {
		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			ret = PTR_ERR(ce);
			pr_err("Failed to create context, %d: %d\n", i, ret);
			goto err;
		}

		switch (i) {
		case 0:
			ce->drop_schedule_enable = true;
			break;
		case 1:
			ce->drop_schedule_disable = true;
			break;
		case 2:
			ce->drop_deregister = true;
			break;
		}

		rq = nop_user_request(ce, NULL);
		intel_context_put(ce);

		if (IS_ERR(rq)) {
			ret = PTR_ERR(rq);
			pr_err("Failed to create request, %d: %d\n", i, ret);
			goto err;
		}

		last[i] = rq;
	}

	for (i = 0; i < 3; ++i) {
		ret = i915_request_wait(last[i], 0, HZ);
		if (ret < 0) {
			pr_err("Last request failed to complete: %d\n", ret);
			goto err;
		}
		i915_request_put(last[i]);
		last[i] = NULL;
	}

	/* Force all H2G / G2H to be submitted / processed */
	intel_gt_retire_requests(gt);
	msleep(500);

	/* Scrub missing G2H */
	intel_gt_handle_error(engine->gt, -1, 0, "selftest reset");

	/* GT will not idle if G2H are lost */
	ret = intel_gt_wait_for_idle(gt, HZ);
	if (ret < 0) {
		pr_err("GT failed to idle: %d\n", ret);
		goto err;
	}

err:
	for (i = 0; i < 3; ++i)
		if (last[i])
			i915_request_put(last[i]);
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);

	return ret;
}

/*
 * intel_guc_steal_guc_ids - Test to exhaust all guc_ids and then steal one
 *
 * This test creates a spinner which is used to block all subsequent
 * submissions until it completes. Next, a loop creates a context and a NOP
 * request each iteration until the guc_ids are exhausted (request creation
 * returns -EAGAIN). The spinner is ended, unblocking all requests created in
 * the loop. At this point all guc_ids are exhausted but are available to
 * steal. Try to create another request which should successfully steal a
 * guc_id. Wait on the last request to complete, idle the GPU, verify a
 * guc_id was stolen via a counter, and exit the test. The test also
 * artificially reduces the number of guc_ids so it runs in a timely manner.
 */
static int intel_guc_steal_guc_ids(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_guc *guc = &gt->uc.guc;
	int ret, sv, context_index = 0;
	intel_wakeref_t wakeref;
	struct intel_engine_cs *engine;
	struct intel_context **ce;
	struct igt_spinner spin;
	struct i915_request *spin_rq = NULL, *rq, *last = NULL;
	int number_guc_id_stolen = guc->number_guc_id_stolen;

	ce = kcalloc(GUC_MAX_CONTEXT_ID, sizeof(*ce), GFP_KERNEL);
	if (!ce) {
		pr_err("Context array allocation failed\n");
		return -ENOMEM;
	}

	wakeref = intel_runtime_pm_get(gt->uncore->rpm);
	engine = intel_selftest_find_any_engine(gt);
	sv = guc->submission_state.num_guc_ids;
	/* Artificially reduce the guc_id pool so exhaustion happens quickly */
	guc->submission_state.num_guc_ids = 512;

	/* Create spinner to block requests in below loop */
	ce[context_index] = intel_context_create(engine);
	if (IS_ERR(ce[context_index])) {
		ret = PTR_ERR(ce[context_index]);
		ce[context_index] = NULL;
		pr_err("Failed to create context: %d\n", ret);
		goto err_wakeref;
	}
	ret = igt_spinner_init(&spin, engine->gt);
	if (ret) {
		pr_err("Failed to create spinner: %d\n", ret);
		goto err_contexts;
	}
	spin_rq = igt_spinner_create_request(&spin, ce[context_index],
					     MI_ARB_CHECK);
	if (IS_ERR(spin_rq)) {
		ret = PTR_ERR(spin_rq);
		pr_err("Failed to create spinner request: %d\n", ret);
		goto err_contexts;
	}
	ret = request_add_spin(spin_rq, &spin);
	if (ret) {
		pr_err("Failed to add spinner request: %d\n", ret);
		goto err_spin_rq;
	}

	/* Use all guc_ids */
	while (ret != -EAGAIN) {
		ce[++context_index] = intel_context_create(engine);
		if (IS_ERR(ce[context_index])) {
			ret = PTR_ERR(ce[context_index]);
			/* Clear the failed slot so cleanup starts at the last valid context */
			ce[context_index--] = NULL;
			pr_err("Failed to create context: %d\n", ret);
			goto err_spin_rq;
		}

		rq = nop_user_request(ce[context_index], spin_rq);
		if (IS_ERR(rq)) {
			ret = PTR_ERR(rq);
			rq = NULL;
			if (ret != -EAGAIN) {
				pr_err("Failed to create request, %d: %d\n",
				       context_index, ret);
				goto err_spin_rq;
			}
		} else {
			if (last)
				i915_request_put(last);
			last = rq;
		}
	}

	/* Release blocked requests */
	igt_spinner_end(&spin);
	ret = intel_selftest_wait_for_rq(spin_rq);
	if (ret) {
		pr_err("Spin request failed to complete: %d\n", ret);
		i915_request_put(last);
		goto err_spin_rq;
	}
	i915_request_put(spin_rq);
	igt_spinner_fini(&spin);
	spin_rq = NULL;

	/* Wait for last request */
	ret = i915_request_wait(last, 0, HZ * 30);
	i915_request_put(last);
	if (ret < 0) {
		pr_err("Last request failed to complete: %d\n", ret);
		goto err_spin_rq;
	}

	/* Try to steal guc_id */
	rq = nop_user_request(ce[context_index], NULL);
	if (IS_ERR(rq)) {
		ret = PTR_ERR(rq);
		pr_err("Failed to steal guc_id, %d: %d\n", context_index, ret);
		goto err_spin_rq;
	}

	/* Wait for request with stolen guc_id */
	ret = i915_request_wait(rq, 0, HZ);
	i915_request_put(rq);
	if (ret < 0) {
		pr_err("Request with stolen guc_id failed to complete: %d\n",
		       ret);
		goto err_spin_rq;
	}

	/* Wait for idle */
	ret = intel_gt_wait_for_idle(gt, HZ * 30);
	if (ret < 0) {
		pr_err("GT failed to idle: %d\n", ret);
		goto err_spin_rq;
	}

	/* Verify a guc_id was stolen */
	if (guc->number_guc_id_stolen == number_guc_id_stolen) {
		pr_err("No guc_id was stolen\n");
		ret = -EINVAL;
	} else {
		ret = 0;
	}

err_spin_rq:
	if (spin_rq) {
		igt_spinner_end(&spin);
		intel_selftest_wait_for_rq(spin_rq);
		i915_request_put(spin_rq);
		igt_spinner_fini(&spin);
		intel_gt_wait_for_idle(gt, HZ * 30);
	}
err_contexts:
	for (; context_index >= 0 && ce[context_index]; --context_index)
		intel_context_put(ce[context_index]);
err_wakeref:
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
	kfree(ce);
	guc->submission_state.num_guc_ids = sv;

	return ret;
}
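/*
 * intel_guc_live_selftests - Entry point for the GuC live selftests
 *
 * The subtests are skipped when the GT is already wedged or when the
 * platform is not using GuC submission.
 */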
int intel_guc_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(intel_guc_scrub_ctbs),
		SUBTEST(intel_guc_steal_guc_ids),
	};
	struct intel_gt *gt = to_gt(i915);

	if (intel_gt_is_wedged(gt))
		return 0;

	if (!intel_uc_uses_guc_submission(&gt->uc))
		return 0;

	return intel_gt_live_subtests(tests, gt);
}