// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "gt/intel_engine_pm.h"
#include "gt/intel_gpu_commands.h"
#include "i915_selftest.h"

#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_spinner.h"
#include "selftests/intel_scheduler_helpers.h"

struct live_mocs {
	struct drm_i915_mocs_table table;
	struct drm_i915_mocs_table *mocs;
	struct drm_i915_mocs_table *l3cc;
	struct i915_vma *scratch;
	void *vaddr;
};

static struct intel_context *mocs_context_create(struct intel_engine_cs *engine)
{
	struct intel_context *ce;

	ce = intel_context_create(engine);
	if (IS_ERR(ce))
		return ce;

	/* We build large requests to read the registers from the ring */
	ce->ring_size = SZ_16K;

	return ce;
}

static int request_add_sync(struct i915_request *rq, int err)
{
	i915_request_get(rq);
	i915_request_add(rq);
	if (i915_request_wait(rq, 0, HZ / 5) < 0)
		err = -ETIME;
	i915_request_put(rq);

	return err;
}

static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
{
	int err = 0;

	i915_request_get(rq);
	i915_request_add(rq);
	if (spin && !igt_wait_for_spinner(spin, rq))
		err = -ETIME;
	i915_request_put(rq);

	return err;
}

static int live_mocs_init(struct live_mocs *arg, struct intel_gt *gt)
{
	unsigned int flags;
	int err;

	memset(arg, 0, sizeof(*arg));

	flags = get_mocs_settings(gt->i915, &arg->table);
	if (!flags)
		return -EINVAL;

	if (flags & HAS_RENDER_L3CC)
		arg->l3cc = &arg->table;

	if (flags & (HAS_GLOBAL_MOCS | HAS_ENGINE_MOCS))
		arg->mocs = &arg->table;

	arg->scratch =
		__vm_create_scratch_for_read_pinned(&gt->ggtt->vm, PAGE_SIZE);
	if (IS_ERR(arg->scratch))
		return PTR_ERR(arg->scratch);

	arg->vaddr = i915_gem_object_pin_map_unlocked(arg->scratch->obj, I915_MAP_WB);
	if (IS_ERR(arg->vaddr)) {
		err = PTR_ERR(arg->vaddr);
		goto err_scratch;
	}

	return 0;

err_scratch:
	i915_vma_unpin_and_release(&arg->scratch, 0);
	return err;
}

static void live_mocs_fini(struct live_mocs *arg)
{
	i915_vma_unpin_and_release(&arg->scratch, I915_VMA_RELEASE_MAP);
}

static int read_regs(struct i915_request *rq,
		     u32 addr, unsigned int count,
		     u32 *offset)
{
	unsigned int i;
	u32 *cs;

	GEM_BUG_ON(!IS_ALIGNED(*offset, sizeof(u32)));

	cs = intel_ring_begin(rq, 4 * count);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	for (i = 0; i < count; i++) {
		*cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
		*cs++ = addr;
		*cs++ = *offset;
		*cs++ = 0;

		addr += sizeof(u32);
		*offset += sizeof(u32);
	}

	intel_ring_advance(rq, cs);

	return 0;
}

static int read_mocs_table(struct i915_request *rq,
			   const struct drm_i915_mocs_table *table,
			   u32 *offset)
{
	struct intel_gt *gt = rq->engine->gt;
	u32 addr;

	if (!table)
		return 0;

	if (HAS_GLOBAL_MOCS_REGISTERS(rq->i915))
		addr = global_mocs_offset() + gt->uncore->gsi_offset;
	else
		addr = mocs_offset(rq->engine);

	return read_regs(rq, addr, table->n_entries, offset);
}

static int read_l3cc_table(struct i915_request *rq,
			   const struct drm_i915_mocs_table *table,
			   u32 *offset)
{
	u32 addr = i915_mmio_reg_offset(GEN9_LNCFCMOCS(0));

	if (!table)
		return 0;

	return read_regs(rq, addr, (table->n_entries + 1) / 2, offset);
}
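
/*
 * Walk the scratch page that read_mocs_table() populated via SRM and
 * compare each dword against the control value the MOCS table is
 * expected to hold, advancing *vaddr one register per entry.
 */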
static int check_mocs_table(struct intel_engine_cs *engine,
			    const struct drm_i915_mocs_table *table,
			    u32 **vaddr)
{
	unsigned int i;
	u32 expect;

	if (!table)
		return 0;

	for_each_mocs(expect, table, i) {
		if (**vaddr != expect) {
			pr_err("%s: Invalid MOCS[%d] entry, found %08x, expected %08x\n",
			       engine->name, i, **vaddr, expect);
			return -EINVAL;
		}
		++*vaddr;
	}

	return 0;
}

static bool mcr_range(struct drm_i915_private *i915, u32 offset)
{
	/*
	 * Registers in this range are affected by the MCR selector
	 * which only controls CPU initiated MMIO. Routing does not
	 * work for CS access so we cannot verify them on this path.
	 */
	return GRAPHICS_VER(i915) >= 8 && offset >= 0xb000 && offset <= 0xb4ff;
}

static int check_l3cc_table(struct intel_engine_cs *engine,
			    const struct drm_i915_mocs_table *table,
			    u32 **vaddr)
{
	/* Can we read the MCR range 0xb00 directly? See intel_workarounds! */
	u32 reg = i915_mmio_reg_offset(GEN9_LNCFCMOCS(0));
	unsigned int i;
	u32 expect;

	if (!table)
		return 0;

	for_each_l3cc(expect, table, i) {
		if (!mcr_range(engine->i915, reg) && **vaddr != expect) {
			pr_err("%s: Invalid L3CC[%d] entry, found %08x, expected %08x\n",
			       engine->name, i, **vaddr, expect);
			return -EINVAL;
		}
		++*vaddr;
		reg += 4;
	}

	return 0;
}

static int check_mocs_engine(struct live_mocs *arg,
			     struct intel_context *ce)
{
	struct i915_vma *vma = arg->scratch;
	struct i915_request *rq;
	u32 offset;
	u32 *vaddr;
	int err;

	memset32(arg->vaddr, STACK_MAGIC, PAGE_SIZE / sizeof(u32));

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	err = igt_vma_move_to_active_unlocked(vma, rq, EXEC_OBJECT_WRITE);

	/* Read the mocs tables back using SRM */
	offset = i915_ggtt_offset(vma);
	if (!err)
		err = read_mocs_table(rq, arg->mocs, &offset);
	if (!err && ce->engine->class == RENDER_CLASS)
		err = read_l3cc_table(rq, arg->l3cc, &offset);
	offset -= i915_ggtt_offset(vma);
	GEM_BUG_ON(offset > PAGE_SIZE);

	err = request_add_sync(rq, err);
	if (err)
		return err;

	/* Compare the results against the expected tables */
	vaddr = arg->vaddr;
	if (!err)
		err = check_mocs_table(ce->engine, arg->mocs, &vaddr);
	if (!err && ce->engine->class == RENDER_CLASS)
		err = check_l3cc_table(ce->engine, arg->l3cc, &vaddr);
	if (err)
		return err;

	GEM_BUG_ON(arg->vaddr + offset != vaddr);
	return 0;
}

static int live_mocs_kernel(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct live_mocs mocs;
	int err;

	/* Basic check the system is configured with the expected mocs table */

	err = live_mocs_init(&mocs, gt);
	if (err)
		return err;

	for_each_engine(engine, gt, id) {
		intel_engine_pm_get(engine);
		err = check_mocs_engine(&mocs, engine->kernel_context);
		intel_engine_pm_put(engine);
		if (err)
			break;
	}

	live_mocs_fini(&mocs);
	return err;
}

static int live_mocs_clean(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct live_mocs mocs;
	int err;

	/* Every new context should see the same mocs table */

	err = live_mocs_init(&mocs, gt);
	if (err)
		return err;

	for_each_engine(engine, gt, id) {
		struct intel_context *ce;

		ce = mocs_context_create(engine);
		if (IS_ERR(ce)) {
			err = PTR_ERR(ce);
			break;
		}

		err = check_mocs_engine(&mocs, ce);
		intel_context_put(ce);
		if (err)
			break;
	}

	live_mocs_fini(&mocs);
	return err;
}
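
/*
 * Keep the engine busy with a spinning request, then reset it while
 * active: directly via intel_engine_reset() with execlists submission,
 * or, when submission is under GuC control, by relying on the GuC to
 * detect and reset the hung engine (see the fast-reset scheduling
 * policy applied in live_mocs_reset()).
 */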
static int active_engine_reset(struct intel_context *ce,
			       const char *reason,
			       bool using_guc)
{
	struct igt_spinner spin;
	struct i915_request *rq;
	int err;

	err = igt_spinner_init(&spin, ce->engine->gt);
	if (err)
		return err;

	rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
	if (IS_ERR(rq)) {
		igt_spinner_fini(&spin);
		return PTR_ERR(rq);
	}

	err = request_add_spin(rq, &spin);
	if (err == 0 && !using_guc)
		err = intel_engine_reset(ce->engine, reason);

	/* Ensure the reset happens and kills the engine */
	if (err == 0)
		err = intel_selftest_wait_for_rq(rq);

	igt_spinner_end(&spin);
	igt_spinner_fini(&spin);

	return err;
}

static int __live_mocs_reset(struct live_mocs *mocs,
			     struct intel_context *ce, bool using_guc)
{
	struct intel_gt *gt = ce->engine->gt;
	int err;

	if (intel_has_reset_engine(gt)) {
		if (!using_guc) {
			err = intel_engine_reset(ce->engine, "mocs");
			if (err)
				return err;

			err = check_mocs_engine(mocs, ce);
			if (err)
				return err;
		}

		err = active_engine_reset(ce, "mocs", using_guc);
		if (err)
			return err;

		err = check_mocs_engine(mocs, ce);
		if (err)
			return err;
	}

	if (intel_has_gpu_reset(gt)) {
		intel_gt_reset(gt, ce->engine->mask, "mocs");

		err = check_mocs_engine(mocs, ce);
		if (err)
			return err;
	}

	return 0;
}

static int live_mocs_reset(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct live_mocs mocs;
	int err = 0;

	/* Check the mocs setup is retained over per-engine and global resets */

	err = live_mocs_init(&mocs, gt);
	if (err)
		return err;

	igt_global_reset_lock(gt);
	for_each_engine(engine, gt, id) {
		bool using_guc = intel_engine_uses_guc(engine);
		struct intel_selftest_saved_policy saved;
		struct intel_context *ce;
		int err2;

		err = intel_selftest_modify_policy(engine, &saved,
						   SELFTEST_SCHEDULER_MODIFY_FAST_RESET);
		if (err)
			break;

		ce = mocs_context_create(engine);
		if (IS_ERR(ce)) {
			err = PTR_ERR(ce);
			goto restore;
		}

		intel_engine_pm_get(engine);

		err = __live_mocs_reset(&mocs, ce, using_guc);

		intel_engine_pm_put(engine);
		intel_context_put(ce);

restore:
		err2 = intel_selftest_restore_policy(engine, &saved);
		if (err == 0)
			err = err2;
		if (err)
			break;
	}
	igt_global_reset_unlock(gt);

	live_mocs_fini(&mocs);
	return err;
}

int intel_mocs_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_mocs_kernel),
		SUBTEST(live_mocs_clean),
		SUBTEST(live_mocs_reset),
	};
	struct drm_i915_mocs_table table;

	if (!get_mocs_settings(i915, &table))
		return 0;

	return intel_gt_live_subtests(tests, to_gt(i915));
}