/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "gt/intel_engine_pm.h"
#include "i915_selftest.h"

#include "gem/selftests/mock_context.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_spinner.h"

/* Expected MOCS/L3CC tables plus a scratch page used to read them back */
struct live_mocs {
	struct drm_i915_mocs_table mocs;
	struct drm_i915_mocs_table l3cc;
	struct i915_vma *scratch;
	void *vaddr;
};

/* Submit the request and wait a short while for it to complete */
static int request_add_sync(struct i915_request *rq, int err)
{
	i915_request_get(rq);
	i915_request_add(rq);
	if (i915_request_wait(rq, 0, HZ / 5) < 0)
		err = -ETIME;
	i915_request_put(rq);

	return err;
}

/* Submit the request and wait until the spinner payload starts executing */
static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
{
	int err = 0;

	i915_request_get(rq);
	i915_request_add(rq);
	if (spin && !igt_wait_for_spinner(spin, rq))
		err = -ETIME;
	i915_request_put(rq);

	return err;
}

/* Allocate an internal page, pinned into the GGTT, to hold the SRM results */
static struct i915_vma *create_scratch(struct intel_gt *gt)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int err;

	obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED);

	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		i915_gem_object_put(obj);
		return vma;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
	if (err) {
		i915_gem_object_put(obj);
		return ERR_PTR(err);
	}

	return vma;
}

static int live_mocs_init(struct live_mocs *arg, struct intel_gt *gt)
{
	struct drm_i915_mocs_table table;
	unsigned int flags;
	int err;

	memset(arg, 0, sizeof(*arg));

	flags = get_mocs_settings(gt->i915, &table);
	if (!flags)
		return -EINVAL;

	if (flags & HAS_RENDER_L3CC)
		arg->l3cc = table;

	if (flags & (HAS_GLOBAL_MOCS | HAS_ENGINE_MOCS))
		arg->mocs = table;

	arg->scratch = create_scratch(gt);
	if (IS_ERR(arg->scratch))
		return PTR_ERR(arg->scratch);

	arg->vaddr = i915_gem_object_pin_map(arg->scratch->obj, I915_MAP_WB);
	if (IS_ERR(arg->vaddr)) {
		err = PTR_ERR(arg->vaddr);
		goto err_scratch;
	}

	return 0;

err_scratch:
	i915_vma_unpin_and_release(&arg->scratch, 0);
	return err;
}

static void live_mocs_fini(struct live_mocs *arg)
{
	i915_vma_unpin_and_release(&arg->scratch, I915_VMA_RELEASE_MAP);
}

/* Emit SRM commands copying count registers from addr into the scratch page */
static int read_regs(struct i915_request *rq,
		     u32 addr, unsigned int count,
		     u32 *offset)
{
	unsigned int i;
	u32 *cs;

	GEM_BUG_ON(!IS_ALIGNED(*offset, sizeof(u32)));

	cs = intel_ring_begin(rq, 4 * count);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	for (i = 0; i < count; i++) {
		*cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
		*cs++ = addr;
		*cs++ = *offset;
		*cs++ = 0;

		addr += sizeof(u32);
		*offset += sizeof(u32);
	}

	intel_ring_advance(rq, cs);

	return 0;
}

static int read_mocs_table(struct i915_request *rq,
			   const struct drm_i915_mocs_table *table,
			   u32 *offset)
{
	u32 addr;

	if (HAS_GLOBAL_MOCS_REGISTERS(rq->i915))
		addr = global_mocs_offset();
	else
		addr = mocs_offset(rq->engine);

	return read_regs(rq, addr, table->n_entries, offset);
}

static int read_l3cc_table(struct i915_request *rq,
			   const struct drm_i915_mocs_table *table,
			   u32 *offset)
{
	u32 addr = i915_mmio_reg_offset(GEN9_LNCFCMOCS(0));

	/* Each L3CC register packs two table entries */
	return read_regs(rq, addr, (table->n_entries + 1) / 2, offset);
}

static int check_mocs_table(struct intel_engine_cs *engine,
			    const struct drm_i915_mocs_table *table,
			    u32 **vaddr)
{
	unsigned int i;
	u32 expect;

	for_each_mocs(expect, table, i) {
		if (**vaddr != expect) {
			pr_err("%s: Invalid MOCS[%d] entry, found %08x, expected %08x\n",
			       engine->name, i, **vaddr, expect);
			return -EINVAL;
		}
		++*vaddr;
	}

	return 0;
}

static bool mcr_range(struct drm_i915_private *i915, u32 offset)
{
	/*
	 * Registers in this range are affected by the MCR selector
	 * which only controls CPU initiated MMIO. Routing does not
	 * work for CS access so we cannot verify them on this path.
	 */
	return INTEL_GEN(i915) >= 8 && offset >= 0xb000 && offset <= 0xb4ff;
}

static int check_l3cc_table(struct intel_engine_cs *engine,
			    const struct drm_i915_mocs_table *table,
			    u32 **vaddr)
{
	/* Can we read the MCR range 0xb00 directly? See intel_workarounds! */
	u32 reg = i915_mmio_reg_offset(GEN9_LNCFCMOCS(0));
	unsigned int i;
	u32 expect;

	for_each_l3cc(expect, table, i) {
		if (!mcr_range(engine->i915, reg) && **vaddr != expect) {
			pr_err("%s: Invalid L3CC[%d] entry, found %08x, expected %08x\n",
			       engine->name, i, **vaddr, expect);
			return -EINVAL;
		}
		++*vaddr;
		reg += 4;
	}

	return 0;
}

static int check_mocs_engine(struct live_mocs *arg,
			     struct intel_context *ce)
{
	struct i915_vma *vma = arg->scratch;
	struct i915_request *rq;
	u32 offset;
	u32 *vaddr;
	int err;

	memset32(arg->vaddr, STACK_MAGIC, PAGE_SIZE / sizeof(u32));

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj, true);
	if (!err)
		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(vma);

	/* Read the mocs tables back using SRM */
	offset = i915_ggtt_offset(vma);
	if (!err)
		err = read_mocs_table(rq, &arg->mocs, &offset);
	if (!err && ce->engine->class == RENDER_CLASS)
		err = read_l3cc_table(rq, &arg->l3cc, &offset);
	offset -= i915_ggtt_offset(vma);
	GEM_BUG_ON(offset > PAGE_SIZE);

	err = request_add_sync(rq, err);
	if (err)
		return err;

	/* Compare the results against the expected tables */
	vaddr = arg->vaddr;
	if (!err)
		err = check_mocs_table(ce->engine, &arg->mocs, &vaddr);
	if (!err && ce->engine->class == RENDER_CLASS)
		err = check_l3cc_table(ce->engine, &arg->l3cc, &vaddr);
	if (err)
		return err;

	GEM_BUG_ON(arg->vaddr + offset != vaddr);
	return 0;
}

static int live_mocs_kernel(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct live_mocs mocs;
	int err;

	/* Basic check the system is configured with the expected mocs table */

	err = live_mocs_init(&mocs, gt);
	if (err)
		return err;

	for_each_engine(engine, gt, id) {
		intel_engine_pm_get(engine);
		err = check_mocs_engine(&mocs, engine->kernel_context);
		intel_engine_pm_put(engine);
		if (err)
			break;
	}

	live_mocs_fini(&mocs);
	return err;
}

static int live_mocs_clean(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct live_mocs mocs;
	int err;

	/* Every new context should see the same mocs table */

	err = live_mocs_init(&mocs, gt);
	if (err)
		return err;

	for_each_engine(engine, gt, id) {
		struct intel_context *ce;

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			err = PTR_ERR(ce);
			break;
		}

		err = check_mocs_engine(&mocs, ce);
		intel_context_put(ce);
		if (err)
			break;
	}

	live_mocs_fini(&mocs);
	return err;
}

/* Trigger an engine reset while a spinner keeps the engine busy */
static int active_engine_reset(struct intel_context *ce,
			       const char *reason)
{
	struct igt_spinner spin;
	struct i915_request *rq;
	int err;

	err = igt_spinner_init(&spin, ce->engine->gt);
	if (err)
		return err;

	rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
	if (IS_ERR(rq)) {
		igt_spinner_fini(&spin);
		return PTR_ERR(rq);
	}

	err = request_add_spin(rq, &spin);
	if (err == 0)
		err = intel_engine_reset(ce->engine, reason);

	igt_spinner_end(&spin);
	igt_spinner_fini(&spin);

	return err;
}

/* Check the MOCS tables survive idle, active and full-GT resets */
static int __live_mocs_reset(struct live_mocs *mocs,
			     struct intel_context *ce)
{
	int err;

	err = intel_engine_reset(ce->engine, "mocs");
	if (err)
		return err;

	err = check_mocs_engine(mocs, ce);
	if (err)
		return err;

	err = active_engine_reset(ce, "mocs");
	if (err)
		return err;

	err = check_mocs_engine(mocs, ce);
	if (err)
		return err;

	intel_gt_reset(ce->engine->gt, ce->engine->mask, "mocs");

	err = check_mocs_engine(mocs, ce);
	if (err)
		return err;

	return 0;
}

static int live_mocs_reset(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct live_mocs mocs;
	int err = 0;

	/* Check the mocs setup is retained over per-engine and global resets */

	if (!intel_has_reset_engine(gt))
		return 0;

	err = live_mocs_init(&mocs, gt);
	if (err)
		return err;

	igt_global_reset_lock(gt);
	for_each_engine(engine, gt, id) {
		struct intel_context *ce;

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			err = PTR_ERR(ce);
			break;
		}

		intel_engine_pm_get(engine);
		err = __live_mocs_reset(&mocs, ce);
		intel_engine_pm_put(engine);

		intel_context_put(ce);
		if (err)
			break;
	}
	igt_global_reset_unlock(gt);

	live_mocs_fini(&mocs);
	return err;
}

int intel_mocs_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_mocs_kernel),
		SUBTEST(live_mocs_clean),
		SUBTEST(live_mocs_reset),
	};
	struct drm_i915_mocs_table table;

	if (!get_mocs_settings(i915, &table))
		return 0;

	return intel_gt_live_subtests(tests, &i915->gt);
}