// SPDX-License-Identifier: MIT
/*
 * Copyright © 2018 Intel Corporation
 */

#include "gem/i915_gem_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "i915_selftest.h"
#include "intel_reset.h"

#include "selftests/igt_flush_test.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_spinner.h"
#include "selftests/intel_scheduler_helpers.h"
#include "selftests/mock_drm.h"

#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"

/*
 * Known write-only registers, per platform: reading one back will not
 * return what was written, so the dirty-whitelist test must skip them.
 */
static const struct wo_register {
	enum intel_platform platform;
	u32 reg;
} wo_registers[] = {
	{ INTEL_GEMINILAKE, 0x731c }
};

/*
 * Reference copies of the workaround lists (global GT list plus the
 * per-engine and per-context lists), built independently of the lists the
 * driver applies, so the selftests can verify them after suspend/reset.
 */
struct wa_lists {
	struct i915_wa_list gt_wa_list;
	struct {
		struct i915_wa_list wa_list;
		struct i915_wa_list ctx_wa_list;
	} engine[I915_NUM_ENGINES];
};

/*
 * Submit @rq and synchronously wait (up to HZ/5) for it to complete.
 * Folds a timeout into @err as -EIO; any earlier error passed in by the
 * caller is preserved. Always releases the request reference taken here.
 */
static int request_add_sync(struct i915_request *rq, int err)
{
	i915_request_get(rq);
	i915_request_add(rq);
	if (i915_request_wait(rq, 0, HZ / 5) < 0)
		err = -EIO;
	i915_request_put(rq);

	return err;
}

/*
 * Submit @rq and, if @spin is provided, wait for the spinner payload to
 * actually start executing on the GPU. Returns -ETIMEDOUT if it never
 * begins spinning.
 */
static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
{
	int err = 0;

	i915_request_get(rq);
	i915_request_add(rq);
	if (spin && !igt_wait_for_spinner(spin, rq))
		err = -ETIMEDOUT;
	i915_request_put(rq);

	return err;
}

/*
 * Build the reference workaround lists for @gt: the global GT list, one
 * engine list per engine, and one context list per engine. These are
 * regenerated from scratch so they do not share state with the lists the
 * driver actually applied.
 */
static void
reference_lists_init(struct intel_gt *gt, struct wa_lists *lists)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	memset(lists, 0, sizeof(*lists));

	wa_init_start(&lists->gt_wa_list, "GT_REF", "global");
	gt_init_workarounds(gt, &lists->gt_wa_list);
	wa_init_finish(&lists->gt_wa_list);

	for_each_engine(engine, gt, id) {
		struct i915_wa_list *wal = &lists->engine[id].wa_list;

		wa_init_start(wal, "REF", engine->name);
		engine_init_workarounds(engine, wal);
		wa_init_finish(wal);

		__intel_engine_init_ctx_wa(engine,
					   &lists->engine[id].ctx_wa_list,
					   "CTX_REF");
	}
}

/*
 * Release the reference lists built by reference_lists_init().
 *
 * NOTE(review): engine[id].ctx_wa_list is initialised above via
 * __intel_engine_init_ctx_wa() but is not freed here — verify whether
 * that list owns an allocation that leaks.
 */
static void
reference_lists_fini(struct intel_gt *gt, struct wa_lists *lists)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id)
		intel_wa_list_free(&lists->engine[id].wa_list);

	intel_wa_list_free(&lists->gt_wa_list);
}

/*
 * Submit a request on @ce that stores every RING_FORCE_TO_NONPRIV slot of
 * the engine into a freshly allocated page (via SRM commands), so userspace-
 * visible whitelist state can be inspected from the CPU. The page is
 * poisoned with 0xc5 beforehand so unwritten slots are detectable.
 *
 * Returns the (still busy) results object on success; the caller is
 * responsible for waiting on it and releasing it. Returns ERR_PTR on error.
 */
static struct drm_i915_gem_object *
read_nonprivs(struct intel_context *ce)
{
	struct intel_engine_cs *engine = ce->engine;
	const u32 base = engine->mmio_base;
	struct drm_i915_gem_object *result;
	struct i915_request *rq;
	struct i915_vma *vma;
	u32 srm, *cs;
	int err;
	int i;

	result = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
	if (IS_ERR(result))
		return result;

	i915_gem_object_set_cache_coherency(result, I915_CACHE_LLC);

	cs = i915_gem_object_pin_map_unlocked(result, I915_MAP_WB);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_obj;
	}
	/* Poison the page so missing writes stand out when read back */
	memset(cs, 0xc5, PAGE_SIZE);
	i915_gem_object_flush_map(result);
	i915_gem_object_unpin_map(result);

	vma = i915_vma_instance(result, &engine->gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
	if (err)
		goto err_obj;

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_pin;
	}

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj, true);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(vma);
	if (err)
		goto err_req;

	/* gen8+ SRM takes a 64b address, selected by the +1 opcode bump */
	srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
	if (GRAPHICS_VER(engine->i915) >= 8)
		srm++;

	cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_req;
	}

	/* One SRM per whitelist slot, each landing in its own u32 */
	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		*cs++ = srm;
		*cs++ = i915_mmio_reg_offset(RING_FORCE_TO_NONPRIV(base, i));
		*cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
		*cs++ = 0;
	}
	intel_ring_advance(rq, cs);

	i915_request_add(rq);
	i915_vma_unpin(vma);

	return result;

err_req:
	i915_request_add(rq);
err_pin:
	i915_vma_unpin(vma);
err_obj:
	i915_gem_object_put(result);
	return ERR_PTR(err);
}

/*
 * Expected content of whitelist slot @i: the configured register for
 * in-range slots, or RING_NOPID for the unused remainder.
 */
static u32
get_whitelist_reg(const struct intel_engine_cs *engine, unsigned int i)
{
	i915_reg_t reg = i < engine->whitelist.count ?
			 engine->whitelist.list[i].reg :
			 RING_NOPID(engine->mmio_base);

	return i915_mmio_reg_offset(reg);
}

/* Dump every slot's expected vs actual value for debugging a mismatch. */
static void
print_results(const struct intel_engine_cs *engine, const u32 *results)
{
	unsigned int i;

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		u32 expected = get_whitelist_reg(engine, i);
		u32 actual = results[i];

		pr_info("RING_NONPRIV[%d]: expected 0x%08x, found 0x%08x\n",
			i, expected, actual);
	}
}

/*
 * Read back all RING_FORCE_TO_NONPRIV slots via read_nonprivs() and check
 * each against the expected whitelist. Returns 0 if every slot matches,
 * -EINVAL on a mismatch, -EIO if the GT wedged while waiting.
 */
static int check_whitelist(struct intel_context *ce)
{
	struct intel_engine_cs *engine = ce->engine;
	struct drm_i915_gem_object *results;
	struct intel_wedge_me wedge;
	u32 *vaddr;
	int err;
	int i;

	results = read_nonprivs(ce);
	if (IS_ERR(results))
		return PTR_ERR(results);

	err = 0;
	i915_gem_object_lock(results, NULL);
	intel_wedge_on_timeout(&wedge, engine->gt, HZ / 5) /* safety net! */
		err = i915_gem_object_set_to_cpu_domain(results, false);

	if (intel_gt_is_wedged(engine->gt))
		err = -EIO;
	if (err)
		goto out_put;

	vaddr = i915_gem_object_pin_map(results, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out_put;
	}

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		u32 expected = get_whitelist_reg(engine, i);
		u32 actual = vaddr[i];

		if (expected != actual) {
			print_results(engine, vaddr);
			pr_err("Invalid RING_NONPRIV[%d], expected 0x%08x, found 0x%08x\n",
			       i, expected, actual);

			err = -EINVAL;
			break;
		}
	}

	i915_gem_object_unpin_map(results);
out_put:
	i915_gem_object_unlock(results);
	i915_gem_object_put(results);
	return err;
}

/* Full-device reset path used by check_whitelist_across_reset(). */
static int do_device_reset(struct intel_engine_cs *engine)
{
	intel_gt_reset(engine->gt, engine->mask, "live_workarounds");
	return 0;
}

/* Per-engine reset path used by check_whitelist_across_reset(). */
static int do_engine_reset(struct intel_engine_cs *engine)
{
	return intel_engine_reset(engine, "live_workarounds");
}

static int do_guc_reset(struct intel_engine_cs *engine)
{
	/* Currently a no-op as the reset is handled by GuC */
	return 0;
}

/*
 * Start a spinner on a fresh (scratch) context on @engine so the engine is
 * busy with unrelated work when the reset fires. On success *@rq holds the
 * submitted spinner request (caller must end/wait it); on failure the
 * spinner is ended here.
 */
static int
switch_to_scratch_context(struct intel_engine_cs *engine,
			  struct igt_spinner *spin,
			  struct i915_request **rq)
{
	struct intel_context *ce;
	int err = 0;

	ce = intel_context_create(engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	*rq = igt_spinner_create_request(spin, ce, MI_NOOP);
	intel_context_put(ce);

	if (IS_ERR(*rq)) {
		spin = NULL;
		err = PTR_ERR(*rq);
		goto err;
	}

	err = request_add_spin(*rq, spin);
err:
	if (err && spin)
		igt_spinner_end(spin);

	return err;
}

/*
 * Core whitelist-vs-reset check: verify the RING_NONPRIV whitelist is
 * valid before a reset, survives a reset performed while the engine runs a
 * spinner on a scratch context, and is also correct in a brand new context
 * created after the reset. @reset selects the reset flavour (device,
 * engine, or GuC-mediated); @name is only used for log messages.
 */
static int check_whitelist_across_reset(struct intel_engine_cs *engine,
					int (*reset)(struct intel_engine_cs *),
					const char *name)
{
	struct intel_context *ce, *tmp;
	struct igt_spinner spin;
	struct i915_request *rq;
	intel_wakeref_t wakeref;
	int err;

	pr_info("Checking %d whitelisted registers on %s (RING_NONPRIV) [%s]\n",
		engine->whitelist.count, engine->name, name);

	ce = intel_context_create(engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	err = igt_spinner_init(&spin, engine->gt);
	if (err)
		goto out_ctx;

	err = check_whitelist(ce);
	if (err) {
		pr_err("Invalid whitelist *before* %s reset!\n", name);
		goto out_spin;
	}

	err = switch_to_scratch_context(engine, &spin, &rq);
	if (err)
		goto out_spin;

	/* Ensure the spinner hasn't aborted */
	if (i915_request_completed(rq)) {
		pr_err("%s spinner failed to start\n", name);
		err = -ETIMEDOUT;
		goto out_spin;
	}

	with_intel_runtime_pm(engine->uncore->rpm, wakeref)
		err = reset(engine);

	/* Ensure the reset happens and kills the engine */
	if (err == 0)
		err = intel_selftest_wait_for_rq(rq);

	igt_spinner_end(&spin);

	if (err) {
		pr_err("%s reset failed\n", name);
		goto out_spin;
	}

	err = check_whitelist(ce);
	if (err) {
		pr_err("Whitelist not preserved in context across %s reset!\n",
		       name);
		goto out_spin;
	}

	/* Repeat the check with a context created *after* the reset */
	tmp = intel_context_create(engine);
	if (IS_ERR(tmp)) {
		err = PTR_ERR(tmp);
		goto out_spin;
	}
	intel_context_put(ce);
	ce = tmp;

	err = check_whitelist(ce);
	if (err) {
		pr_err("Invalid whitelist *after* %s reset in fresh context!\n",
		       name);
		goto out_spin;
	}

out_spin:
	igt_spinner_fini(&spin);
out_ctx:
	intel_context_put(ce);
	return err;
}

/*
 * Allocate and pin a 16-page batch buffer in @vm for building command
 * sequences. Returns the pinned vma (caller unpins/releases) or ERR_PTR.
 */
static struct i915_vma *create_batch(struct i915_address_space *vm)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int err;

	obj = i915_gem_object_create_internal(vm->i915, 16 * PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err_obj;

	return vma;

err_obj:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

/*
 * Model the effect of writing @new to a register whose writable bits are
 * given by @rsvd. The special rsvd value 0x0000ffff denotes a masked-write
 * register (high 16 bits select which low bits are updated); otherwise
 * only the bits set in @rsvd are writable.
 */
static u32 reg_write(u32 old, u32 new, u32 rsvd)
{
	if (rsvd == 0x0000ffff) {
		old &= ~(new >> 16);
		old |= new & (new >> 16);
	} else {
		old &= ~rsvd;
		old |= new & rsvd;
	}

	return old;
}

/*
 * Is @reg write-only? Either its NONPRIV access field says WR, or it is in
 * the per-platform wo_registers[] exception table.
 */
static bool wo_register(struct intel_engine_cs *engine, u32 reg)
{
	enum intel_platform platform = INTEL_INFO(engine->i915)->platform;
	int i;

	if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
	    RING_FORCE_TO_NONPRIV_ACCESS_WR)
		return true;

	for (i = 0; i < ARRAY_SIZE(wo_registers); i++) {
		if (wo_registers[i].platform == platform &&
		    wo_registers[i].reg == reg)
			return true;
	}

	return false;
}

/*
 * Is @reg one of the engine-relative timestamp registers? Those change on
 * their own, so read-back comparisons against them are meaningless.
 */
static bool timestamp(const struct intel_engine_cs *engine, u32 reg)
{
	reg = (reg - engine->mmio_base) & ~RING_FORCE_TO_NONPRIV_ACCESS_MASK;
	switch (reg) {
	case 0x358:
	case 0x35c:
	case 0x3a8:
		return true;

	default:
		return false;
	}
}

/* Is @reg whitelisted read-only (NONPRIV access field == RD)? */
static bool ro_register(u32 reg)
{
	if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
	    RING_FORCE_TO_NONPRIV_ACCESS_RD)
		return true;

	return false;
}

/* Number of whitelist entries that are writable (not marked read-only). */
static int whitelist_writable_count(struct intel_engine_cs *engine)
{
	int count = engine->whitelist.count;
	int i;

	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		if (ro_register(reg))
			count--;
	}

	return count;
}
/*
 * For every register on @ce's engine whitelist (skipping write-only and
 * auto-incrementing timestamp registers), write a battery of garbage
 * values and their complements from a user batch (LRI), read each result
 * back (SRM), and check that the observed values follow the register's
 * write mask as modelled by reg_write(). Read-only registers must never
 * change. The register's original value is saved first and restored (LRM)
 * so no garbage is left behind in the context image.
 *
 * Returns 0 on success; -EINVAL on a mismatch, or the first setup error.
 * On a hang while futzing, the GT is wedged and the test aborted.
 */
static int check_dirty_whitelist(struct intel_context *ce)
{
	const u32 values[] = {
		0x00000000,
		0x01010101,
		0x10100101,
		0x03030303,
		0x30300303,
		0x05050505,
		0x50500505,
		0x0f0f0f0f,
		0xf00ff00f,
		0x10101010,
		0xf0f01010,
		0x30303030,
		0xa0a03030,
		0x50505050,
		0xc0c05050,
		0xf0f0f0f0,
		0x11111111,
		0x33333333,
		0x55555555,
		0x0000ffff,
		0x00ff00ff,
		0xff0000ff,
		0xffff00ff,
		0xffffffff,
	};
	struct intel_engine_cs *engine = ce->engine;
	struct i915_vma *scratch;
	struct i915_vma *batch;
	int err = 0, i, v, sz;
	u32 *cs, *results;

	/* Room for the original value plus one readback per write */
	sz = (2 * ARRAY_SIZE(values) + 1) * sizeof(u32);
	scratch = __vm_create_scratch_for_read_pinned(ce->vm, sz);
	if (IS_ERR(scratch))
		return PTR_ERR(scratch);

	batch = create_batch(ce->vm);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto out_scratch;
	}

	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
		struct i915_gem_ww_ctx ww;
		u64 addr = scratch->node.start;
		struct i915_request *rq;
		u32 srm, lrm, rsvd;
		u32 expect;
		int idx;
		bool ro_reg;

		if (wo_register(engine, reg))
			continue;

		if (timestamp(engine, reg))
			continue; /* timestamps are expected to autoincrement */

		ro_reg = ro_register(reg);

		/* ww locking: lock both objects and the context together */
		i915_gem_ww_ctx_init(&ww, false);
retry:
		cs = NULL;
		err = i915_gem_object_lock(scratch->obj, &ww);
		if (!err)
			err = i915_gem_object_lock(batch->obj, &ww);
		if (!err)
			err = intel_context_pin_ww(ce, &ww);
		if (err)
			goto out;

		cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
		if (IS_ERR(cs)) {
			err = PTR_ERR(cs);
			goto out_ctx;
		}

		results = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
		if (IS_ERR(results)) {
			err = PTR_ERR(results);
			goto out_unmap_batch;
		}

		/* Clear non priv flags */
		reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;

		/* gen8+ SRM/LRM take 64b addresses (+1 opcode variant) */
		srm = MI_STORE_REGISTER_MEM;
		lrm = MI_LOAD_REGISTER_MEM;
		if (GRAPHICS_VER(engine->i915) >= 8)
			lrm++, srm++;

		pr_debug("%s: Writing garbage to %x\n",
			 engine->name, reg);

		/* SRM original */
		*cs++ = srm;
		*cs++ = reg;
		*cs++ = lower_32_bits(addr);
		*cs++ = upper_32_bits(addr);

		idx = 1;
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			/* LRI garbage */
			*cs++ = MI_LOAD_REGISTER_IMM(1);
			*cs++ = reg;
			*cs++ = values[v];

			/* SRM result */
			*cs++ = srm;
			*cs++ = reg;
			*cs++ = lower_32_bits(addr + sizeof(u32) * idx);
			*cs++ = upper_32_bits(addr + sizeof(u32) * idx);
			idx++;
		}
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			/* LRI garbage */
			*cs++ = MI_LOAD_REGISTER_IMM(1);
			*cs++ = reg;
			*cs++ = ~values[v];

			/* SRM result */
			*cs++ = srm;
			*cs++ = reg;
			*cs++ = lower_32_bits(addr + sizeof(u32) * idx);
			*cs++ = upper_32_bits(addr + sizeof(u32) * idx);
			idx++;
		}
		GEM_BUG_ON(idx * sizeof(u32) > scratch->size);

		/* LRM original -- don't leave garbage in the context! */
		*cs++ = lrm;
		*cs++ = reg;
		*cs++ = lower_32_bits(addr);
		*cs++ = upper_32_bits(addr);

		*cs++ = MI_BATCH_BUFFER_END;

		i915_gem_object_flush_map(batch->obj);
		i915_gem_object_unpin_map(batch->obj);
		intel_gt_chipset_flush(engine->gt);
		cs = NULL;

		rq = i915_request_create(ce);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto out_unmap_scratch;
		}

		if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
			err = engine->emit_init_breadcrumb(rq);
			if (err)
				goto err_request;
		}

		err = i915_request_await_object(rq, batch->obj, false);
		if (err == 0)
			err = i915_vma_move_to_active(batch, rq, 0);
		if (err)
			goto err_request;

		err = i915_request_await_object(rq, scratch->obj, true);
		if (err == 0)
			err = i915_vma_move_to_active(scratch, rq,
						      EXEC_OBJECT_WRITE);
		if (err)
			goto err_request;

		err = engine->emit_bb_start(rq,
					    batch->node.start, PAGE_SIZE,
					    0);
		if (err)
			goto err_request;

err_request:
		err = request_add_sync(rq, err);
		if (err) {
			pr_err("%s: Futzing %x timedout; cancelling test\n",
			       engine->name, reg);
			intel_gt_set_wedged(engine->gt);
			goto out_unmap_scratch;
		}

		GEM_BUG_ON(values[ARRAY_SIZE(values) - 1] != 0xffffffff);
		if (!ro_reg) {
			/* detect write masking */
			rsvd = results[ARRAY_SIZE(values)];
			if (!rsvd) {
				pr_err("%s: Unable to write to whitelisted register %x\n",
				       engine->name, reg);
				err = -EINVAL;
				goto out_unmap_scratch;
			}
		} else {
			rsvd = 0;
		}

		/* Compare each readback against the modelled write result */
		expect = results[0];
		idx = 1;
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			if (ro_reg)
				expect = results[0];
			else
				expect = reg_write(expect, values[v], rsvd);

			if (results[idx] != expect)
				err++;
			idx++;
		}
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			if (ro_reg)
				expect = results[0];
			else
				expect = reg_write(expect, ~values[v], rsvd);

			if (results[idx] != expect)
				err++;
			idx++;
		}
		if (err) {
			pr_err("%s: %d mismatch between values written to whitelisted register [%x], and values read back!\n",
			       engine->name, err, reg);

			if (ro_reg)
				pr_info("%s: Whitelisted read-only register: %x, original value %08x\n",
					engine->name, reg, results[0]);
			else
				pr_info("%s: Whitelisted register: %x, original value %08x, rsvd %08x\n",
					engine->name, reg, results[0], rsvd);

			/* Replay the comparison verbosely for the log */
			expect = results[0];
			idx = 1;
			for (v = 0; v < ARRAY_SIZE(values); v++) {
				u32 w = values[v];

				if (ro_reg)
					expect = results[0];
				else
					expect = reg_write(expect, w, rsvd);
				pr_info("Wrote %08x, read %08x, expect %08x\n",
					w, results[idx], expect);
				idx++;
			}
			for (v = 0; v < ARRAY_SIZE(values); v++) {
				u32 w = ~values[v];

				if (ro_reg)
					expect = results[0];
				else
					expect = reg_write(expect, w, rsvd);
				pr_info("Wrote %08x, read %08x, expect %08x\n",
					w, results[idx], expect);
				idx++;
			}

			err = -EINVAL;
		}
out_unmap_scratch:
		i915_gem_object_unpin_map(scratch->obj);
out_unmap_batch:
		if (cs)
			i915_gem_object_unpin_map(batch->obj);
out_ctx:
		intel_context_unpin(ce);
out:
		if (err == -EDEADLK) {
			err = i915_gem_ww_ctx_backoff(&ww);
			if (!err)
				goto retry;
		}
		i915_gem_ww_ctx_fini(&ww);
		if (err)
			break;
	}

	if (igt_flush_test(engine->i915))
		err = -EIO;

	i915_vma_unpin_and_release(&batch, 0);
out_scratch:
	i915_vma_unpin_and_release(&scratch, 0);
	return err;
}

/*
 * Selftest entry point: run check_dirty_whitelist() on every engine that
 * has a whitelist, each in its own freshly created context.
 */
static int live_dirty_whitelist(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* Can the user write to the whitelisted registers? */

	if (GRAPHICS_VER(gt->i915) < 7) /* minimum requirement for LRI, SRM, LRM */
		return 0;

	for_each_engine(engine, gt, id) {
		struct intel_context *ce;
		int err;

		if (engine->whitelist.count == 0)
			continue;

		ce = intel_context_create(engine);
		if (IS_ERR(ce))
			return PTR_ERR(ce);

		err = check_dirty_whitelist(ce);
		intel_context_put(ce);
		if (err)
			return err;
	}

	return 0;
}

static int live_reset_whitelist(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	/* If we reset the gpu, we should not lose the RING_NONPRIV */
	igt_global_reset_lock(gt);

	for_each_engine(engine, gt, id) {
		if (engine->whitelist.count == 0)
			continue;

		if (intel_has_reset_engine(gt)) {
			if (intel_engine_uses_guc(engine)) {
				struct intel_selftest_saved_policy saved;
				int err2;

				err = intel_selftest_modify_policy(engine, &saved,
Harrison SELFTEST_SCHEDULER_MODIFY_FAST_RESET); 8153a4bfa09SRahul Kumar Singh if (err) 8163a4bfa09SRahul Kumar Singh goto out; 8173a4bfa09SRahul Kumar Singh 8183a4bfa09SRahul Kumar Singh err = check_whitelist_across_reset(engine, 8193a4bfa09SRahul Kumar Singh do_guc_reset, 8203a4bfa09SRahul Kumar Singh "guc"); 8213a4bfa09SRahul Kumar Singh 8223a4bfa09SRahul Kumar Singh err2 = intel_selftest_restore_policy(engine, &saved); 8233a4bfa09SRahul Kumar Singh if (err == 0) 8243a4bfa09SRahul Kumar Singh err = err2; 8253a4bfa09SRahul Kumar Singh } else { 826112ed2d3SChris Wilson err = check_whitelist_across_reset(engine, 827112ed2d3SChris Wilson do_engine_reset, 828112ed2d3SChris Wilson "engine"); 8293a4bfa09SRahul Kumar Singh } 8303a4bfa09SRahul Kumar Singh 831112ed2d3SChris Wilson if (err) 832112ed2d3SChris Wilson goto out; 833112ed2d3SChris Wilson } 834112ed2d3SChris Wilson 835bb3d4c9dSChris Wilson if (intel_has_gpu_reset(gt)) { 836112ed2d3SChris Wilson err = check_whitelist_across_reset(engine, 837112ed2d3SChris Wilson do_device_reset, 838112ed2d3SChris Wilson "device"); 839112ed2d3SChris Wilson if (err) 840112ed2d3SChris Wilson goto out; 841112ed2d3SChris Wilson } 842bb3d4c9dSChris Wilson } 843112ed2d3SChris Wilson 844112ed2d3SChris Wilson out: 845bb3d4c9dSChris Wilson igt_global_reset_unlock(gt); 846112ed2d3SChris Wilson return err; 847112ed2d3SChris Wilson } 848112ed2d3SChris Wilson 84904adaba8SChris Wilson static int read_whitelisted_registers(struct intel_context *ce, 850112ed2d3SChris Wilson struct i915_vma *results) 851112ed2d3SChris Wilson { 85204adaba8SChris Wilson struct intel_engine_cs *engine = ce->engine; 853112ed2d3SChris Wilson struct i915_request *rq; 854112ed2d3SChris Wilson int i, err = 0; 855112ed2d3SChris Wilson u32 srm, *cs; 856112ed2d3SChris Wilson 85704adaba8SChris Wilson rq = intel_context_create_request(ce); 858112ed2d3SChris Wilson if (IS_ERR(rq)) 859112ed2d3SChris Wilson return PTR_ERR(rq); 860112ed2d3SChris Wilson 861cd9ba7b6SChris Wilson 
i915_vma_lock(results); 862cd9ba7b6SChris Wilson err = i915_request_await_object(rq, results->obj, true); 863cd9ba7b6SChris Wilson if (err == 0) 864cd9ba7b6SChris Wilson err = i915_vma_move_to_active(results, rq, EXEC_OBJECT_WRITE); 865cd9ba7b6SChris Wilson i915_vma_unlock(results); 866cd9ba7b6SChris Wilson if (err) 867cd9ba7b6SChris Wilson goto err_req; 868cd9ba7b6SChris Wilson 869112ed2d3SChris Wilson srm = MI_STORE_REGISTER_MEM; 870c816723bSLucas De Marchi if (GRAPHICS_VER(engine->i915) >= 8) 871112ed2d3SChris Wilson srm++; 872112ed2d3SChris Wilson 873112ed2d3SChris Wilson cs = intel_ring_begin(rq, 4 * engine->whitelist.count); 874112ed2d3SChris Wilson if (IS_ERR(cs)) { 875112ed2d3SChris Wilson err = PTR_ERR(cs); 876112ed2d3SChris Wilson goto err_req; 877112ed2d3SChris Wilson } 878112ed2d3SChris Wilson 879112ed2d3SChris Wilson for (i = 0; i < engine->whitelist.count; i++) { 880112ed2d3SChris Wilson u64 offset = results->node.start + sizeof(u32) * i; 881767662bcSRobert M. Fosha u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg); 882767662bcSRobert M. Fosha 8836b441c62SMika Kuoppala /* Clear non priv flags */ 8846b441c62SMika Kuoppala reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK; 885112ed2d3SChris Wilson 886112ed2d3SChris Wilson *cs++ = srm; 887767662bcSRobert M. 
Fosha *cs++ = reg; 888112ed2d3SChris Wilson *cs++ = lower_32_bits(offset); 889112ed2d3SChris Wilson *cs++ = upper_32_bits(offset); 890112ed2d3SChris Wilson } 891112ed2d3SChris Wilson intel_ring_advance(rq, cs); 892112ed2d3SChris Wilson 893112ed2d3SChris Wilson err_req: 89441f0bc49SChris Wilson return request_add_sync(rq, err); 895112ed2d3SChris Wilson } 896112ed2d3SChris Wilson 89704adaba8SChris Wilson static int scrub_whitelisted_registers(struct intel_context *ce) 898112ed2d3SChris Wilson { 89904adaba8SChris Wilson struct intel_engine_cs *engine = ce->engine; 900112ed2d3SChris Wilson struct i915_request *rq; 901112ed2d3SChris Wilson struct i915_vma *batch; 902112ed2d3SChris Wilson int i, err = 0; 903112ed2d3SChris Wilson u32 *cs; 904112ed2d3SChris Wilson 90504adaba8SChris Wilson batch = create_batch(ce->vm); 906112ed2d3SChris Wilson if (IS_ERR(batch)) 907112ed2d3SChris Wilson return PTR_ERR(batch); 908112ed2d3SChris Wilson 90974827b53SMaarten Lankhorst cs = i915_gem_object_pin_map_unlocked(batch->obj, I915_MAP_WC); 910112ed2d3SChris Wilson if (IS_ERR(cs)) { 911112ed2d3SChris Wilson err = PTR_ERR(cs); 912112ed2d3SChris Wilson goto err_batch; 913112ed2d3SChris Wilson } 914112ed2d3SChris Wilson 915767662bcSRobert M. Fosha *cs++ = MI_LOAD_REGISTER_IMM(whitelist_writable_count(engine)); 916112ed2d3SChris Wilson for (i = 0; i < engine->whitelist.count; i++) { 917767662bcSRobert M. Fosha u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg); 918767662bcSRobert M. Fosha 919767662bcSRobert M. Fosha if (ro_register(reg)) 920767662bcSRobert M. Fosha continue; 921767662bcSRobert M. Fosha 9226b441c62SMika Kuoppala /* Clear non priv flags */ 9236b441c62SMika Kuoppala reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK; 9246b441c62SMika Kuoppala 925767662bcSRobert M. 
Fosha *cs++ = reg; 926112ed2d3SChris Wilson *cs++ = 0xffffffff; 927112ed2d3SChris Wilson } 928112ed2d3SChris Wilson *cs++ = MI_BATCH_BUFFER_END; 929112ed2d3SChris Wilson 930112ed2d3SChris Wilson i915_gem_object_flush_map(batch->obj); 931baea429dSTvrtko Ursulin intel_gt_chipset_flush(engine->gt); 932112ed2d3SChris Wilson 93304adaba8SChris Wilson rq = intel_context_create_request(ce); 934112ed2d3SChris Wilson if (IS_ERR(rq)) { 935112ed2d3SChris Wilson err = PTR_ERR(rq); 936112ed2d3SChris Wilson goto err_unpin; 937112ed2d3SChris Wilson } 938112ed2d3SChris Wilson 939112ed2d3SChris Wilson if (engine->emit_init_breadcrumb) { /* Be nice if we hang */ 940112ed2d3SChris Wilson err = engine->emit_init_breadcrumb(rq); 941112ed2d3SChris Wilson if (err) 942112ed2d3SChris Wilson goto err_request; 943112ed2d3SChris Wilson } 944112ed2d3SChris Wilson 9451d5b7773SChris Wilson i915_vma_lock(batch); 9461d5b7773SChris Wilson err = i915_request_await_object(rq, batch->obj, false); 9471d5b7773SChris Wilson if (err == 0) 9481d5b7773SChris Wilson err = i915_vma_move_to_active(batch, rq, 0); 9491d5b7773SChris Wilson i915_vma_unlock(batch); 9501d5b7773SChris Wilson if (err) 9511d5b7773SChris Wilson goto err_request; 9521d5b7773SChris Wilson 953112ed2d3SChris Wilson /* Perform the writes from an unprivileged "user" batch */ 954112ed2d3SChris Wilson err = engine->emit_bb_start(rq, batch->node.start, 0, 0); 955112ed2d3SChris Wilson 956112ed2d3SChris Wilson err_request: 95741f0bc49SChris Wilson err = request_add_sync(rq, err); 958112ed2d3SChris Wilson 959112ed2d3SChris Wilson err_unpin: 960112ed2d3SChris Wilson i915_gem_object_unpin_map(batch->obj); 961112ed2d3SChris Wilson err_batch: 962112ed2d3SChris Wilson i915_vma_unpin_and_release(&batch, 0); 963112ed2d3SChris Wilson return err; 964112ed2d3SChris Wilson } 965112ed2d3SChris Wilson 966112ed2d3SChris Wilson struct regmask { 967112ed2d3SChris Wilson i915_reg_t reg; 9683e6e4c21SLucas De Marchi u8 graphics_ver; 969112ed2d3SChris Wilson }; 
/*
 * Return true if @reg appears in @tbl for the current graphics version.
 * Used to match per-platform exception lists below.
 */
static bool find_reg(struct drm_i915_private *i915,
		     i915_reg_t reg,
		     const struct regmask *tbl,
		     unsigned long count)
{
	u32 offset = i915_mmio_reg_offset(reg);

	while (count--) {
		if (GRAPHICS_VER(i915) == tbl->graphics_ver &&
		    i915_mmio_reg_offset(tbl->reg) == offset)
			return true;
		tbl++;
	}

	return false;
}

/* Registers excused from the "must be context saved" check. */
static bool pardon_reg(struct drm_i915_private *i915, i915_reg_t reg)
{
	/* Alas, we must pardon some whitelists. Mistakes already made */
	static const struct regmask pardon[] = {
		{ GEN9_CTX_PREEMPT_REG, 9 },
		{ GEN8_L3SQCREG4, 9 },
	};

	return find_reg(i915, reg, pardon, ARRAY_SIZE(pardon));
}

/*
 * Comparison callback: expect the two reads to be equal (register is
 * context saved), unless the register is pardoned.
 */
static bool result_eq(struct intel_engine_cs *engine,
		      u32 a, u32 b, i915_reg_t reg)
{
	if (a != b && !pardon_reg(engine->i915, reg)) {
		pr_err("Whitelisted register 0x%4x not context saved: A=%08x, B=%08x\n",
		       i915_mmio_reg_offset(reg), a, b);
		return false;
	}

	return true;
}

/* Registers whose written value cannot be read back. */
static bool writeonly_reg(struct drm_i915_private *i915, i915_reg_t reg)
{
	/* Some registers do not seem to behave and our writes unreadable */
	static const struct regmask wo[] = {
		{ GEN9_SLICE_COMMON_ECO_CHICKEN1, 9 },
	};

	return find_reg(i915, reg, wo, ARRAY_SIZE(wo));
}

/*
 * Comparison callback: expect the two reads to differ (the scrub write
 * took effect), unless the register is known write-only.
 */
static bool result_neq(struct intel_engine_cs *engine,
		       u32 a, u32 b, i915_reg_t reg)
{
	if (a == b && !writeonly_reg(engine->i915, reg)) {
		pr_err("Whitelist register 0x%4x:%08x was unwritable\n",
		       i915_mmio_reg_offset(reg), a);
		return false;
	}

	return true;
}

/*
 * Compare two whitelist read-back buffers (@A vs @B) element by element
 * with @fn, skipping read-only (RD access) whitelist entries.
 * Returns 0 on success, -EINVAL if any comparison fails.
 */
static int
check_whitelisted_registers(struct intel_engine_cs *engine,
			    struct i915_vma *A,
			    struct i915_vma *B,
			    bool (*fn)(struct intel_engine_cs *engine,
				       u32 a, u32 b,
				       i915_reg_t reg))
{
	u32 *a, *b;
	int i, err;

	a = i915_gem_object_pin_map_unlocked(A->obj, I915_MAP_WB);
	if (IS_ERR(a))
		return PTR_ERR(a);

	b = i915_gem_object_pin_map_unlocked(B->obj, I915_MAP_WB);
	if (IS_ERR(b)) {
		err = PTR_ERR(b);
		goto err_a;
	}

	err = 0;
	for (i = 0; i < engine->whitelist.count; i++) {
		const struct i915_wa *wa = &engine->whitelist.list[i];

		if (i915_mmio_reg_offset(wa->reg) &
		    RING_FORCE_TO_NONPRIV_ACCESS_RD)
			continue;

		if (!fn(engine, a[i], b[i], wa->reg))
			err = -EINVAL;
	}

	i915_gem_object_unpin_map(B->obj);
err_a:
	i915_gem_object_unpin_map(A->obj);
	return err;
}

/*
 * live_isolated_whitelist - scrub the whitelist from one context and
 * verify a second context still observes the default register values,
 * i.e. whitelisted registers are saved/restored per context.
 */
static int live_isolated_whitelist(void *arg)
{
	struct intel_gt *gt = arg;
	struct {
		struct i915_vma *scratch[2];
	} client[2] = {};
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int i, err = 0;

	/*
	 * Check that a write into a whitelist register works, but
	 * invisible to a second context.
	 */

	if (!intel_engines_has_context_isolation(gt->i915))
		return 0;

	for (i = 0; i < ARRAY_SIZE(client); i++) {
		client[i].scratch[0] =
			__vm_create_scratch_for_read_pinned(gt->vm, 4096);
		if (IS_ERR(client[i].scratch[0])) {
			err = PTR_ERR(client[i].scratch[0]);
			goto err;
		}

		client[i].scratch[1] =
			__vm_create_scratch_for_read_pinned(gt->vm, 4096);
		if (IS_ERR(client[i].scratch[1])) {
			err = PTR_ERR(client[i].scratch[1]);
			i915_vma_unpin_and_release(&client[i].scratch[0], 0);
			goto err;
		}
	}

	for_each_engine(engine, gt, id) {
		struct intel_context *ce[2];

		if (!engine->kernel_context->vm)
			continue;

		if (!whitelist_writable_count(engine))
			continue;

		ce[0] = intel_context_create(engine);
		if (IS_ERR(ce[0])) {
			err = PTR_ERR(ce[0]);
			break;
		}
		ce[1] = intel_context_create(engine);
		if (IS_ERR(ce[1])) {
			err = PTR_ERR(ce[1]);
			intel_context_put(ce[0]);
			break;
		}

		/* Read default values */
		err = read_whitelisted_registers(ce[0], client[0].scratch[0]);
		if (err)
			goto err_ce;

		/* Try to overwrite registers (should only affect ctx0) */
		err = scrub_whitelisted_registers(ce[0]);
		if (err)
			goto err_ce;

		/* Read values from ctx1, we expect these to be defaults */
		err = read_whitelisted_registers(ce[1], client[1].scratch[0]);
		if (err)
			goto err_ce;

		/* Verify that both reads return the same default values */
		err = check_whitelisted_registers(engine,
						  client[0].scratch[0],
						  client[1].scratch[0],
						  result_eq);
		if (err)
			goto err_ce;

		/* Read back the updated values in ctx0 */
		err = read_whitelisted_registers(ce[0], client[0].scratch[1]);
		if (err)
			goto err_ce;

		/* User should be granted privilege to overwrite regs */
		err = check_whitelisted_registers(engine,
						  client[0].scratch[0],
						  client[0].scratch[1],
						  result_neq);
err_ce:
		intel_context_put(ce[1]);
		intel_context_put(ce[0]);
		if (err)
			break;
	}

err:
	for (i = 0; i < ARRAY_SIZE(client); i++) {
		i915_vma_unpin_and_release(&client[i].scratch[1], 0);
		i915_vma_unpin_and_release(&client[i].scratch[0], 0);
	}

	if (igt_flush_test(gt->i915))
		err = -EIO;

	return err;
}

/*
 * Verify the GT, engine and context workaround lists in @lists are still
 * applied, logging @str for context. Returns true only if every list
 * verifies on every engine.
 */
static bool
verify_wa_lists(struct intel_gt *gt, struct wa_lists *lists,
		const char *str)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	bool ok = true;

	ok &= wa_list_verify(gt, &lists->gt_wa_list, str);

	for_each_engine(engine, gt, id) {
		struct intel_context *ce;

		ce = intel_context_create(engine);
		if (IS_ERR(ce))
			return false;

		ok &= engine_wa_list_verify(ce,
					    &lists->engine[id].wa_list,
					    str) == 0;

		ok &= engine_wa_list_verify(ce,
					    &lists->engine[id].ctx_wa_list,
					    str) == 0;

		intel_context_put(ce);
	}

	return ok;
}

/*
 * live_gpu_reset_workarounds - check that all workaround lists are
 * reapplied after a full-GPU reset.
 */
static int
live_gpu_reset_workarounds(void *arg)
{
	struct intel_gt *gt = arg;
	intel_wakeref_t wakeref;
	struct wa_lists *lists;
	bool ok;

	if (!intel_has_gpu_reset(gt))
		return 0;

	/* wa_lists is too large for the stack; allocate it. */
	lists = kzalloc(sizeof(*lists), GFP_KERNEL);
	if (!lists)
		return -ENOMEM;

	pr_info("Verifying after GPU reset...\n");

	igt_global_reset_lock(gt);
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

	reference_lists_init(gt, lists);

	ok = verify_wa_lists(gt, lists, "before reset");
	if (!ok)
		goto out;

	intel_gt_reset(gt, ALL_ENGINES, "live_workarounds");

	ok = verify_wa_lists(gt, lists, "after reset");

out:
	reference_lists_fini(gt, lists);
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
	igt_global_reset_unlock(gt);
	kfree(lists);

	return ok ? 0 : -ESRCH;
}

/*
 * live_engine_reset_workarounds - check that all workaround lists
 * survive a per-engine reset, both while idle and while a spinner is
 * executing on the engine. Under GuC submission the idle resets are
 * skipped (the GuC owns engine resets) and the scheduling policy is
 * temporarily switched to fast reset.
 */
static int
live_engine_reset_workarounds(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct intel_context *ce;
	struct igt_spinner spin;
	struct i915_request *rq;
	intel_wakeref_t wakeref;
	struct wa_lists *lists;
	int ret = 0;

	if (!intel_has_reset_engine(gt))
		return 0;

	/* wa_lists is too large for the stack; allocate it. */
	lists = kzalloc(sizeof(*lists), GFP_KERNEL);
	if (!lists)
		return -ENOMEM;

	igt_global_reset_lock(gt);
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

	reference_lists_init(gt, lists);

	for_each_engine(engine, gt, id) {
		struct intel_selftest_saved_policy saved;
		bool using_guc = intel_engine_uses_guc(engine);
		bool ok;
		int ret2;

		pr_info("Verifying after %s reset...\n", engine->name);
		ret = intel_selftest_modify_policy(engine, &saved,
						   SELFTEST_SCHEDULER_MODIFY_FAST_RESET);
		if (ret)
			break;

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			ret = PTR_ERR(ce);
			goto restore;
		}

		if (!using_guc) {
			ok = verify_wa_lists(gt, lists, "before reset");
			if (!ok) {
				ret = -ESRCH;
				goto err;
			}

			ret = intel_engine_reset(engine, "live_workarounds:idle");
			if (ret) {
				pr_err("%s: Reset failed while idle\n", engine->name);
				goto err;
			}

			ok = verify_wa_lists(gt, lists, "after idle reset");
			if (!ok) {
				ret = -ESRCH;
				goto err;
			}
		}

		ret = igt_spinner_init(&spin, engine->gt);
		if (ret)
			goto err;

		rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
		if (IS_ERR(rq)) {
			ret = PTR_ERR(rq);
			igt_spinner_fini(&spin);
			goto err;
		}

		ret = request_add_spin(rq, &spin);
		if (ret) {
			pr_err("%s: Spinner failed to start\n", engine->name);
			igt_spinner_fini(&spin);
			goto err;
		}

		/* Ensure the spinner hasn't aborted */
		if (i915_request_completed(rq)) {
			ret = -ETIMEDOUT;
			goto skip;
		}

		if (!using_guc) {
			ret = intel_engine_reset(engine, "live_workarounds:active");
			if (ret) {
				pr_err("%s: Reset failed on an active spinner\n",
				       engine->name);
				igt_spinner_fini(&spin);
				goto err;
			}
		}

		/* Ensure the reset happens and kills the engine */
		if (ret == 0)
			ret = intel_selftest_wait_for_rq(rq);

skip:
		igt_spinner_end(&spin);
		igt_spinner_fini(&spin);

		ok = verify_wa_lists(gt, lists, "after busy reset");
		if (!ok)
			ret = -ESRCH;

err:
		intel_context_put(ce);

restore:
		/* Always restore the scheduling policy; keep the first error. */
		ret2 = intel_selftest_restore_policy(engine, &saved);
		if (ret == 0)
			ret = ret2;
		if (ret)
			break;
	}

	reference_lists_fini(gt, lists);
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
	igt_global_reset_unlock(gt);
	kfree(lists);

	igt_flush_test(gt->i915);

	return ret;
}

/* Entry point: run the live workaround selftests unless the GT is wedged. */
int intel_workarounds_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_dirty_whitelist),
		SUBTEST(live_reset_whitelist),
		SUBTEST(live_isolated_whitelist),
		SUBTEST(live_gpu_reset_workarounds),
		SUBTEST(live_engine_reset_workarounds),
	};

	if (intel_gt_is_wedged(to_gt(i915)))
		return 0;

	return intel_gt_live_subtests(tests, to_gt(i915));
}