124f90d66SChris Wilson // SPDX-License-Identifier: MIT
2112ed2d3SChris Wilson /*
3112ed2d3SChris Wilson * Copyright © 2018 Intel Corporation
4112ed2d3SChris Wilson */
5112ed2d3SChris Wilson
6b508d01fSJani Nikula #include "gem/i915_gem_internal.h"
710be98a7SChris Wilson #include "gem/i915_gem_pm.h"
8750e76b4SChris Wilson #include "gt/intel_engine_user.h"
9baea429dSTvrtko Ursulin #include "gt/intel_gt.h"
10112ed2d3SChris Wilson #include "i915_selftest.h"
11112ed2d3SChris Wilson #include "intel_reset.h"
12112ed2d3SChris Wilson
13112ed2d3SChris Wilson #include "selftests/igt_flush_test.h"
14112ed2d3SChris Wilson #include "selftests/igt_reset.h"
15112ed2d3SChris Wilson #include "selftests/igt_spinner.h"
163a4bfa09SRahul Kumar Singh #include "selftests/intel_scheduler_helpers.h"
17112ed2d3SChris Wilson #include "selftests/mock_drm.h"
18112ed2d3SChris Wilson
1910be98a7SChris Wilson #include "gem/selftests/igt_gem_utils.h"
2010be98a7SChris Wilson #include "gem/selftests/mock_context.h"
2110be98a7SChris Wilson
/*
 * Per-platform registers that cannot be read back (write-only), in
 * addition to any slot already flagged RING_FORCE_TO_NONPRIV_ACCESS_WR
 * (see wo_register() below). Such registers are skipped by the dirty
 * whitelist checks since a readback would not return what was written.
 */
static const struct wo_register {
	enum intel_platform platform;
	u32 reg;
} wo_registers[] = {
	{ INTEL_GEMINILAKE, 0x731c }
};
28112ed2d3SChris Wilson
/*
 * Reference snapshots of the workaround lists, rebuilt from scratch by
 * reference_lists_init(): one global GT list plus, for each engine, the
 * engine (MMIO) list and the context workaround list. The selftests
 * compare the live hardware state against these references.
 */
struct wa_lists {
	struct i915_wa_list gt_wa_list;
	struct {
		struct i915_wa_list wa_list;
		struct i915_wa_list ctx_wa_list;
	} engine[I915_NUM_ENGINES];
};
36112ed2d3SChris Wilson
request_add_sync(struct i915_request * rq,int err)3741f0bc49SChris Wilson static int request_add_sync(struct i915_request *rq, int err)
3841f0bc49SChris Wilson {
3941f0bc49SChris Wilson i915_request_get(rq);
4041f0bc49SChris Wilson i915_request_add(rq);
4141f0bc49SChris Wilson if (i915_request_wait(rq, 0, HZ / 5) < 0)
4241f0bc49SChris Wilson err = -EIO;
4341f0bc49SChris Wilson i915_request_put(rq);
4441f0bc49SChris Wilson
4541f0bc49SChris Wilson return err;
4641f0bc49SChris Wilson }
4741f0bc49SChris Wilson
request_add_spin(struct i915_request * rq,struct igt_spinner * spin)4841f0bc49SChris Wilson static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
4941f0bc49SChris Wilson {
5041f0bc49SChris Wilson int err = 0;
5141f0bc49SChris Wilson
5241f0bc49SChris Wilson i915_request_get(rq);
5341f0bc49SChris Wilson i915_request_add(rq);
5441f0bc49SChris Wilson if (spin && !igt_wait_for_spinner(spin, rq))
5541f0bc49SChris Wilson err = -ETIMEDOUT;
5641f0bc49SChris Wilson i915_request_put(rq);
5741f0bc49SChris Wilson
5841f0bc49SChris Wilson return err;
5941f0bc49SChris Wilson }
6041f0bc49SChris Wilson
61112ed2d3SChris Wilson static void
reference_lists_init(struct intel_gt * gt,struct wa_lists * lists)62bb3d4c9dSChris Wilson reference_lists_init(struct intel_gt *gt, struct wa_lists *lists)
63112ed2d3SChris Wilson {
64112ed2d3SChris Wilson struct intel_engine_cs *engine;
65112ed2d3SChris Wilson enum intel_engine_id id;
66112ed2d3SChris Wilson
67112ed2d3SChris Wilson memset(lists, 0, sizeof(*lists));
68112ed2d3SChris Wilson
69a10234fdSTvrtko Ursulin wa_init_start(&lists->gt_wa_list, gt, "GT_REF", "global");
70d0a65249SVenkata Sandeep Dhanalakota gt_init_workarounds(gt, &lists->gt_wa_list);
71112ed2d3SChris Wilson wa_init_finish(&lists->gt_wa_list);
72112ed2d3SChris Wilson
735d904e3cSTvrtko Ursulin for_each_engine(engine, gt, id) {
74112ed2d3SChris Wilson struct i915_wa_list *wal = &lists->engine[id].wa_list;
75112ed2d3SChris Wilson
76a10234fdSTvrtko Ursulin wa_init_start(wal, gt, "REF", engine->name);
77112ed2d3SChris Wilson engine_init_workarounds(engine, wal);
78112ed2d3SChris Wilson wa_init_finish(wal);
79fde93886STvrtko Ursulin
80fde93886STvrtko Ursulin __intel_engine_init_ctx_wa(engine,
81fde93886STvrtko Ursulin &lists->engine[id].ctx_wa_list,
823e1f0a51SJohn Harrison "CTX_REF");
83112ed2d3SChris Wilson }
84112ed2d3SChris Wilson }
85112ed2d3SChris Wilson
86112ed2d3SChris Wilson static void
reference_lists_fini(struct intel_gt * gt,struct wa_lists * lists)87bb3d4c9dSChris Wilson reference_lists_fini(struct intel_gt *gt, struct wa_lists *lists)
88112ed2d3SChris Wilson {
89112ed2d3SChris Wilson struct intel_engine_cs *engine;
90112ed2d3SChris Wilson enum intel_engine_id id;
91112ed2d3SChris Wilson
925d904e3cSTvrtko Ursulin for_each_engine(engine, gt, id)
93112ed2d3SChris Wilson intel_wa_list_free(&lists->engine[id].wa_list);
94112ed2d3SChris Wilson
95112ed2d3SChris Wilson intel_wa_list_free(&lists->gt_wa_list);
96112ed2d3SChris Wilson }
97112ed2d3SChris Wilson
/*
 * Submit a request on @ce that snapshots all RING_FORCE_TO_NONPRIV
 * slots of the engine into a freshly allocated internal object using
 * MI_STORE_REGISTER_MEM (one dword per slot).
 *
 * The request is submitted but NOT waited upon; the caller must
 * serialise against it (e.g. via i915_gem_object_set_to_cpu_domain())
 * before reading the results. Returns the result object on success,
 * or an ERR_PTR() on failure.
 */
static struct drm_i915_gem_object *
read_nonprivs(struct intel_context *ce)
{
	struct intel_engine_cs *engine = ce->engine;
	const u32 base = engine->mmio_base;
	struct drm_i915_gem_object *result;
	struct i915_request *rq;
	struct i915_vma *vma;
	u32 srm, *cs;
	int err;
	int i;

	result = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
	if (IS_ERR(result))
		return result;

	i915_gem_object_set_cache_coherency(result, I915_CACHE_LLC);

	/* Poison the buffer so any slot the GPU fails to write stands out. */
	cs = i915_gem_object_pin_map_unlocked(result, I915_MAP_WB);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_obj;
	}
	memset(cs, 0xc5, PAGE_SIZE);
	i915_gem_object_flush_map(result);
	i915_gem_object_unpin_map(result);

	/* The SRM below uses a global GTT address, so pin into the GGTT. */
	vma = i915_vma_instance(result, &engine->gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
	if (err)
		goto err_obj;

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_pin;
	}

	err = igt_vma_move_to_active_unlocked(vma, rq, EXEC_OBJECT_WRITE);
	if (err)
		goto err_req;

	/* Gen8+ SRM encoding differs by one in the command dword. */
	srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
	if (GRAPHICS_VER(engine->i915) >= 8)
		srm++;

	cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_req;
	}

	/* One 4-dword SRM per NONPRIV slot; slot i lands at dword i. */
	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		*cs++ = srm;
		*cs++ = i915_mmio_reg_offset(RING_FORCE_TO_NONPRIV(base, i));
		*cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
		*cs++ = 0;
	}
	intel_ring_advance(rq, cs);

	i915_request_add(rq);
	i915_vma_unpin(vma);

	return result;

err_req:
	/* Even on error the request must be added to flush its state. */
	i915_request_add(rq);
err_pin:
	i915_vma_unpin(vma);
err_obj:
	i915_gem_object_put(result);
	return ERR_PTR(err);
}
176112ed2d3SChris Wilson
177112ed2d3SChris Wilson static u32
get_whitelist_reg(const struct intel_engine_cs * engine,unsigned int i)178112ed2d3SChris Wilson get_whitelist_reg(const struct intel_engine_cs *engine, unsigned int i)
179112ed2d3SChris Wilson {
180112ed2d3SChris Wilson i915_reg_t reg = i < engine->whitelist.count ?
181112ed2d3SChris Wilson engine->whitelist.list[i].reg :
182112ed2d3SChris Wilson RING_NOPID(engine->mmio_base);
183112ed2d3SChris Wilson
184112ed2d3SChris Wilson return i915_mmio_reg_offset(reg);
185112ed2d3SChris Wilson }
186112ed2d3SChris Wilson
187112ed2d3SChris Wilson static void
print_results(const struct intel_engine_cs * engine,const u32 * results)188112ed2d3SChris Wilson print_results(const struct intel_engine_cs *engine, const u32 *results)
189112ed2d3SChris Wilson {
190112ed2d3SChris Wilson unsigned int i;
191112ed2d3SChris Wilson
192112ed2d3SChris Wilson for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
193112ed2d3SChris Wilson u32 expected = get_whitelist_reg(engine, i);
194112ed2d3SChris Wilson u32 actual = results[i];
195112ed2d3SChris Wilson
196112ed2d3SChris Wilson pr_info("RING_NONPRIV[%d]: expected 0x%08x, found 0x%08x\n",
197112ed2d3SChris Wilson i, expected, actual);
198112ed2d3SChris Wilson }
199112ed2d3SChris Wilson }
200112ed2d3SChris Wilson
/*
 * Read back every RING_FORCE_TO_NONPRIV slot through @ce (see
 * read_nonprivs()) and compare against the expected whitelist.
 * Returns 0 if all slots match, -EINVAL on the first mismatch, and
 * -EIO if the GT ended up wedged while waiting for the readback.
 */
static int check_whitelist(struct intel_context *ce)
{
	struct intel_engine_cs *engine = ce->engine;
	struct drm_i915_gem_object *results;
	struct intel_wedge_me wedge;
	u32 *vaddr;
	int err;
	int i;

	results = read_nonprivs(ce);
	if (IS_ERR(results))
		return PTR_ERR(results);

	err = 0;
	i915_gem_object_lock(results, NULL);
	/* Serialises against the readback request; wedge the GT on hang. */
	intel_wedge_on_timeout(&wedge, engine->gt, HZ / 5) /* safety net! */
		err = i915_gem_object_set_to_cpu_domain(results, false);

	if (intel_gt_is_wedged(engine->gt))
		err = -EIO;
	if (err)
		goto out_put;

	vaddr = i915_gem_object_pin_map(results, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out_put;
	}

	/* Each dword i holds the SRM capture of NONPRIV slot i. */
	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		u32 expected = get_whitelist_reg(engine, i);
		u32 actual = vaddr[i];

		if (expected != actual) {
			print_results(engine, vaddr);
			pr_err("Invalid RING_NONPRIV[%d], expected 0x%08x, found 0x%08x\n",
			       i, expected, actual);

			err = -EINVAL;
			break;
		}
	}

	i915_gem_object_unpin_map(results);
out_put:
	i915_gem_object_unlock(results);
	i915_gem_object_put(results);
	return err;
}
250112ed2d3SChris Wilson
/* Full GT reset restricted to @engine's mask; always reports success. */
static int do_device_reset(struct intel_engine_cs *engine)
{
	intel_gt_reset(engine->gt, engine->mask, "live_workarounds");
	return 0;
}
256112ed2d3SChris Wilson
/* Per-engine reset; propagates the reset attempt's result. */
static int do_engine_reset(struct intel_engine_cs *engine)
{
	return intel_engine_reset(engine, "live_workarounds");
}
261112ed2d3SChris Wilson
/*
 * With GuC submission the reset is triggered and performed by the GuC
 * itself, so there is nothing for the host to do here.
 */
static int do_guc_reset(struct intel_engine_cs *engine)
{
	/* Currently a no-op as the reset is handled by GuC */
	return 0;
}
2673a4bfa09SRahul Kumar Singh
/*
 * Start a spinner on @engine inside a new throwaway context, so that a
 * subsequent reset has an active non-default context to act upon. On
 * success *rq points at the spinning request for the caller to monitor.
 */
static int
switch_to_scratch_context(struct intel_engine_cs *engine,
			  struct igt_spinner *spin,
			  struct i915_request **rq)
{
	struct intel_context *ce;
	int err = 0;

	ce = intel_context_create(engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	*rq = igt_spinner_create_request(spin, ce, MI_NOOP);
	intel_context_put(ce);

	if (IS_ERR(*rq)) {
		/*
		 * The request was never created, so there is nothing to
		 * spin; clear the local alias so the cleanup below does
		 * not call igt_spinner_end() on an idle spinner.
		 */
		spin = NULL;
		err = PTR_ERR(*rq);
		goto err;
	}

	err = request_add_spin(*rq, spin);
err:
	/* On any failure, stop the spinner so it cannot wedge the GT. */
	if (err && spin)
		igt_spinner_end(spin);

	return err;
}
296112ed2d3SChris Wilson
/*
 * Core whitelist-vs-reset check: verify the RING_NONPRIV whitelist is
 * valid before @reset, that it survives the reset performed while a
 * hostile (spinning) context occupies the engine, and that a brand-new
 * context created after the reset also observes the whitelist.
 * @name is only used for logging.
 */
static int check_whitelist_across_reset(struct intel_engine_cs *engine,
					int (*reset)(struct intel_engine_cs *),
					const char *name)
{
	struct intel_context *ce, *tmp;
	struct igt_spinner spin;
	struct i915_request *rq;
	intel_wakeref_t wakeref;
	int err;

	pr_info("Checking %d whitelisted registers on %s (RING_NONPRIV) [%s]\n",
		engine->whitelist.count, engine->name, name);

	ce = intel_context_create(engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	err = igt_spinner_init(&spin, engine->gt);
	if (err)
		goto out_ctx;

	/* Baseline: the whitelist must be correct before any reset. */
	err = check_whitelist(ce);
	if (err) {
		pr_err("Invalid whitelist *before* %s reset!\n", name);
		goto out_spin;
	}

	/* Occupy the engine with a scratch context so the reset is not idle. */
	err = switch_to_scratch_context(engine, &spin, &rq);
	if (err)
		goto out_spin;

	/* Ensure the spinner hasn't aborted */
	if (i915_request_completed(rq)) {
		pr_err("%s spinner failed to start\n", name);
		err = -ETIMEDOUT;
		goto out_spin;
	}

	with_intel_runtime_pm(engine->uncore->rpm, wakeref)
		err = reset(engine);

	/* Ensure the reset happens and kills the engine */
	if (err == 0)
		err = intel_selftest_wait_for_rq(rq);

	igt_spinner_end(&spin);

	if (err) {
		pr_err("%s reset failed\n", name);
		goto out_spin;
	}

	/* The original context must still see the whitelist post-reset. */
	err = check_whitelist(ce);
	if (err) {
		pr_err("Whitelist not preserved in context across %s reset!\n",
		       name);
		goto out_spin;
	}

	/* Swap to a fresh context and re-check from a clean slate. */
	tmp = intel_context_create(engine);
	if (IS_ERR(tmp)) {
		err = PTR_ERR(tmp);
		goto out_spin;
	}
	intel_context_put(ce);
	ce = tmp;

	err = check_whitelist(ce);
	if (err) {
		pr_err("Invalid whitelist *after* %s reset in fresh context!\n",
		       name);
		goto out_spin;
	}

out_spin:
	igt_spinner_fini(&spin);
out_ctx:
	intel_context_put(ce);
	return err;
}
377112ed2d3SChris Wilson
create_batch(struct i915_address_space * vm)378e6ba7648SChris Wilson static struct i915_vma *create_batch(struct i915_address_space *vm)
379112ed2d3SChris Wilson {
380112ed2d3SChris Wilson struct drm_i915_gem_object *obj;
381112ed2d3SChris Wilson struct i915_vma *vma;
382112ed2d3SChris Wilson int err;
383112ed2d3SChris Wilson
384e6ba7648SChris Wilson obj = i915_gem_object_create_internal(vm->i915, 16 * PAGE_SIZE);
385112ed2d3SChris Wilson if (IS_ERR(obj))
386112ed2d3SChris Wilson return ERR_CAST(obj);
387112ed2d3SChris Wilson
388a4e7ccdaSChris Wilson vma = i915_vma_instance(obj, vm, NULL);
389112ed2d3SChris Wilson if (IS_ERR(vma)) {
390112ed2d3SChris Wilson err = PTR_ERR(vma);
391112ed2d3SChris Wilson goto err_obj;
392112ed2d3SChris Wilson }
393112ed2d3SChris Wilson
394112ed2d3SChris Wilson err = i915_vma_pin(vma, 0, 0, PIN_USER);
395112ed2d3SChris Wilson if (err)
396112ed2d3SChris Wilson goto err_obj;
397112ed2d3SChris Wilson
398112ed2d3SChris Wilson return vma;
399112ed2d3SChris Wilson
400112ed2d3SChris Wilson err_obj:
401112ed2d3SChris Wilson i915_gem_object_put(obj);
402112ed2d3SChris Wilson return ERR_PTR(err);
403112ed2d3SChris Wilson }
404112ed2d3SChris Wilson
/*
 * Model the effect of writing @new over @old given the writable-bit
 * mask @rsvd discovered for the register. The special value 0x0000ffff
 * marks a masked register, where the high half of the write selects
 * which low bits take effect.
 */
static u32 reg_write(u32 old, u32 new, u32 rsvd)
{
	u32 mask = rsvd == 0x0000ffff ? new >> 16 : rsvd;

	return (old & ~mask) | (new & mask);
}
417112ed2d3SChris Wilson
wo_register(struct intel_engine_cs * engine,u32 reg)418112ed2d3SChris Wilson static bool wo_register(struct intel_engine_cs *engine, u32 reg)
419112ed2d3SChris Wilson {
420112ed2d3SChris Wilson enum intel_platform platform = INTEL_INFO(engine->i915)->platform;
421112ed2d3SChris Wilson int i;
422112ed2d3SChris Wilson
4231e2b7f49SJohn Harrison if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
4241e2b7f49SJohn Harrison RING_FORCE_TO_NONPRIV_ACCESS_WR)
4251e2b7f49SJohn Harrison return true;
4261e2b7f49SJohn Harrison
427112ed2d3SChris Wilson for (i = 0; i < ARRAY_SIZE(wo_registers); i++) {
428112ed2d3SChris Wilson if (wo_registers[i].platform == platform &&
429112ed2d3SChris Wilson wo_registers[i].reg == reg)
430112ed2d3SChris Wilson return true;
431112ed2d3SChris Wilson }
432112ed2d3SChris Wilson
433112ed2d3SChris Wilson return false;
434112ed2d3SChris Wilson }
435112ed2d3SChris Wilson
timestamp(const struct intel_engine_cs * engine,u32 reg)436c95ebab1SChris Wilson static bool timestamp(const struct intel_engine_cs *engine, u32 reg)
437c95ebab1SChris Wilson {
438c95ebab1SChris Wilson reg = (reg - engine->mmio_base) & ~RING_FORCE_TO_NONPRIV_ACCESS_MASK;
439c95ebab1SChris Wilson switch (reg) {
440c95ebab1SChris Wilson case 0x358:
441c95ebab1SChris Wilson case 0x35c:
442c95ebab1SChris Wilson case 0x3a8:
443c95ebab1SChris Wilson return true;
444c95ebab1SChris Wilson
445c95ebab1SChris Wilson default:
446c95ebab1SChris Wilson return false;
447c95ebab1SChris Wilson }
448c95ebab1SChris Wilson }
449c95ebab1SChris Wilson
ro_register(u32 reg)450767662bcSRobert M. Fosha static bool ro_register(u32 reg)
451767662bcSRobert M. Fosha {
4521e2b7f49SJohn Harrison if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
4531e2b7f49SJohn Harrison RING_FORCE_TO_NONPRIV_ACCESS_RD)
454767662bcSRobert M. Fosha return true;
455767662bcSRobert M. Fosha
456767662bcSRobert M. Fosha return false;
457767662bcSRobert M. Fosha }
458767662bcSRobert M. Fosha
whitelist_writable_count(struct intel_engine_cs * engine)459767662bcSRobert M. Fosha static int whitelist_writable_count(struct intel_engine_cs *engine)
460767662bcSRobert M. Fosha {
461767662bcSRobert M. Fosha int count = engine->whitelist.count;
462767662bcSRobert M. Fosha int i;
463767662bcSRobert M. Fosha
464767662bcSRobert M. Fosha for (i = 0; i < engine->whitelist.count; i++) {
465767662bcSRobert M. Fosha u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
466767662bcSRobert M. Fosha
467767662bcSRobert M. Fosha if (ro_register(reg))
468767662bcSRobert M. Fosha count--;
469767662bcSRobert M. Fosha }
470767662bcSRobert M. Fosha
471767662bcSRobert M. Fosha return count;
472767662bcSRobert M. Fosha }
473767662bcSRobert M. Fosha
check_dirty_whitelist(struct intel_context * ce)474e6ba7648SChris Wilson static int check_dirty_whitelist(struct intel_context *ce)
475112ed2d3SChris Wilson {
476112ed2d3SChris Wilson const u32 values[] = {
477112ed2d3SChris Wilson 0x00000000,
478112ed2d3SChris Wilson 0x01010101,
479112ed2d3SChris Wilson 0x10100101,
480112ed2d3SChris Wilson 0x03030303,
481112ed2d3SChris Wilson 0x30300303,
482112ed2d3SChris Wilson 0x05050505,
483112ed2d3SChris Wilson 0x50500505,
484112ed2d3SChris Wilson 0x0f0f0f0f,
485112ed2d3SChris Wilson 0xf00ff00f,
486112ed2d3SChris Wilson 0x10101010,
487112ed2d3SChris Wilson 0xf0f01010,
488112ed2d3SChris Wilson 0x30303030,
489112ed2d3SChris Wilson 0xa0a03030,
490112ed2d3SChris Wilson 0x50505050,
491112ed2d3SChris Wilson 0xc0c05050,
492112ed2d3SChris Wilson 0xf0f0f0f0,
493112ed2d3SChris Wilson 0x11111111,
494112ed2d3SChris Wilson 0x33333333,
495112ed2d3SChris Wilson 0x55555555,
496112ed2d3SChris Wilson 0x0000ffff,
497112ed2d3SChris Wilson 0x00ff00ff,
498112ed2d3SChris Wilson 0xff0000ff,
499112ed2d3SChris Wilson 0xffff00ff,
500112ed2d3SChris Wilson 0xffffffff,
501112ed2d3SChris Wilson };
502e6ba7648SChris Wilson struct intel_engine_cs *engine = ce->engine;
503112ed2d3SChris Wilson struct i915_vma *scratch;
504112ed2d3SChris Wilson struct i915_vma *batch;
505a4d86249SChris Wilson int err = 0, i, v, sz;
506112ed2d3SChris Wilson u32 *cs, *results;
507112ed2d3SChris Wilson
508a4d86249SChris Wilson sz = (2 * ARRAY_SIZE(values) + 1) * sizeof(u32);
5092a665968SMaarten Lankhorst scratch = __vm_create_scratch_for_read_pinned(ce->vm, sz);
510112ed2d3SChris Wilson if (IS_ERR(scratch))
511112ed2d3SChris Wilson return PTR_ERR(scratch);
512112ed2d3SChris Wilson
513e6ba7648SChris Wilson batch = create_batch(ce->vm);
514112ed2d3SChris Wilson if (IS_ERR(batch)) {
515112ed2d3SChris Wilson err = PTR_ERR(batch);
516112ed2d3SChris Wilson goto out_scratch;
517112ed2d3SChris Wilson }
518112ed2d3SChris Wilson
519112ed2d3SChris Wilson for (i = 0; i < engine->whitelist.count; i++) {
520112ed2d3SChris Wilson u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
52174827b53SMaarten Lankhorst struct i915_gem_ww_ctx ww;
5228e4ee5e8SChris Wilson u64 addr = i915_vma_offset(scratch);
523112ed2d3SChris Wilson struct i915_request *rq;
524112ed2d3SChris Wilson u32 srm, lrm, rsvd;
525112ed2d3SChris Wilson u32 expect;
526112ed2d3SChris Wilson int idx;
527aee20aaeSJohn Harrison bool ro_reg;
528112ed2d3SChris Wilson
529112ed2d3SChris Wilson if (wo_register(engine, reg))
530112ed2d3SChris Wilson continue;
531112ed2d3SChris Wilson
532c95ebab1SChris Wilson if (timestamp(engine, reg))
533c95ebab1SChris Wilson continue; /* timestamps are expected to autoincrement */
534c95ebab1SChris Wilson
535aee20aaeSJohn Harrison ro_reg = ro_register(reg);
536767662bcSRobert M. Fosha
53774827b53SMaarten Lankhorst i915_gem_ww_ctx_init(&ww, false);
53874827b53SMaarten Lankhorst retry:
53974827b53SMaarten Lankhorst cs = NULL;
54074827b53SMaarten Lankhorst err = i915_gem_object_lock(scratch->obj, &ww);
54174827b53SMaarten Lankhorst if (!err)
54274827b53SMaarten Lankhorst err = i915_gem_object_lock(batch->obj, &ww);
54374827b53SMaarten Lankhorst if (!err)
54474827b53SMaarten Lankhorst err = intel_context_pin_ww(ce, &ww);
54574827b53SMaarten Lankhorst if (err)
54674827b53SMaarten Lankhorst goto out;
54774827b53SMaarten Lankhorst
54874827b53SMaarten Lankhorst cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
54974827b53SMaarten Lankhorst if (IS_ERR(cs)) {
55074827b53SMaarten Lankhorst err = PTR_ERR(cs);
55174827b53SMaarten Lankhorst goto out_ctx;
55274827b53SMaarten Lankhorst }
55374827b53SMaarten Lankhorst
55474827b53SMaarten Lankhorst results = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
55574827b53SMaarten Lankhorst if (IS_ERR(results)) {
55674827b53SMaarten Lankhorst err = PTR_ERR(results);
55774827b53SMaarten Lankhorst goto out_unmap_batch;
55874827b53SMaarten Lankhorst }
55974827b53SMaarten Lankhorst
5606b441c62SMika Kuoppala /* Clear non priv flags */
5616b441c62SMika Kuoppala reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;
5626b441c62SMika Kuoppala
563112ed2d3SChris Wilson srm = MI_STORE_REGISTER_MEM;
564112ed2d3SChris Wilson lrm = MI_LOAD_REGISTER_MEM;
565c816723bSLucas De Marchi if (GRAPHICS_VER(engine->i915) >= 8)
566112ed2d3SChris Wilson lrm++, srm++;
567112ed2d3SChris Wilson
568112ed2d3SChris Wilson pr_debug("%s: Writing garbage to %x\n",
569112ed2d3SChris Wilson engine->name, reg);
570112ed2d3SChris Wilson
571112ed2d3SChris Wilson /* SRM original */
572112ed2d3SChris Wilson *cs++ = srm;
573112ed2d3SChris Wilson *cs++ = reg;
574112ed2d3SChris Wilson *cs++ = lower_32_bits(addr);
575112ed2d3SChris Wilson *cs++ = upper_32_bits(addr);
576112ed2d3SChris Wilson
577112ed2d3SChris Wilson idx = 1;
578112ed2d3SChris Wilson for (v = 0; v < ARRAY_SIZE(values); v++) {
579112ed2d3SChris Wilson /* LRI garbage */
580112ed2d3SChris Wilson *cs++ = MI_LOAD_REGISTER_IMM(1);
581112ed2d3SChris Wilson *cs++ = reg;
582112ed2d3SChris Wilson *cs++ = values[v];
583112ed2d3SChris Wilson
584112ed2d3SChris Wilson /* SRM result */
585112ed2d3SChris Wilson *cs++ = srm;
586112ed2d3SChris Wilson *cs++ = reg;
587112ed2d3SChris Wilson *cs++ = lower_32_bits(addr + sizeof(u32) * idx);
588112ed2d3SChris Wilson *cs++ = upper_32_bits(addr + sizeof(u32) * idx);
589112ed2d3SChris Wilson idx++;
590112ed2d3SChris Wilson }
591112ed2d3SChris Wilson for (v = 0; v < ARRAY_SIZE(values); v++) {
592112ed2d3SChris Wilson /* LRI garbage */
593112ed2d3SChris Wilson *cs++ = MI_LOAD_REGISTER_IMM(1);
594112ed2d3SChris Wilson *cs++ = reg;
595112ed2d3SChris Wilson *cs++ = ~values[v];
596112ed2d3SChris Wilson
597112ed2d3SChris Wilson /* SRM result */
598112ed2d3SChris Wilson *cs++ = srm;
599112ed2d3SChris Wilson *cs++ = reg;
600112ed2d3SChris Wilson *cs++ = lower_32_bits(addr + sizeof(u32) * idx);
601112ed2d3SChris Wilson *cs++ = upper_32_bits(addr + sizeof(u32) * idx);
602112ed2d3SChris Wilson idx++;
603112ed2d3SChris Wilson }
604112ed2d3SChris Wilson GEM_BUG_ON(idx * sizeof(u32) > scratch->size);
605112ed2d3SChris Wilson
606112ed2d3SChris Wilson /* LRM original -- don't leave garbage in the context! */
607112ed2d3SChris Wilson *cs++ = lrm;
608112ed2d3SChris Wilson *cs++ = reg;
609112ed2d3SChris Wilson *cs++ = lower_32_bits(addr);
610112ed2d3SChris Wilson *cs++ = upper_32_bits(addr);
611112ed2d3SChris Wilson
612112ed2d3SChris Wilson *cs++ = MI_BATCH_BUFFER_END;
613112ed2d3SChris Wilson
614112ed2d3SChris Wilson i915_gem_object_flush_map(batch->obj);
615112ed2d3SChris Wilson i915_gem_object_unpin_map(batch->obj);
616baea429dSTvrtko Ursulin intel_gt_chipset_flush(engine->gt);
61774827b53SMaarten Lankhorst cs = NULL;
618112ed2d3SChris Wilson
61974827b53SMaarten Lankhorst rq = i915_request_create(ce);
620112ed2d3SChris Wilson if (IS_ERR(rq)) {
621112ed2d3SChris Wilson err = PTR_ERR(rq);
62274827b53SMaarten Lankhorst goto out_unmap_scratch;
623112ed2d3SChris Wilson }
624112ed2d3SChris Wilson
625112ed2d3SChris Wilson if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
626112ed2d3SChris Wilson err = engine->emit_init_breadcrumb(rq);
627112ed2d3SChris Wilson if (err)
628112ed2d3SChris Wilson goto err_request;
629112ed2d3SChris Wilson }
630112ed2d3SChris Wilson
6311d5b7773SChris Wilson err = i915_vma_move_to_active(batch, rq, 0);
6321d5b7773SChris Wilson if (err)
6331d5b7773SChris Wilson goto err_request;
6341d5b7773SChris Wilson
635bd46aa22SChris Wilson err = i915_vma_move_to_active(scratch, rq,
636bd46aa22SChris Wilson EXEC_OBJECT_WRITE);
637bd46aa22SChris Wilson if (err)
638bd46aa22SChris Wilson goto err_request;
639bd46aa22SChris Wilson
640112ed2d3SChris Wilson err = engine->emit_bb_start(rq,
6418e4ee5e8SChris Wilson i915_vma_offset(batch), PAGE_SIZE,
642112ed2d3SChris Wilson 0);
643112ed2d3SChris Wilson if (err)
644112ed2d3SChris Wilson goto err_request;
645112ed2d3SChris Wilson
646112ed2d3SChris Wilson err_request:
64741f0bc49SChris Wilson err = request_add_sync(rq, err);
64841f0bc49SChris Wilson if (err) {
649112ed2d3SChris Wilson pr_err("%s: Futzing %x timedout; cancelling test\n",
650112ed2d3SChris Wilson engine->name, reg);
651bb3d4c9dSChris Wilson intel_gt_set_wedged(engine->gt);
65274827b53SMaarten Lankhorst goto out_unmap_scratch;
653112ed2d3SChris Wilson }
654112ed2d3SChris Wilson
655112ed2d3SChris Wilson GEM_BUG_ON(values[ARRAY_SIZE(values) - 1] != 0xffffffff);
656aee20aaeSJohn Harrison if (!ro_reg) {
657aee20aaeSJohn Harrison /* detect write masking */
658aee20aaeSJohn Harrison rsvd = results[ARRAY_SIZE(values)];
659112ed2d3SChris Wilson if (!rsvd) {
660112ed2d3SChris Wilson pr_err("%s: Unable to write to whitelisted register %x\n",
661112ed2d3SChris Wilson engine->name, reg);
662112ed2d3SChris Wilson err = -EINVAL;
66374827b53SMaarten Lankhorst goto out_unmap_scratch;
664112ed2d3SChris Wilson }
665cc649a9eSArnd Bergmann } else {
666cc649a9eSArnd Bergmann rsvd = 0;
667aee20aaeSJohn Harrison }
668112ed2d3SChris Wilson
669112ed2d3SChris Wilson expect = results[0];
670112ed2d3SChris Wilson idx = 1;
671112ed2d3SChris Wilson for (v = 0; v < ARRAY_SIZE(values); v++) {
672aee20aaeSJohn Harrison if (ro_reg)
673aee20aaeSJohn Harrison expect = results[0];
674aee20aaeSJohn Harrison else
675112ed2d3SChris Wilson expect = reg_write(expect, values[v], rsvd);
676aee20aaeSJohn Harrison
677112ed2d3SChris Wilson if (results[idx] != expect)
678112ed2d3SChris Wilson err++;
679112ed2d3SChris Wilson idx++;
680112ed2d3SChris Wilson }
681112ed2d3SChris Wilson for (v = 0; v < ARRAY_SIZE(values); v++) {
682aee20aaeSJohn Harrison if (ro_reg)
683aee20aaeSJohn Harrison expect = results[0];
684aee20aaeSJohn Harrison else
685112ed2d3SChris Wilson expect = reg_write(expect, ~values[v], rsvd);
686aee20aaeSJohn Harrison
687112ed2d3SChris Wilson if (results[idx] != expect)
688112ed2d3SChris Wilson err++;
689112ed2d3SChris Wilson idx++;
690112ed2d3SChris Wilson }
691112ed2d3SChris Wilson if (err) {
692112ed2d3SChris Wilson pr_err("%s: %d mismatch between values written to whitelisted register [%x], and values read back!\n",
693112ed2d3SChris Wilson engine->name, err, reg);
694112ed2d3SChris Wilson
695aee20aaeSJohn Harrison if (ro_reg)
696aee20aaeSJohn Harrison pr_info("%s: Whitelisted read-only register: %x, original value %08x\n",
697aee20aaeSJohn Harrison engine->name, reg, results[0]);
698aee20aaeSJohn Harrison else
699112ed2d3SChris Wilson pr_info("%s: Whitelisted register: %x, original value %08x, rsvd %08x\n",
700112ed2d3SChris Wilson engine->name, reg, results[0], rsvd);
701112ed2d3SChris Wilson
702112ed2d3SChris Wilson expect = results[0];
703112ed2d3SChris Wilson idx = 1;
704112ed2d3SChris Wilson for (v = 0; v < ARRAY_SIZE(values); v++) {
705112ed2d3SChris Wilson u32 w = values[v];
706112ed2d3SChris Wilson
707aee20aaeSJohn Harrison if (ro_reg)
708aee20aaeSJohn Harrison expect = results[0];
709aee20aaeSJohn Harrison else
710112ed2d3SChris Wilson expect = reg_write(expect, w, rsvd);
711112ed2d3SChris Wilson pr_info("Wrote %08x, read %08x, expect %08x\n",
712112ed2d3SChris Wilson w, results[idx], expect);
713112ed2d3SChris Wilson idx++;
714112ed2d3SChris Wilson }
715112ed2d3SChris Wilson for (v = 0; v < ARRAY_SIZE(values); v++) {
716112ed2d3SChris Wilson u32 w = ~values[v];
717112ed2d3SChris Wilson
718aee20aaeSJohn Harrison if (ro_reg)
719aee20aaeSJohn Harrison expect = results[0];
720aee20aaeSJohn Harrison else
721112ed2d3SChris Wilson expect = reg_write(expect, w, rsvd);
722112ed2d3SChris Wilson pr_info("Wrote %08x, read %08x, expect %08x\n",
723112ed2d3SChris Wilson w, results[idx], expect);
724112ed2d3SChris Wilson idx++;
725112ed2d3SChris Wilson }
726112ed2d3SChris Wilson
727112ed2d3SChris Wilson err = -EINVAL;
728112ed2d3SChris Wilson }
72974827b53SMaarten Lankhorst out_unmap_scratch:
730112ed2d3SChris Wilson i915_gem_object_unpin_map(scratch->obj);
73174827b53SMaarten Lankhorst out_unmap_batch:
73274827b53SMaarten Lankhorst if (cs)
73374827b53SMaarten Lankhorst i915_gem_object_unpin_map(batch->obj);
73474827b53SMaarten Lankhorst out_ctx:
73574827b53SMaarten Lankhorst intel_context_unpin(ce);
73674827b53SMaarten Lankhorst out:
73774827b53SMaarten Lankhorst if (err == -EDEADLK) {
73874827b53SMaarten Lankhorst err = i915_gem_ww_ctx_backoff(&ww);
73974827b53SMaarten Lankhorst if (!err)
74074827b53SMaarten Lankhorst goto retry;
74174827b53SMaarten Lankhorst }
74274827b53SMaarten Lankhorst i915_gem_ww_ctx_fini(&ww);
743112ed2d3SChris Wilson if (err)
744112ed2d3SChris Wilson break;
745112ed2d3SChris Wilson }
746112ed2d3SChris Wilson
747e6ba7648SChris Wilson if (igt_flush_test(engine->i915))
748112ed2d3SChris Wilson err = -EIO;
74974827b53SMaarten Lankhorst
750112ed2d3SChris Wilson i915_vma_unpin_and_release(&batch, 0);
751112ed2d3SChris Wilson out_scratch:
752112ed2d3SChris Wilson i915_vma_unpin_and_release(&scratch, 0);
753112ed2d3SChris Wilson return err;
754112ed2d3SChris Wilson }
755112ed2d3SChris Wilson
live_dirty_whitelist(void * arg)756112ed2d3SChris Wilson static int live_dirty_whitelist(void *arg)
757112ed2d3SChris Wilson {
758bb3d4c9dSChris Wilson struct intel_gt *gt = arg;
759112ed2d3SChris Wilson struct intel_engine_cs *engine;
760112ed2d3SChris Wilson enum intel_engine_id id;
761112ed2d3SChris Wilson
762112ed2d3SChris Wilson /* Can the user write to the whitelisted registers? */
763112ed2d3SChris Wilson
764c816723bSLucas De Marchi if (GRAPHICS_VER(gt->i915) < 7) /* minimum requirement for LRI, SRM, LRM */
765112ed2d3SChris Wilson return 0;
766112ed2d3SChris Wilson
7675d904e3cSTvrtko Ursulin for_each_engine(engine, gt, id) {
768e6ba7648SChris Wilson struct intel_context *ce;
769e6ba7648SChris Wilson int err;
770e6ba7648SChris Wilson
771112ed2d3SChris Wilson if (engine->whitelist.count == 0)
772112ed2d3SChris Wilson continue;
773112ed2d3SChris Wilson
774e6ba7648SChris Wilson ce = intel_context_create(engine);
775e6ba7648SChris Wilson if (IS_ERR(ce))
776e6ba7648SChris Wilson return PTR_ERR(ce);
777e6ba7648SChris Wilson
778e6ba7648SChris Wilson err = check_dirty_whitelist(ce);
779e6ba7648SChris Wilson intel_context_put(ce);
780112ed2d3SChris Wilson if (err)
781e6ba7648SChris Wilson return err;
782112ed2d3SChris Wilson }
783112ed2d3SChris Wilson
784e6ba7648SChris Wilson return 0;
785112ed2d3SChris Wilson }
786112ed2d3SChris Wilson
/*
 * live_reset_whitelist - verify the RING_NONPRIV whitelist survives resets.
 *
 * For each engine with a whitelist, exercises a per-engine reset (either
 * driven through GuC or directly, depending on the submission backend)
 * and, where supported, a full-device reset, checking the whitelist
 * contents across each via check_whitelist_across_reset().
 *
 * The global reset lock is held for the duration so no concurrent test
 * can trigger a conflicting reset.
 */
static int
live_reset_whitelist(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	/* If we reset the gpu, we should not lose the RING_NONPRIV */
	igt_global_reset_lock(gt);

	for_each_engine(engine, gt, id) {
		if (engine->whitelist.count == 0)
			continue;

		if (intel_has_reset_engine(gt)) {
			if (intel_engine_uses_guc(engine)) {
				/*
				 * With GuC submission, temporarily switch the
				 * scheduling policy so the reset fires without
				 * the usual heartbeat/ban delays.
				 */
				struct intel_selftest_saved_policy saved;
				int err2;

				err = intel_selftest_modify_policy(engine, &saved,
								   SELFTEST_SCHEDULER_MODIFY_FAST_RESET);
				if (err)
					goto out;

				err = check_whitelist_across_reset(engine,
								   do_guc_reset,
								   "guc");

				/* Restore the policy even if the check failed */
				err2 = intel_selftest_restore_policy(engine, &saved);
				if (err == 0)
					err = err2;
			} else {
				err = check_whitelist_across_reset(engine,
								   do_engine_reset,
								   "engine");
			}

			if (err)
				goto out;
		}

		if (intel_has_gpu_reset(gt)) {
			err = check_whitelist_across_reset(engine,
							   do_device_reset,
							   "device");
			if (err)
				goto out;
		}
	}

out:
	igt_global_reset_unlock(gt);
	return err;
}
841112ed2d3SChris Wilson
read_whitelisted_registers(struct intel_context * ce,struct i915_vma * results)84204adaba8SChris Wilson static int read_whitelisted_registers(struct intel_context *ce,
843112ed2d3SChris Wilson struct i915_vma *results)
844112ed2d3SChris Wilson {
84504adaba8SChris Wilson struct intel_engine_cs *engine = ce->engine;
846112ed2d3SChris Wilson struct i915_request *rq;
847112ed2d3SChris Wilson int i, err = 0;
848112ed2d3SChris Wilson u32 srm, *cs;
849112ed2d3SChris Wilson
85004adaba8SChris Wilson rq = intel_context_create_request(ce);
851112ed2d3SChris Wilson if (IS_ERR(rq))
852112ed2d3SChris Wilson return PTR_ERR(rq);
853112ed2d3SChris Wilson
854*4f16749fSAndrzej Hajda err = igt_vma_move_to_active_unlocked(results, rq, EXEC_OBJECT_WRITE);
855cd9ba7b6SChris Wilson if (err)
856cd9ba7b6SChris Wilson goto err_req;
857cd9ba7b6SChris Wilson
858112ed2d3SChris Wilson srm = MI_STORE_REGISTER_MEM;
859c816723bSLucas De Marchi if (GRAPHICS_VER(engine->i915) >= 8)
860112ed2d3SChris Wilson srm++;
861112ed2d3SChris Wilson
862112ed2d3SChris Wilson cs = intel_ring_begin(rq, 4 * engine->whitelist.count);
863112ed2d3SChris Wilson if (IS_ERR(cs)) {
864112ed2d3SChris Wilson err = PTR_ERR(cs);
865112ed2d3SChris Wilson goto err_req;
866112ed2d3SChris Wilson }
867112ed2d3SChris Wilson
868112ed2d3SChris Wilson for (i = 0; i < engine->whitelist.count; i++) {
8698e4ee5e8SChris Wilson u64 offset = i915_vma_offset(results) + sizeof(u32) * i;
870767662bcSRobert M. Fosha u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
871767662bcSRobert M. Fosha
8726b441c62SMika Kuoppala /* Clear non priv flags */
8736b441c62SMika Kuoppala reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;
874112ed2d3SChris Wilson
875112ed2d3SChris Wilson *cs++ = srm;
876767662bcSRobert M. Fosha *cs++ = reg;
877112ed2d3SChris Wilson *cs++ = lower_32_bits(offset);
878112ed2d3SChris Wilson *cs++ = upper_32_bits(offset);
879112ed2d3SChris Wilson }
880112ed2d3SChris Wilson intel_ring_advance(rq, cs);
881112ed2d3SChris Wilson
882112ed2d3SChris Wilson err_req:
88341f0bc49SChris Wilson return request_add_sync(rq, err);
884112ed2d3SChris Wilson }
885112ed2d3SChris Wilson
/*
 * scrub_whitelisted_registers - overwrite all writable whitelisted
 * registers from an unprivileged batch.
 *
 * Builds a user batch of MI_LOAD_REGISTER_IMM writes (0xffffffff to each
 * writable whitelist entry; read-only entries are skipped) and submits it
 * on @ce, waiting for completion.  Used by live_isolated_whitelist to
 * dirty one context's view of the registers.
 */
static int scrub_whitelisted_registers(struct intel_context *ce)
{
	struct intel_engine_cs *engine = ce->engine;
	struct i915_request *rq;
	struct i915_vma *batch;
	int i, err = 0;
	u32 *cs;

	batch = create_batch(ce->vm);
	if (IS_ERR(batch))
		return PTR_ERR(batch);

	cs = i915_gem_object_pin_map_unlocked(batch->obj, I915_MAP_WC);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_batch;
	}

	/* LRI count covers only the writable subset of the whitelist */
	*cs++ = MI_LOAD_REGISTER_IMM(whitelist_writable_count(engine));
	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		if (ro_register(reg))
			continue;

		/* Clear non priv flags */
		reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;

		*cs++ = reg;
		*cs++ = 0xffffffff;
	}
	*cs++ = MI_BATCH_BUFFER_END;

	i915_gem_object_flush_map(batch->obj);
	intel_gt_chipset_flush(engine->gt);

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
		err = engine->emit_init_breadcrumb(rq);
		if (err)
			goto err_request;
	}

	err = igt_vma_move_to_active_unlocked(batch, rq, 0);
	if (err)
		goto err_request;

	/* Perform the writes from an unprivileged "user" batch */
	err = engine->emit_bb_start(rq, i915_vma_offset(batch), 0, 0);

err_request:
	err = request_add_sync(rq, err);

err_unpin:
	i915_gem_object_unpin_map(batch->obj);
err_batch:
	i915_vma_unpin_and_release(&batch, 0);
	return err;
}
950112ed2d3SChris Wilson
/* A register paired with the graphics version its quirk applies to */
struct regmask {
	i915_reg_t reg;
	u8 graphics_ver;
};
955112ed2d3SChris Wilson
find_reg(struct drm_i915_private * i915,i915_reg_t reg,const struct regmask * tbl,unsigned long count)956112ed2d3SChris Wilson static bool find_reg(struct drm_i915_private *i915,
957112ed2d3SChris Wilson i915_reg_t reg,
958112ed2d3SChris Wilson const struct regmask *tbl,
959112ed2d3SChris Wilson unsigned long count)
960112ed2d3SChris Wilson {
961112ed2d3SChris Wilson u32 offset = i915_mmio_reg_offset(reg);
962112ed2d3SChris Wilson
963112ed2d3SChris Wilson while (count--) {
9643e6e4c21SLucas De Marchi if (GRAPHICS_VER(i915) == tbl->graphics_ver &&
965112ed2d3SChris Wilson i915_mmio_reg_offset(tbl->reg) == offset)
966112ed2d3SChris Wilson return true;
967112ed2d3SChris Wilson tbl++;
968112ed2d3SChris Wilson }
969112ed2d3SChris Wilson
970112ed2d3SChris Wilson return false;
971112ed2d3SChris Wilson }
972112ed2d3SChris Wilson
pardon_reg(struct drm_i915_private * i915,i915_reg_t reg)973112ed2d3SChris Wilson static bool pardon_reg(struct drm_i915_private *i915, i915_reg_t reg)
974112ed2d3SChris Wilson {
975112ed2d3SChris Wilson /* Alas, we must pardon some whitelists. Mistakes already made */
976112ed2d3SChris Wilson static const struct regmask pardon[] = {
9773e6e4c21SLucas De Marchi { GEN9_CTX_PREEMPT_REG, 9 },
97858bc2453SMatt Roper { _MMIO(0xb118), 9 }, /* GEN8_L3SQCREG4 */
979112ed2d3SChris Wilson };
980112ed2d3SChris Wilson
981112ed2d3SChris Wilson return find_reg(i915, reg, pardon, ARRAY_SIZE(pardon));
982112ed2d3SChris Wilson }
983112ed2d3SChris Wilson
result_eq(struct intel_engine_cs * engine,u32 a,u32 b,i915_reg_t reg)984112ed2d3SChris Wilson static bool result_eq(struct intel_engine_cs *engine,
985112ed2d3SChris Wilson u32 a, u32 b, i915_reg_t reg)
986112ed2d3SChris Wilson {
987112ed2d3SChris Wilson if (a != b && !pardon_reg(engine->i915, reg)) {
988112ed2d3SChris Wilson pr_err("Whitelisted register 0x%4x not context saved: A=%08x, B=%08x\n",
989112ed2d3SChris Wilson i915_mmio_reg_offset(reg), a, b);
990112ed2d3SChris Wilson return false;
991112ed2d3SChris Wilson }
992112ed2d3SChris Wilson
993112ed2d3SChris Wilson return true;
994112ed2d3SChris Wilson }
995112ed2d3SChris Wilson
writeonly_reg(struct drm_i915_private * i915,i915_reg_t reg)996112ed2d3SChris Wilson static bool writeonly_reg(struct drm_i915_private *i915, i915_reg_t reg)
997112ed2d3SChris Wilson {
998112ed2d3SChris Wilson /* Some registers do not seem to behave and our writes unreadable */
999112ed2d3SChris Wilson static const struct regmask wo[] = {
10003e6e4c21SLucas De Marchi { GEN9_SLICE_COMMON_ECO_CHICKEN1, 9 },
1001112ed2d3SChris Wilson };
1002112ed2d3SChris Wilson
1003112ed2d3SChris Wilson return find_reg(i915, reg, wo, ARRAY_SIZE(wo));
1004112ed2d3SChris Wilson }
1005112ed2d3SChris Wilson
result_neq(struct intel_engine_cs * engine,u32 a,u32 b,i915_reg_t reg)1006112ed2d3SChris Wilson static bool result_neq(struct intel_engine_cs *engine,
1007112ed2d3SChris Wilson u32 a, u32 b, i915_reg_t reg)
1008112ed2d3SChris Wilson {
1009112ed2d3SChris Wilson if (a == b && !writeonly_reg(engine->i915, reg)) {
1010112ed2d3SChris Wilson pr_err("Whitelist register 0x%4x:%08x was unwritable\n",
1011112ed2d3SChris Wilson i915_mmio_reg_offset(reg), a);
1012112ed2d3SChris Wilson return false;
1013112ed2d3SChris Wilson }
1014112ed2d3SChris Wilson
1015112ed2d3SChris Wilson return true;
1016112ed2d3SChris Wilson }
1017112ed2d3SChris Wilson
1018112ed2d3SChris Wilson static int
check_whitelisted_registers(struct intel_engine_cs * engine,struct i915_vma * A,struct i915_vma * B,bool (* fn)(struct intel_engine_cs * engine,u32 a,u32 b,i915_reg_t reg))1019112ed2d3SChris Wilson check_whitelisted_registers(struct intel_engine_cs *engine,
1020112ed2d3SChris Wilson struct i915_vma *A,
1021112ed2d3SChris Wilson struct i915_vma *B,
1022112ed2d3SChris Wilson bool (*fn)(struct intel_engine_cs *engine,
1023112ed2d3SChris Wilson u32 a, u32 b,
1024112ed2d3SChris Wilson i915_reg_t reg))
1025112ed2d3SChris Wilson {
1026112ed2d3SChris Wilson u32 *a, *b;
1027112ed2d3SChris Wilson int i, err;
1028112ed2d3SChris Wilson
102974827b53SMaarten Lankhorst a = i915_gem_object_pin_map_unlocked(A->obj, I915_MAP_WB);
1030112ed2d3SChris Wilson if (IS_ERR(a))
1031112ed2d3SChris Wilson return PTR_ERR(a);
1032112ed2d3SChris Wilson
103374827b53SMaarten Lankhorst b = i915_gem_object_pin_map_unlocked(B->obj, I915_MAP_WB);
1034112ed2d3SChris Wilson if (IS_ERR(b)) {
1035112ed2d3SChris Wilson err = PTR_ERR(b);
1036112ed2d3SChris Wilson goto err_a;
1037112ed2d3SChris Wilson }
1038112ed2d3SChris Wilson
1039112ed2d3SChris Wilson err = 0;
1040112ed2d3SChris Wilson for (i = 0; i < engine->whitelist.count; i++) {
1041361b6905SLionel Landwerlin const struct i915_wa *wa = &engine->whitelist.list[i];
1042361b6905SLionel Landwerlin
10431e2b7f49SJohn Harrison if (i915_mmio_reg_offset(wa->reg) &
10441e2b7f49SJohn Harrison RING_FORCE_TO_NONPRIV_ACCESS_RD)
1045361b6905SLionel Landwerlin continue;
1046361b6905SLionel Landwerlin
1047361b6905SLionel Landwerlin if (!fn(engine, a[i], b[i], wa->reg))
1048112ed2d3SChris Wilson err = -EINVAL;
1049112ed2d3SChris Wilson }
1050112ed2d3SChris Wilson
1051112ed2d3SChris Wilson i915_gem_object_unpin_map(B->obj);
1052112ed2d3SChris Wilson err_a:
1053112ed2d3SChris Wilson i915_gem_object_unpin_map(A->obj);
1054112ed2d3SChris Wilson return err;
1055112ed2d3SChris Wilson }
1056112ed2d3SChris Wilson
/*
 * live_isolated_whitelist - whitelist writes must not leak across contexts.
 *
 * For every engine with writable whitelist entries: capture the default
 * register values in context 0, scrub them from context 0, then verify
 * that context 1 still reads back the defaults (result_eq) while
 * context 0 observes its own writes (result_neq).
 */
static int live_isolated_whitelist(void *arg)
{
	struct intel_gt *gt = arg;
	struct {
		struct i915_vma *scratch[2];
	} client[2] = {};
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int i, err = 0;

	/*
	 * Check that a write into a whitelist register works, but
	 * invisible to a second context.
	 */

	if (!intel_engines_has_context_isolation(gt->i915))
		return 0;

	/* Two scratch buffers per client: before/after snapshots */
	for (i = 0; i < ARRAY_SIZE(client); i++) {
		client[i].scratch[0] =
			__vm_create_scratch_for_read_pinned(gt->vm, 4096);
		if (IS_ERR(client[i].scratch[0])) {
			err = PTR_ERR(client[i].scratch[0]);
			goto err;
		}

		client[i].scratch[1] =
			__vm_create_scratch_for_read_pinned(gt->vm, 4096);
		if (IS_ERR(client[i].scratch[1])) {
			err = PTR_ERR(client[i].scratch[1]);
			i915_vma_unpin_and_release(&client[i].scratch[0], 0);
			goto err;
		}
	}

	for_each_engine(engine, gt, id) {
		struct intel_context *ce[2];

		/* NOTE(review): engines without a ppGTT are skipped — presumably
		 * no per-context isolation is possible there; confirm. */
		if (!engine->kernel_context->vm)
			continue;

		if (!whitelist_writable_count(engine))
			continue;

		ce[0] = intel_context_create(engine);
		if (IS_ERR(ce[0])) {
			err = PTR_ERR(ce[0]);
			break;
		}
		ce[1] = intel_context_create(engine);
		if (IS_ERR(ce[1])) {
			err = PTR_ERR(ce[1]);
			intel_context_put(ce[0]);
			break;
		}

		/* Read default values */
		err = read_whitelisted_registers(ce[0], client[0].scratch[0]);
		if (err)
			goto err_ce;

		/* Try to overwrite registers (should only affect ctx0) */
		err = scrub_whitelisted_registers(ce[0]);
		if (err)
			goto err_ce;

		/* Read values from ctx1, we expect these to be defaults */
		err = read_whitelisted_registers(ce[1], client[1].scratch[0]);
		if (err)
			goto err_ce;

		/* Verify that both reads return the same default values */
		err = check_whitelisted_registers(engine,
						  client[0].scratch[0],
						  client[1].scratch[0],
						  result_eq);
		if (err)
			goto err_ce;

		/* Read back the updated values in ctx0 */
		err = read_whitelisted_registers(ce[0], client[0].scratch[1]);
		if (err)
			goto err_ce;

		/* User should be granted privilege to overwrite regs */
		err = check_whitelisted_registers(engine,
						  client[0].scratch[0],
						  client[0].scratch[1],
						  result_neq);
err_ce:
		intel_context_put(ce[1]);
		intel_context_put(ce[0]);
		if (err)
			break;
	}

err:
	for (i = 0; i < ARRAY_SIZE(client); i++) {
		i915_vma_unpin_and_release(&client[i].scratch[1], 0);
		i915_vma_unpin_and_release(&client[i].scratch[0], 0);
	}

	if (igt_flush_test(gt->i915))
		err = -EIO;

	return err;
}
1164112ed2d3SChris Wilson
1165fde93886STvrtko Ursulin static bool
verify_wa_lists(struct intel_gt * gt,struct wa_lists * lists,const char * str)116604adaba8SChris Wilson verify_wa_lists(struct intel_gt *gt, struct wa_lists *lists,
1167fde93886STvrtko Ursulin const char *str)
1168112ed2d3SChris Wilson {
116904adaba8SChris Wilson struct intel_engine_cs *engine;
117004adaba8SChris Wilson enum intel_engine_id id;
1171112ed2d3SChris Wilson bool ok = true;
1172112ed2d3SChris Wilson
11730957e931SMatt Roper ok &= wa_list_verify(gt, &lists->gt_wa_list, str);
1174112ed2d3SChris Wilson
117504adaba8SChris Wilson for_each_engine(engine, gt, id) {
117604adaba8SChris Wilson struct intel_context *ce;
117704adaba8SChris Wilson
117804adaba8SChris Wilson ce = intel_context_create(engine);
117904adaba8SChris Wilson if (IS_ERR(ce))
118004adaba8SChris Wilson return false;
1181fde93886STvrtko Ursulin
1182fde93886STvrtko Ursulin ok &= engine_wa_list_verify(ce,
1183112ed2d3SChris Wilson &lists->engine[id].wa_list,
1184112ed2d3SChris Wilson str) == 0;
1185fde93886STvrtko Ursulin
1186fde93886STvrtko Ursulin ok &= engine_wa_list_verify(ce,
1187fde93886STvrtko Ursulin &lists->engine[id].ctx_wa_list,
1188fde93886STvrtko Ursulin str) == 0;
118904adaba8SChris Wilson
119004adaba8SChris Wilson intel_context_put(ce);
1191112ed2d3SChris Wilson }
1192112ed2d3SChris Wilson
1193112ed2d3SChris Wilson return ok;
1194112ed2d3SChris Wilson }
1195112ed2d3SChris Wilson
/*
 * live_gpu_reset_workarounds - check workarounds survive a full GPU reset.
 *
 * Snapshots the reference workaround lists, verifies them, performs a
 * full-device reset, then verifies again.  Holds the global reset lock
 * and a runtime-pm wakeref for the duration.
 */
static int
live_gpu_reset_workarounds(void *arg)
{
	struct intel_gt *gt = arg;
	intel_wakeref_t wakeref;
	struct wa_lists *lists;
	bool ok;

	if (!intel_has_gpu_reset(gt))
		return 0;

	/* wa_lists is too large for the stack */
	lists = kzalloc(sizeof(*lists), GFP_KERNEL);
	if (!lists)
		return -ENOMEM;

	pr_info("Verifying after GPU reset...\n");

	igt_global_reset_lock(gt);
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

	reference_lists_init(gt, lists);

	ok = verify_wa_lists(gt, lists, "before reset");
	if (!ok)
		goto out;

	intel_gt_reset(gt, ALL_ENGINES, "live_workarounds");

	ok = verify_wa_lists(gt, lists, "after reset");

out:
	reference_lists_fini(gt, lists);
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
	igt_global_reset_unlock(gt);
	kfree(lists);

	return ok ? 0 : -ESRCH;
}
1234112ed2d3SChris Wilson
/*
 * live_engine_reset_workarounds - check workarounds survive engine resets.
 *
 * For each engine: verify the workaround lists, reset the engine while
 * idle and verify again, then reset it while a spinner batch is running
 * and verify once more.  With GuC submission the driver cannot trigger
 * a manual engine reset, so the idle/active resets are skipped and the
 * GuC is relied on to reset the hung spinner (fast-reset policy is
 * installed around each engine to keep the test quick).
 */
static int
live_engine_reset_workarounds(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct intel_context *ce;
	struct igt_spinner spin;
	struct i915_request *rq;
	intel_wakeref_t wakeref;
	struct wa_lists *lists;
	int ret = 0;

	if (!intel_has_reset_engine(gt))
		return 0;

	/* wa_lists is too large for the stack */
	lists = kzalloc(sizeof(*lists), GFP_KERNEL);
	if (!lists)
		return -ENOMEM;

	igt_global_reset_lock(gt);
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

	reference_lists_init(gt, lists);

	for_each_engine(engine, gt, id) {
		struct intel_selftest_saved_policy saved;
		bool using_guc = intel_engine_uses_guc(engine);
		bool ok;
		int ret2;

		pr_info("Verifying after %s reset...\n", engine->name);
		ret = intel_selftest_modify_policy(engine, &saved,
						   SELFTEST_SCHEDULER_MODIFY_FAST_RESET);
		if (ret)
			break;

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			ret = PTR_ERR(ce);
			goto restore;
		}

		if (!using_guc) {
			ok = verify_wa_lists(gt, lists, "before reset");
			if (!ok) {
				ret = -ESRCH;
				goto err;
			}

			ret = intel_engine_reset(engine, "live_workarounds:idle");
			if (ret) {
				pr_err("%s: Reset failed while idle\n", engine->name);
				goto err;
			}

			ok = verify_wa_lists(gt, lists, "after idle reset");
			if (!ok) {
				ret = -ESRCH;
				goto err;
			}
		}

		ret = igt_spinner_init(&spin, engine->gt);
		if (ret)
			goto err;

		rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
		if (IS_ERR(rq)) {
			ret = PTR_ERR(rq);
			igt_spinner_fini(&spin);
			goto err;
		}

		ret = request_add_spin(rq, &spin);
		if (ret) {
			pr_err("%s: Spinner failed to start\n", engine->name);
			igt_spinner_fini(&spin);
			goto err;
		}

		/* Ensure the spinner hasn't aborted */
		if (i915_request_completed(rq)) {
			ret = -ETIMEDOUT;
			goto skip;
		}

		if (!using_guc) {
			ret = intel_engine_reset(engine, "live_workarounds:active");
			if (ret) {
				pr_err("%s: Reset failed on an active spinner\n",
				       engine->name);
				igt_spinner_fini(&spin);
				goto err;
			}
		}

		/* Ensure the reset happens and kills the engine */
		if (ret == 0)
			ret = intel_selftest_wait_for_rq(rq);

skip:
		igt_spinner_end(&spin);
		igt_spinner_fini(&spin);

		ok = verify_wa_lists(gt, lists, "after busy reset");
		if (!ok)
			ret = -ESRCH;

err:
		intel_context_put(ce);

restore:
		/* Undo the fast-reset policy regardless of test outcome */
		ret2 = intel_selftest_restore_policy(engine, &saved);
		if (ret == 0)
			ret = ret2;
		if (ret)
			break;
	}

	reference_lists_fini(gt, lists);
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
	igt_global_reset_unlock(gt);
	kfree(lists);

	igt_flush_test(gt->i915);

	return ret;
}
1364112ed2d3SChris Wilson
intel_workarounds_live_selftests(struct drm_i915_private * i915)1365112ed2d3SChris Wilson int intel_workarounds_live_selftests(struct drm_i915_private *i915)
1366112ed2d3SChris Wilson {
1367112ed2d3SChris Wilson static const struct i915_subtest tests[] = {
1368112ed2d3SChris Wilson SUBTEST(live_dirty_whitelist),
1369112ed2d3SChris Wilson SUBTEST(live_reset_whitelist),
1370112ed2d3SChris Wilson SUBTEST(live_isolated_whitelist),
1371fde93886STvrtko Ursulin SUBTEST(live_gpu_reset_workarounds),
1372fde93886STvrtko Ursulin SUBTEST(live_engine_reset_workarounds),
1373112ed2d3SChris Wilson };
1374112ed2d3SChris Wilson
1375c14adcbdSMichał Winiarski if (intel_gt_is_wedged(to_gt(i915)))
1376112ed2d3SChris Wilson return 0;
1377112ed2d3SChris Wilson
1378c14adcbdSMichał Winiarski return intel_gt_live_subtests(tests, to_gt(i915));
1379112ed2d3SChris Wilson }
1380