/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "intel_context.h"
#include "intel_engine_pm.h"
#include "intel_gt_requests.h"
#include "intel_ring.h"
#include "selftest_rc6.h"

#include "selftests/i915_random.h"

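/*
 * Sum the residency counters for every RC6 state the platform exposes
 * (RC6, plus the deeper RC6p/RC6pp where available), so that time spent
 * in any of the powersaving states is counted.
 */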
static u64 rc6_residency(struct intel_rc6 *rc6)
{
	u64 result;

	/* XXX VLV_GT_MEDIA_RC6? */

	result = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6);
	if (HAS_RC6p(rc6_to_i915(rc6)))
		result += intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6p);
	if (HAS_RC6pp(rc6_to_i915(rc6)))
		result += intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6pp);

	return result;
}

int live_rc6_manual(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_rc6 *rc6 = &gt->rc6;
	intel_wakeref_t wakeref;
	u64 res[2];
	int err = 0;

	/*
	 * Our claim is that we can "encourage" the GPU to enter rc6 at will.
	 * Let's try it!
	 */

	if (!rc6->enabled)
		return 0;

	/* bsw/byt use a PCU and decouple RC6 from our manual control */
	if (IS_VALLEYVIEW(gt->i915) || IS_CHERRYVIEW(gt->i915))
		return 0;

	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

	/* Force RC6 off for starters */
	__intel_rc6_disable(rc6);
	msleep(1); /* wakeup is not immediate, takes about 100us on icl */

	res[0] = rc6_residency(rc6);
	msleep(250);
	res[1] = rc6_residency(rc6);
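	/*
	 * Residency is reported in ns; shifting right by 10 approximates
	 * a conversion to us, so complain if more than ~1us of RC6 was
	 * accrued while it was meant to be disabled.
	 */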
	if ((res[1] - res[0]) >> 10) {
		pr_err("RC6 residency increased by %lldus while disabled for 250ms!\n",
		       (res[1] - res[0]) >> 10);
		err = -EINVAL;
		goto out_unlock;
	}

	/* Manually enter RC6 */
	intel_rc6_park(rc6);

	res[0] = rc6_residency(rc6);
	msleep(100);
	res[1] = rc6_residency(rc6);

	if (res[1] == res[0]) {
		pr_err("Did not enter RC6! RC6_STATE=%08x, RC6_CONTROL=%08x, residency=%lld\n",
		       intel_uncore_read_fw(gt->uncore, GEN6_RC_STATE),
		       intel_uncore_read_fw(gt->uncore, GEN6_RC_CONTROL),
		       res[0]);
		err = -EINVAL;
	}

	/* Restore what should have been the original state! */
	intel_rc6_unpark(rc6);

out_unlock:
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
	return err;
}

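/*
 * Submit a request that stores GEN8_RC6_CTX_INFO into the context's
 * HWSP, returning a CPU pointer through which the stored value can be
 * read back once the request has executed.
 */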
static const u32 *__live_rc6_ctx(struct intel_context *ce)
{
	struct i915_request *rq;
	const u32 *result;
	u32 cmd;
	u32 *cs;

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq))
		return ERR_CAST(rq);

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs)) {
		i915_request_add(rq);
		return cs;
	}

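	/*
	 * On gen8+, MI_STORE_REGISTER_MEM takes a 64-bit GGTT address and
	 * so is one dword longer; incrementing the opcode bumps the
	 * command's embedded length field to match.
	 */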
	cmd = MI_STORE_REGISTER_MEM | MI_USE_GGTT;
	if (INTEL_GEN(rq->i915) >= 8)
		cmd++;

	*cs++ = cmd;
	*cs++ = i915_mmio_reg_offset(GEN8_RC6_CTX_INFO);
	*cs++ = ce->timeline->hwsp_offset + 8;
	*cs++ = 0;
	intel_ring_advance(rq, cs);

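	/* hwsp_offset is in bytes, hwsp_seqno a u32 *: +8 above is +2 here */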
	result = rq->hwsp_seqno + 2;
	i915_request_add(rq);

	return result;
}

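/*
 * Build an array of all engines on the GT and shuffle it, so that
 * repeated runs exercise the engines in a different order each time.
 */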
static struct intel_engine_cs **
randomised_engines(struct intel_gt *gt,
		   struct rnd_state *prng,
		   unsigned int *count)
{
	struct intel_engine_cs *engine, **engines;
	enum intel_engine_id id;
	int n;

	n = 0;
	for_each_engine(engine, gt, id)
		n++;
	if (!n)
		return NULL;

	engines = kmalloc_array(n, sizeof(*engines), GFP_KERNEL);
	if (!engines)
		return NULL;

	n = 0;
	for_each_engine(engine, gt, id)
		engines[n++] = engine;

	i915_prandom_shuffle(engines, sizeof(*engines), n, prng);

	*count = n;
	return engines;
}

int live_rc6_ctx_wa(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs **engines;
	unsigned int n, count;
	I915_RND_STATE(prng);
	int err = 0;

	/* A read of CTX_INFO upsets rc6. Poke the bear! */
	if (INTEL_GEN(gt->i915) < 8)
		return 0;

	engines = randomised_engines(gt, &prng, &count);
	if (!engines)
		return 0;

	for (n = 0; n < count; n++) {
		struct intel_engine_cs *engine = engines[n];
		int pass;

		for (pass = 0; pass < 2; pass++) {
			struct intel_context *ce;
			unsigned int resets =
				i915_reset_engine_count(&gt->i915->gpu_error,
							engine);
			const u32 *res;

			/* Use a sacrificial context */
			ce = intel_context_create(engine);
			if (IS_ERR(ce)) {
				err = PTR_ERR(ce);
				goto out;
			}

			intel_engine_pm_get(engine);
			res = __live_rc6_ctx(ce);
			intel_engine_pm_put(engine);
			intel_context_put(ce);
			if (IS_ERR(res)) {
				err = PTR_ERR(res);
				goto out;
			}

			if (intel_gt_wait_for_idle(gt, HZ / 5) == -ETIME) {
				intel_gt_set_wedged(gt);
				err = -ETIME;
				goto out;
			}

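			/* Let the GT park again before checking for a stray reset */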
			intel_gt_pm_wait_for_idle(gt);
			pr_debug("%s: CTX_INFO=%08x\n",
				 engine->name, READ_ONCE(*res));

			if (resets !=
			    i915_reset_engine_count(&gt->i915->gpu_error,
						    engine)) {
				pr_err("%s: GPU reset required\n",
				       engine->name);
				add_taint_for_CI(TAINT_WARN);
				err = -EIO;
				goto out;
			}
		}
	}

out:
	kfree(engines);
	return err;
}