1 /*
2  * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21  * SOFTWARE.
22  *
23  * Authors:
24  *    Eddie Dong <eddie.dong@intel.com>
25  *    Kevin Tian <kevin.tian@intel.com>
26  *
27  * Contributors:
28  *    Zhi Wang <zhi.a.wang@intel.com>
29  *    Changbin Du <changbin.du@intel.com>
30  *    Zhenyu Wang <zhenyuw@linux.intel.com>
31  *    Tina Zhang <tina.zhang@intel.com>
32  *    Bing Niu <bing.niu@intel.com>
33  *
34  */
35 
36 #include "i915_drv.h"
37 #include "gvt.h"
38 #include "trace.h"
39 
40 /**
41  * Defined in Intel Open Source PRM.
42  * Ref: https://01.org/linuxgraphics/documentation/hardware-specification-prms
43  */
44 #define TRVATTL3PTRDW(i)	_MMIO(0x4de0 + (i)*4)
45 #define TRNULLDETCT		_MMIO(0x4de8)
46 #define TRINVTILEDETCT		_MMIO(0x4dec)
47 #define TRVADR			_MMIO(0x4df0)
48 #define TRTTE			_MMIO(0x4df4)
49 #define RING_EXCC(base)		_MMIO((base) + 0x28)
50 #define RING_GFX_MODE(base)	_MMIO((base) + 0x29c)
51 #define VF_GUARDBAND		_MMIO(0x83a4)
52 
/* The raw MMIO offset is appended to each entry as a comment for convenience. */
54 static struct engine_mmio gen8_engine_mmio_list[] __cacheline_aligned = {
55 	{RCS, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
56 	{RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
57 	{RCS, HWSTAM, 0x0, false}, /* 0x2098 */
58 	{RCS, INSTPM, 0xffff, true}, /* 0x20c0 */
59 	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */
60 	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */
61 	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */
62 	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */
63 	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */
64 	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */
65 	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */
66 	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */
67 	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */
68 	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */
69 	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */
70 	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */
71 	{RCS, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */
72 	{RCS, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */
73 	{RCS, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */
74 	{RCS, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */
75 	{RCS, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */
76 	{RCS, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */
77 
78 	{BCS, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */
79 	{BCS, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
80 	{BCS, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
81 	{BCS, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
82 	{BCS, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */
83 	{RCS, INVALID_MMIO_REG, 0, false } /* Terminated */
84 };
85 
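/*
 * The Gen9 list extends the Gen8 one with the private PAT, TRTT,
 * extra chicken/debug registers and the VCS2/VECS EXCC registers.
 */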
86 static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
87 	{RCS, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
88 	{RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
89 	{RCS, HWSTAM, 0x0, false}, /* 0x2098 */
90 	{RCS, INSTPM, 0xffff, true}, /* 0x20c0 */
91 	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */
92 	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */
93 	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */
94 	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */
95 	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */
96 	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */
97 	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */
98 	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */
99 	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */
100 	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */
101 	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */
102 	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */
103 	{RCS, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */
104 	{RCS, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */
105 	{RCS, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */
106 	{RCS, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */
107 	{RCS, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */
108 	{RCS, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */
109 
110 	{RCS, GEN8_PRIVATE_PAT_LO, 0, false}, /* 0x40e0 */
111 	{RCS, GEN8_PRIVATE_PAT_HI, 0, false}, /* 0x40e4 */
112 	{RCS, GEN8_CS_CHICKEN1, 0xffff, true}, /* 0x2580 */
113 	{RCS, COMMON_SLICE_CHICKEN2, 0xffff, true}, /* 0x7014 */
114 	{RCS, GEN9_CS_DEBUG_MODE1, 0xffff, false}, /* 0x20ec */
115 	{RCS, GEN8_L3SQCREG4, 0, false}, /* 0xb118 */
116 	{RCS, GEN7_HALF_SLICE_CHICKEN1, 0xffff, true}, /* 0xe100 */
117 	{RCS, HALF_SLICE_CHICKEN2, 0xffff, true}, /* 0xe180 */
118 	{RCS, HALF_SLICE_CHICKEN3, 0xffff, true}, /* 0xe184 */
119 	{RCS, GEN9_HALF_SLICE_CHICKEN5, 0xffff, true}, /* 0xe188 */
120 	{RCS, GEN9_HALF_SLICE_CHICKEN7, 0xffff, true}, /* 0xe194 */
121 	{RCS, TRVATTL3PTRDW(0), 0, false}, /* 0x4de0 */
122 	{RCS, TRVATTL3PTRDW(1), 0, false}, /* 0x4de4 */
123 	{RCS, TRNULLDETCT, 0, false}, /* 0x4de8 */
124 	{RCS, TRINVTILEDETCT, 0, false}, /* 0x4dec */
125 	{RCS, TRVADR, 0, false}, /* 0x4df0 */
126 	{RCS, TRTTE, 0, false}, /* 0x4df4 */
127 
128 	{BCS, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */
129 	{BCS, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
130 	{BCS, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
131 	{BCS, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
132 	{BCS, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */
133 
134 	{VCS2, RING_EXCC(GEN8_BSD2_RING_BASE), 0xffff, false}, /* 0x1c028 */
135 
136 	{VECS, RING_EXCC(VEBOX_RING_BASE), 0xffff, false}, /* 0x1a028 */
137 
138 	{RCS, GEN8_HDC_CHICKEN1, 0xffff, true}, /* 0x7304 */
139 	{RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
140 	{RCS, GEN7_UCGCTL4, 0x0, false}, /* 0x940c */
141 	{RCS, GAMT_CHKN_BIT_REG, 0x0, false}, /* 0x4ab8 */
142 
143 	{RCS, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */
144 	{RCS, GEN9_CSFE_CHICKEN1_RCS, 0x0, false}, /* 0x20d4 */
145 
146 	{RCS, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
147 	{RCS, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
148 	{RCS, FF_SLICE_CS_CHICKEN2, 0xffff, false}, /* 0x20e4 */
149 	{RCS, INVALID_MMIO_REG, 0, false } /* Terminated */
150 };
151 
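/*
 * Cached copy of the host's MOCS programming, captured the first time an
 * engine is switched away from the host and used to restore the host
 * values when no vGPU owns the engine.
 */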
152 static struct {
153 	bool initialized;
154 	u32 control_table[I915_NUM_ENGINES][64];
155 	u32 l3cc_table[32];
156 } gen9_render_mocs;
157 
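/* Snapshot the host MOCS control and L3CC tables with raw MMIO reads. */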
158 static void load_render_mocs(struct drm_i915_private *dev_priv)
159 {
160 	i915_reg_t offset;
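	/* MOCS control table base offsets, one per engine */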
161 	u32 regs[] = {
162 		[RCS] = 0xc800,
163 		[VCS] = 0xc900,
164 		[VCS2] = 0xca00,
165 		[BCS] = 0xcc00,
166 		[VECS] = 0xcb00,
167 	};
168 	int ring_id, i;
169 
170 	for (ring_id = 0; ring_id < ARRAY_SIZE(regs); ring_id++) {
171 		offset.reg = regs[ring_id];
172 		for (i = 0; i < 64; i++) {
173 			gen9_render_mocs.control_table[ring_id][i] =
174 				I915_READ_FW(offset);
175 			offset.reg += 4;
176 		}
177 	}
178 
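	/* 0xb020: base of the L3 cacheability control (LNCFCMOCS) registers */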
179 	offset.reg = 0xb020;
180 	for (i = 0; i < 32; i++) {
181 		gen9_render_mocs.l3cc_table[i] =
182 			I915_READ_FW(offset);
183 		offset.reg += 4;
184 	}
185 	gen9_render_mocs.initialized = true;
186 }
187 
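/*
 * Perform a TLB invalidation the vGPU requested while it did not own the
 * engine: write 1 to the per-engine invalidation register, then wait for
 * hardware to clear it back to 0.
 */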
188 static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
189 {
190 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
191 	struct intel_vgpu_submission *s = &vgpu->submission;
192 	enum forcewake_domains fw;
193 	i915_reg_t reg;
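	/* TLB invalidation request/status registers, one per engine */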
194 	u32 regs[] = {
195 		[RCS] = 0x4260,
196 		[VCS] = 0x4264,
197 		[VCS2] = 0x4268,
198 		[BCS] = 0x426c,
199 		[VECS] = 0x4270,
200 	};
201 
202 	if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
203 		return;
204 
205 	if (!test_and_clear_bit(ring_id, (void *)s->tlb_handle_pending))
206 		return;
207 
208 	reg = _MMIO(regs[ring_id]);
209 
	/* WaForceWakeRenderDuringMmioTLBInvalidate:skl
	 * We need to hold render forcewake while invalidating the RCS TLB,
	 * otherwise the device can enter RC6 and interrupt the invalidation
	 * process.
	 */
215 	fw = intel_uncore_forcewake_for_reg(dev_priv, reg,
216 					    FW_REG_READ | FW_REG_WRITE);
217 	if (ring_id == RCS && (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
218 		fw |= FORCEWAKE_RENDER;
219 
220 	intel_uncore_forcewake_get(dev_priv, fw);
221 
222 	I915_WRITE_FW(reg, 0x1);
223 
	if (wait_for_atomic((I915_READ_FW(reg) == 0), 50))
		gvt_vgpu_err("timed out invalidating ring %d TLB\n", ring_id);
226 	else
227 		vgpu_vreg_t(vgpu, reg) = 0;
228 
229 	intel_uncore_forcewake_put(dev_priv, fw);
230 
231 	gvt_dbg_core("invalidate TLB for ring %d\n", ring_id);
232 }
233 
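/*
 * Switch the MOCS programming of an engine between two owners. A NULL
 * @pre or @next stands for the host, whose values live in the cached
 * gen9_render_mocs tables.
 */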
234 static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
235 			int ring_id)
236 {
237 	struct drm_i915_private *dev_priv;
238 	i915_reg_t offset, l3_offset;
239 	u32 old_v, new_v;
240 
241 	u32 regs[] = {
242 		[RCS] = 0xc800,
243 		[VCS] = 0xc900,
244 		[VCS2] = 0xca00,
245 		[BCS] = 0xcc00,
246 		[VECS] = 0xcb00,
247 	};
248 	int i;
249 
250 	dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
251 	if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
252 		return;
253 
254 	if (!pre && !gen9_render_mocs.initialized)
255 		load_render_mocs(dev_priv);
256 
257 	offset.reg = regs[ring_id];
258 	for (i = 0; i < 64; i++) {
259 		if (pre)
260 			old_v = vgpu_vreg_t(pre, offset);
261 		else
262 			old_v = gen9_render_mocs.control_table[ring_id][i];
263 		if (next)
264 			new_v = vgpu_vreg_t(next, offset);
265 		else
266 			new_v = gen9_render_mocs.control_table[ring_id][i];
267 
268 		if (old_v != new_v)
269 			I915_WRITE_FW(offset, new_v);
270 
271 		offset.reg += 4;
272 	}
273 
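	/* The L3 cacheability control table is switched on RCS only. */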
274 	if (ring_id == RCS) {
275 		l3_offset.reg = 0xb020;
276 		for (i = 0; i < 32; i++) {
277 			if (pre)
278 				old_v = vgpu_vreg_t(pre, l3_offset);
279 			else
280 				old_v = gen9_render_mocs.l3cc_table[i];
281 			if (next)
282 				new_v = vgpu_vreg_t(next, l3_offset);
283 			else
284 				new_v = gen9_render_mocs.l3cc_table[i];
285 
286 			if (old_v != new_v)
287 				I915_WRITE_FW(l3_offset, new_v);
288 
289 			l3_offset.reg += 4;
290 		}
291 	}
292 }
293 
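/* Dword index of the CTX_CONTEXT_CONTROL value within lrc_reg_state */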
294 #define CTX_CONTEXT_CONTROL_VAL	0x03
295 
/* Switch the MMIO state (context) of a specific engine between two owners. */
297 static void switch_mmio(struct intel_vgpu *pre,
298 			struct intel_vgpu *next,
299 			int ring_id)
300 {
301 	struct drm_i915_private *dev_priv;
302 	struct intel_vgpu_submission *s;
303 	u32 *reg_state, ctx_ctrl;
304 	u32 inhibit_mask =
305 		_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
306 	struct engine_mmio *mmio;
307 	u32 old_v, new_v;
308 
309 	dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
310 	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
311 		switch_mocs(pre, next, ring_id);
312 
313 	for (mmio = dev_priv->gvt->engine_mmio_list;
314 	     i915_mmio_reg_valid(mmio->reg); mmio++) {
315 		if (mmio->ring_id != ring_id)
316 			continue;
		/*
		 * Save the HW state into the previous vGPU's vreg; for
		 * masked registers, clear the write-enable mask bits.
		 */
318 		if (pre) {
319 			vgpu_vreg_t(pre, mmio->reg) = I915_READ_FW(mmio->reg);
320 			if (mmio->mask)
321 				vgpu_vreg_t(pre, mmio->reg) &=
322 						~(mmio->mask << 16);
323 			old_v = vgpu_vreg_t(pre, mmio->reg);
324 		} else
325 			old_v = mmio->value = I915_READ_FW(mmio->reg);
326 
		/* Restore: compute the value to program for the next owner. */
328 		if (next) {
329 			s = &next->submission;
330 			reg_state =
331 				s->shadow_ctx->engine[ring_id].lrc_reg_state;
332 			ctx_ctrl = reg_state[CTX_CONTEXT_CONTROL_VAL];
333 			/*
334 			 * if it is an inhibit context, load in_context mmio
335 			 * into HW by mmio write. If it is not, skip this mmio
336 			 * write.
337 			 */
338 			if (mmio->in_context &&
339 			    (ctx_ctrl & inhibit_mask) != inhibit_mask)
340 				continue;
341 
342 			if (mmio->mask)
343 				new_v = vgpu_vreg_t(next, mmio->reg) |
344 							(mmio->mask << 16);
345 			else
346 				new_v = vgpu_vreg_t(next, mmio->reg);
347 		} else {
348 			if (mmio->in_context)
349 				continue;
350 			if (mmio->mask)
351 				new_v = mmio->value | (mmio->mask << 16);
352 			else
353 				new_v = mmio->value;
354 		}
355 
356 		I915_WRITE_FW(mmio->reg, new_v);
357 
358 		trace_render_mmio(pre ? pre->id : 0,
359 				  next ? next->id : 0,
360 				  "switch",
361 				  i915_mmio_reg_offset(mmio->reg),
362 				  old_v, new_v);
363 	}
364 
365 	if (next)
366 		handle_tlb_pending_event(next, ring_id);
367 }
368 
369 /**
370  * intel_gvt_switch_render_mmio - switch mmio context of specific engine
371  * @pre: the last vGPU that own the engine
372  * @next: the vGPU to switch to
373  * @ring_id: specify the engine
374  *
375  * If pre is null indicates that host own the engine. If next is null
376  * indicates that we are switching to host workload.
377  */
378 void intel_gvt_switch_mmio(struct intel_vgpu *pre,
379 			   struct intel_vgpu *next, int ring_id)
380 {
381 	struct drm_i915_private *dev_priv;
382 
383 	if (WARN_ON(!pre && !next))
384 		return;
385 
	gvt_dbg_render("switch ring %d from %s to %s\n", ring_id,
		       pre ? "vGPU" : "host", next ? "vGPU" : "host");
388 
389 	dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
390 
391 	/**
392 	 * We are using raw mmio access wrapper to improve the
393 	 * performace for batch mmio read/write, so we need
394 	 * handle forcewake mannually.
395 	 */
396 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
397 	switch_mmio(pre, next, ring_id);
398 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
399 }
400 
401 /**
402  * intel_gvt_init_engine_mmio_context - Initiate the engine mmio list
403  * @gvt: GVT device
404  *
405  */
406 void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
407 {
408 	if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv))
409 		gvt->engine_mmio_list = gen9_engine_mmio_list;
410 	else
411 		gvt->engine_mmio_list = gen8_engine_mmio_list;
412 }
413