/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Kevin Tian <kevin.tian@intel.com>
 *    Eddie Dong <eddie.dong@intel.com>
 *    Zhiyuan Lv <zhiyuan.lv@intel.com>
 *
 * Contributors:
 *    Min He <min.he@intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *    Pei Zhang <pei.zhang@intel.com>
 *    Niu Bing <bing.niu@intel.com>
 *    Ping Gao <ping.a.gao@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */

#include "i915_drv.h"
#include "i915_reg.h"
#include "gvt.h"
#include "i915_pvinfo.h"
#include "intel_mchbar_regs.h"
#include "display/intel_display_types.h"
#include "display/intel_fbc.h"
#include "display/vlv_dsi_pll_regs.h"
#include "gt/intel_gt_regs.h"

/* XXX FIXME i915 has changed PP_XXX definition */
#define PCH_PP_STATUS  _MMIO(0xc7200)
#define PCH_PP_CONTROL _MMIO(0xc7204)
#define PCH_PP_ON_DELAYS _MMIO(0xc7208)
#define PCH_PP_OFF_DELAYS _MMIO(0xc720c)
#define PCH_PP_DIVISOR _MMIO(0xc7210)

unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt)
{
	struct drm_i915_private *i915 = gvt->gt->i915;

	if (IS_BROADWELL(i915))
		return D_BDW;
	else if (IS_SKYLAKE(i915))
		return D_SKL;
	else if (IS_KABYLAKE(i915))
		return D_KBL;
	else if (IS_BROXTON(i915))
		return D_BXT;
	else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915))
		return D_CFL;

	return 0;
}

bool intel_gvt_match_device(struct intel_gvt *gvt,
		unsigned long device)
{
	return intel_gvt_get_device_type(gvt) & device;
}

static void read_vreg(struct intel_vgpu *vgpu, unsigned int offset,
	void *p_data, unsigned int bytes)
{
	memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
}

static void write_vreg(struct intel_vgpu *vgpu, unsigned int offset,
	void *p_data, unsigned int bytes)
{
	memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
}

struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
						  unsigned int offset)
{
	struct intel_gvt_mmio_info *e;

	hash_for_each_possible(gvt->mmio.mmio_info_table, e, node, offset) {
		if (e->offset == offset)
			return e;
	}
	return NULL;
}

static int new_mmio_info(struct intel_gvt *gvt,
		u32 offset, u16 flags, u32 size,
		u32 addr_mask, u32 ro_mask, u32 device,
		gvt_mmio_func read, gvt_mmio_func write)
{
	struct intel_gvt_mmio_info *info, *p;
	u32 start, end, i;

	if (!intel_gvt_match_device(gvt, device))
		return 0;

	if (WARN_ON(!IS_ALIGNED(offset, 4)))
		return -EINVAL;

	start = offset;
	end = offset + size;

	for (i = start; i < end; i += 4) {
		info = kzalloc(sizeof(*info), GFP_KERNEL);
		if (!info)
			return -ENOMEM;

		info->offset = i;
		p = intel_gvt_find_mmio_info(gvt, info->offset);
		if (p) {
			WARN(1, "dup mmio definition offset %x\n",
				info->offset);
			kfree(info);

			/* Return -EEXIST here to make GVT-g loading fail,
			 * so that duplicated MMIO definitions are caught
			 * as early as possible.
			 */
			return -EEXIST;
		}

		info->ro_mask = ro_mask;
		info->device = device;
		info->read = read ? read : intel_vgpu_default_mmio_read;
		info->write = write ? write : intel_vgpu_default_mmio_write;
		gvt->mmio.mmio_attribute[info->offset / 4] = flags;
		INIT_HLIST_NODE(&info->node);
		hash_add(gvt->mmio.mmio_info_table, &info->node, info->offset);
		gvt->mmio.num_tracked_mmio++;
	}
	return 0;
}

/**
 * intel_gvt_render_mmio_to_engine - convert an MMIO offset into the engine
 * @gvt: a GVT device
 * @offset: register offset
 *
 * Returns:
 * The engine whose MMIO page contains the offset, or NULL if none matches.
 */
const struct intel_engine_cs *
intel_gvt_render_mmio_to_engine(struct intel_gvt *gvt, unsigned int offset)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	offset &= ~GENMASK(11, 0);
	for_each_engine(engine, gvt->gt, id)
		if (engine->mmio_base == offset)
			return engine;

	return NULL;
}

#define offset_to_fence_num(offset) \
	((offset - i915_mmio_reg_offset(FENCE_REG_GEN6_LO(0))) >> 3)

#define fence_num_to_offset(num) \
	(num * 8 + i915_mmio_reg_offset(FENCE_REG_GEN6_LO(0)))
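
/*
 * Illustrative sketch (not part of the handler logic): each GEN6+ fence
 * register pair occupies 8 bytes starting at FENCE_REG_GEN6_LO(0), so the
 * two macros above are inverses of each other. Assuming the usual GEN6+
 * fence base of 0x100000:
 *
 *   offset_to_fence_num(0x100010) == (0x100010 - 0x100000) >> 3 == 2
 *   fence_num_to_offset(2)        == 2 * 8 + 0x100000      == 0x100010
 */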


void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason)
{
	switch (reason) {
	case GVT_FAILSAFE_UNSUPPORTED_GUEST:
		pr_err("Detected that the guest driver doesn't support GVT-g.\n");
		break;
	case GVT_FAILSAFE_INSUFFICIENT_RESOURCE:
		pr_err("Insufficient graphics resources for the guest.\n");
		break;
	case GVT_FAILSAFE_GUEST_ERR:
		pr_err("GVT internal error for the guest.\n");
	default:
		break;
	}
	pr_err("Now vgpu %d will enter failsafe mode.\n", vgpu->id);
	vgpu->failsafe = true;
}

static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu,
		unsigned int fence_num, void *p_data, unsigned int bytes)
{
	unsigned int max_fence = vgpu_fence_sz(vgpu);

	if (fence_num >= max_fence) {
		gvt_vgpu_err("out-of-bound fence register access %d/%d\n",
			     fence_num, max_fence);

		/* If the guest accesses out-of-bound fence registers without
		 * reading pv_info first, treat it as a guest that doesn't
		 * support GVT and let the vGPU enter failsafe mode.
		 */
		if (!vgpu->pv_notified)
			enter_failsafe_mode(vgpu,
					GVT_FAILSAFE_UNSUPPORTED_GUEST);

		memset(p_data, 0, bytes);
		return -EINVAL;
	}
	return 0;
}

static int gamw_echo_dev_rw_ia_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	u32 ips = (*(u32 *)p_data) & GAMW_ECO_ENABLE_64K_IPS_FIELD;

	if (GRAPHICS_VER(vgpu->gvt->gt->i915) <= 10) {
		if (ips == GAMW_ECO_ENABLE_64K_IPS_FIELD)
			gvt_dbg_core("vgpu%d: ips enabled\n", vgpu->id);
		else if (!ips)
			gvt_dbg_core("vgpu%d: ips disabled\n", vgpu->id);
		else {
			/* IPS must be enabled or disabled for all engines
			 * together on a vGPU, since we don't know which
			 * engine the PPGTT will bind to when shadowing.
			 */
			gvt_vgpu_err("Unsupported IPS setting %x, cannot enable 64K gtt.\n",
				     ips);
			return -EINVAL;
		}
	}

	write_vreg(vgpu, offset, p_data, bytes);
	return 0;
}

static int fence_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
		void *p_data, unsigned int bytes)
{
	int ret;

	ret = sanitize_fence_mmio_access(vgpu, offset_to_fence_num(off),
			p_data, bytes);
	if (ret)
		return ret;
	read_vreg(vgpu, off, p_data, bytes);
	return 0;
}

static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
		void *p_data, unsigned int bytes)
{
	struct intel_gvt *gvt = vgpu->gvt;
	unsigned int fence_num = offset_to_fence_num(off);
	int ret;

	ret = sanitize_fence_mmio_access(vgpu, fence_num, p_data, bytes);
	if (ret)
		return ret;
	write_vreg(vgpu, off, p_data, bytes);

	mmio_hw_access_pre(gvt->gt);
	intel_vgpu_write_fence(vgpu, fence_num,
			vgpu_vreg64(vgpu, fence_num_to_offset(fence_num)));
	mmio_hw_access_post(gvt->gt);
	return 0;
}

#define CALC_MODE_MASK_REG(old, new) \
	(((new) & GENMASK(31, 16)) \
	 | ((((old) & GENMASK(15, 0)) & ~((new) >> 16)) \
	 | ((new) & ((new) >> 16))))
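
/*
 * Worked example (illustrative only): mode registers carry a write mask in
 * the upper 16 bits that selects which of the lower 16 bits take effect.
 * With old = 0x00000004 (bit 2 set) and a guest write of new = 0x00040000
 * (mask bit 2, value 0):
 *
 *   high 16 bits: new & GENMASK(31, 16)                = 0x00040000
 *   low 16 bits:  (old_low & ~mask) | (new_low & mask) = (0x4 & ~0x4) | 0 = 0
 *
 * so CALC_MODE_MASK_REG(old, new) == 0x00040000, i.e. bit 2 is cleared while
 * every unmasked bit keeps its old value.
 */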

static int mul_force_wake_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	u32 old, new;
	u32 ack_reg_offset;

	old = vgpu_vreg(vgpu, offset);
	new = CALC_MODE_MASK_REG(old, *(u32 *)p_data);

	if (GRAPHICS_VER(vgpu->gvt->gt->i915) >= 9) {
		switch (offset) {
		case FORCEWAKE_RENDER_GEN9_REG:
			ack_reg_offset = FORCEWAKE_ACK_RENDER_GEN9_REG;
			break;
		case FORCEWAKE_GT_GEN9_REG:
			ack_reg_offset = FORCEWAKE_ACK_GT_GEN9_REG;
			break;
		case FORCEWAKE_MEDIA_GEN9_REG:
			ack_reg_offset = FORCEWAKE_ACK_MEDIA_GEN9_REG;
			break;
		default:
			/* should not hit here */
			gvt_vgpu_err("invalid forcewake offset 0x%x\n", offset);
			return -EINVAL;
		}
	} else {
		ack_reg_offset = FORCEWAKE_ACK_HSW_REG;
	}

	vgpu_vreg(vgpu, offset) = new;
	vgpu_vreg(vgpu, ack_reg_offset) = (new & GENMASK(15, 0));
	return 0;
}

static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
			    void *p_data, unsigned int bytes)
{
	intel_engine_mask_t engine_mask = 0;
	u32 data;

	write_vreg(vgpu, offset, p_data, bytes);
	data = vgpu_vreg(vgpu, offset);

	if (data & GEN6_GRDOM_FULL) {
		gvt_dbg_mmio("vgpu%d: request full GPU reset\n", vgpu->id);
		engine_mask = ALL_ENGINES;
	} else {
		if (data & GEN6_GRDOM_RENDER) {
			gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id);
			engine_mask |= BIT(RCS0);
		}
		if (data & GEN6_GRDOM_MEDIA) {
			gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id);
			engine_mask |= BIT(VCS0);
		}
		if (data & GEN6_GRDOM_BLT) {
			gvt_dbg_mmio("vgpu%d: request BCS reset\n", vgpu->id);
			engine_mask |= BIT(BCS0);
		}
		if (data & GEN6_GRDOM_VECS) {
			gvt_dbg_mmio("vgpu%d: request VECS reset\n", vgpu->id);
			engine_mask |= BIT(VECS0);
		}
		if (data & GEN8_GRDOM_MEDIA2) {
			gvt_dbg_mmio("vgpu%d: request VCS2 reset\n", vgpu->id);
			engine_mask |= BIT(VCS1);
		}
		if (data & GEN9_GRDOM_GUC) {
			gvt_dbg_mmio("vgpu%d: request GuC reset\n", vgpu->id);
			vgpu_vreg_t(vgpu, GUC_STATUS) |= GS_MIA_IN_RESET;
		}
		engine_mask &= vgpu->gvt->gt->info.engine_mask;
	}

	/* vgpu_lock is already held by the MMIO r/w emulation path */
	intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask);

	/* sw will wait for the device to ack the reset request */
	vgpu_vreg(vgpu, offset) = 0;

	return 0;
}

static int gmbus_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	return intel_gvt_i2c_handle_gmbus_read(vgpu, offset, p_data, bytes);
}

static int gmbus_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	return intel_gvt_i2c_handle_gmbus_write(vgpu, offset, p_data, bytes);
}

static int pch_pp_control_mmio_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	write_vreg(vgpu, offset, p_data, bytes);

	if (vgpu_vreg(vgpu, offset) & PANEL_POWER_ON) {
		vgpu_vreg_t(vgpu, PCH_PP_STATUS) |= PP_ON;
		vgpu_vreg_t(vgpu, PCH_PP_STATUS) |= PP_SEQUENCE_STATE_ON_IDLE;
		vgpu_vreg_t(vgpu, PCH_PP_STATUS) &= ~PP_SEQUENCE_POWER_DOWN;
		vgpu_vreg_t(vgpu, PCH_PP_STATUS) &= ~PP_CYCLE_DELAY_ACTIVE;

	} else
		vgpu_vreg_t(vgpu, PCH_PP_STATUS) &=
			~(PP_ON | PP_SEQUENCE_POWER_DOWN
					| PP_CYCLE_DELAY_ACTIVE);
	return 0;
}

static int transconf_mmio_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	write_vreg(vgpu, offset, p_data, bytes);

	if (vgpu_vreg(vgpu, offset) & TRANS_ENABLE)
		vgpu_vreg(vgpu, offset) |= TRANS_STATE_ENABLE;
	else
		vgpu_vreg(vgpu, offset) &= ~TRANS_STATE_ENABLE;
	return 0;
}

static int lcpll_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	write_vreg(vgpu, offset, p_data, bytes);

	if (vgpu_vreg(vgpu, offset) & LCPLL_PLL_DISABLE)
		vgpu_vreg(vgpu, offset) &= ~LCPLL_PLL_LOCK;
	else
		vgpu_vreg(vgpu, offset) |= LCPLL_PLL_LOCK;

	if (vgpu_vreg(vgpu, offset) & LCPLL_CD_SOURCE_FCLK)
		vgpu_vreg(vgpu, offset) |= LCPLL_CD_SOURCE_FCLK_DONE;
	else
		vgpu_vreg(vgpu, offset) &= ~LCPLL_CD_SOURCE_FCLK_DONE;

	return 0;
}

static int dpy_reg_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	switch (offset) {
	case 0xe651c:
	case 0xe661c:
	case 0xe671c:
	case 0xe681c:
		vgpu_vreg(vgpu, offset) = 1 << 17;
		break;
	case 0xe6c04:
		vgpu_vreg(vgpu, offset) = 0x3;
		break;
	case 0xe6e1c:
		vgpu_vreg(vgpu, offset) = 0x2f << 16;
		break;
	default:
		return -EINVAL;
	}

	read_vreg(vgpu, offset, p_data, bytes);
	return 0;
}

/*
 * Only PIPE_A is enabled in the current vGPU display, and PIPE_A is tied to
 *   TRANSCODER_A in HW. The DDI/PORT could be any PORT_x, depending on
 *   setup_virtual_dp_monitor().
 * emulate_monitor_status_change() sets up the PLL for PORT_x as the initially
 *   enabled DPLL. The guest driver may later set up a different DPLL when
 *   setting a mode.
 * So the correct sequence to find the DP stream clock is:
 *   Check TRANS_DDI_FUNC_CTL on TRANSCODER_A to get PORT_x.
 *   Check the PLL selected for PORT_x to get the PLL frequency and DP bitrate.
 * The refresh rate can then be calculated from the following equations:
 *   Pixel clock = h_total * v_total * refresh_rate
 *   stream clock = Pixel clock
 *   ls_clk = DP bitrate
 *   Link M/N = strm_clk / ls_clk
 */

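/*
 * Worked example of the equations above (illustrative numbers, not read
 * from real hardware): for an HBR2 link, dp_br = 270000 * 2 = 540000, so
 * ls_clk = 540000. A 1920x1080@60 mode with htotal = 2200 and vtotal = 1125
 * has a pixel clock of 2200 * 1125 * 60 = 148500000 Hz, hence
 * Link M/N = 148500 / 540000 = 0.275. Reversing the math, as
 * vgpu_update_refresh_rate() does below:
 *
 *   pixel_clk    = ls_clk * M / N      = 540000 * 0.275       = 148500 kHz
 *   refresh_rate = pixel_clk / (h * v) = 148500000 / 2475000  = 60 Hz
 *
 * Note the transcoder timing registers store total - 1, which is why the
 * code adds 1 to htotal and vtotal before dividing, and the extra
 * MSEC_PER_SEC factors only scale the value stored in vrefresh_k.
 */
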
static u32 bdw_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
{
	u32 dp_br = 0;
	u32 ddi_pll_sel = vgpu_vreg_t(vgpu, PORT_CLK_SEL(port));

	switch (ddi_pll_sel) {
	case PORT_CLK_SEL_LCPLL_2700:
		dp_br = 270000 * 2;
		break;
	case PORT_CLK_SEL_LCPLL_1350:
		dp_br = 135000 * 2;
		break;
	case PORT_CLK_SEL_LCPLL_810:
		dp_br = 81000 * 2;
		break;
	case PORT_CLK_SEL_SPLL:
	{
		switch (vgpu_vreg_t(vgpu, SPLL_CTL) & SPLL_FREQ_MASK) {
		case SPLL_FREQ_810MHz:
			dp_br = 81000 * 2;
			break;
		case SPLL_FREQ_1350MHz:
			dp_br = 135000 * 2;
			break;
		case SPLL_FREQ_2700MHz:
			dp_br = 270000 * 2;
			break;
		default:
			gvt_dbg_dpy("vgpu-%d PORT_%c can't get freq from SPLL 0x%08x\n",
				    vgpu->id, port_name(port), vgpu_vreg_t(vgpu, SPLL_CTL));
			break;
		}
		break;
	}
	case PORT_CLK_SEL_WRPLL1:
	case PORT_CLK_SEL_WRPLL2:
	{
		u32 wrpll_ctl;
		int refclk, n, p, r;

		if (ddi_pll_sel == PORT_CLK_SEL_WRPLL1)
			wrpll_ctl = vgpu_vreg_t(vgpu, WRPLL_CTL(DPLL_ID_WRPLL1));
		else
			wrpll_ctl = vgpu_vreg_t(vgpu, WRPLL_CTL(DPLL_ID_WRPLL2));

		switch (wrpll_ctl & WRPLL_REF_MASK) {
		case WRPLL_REF_PCH_SSC:
			refclk = vgpu->gvt->gt->i915->dpll.ref_clks.ssc;
			break;
		case WRPLL_REF_LCPLL:
			refclk = 2700000;
			break;
		default:
			gvt_dbg_dpy("vgpu-%d PORT_%c WRPLL can't get refclk 0x%08x\n",
				    vgpu->id, port_name(port), wrpll_ctl);
			goto out;
		}

		r = wrpll_ctl & WRPLL_DIVIDER_REF_MASK;
		p = (wrpll_ctl & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
		n = (wrpll_ctl & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;

		dp_br = (refclk * n / 10) / (p * r) * 2;
		break;
	}
	default:
		gvt_dbg_dpy("vgpu-%d PORT_%c has invalid clock select 0x%08x\n",
			    vgpu->id, port_name(port), vgpu_vreg_t(vgpu, PORT_CLK_SEL(port)));
		break;
	}

out:
	return dp_br;
}
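
/*
 * Illustrative WRPLL decode (the divider values are made up for the
 * example, not read from hardware): with the LCPLL reference,
 * refclk = 2700000 kHz; assuming n = 10, p = 2 and r = 5, the expression
 * above gives
 *
 *   dp_br = (2700000 * 10 / 10) / (2 * 5) * 2 = 270000 * 2 = 540000
 *
 * which matches the fixed PORT_CLK_SEL_LCPLL_2700 case (an HBR2 link).
 */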

static u32 bxt_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
{
	u32 dp_br = 0;
	int refclk = vgpu->gvt->gt->i915->dpll.ref_clks.nssc;
	enum dpio_phy phy = DPIO_PHY0;
	enum dpio_channel ch = DPIO_CH0;
	struct dpll clock = {0};
	u32 temp;

	/* Port to PHY mapping is fixed, see bxt_ddi_phy_info{} */
	switch (port) {
	case PORT_A:
		phy = DPIO_PHY1;
		ch = DPIO_CH0;
		break;
	case PORT_B:
		phy = DPIO_PHY0;
		ch = DPIO_CH0;
		break;
	case PORT_C:
		phy = DPIO_PHY0;
		ch = DPIO_CH1;
		break;
	default:
		gvt_dbg_dpy("vgpu-%d no PHY for PORT_%c\n", vgpu->id, port_name(port));
		goto out;
	}

	temp = vgpu_vreg_t(vgpu, BXT_PORT_PLL_ENABLE(port));
	if (!(temp & PORT_PLL_ENABLE) || !(temp & PORT_PLL_LOCK)) {
		gvt_dbg_dpy("vgpu-%d PORT_%c PLL_ENABLE 0x%08x isn't enabled or locked\n",
			    vgpu->id, port_name(port), temp);
		goto out;
	}

	clock.m1 = 2;
	clock.m2 = (vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 0)) & PORT_PLL_M2_MASK) << 22;
	if (vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 3)) & PORT_PLL_M2_FRAC_ENABLE)
		clock.m2 |= vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 2)) & PORT_PLL_M2_FRAC_MASK;
	clock.n = (vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 1)) & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT;
	clock.p1 = (vgpu_vreg_t(vgpu, BXT_PORT_PLL_EBB_0(phy, ch)) & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT;
	clock.p2 = (vgpu_vreg_t(vgpu, BXT_PORT_PLL_EBB_0(phy, ch)) & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT;
	clock.m = clock.m1 * clock.m2;
	clock.p = clock.p1 * clock.p2;

	if (clock.n == 0 || clock.p == 0) {
		gvt_dbg_dpy("vgpu-%d PORT_%c PLL has invalid divider\n", vgpu->id, port_name(port));
		goto out;
	}

	clock.vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock.m), clock.n << 22);
	clock.dot = DIV_ROUND_CLOSEST(clock.vco, clock.p);

	dp_br = clock.dot / 5;

out:
	return dp_br;
}
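
/*
 * Worked example for the fixed-point math above (divider values chosen only
 * for illustration): m2 carries a 22-bit fractional part, so an integer m2
 * of 27 is stored as 27 << 22. With refclk = 100000 kHz, m1 = 2, n = 1,
 * p1 = 2 and p2 = 1:
 *
 *   m     = m1 * m2              = 2 * (27 << 22)
 *   vco   = refclk * m / (n << 22) = 100000 * 54   = 5400000 kHz
 *   dot   = vco / (p1 * p2)        = 5400000 / 2   = 2700000 kHz
 *   dp_br = dot / 5                = 540000 (an HBR2 link)
 */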

static u32 skl_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
{
	u32 dp_br = 0;
	enum intel_dpll_id dpll_id = DPLL_ID_SKL_DPLL0;

	/* Find the enabled DPLL for the DDI/PORT */
	if (!(vgpu_vreg_t(vgpu, DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_OFF(port)) &&
	    (vgpu_vreg_t(vgpu, DPLL_CTRL2) & DPLL_CTRL2_DDI_SEL_OVERRIDE(port))) {
		dpll_id += (vgpu_vreg_t(vgpu, DPLL_CTRL2) &
			DPLL_CTRL2_DDI_CLK_SEL_MASK(port)) >>
			DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port);
	} else {
		gvt_dbg_dpy("vgpu-%d DPLL for PORT_%c isn't turned on\n",
			    vgpu->id, port_name(port));
		return dp_br;
	}

	/* Find the PLL output frequency from the correct DPLL to get the bit rate */
	switch ((vgpu_vreg_t(vgpu, DPLL_CTRL1) &
		DPLL_CTRL1_LINK_RATE_MASK(dpll_id)) >>
		DPLL_CTRL1_LINK_RATE_SHIFT(dpll_id)) {
	case DPLL_CTRL1_LINK_RATE_810:
		dp_br = 81000 * 2;
		break;
	case DPLL_CTRL1_LINK_RATE_1080:
		dp_br = 108000 * 2;
		break;
	case DPLL_CTRL1_LINK_RATE_1350:
		dp_br = 135000 * 2;
		break;
	case DPLL_CTRL1_LINK_RATE_1620:
		dp_br = 162000 * 2;
		break;
	case DPLL_CTRL1_LINK_RATE_2160:
		dp_br = 216000 * 2;
		break;
	case DPLL_CTRL1_LINK_RATE_2700:
		dp_br = 270000 * 2;
		break;
	default:
		dp_br = 0;
		gvt_dbg_dpy("vgpu-%d PORT_%c failed to get DPLL-%d freq\n",
			    vgpu->id, port_name(port), dpll_id);
	}

	return dp_br;
}

static void vgpu_update_refresh_rate(struct intel_vgpu *vgpu)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
	enum port port;
	u32 dp_br, link_m, link_n, htotal, vtotal;

	/* Find the DDI/PORT assigned to TRANSCODER_A; PORT_B or PORT_D is expected */
	port = (vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &
		TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
	if (port != PORT_B && port != PORT_D) {
		gvt_dbg_dpy("vgpu-%d unsupported PORT_%c\n", vgpu->id, port_name(port));
		return;
	}

	/* Calculate DP bitrate from PLL */
	if (IS_BROADWELL(dev_priv))
		dp_br = bdw_vgpu_get_dp_bitrate(vgpu, port);
	else if (IS_BROXTON(dev_priv))
		dp_br = bxt_vgpu_get_dp_bitrate(vgpu, port);
	else
		dp_br = skl_vgpu_get_dp_bitrate(vgpu, port);

	/* Get DP link symbol clock M/N */
	link_m = vgpu_vreg_t(vgpu, PIPE_LINK_M1(TRANSCODER_A));
	link_n = vgpu_vreg_t(vgpu, PIPE_LINK_N1(TRANSCODER_A));

	/* Get H/V total from transcoder timing */
	htotal = (vgpu_vreg_t(vgpu, HTOTAL(TRANSCODER_A)) >> TRANS_HTOTAL_SHIFT);
	vtotal = (vgpu_vreg_t(vgpu, VTOTAL(TRANSCODER_A)) >> TRANS_VTOTAL_SHIFT);

	if (dp_br && link_n && htotal && vtotal) {
		u64 pixel_clk = 0;
		u32 new_rate = 0;
		u32 *old_rate = &(intel_vgpu_port(vgpu, vgpu->display.port_num)->vrefresh_k);

		/* Calculate pixel clock as (ls_clk * M / N) */
		pixel_clk = div_u64(mul_u32_u32(link_m, dp_br), link_n);
		pixel_clk *= MSEC_PER_SEC;

		/* Calculate refresh rate as (pixel_clk / (h_total * v_total)) */
		new_rate = DIV64_U64_ROUND_CLOSEST(
				mul_u64_u32_shr(pixel_clk, MSEC_PER_SEC, 0),
				mul_u32_u32(htotal + 1, vtotal + 1));

		if (*old_rate != new_rate)
			*old_rate = new_rate;

		gvt_dbg_dpy("vgpu-%d PIPE_%c refresh rate updated to %d\n",
			    vgpu->id, pipe_name(PIPE_A), new_rate);
	}
}

static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	u32 data;

	write_vreg(vgpu, offset, p_data, bytes);
	data = vgpu_vreg(vgpu, offset);

	if (data & PIPECONF_ENABLE) {
		vgpu_vreg(vgpu, offset) |= PIPECONF_STATE_ENABLE;
		vgpu_update_refresh_rate(vgpu);
		vgpu_update_vblank_emulation(vgpu, true);
	} else {
		vgpu_vreg(vgpu, offset) &= ~PIPECONF_STATE_ENABLE;
		vgpu_update_vblank_emulation(vgpu, false);
	}
	return 0;
}

/* sorted in ascending order */
static i915_reg_t force_nonpriv_white_list[] = {
	_MMIO(0xd80),
	GEN9_CS_DEBUG_MODE1, //_MMIO(0x20ec)
	GEN9_CTX_PREEMPT_REG,//_MMIO(0x2248)
	CL_PRIMITIVES_COUNT, //_MMIO(0x2340)
	PS_INVOCATION_COUNT, //_MMIO(0x2348)
	PS_DEPTH_COUNT, //_MMIO(0x2350)
	GEN8_CS_CHICKEN1,//_MMIO(0x2580)
	_MMIO(0x2690),
	_MMIO(0x2694),
	_MMIO(0x2698),
	_MMIO(0x2754),
	_MMIO(0x28a0),
	_MMIO(0x4de0),
	_MMIO(0x4de4),
	_MMIO(0x4dfc),
	GEN7_COMMON_SLICE_CHICKEN1,//_MMIO(0x7010)
	_MMIO(0x7014),
	HDC_CHICKEN0,//_MMIO(0x7300)
	GEN8_HDC_CHICKEN1,//_MMIO(0x7304)
	_MMIO(0x7700),
	_MMIO(0x7704),
	_MMIO(0x7708),
	_MMIO(0x770c),
	_MMIO(0x83a8),
	_MMIO(0xb110),
	GEN8_L3SQCREG4,//_MMIO(0xb118)
	_MMIO(0xe100),
	_MMIO(0xe18c),
	_MMIO(0xe48c),
	_MMIO(0xe5f4),
	_MMIO(0x64844),
};

/* a simple bsearch */
static inline bool in_whitelist(u32 reg)
{
	int left = 0, right = ARRAY_SIZE(force_nonpriv_white_list);
	i915_reg_t *array = force_nonpriv_white_list;

	while (left < right) {
		int mid = (left + right)/2;

		if (reg > array[mid].reg)
			left = mid + 1;
		else if (reg < array[mid].reg)
			right = mid;
		else
			return true;
	}
	return false;
}
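
/*
 * Minimal usage sketch (illustrative): the search runs over the half-open
 * interval [left, right) and relies on force_nonpriv_white_list[] staying
 * sorted in ascending offset order, so e.g.
 *
 *   in_whitelist(i915_mmio_reg_offset(PS_INVOCATION_COUNT)) -> true
 *   in_whitelist(0x0)                                       -> false
 *
 * New entries must therefore be inserted in sorted position, not appended.
 */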

static int force_nonpriv_write(struct intel_vgpu *vgpu,
	unsigned int offset, void *p_data, unsigned int bytes)
{
	u32 reg_nonpriv = (*(u32 *)p_data) & REG_GENMASK(25, 2);
	const struct intel_engine_cs *engine =
		intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);

	if (bytes != 4 || !IS_ALIGNED(offset, bytes) || !engine) {
		gvt_err("vgpu(%d) Invalid FORCE_NONPRIV offset %x(%dB)\n",
			vgpu->id, offset, bytes);
		return -EINVAL;
	}

	if (!in_whitelist(reg_nonpriv) &&
	    reg_nonpriv != i915_mmio_reg_offset(RING_NOPID(engine->mmio_base))) {
		gvt_err("vgpu(%d) Invalid FORCE_NONPRIV write %x at offset %x\n",
			vgpu->id, reg_nonpriv, offset);
	} else
		intel_vgpu_default_mmio_write(vgpu, offset, p_data, bytes);

	return 0;
}

static int ddi_buf_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	write_vreg(vgpu, offset, p_data, bytes);

	if (vgpu_vreg(vgpu, offset) & DDI_BUF_CTL_ENABLE) {
		vgpu_vreg(vgpu, offset) &= ~DDI_BUF_IS_IDLE;
	} else {
		vgpu_vreg(vgpu, offset) |= DDI_BUF_IS_IDLE;
		if (offset == i915_mmio_reg_offset(DDI_BUF_CTL(PORT_E)))
			vgpu_vreg_t(vgpu, DP_TP_STATUS(PORT_E))
				&= ~DP_TP_STATUS_AUTOTRAIN_DONE;
	}
	return 0;
}

static int fdi_rx_iir_mmio_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	vgpu_vreg(vgpu, offset) &= ~*(u32 *)p_data;
	return 0;
}

#define FDI_LINK_TRAIN_PATTERN1         0
#define FDI_LINK_TRAIN_PATTERN2         1

static int fdi_auto_training_started(struct intel_vgpu *vgpu)
{
	u32 ddi_buf_ctl = vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_E));
	u32 rx_ctl = vgpu_vreg(vgpu, _FDI_RXA_CTL);
	u32 tx_ctl = vgpu_vreg_t(vgpu, DP_TP_CTL(PORT_E));

	if ((ddi_buf_ctl & DDI_BUF_CTL_ENABLE) &&
			(rx_ctl & FDI_RX_ENABLE) &&
			(rx_ctl & FDI_AUTO_TRAINING) &&
			(tx_ctl & DP_TP_CTL_ENABLE) &&
			(tx_ctl & DP_TP_CTL_FDI_AUTOTRAIN))
		return 1;
	else
		return 0;
}

static int check_fdi_rx_train_status(struct intel_vgpu *vgpu,
		enum pipe pipe, unsigned int train_pattern)
{
	i915_reg_t fdi_rx_imr, fdi_tx_ctl, fdi_rx_ctl;
	unsigned int fdi_rx_check_bits, fdi_tx_check_bits;
	unsigned int fdi_rx_train_bits, fdi_tx_train_bits;
	unsigned int fdi_iir_check_bits;

	fdi_rx_imr = FDI_RX_IMR(pipe);
	fdi_tx_ctl = FDI_TX_CTL(pipe);
	fdi_rx_ctl = FDI_RX_CTL(pipe);

	if (train_pattern == FDI_LINK_TRAIN_PATTERN1) {
		fdi_rx_train_bits = FDI_LINK_TRAIN_PATTERN_1_CPT;
		fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_1;
		fdi_iir_check_bits = FDI_RX_BIT_LOCK;
	} else if (train_pattern == FDI_LINK_TRAIN_PATTERN2) {
		fdi_rx_train_bits = FDI_LINK_TRAIN_PATTERN_2_CPT;
		fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_2;
		fdi_iir_check_bits = FDI_RX_SYMBOL_LOCK;
	} else {
		gvt_vgpu_err("Invalid train pattern %d\n", train_pattern);
		return -EINVAL;
	}

	fdi_rx_check_bits = FDI_RX_ENABLE | fdi_rx_train_bits;
	fdi_tx_check_bits = FDI_TX_ENABLE | fdi_tx_train_bits;

	/* If the IMR bit is masked, don't report the lock status */
	if (vgpu_vreg_t(vgpu, fdi_rx_imr) & fdi_iir_check_bits)
		return 0;

	if (((vgpu_vreg_t(vgpu, fdi_tx_ctl) & fdi_tx_check_bits)
			== fdi_tx_check_bits)
		&& ((vgpu_vreg_t(vgpu, fdi_rx_ctl) & fdi_rx_check_bits)
			== fdi_rx_check_bits))
		return 1;
	else
		return 0;
}

#define INVALID_INDEX (~0U)

static unsigned int calc_index(unsigned int offset, unsigned int start,
	unsigned int next, unsigned int end, i915_reg_t i915_end)
{
	unsigned int range = next - start;

	if (!end)
		end = i915_mmio_reg_offset(i915_end);
	if (offset < start || offset > end)
		return INVALID_INDEX;
	offset -= start;
	return offset / range;
}
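
/*
 * Worked example (assuming the usual FDI register layout where the A and B
 * instances are 0x1000 apart, e.g. _FDI_RXA_CTL = 0xf000c and
 * _FDI_RXB_CTL = 0xf100c):
 *
 *   FDI_RX_CTL_TO_PIPE(0xf100c)
 *     = calc_index(0xf100c, 0xf000c, 0xf100c, 0, FDI_RX_CTL(PIPE_C))
 *     = (0xf100c - 0xf000c) / 0x1000 = 1          -> PIPE_B
 *
 * Offsets outside [start, end] map to INVALID_INDEX instead.
 */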

#define FDI_RX_CTL_TO_PIPE(offset) \
	calc_index(offset, _FDI_RXA_CTL, _FDI_RXB_CTL, 0, FDI_RX_CTL(PIPE_C))

#define FDI_TX_CTL_TO_PIPE(offset) \
	calc_index(offset, _FDI_TXA_CTL, _FDI_TXB_CTL, 0, FDI_TX_CTL(PIPE_C))

#define FDI_RX_IMR_TO_PIPE(offset) \
	calc_index(offset, _FDI_RXA_IMR, _FDI_RXB_IMR, 0, FDI_RX_IMR(PIPE_C))

static int update_fdi_rx_iir_status(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	i915_reg_t fdi_rx_iir;
	unsigned int index;
	int ret;

	if (FDI_RX_CTL_TO_PIPE(offset) != INVALID_INDEX)
		index = FDI_RX_CTL_TO_PIPE(offset);
	else if (FDI_TX_CTL_TO_PIPE(offset) != INVALID_INDEX)
		index = FDI_TX_CTL_TO_PIPE(offset);
	else if (FDI_RX_IMR_TO_PIPE(offset) != INVALID_INDEX)
		index = FDI_RX_IMR_TO_PIPE(offset);
	else {
		gvt_vgpu_err("Unsupported register %x\n", offset);
		return -EINVAL;
	}

	write_vreg(vgpu, offset, p_data, bytes);

	fdi_rx_iir = FDI_RX_IIR(index);

	ret = check_fdi_rx_train_status(vgpu, index, FDI_LINK_TRAIN_PATTERN1);
	if (ret < 0)
		return ret;
	if (ret)
		vgpu_vreg_t(vgpu, fdi_rx_iir) |= FDI_RX_BIT_LOCK;

	ret = check_fdi_rx_train_status(vgpu, index, FDI_LINK_TRAIN_PATTERN2);
	if (ret < 0)
		return ret;
	if (ret)
		vgpu_vreg_t(vgpu, fdi_rx_iir) |= FDI_RX_SYMBOL_LOCK;

	if (offset == _FDI_RXA_CTL)
		if (fdi_auto_training_started(vgpu))
			vgpu_vreg_t(vgpu, DP_TP_STATUS(PORT_E)) |=
				DP_TP_STATUS_AUTOTRAIN_DONE;
	return 0;
}

#define DP_TP_CTL_TO_PORT(offset) \
	calc_index(offset, _DP_TP_CTL_A, _DP_TP_CTL_B, 0, DP_TP_CTL(PORT_E))

static int dp_tp_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	i915_reg_t status_reg;
	unsigned int index;
	u32 data;

	write_vreg(vgpu, offset, p_data, bytes);

	index = DP_TP_CTL_TO_PORT(offset);
	data = (vgpu_vreg(vgpu, offset) & GENMASK(10, 8)) >> 8;
	if (data == 0x2) {
		status_reg = DP_TP_STATUS(index);
		vgpu_vreg_t(vgpu, status_reg) |= (1 << 25);
	}
	return 0;
}

static int dp_tp_status_mmio_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	u32 reg_val;
	u32 sticky_mask;

	reg_val = *((u32 *)p_data);
	sticky_mask = GENMASK(27, 26) | (1 << 24);

	vgpu_vreg(vgpu, offset) = (reg_val & ~sticky_mask) |
		(vgpu_vreg(vgpu, offset) & sticky_mask);
	vgpu_vreg(vgpu, offset) &= ~(reg_val & sticky_mask);
	return 0;
}

static int pch_adpa_mmio_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	u32 data;

	write_vreg(vgpu, offset, p_data, bytes);
	data = vgpu_vreg(vgpu, offset);

	if (data & ADPA_CRT_HOTPLUG_FORCE_TRIGGER)
		vgpu_vreg(vgpu, offset) &= ~ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
	return 0;
}

static int south_chicken2_mmio_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	u32 data;

	write_vreg(vgpu, offset, p_data, bytes);
	data = vgpu_vreg(vgpu, offset);

	if (data & FDI_MPHY_IOSFSB_RESET_CTL)
		vgpu_vreg(vgpu, offset) |= FDI_MPHY_IOSFSB_RESET_STATUS;
	else
		vgpu_vreg(vgpu, offset) &= ~FDI_MPHY_IOSFSB_RESET_STATUS;
	return 0;
}

#define DSPSURF_TO_PIPE(offset) \
	calc_index(offset, _DSPASURF, _DSPBSURF, 0, DSPSURF(PIPE_C))

static int pri_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
	u32 pipe = DSPSURF_TO_PIPE(offset);
	int event = SKL_FLIP_EVENT(pipe, PLANE_PRIMARY);

	write_vreg(vgpu, offset, p_data, bytes);
	vgpu_vreg_t(vgpu, DSPSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);

	vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++;

	if (vgpu_vreg_t(vgpu, DSPCNTR(pipe)) & PLANE_CTL_ASYNC_FLIP)
		intel_vgpu_trigger_virtual_event(vgpu, event);
	else
		set_bit(event, vgpu->irq.flip_done_event[pipe]);

	return 0;
}

#define SPRSURF_TO_PIPE(offset) \
	calc_index(offset, _SPRA_SURF, _SPRB_SURF, 0, SPRSURF(PIPE_C))

static int spr_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	u32 pipe = SPRSURF_TO_PIPE(offset);
	int event = SKL_FLIP_EVENT(pipe, PLANE_SPRITE0);

	write_vreg(vgpu, offset, p_data, bytes);
	vgpu_vreg_t(vgpu, SPRSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);

	if (vgpu_vreg_t(vgpu, SPRCTL(pipe)) & PLANE_CTL_ASYNC_FLIP)
		intel_vgpu_trigger_virtual_event(vgpu, event);
	else
		set_bit(event, vgpu->irq.flip_done_event[pipe]);

	return 0;
}

static int reg50080_mmio_write(struct intel_vgpu *vgpu,
			       unsigned int offset, void *p_data,
			       unsigned int bytes)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
	enum pipe pipe = REG_50080_TO_PIPE(offset);
	enum plane_id plane = REG_50080_TO_PLANE(offset);
	int event = SKL_FLIP_EVENT(pipe, plane);

	write_vreg(vgpu, offset, p_data, bytes);
	if (plane == PLANE_PRIMARY) {
		vgpu_vreg_t(vgpu, DSPSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
		vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++;
	} else {
		vgpu_vreg_t(vgpu, SPRSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
	}

	if ((vgpu_vreg(vgpu, offset) & REG50080_FLIP_TYPE_MASK) == REG50080_FLIP_TYPE_ASYNC)
		intel_vgpu_trigger_virtual_event(vgpu, event);
	else
		set_bit(event, vgpu->irq.flip_done_event[pipe]);

	return 0;
}

static int trigger_aux_channel_interrupt(struct intel_vgpu *vgpu,
		unsigned int reg)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
	enum intel_gvt_event_type event;

	if (reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_A)))
		event = AUX_CHANNEL_A;
	else if (reg == _PCH_DPB_AUX_CH_CTL ||
		 reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_B)))
		event = AUX_CHANNEL_B;
	else if (reg == _PCH_DPC_AUX_CH_CTL ||
		 reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_C)))
		event = AUX_CHANNEL_C;
	else if (reg == _PCH_DPD_AUX_CH_CTL ||
		 reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_D)))
		event = AUX_CHANNEL_D;
	else {
		drm_WARN_ON(&dev_priv->drm, true);
		return -EINVAL;
	}

	intel_vgpu_trigger_virtual_event(vgpu, event);
	return 0;
}

static int dp_aux_ch_ctl_trans_done(struct intel_vgpu *vgpu, u32 value,
		unsigned int reg, int len, bool data_valid)
{
	/* mark transaction done */
	value |= DP_AUX_CH_CTL_DONE;
	value &= ~DP_AUX_CH_CTL_SEND_BUSY;
	value &= ~DP_AUX_CH_CTL_RECEIVE_ERROR;

	if (data_valid)
		value &= ~DP_AUX_CH_CTL_TIME_OUT_ERROR;
	else
		value |= DP_AUX_CH_CTL_TIME_OUT_ERROR;

	/* message size */
	value &= ~(0xf << 20);
	value |= (len << 20);
	vgpu_vreg(vgpu, reg) = value;

	if (value & DP_AUX_CH_CTL_INTERRUPT)
		return trigger_aux_channel_interrupt(vgpu, reg);
	return 0;
}

static void dp_aux_ch_ctl_link_training(struct intel_vgpu_dpcd_data *dpcd,
		u8 t)
{
	if ((t & DPCD_TRAINING_PATTERN_SET_MASK) == DPCD_TRAINING_PATTERN_1) {
		/* training pattern 1 for CR */
		/* set LANE0_CR_DONE, LANE1_CR_DONE */
		dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_LANES_CR_DONE;
		/* set LANE2_CR_DONE, LANE3_CR_DONE */
		dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_LANES_CR_DONE;
	} else if ((t & DPCD_TRAINING_PATTERN_SET_MASK) ==
			DPCD_TRAINING_PATTERN_2) {
		/* training pattern 2 for EQ */
		/* Set CHANNEL_EQ_DONE and SYMBOL_LOCKED for Lane0_1 */
		dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_LANES_EQ_DONE;
		dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_SYMBOL_LOCKED;
		/* Set CHANNEL_EQ_DONE and SYMBOL_LOCKED for Lane2_3 */
		dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_LANES_EQ_DONE;
		dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_SYMBOL_LOCKED;
		/* set INTERLANE_ALIGN_DONE */
		dpcd->data[DPCD_LANE_ALIGN_STATUS_UPDATED] |=
			DPCD_INTERLANE_ALIGN_DONE;
	} else if ((t & DPCD_TRAINING_PATTERN_SET_MASK) ==
			DPCD_LINK_TRAINING_DISABLED) {
		/* finish link training */
		/* set sink status as synchronized */
		dpcd->data[DPCD_SINK_STATUS] = DPCD_SINK_IN_SYNC;
	}
}
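
/*
 * Illustrative guest-side sequence (DPCD addresses per the DisplayPort
 * spec; this is what a typical guest driver writes while link training):
 *
 *   1. write TRAINING_PATTERN_SET = pattern 1 -> we report CR_DONE
 *   2. write TRAINING_PATTERN_SET = pattern 2 -> we report EQ_DONE,
 *      SYMBOL_LOCKED and INTERLANE_ALIGN_DONE
 *   3. write TRAINING_PATTERN_SET = disabled  -> we report SINK_IN_SYNC
 *
 * Since no real link exists, every stage is acknowledged immediately.
 */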

#define _REG_HSW_DP_AUX_CH_CTL(dp) \
	((dp) ? (_PCH_DPB_AUX_CH_CTL + ((dp)-1)*0x100) : 0x64010)

#define _REG_SKL_DP_AUX_CH_CTL(dp) (0x64010 + (dp) * 0x100)

#define OFFSET_TO_DP_AUX_PORT(offset) (((offset) & 0xF00) >> 8)

#define dpy_is_valid_port(port)	\
		(((port) >= PORT_A) && ((port) < I915_MAX_PORTS))

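/*
 * Worked example for OFFSET_TO_DP_AUX_PORT() (per the AUX register layout
 * assumed by the macros above, where the per-port blocks are 0x100 apart
 * starting at 0x64010 for port A):
 *
 *   OFFSET_TO_DP_AUX_PORT(0x64010) = (0x64010 & 0xF00) >> 8 = 0 -> PORT_A
 *   OFFSET_TO_DP_AUX_PORT(0x64110) = (0x64110 & 0xF00) >> 8 = 1 -> PORT_B
 */
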
static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	struct intel_vgpu_display *display = &vgpu->display;
	int msg, addr, ctrl, op, len;
	int port_index = OFFSET_TO_DP_AUX_PORT(offset);
	struct intel_vgpu_dpcd_data *dpcd = NULL;
	struct intel_vgpu_port *port = NULL;
	u32 data;

	if (!dpy_is_valid_port(port_index)) {
		gvt_vgpu_err("Unsupported DP port access!\n");
		return 0;
	}

	write_vreg(vgpu, offset, p_data, bytes);
	data = vgpu_vreg(vgpu, offset);

	if ((GRAPHICS_VER(vgpu->gvt->gt->i915) >= 9)
		&& offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) {
		/* SKL DPB/C/D aux ctl register changed */
		return 0;
	} else if (IS_BROADWELL(vgpu->gvt->gt->i915) &&
		   offset != _REG_HSW_DP_AUX_CH_CTL(port_index)) {
		/* write to the data registers */
		return 0;
	}

	if (!(data & DP_AUX_CH_CTL_SEND_BUSY)) {
		/* just want to clear the sticky bits */
		vgpu_vreg(vgpu, offset) = 0;
		return 0;
	}

	port = &display->ports[port_index];
	dpcd = port->dpcd;

	/* read out message from DATA1 register */
	msg = vgpu_vreg(vgpu, offset + 4);
	addr = (msg >> 8) & 0xffff;
	ctrl = (msg >> 24) & 0xff;
	len = msg & 0xff;
	op = ctrl >> 4;

	if (op == GVT_AUX_NATIVE_WRITE) {
		int t;
		u8 buf[16];

		if ((addr + len + 1) >= DPCD_SIZE) {
			/*
			 * The write request exceeds what we support.
			 * DPCD spec: When a Source Device is writing a DPCD
			 * address not supported by the Sink Device, the Sink
			 * Device shall reply with AUX NACK and "M" equal to
			 * zero.
			 */

			/* NAK the write */
			vgpu_vreg(vgpu, offset + 4) = AUX_NATIVE_REPLY_NAK;
			dp_aux_ch_ctl_trans_done(vgpu, data, offset, 2, true);
			return 0;
		}

		/*
		 * Write request format: the header (command + address + size)
		 * occupies 4 bytes, followed by (len + 1) bytes of data. See
		 * details at intel_dp_aux_transfer().
		 */
		if ((len + 1 + 4) > AUX_BURST_SIZE) {
			gvt_vgpu_err("dp_aux_header: len %d is too large\n", len);
			return -EINVAL;
		}

		/* unpack data from vreg to buf */
		for (t = 0; t < 4; t++) {
			u32 r = vgpu_vreg(vgpu, offset + 8 + t * 4);

			buf[t * 4] = (r >> 24) & 0xff;
			buf[t * 4 + 1] = (r >> 16) & 0xff;
			buf[t * 4 + 2] = (r >> 8) & 0xff;
			buf[t * 4 + 3] = r & 0xff;
		}

		/* write to virtual DPCD */
		if (dpcd && dpcd->data_valid) {
			for (t = 0; t <= len; t++) {
				int p = addr + t;

				dpcd->data[p] = buf[t];
				/* check for link training */
				if (p == DPCD_TRAINING_PATTERN_SET)
					dp_aux_ch_ctl_link_training(dpcd,
							buf[t]);
			}
		}

		/* ACK the write */
		vgpu_vreg(vgpu, offset + 4) = 0;
		dp_aux_ch_ctl_trans_done(vgpu, data, offset, 1,
				dpcd && dpcd->data_valid);
		return 0;
	}

	if (op == GVT_AUX_NATIVE_READ) {
		int idx, i, ret = 0;

		if ((addr + len + 1) >= DPCD_SIZE) {
			/*
			 * The read request exceeds what we support.
			 * DPCD spec: A Sink Device receiving a Native AUX CH
			 * read request for an unsupported DPCD address must
			 * reply with an AUX ACK and read data set equal to
			 * zero instead of replying with AUX NACK.
			 */

			/* ACK the read */
			vgpu_vreg(vgpu, offset + 4) = 0;
			vgpu_vreg(vgpu, offset + 8) = 0;
			vgpu_vreg(vgpu, offset + 12) = 0;
			vgpu_vreg(vgpu, offset + 16) = 0;
			vgpu_vreg(vgpu, offset + 20) = 0;

			dp_aux_ch_ctl_trans_done(vgpu, data, offset, len + 2,
					true);
			return 0;
		}

		for (idx = 1; idx <= 5; idx++) {
			/* clear the data registers */
			vgpu_vreg(vgpu, offset + 4 * idx) = 0;
		}

		/*
		 * Read reply format: ACK (1 byte) plus (len + 1) bytes of data.
		 */
		if ((len + 2) > AUX_BURST_SIZE) {
			gvt_vgpu_err("dp_aux_header: len %d is too large\n", len);
			return -EINVAL;
		}

		/* read from virtual DPCD to vreg */
		/* first 4 bytes: [ACK][addr][addr+1][addr+2] */
		if (dpcd && dpcd->data_valid) {
			for (i = 1; i <= (len + 1); i++) {
				int t;

				t = dpcd->data[addr + i - 1];
				t <<= (24 - 8 * (i % 4));
				ret |= t;

				if ((i % 4 == 3) || (i == (len + 1))) {
					vgpu_vreg(vgpu, offset +
							(i / 4 + 1) * 4) = ret;
					ret = 0;
				}
			}
		}
		dp_aux_ch_ctl_trans_done(vgpu, data, offset, len + 2,
				dpcd && dpcd->data_valid);
		return 0;
	}

	/* i2c transaction starts */
	intel_gvt_i2c_handle_aux_ch_write(vgpu, port_index, offset, p_data);

	if (data & DP_AUX_CH_CTL_INTERRUPT)
		trigger_aux_channel_interrupt(vgpu, offset);
	return 0;
}
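
/*
 * Worked example of the DATA1 header decode above (a hypothetical message,
 * shown only to document the packing): msg = 0x90020201 gives
 *
 *   ctrl = (msg >> 24) & 0xff  = 0x90, op = ctrl >> 4 = 0x9
 *          (a native read, assuming the standard AUX command encoding)
 *   addr = (msg >> 8) & 0xffff = 0x0202
 *   len  = msg & 0xff          = 1 (i.e. len + 1 = 2 bytes transferred)
 *
 * so the guest is asking for a 2-byte native AUX read of DPCD 0x202, the
 * LANE0_1 status register.
 */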

static int mbctl_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	*(u32 *)p_data &= (~GEN6_MBCTL_ENABLE_BOOT_FETCH);
	write_vreg(vgpu, offset, p_data, bytes);
	return 0;
}

static int vga_control_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	bool vga_disable;

	write_vreg(vgpu, offset, p_data, bytes);
	vga_disable = vgpu_vreg(vgpu, offset) & VGA_DISP_DISABLE;

	gvt_dbg_core("vgpu%d: %s VGA mode\n", vgpu->id,
			vga_disable ? "Disable" : "Enable");
	return 0;
}

static u32 read_virtual_sbi_register(struct intel_vgpu *vgpu,
		unsigned int sbi_offset)
{
	struct intel_vgpu_display *display = &vgpu->display;
	int num = display->sbi.number;
	int i;

	for (i = 0; i < num; ++i)
		if (display->sbi.registers[i].offset == sbi_offset)
			break;

	if (i == num)
		return 0;

	return display->sbi.registers[i].value;
}

static void write_virtual_sbi_register(struct intel_vgpu *vgpu,
		unsigned int offset, u32 value)
{
	struct intel_vgpu_display *display = &vgpu->display;
	int num = display->sbi.number;
	int i;

	for (i = 0; i < num; ++i) {
		if (display->sbi.registers[i].offset == offset)
			break;
	}

	if (i == num) {
		if (num == SBI_REG_MAX) {
			gvt_vgpu_err("SBI register cache is full\n");
			return;
		}
		display->sbi.number++;
	}

	display->sbi.registers[i].offset = offset;
	display->sbi.registers[i].value = value;
}

static int sbi_data_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	if (((vgpu_vreg_t(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >>
				SBI_OPCODE_SHIFT) == SBI_CMD_CRRD) {
		unsigned int sbi_offset = (vgpu_vreg_t(vgpu, SBI_ADDR) &
				SBI_ADDR_OFFSET_MASK) >> SBI_ADDR_OFFSET_SHIFT;
		vgpu_vreg(vgpu, offset) = read_virtual_sbi_register(vgpu,
				sbi_offset);
	}
	read_vreg(vgpu, offset, p_data, bytes);
	return 0;
}

static int sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	u32 data;

	write_vreg(vgpu, offset, p_data, bytes);
	data = vgpu_vreg(vgpu, offset);

	data &= ~(SBI_STAT_MASK << SBI_STAT_SHIFT);
	data |= SBI_READY;

	data &= ~(SBI_RESPONSE_MASK << SBI_RESPONSE_SHIFT);
	data |= SBI_RESPONSE_SUCCESS;

	vgpu_vreg(vgpu, offset) = data;

	if (((vgpu_vreg_t(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >>
				SBI_OPCODE_SHIFT) == SBI_CMD_CRWR) {
		unsigned int sbi_offset = (vgpu_vreg_t(vgpu, SBI_ADDR) &
				SBI_ADDR_OFFSET_MASK) >> SBI_ADDR_OFFSET_SHIFT;

		write_virtual_sbi_register(vgpu, sbi_offset,
					   vgpu_vreg_t(vgpu, SBI_DATA));
	}
	return 0;
}

#define _vgtif_reg(x) \
	(VGT_PVINFO_PAGE + offsetof(struct vgt_if, x))
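
/*
 * Example expansion (VGT_PVINFO_PAGE is 0x78000 in i915_pvinfo.h): since
 * 'magic' is the first field of struct vgt_if,
 *
 *   _vgtif_reg(magic) == 0x78000 + offsetof(struct vgt_if, magic) == 0x78000
 *
 * and every other PVINFO register follows at VGT_PVINFO_PAGE plus its
 * field's offset within struct vgt_if.
 */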

static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	bool invalid_read = false;

	read_vreg(vgpu, offset, p_data, bytes);

	switch (offset) {
	case _vgtif_reg(magic) ... _vgtif_reg(vgt_id):
		if (offset + bytes > _vgtif_reg(vgt_id) + 4)
			invalid_read = true;
		break;
	case _vgtif_reg(avail_rs.mappable_gmadr.base) ...
		_vgtif_reg(avail_rs.fence_num):
		if (offset + bytes >
			_vgtif_reg(avail_rs.fence_num) + 4)
			invalid_read = true;
		break;
	case 0x78010:	/* vgt_caps */
	case 0x7881c:
		break;
	default:
		invalid_read = true;
		break;
	}
	if (invalid_read)
		gvt_vgpu_err("invalid pvinfo read: [%x:%x] = %x\n",
				offset, bytes, *(u32 *)p_data);
	vgpu->pv_notified = true;
	return 0;
}

static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
{
	enum intel_gvt_gtt_type root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
	struct intel_vgpu_mm *mm;
	u64 *pdps;

	pdps = (u64 *)&vgpu_vreg64_t(vgpu, vgtif_reg(pdp[0]));

	switch (notification) {
	case VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE:
		root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
		fallthrough;
	case VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE:
		mm = intel_vgpu_get_ppgtt_mm(vgpu, root_entry_type, pdps);
		return PTR_ERR_OR_ZERO(mm);
	case VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY:
	case VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY:
		return intel_vgpu_put_ppgtt_mm(vgpu, pdps);
	case VGT_G2V_EXECLIST_CONTEXT_CREATE:
	case VGT_G2V_EXECLIST_CONTEXT_DESTROY:
	case 1:	/* Remove this in guest driver. */
		break;
	default:
		gvt_vgpu_err("Invalid PV notification %d\n", notification);
	}
	return 0;
}

static int send_display_ready_uevent(struct intel_vgpu *vgpu, int ready)
{
	struct kobject *kobj = &vgpu->gvt->gt->i915->drm.primary->kdev->kobj;
	char *env[3] = {NULL, NULL, NULL};
	char vmid_str[20];
	char display_ready_str[20];

	snprintf(display_ready_str, 20, "GVT_DISPLAY_READY=%d", ready);
	env[0] = display_ready_str;

	snprintf(vmid_str, 20, "VMID=%d", vgpu->id);
	env[1] = vmid_str;

	return kobject_uevent_env(kobj, KOBJ_ADD, env);
}

static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	u32 data = *(u32 *)p_data;
	bool invalid_write = false;

	switch (offset) {
	case _vgtif_reg(display_ready):
		send_display_ready_uevent(vgpu, data ? 1 : 0);
		break;
	case _vgtif_reg(g2v_notify):
		handle_g2v_notification(vgpu, data);
		break;
	/* xhot and yhot are on the handled list to avoid error logs */
	case _vgtif_reg(cursor_x_hot):
	case _vgtif_reg(cursor_y_hot):
	case _vgtif_reg(pdp[0].lo):
	case _vgtif_reg(pdp[0].hi):
	case _vgtif_reg(pdp[1].lo):
	case _vgtif_reg(pdp[1].hi):
	case _vgtif_reg(pdp[2].lo):
	case _vgtif_reg(pdp[2].hi):
	case _vgtif_reg(pdp[3].lo):
	case _vgtif_reg(pdp[3].hi):
	case _vgtif_reg(execlist_context_descriptor_lo):
	case _vgtif_reg(execlist_context_descriptor_hi):
		break;
	case _vgtif_reg(rsv5[0])..._vgtif_reg(rsv5[3]):
		invalid_write = true;
		enter_failsafe_mode(vgpu, GVT_FAILSAFE_INSUFFICIENT_RESOURCE);
		break;
	default:
		invalid_write = true;
		gvt_vgpu_err("invalid pvinfo write offset %x bytes %x data %x\n",
				offset, bytes, data);
		break;
	}

	if (!invalid_write)
		write_vreg(vgpu, offset, p_data, bytes);

	return 0;
}

static int pf_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
	u32 val = *(u32 *)p_data;

	if ((offset == _PS_1A_CTRL || offset == _PS_2A_CTRL ||
	   offset == _PS_1B_CTRL || offset == _PS_2B_CTRL ||
	   offset == _PS_1C_CTRL) && (val & PS_PLANE_SEL_MASK) != 0) {
		drm_WARN_ONCE(&i915->drm, true,
			      "VM(%d): guest is trying to scale a plane\n",
			      vgpu->id);
		return 0;
	}

	return intel_vgpu_default_mmio_write(vgpu, offset, p_data, bytes);
}

static int power_well_ctl_mmio_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	write_vreg(vgpu, offset, p_data, bytes);

	if (vgpu_vreg(vgpu, offset) &
	    HSW_PWR_WELL_CTL_REQ(HSW_PW_CTL_IDX_GLOBAL))
		vgpu_vreg(vgpu, offset) |=
			HSW_PWR_WELL_CTL_STATE(HSW_PW_CTL_IDX_GLOBAL);
	else
		vgpu_vreg(vgpu, offset) &=
			~HSW_PWR_WELL_CTL_STATE(HSW_PW_CTL_IDX_GLOBAL);
	return 0;
}

static int gen9_dbuf_ctl_mmio_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	write_vreg(vgpu, offset, p_data, bytes);

	if (vgpu_vreg(vgpu, offset) & DBUF_POWER_REQUEST)
		vgpu_vreg(vgpu, offset) |= DBUF_POWER_STATE;
	else
		vgpu_vreg(vgpu, offset) &= ~DBUF_POWER_STATE;

	return 0;
}

static int fpga_dbg_mmio_write(struct intel_vgpu *vgpu,
	unsigned int offset, void *p_data, unsigned int bytes)
{
	write_vreg(vgpu, offset, p_data, bytes);

	if (vgpu_vreg(vgpu, offset) & FPGA_DBG_RM_NOCLAIM)
		vgpu_vreg(vgpu, offset) &= ~FPGA_DBG_RM_NOCLAIM;
	return 0;
}

static int dma_ctrl_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
	u32 mode;

	write_vreg(vgpu, offset, p_data, bytes);
	mode = vgpu_vreg(vgpu, offset);

	if (GFX_MODE_BIT_SET_IN_MASK(mode, START_DMA)) {
		drm_WARN_ONCE(&i915->drm, 1,
				"VM(%d): iGVT-g doesn't support GuC\n",
				vgpu->id);
		return 0;
	}

	return 0;
}

static int gen9_trtte_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
	u32 trtte = *(u32 *)p_data;

	if ((trtte & 1) && (trtte & (1 << 1)) == 0) {
		drm_WARN(&i915->drm, 1,
				"VM(%d): Use physical address for TRTT!\n",
				vgpu->id);
		return -EINVAL;
	}
	write_vreg(vgpu, offset, p_data, bytes);

	return 0;
}

static int gen9_trtt_chicken_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	write_vreg(vgpu, offset, p_data, bytes);
	return 0;
}

static int dpll_status_read(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	u32 v = 0;

	if (vgpu_vreg(vgpu, 0x46010) & (1 << 31))
		v |= (1 << 0);

	if (vgpu_vreg(vgpu, 0x46014) & (1 << 31))
		v |= (1 << 8);

	if (vgpu_vreg(vgpu, 0x46040) & (1 << 31))
		v |= (1 << 16);

	if (vgpu_vreg(vgpu, 0x46060) & (1 << 31))
		v |= (1 << 24);

	vgpu_vreg(vgpu, offset) = v;

	return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
}

static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	u32 value = *(u32 *)p_data;
	u32 cmd = value & 0xff;
	u32 *data0 = &vgpu_vreg_t(vgpu, GEN6_PCODE_DATA);

	switch (cmd) {
	case GEN9_PCODE_READ_MEM_LATENCY:
		if (IS_SKYLAKE(vgpu->gvt->gt->i915) ||
		    IS_KABYLAKE(vgpu->gvt->gt->i915) ||
		    IS_COFFEELAKE(vgpu->gvt->gt->i915) ||
		    IS_COMETLAKE(vgpu->gvt->gt->i915)) {
			/*
			 * "Read memory latency" command on gen9.
			 * The memory latency values below were read
			 * from a Skylake platform.
			 */
			if (!*data0)
				*data0 = 0x1e1a1100;
			else
				*data0 = 0x61514b3d;
		} else if (IS_BROXTON(vgpu->gvt->gt->i915)) {
			/*
			 * "Read memory latency" command on gen9.
			 * The memory latency values below were read
			 * from a Broxton MRB.
			 */
			if (!*data0)
				*data0 = 0x16080707;
			else
				*data0 = 0x16161616;
		}
		break;
	case SKL_PCODE_CDCLK_CONTROL:
		if (IS_SKYLAKE(vgpu->gvt->gt->i915) ||
		    IS_KABYLAKE(vgpu->gvt->gt->i915) ||
		    IS_COFFEELAKE(vgpu->gvt->gt->i915) ||
		    IS_COMETLAKE(vgpu->gvt->gt->i915))
			*data0 = SKL_CDCLK_READY_FOR_CHANGE;
		break;
	case GEN6_PCODE_READ_RC6VIDS:
		*data0 |= 0x1;
		break;
	}

	gvt_dbg_core("VM(%d) write %x to mailbox, return data0 %x\n",
		     vgpu->id, value, *data0);
	/*
	 * A clear PCODE_READY means the pcode is ready for read/write, and a
	 * clear PCODE_ERROR_MASK means no error happened. In GVT-g we always
	 * emulate a successful pcode read/write that is ready for access at
	 * any time, since we don't touch real physical registers here.
	 */
	value &= ~(GEN6_PCODE_READY | GEN6_PCODE_ERROR_MASK);
	return intel_vgpu_default_mmio_write(vgpu, offset, &value, bytes);
}

static int hws_pga_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	u32 value = *(u32 *)p_data;
	const struct intel_engine_cs *engine =
		intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);

	if (value != 0 &&
	    !intel_gvt_ggtt_validate_range(vgpu, value, I915_GTT_PAGE_SIZE)) {
		gvt_vgpu_err("write invalid HWSP address, reg:0x%x, value:0x%x\n",
			      offset, value);
		return -EINVAL;
	}

	/*
	 * All HWSP register writes need to be emulated so that the host can
	 * update the VM's CSB status correctly. The registers handled here
	 * cover BDW, SKL and other platforms with the same HWSP registers.
	 */
1758 	if (unlikely(!engine)) {
1759 		gvt_vgpu_err("access unknown hardware status page register:0x%x\n",
1760 			     offset);
1761 		return -EINVAL;
1762 	}
1763 	vgpu->hws_pga[engine->id] = value;
1764 	gvt_dbg_mmio("VM(%d) write: 0x%x to HWSP: 0x%x\n",
1765 		     vgpu->id, value, offset);
1766 
1767 	return intel_vgpu_default_mmio_write(vgpu, offset, &value, bytes);
1768 }
1769 
1770 static int skl_power_well_ctl_write(struct intel_vgpu *vgpu,
1771 		unsigned int offset, void *p_data, unsigned int bytes)
1772 {
1773 	u32 v = *(u32 *)p_data;
1774 
1775 	if (IS_BROXTON(vgpu->gvt->gt->i915))
1776 		v &= (1 << 31) | (1 << 29);
1777 	else
1778 		v &= (1 << 31) | (1 << 29) | (1 << 9) |
1779 			(1 << 7) | (1 << 5) | (1 << 3) | (1 << 1);
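	/*
	 * Each power-well "request" bit sits one bit above its "state" bit,
	 * so shifting the surviving request bits right by one reports every
	 * requested well as immediately powered up.
	 */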
1780 	v |= (v >> 1);
1781 
1782 	return intel_vgpu_default_mmio_write(vgpu, offset, &v, bytes);
1783 }
1784 
1785 static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset,
1786 		void *p_data, unsigned int bytes)
1787 {
1788 	u32 v = *(u32 *)p_data;
1789 
	/* All other bits are MBZ. */
	v &= (1 << 31) | (1 << 30);

	/* Report the PLL as locked (bit 30) whenever it is enabled (bit 31). */
	if (v & (1 << 31))
		v |= (1 << 30);
	else
		v &= ~(1 << 30);
1793 
1794 	vgpu_vreg(vgpu, offset) = v;
1795 
1796 	return 0;
1797 }
1798 
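/*
 * The two BXT PLL handlers below emulate lock as instantaneous: whenever
 * the guest sets an enable bit, the matching lock bit is reported
 * immediately, since there is no physical PLL to wait on.
 */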
1799 static int bxt_de_pll_enable_write(struct intel_vgpu *vgpu,
1800 		unsigned int offset, void *p_data, unsigned int bytes)
1801 {
1802 	u32 v = *(u32 *)p_data;
1803 
1804 	if (v & BXT_DE_PLL_PLL_ENABLE)
1805 		v |= BXT_DE_PLL_LOCK;
1806 
1807 	vgpu_vreg(vgpu, offset) = v;
1808 
1809 	return 0;
1810 }
1811 
1812 static int bxt_port_pll_enable_write(struct intel_vgpu *vgpu,
1813 		unsigned int offset, void *p_data, unsigned int bytes)
1814 {
1815 	u32 v = *(u32 *)p_data;
1816 
1817 	if (v & PORT_PLL_ENABLE)
1818 		v |= PORT_PLL_LOCK;
1819 
1820 	vgpu_vreg(vgpu, offset) = v;
1821 
1822 	return 0;
1823 }
1824 
1825 static int bxt_phy_ctl_family_write(struct intel_vgpu *vgpu,
1826 		unsigned int offset, void *p_data, unsigned int bytes)
1827 {
1828 	u32 v = *(u32 *)p_data;
	u32 data = (v & COMMON_RESET_DIS) ? BXT_PHY_LANE_ENABLED : 0;
1830 
1831 	switch (offset) {
1832 	case _PHY_CTL_FAMILY_EDP:
1833 		vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_A) = data;
1834 		break;
1835 	case _PHY_CTL_FAMILY_DDI:
1836 		vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_B) = data;
1837 		vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_C) = data;
1838 		break;
1839 	}
1840 
1841 	vgpu_vreg(vgpu, offset) = v;
1842 
1843 	return 0;
1844 }
1845 
1846 static int bxt_port_tx_dw3_read(struct intel_vgpu *vgpu,
1847 		unsigned int offset, void *p_data, unsigned int bytes)
1848 {
1849 	u32 v = vgpu_vreg(vgpu, offset);
1850 
1851 	v &= ~UNIQUE_TRANGE_EN_METHOD;
1852 
1853 	vgpu_vreg(vgpu, offset) = v;
1854 
1855 	return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
1856 }
1857 
1858 static int bxt_pcs_dw12_grp_write(struct intel_vgpu *vgpu,
1859 		unsigned int offset, void *p_data, unsigned int bytes)
1860 {
1861 	u32 v = *(u32 *)p_data;
1862 
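	/*
	 * A write to the group register is broadcast to the per-lane copies
	 * of PCS_DW12, which sit at fixed offsets below the group register.
	 */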
1863 	if (offset == _PORT_PCS_DW12_GRP_A || offset == _PORT_PCS_DW12_GRP_B) {
1864 		vgpu_vreg(vgpu, offset - 0x600) = v;
1865 		vgpu_vreg(vgpu, offset - 0x800) = v;
1866 	} else {
1867 		vgpu_vreg(vgpu, offset - 0x400) = v;
1868 		vgpu_vreg(vgpu, offset - 0x600) = v;
1869 	}
1870 
1871 	vgpu_vreg(vgpu, offset) = v;
1872 
1873 	return 0;
1874 }
1875 
1876 static int bxt_gt_disp_pwron_write(struct intel_vgpu *vgpu,
1877 		unsigned int offset, void *p_data, unsigned int bytes)
1878 {
1879 	u32 v = *(u32 *)p_data;
1880 
1881 	if (v & BIT(0)) {
1882 		vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &=
1883 			~PHY_RESERVED;
1884 		vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) |=
1885 			PHY_POWER_GOOD;
1886 	}
1887 
1888 	if (v & BIT(1)) {
1889 		vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) &=
1890 			~PHY_RESERVED;
1891 		vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) |=
1892 			PHY_POWER_GOOD;
1893 	}
1894 
1896 	vgpu_vreg(vgpu, offset) = v;
1897 
1898 	return 0;
1899 }
1900 
1901 static int edp_psr_imr_iir_write(struct intel_vgpu *vgpu,
1902 		unsigned int offset, void *p_data, unsigned int bytes)
1903 {
1904 	vgpu_vreg(vgpu, offset) = 0;
1905 	return 0;
1906 }
1907 
/*
 * FixMe:
 * If a guest fills a non-privileged batch buffer on ApolloLake/Broxton the
 * way Mesa i965 did in:
 * 717e7539124d (i965: Use a WC map and memcpy for the batch instead of pwrite.)
 * then, due to the missing flush of the batch buffer filled by the VM vCPU,
 * the host GPU hangs on executing these MI_BATCH_BUFFERs.
 * Temporarily work around this by setting the SNOOP bit for PAT3, which is
 * used by the PPGTT PML4 PTE: PAT(0) PCD(1) PWT(1).
 * Performance is still expected to be low; this will need further improvement.
 */
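/*
 * For reference (relying on the GEN8_PPAT() definition in i915):
 * GEN8_PPAT(i, x) places attribute byte x at byte i of the 64-bit PPAT
 * value, e.g.
 *
 *	GEN8_PPAT(3, CHV_PPAT_SNOOP) == (u64)CHV_PPAT_SNOOP << (3 * 8)
 *
 * so the value built below snoops every PAT index except 1 and 2.
 */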
1918 static int bxt_ppat_low_write(struct intel_vgpu *vgpu, unsigned int offset,
1919 			      void *p_data, unsigned int bytes)
1920 {
1921 	u64 pat =
1922 		GEN8_PPAT(0, CHV_PPAT_SNOOP) |
1923 		GEN8_PPAT(1, 0) |
1924 		GEN8_PPAT(2, 0) |
1925 		GEN8_PPAT(3, CHV_PPAT_SNOOP) |
1926 		GEN8_PPAT(4, CHV_PPAT_SNOOP) |
1927 		GEN8_PPAT(5, CHV_PPAT_SNOOP) |
1928 		GEN8_PPAT(6, CHV_PPAT_SNOOP) |
1929 		GEN8_PPAT(7, CHV_PPAT_SNOOP);
1930 
1931 	vgpu_vreg(vgpu, offset) = lower_32_bits(pat);
1932 
1933 	return 0;
1934 }
1935 
1936 static int guc_status_read(struct intel_vgpu *vgpu,
1937 			   unsigned int offset, void *p_data,
1938 			   unsigned int bytes)
1939 {
1940 	/* keep MIA_IN_RESET before clearing */
1941 	read_vreg(vgpu, offset, p_data, bytes);
1942 	vgpu_vreg(vgpu, offset) &= ~GS_MIA_IN_RESET;
1943 	return 0;
1944 }
1945 
1946 static int mmio_read_from_hw(struct intel_vgpu *vgpu,
1947 		unsigned int offset, void *p_data, unsigned int bytes)
1948 {
1949 	struct intel_gvt *gvt = vgpu->gvt;
1950 	const struct intel_engine_cs *engine =
1951 		intel_gvt_render_mmio_to_engine(gvt, offset);
1952 
	/*
	 * Read the HW register when:
	 * a. the offset is not a ring MMIO, or
	 * b. the offset's ring is currently running on the HW, or
	 * c. the offset is a ring timestamp MMIO.
	 */
1959 
1960 	if (!engine ||
1961 	    vgpu == gvt->scheduler.engine_owner[engine->id] ||
1962 	    offset == i915_mmio_reg_offset(RING_TIMESTAMP(engine->mmio_base)) ||
1963 	    offset == i915_mmio_reg_offset(RING_TIMESTAMP_UDW(engine->mmio_base))) {
1964 		mmio_hw_access_pre(gvt->gt);
1965 		vgpu_vreg(vgpu, offset) =
1966 			intel_uncore_read(gvt->gt->uncore, _MMIO(offset));
1967 		mmio_hw_access_post(gvt->gt);
1968 	}
1969 
1970 	return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
1971 }
1972 
1973 static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
1974 		void *p_data, unsigned int bytes)
1975 {
1976 	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
1977 	const struct intel_engine_cs *engine = intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
1978 	struct intel_vgpu_execlist *execlist;
1979 	u32 data = *(u32 *)p_data;
1980 	int ret = 0;
1981 
1982 	if (drm_WARN_ON(&i915->drm, !engine))
1983 		return -EINVAL;
1984 
	/*
	 * d3_entered indicates that PPGTT invalidation should be skipped on
	 * vGPU reset: it is set on the D0->D3 PCI config write and cleared
	 * after a vGPU reset during resume.
	 * On S0ix exit, the device power state also transitions from D3 to
	 * D0, as in S3 resume, but without a vGPU reset (which would be
	 * triggered by the QEMU device model). After S0ix exit all engines
	 * continue to work, yet d3_entered remains set, which breaks the
	 * next vGPU reset (the expected PPGTT invalidation is missed).
	 * Engines can only run in D0, so the first ELSP write gives GVT a
	 * chance to clear d3_entered.
	 */
1997 	if (vgpu->d3_entered)
1998 		vgpu->d3_entered = false;
1999 
2000 	execlist = &vgpu->submission.execlist[engine->id];
2001 
2002 	execlist->elsp_dwords.data[3 - execlist->elsp_dwords.index] = data;
2003 	if (execlist->elsp_dwords.index == 3) {
2004 		ret = intel_vgpu_submit_execlist(vgpu, engine);
		if (ret)
2006 			gvt_vgpu_err("fail submit workload on ring %s\n",
2007 				     engine->name);
2008 	}
2009 
2010 	++execlist->elsp_dwords.index;
2011 	execlist->elsp_dwords.index &= 0x3;
2012 	return ret;
2013 }
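
/*
 * Minimal illustrative sketch (not upstream code): how a guest execlist
 * submission drives elsp_mmio_write() above. The guest writes four dwords
 * (two context descriptors, upper dword first) to the engine's ELSP
 * register; the fourth write (index 3) triggers
 * intel_vgpu_submit_execlist(). RING_ELSP() is assumed to come from the
 * i915 execlist register definitions.
 */
static void __maybe_unused example_guest_elsp_submit(struct intel_vgpu *vgpu,
		const struct intel_engine_cs *engine, const u32 desc[4])
{
	u32 reg = i915_mmio_reg_offset(RING_ELSP(engine->mmio_base));
	unsigned int i;
	u32 tmp;

	for (i = 0; i < 4; i++) {
		tmp = desc[i];
		elsp_mmio_write(vgpu, reg, &tmp, 4);
	}
}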
2014 
2015 static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
2016 		void *p_data, unsigned int bytes)
2017 {
	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
	u32 data = *(u32 *)p_data;
	const struct intel_engine_cs *engine =
		intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
	bool enable_execlist;
	int ret;
2023 
2024 	(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(1);
	if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915))
		(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(2);
2028 	write_vreg(vgpu, offset, p_data, bytes);
2029 
2030 	if (IS_MASKED_BITS_ENABLED(data, 1)) {
2031 		enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
2032 		return 0;
2033 	}
2034 
	if ((IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) &&
	    IS_MASKED_BITS_ENABLED(data, 2)) {
2038 		enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
2039 		return 0;
2040 	}
2041 
	/*
	 * When PPGTT mode is enabled, check whether the guest has called
	 * into pvinfo; if not, treat it as a non-GVT-g-aware guest and stop
	 * emulating its cfg space, MMIO, GTT, etc.
	 */
2046 	if ((IS_MASKED_BITS_ENABLED(data, GFX_PPGTT_ENABLE) ||
2047 	    IS_MASKED_BITS_ENABLED(data, GFX_RUN_LIST_ENABLE)) &&
2048 	    !vgpu->pv_notified) {
2049 		enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
2050 		return 0;
2051 	}
2052 	if (IS_MASKED_BITS_ENABLED(data, GFX_RUN_LIST_ENABLE) ||
2053 	    IS_MASKED_BITS_DISABLED(data, GFX_RUN_LIST_ENABLE)) {
2054 		enable_execlist = !!(data & GFX_RUN_LIST_ENABLE);
2055 
2056 		gvt_dbg_core("EXECLIST %s on ring %s\n",
2057 			     (enable_execlist ? "enabling" : "disabling"),
2058 			     engine->name);
2059 
2060 		if (!enable_execlist)
2061 			return 0;
2062 
2063 		ret = intel_vgpu_select_submission_ops(vgpu,
2064 						       engine->mask,
2065 						       INTEL_VGPU_EXECLIST_SUBMISSION);
2066 		if (ret)
2067 			return ret;
2068 
2069 		intel_vgpu_start_schedule(vgpu);
2070 	}
2071 	return 0;
2072 }
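
/*
 * Illustration: a GVT-aware guest enables execlist submission by writing
 * the masked-bit form _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE) to the
 * ring's 0x29c MODE register; the handler above then selects
 * INTEL_VGPU_EXECLIST_SUBMISSION and starts the vGPU scheduler.
 */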
2073 
2074 static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
2075 		unsigned int offset, void *p_data, unsigned int bytes)
2076 {
2077 	unsigned int id = 0;
2078 
2079 	write_vreg(vgpu, offset, p_data, bytes);
2080 	vgpu_vreg(vgpu, offset) = 0;
2081 
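	/* 0x4260-0x4270 are the per-engine TLB invalidation registers. */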
2082 	switch (offset) {
2083 	case 0x4260:
2084 		id = RCS0;
2085 		break;
2086 	case 0x4264:
2087 		id = VCS0;
2088 		break;
2089 	case 0x4268:
2090 		id = VCS1;
2091 		break;
2092 	case 0x426c:
2093 		id = BCS0;
2094 		break;
2095 	case 0x4270:
2096 		id = VECS0;
2097 		break;
2098 	default:
2099 		return -EINVAL;
2100 	}
2101 	set_bit(id, (void *)vgpu->submission.tlb_handle_pending);
2102 
2103 	return 0;
2104 }
2105 
2106 static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
2107 	unsigned int offset, void *p_data, unsigned int bytes)
2108 {
2109 	u32 data;
2110 
2111 	write_vreg(vgpu, offset, p_data, bytes);
2112 	data = vgpu_vreg(vgpu, offset);
2113 
2114 	if (IS_MASKED_BITS_ENABLED(data, RESET_CTL_REQUEST_RESET))
2115 		data |= RESET_CTL_READY_TO_RESET;
2116 	else if (data & _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET))
2117 		data &= ~RESET_CTL_READY_TO_RESET;
2118 
2119 	vgpu_vreg(vgpu, offset) = data;
2120 	return 0;
2121 }
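
/*
 * Illustration: a guest requesting an engine reset writes
 * _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET); the handler above replies
 * with RESET_CTL_READY_TO_RESET, so the guest's reset handshake completes
 * without touching real hardware.
 */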
2122 
2123 static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu,
2124 				    unsigned int offset, void *p_data,
2125 				    unsigned int bytes)
2126 {
2127 	u32 data = *(u32 *)p_data;
2128 
2129 	(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(0x18);
2130 	write_vreg(vgpu, offset, p_data, bytes);
2131 
2132 	if (IS_MASKED_BITS_ENABLED(data, 0x10) ||
2133 	    IS_MASKED_BITS_ENABLED(data, 0x8))
2134 		enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
2135 
2136 	return 0;
2137 }
2138 
2139 #define MMIO_F(reg, s, f, am, rm, d, r, w) do { \
2140 	ret = new_mmio_info(gvt, i915_mmio_reg_offset(reg), \
2141 		f, s, am, rm, d, r, w); \
2142 	if (ret) \
2143 		return ret; \
2144 } while (0)
2145 
2146 #define MMIO_D(reg, d) \
2147 	MMIO_F(reg, 4, 0, 0, 0, d, NULL, NULL)
2148 
2149 #define MMIO_DH(reg, d, r, w) \
2150 	MMIO_F(reg, 4, 0, 0, 0, d, r, w)
2151 
2152 #define MMIO_DFH(reg, d, f, r, w) \
2153 	MMIO_F(reg, 4, f, 0, 0, d, r, w)
2154 
2155 #define MMIO_GM(reg, d, r, w) \
2156 	MMIO_F(reg, 4, F_GMADR, 0xFFFFF000, 0, d, r, w)
2157 
2158 #define MMIO_GM_RDR(reg, d, r, w) \
2159 	MMIO_F(reg, 4, F_GMADR | F_CMD_ACCESS, 0xFFFFF000, 0, d, r, w)
2160 
2161 #define MMIO_RO(reg, d, f, rm, r, w) \
2162 	MMIO_F(reg, 4, F_RO | f, 0, rm, d, r, w)
2163 
2164 #define MMIO_RING_F(prefix, s, f, am, rm, d, r, w) do { \
2165 	MMIO_F(prefix(RENDER_RING_BASE), s, f, am, rm, d, r, w); \
2166 	MMIO_F(prefix(BLT_RING_BASE), s, f, am, rm, d, r, w); \
2167 	MMIO_F(prefix(GEN6_BSD_RING_BASE), s, f, am, rm, d, r, w); \
2168 	MMIO_F(prefix(VEBOX_RING_BASE), s, f, am, rm, d, r, w); \
2169 	if (HAS_ENGINE(gvt->gt, VCS1)) \
2170 		MMIO_F(prefix(GEN8_BSD2_RING_BASE), s, f, am, rm, d, r, w); \
2171 } while (0)
2172 
2173 #define MMIO_RING_D(prefix, d) \
2174 	MMIO_RING_F(prefix, 4, 0, 0, 0, d, NULL, NULL)
2175 
2176 #define MMIO_RING_DFH(prefix, d, f, r, w) \
2177 	MMIO_RING_F(prefix, 4, f, 0, 0, d, r, w)
2178 
2179 #define MMIO_RING_GM(prefix, d, r, w) \
2180 	MMIO_RING_F(prefix, 4, F_GMADR, 0xFFFF0000, 0, d, r, w)
2181 
2182 #define MMIO_RING_GM_RDR(prefix, d, r, w) \
2183 	MMIO_RING_F(prefix, 4, F_GMADR | F_CMD_ACCESS, 0xFFFF0000, 0, d, r, w)
2184 
2185 #define MMIO_RING_RO(prefix, d, f, rm, r, w) \
2186 	MMIO_RING_F(prefix, 4, F_RO | f, 0, rm, d, r, w)
2187 
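/*
 * For reference, a registration such as
 *
 *	MMIO_DH(GEN6_PCODE_MAILBOX, D_BDW_PLUS, NULL, mailbox_write);
 *
 * expands to
 *
 *	ret = new_mmio_info(gvt, i915_mmio_reg_offset(GEN6_PCODE_MAILBOX),
 *		0, 4, 0, 0, D_BDW_PLUS, NULL, mailbox_write);
 *	if (ret)
 *		return ret;
 *
 * i.e. a 4-byte entry with no flags, no address or read-only mask, the
 * default read handler (NULL) and mailbox_write as the write handler.
 */
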
2188 static int init_generic_mmio_info(struct intel_gvt *gvt)
2189 {
2190 	struct drm_i915_private *dev_priv = gvt->gt->i915;
2191 	int ret;
2192 
2193 	MMIO_RING_DFH(RING_IMR, D_ALL, 0, NULL,
2194 		intel_vgpu_reg_imr_handler);
2195 
2196 	MMIO_DFH(SDEIMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler);
2197 	MMIO_DFH(SDEIER, D_ALL, 0, NULL, intel_vgpu_reg_ier_handler);
2198 	MMIO_DFH(SDEIIR, D_ALL, 0, NULL, intel_vgpu_reg_iir_handler);
2199 	MMIO_D(SDEISR, D_ALL);
2200 
2201 	MMIO_RING_DFH(RING_HWSTAM, D_ALL, 0, NULL, NULL);
2202 
2204 	MMIO_DH(GEN8_GAMW_ECO_DEV_RW_IA, D_BDW_PLUS, NULL,
2205 		gamw_echo_dev_rw_ia_write);
2206 
2207 	MMIO_GM_RDR(BSD_HWS_PGA_GEN7, D_ALL, NULL, NULL);
2208 	MMIO_GM_RDR(BLT_HWS_PGA_GEN7, D_ALL, NULL, NULL);
2209 	MMIO_GM_RDR(VEBOX_HWS_PGA_GEN7, D_ALL, NULL, NULL);
2210 
2211 #define RING_REG(base) _MMIO((base) + 0x28)
2212 	MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL);
2213 #undef RING_REG
2214 
2215 #define RING_REG(base) _MMIO((base) + 0x134)
2216 	MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL);
2217 #undef RING_REG
2218 
2219 #define RING_REG(base) _MMIO((base) + 0x6c)
2220 	MMIO_RING_DFH(RING_REG, D_ALL, 0, mmio_read_from_hw, NULL);
2221 #undef RING_REG
2222 	MMIO_DH(GEN7_SC_INSTDONE, D_BDW_PLUS, mmio_read_from_hw, NULL);
2223 
2224 	MMIO_GM_RDR(_MMIO(0x2148), D_ALL, NULL, NULL);
2225 	MMIO_GM_RDR(CCID(RENDER_RING_BASE), D_ALL, NULL, NULL);
2226 	MMIO_GM_RDR(_MMIO(0x12198), D_ALL, NULL, NULL);
2227 	MMIO_D(GEN7_CXT_SIZE, D_ALL);
2228 
2229 	MMIO_RING_DFH(RING_TAIL, D_ALL, 0, NULL, NULL);
2230 	MMIO_RING_DFH(RING_HEAD, D_ALL, 0, NULL, NULL);
2231 	MMIO_RING_DFH(RING_CTL, D_ALL, 0, NULL, NULL);
2232 	MMIO_RING_DFH(RING_ACTHD, D_ALL, 0, mmio_read_from_hw, NULL);
2233 	MMIO_RING_GM(RING_START, D_ALL, NULL, NULL);
2234 
2235 	/* RING MODE */
2236 #define RING_REG(base) _MMIO((base) + 0x29c)
2237 	MMIO_RING_DFH(RING_REG, D_ALL,
2238 		F_MODE_MASK | F_CMD_ACCESS | F_CMD_WRITE_PATCH, NULL,
2239 		ring_mode_mmio_write);
2240 #undef RING_REG
2241 
2242 	MMIO_RING_DFH(RING_MI_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
2243 		NULL, NULL);
2244 	MMIO_RING_DFH(RING_INSTPM, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
2245 			NULL, NULL);
2246 	MMIO_RING_DFH(RING_TIMESTAMP, D_ALL, F_CMD_ACCESS,
2247 			mmio_read_from_hw, NULL);
2248 	MMIO_RING_DFH(RING_TIMESTAMP_UDW, D_ALL, F_CMD_ACCESS,
2249 			mmio_read_from_hw, NULL);
2250 
2251 	MMIO_DFH(GEN7_GT_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2252 	MMIO_DFH(CACHE_MODE_0_GEN7, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
2253 		NULL, NULL);
2254 	MMIO_DFH(CACHE_MODE_1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2255 	MMIO_DFH(CACHE_MODE_0, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2256 	MMIO_DFH(_MMIO(0x2124), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2257 
2258 	MMIO_DFH(_MMIO(0x20dc), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2259 	MMIO_DFH(_3D_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2260 	MMIO_DFH(_MMIO(0x2088), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2261 	MMIO_DFH(FF_SLICE_CS_CHICKEN2, D_ALL,
2262 		 F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2263 	MMIO_DFH(_MMIO(0x2470), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2264 	MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL);
2265 	MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
2266 		NULL, NULL);
2267 	MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
2268 		 NULL, NULL);
2269 	MMIO_DFH(_MMIO(0x9030), D_ALL, F_CMD_ACCESS, NULL, NULL);
2270 	MMIO_DFH(_MMIO(0x20a0), D_ALL, F_CMD_ACCESS, NULL, NULL);
2271 	MMIO_DFH(_MMIO(0x2420), D_ALL, F_CMD_ACCESS, NULL, NULL);
2272 	MMIO_DFH(_MMIO(0x2430), D_ALL, F_CMD_ACCESS, NULL, NULL);
2273 	MMIO_DFH(_MMIO(0x2434), D_ALL, F_CMD_ACCESS, NULL, NULL);
2274 	MMIO_DFH(_MMIO(0x2438), D_ALL, F_CMD_ACCESS, NULL, NULL);
2275 	MMIO_DFH(_MMIO(0x243c), D_ALL, F_CMD_ACCESS, NULL, NULL);
2276 	MMIO_DFH(_MMIO(0x7018), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2277 	MMIO_DFH(HALF_SLICE_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2278 	MMIO_DFH(GEN7_HALF_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2279 
2280 	/* display */
2281 	MMIO_F(_MMIO(0x60220), 0x20, 0, 0, 0, D_ALL, NULL, NULL);
2282 	MMIO_D(_MMIO(0x602a0), D_ALL);
2283 
2284 	MMIO_D(_MMIO(0x65050), D_ALL);
2285 	MMIO_D(_MMIO(0x650b4), D_ALL);
2286 
2287 	MMIO_D(_MMIO(0xc4040), D_ALL);
2288 	MMIO_D(DERRMR, D_ALL);
2289 
2290 	MMIO_D(PIPEDSL(PIPE_A), D_ALL);
2291 	MMIO_D(PIPEDSL(PIPE_B), D_ALL);
2292 	MMIO_D(PIPEDSL(PIPE_C), D_ALL);
2293 	MMIO_D(PIPEDSL(_PIPE_EDP), D_ALL);
2294 
2295 	MMIO_DH(PIPECONF(PIPE_A), D_ALL, NULL, pipeconf_mmio_write);
2296 	MMIO_DH(PIPECONF(PIPE_B), D_ALL, NULL, pipeconf_mmio_write);
2297 	MMIO_DH(PIPECONF(PIPE_C), D_ALL, NULL, pipeconf_mmio_write);
2298 	MMIO_DH(PIPECONF(_PIPE_EDP), D_ALL, NULL, pipeconf_mmio_write);
2299 
2300 	MMIO_D(PIPESTAT(PIPE_A), D_ALL);
2301 	MMIO_D(PIPESTAT(PIPE_B), D_ALL);
2302 	MMIO_D(PIPESTAT(PIPE_C), D_ALL);
2303 	MMIO_D(PIPESTAT(_PIPE_EDP), D_ALL);
2304 
2305 	MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_A), D_ALL);
2306 	MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_B), D_ALL);
2307 	MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_C), D_ALL);
2308 	MMIO_D(PIPE_FLIPCOUNT_G4X(_PIPE_EDP), D_ALL);
2309 
2310 	MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_A), D_ALL);
2311 	MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_B), D_ALL);
2312 	MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_C), D_ALL);
2313 	MMIO_D(PIPE_FRMCOUNT_G4X(_PIPE_EDP), D_ALL);
2314 
2315 	MMIO_D(CURCNTR(PIPE_A), D_ALL);
2316 	MMIO_D(CURCNTR(PIPE_B), D_ALL);
2317 	MMIO_D(CURCNTR(PIPE_C), D_ALL);
2318 
2319 	MMIO_D(CURPOS(PIPE_A), D_ALL);
2320 	MMIO_D(CURPOS(PIPE_B), D_ALL);
2321 	MMIO_D(CURPOS(PIPE_C), D_ALL);
2322 
2323 	MMIO_D(CURBASE(PIPE_A), D_ALL);
2324 	MMIO_D(CURBASE(PIPE_B), D_ALL);
2325 	MMIO_D(CURBASE(PIPE_C), D_ALL);
2326 
2327 	MMIO_D(CUR_FBC_CTL(PIPE_A), D_ALL);
2328 	MMIO_D(CUR_FBC_CTL(PIPE_B), D_ALL);
2329 	MMIO_D(CUR_FBC_CTL(PIPE_C), D_ALL);
2330 
2331 	MMIO_D(_MMIO(0x700ac), D_ALL);
2332 	MMIO_D(_MMIO(0x710ac), D_ALL);
2333 	MMIO_D(_MMIO(0x720ac), D_ALL);
2334 
2335 	MMIO_D(_MMIO(0x70090), D_ALL);
2336 	MMIO_D(_MMIO(0x70094), D_ALL);
2337 	MMIO_D(_MMIO(0x70098), D_ALL);
2338 	MMIO_D(_MMIO(0x7009c), D_ALL);
2339 
2340 	MMIO_D(DSPCNTR(PIPE_A), D_ALL);
2341 	MMIO_D(DSPADDR(PIPE_A), D_ALL);
2342 	MMIO_D(DSPSTRIDE(PIPE_A), D_ALL);
2343 	MMIO_D(DSPPOS(PIPE_A), D_ALL);
2344 	MMIO_D(DSPSIZE(PIPE_A), D_ALL);
2345 	MMIO_DH(DSPSURF(PIPE_A), D_ALL, NULL, pri_surf_mmio_write);
2346 	MMIO_D(DSPOFFSET(PIPE_A), D_ALL);
2347 	MMIO_D(DSPSURFLIVE(PIPE_A), D_ALL);
2348 	MMIO_DH(REG_50080(PIPE_A, PLANE_PRIMARY), D_ALL, NULL,
2349 		reg50080_mmio_write);
2350 
2351 	MMIO_D(DSPCNTR(PIPE_B), D_ALL);
2352 	MMIO_D(DSPADDR(PIPE_B), D_ALL);
2353 	MMIO_D(DSPSTRIDE(PIPE_B), D_ALL);
2354 	MMIO_D(DSPPOS(PIPE_B), D_ALL);
2355 	MMIO_D(DSPSIZE(PIPE_B), D_ALL);
2356 	MMIO_DH(DSPSURF(PIPE_B), D_ALL, NULL, pri_surf_mmio_write);
2357 	MMIO_D(DSPOFFSET(PIPE_B), D_ALL);
2358 	MMIO_D(DSPSURFLIVE(PIPE_B), D_ALL);
2359 	MMIO_DH(REG_50080(PIPE_B, PLANE_PRIMARY), D_ALL, NULL,
2360 		reg50080_mmio_write);
2361 
2362 	MMIO_D(DSPCNTR(PIPE_C), D_ALL);
2363 	MMIO_D(DSPADDR(PIPE_C), D_ALL);
2364 	MMIO_D(DSPSTRIDE(PIPE_C), D_ALL);
2365 	MMIO_D(DSPPOS(PIPE_C), D_ALL);
2366 	MMIO_D(DSPSIZE(PIPE_C), D_ALL);
2367 	MMIO_DH(DSPSURF(PIPE_C), D_ALL, NULL, pri_surf_mmio_write);
2368 	MMIO_D(DSPOFFSET(PIPE_C), D_ALL);
2369 	MMIO_D(DSPSURFLIVE(PIPE_C), D_ALL);
2370 	MMIO_DH(REG_50080(PIPE_C, PLANE_PRIMARY), D_ALL, NULL,
2371 		reg50080_mmio_write);
2372 
2373 	MMIO_D(SPRCTL(PIPE_A), D_ALL);
2374 	MMIO_D(SPRLINOFF(PIPE_A), D_ALL);
2375 	MMIO_D(SPRSTRIDE(PIPE_A), D_ALL);
2376 	MMIO_D(SPRPOS(PIPE_A), D_ALL);
2377 	MMIO_D(SPRSIZE(PIPE_A), D_ALL);
2378 	MMIO_D(SPRKEYVAL(PIPE_A), D_ALL);
2379 	MMIO_D(SPRKEYMSK(PIPE_A), D_ALL);
2380 	MMIO_DH(SPRSURF(PIPE_A), D_ALL, NULL, spr_surf_mmio_write);
2381 	MMIO_D(SPRKEYMAX(PIPE_A), D_ALL);
2382 	MMIO_D(SPROFFSET(PIPE_A), D_ALL);
2383 	MMIO_D(SPRSCALE(PIPE_A), D_ALL);
2384 	MMIO_D(SPRSURFLIVE(PIPE_A), D_ALL);
2385 	MMIO_DH(REG_50080(PIPE_A, PLANE_SPRITE0), D_ALL, NULL,
2386 		reg50080_mmio_write);
2387 
2388 	MMIO_D(SPRCTL(PIPE_B), D_ALL);
2389 	MMIO_D(SPRLINOFF(PIPE_B), D_ALL);
2390 	MMIO_D(SPRSTRIDE(PIPE_B), D_ALL);
2391 	MMIO_D(SPRPOS(PIPE_B), D_ALL);
2392 	MMIO_D(SPRSIZE(PIPE_B), D_ALL);
2393 	MMIO_D(SPRKEYVAL(PIPE_B), D_ALL);
2394 	MMIO_D(SPRKEYMSK(PIPE_B), D_ALL);
2395 	MMIO_DH(SPRSURF(PIPE_B), D_ALL, NULL, spr_surf_mmio_write);
2396 	MMIO_D(SPRKEYMAX(PIPE_B), D_ALL);
2397 	MMIO_D(SPROFFSET(PIPE_B), D_ALL);
2398 	MMIO_D(SPRSCALE(PIPE_B), D_ALL);
2399 	MMIO_D(SPRSURFLIVE(PIPE_B), D_ALL);
2400 	MMIO_DH(REG_50080(PIPE_B, PLANE_SPRITE0), D_ALL, NULL,
2401 		reg50080_mmio_write);
2402 
2403 	MMIO_D(SPRCTL(PIPE_C), D_ALL);
2404 	MMIO_D(SPRLINOFF(PIPE_C), D_ALL);
2405 	MMIO_D(SPRSTRIDE(PIPE_C), D_ALL);
2406 	MMIO_D(SPRPOS(PIPE_C), D_ALL);
2407 	MMIO_D(SPRSIZE(PIPE_C), D_ALL);
2408 	MMIO_D(SPRKEYVAL(PIPE_C), D_ALL);
2409 	MMIO_D(SPRKEYMSK(PIPE_C), D_ALL);
2410 	MMIO_DH(SPRSURF(PIPE_C), D_ALL, NULL, spr_surf_mmio_write);
2411 	MMIO_D(SPRKEYMAX(PIPE_C), D_ALL);
2412 	MMIO_D(SPROFFSET(PIPE_C), D_ALL);
2413 	MMIO_D(SPRSCALE(PIPE_C), D_ALL);
2414 	MMIO_D(SPRSURFLIVE(PIPE_C), D_ALL);
2415 	MMIO_DH(REG_50080(PIPE_C, PLANE_SPRITE0), D_ALL, NULL,
2416 		reg50080_mmio_write);
2417 
2418 	MMIO_D(HTOTAL(TRANSCODER_A), D_ALL);
2419 	MMIO_D(HBLANK(TRANSCODER_A), D_ALL);
2420 	MMIO_D(HSYNC(TRANSCODER_A), D_ALL);
2421 	MMIO_D(VTOTAL(TRANSCODER_A), D_ALL);
2422 	MMIO_D(VBLANK(TRANSCODER_A), D_ALL);
2423 	MMIO_D(VSYNC(TRANSCODER_A), D_ALL);
2424 	MMIO_D(BCLRPAT(TRANSCODER_A), D_ALL);
2425 	MMIO_D(VSYNCSHIFT(TRANSCODER_A), D_ALL);
2426 	MMIO_D(PIPESRC(TRANSCODER_A), D_ALL);
2427 
2428 	MMIO_D(HTOTAL(TRANSCODER_B), D_ALL);
2429 	MMIO_D(HBLANK(TRANSCODER_B), D_ALL);
2430 	MMIO_D(HSYNC(TRANSCODER_B), D_ALL);
2431 	MMIO_D(VTOTAL(TRANSCODER_B), D_ALL);
2432 	MMIO_D(VBLANK(TRANSCODER_B), D_ALL);
2433 	MMIO_D(VSYNC(TRANSCODER_B), D_ALL);
2434 	MMIO_D(BCLRPAT(TRANSCODER_B), D_ALL);
2435 	MMIO_D(VSYNCSHIFT(TRANSCODER_B), D_ALL);
2436 	MMIO_D(PIPESRC(TRANSCODER_B), D_ALL);
2437 
2438 	MMIO_D(HTOTAL(TRANSCODER_C), D_ALL);
2439 	MMIO_D(HBLANK(TRANSCODER_C), D_ALL);
2440 	MMIO_D(HSYNC(TRANSCODER_C), D_ALL);
2441 	MMIO_D(VTOTAL(TRANSCODER_C), D_ALL);
2442 	MMIO_D(VBLANK(TRANSCODER_C), D_ALL);
2443 	MMIO_D(VSYNC(TRANSCODER_C), D_ALL);
2444 	MMIO_D(BCLRPAT(TRANSCODER_C), D_ALL);
2445 	MMIO_D(VSYNCSHIFT(TRANSCODER_C), D_ALL);
2446 	MMIO_D(PIPESRC(TRANSCODER_C), D_ALL);
2447 
2448 	MMIO_D(HTOTAL(TRANSCODER_EDP), D_ALL);
2449 	MMIO_D(HBLANK(TRANSCODER_EDP), D_ALL);
2450 	MMIO_D(HSYNC(TRANSCODER_EDP), D_ALL);
2451 	MMIO_D(VTOTAL(TRANSCODER_EDP), D_ALL);
2452 	MMIO_D(VBLANK(TRANSCODER_EDP), D_ALL);
2453 	MMIO_D(VSYNC(TRANSCODER_EDP), D_ALL);
2454 	MMIO_D(BCLRPAT(TRANSCODER_EDP), D_ALL);
2455 	MMIO_D(VSYNCSHIFT(TRANSCODER_EDP), D_ALL);
2456 
2457 	MMIO_D(PIPE_DATA_M1(TRANSCODER_A), D_ALL);
2458 	MMIO_D(PIPE_DATA_N1(TRANSCODER_A), D_ALL);
2459 	MMIO_D(PIPE_DATA_M2(TRANSCODER_A), D_ALL);
2460 	MMIO_D(PIPE_DATA_N2(TRANSCODER_A), D_ALL);
2461 	MMIO_D(PIPE_LINK_M1(TRANSCODER_A), D_ALL);
2462 	MMIO_D(PIPE_LINK_N1(TRANSCODER_A), D_ALL);
2463 	MMIO_D(PIPE_LINK_M2(TRANSCODER_A), D_ALL);
2464 	MMIO_D(PIPE_LINK_N2(TRANSCODER_A), D_ALL);
2465 
2466 	MMIO_D(PIPE_DATA_M1(TRANSCODER_B), D_ALL);
2467 	MMIO_D(PIPE_DATA_N1(TRANSCODER_B), D_ALL);
2468 	MMIO_D(PIPE_DATA_M2(TRANSCODER_B), D_ALL);
2469 	MMIO_D(PIPE_DATA_N2(TRANSCODER_B), D_ALL);
2470 	MMIO_D(PIPE_LINK_M1(TRANSCODER_B), D_ALL);
2471 	MMIO_D(PIPE_LINK_N1(TRANSCODER_B), D_ALL);
2472 	MMIO_D(PIPE_LINK_M2(TRANSCODER_B), D_ALL);
2473 	MMIO_D(PIPE_LINK_N2(TRANSCODER_B), D_ALL);
2474 
2475 	MMIO_D(PIPE_DATA_M1(TRANSCODER_C), D_ALL);
2476 	MMIO_D(PIPE_DATA_N1(TRANSCODER_C), D_ALL);
2477 	MMIO_D(PIPE_DATA_M2(TRANSCODER_C), D_ALL);
2478 	MMIO_D(PIPE_DATA_N2(TRANSCODER_C), D_ALL);
2479 	MMIO_D(PIPE_LINK_M1(TRANSCODER_C), D_ALL);
2480 	MMIO_D(PIPE_LINK_N1(TRANSCODER_C), D_ALL);
2481 	MMIO_D(PIPE_LINK_M2(TRANSCODER_C), D_ALL);
2482 	MMIO_D(PIPE_LINK_N2(TRANSCODER_C), D_ALL);
2483 
2484 	MMIO_D(PIPE_DATA_M1(TRANSCODER_EDP), D_ALL);
2485 	MMIO_D(PIPE_DATA_N1(TRANSCODER_EDP), D_ALL);
2486 	MMIO_D(PIPE_DATA_M2(TRANSCODER_EDP), D_ALL);
2487 	MMIO_D(PIPE_DATA_N2(TRANSCODER_EDP), D_ALL);
2488 	MMIO_D(PIPE_LINK_M1(TRANSCODER_EDP), D_ALL);
2489 	MMIO_D(PIPE_LINK_N1(TRANSCODER_EDP), D_ALL);
2490 	MMIO_D(PIPE_LINK_M2(TRANSCODER_EDP), D_ALL);
2491 	MMIO_D(PIPE_LINK_N2(TRANSCODER_EDP), D_ALL);
2492 
2493 	MMIO_D(PF_CTL(PIPE_A), D_ALL);
2494 	MMIO_D(PF_WIN_SZ(PIPE_A), D_ALL);
2495 	MMIO_D(PF_WIN_POS(PIPE_A), D_ALL);
2496 	MMIO_D(PF_VSCALE(PIPE_A), D_ALL);
2497 	MMIO_D(PF_HSCALE(PIPE_A), D_ALL);
2498 
2499 	MMIO_D(PF_CTL(PIPE_B), D_ALL);
2500 	MMIO_D(PF_WIN_SZ(PIPE_B), D_ALL);
2501 	MMIO_D(PF_WIN_POS(PIPE_B), D_ALL);
2502 	MMIO_D(PF_VSCALE(PIPE_B), D_ALL);
2503 	MMIO_D(PF_HSCALE(PIPE_B), D_ALL);
2504 
2505 	MMIO_D(PF_CTL(PIPE_C), D_ALL);
2506 	MMIO_D(PF_WIN_SZ(PIPE_C), D_ALL);
2507 	MMIO_D(PF_WIN_POS(PIPE_C), D_ALL);
2508 	MMIO_D(PF_VSCALE(PIPE_C), D_ALL);
2509 	MMIO_D(PF_HSCALE(PIPE_C), D_ALL);
2510 
2511 	MMIO_D(WM0_PIPE_ILK(PIPE_A), D_ALL);
2512 	MMIO_D(WM0_PIPE_ILK(PIPE_B), D_ALL);
2513 	MMIO_D(WM0_PIPE_ILK(PIPE_C), D_ALL);
2514 	MMIO_D(WM1_LP_ILK, D_ALL);
2515 	MMIO_D(WM2_LP_ILK, D_ALL);
2516 	MMIO_D(WM3_LP_ILK, D_ALL);
2517 	MMIO_D(WM1S_LP_ILK, D_ALL);
2518 	MMIO_D(WM2S_LP_IVB, D_ALL);
2519 	MMIO_D(WM3S_LP_IVB, D_ALL);
2520 
2521 	MMIO_D(BLC_PWM_CPU_CTL2, D_ALL);
2522 	MMIO_D(BLC_PWM_CPU_CTL, D_ALL);
2523 	MMIO_D(BLC_PWM_PCH_CTL1, D_ALL);
2524 	MMIO_D(BLC_PWM_PCH_CTL2, D_ALL);
2525 
2526 	MMIO_D(_MMIO(0x48268), D_ALL);
2527 
2528 	MMIO_F(PCH_GMBUS0, 4 * 4, 0, 0, 0, D_ALL, gmbus_mmio_read,
2529 		gmbus_mmio_write);
2530 	MMIO_F(PCH_GPIO_BASE, 6 * 4, F_UNALIGN, 0, 0, D_ALL, NULL, NULL);
2531 	MMIO_F(_MMIO(0xe4f00), 0x28, 0, 0, 0, D_ALL, NULL, NULL);
2532 
2533 	MMIO_F(_MMIO(_PCH_DPB_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
2534 		dp_aux_ch_ctl_mmio_write);
2535 	MMIO_F(_MMIO(_PCH_DPC_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
2536 		dp_aux_ch_ctl_mmio_write);
2537 	MMIO_F(_MMIO(_PCH_DPD_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
2538 		dp_aux_ch_ctl_mmio_write);
2539 
2540 	MMIO_DH(PCH_ADPA, D_PRE_SKL, NULL, pch_adpa_mmio_write);
2541 
2542 	MMIO_DH(_MMIO(_PCH_TRANSACONF), D_ALL, NULL, transconf_mmio_write);
2543 	MMIO_DH(_MMIO(_PCH_TRANSBCONF), D_ALL, NULL, transconf_mmio_write);
2544 
2545 	MMIO_DH(FDI_RX_IIR(PIPE_A), D_ALL, NULL, fdi_rx_iir_mmio_write);
2546 	MMIO_DH(FDI_RX_IIR(PIPE_B), D_ALL, NULL, fdi_rx_iir_mmio_write);
2547 	MMIO_DH(FDI_RX_IIR(PIPE_C), D_ALL, NULL, fdi_rx_iir_mmio_write);
2548 	MMIO_DH(FDI_RX_IMR(PIPE_A), D_ALL, NULL, update_fdi_rx_iir_status);
2549 	MMIO_DH(FDI_RX_IMR(PIPE_B), D_ALL, NULL, update_fdi_rx_iir_status);
2550 	MMIO_DH(FDI_RX_IMR(PIPE_C), D_ALL, NULL, update_fdi_rx_iir_status);
2551 	MMIO_DH(FDI_RX_CTL(PIPE_A), D_ALL, NULL, update_fdi_rx_iir_status);
2552 	MMIO_DH(FDI_RX_CTL(PIPE_B), D_ALL, NULL, update_fdi_rx_iir_status);
2553 	MMIO_DH(FDI_RX_CTL(PIPE_C), D_ALL, NULL, update_fdi_rx_iir_status);
2554 
2555 	MMIO_D(_MMIO(_PCH_TRANS_HTOTAL_A), D_ALL);
2556 	MMIO_D(_MMIO(_PCH_TRANS_HBLANK_A), D_ALL);
2557 	MMIO_D(_MMIO(_PCH_TRANS_HSYNC_A), D_ALL);
2558 	MMIO_D(_MMIO(_PCH_TRANS_VTOTAL_A), D_ALL);
2559 	MMIO_D(_MMIO(_PCH_TRANS_VBLANK_A), D_ALL);
2560 	MMIO_D(_MMIO(_PCH_TRANS_VSYNC_A), D_ALL);
2561 	MMIO_D(_MMIO(_PCH_TRANS_VSYNCSHIFT_A), D_ALL);
2562 
2563 	MMIO_D(_MMIO(_PCH_TRANS_HTOTAL_B), D_ALL);
2564 	MMIO_D(_MMIO(_PCH_TRANS_HBLANK_B), D_ALL);
2565 	MMIO_D(_MMIO(_PCH_TRANS_HSYNC_B), D_ALL);
2566 	MMIO_D(_MMIO(_PCH_TRANS_VTOTAL_B), D_ALL);
2567 	MMIO_D(_MMIO(_PCH_TRANS_VBLANK_B), D_ALL);
2568 	MMIO_D(_MMIO(_PCH_TRANS_VSYNC_B), D_ALL);
2569 	MMIO_D(_MMIO(_PCH_TRANS_VSYNCSHIFT_B), D_ALL);
2570 
2571 	MMIO_D(_MMIO(_PCH_TRANSA_DATA_M1), D_ALL);
2572 	MMIO_D(_MMIO(_PCH_TRANSA_DATA_N1), D_ALL);
2573 	MMIO_D(_MMIO(_PCH_TRANSA_DATA_M2), D_ALL);
2574 	MMIO_D(_MMIO(_PCH_TRANSA_DATA_N2), D_ALL);
2575 	MMIO_D(_MMIO(_PCH_TRANSA_LINK_M1), D_ALL);
2576 	MMIO_D(_MMIO(_PCH_TRANSA_LINK_N1), D_ALL);
2577 	MMIO_D(_MMIO(_PCH_TRANSA_LINK_M2), D_ALL);
2578 	MMIO_D(_MMIO(_PCH_TRANSA_LINK_N2), D_ALL);
2579 
2580 	MMIO_D(TRANS_DP_CTL(PIPE_A), D_ALL);
2581 	MMIO_D(TRANS_DP_CTL(PIPE_B), D_ALL);
2582 	MMIO_D(TRANS_DP_CTL(PIPE_C), D_ALL);
2583 
2584 	MMIO_D(TVIDEO_DIP_CTL(PIPE_A), D_ALL);
2585 	MMIO_D(TVIDEO_DIP_DATA(PIPE_A), D_ALL);
2586 	MMIO_D(TVIDEO_DIP_GCP(PIPE_A), D_ALL);
2587 
2588 	MMIO_D(TVIDEO_DIP_CTL(PIPE_B), D_ALL);
2589 	MMIO_D(TVIDEO_DIP_DATA(PIPE_B), D_ALL);
2590 	MMIO_D(TVIDEO_DIP_GCP(PIPE_B), D_ALL);
2591 
2592 	MMIO_D(TVIDEO_DIP_CTL(PIPE_C), D_ALL);
2593 	MMIO_D(TVIDEO_DIP_DATA(PIPE_C), D_ALL);
2594 	MMIO_D(TVIDEO_DIP_GCP(PIPE_C), D_ALL);
2595 
2596 	MMIO_D(_MMIO(_FDI_RXA_MISC), D_ALL);
2597 	MMIO_D(_MMIO(_FDI_RXB_MISC), D_ALL);
2598 	MMIO_D(_MMIO(_FDI_RXA_TUSIZE1), D_ALL);
2599 	MMIO_D(_MMIO(_FDI_RXA_TUSIZE2), D_ALL);
2600 	MMIO_D(_MMIO(_FDI_RXB_TUSIZE1), D_ALL);
2601 	MMIO_D(_MMIO(_FDI_RXB_TUSIZE2), D_ALL);
2602 
2603 	MMIO_DH(PCH_PP_CONTROL, D_ALL, NULL, pch_pp_control_mmio_write);
2604 	MMIO_D(PCH_PP_DIVISOR, D_ALL);
	MMIO_D(PCH_PP_STATUS, D_ALL);
2606 	MMIO_D(PCH_LVDS, D_ALL);
2607 	MMIO_D(_MMIO(_PCH_DPLL_A), D_ALL);
2608 	MMIO_D(_MMIO(_PCH_DPLL_B), D_ALL);
2609 	MMIO_D(_MMIO(_PCH_FPA0), D_ALL);
2610 	MMIO_D(_MMIO(_PCH_FPA1), D_ALL);
2611 	MMIO_D(_MMIO(_PCH_FPB0), D_ALL);
2612 	MMIO_D(_MMIO(_PCH_FPB1), D_ALL);
2613 	MMIO_D(PCH_DREF_CONTROL, D_ALL);
2614 	MMIO_D(PCH_RAWCLK_FREQ, D_ALL);
2615 	MMIO_D(PCH_DPLL_SEL, D_ALL);
2616 
2617 	MMIO_D(_MMIO(0x61208), D_ALL);
2618 	MMIO_D(_MMIO(0x6120c), D_ALL);
2619 	MMIO_D(PCH_PP_ON_DELAYS, D_ALL);
2620 	MMIO_D(PCH_PP_OFF_DELAYS, D_ALL);
2621 
2622 	MMIO_DH(_MMIO(0xe651c), D_ALL, dpy_reg_mmio_read, NULL);
2623 	MMIO_DH(_MMIO(0xe661c), D_ALL, dpy_reg_mmio_read, NULL);
2624 	MMIO_DH(_MMIO(0xe671c), D_ALL, dpy_reg_mmio_read, NULL);
2625 	MMIO_DH(_MMIO(0xe681c), D_ALL, dpy_reg_mmio_read, NULL);
2626 	MMIO_DH(_MMIO(0xe6c04), D_ALL, dpy_reg_mmio_read, NULL);
2627 	MMIO_DH(_MMIO(0xe6e1c), D_ALL, dpy_reg_mmio_read, NULL);
2628 
2629 	MMIO_RO(PCH_PORT_HOTPLUG, D_ALL, 0,
2630 		PORTA_HOTPLUG_STATUS_MASK
2631 		| PORTB_HOTPLUG_STATUS_MASK
2632 		| PORTC_HOTPLUG_STATUS_MASK
2633 		| PORTD_HOTPLUG_STATUS_MASK,
2634 		NULL, NULL);
2635 
2636 	MMIO_DH(LCPLL_CTL, D_ALL, NULL, lcpll_ctl_mmio_write);
2637 	MMIO_D(FUSE_STRAP, D_ALL);
2638 	MMIO_D(DIGITAL_PORT_HOTPLUG_CNTRL, D_ALL);
2639 
2640 	MMIO_D(DISP_ARB_CTL, D_ALL);
2641 	MMIO_D(DISP_ARB_CTL2, D_ALL);
2642 
2643 	MMIO_D(ILK_DISPLAY_CHICKEN1, D_ALL);
2644 	MMIO_D(ILK_DISPLAY_CHICKEN2, D_ALL);
2645 	MMIO_D(ILK_DSPCLK_GATE_D, D_ALL);
2646 
2647 	MMIO_D(SOUTH_CHICKEN1, D_ALL);
2648 	MMIO_DH(SOUTH_CHICKEN2, D_ALL, NULL, south_chicken2_mmio_write);
2649 	MMIO_D(_MMIO(_TRANSA_CHICKEN1), D_ALL);
2650 	MMIO_D(_MMIO(_TRANSB_CHICKEN1), D_ALL);
2651 	MMIO_D(SOUTH_DSPCLK_GATE_D, D_ALL);
2652 	MMIO_D(_MMIO(_TRANSA_CHICKEN2), D_ALL);
2653 	MMIO_D(_MMIO(_TRANSB_CHICKEN2), D_ALL);
2654 
2655 	MMIO_D(ILK_DPFC_CB_BASE(INTEL_FBC_A), D_ALL);
2656 	MMIO_D(ILK_DPFC_CONTROL(INTEL_FBC_A), D_ALL);
2657 	MMIO_D(ILK_DPFC_RECOMP_CTL(INTEL_FBC_A), D_ALL);
2658 	MMIO_D(ILK_DPFC_STATUS(INTEL_FBC_A), D_ALL);
2659 	MMIO_D(ILK_DPFC_FENCE_YOFF(INTEL_FBC_A), D_ALL);
2660 	MMIO_D(ILK_DPFC_CHICKEN(INTEL_FBC_A), D_ALL);
2661 	MMIO_D(ILK_FBC_RT_BASE, D_ALL);
2662 
2663 	MMIO_D(IPS_CTL, D_ALL);
2664 
2665 	MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_A), D_ALL);
2666 	MMIO_D(PIPE_CSC_COEFF_BY(PIPE_A), D_ALL);
2667 	MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_A), D_ALL);
2668 	MMIO_D(PIPE_CSC_COEFF_BU(PIPE_A), D_ALL);
2669 	MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_A), D_ALL);
2670 	MMIO_D(PIPE_CSC_COEFF_BV(PIPE_A), D_ALL);
2671 	MMIO_D(PIPE_CSC_MODE(PIPE_A), D_ALL);
2672 	MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_A), D_ALL);
2673 	MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_A), D_ALL);
2674 	MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_A), D_ALL);
2675 	MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_A), D_ALL);
2676 	MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_A), D_ALL);
2677 	MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_A), D_ALL);
2678 
2679 	MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_B), D_ALL);
2680 	MMIO_D(PIPE_CSC_COEFF_BY(PIPE_B), D_ALL);
2681 	MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_B), D_ALL);
2682 	MMIO_D(PIPE_CSC_COEFF_BU(PIPE_B), D_ALL);
2683 	MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_B), D_ALL);
2684 	MMIO_D(PIPE_CSC_COEFF_BV(PIPE_B), D_ALL);
2685 	MMIO_D(PIPE_CSC_MODE(PIPE_B), D_ALL);
2686 	MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_B), D_ALL);
2687 	MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_B), D_ALL);
2688 	MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_B), D_ALL);
2689 	MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_B), D_ALL);
2690 	MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_B), D_ALL);
2691 	MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_B), D_ALL);
2692 
2693 	MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_C), D_ALL);
2694 	MMIO_D(PIPE_CSC_COEFF_BY(PIPE_C), D_ALL);
2695 	MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_C), D_ALL);
2696 	MMIO_D(PIPE_CSC_COEFF_BU(PIPE_C), D_ALL);
2697 	MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_C), D_ALL);
2698 	MMIO_D(PIPE_CSC_COEFF_BV(PIPE_C), D_ALL);
2699 	MMIO_D(PIPE_CSC_MODE(PIPE_C), D_ALL);
2700 	MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_C), D_ALL);
2701 	MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_C), D_ALL);
2702 	MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_C), D_ALL);
2703 	MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_C), D_ALL);
2704 	MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_C), D_ALL);
2705 	MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_C), D_ALL);
2706 
2707 	MMIO_D(PREC_PAL_INDEX(PIPE_A), D_ALL);
2708 	MMIO_D(PREC_PAL_DATA(PIPE_A), D_ALL);
2709 	MMIO_F(PREC_PAL_GC_MAX(PIPE_A, 0), 4 * 3, 0, 0, 0, D_ALL, NULL, NULL);
2710 
2711 	MMIO_D(PREC_PAL_INDEX(PIPE_B), D_ALL);
2712 	MMIO_D(PREC_PAL_DATA(PIPE_B), D_ALL);
2713 	MMIO_F(PREC_PAL_GC_MAX(PIPE_B, 0), 4 * 3, 0, 0, 0, D_ALL, NULL, NULL);
2714 
2715 	MMIO_D(PREC_PAL_INDEX(PIPE_C), D_ALL);
2716 	MMIO_D(PREC_PAL_DATA(PIPE_C), D_ALL);
2717 	MMIO_F(PREC_PAL_GC_MAX(PIPE_C, 0), 4 * 3, 0, 0, 0, D_ALL, NULL, NULL);
2718 
2719 	MMIO_D(_MMIO(0x60110), D_ALL);
2720 	MMIO_D(_MMIO(0x61110), D_ALL);
2721 	MMIO_F(_MMIO(0x70400), 0x40, 0, 0, 0, D_ALL, NULL, NULL);
2722 	MMIO_F(_MMIO(0x71400), 0x40, 0, 0, 0, D_ALL, NULL, NULL);
2723 	MMIO_F(_MMIO(0x72400), 0x40, 0, 0, 0, D_ALL, NULL, NULL);
2724 	MMIO_F(_MMIO(0x70440), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
2725 	MMIO_F(_MMIO(0x71440), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
2726 	MMIO_F(_MMIO(0x72440), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
2727 	MMIO_F(_MMIO(0x7044c), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
2728 	MMIO_F(_MMIO(0x7144c), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
2729 	MMIO_F(_MMIO(0x7244c), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
2730 
2731 	MMIO_D(WM_LINETIME(PIPE_A), D_ALL);
2732 	MMIO_D(WM_LINETIME(PIPE_B), D_ALL);
2733 	MMIO_D(WM_LINETIME(PIPE_C), D_ALL);
2734 	MMIO_D(SPLL_CTL, D_ALL);
2735 	MMIO_D(_MMIO(_WRPLL_CTL1), D_ALL);
2736 	MMIO_D(_MMIO(_WRPLL_CTL2), D_ALL);
2737 	MMIO_D(PORT_CLK_SEL(PORT_A), D_ALL);
2738 	MMIO_D(PORT_CLK_SEL(PORT_B), D_ALL);
2739 	MMIO_D(PORT_CLK_SEL(PORT_C), D_ALL);
2740 	MMIO_D(PORT_CLK_SEL(PORT_D), D_ALL);
2741 	MMIO_D(PORT_CLK_SEL(PORT_E), D_ALL);
2742 	MMIO_D(TRANS_CLK_SEL(TRANSCODER_A), D_ALL);
2743 	MMIO_D(TRANS_CLK_SEL(TRANSCODER_B), D_ALL);
2744 	MMIO_D(TRANS_CLK_SEL(TRANSCODER_C), D_ALL);
2745 
2746 	MMIO_D(HSW_NDE_RSTWRN_OPT, D_ALL);
2747 	MMIO_D(_MMIO(0x46508), D_ALL);
2748 
2749 	MMIO_D(_MMIO(0x49080), D_ALL);
2750 	MMIO_D(_MMIO(0x49180), D_ALL);
2751 	MMIO_D(_MMIO(0x49280), D_ALL);
2752 
2753 	MMIO_F(_MMIO(0x49090), 0x14, 0, 0, 0, D_ALL, NULL, NULL);
2754 	MMIO_F(_MMIO(0x49190), 0x14, 0, 0, 0, D_ALL, NULL, NULL);
2755 	MMIO_F(_MMIO(0x49290), 0x14, 0, 0, 0, D_ALL, NULL, NULL);
2756 
2757 	MMIO_D(GAMMA_MODE(PIPE_A), D_ALL);
2758 	MMIO_D(GAMMA_MODE(PIPE_B), D_ALL);
2759 	MMIO_D(GAMMA_MODE(PIPE_C), D_ALL);
2760 
2761 	MMIO_D(PIPE_MULT(PIPE_A), D_ALL);
2762 	MMIO_D(PIPE_MULT(PIPE_B), D_ALL);
2763 	MMIO_D(PIPE_MULT(PIPE_C), D_ALL);
2764 
2765 	MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_A), D_ALL);
2766 	MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_B), D_ALL);
2767 	MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_C), D_ALL);
2768 
2769 	MMIO_DH(SFUSE_STRAP, D_ALL, NULL, NULL);
2770 	MMIO_D(SBI_ADDR, D_ALL);
2771 	MMIO_DH(SBI_DATA, D_ALL, sbi_data_mmio_read, NULL);
2772 	MMIO_DH(SBI_CTL_STAT, D_ALL, NULL, sbi_ctl_mmio_write);
2773 	MMIO_D(PIXCLK_GATE, D_ALL);
2774 
2775 	MMIO_F(_MMIO(_DPA_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_ALL, NULL,
2776 		dp_aux_ch_ctl_mmio_write);
2777 
2778 	MMIO_DH(DDI_BUF_CTL(PORT_A), D_ALL, NULL, ddi_buf_ctl_mmio_write);
2779 	MMIO_DH(DDI_BUF_CTL(PORT_B), D_ALL, NULL, ddi_buf_ctl_mmio_write);
2780 	MMIO_DH(DDI_BUF_CTL(PORT_C), D_ALL, NULL, ddi_buf_ctl_mmio_write);
2781 	MMIO_DH(DDI_BUF_CTL(PORT_D), D_ALL, NULL, ddi_buf_ctl_mmio_write);
2782 	MMIO_DH(DDI_BUF_CTL(PORT_E), D_ALL, NULL, ddi_buf_ctl_mmio_write);
2783 
2784 	MMIO_DH(DP_TP_CTL(PORT_A), D_ALL, NULL, dp_tp_ctl_mmio_write);
2785 	MMIO_DH(DP_TP_CTL(PORT_B), D_ALL, NULL, dp_tp_ctl_mmio_write);
2786 	MMIO_DH(DP_TP_CTL(PORT_C), D_ALL, NULL, dp_tp_ctl_mmio_write);
2787 	MMIO_DH(DP_TP_CTL(PORT_D), D_ALL, NULL, dp_tp_ctl_mmio_write);
2788 	MMIO_DH(DP_TP_CTL(PORT_E), D_ALL, NULL, dp_tp_ctl_mmio_write);
2789 
2790 	MMIO_DH(DP_TP_STATUS(PORT_A), D_ALL, NULL, dp_tp_status_mmio_write);
2791 	MMIO_DH(DP_TP_STATUS(PORT_B), D_ALL, NULL, dp_tp_status_mmio_write);
2792 	MMIO_DH(DP_TP_STATUS(PORT_C), D_ALL, NULL, dp_tp_status_mmio_write);
2793 	MMIO_DH(DP_TP_STATUS(PORT_D), D_ALL, NULL, dp_tp_status_mmio_write);
2794 	MMIO_DH(DP_TP_STATUS(PORT_E), D_ALL, NULL, NULL);
2795 
2796 	MMIO_F(_MMIO(_DDI_BUF_TRANS_A), 0x50, 0, 0, 0, D_ALL, NULL, NULL);
2797 	MMIO_F(_MMIO(0x64e60), 0x50, 0, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(_MMIO(0x64ec0), 0x50, 0, 0, 0, D_ALL, NULL, NULL);
2799 	MMIO_F(_MMIO(0x64f20), 0x50, 0, 0, 0, D_ALL, NULL, NULL);
2800 	MMIO_F(_MMIO(0x64f80), 0x50, 0, 0, 0, D_ALL, NULL, NULL);
2801 
2802 	MMIO_D(HSW_AUD_CFG(PIPE_A), D_ALL);
2803 	MMIO_D(HSW_AUD_PIN_ELD_CP_VLD, D_ALL);
2804 	MMIO_D(HSW_AUD_MISC_CTRL(PIPE_A), D_ALL);
2805 
2806 	MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_A), D_ALL, NULL, NULL);
2807 	MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_B), D_ALL, NULL, NULL);
2808 	MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_C), D_ALL, NULL, NULL);
2809 	MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_EDP), D_ALL, NULL, NULL);
2810 
2811 	MMIO_D(_MMIO(_TRANSA_MSA_MISC), D_ALL);
2812 	MMIO_D(_MMIO(_TRANSB_MSA_MISC), D_ALL);
2813 	MMIO_D(_MMIO(_TRANSC_MSA_MISC), D_ALL);
2814 	MMIO_D(_MMIO(_TRANS_EDP_MSA_MISC), D_ALL);
2815 
2816 	MMIO_DH(FORCEWAKE, D_ALL, NULL, NULL);
2817 	MMIO_D(FORCEWAKE_ACK, D_ALL);
2818 	MMIO_D(GEN6_GT_CORE_STATUS, D_ALL);
2819 	MMIO_D(GEN6_GT_THREAD_STATUS_REG, D_ALL);
2820 	MMIO_DFH(GTFIFODBG, D_ALL, F_CMD_ACCESS, NULL, NULL);
2821 	MMIO_DFH(GTFIFOCTL, D_ALL, F_CMD_ACCESS, NULL, NULL);
2822 	MMIO_DH(FORCEWAKE_MT, D_PRE_SKL, NULL, mul_force_wake_write);
2823 	MMIO_DH(FORCEWAKE_ACK_HSW, D_BDW, NULL, NULL);
2824 	MMIO_D(ECOBUS, D_ALL);
2825 	MMIO_DH(GEN6_RC_CONTROL, D_ALL, NULL, NULL);
2826 	MMIO_DH(GEN6_RC_STATE, D_ALL, NULL, NULL);
2827 	MMIO_D(GEN6_RPNSWREQ, D_ALL);
2828 	MMIO_D(GEN6_RC_VIDEO_FREQ, D_ALL);
2829 	MMIO_D(GEN6_RP_DOWN_TIMEOUT, D_ALL);
2830 	MMIO_D(GEN6_RP_INTERRUPT_LIMITS, D_ALL);
2831 	MMIO_D(GEN6_RPSTAT1, D_ALL);
2832 	MMIO_D(GEN6_RP_CONTROL, D_ALL);
2833 	MMIO_D(GEN6_RP_UP_THRESHOLD, D_ALL);
2834 	MMIO_D(GEN6_RP_DOWN_THRESHOLD, D_ALL);
2835 	MMIO_D(GEN6_RP_CUR_UP_EI, D_ALL);
2836 	MMIO_D(GEN6_RP_CUR_UP, D_ALL);
2837 	MMIO_D(GEN6_RP_PREV_UP, D_ALL);
2838 	MMIO_D(GEN6_RP_CUR_DOWN_EI, D_ALL);
2839 	MMIO_D(GEN6_RP_CUR_DOWN, D_ALL);
2840 	MMIO_D(GEN6_RP_PREV_DOWN, D_ALL);
2841 	MMIO_D(GEN6_RP_UP_EI, D_ALL);
2842 	MMIO_D(GEN6_RP_DOWN_EI, D_ALL);
2843 	MMIO_D(GEN6_RP_IDLE_HYSTERSIS, D_ALL);
2844 	MMIO_D(GEN6_RC1_WAKE_RATE_LIMIT, D_ALL);
2845 	MMIO_D(GEN6_RC6_WAKE_RATE_LIMIT, D_ALL);
2846 	MMIO_D(GEN6_RC6pp_WAKE_RATE_LIMIT, D_ALL);
2847 	MMIO_D(GEN6_RC_EVALUATION_INTERVAL, D_ALL);
2848 	MMIO_D(GEN6_RC_IDLE_HYSTERSIS, D_ALL);
2849 	MMIO_D(GEN6_RC_SLEEP, D_ALL);
2850 	MMIO_D(GEN6_RC1e_THRESHOLD, D_ALL);
2851 	MMIO_D(GEN6_RC6_THRESHOLD, D_ALL);
2852 	MMIO_D(GEN6_RC6p_THRESHOLD, D_ALL);
2853 	MMIO_D(GEN6_RC6pp_THRESHOLD, D_ALL);
2854 	MMIO_D(GEN6_PMINTRMSK, D_ALL);
2855 	MMIO_DH(HSW_PWR_WELL_CTL1, D_BDW, NULL, power_well_ctl_mmio_write);
2856 	MMIO_DH(HSW_PWR_WELL_CTL2, D_BDW, NULL, power_well_ctl_mmio_write);
2857 	MMIO_DH(HSW_PWR_WELL_CTL3, D_BDW, NULL, power_well_ctl_mmio_write);
2858 	MMIO_DH(HSW_PWR_WELL_CTL4, D_BDW, NULL, power_well_ctl_mmio_write);
2859 	MMIO_DH(HSW_PWR_WELL_CTL5, D_BDW, NULL, power_well_ctl_mmio_write);
2860 	MMIO_DH(HSW_PWR_WELL_CTL6, D_BDW, NULL, power_well_ctl_mmio_write);
2861 
2862 	MMIO_D(RSTDBYCTL, D_ALL);
2863 
2864 	MMIO_DH(GEN6_GDRST, D_ALL, NULL, gdrst_mmio_write);
2865 	MMIO_F(FENCE_REG_GEN6_LO(0), 0x80, 0, 0, 0, D_ALL, fence_mmio_read, fence_mmio_write);
2866 	MMIO_DH(CPU_VGACNTRL, D_ALL, NULL, vga_control_mmio_write);
2867 
2868 	MMIO_D(TILECTL, D_ALL);
2869 
2870 	MMIO_D(GEN6_UCGCTL1, D_ALL);
2871 	MMIO_D(GEN6_UCGCTL2, D_ALL);
2872 
2873 	MMIO_F(_MMIO(0x4f000), 0x90, 0, 0, 0, D_ALL, NULL, NULL);
2874 
2875 	MMIO_D(GEN6_PCODE_DATA, D_ALL);
2876 	MMIO_D(_MMIO(0x13812c), D_ALL);
2877 	MMIO_DH(GEN7_ERR_INT, D_ALL, NULL, NULL);
2878 	MMIO_D(HSW_EDRAM_CAP, D_ALL);
2879 	MMIO_D(HSW_IDICR, D_ALL);
2880 	MMIO_DH(GFX_FLSH_CNTL_GEN6, D_ALL, NULL, NULL);
2881 
2882 	MMIO_D(_MMIO(0x3c), D_ALL);
2883 	MMIO_D(_MMIO(0x860), D_ALL);
2884 	MMIO_D(ECOSKPD(RENDER_RING_BASE), D_ALL);
2885 	MMIO_D(_MMIO(0x121d0), D_ALL);
2886 	MMIO_D(ECOSKPD(BLT_RING_BASE), D_ALL);
2887 	MMIO_D(_MMIO(0x41d0), D_ALL);
2888 	MMIO_D(GAC_ECO_BITS, D_ALL);
2889 	MMIO_D(_MMIO(0x6200), D_ALL);
2890 	MMIO_D(_MMIO(0x6204), D_ALL);
2891 	MMIO_D(_MMIO(0x6208), D_ALL);
2892 	MMIO_D(_MMIO(0x7118), D_ALL);
2893 	MMIO_D(_MMIO(0x7180), D_ALL);
2894 	MMIO_D(_MMIO(0x7408), D_ALL);
2895 	MMIO_D(_MMIO(0x7c00), D_ALL);
2896 	MMIO_DH(GEN6_MBCTL, D_ALL, NULL, mbctl_write);
2897 	MMIO_D(_MMIO(0x911c), D_ALL);
2898 	MMIO_D(_MMIO(0x9120), D_ALL);
2899 	MMIO_DFH(GEN7_UCGCTL4, D_ALL, F_CMD_ACCESS, NULL, NULL);
2900 
2901 	MMIO_D(GAB_CTL, D_ALL);
2902 	MMIO_D(_MMIO(0x48800), D_ALL);
2903 	MMIO_D(_MMIO(0xce044), D_ALL);
2904 	MMIO_D(_MMIO(0xe6500), D_ALL);
2905 	MMIO_D(_MMIO(0xe6504), D_ALL);
2906 	MMIO_D(_MMIO(0xe6600), D_ALL);
2907 	MMIO_D(_MMIO(0xe6604), D_ALL);
2908 	MMIO_D(_MMIO(0xe6700), D_ALL);
2909 	MMIO_D(_MMIO(0xe6704), D_ALL);
2910 	MMIO_D(_MMIO(0xe6800), D_ALL);
2911 	MMIO_D(_MMIO(0xe6804), D_ALL);
2912 	MMIO_D(PCH_GMBUS4, D_ALL);
2913 	MMIO_D(PCH_GMBUS5, D_ALL);
2914 
2915 	MMIO_D(_MMIO(0x902c), D_ALL);
2916 	MMIO_D(_MMIO(0xec008), D_ALL);
2917 	MMIO_D(_MMIO(0xec00c), D_ALL);
2918 	MMIO_D(_MMIO(0xec008 + 0x18), D_ALL);
2919 	MMIO_D(_MMIO(0xec00c + 0x18), D_ALL);
2920 	MMIO_D(_MMIO(0xec008 + 0x18 * 2), D_ALL);
2921 	MMIO_D(_MMIO(0xec00c + 0x18 * 2), D_ALL);
2922 	MMIO_D(_MMIO(0xec008 + 0x18 * 3), D_ALL);
2923 	MMIO_D(_MMIO(0xec00c + 0x18 * 3), D_ALL);
2924 	MMIO_D(_MMIO(0xec408), D_ALL);
2925 	MMIO_D(_MMIO(0xec40c), D_ALL);
2926 	MMIO_D(_MMIO(0xec408 + 0x18), D_ALL);
2927 	MMIO_D(_MMIO(0xec40c + 0x18), D_ALL);
2928 	MMIO_D(_MMIO(0xec408 + 0x18 * 2), D_ALL);
2929 	MMIO_D(_MMIO(0xec40c + 0x18 * 2), D_ALL);
2930 	MMIO_D(_MMIO(0xec408 + 0x18 * 3), D_ALL);
2931 	MMIO_D(_MMIO(0xec40c + 0x18 * 3), D_ALL);
2932 	MMIO_D(_MMIO(0xfc810), D_ALL);
2933 	MMIO_D(_MMIO(0xfc81c), D_ALL);
2934 	MMIO_D(_MMIO(0xfc828), D_ALL);
2935 	MMIO_D(_MMIO(0xfc834), D_ALL);
2936 	MMIO_D(_MMIO(0xfcc00), D_ALL);
2937 	MMIO_D(_MMIO(0xfcc0c), D_ALL);
2938 	MMIO_D(_MMIO(0xfcc18), D_ALL);
2939 	MMIO_D(_MMIO(0xfcc24), D_ALL);
2940 	MMIO_D(_MMIO(0xfd000), D_ALL);
2941 	MMIO_D(_MMIO(0xfd00c), D_ALL);
2942 	MMIO_D(_MMIO(0xfd018), D_ALL);
2943 	MMIO_D(_MMIO(0xfd024), D_ALL);
2944 	MMIO_D(_MMIO(0xfd034), D_ALL);
2945 
2946 	MMIO_DH(FPGA_DBG, D_ALL, NULL, fpga_dbg_mmio_write);
2947 	MMIO_D(_MMIO(0x2054), D_ALL);
2948 	MMIO_D(_MMIO(0x12054), D_ALL);
2949 	MMIO_D(_MMIO(0x22054), D_ALL);
2950 	MMIO_D(_MMIO(0x1a054), D_ALL);
2951 
2952 	MMIO_D(_MMIO(0x44070), D_ALL);
2953 	MMIO_DFH(_MMIO(0x215c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2954 	MMIO_DFH(_MMIO(0x2178), D_ALL, F_CMD_ACCESS, NULL, NULL);
2955 	MMIO_DFH(_MMIO(0x217c), D_ALL, F_CMD_ACCESS, NULL, NULL);
2956 	MMIO_DFH(_MMIO(0x12178), D_ALL, F_CMD_ACCESS, NULL, NULL);
2957 	MMIO_DFH(_MMIO(0x1217c), D_ALL, F_CMD_ACCESS, NULL, NULL);
2958 
2959 	MMIO_F(_MMIO(0x2290), 8, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, NULL, NULL);
2960 	MMIO_D(_MMIO(0x2b00), D_BDW_PLUS);
2961 	MMIO_D(_MMIO(0x2360), D_BDW_PLUS);
2962 	MMIO_F(_MMIO(0x5200), 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2963 	MMIO_F(_MMIO(0x5240), 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2964 	MMIO_F(_MMIO(0x5280), 16, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2965 
2966 	MMIO_DFH(_MMIO(0x1c17c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2967 	MMIO_DFH(_MMIO(0x1c178), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2968 	MMIO_DFH(BCS_SWCTRL, D_ALL, F_CMD_ACCESS, NULL, NULL);
2969 
2970 	MMIO_F(HS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2971 	MMIO_F(DS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2972 	MMIO_F(IA_VERTICES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2973 	MMIO_F(IA_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2974 	MMIO_F(VS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2975 	MMIO_F(GS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2976 	MMIO_F(GS_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2977 	MMIO_F(CL_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2978 	MMIO_F(CL_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2979 	MMIO_F(PS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2980 	MMIO_F(PS_DEPTH_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2981 	MMIO_DH(_MMIO(0x4260), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
2982 	MMIO_DH(_MMIO(0x4264), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
2983 	MMIO_DH(_MMIO(0x4268), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
2984 	MMIO_DH(_MMIO(0x426c), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
2985 	MMIO_DH(_MMIO(0x4270), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
2986 	MMIO_DFH(_MMIO(0x4094), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2987 
2988 	MMIO_DFH(ARB_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2989 	MMIO_RING_GM(RING_BBADDR, D_ALL, NULL, NULL);
2990 	MMIO_DFH(_MMIO(0x2220), D_ALL, F_CMD_ACCESS, NULL, NULL);
2991 	MMIO_DFH(_MMIO(0x12220), D_ALL, F_CMD_ACCESS, NULL, NULL);
2992 	MMIO_DFH(_MMIO(0x22220), D_ALL, F_CMD_ACCESS, NULL, NULL);
2993 	MMIO_RING_DFH(RING_SYNC_1, D_ALL, F_CMD_ACCESS, NULL, NULL);
2994 	MMIO_RING_DFH(RING_SYNC_0, D_ALL, F_CMD_ACCESS, NULL, NULL);
2995 	MMIO_DFH(_MMIO(0x22178), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2996 	MMIO_DFH(_MMIO(0x1a178), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2997 	MMIO_DFH(_MMIO(0x1a17c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2998 	MMIO_DFH(_MMIO(0x2217c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2999 
3000 	MMIO_DH(EDP_PSR_IMR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write);
3001 	MMIO_DH(EDP_PSR_IIR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write);
3002 	MMIO_DH(GUC_STATUS, D_ALL, guc_status_read, NULL);
3003 
3004 	return 0;
3005 }
3006 
3007 static int init_bdw_mmio_info(struct intel_gvt *gvt)
3008 {
3009 	struct drm_i915_private *dev_priv = gvt->gt->i915;
3010 	int ret;
3011 
3012 	MMIO_DH(GEN8_GT_IMR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
3013 	MMIO_DH(GEN8_GT_IER(0), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
3014 	MMIO_DH(GEN8_GT_IIR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
3015 	MMIO_D(GEN8_GT_ISR(0), D_BDW_PLUS);
3016 
3017 	MMIO_DH(GEN8_GT_IMR(1), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
3018 	MMIO_DH(GEN8_GT_IER(1), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
3019 	MMIO_DH(GEN8_GT_IIR(1), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
3020 	MMIO_D(GEN8_GT_ISR(1), D_BDW_PLUS);
3021 
3022 	MMIO_DH(GEN8_GT_IMR(2), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
3023 	MMIO_DH(GEN8_GT_IER(2), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
3024 	MMIO_DH(GEN8_GT_IIR(2), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
3025 	MMIO_D(GEN8_GT_ISR(2), D_BDW_PLUS);
3026 
3027 	MMIO_DH(GEN8_GT_IMR(3), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
3028 	MMIO_DH(GEN8_GT_IER(3), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
3029 	MMIO_DH(GEN8_GT_IIR(3), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
3030 	MMIO_D(GEN8_GT_ISR(3), D_BDW_PLUS);
3031 
3032 	MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_A), D_BDW_PLUS, NULL,
3033 		intel_vgpu_reg_imr_handler);
3034 	MMIO_DH(GEN8_DE_PIPE_IER(PIPE_A), D_BDW_PLUS, NULL,
3035 		intel_vgpu_reg_ier_handler);
3036 	MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_A), D_BDW_PLUS, NULL,
3037 		intel_vgpu_reg_iir_handler);
3038 	MMIO_D(GEN8_DE_PIPE_ISR(PIPE_A), D_BDW_PLUS);
3039 
3040 	MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_B), D_BDW_PLUS, NULL,
3041 		intel_vgpu_reg_imr_handler);
3042 	MMIO_DH(GEN8_DE_PIPE_IER(PIPE_B), D_BDW_PLUS, NULL,
3043 		intel_vgpu_reg_ier_handler);
3044 	MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_B), D_BDW_PLUS, NULL,
3045 		intel_vgpu_reg_iir_handler);
3046 	MMIO_D(GEN8_DE_PIPE_ISR(PIPE_B), D_BDW_PLUS);
3047 
3048 	MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_C), D_BDW_PLUS, NULL,
3049 		intel_vgpu_reg_imr_handler);
3050 	MMIO_DH(GEN8_DE_PIPE_IER(PIPE_C), D_BDW_PLUS, NULL,
3051 		intel_vgpu_reg_ier_handler);
3052 	MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_C), D_BDW_PLUS, NULL,
3053 		intel_vgpu_reg_iir_handler);
3054 	MMIO_D(GEN8_DE_PIPE_ISR(PIPE_C), D_BDW_PLUS);
3055 
3056 	MMIO_DH(GEN8_DE_PORT_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
3057 	MMIO_DH(GEN8_DE_PORT_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
3058 	MMIO_DH(GEN8_DE_PORT_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
3059 	MMIO_D(GEN8_DE_PORT_ISR, D_BDW_PLUS);
3060 
3061 	MMIO_DH(GEN8_DE_MISC_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
3062 	MMIO_DH(GEN8_DE_MISC_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
3063 	MMIO_DH(GEN8_DE_MISC_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
3064 	MMIO_D(GEN8_DE_MISC_ISR, D_BDW_PLUS);
3065 
3066 	MMIO_DH(GEN8_PCU_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
3067 	MMIO_DH(GEN8_PCU_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
3068 	MMIO_DH(GEN8_PCU_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
3069 	MMIO_D(GEN8_PCU_ISR, D_BDW_PLUS);
3070 
3071 	MMIO_DH(GEN8_MASTER_IRQ, D_BDW_PLUS, NULL,
3072 		intel_vgpu_reg_master_irq_handler);
3073 
3074 	MMIO_RING_DFH(RING_ACTHD_UDW, D_BDW_PLUS, 0,
3075 		mmio_read_from_hw, NULL);
3076 
3077 #define RING_REG(base) _MMIO((base) + 0xd0)
3078 	MMIO_RING_F(RING_REG, 4, F_RO, 0,
3079 		~_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET), D_BDW_PLUS, NULL,
3080 		ring_reset_ctl_write);
3081 #undef RING_REG
3082 
3083 #define RING_REG(base) _MMIO((base) + 0x230)
3084 	MMIO_RING_DFH(RING_REG, D_BDW_PLUS, 0, NULL, elsp_mmio_write);
3085 #undef RING_REG
3086 
3087 #define RING_REG(base) _MMIO((base) + 0x234)
3088 	MMIO_RING_F(RING_REG, 8, F_RO, 0, ~0, D_BDW_PLUS,
3089 		NULL, NULL);
3090 #undef RING_REG
3091 
3092 #define RING_REG(base) _MMIO((base) + 0x244)
3093 	MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
3094 #undef RING_REG
3095 
3096 #define RING_REG(base) _MMIO((base) + 0x370)
3097 	MMIO_RING_F(RING_REG, 48, F_RO, 0, ~0, D_BDW_PLUS, NULL, NULL);
3098 #undef RING_REG
3099 
3100 #define RING_REG(base) _MMIO((base) + 0x3a0)
3101 	MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
3102 #undef RING_REG
3103 
3104 	MMIO_D(PIPEMISC(PIPE_A), D_BDW_PLUS);
3105 	MMIO_D(PIPEMISC(PIPE_B), D_BDW_PLUS);
3106 	MMIO_D(PIPEMISC(PIPE_C), D_BDW_PLUS);
3107 	MMIO_D(_MMIO(0x1c1d0), D_BDW_PLUS);
3108 	MMIO_D(GEN6_MBCUNIT_SNPCR, D_BDW_PLUS);
3109 	MMIO_D(GEN7_MISCCPCTL, D_BDW_PLUS);
3110 	MMIO_D(_MMIO(0x1c054), D_BDW_PLUS);
3111 
3112 	MMIO_DH(GEN6_PCODE_MAILBOX, D_BDW_PLUS, NULL, mailbox_write);
3113 
3114 	MMIO_D(GEN8_PRIVATE_PAT_LO, D_BDW_PLUS & ~D_BXT);
3115 	MMIO_D(GEN8_PRIVATE_PAT_HI, D_BDW_PLUS);
3116 
3117 	MMIO_D(GAMTARBMODE, D_BDW_PLUS);
3118 
3119 #define RING_REG(base) _MMIO((base) + 0x270)
3120 	MMIO_RING_F(RING_REG, 32, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, NULL, NULL);
3121 #undef RING_REG
3122 
3123 	MMIO_RING_GM(RING_HWS_PGA, D_BDW_PLUS, NULL, hws_pga_write);
3124 
3125 	MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
3126 
3127 	MMIO_D(CHICKEN_PIPESL_1(PIPE_A), D_BDW_PLUS);
3128 	MMIO_D(CHICKEN_PIPESL_1(PIPE_B), D_BDW_PLUS);
3129 	MMIO_D(CHICKEN_PIPESL_1(PIPE_C), D_BDW_PLUS);
3130 
3131 	MMIO_D(WM_MISC, D_BDW);
3132 	MMIO_D(_MMIO(_SRD_CTL_EDP), D_BDW);
3133 
3134 	MMIO_D(_MMIO(0x6671c), D_BDW_PLUS);
3135 	MMIO_D(_MMIO(0x66c00), D_BDW_PLUS);
3136 	MMIO_D(_MMIO(0x66c04), D_BDW_PLUS);
3137 
3138 	MMIO_D(HSW_GTT_CACHE_EN, D_BDW_PLUS);
3139 
3140 	MMIO_D(GEN8_EU_DISABLE0, D_BDW_PLUS);
3141 	MMIO_D(GEN8_EU_DISABLE1, D_BDW_PLUS);
3142 	MMIO_D(GEN8_EU_DISABLE2, D_BDW_PLUS);
3143 
3144 	MMIO_D(_MMIO(0xfdc), D_BDW_PLUS);
3145 	MMIO_DFH(GEN8_ROW_CHICKEN, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS,
3146 		NULL, NULL);
3147 	MMIO_DFH(GEN7_ROW_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS,
3148 		NULL, NULL);
3149 	MMIO_DFH(GEN8_UCGCTL6, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
3150 
3151 	MMIO_DFH(_MMIO(0xb1f0), D_BDW, F_CMD_ACCESS, NULL, NULL);
3152 	MMIO_DFH(_MMIO(0xb1c0), D_BDW, F_CMD_ACCESS, NULL, NULL);
3153 	MMIO_DFH(GEN8_L3SQCREG4, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
3154 	MMIO_DFH(_MMIO(0xb100), D_BDW, F_CMD_ACCESS, NULL, NULL);
3155 	MMIO_DFH(_MMIO(0xb10c), D_BDW, F_CMD_ACCESS, NULL, NULL);
3156 	MMIO_D(_MMIO(0xb110), D_BDW);
3157 	MMIO_D(GEN9_SCRATCH_LNCF1, D_BDW_PLUS);
3158 
3159 	MMIO_F(_MMIO(0x24d0), 48, F_CMD_ACCESS | F_CMD_WRITE_PATCH, 0, 0,
3160 		D_BDW_PLUS, NULL, force_nonpriv_write);
3161 
3162 	MMIO_D(_MMIO(0x44484), D_BDW_PLUS);
3163 	MMIO_D(_MMIO(0x4448c), D_BDW_PLUS);
3164 
3165 	MMIO_DFH(_MMIO(0x83a4), D_BDW, F_CMD_ACCESS, NULL, NULL);
3166 	MMIO_D(GEN8_L3_LRA_1_GPGPU, D_BDW_PLUS);
3167 
3168 	MMIO_DFH(_MMIO(0x8430), D_BDW, F_CMD_ACCESS, NULL, NULL);
3169 
3170 	MMIO_D(_MMIO(0x110000), D_BDW_PLUS);
3171 
3172 	MMIO_D(_MMIO(0x48400), D_BDW_PLUS);
3173 
3174 	MMIO_D(_MMIO(0x6e570), D_BDW_PLUS);
3175 	MMIO_D(_MMIO(0x65f10), D_BDW_PLUS);
3176 
3177 	MMIO_DFH(_MMIO(0xe194), D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
3178 	MMIO_DFH(_MMIO(0xe188), D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
3179 	MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
3180 	MMIO_DFH(_MMIO(0x2580), D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
3181 
3182 	MMIO_DFH(_MMIO(0x2248), D_BDW, F_CMD_ACCESS, NULL, NULL);
3183 
3184 	MMIO_DFH(_MMIO(0xe220), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
3185 	MMIO_DFH(_MMIO(0xe230), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
3186 	MMIO_DFH(_MMIO(0xe240), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
3187 	MMIO_DFH(_MMIO(0xe260), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
3188 	MMIO_DFH(_MMIO(0xe270), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
3189 	MMIO_DFH(_MMIO(0xe280), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
3190 	MMIO_DFH(_MMIO(0xe2a0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
3191 	MMIO_DFH(_MMIO(0xe2b0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
3192 	MMIO_DFH(_MMIO(0xe2c0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
3193 	MMIO_DFH(_MMIO(0x21f0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
3194 	return 0;
3195 }
3196 
3197 static int init_skl_mmio_info(struct intel_gvt *gvt)
3198 {
3199 	struct drm_i915_private *dev_priv = gvt->gt->i915;
3200 	int ret;
3201 
3202 	MMIO_DH(FORCEWAKE_RENDER_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
3203 	MMIO_DH(FORCEWAKE_ACK_RENDER_GEN9, D_SKL_PLUS, NULL, NULL);
3204 	MMIO_DH(FORCEWAKE_GT_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
3205 	MMIO_DH(FORCEWAKE_ACK_GT_GEN9, D_SKL_PLUS, NULL, NULL);
3206 	MMIO_DH(FORCEWAKE_MEDIA_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
3207 	MMIO_DH(FORCEWAKE_ACK_MEDIA_GEN9, D_SKL_PLUS, NULL, NULL);
3208 
3209 	MMIO_F(DP_AUX_CH_CTL(AUX_CH_B), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
3210 						dp_aux_ch_ctl_mmio_write);
3211 	MMIO_F(DP_AUX_CH_CTL(AUX_CH_C), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
3212 						dp_aux_ch_ctl_mmio_write);
3213 	MMIO_F(DP_AUX_CH_CTL(AUX_CH_D), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
3214 						dp_aux_ch_ctl_mmio_write);
3215 
3216 	MMIO_D(HSW_PWR_WELL_CTL1, D_SKL_PLUS);
3217 	MMIO_DH(HSW_PWR_WELL_CTL2, D_SKL_PLUS, NULL, skl_power_well_ctl_write);
3218 
3219 	MMIO_DH(DBUF_CTL_S(0), D_SKL_PLUS, NULL, gen9_dbuf_ctl_mmio_write);
3220 
3221 	MMIO_D(GEN9_PG_ENABLE, D_SKL_PLUS);
3222 	MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
3223 	MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
3224 	MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
3225 	MMIO_DFH(MMCD_MISC_CTRL, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
3226 	MMIO_DH(CHICKEN_PAR1_1, D_SKL_PLUS, NULL, NULL);
3227 	MMIO_D(DC_STATE_EN, D_SKL_PLUS);
3228 	MMIO_D(DC_STATE_DEBUG, D_SKL_PLUS);
3229 	MMIO_D(CDCLK_CTL, D_SKL_PLUS);
3230 	MMIO_DH(LCPLL1_CTL, D_SKL_PLUS, NULL, skl_lcpll_write);
3231 	MMIO_DH(LCPLL2_CTL, D_SKL_PLUS, NULL, skl_lcpll_write);
3232 	MMIO_D(_MMIO(_DPLL1_CFGCR1), D_SKL_PLUS);
3233 	MMIO_D(_MMIO(_DPLL2_CFGCR1), D_SKL_PLUS);
3234 	MMIO_D(_MMIO(_DPLL3_CFGCR1), D_SKL_PLUS);
3235 	MMIO_D(_MMIO(_DPLL1_CFGCR2), D_SKL_PLUS);
3236 	MMIO_D(_MMIO(_DPLL2_CFGCR2), D_SKL_PLUS);
3237 	MMIO_D(_MMIO(_DPLL3_CFGCR2), D_SKL_PLUS);
3238 	MMIO_D(DPLL_CTRL1, D_SKL_PLUS);
3239 	MMIO_D(DPLL_CTRL2, D_SKL_PLUS);
3240 	MMIO_DH(DPLL_STATUS, D_SKL_PLUS, dpll_status_read, NULL);
3241 
3242 	MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
3243 	MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
3244 	MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write);
3245 	MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write);
3246 	MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write);
3247 	MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write);
3248 
3249 	MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
3250 	MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
3251 	MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write);
3252 	MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write);
3253 	MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write);
3254 	MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write);
3255 
3256 	MMIO_DH(SKL_PS_CTRL(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
3257 	MMIO_DH(SKL_PS_CTRL(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
3258 	MMIO_DH(SKL_PS_CTRL(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write);
3259 	MMIO_DH(SKL_PS_CTRL(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write);
3260 	MMIO_DH(SKL_PS_CTRL(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write);
3261 	MMIO_DH(SKL_PS_CTRL(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write);
3262 
3263 	MMIO_DH(PLANE_BUF_CFG(PIPE_A, 0), D_SKL_PLUS, NULL, NULL);
3264 	MMIO_DH(PLANE_BUF_CFG(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
3265 	MMIO_DH(PLANE_BUF_CFG(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
3266 	MMIO_DH(PLANE_BUF_CFG(PIPE_A, 3), D_SKL_PLUS, NULL, NULL);
3267 
3268 	MMIO_DH(PLANE_BUF_CFG(PIPE_B, 0), D_SKL_PLUS, NULL, NULL);
3269 	MMIO_DH(PLANE_BUF_CFG(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
3270 	MMIO_DH(PLANE_BUF_CFG(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
3271 	MMIO_DH(PLANE_BUF_CFG(PIPE_B, 3), D_SKL_PLUS, NULL, NULL);
3272 
3273 	MMIO_DH(PLANE_BUF_CFG(PIPE_C, 0), D_SKL_PLUS, NULL, NULL);
3274 	MMIO_DH(PLANE_BUF_CFG(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
3275 	MMIO_DH(PLANE_BUF_CFG(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
3276 	MMIO_DH(PLANE_BUF_CFG(PIPE_C, 3), D_SKL_PLUS, NULL, NULL);
3277 
3278 	MMIO_DH(CUR_BUF_CFG(PIPE_A), D_SKL_PLUS, NULL, NULL);
3279 	MMIO_DH(CUR_BUF_CFG(PIPE_B), D_SKL_PLUS, NULL, NULL);
3280 	MMIO_DH(CUR_BUF_CFG(PIPE_C), D_SKL_PLUS, NULL, NULL);
3281 
3282 	MMIO_F(PLANE_WM(PIPE_A, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
3283 	MMIO_F(PLANE_WM(PIPE_A, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
3284 	MMIO_F(PLANE_WM(PIPE_A, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
3285 
3286 	MMIO_F(PLANE_WM(PIPE_B, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
3287 	MMIO_F(PLANE_WM(PIPE_B, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
3288 	MMIO_F(PLANE_WM(PIPE_B, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
3289 
3290 	MMIO_F(PLANE_WM(PIPE_C, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
3291 	MMIO_F(PLANE_WM(PIPE_C, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
3292 	MMIO_F(PLANE_WM(PIPE_C, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
3293 
3294 	MMIO_F(CUR_WM(PIPE_A, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
3295 	MMIO_F(CUR_WM(PIPE_B, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
3296 	MMIO_F(CUR_WM(PIPE_C, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
3297 
3298 	MMIO_DH(PLANE_WM_TRANS(PIPE_A, 0), D_SKL_PLUS, NULL, NULL);
3299 	MMIO_DH(PLANE_WM_TRANS(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
3300 	MMIO_DH(PLANE_WM_TRANS(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
3301 
3302 	MMIO_DH(PLANE_WM_TRANS(PIPE_B, 0), D_SKL_PLUS, NULL, NULL);
3303 	MMIO_DH(PLANE_WM_TRANS(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
3304 	MMIO_DH(PLANE_WM_TRANS(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
3305 
3306 	MMIO_DH(PLANE_WM_TRANS(PIPE_C, 0), D_SKL_PLUS, NULL, NULL);
3307 	MMIO_DH(PLANE_WM_TRANS(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
3308 	MMIO_DH(PLANE_WM_TRANS(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
3309 
3310 	MMIO_DH(CUR_WM_TRANS(PIPE_A), D_SKL_PLUS, NULL, NULL);
3311 	MMIO_DH(CUR_WM_TRANS(PIPE_B), D_SKL_PLUS, NULL, NULL);
3312 	MMIO_DH(CUR_WM_TRANS(PIPE_C), D_SKL_PLUS, NULL, NULL);
3313 
3314 	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 0), D_SKL_PLUS, NULL, NULL);
3315 	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
3316 	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
3317 	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 3), D_SKL_PLUS, NULL, NULL);
3318 
3319 	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 0), D_SKL_PLUS, NULL, NULL);
3320 	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
3321 	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
3322 	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 3), D_SKL_PLUS, NULL, NULL);
3323 
3324 	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 0), D_SKL_PLUS, NULL, NULL);
3325 	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
3326 	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
3327 	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 3), D_SKL_PLUS, NULL, NULL);
3328 
3329 	MMIO_DH(_MMIO(_REG_701C0(PIPE_A, 1)), D_SKL_PLUS, NULL, NULL);
3330 	MMIO_DH(_MMIO(_REG_701C0(PIPE_A, 2)), D_SKL_PLUS, NULL, NULL);
3331 	MMIO_DH(_MMIO(_REG_701C0(PIPE_A, 3)), D_SKL_PLUS, NULL, NULL);
3332 	MMIO_DH(_MMIO(_REG_701C0(PIPE_A, 4)), D_SKL_PLUS, NULL, NULL);
3333 
3334 	MMIO_DH(_MMIO(_REG_701C0(PIPE_B, 1)), D_SKL_PLUS, NULL, NULL);
3335 	MMIO_DH(_MMIO(_REG_701C0(PIPE_B, 2)), D_SKL_PLUS, NULL, NULL);
3336 	MMIO_DH(_MMIO(_REG_701C0(PIPE_B, 3)), D_SKL_PLUS, NULL, NULL);
3337 	MMIO_DH(_MMIO(_REG_701C0(PIPE_B, 4)), D_SKL_PLUS, NULL, NULL);
3338 
3339 	MMIO_DH(_MMIO(_REG_701C0(PIPE_C, 1)), D_SKL_PLUS, NULL, NULL);
3340 	MMIO_DH(_MMIO(_REG_701C0(PIPE_C, 2)), D_SKL_PLUS, NULL, NULL);
3341 	MMIO_DH(_MMIO(_REG_701C0(PIPE_C, 3)), D_SKL_PLUS, NULL, NULL);
3342 	MMIO_DH(_MMIO(_REG_701C0(PIPE_C, 4)), D_SKL_PLUS, NULL, NULL);
3343 
3344 	MMIO_DH(_MMIO(_REG_701C4(PIPE_A, 1)), D_SKL_PLUS, NULL, NULL);
3345 	MMIO_DH(_MMIO(_REG_701C4(PIPE_A, 2)), D_SKL_PLUS, NULL, NULL);
3346 	MMIO_DH(_MMIO(_REG_701C4(PIPE_A, 3)), D_SKL_PLUS, NULL, NULL);
3347 	MMIO_DH(_MMIO(_REG_701C4(PIPE_A, 4)), D_SKL_PLUS, NULL, NULL);
3348 
3349 	MMIO_DH(_MMIO(_REG_701C4(PIPE_B, 1)), D_SKL_PLUS, NULL, NULL);
3350 	MMIO_DH(_MMIO(_REG_701C4(PIPE_B, 2)), D_SKL_PLUS, NULL, NULL);
3351 	MMIO_DH(_MMIO(_REG_701C4(PIPE_B, 3)), D_SKL_PLUS, NULL, NULL);
3352 	MMIO_DH(_MMIO(_REG_701C4(PIPE_B, 4)), D_SKL_PLUS, NULL, NULL);
3353 
3354 	MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 1)), D_SKL_PLUS, NULL, NULL);
3355 	MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 2)), D_SKL_PLUS, NULL, NULL);
3356 	MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 3)), D_SKL_PLUS, NULL, NULL);
3357 	MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 4)), D_SKL_PLUS, NULL, NULL);
3358 
3359 	MMIO_D(_MMIO(_PLANE_CTL_3_A), D_SKL_PLUS);
3360 	MMIO_D(_MMIO(_PLANE_CTL_3_B), D_SKL_PLUS);
3361 	MMIO_D(_MMIO(0x72380), D_SKL_PLUS);
3362 	MMIO_D(_MMIO(0x7239c), D_SKL_PLUS);
3363 	MMIO_D(_MMIO(_PLANE_SURF_3_A), D_SKL_PLUS);
3364 	MMIO_D(_MMIO(_PLANE_SURF_3_B), D_SKL_PLUS);
3365 
3366 	MMIO_D(DMC_SSP_BASE, D_SKL_PLUS);
3367 	MMIO_D(DMC_HTP_SKL, D_SKL_PLUS);
3368 	MMIO_D(DMC_LAST_WRITE, D_SKL_PLUS);
3369 
3370 	MMIO_DFH(BDW_SCRATCH1, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
3371 
3372 	MMIO_D(SKL_DFSM, D_SKL_PLUS);
3373 	MMIO_D(DISPIO_CR_TX_BMU_CR0, D_SKL_PLUS);
3374 
3375 	MMIO_F(GEN9_GFX_MOCS(0), 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
3376 		NULL, NULL);
3377 	MMIO_F(GEN7_L3CNTLREG2, 0x80, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
3378 		NULL, NULL);
3379 
3380 	MMIO_D(RPM_CONFIG0, D_SKL_PLUS);
3381 	MMIO_D(_MMIO(0xd08), D_SKL_PLUS);
3382 	MMIO_D(RC6_LOCATION, D_SKL_PLUS);
3383 	MMIO_DFH(GEN7_FF_SLICE_CS_CHICKEN1, D_SKL_PLUS,
3384 		 F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
3385 	MMIO_DFH(GEN9_CS_DEBUG_MODE1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
3386 		NULL, NULL);
3387 
3388 	/* TRTT */
3389 	MMIO_DFH(TRVATTL3PTRDW(0), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
3390 	MMIO_DFH(TRVATTL3PTRDW(1), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
3391 	MMIO_DFH(TRVATTL3PTRDW(2), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
3392 	MMIO_DFH(TRVATTL3PTRDW(3), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
3393 	MMIO_DFH(TRVADR, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
3394 	MMIO_DFH(TRTTE, D_SKL_PLUS, F_CMD_ACCESS | F_PM_SAVE,
3395 		 NULL, gen9_trtte_write);
3396 	MMIO_DFH(_MMIO(0x4dfc), D_SKL_PLUS, F_PM_SAVE,
3397 		 NULL, gen9_trtt_chicken_write);
3398 
3399 	MMIO_D(_MMIO(0x46430), D_SKL_PLUS);
3400 
3401 	MMIO_D(_MMIO(0x46520), D_SKL_PLUS);
3402 
3403 	MMIO_D(_MMIO(0xc403c), D_SKL_PLUS);
3404 	MMIO_DFH(GEN8_GARBCNTL, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
3405 	MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write);
3406 
3407 	MMIO_D(_MMIO(0x65900), D_SKL_PLUS);
3408 	MMIO_D(GEN6_STOLEN_RESERVED, D_SKL_PLUS);
3409 	MMIO_D(_MMIO(0x4068), D_SKL_PLUS);
3410 	MMIO_D(_MMIO(0x67054), D_SKL_PLUS);
3411 	MMIO_D(_MMIO(0x6e560), D_SKL_PLUS);
3412 	MMIO_D(_MMIO(0x6e554), D_SKL_PLUS);
3413 	MMIO_D(_MMIO(0x2b20), D_SKL_PLUS);
3414 	MMIO_D(_MMIO(0x65f00), D_SKL_PLUS);
3415 	MMIO_D(_MMIO(0x65f08), D_SKL_PLUS);
3416 	MMIO_D(_MMIO(0x320f0), D_SKL_PLUS);
3417 
3418 	MMIO_D(_MMIO(0x70034), D_SKL_PLUS);
3419 	MMIO_D(_MMIO(0x71034), D_SKL_PLUS);
3420 	MMIO_D(_MMIO(0x72034), D_SKL_PLUS);
3421 
3422 	MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_A)), D_SKL_PLUS);
3423 	MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_B)), D_SKL_PLUS);
3424 	MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_C)), D_SKL_PLUS);
3425 	MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_A)), D_SKL_PLUS);
3426 	MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_B)), D_SKL_PLUS);
3427 	MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_C)), D_SKL_PLUS);
3428 	MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_A)), D_SKL_PLUS);
3429 	MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_B)), D_SKL_PLUS);
3430 	MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_C)), D_SKL_PLUS);
3431 
3432 	MMIO_D(_MMIO(0x44500), D_SKL_PLUS);
3433 #define CSFE_CHICKEN1_REG(base) _MMIO((base) + 0xD4)
3434 	MMIO_RING_DFH(CSFE_CHICKEN1_REG, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
3435 		      NULL, csfe_chicken1_mmio_write);
3436 #undef CSFE_CHICKEN1_REG
3437 	MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
3438 		 NULL, NULL);
3439 	MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
3440 		 NULL, NULL);
3441 
3442 	MMIO_DFH(GAMT_CHKN_BIT_REG, D_KBL | D_CFL, F_CMD_ACCESS, NULL, NULL);
3443 	MMIO_D(GEN9_CTX_PREEMPT_REG, D_SKL_PLUS & ~D_BXT);
3444 	MMIO_DFH(_MMIO(0xe4cc), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
3445 
3446 	return 0;
3447 }
3448 
3449 static int init_bxt_mmio_info(struct intel_gvt *gvt)
3450 {
3451 	struct drm_i915_private *dev_priv = gvt->gt->i915;
3452 	int ret;
3453 
3454 	MMIO_F(_MMIO(0x80000), 0x3000, 0, 0, 0, D_BXT, NULL, NULL);
3455 
3456 	MMIO_D(GEN7_SAMPLER_INSTDONE, D_BXT);
3457 	MMIO_D(GEN7_ROW_INSTDONE, D_BXT);
3458 	MMIO_D(GEN8_FAULT_TLB_DATA0, D_BXT);
3459 	MMIO_D(GEN8_FAULT_TLB_DATA1, D_BXT);
3460 	MMIO_D(ERROR_GEN6, D_BXT);
3461 	MMIO_D(DONE_REG, D_BXT);
3462 	MMIO_D(EIR, D_BXT);
3463 	MMIO_D(PGTBL_ER, D_BXT);
3464 	MMIO_D(_MMIO(0x4194), D_BXT);
3465 	MMIO_D(_MMIO(0x4294), D_BXT);
3466 	MMIO_D(_MMIO(0x4494), D_BXT);
3467 
3468 	MMIO_RING_D(RING_PSMI_CTL, D_BXT);
3469 	MMIO_RING_D(RING_DMA_FADD, D_BXT);
3470 	MMIO_RING_D(RING_DMA_FADD_UDW, D_BXT);
3471 	MMIO_RING_D(RING_IPEHR, D_BXT);
3472 	MMIO_RING_D(RING_INSTPS, D_BXT);
3473 	MMIO_RING_D(RING_BBADDR_UDW, D_BXT);
3474 	MMIO_RING_D(RING_BBSTATE, D_BXT);
3475 	MMIO_RING_D(RING_IPEIR, D_BXT);
3476 
3477 	MMIO_F(SOFT_SCRATCH(0), 16 * 4, 0, 0, 0, D_BXT, NULL, NULL);
3478 
3479 	MMIO_DH(BXT_P_CR_GT_DISP_PWRON, D_BXT, NULL, bxt_gt_disp_pwron_write);
3480 	MMIO_D(BXT_RP_STATE_CAP, D_BXT);
3481 	MMIO_DH(BXT_PHY_CTL_FAMILY(DPIO_PHY0), D_BXT,
3482 		NULL, bxt_phy_ctl_family_write);
3483 	MMIO_DH(BXT_PHY_CTL_FAMILY(DPIO_PHY1), D_BXT,
3484 		NULL, bxt_phy_ctl_family_write);
3485 	MMIO_D(BXT_PHY_CTL(PORT_A), D_BXT);
3486 	MMIO_D(BXT_PHY_CTL(PORT_B), D_BXT);
3487 	MMIO_D(BXT_PHY_CTL(PORT_C), D_BXT);
3488 	MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_A), D_BXT,
3489 		NULL, bxt_port_pll_enable_write);
3490 	MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_B), D_BXT,
3491 		NULL, bxt_port_pll_enable_write);
3492 	MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_C), D_BXT, NULL,
3493 		bxt_port_pll_enable_write);
3494 
3495 	MMIO_D(BXT_PORT_CL1CM_DW0(DPIO_PHY0), D_BXT);
3496 	MMIO_D(BXT_PORT_CL1CM_DW9(DPIO_PHY0), D_BXT);
3497 	MMIO_D(BXT_PORT_CL1CM_DW10(DPIO_PHY0), D_BXT);
3498 	MMIO_D(BXT_PORT_CL1CM_DW28(DPIO_PHY0), D_BXT);
3499 	MMIO_D(BXT_PORT_CL1CM_DW30(DPIO_PHY0), D_BXT);
3500 	MMIO_D(BXT_PORT_CL2CM_DW6(DPIO_PHY0), D_BXT);
3501 	MMIO_D(BXT_PORT_REF_DW3(DPIO_PHY0), D_BXT);
3502 	MMIO_D(BXT_PORT_REF_DW6(DPIO_PHY0), D_BXT);
3503 	MMIO_D(BXT_PORT_REF_DW8(DPIO_PHY0), D_BXT);
3504 
3505 	MMIO_D(BXT_PORT_CL1CM_DW0(DPIO_PHY1), D_BXT);
3506 	MMIO_D(BXT_PORT_CL1CM_DW9(DPIO_PHY1), D_BXT);
3507 	MMIO_D(BXT_PORT_CL1CM_DW10(DPIO_PHY1), D_BXT);
3508 	MMIO_D(BXT_PORT_CL1CM_DW28(DPIO_PHY1), D_BXT);
3509 	MMIO_D(BXT_PORT_CL1CM_DW30(DPIO_PHY1), D_BXT);
3510 	MMIO_D(BXT_PORT_CL2CM_DW6(DPIO_PHY1), D_BXT);
3511 	MMIO_D(BXT_PORT_REF_DW3(DPIO_PHY1), D_BXT);
3512 	MMIO_D(BXT_PORT_REF_DW6(DPIO_PHY1), D_BXT);
3513 	MMIO_D(BXT_PORT_REF_DW8(DPIO_PHY1), D_BXT);
3514 
3515 	MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY0, DPIO_CH0), D_BXT);
3516 	MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY0, DPIO_CH0), D_BXT);
3517 	MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY0, DPIO_CH0), D_BXT);
3518 	MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY0, DPIO_CH0), D_BXT);
3519 	MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY0, DPIO_CH0), D_BXT);
3520 	MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY0, DPIO_CH0), D_BXT);
3521 	MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH0), D_BXT,
3522 		NULL, bxt_pcs_dw12_grp_write);
3523 	MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY0, DPIO_CH0), D_BXT);
3524 	MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY0, DPIO_CH0), D_BXT);
3525 	MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH0), D_BXT,
3526 		bxt_port_tx_dw3_read, NULL);
3527 	MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY0, DPIO_CH0), D_BXT);
3528 	MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY0, DPIO_CH0), D_BXT);
3529 	MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY0, DPIO_CH0), D_BXT);
3530 	MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 0), D_BXT);
3531 	MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 1), D_BXT);
3532 	MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 2), D_BXT);
3533 	MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 3), D_BXT);
3534 	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 0), D_BXT);
3535 	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 1), D_BXT);
3536 	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 2), D_BXT);
3537 	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 3), D_BXT);
3538 	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 6), D_BXT);
3539 	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 8), D_BXT);
3540 	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 9), D_BXT);
3541 	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 10), D_BXT);
3542 
3543 	MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY0, DPIO_CH1), D_BXT);
3544 	MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY0, DPIO_CH1), D_BXT);
3545 	MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY0, DPIO_CH1), D_BXT);
3546 	MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY0, DPIO_CH1), D_BXT);
3547 	MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY0, DPIO_CH1), D_BXT);
3548 	MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY0, DPIO_CH1), D_BXT);
3549 	MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH1), D_BXT,
3550 		NULL, bxt_pcs_dw12_grp_write);
3551 	MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY0, DPIO_CH1), D_BXT);
3552 	MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY0, DPIO_CH1), D_BXT);
3553 	MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH1), D_BXT,
3554 		bxt_port_tx_dw3_read, NULL);
3555 	MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY0, DPIO_CH1), D_BXT);
3556 	MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY0, DPIO_CH1), D_BXT);
3557 	MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY0, DPIO_CH1), D_BXT);
3558 	MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 0), D_BXT);
3559 	MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 1), D_BXT);
3560 	MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 2), D_BXT);
3561 	MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 3), D_BXT);
3562 	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 0), D_BXT);
3563 	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 1), D_BXT);
3564 	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 2), D_BXT);
3565 	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 3), D_BXT);
3566 	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 6), D_BXT);
3567 	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 8), D_BXT);
3568 	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 9), D_BXT);
3569 	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 10), D_BXT);
3570 
3571 	MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY1, DPIO_CH0), D_BXT);
3572 	MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY1, DPIO_CH0), D_BXT);
3573 	MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY1, DPIO_CH0), D_BXT);
3574 	MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY1, DPIO_CH0), D_BXT);
3575 	MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY1, DPIO_CH0), D_BXT);
3576 	MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY1, DPIO_CH0), D_BXT);
3577 	MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY1, DPIO_CH0), D_BXT,
3578 		NULL, bxt_pcs_dw12_grp_write);
3579 	MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY1, DPIO_CH0), D_BXT);
3580 	MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY1, DPIO_CH0), D_BXT);
3581 	MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY1, DPIO_CH0), D_BXT,
3582 		bxt_port_tx_dw3_read, NULL);
3583 	MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY1, DPIO_CH0), D_BXT);
3584 	MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY1, DPIO_CH0), D_BXT);
3585 	MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY1, DPIO_CH0), D_BXT);
3586 	MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 0), D_BXT);
3587 	MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 1), D_BXT);
3588 	MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 2), D_BXT);
3589 	MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 3), D_BXT);
3590 	MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 0), D_BXT);
3591 	MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 1), D_BXT);
3592 	MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 2), D_BXT);
3593 	MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 3), D_BXT);
3594 	MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 6), D_BXT);
3595 	MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 8), D_BXT);
3596 	MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 9), D_BXT);
3597 	MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 10), D_BXT);
3598 
3599 	MMIO_D(BXT_DE_PLL_CTL, D_BXT);
3600 	MMIO_DH(BXT_DE_PLL_ENABLE, D_BXT, NULL, bxt_de_pll_enable_write);
3601 	MMIO_D(BXT_DSI_PLL_CTL, D_BXT);
3602 	MMIO_D(BXT_DSI_PLL_ENABLE, D_BXT);
3603 
3604 	MMIO_D(GEN9_CLKGATE_DIS_0, D_BXT);
3605 	MMIO_D(GEN9_CLKGATE_DIS_4, D_BXT);
3606 
3607 	MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_A), D_BXT);
3608 	MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B), D_BXT);
3609 	MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_C), D_BXT);
3610 
3611 	MMIO_D(RC6_CTX_BASE, D_BXT);
3612 
3613 	MMIO_D(GEN8_PUSHBUS_CONTROL, D_BXT);
3614 	MMIO_D(GEN8_PUSHBUS_ENABLE, D_BXT);
3615 	MMIO_D(GEN8_PUSHBUS_SHIFT, D_BXT);
3616 	MMIO_D(GEN6_GFXPAUSE, D_BXT);
3617 	MMIO_DFH(GEN8_L3SQCREG1, D_BXT, F_CMD_ACCESS, NULL, NULL);
3618 	MMIO_DFH(GEN8_L3CNTLREG, D_BXT, F_CMD_ACCESS, NULL, NULL);
3619 	MMIO_DFH(_MMIO(0x20D8), D_BXT, F_CMD_ACCESS, NULL, NULL);
3620 	MMIO_F(GEN8_RING_CS_GPR(RENDER_RING_BASE, 0), 0x40, F_CMD_ACCESS,
3621 	       0, 0, D_BXT, NULL, NULL);
3622 	MMIO_F(GEN8_RING_CS_GPR(GEN6_BSD_RING_BASE, 0), 0x40, F_CMD_ACCESS,
3623 	       0, 0, D_BXT, NULL, NULL);
3624 	MMIO_F(GEN8_RING_CS_GPR(BLT_RING_BASE, 0), 0x40, F_CMD_ACCESS,
3625 	       0, 0, D_BXT, NULL, NULL);
3626 	MMIO_F(GEN8_RING_CS_GPR(VEBOX_RING_BASE, 0), 0x40, F_CMD_ACCESS,
3627 	       0, 0, D_BXT, NULL, NULL);
3628 
3629 	MMIO_DFH(GEN9_CTX_PREEMPT_REG, D_BXT, F_CMD_ACCESS, NULL, NULL);
3630 
3631 	MMIO_DH(GEN8_PRIVATE_PAT_LO, D_BXT, NULL, bxt_ppat_low_write);
3632 
3633 	return 0;
3634 }
3635 
3636 static const struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt,
3637 						    unsigned int offset)
3638 {
3639 	unsigned long device = intel_gvt_get_device_type(gvt);
3640 	const struct gvt_mmio_block *block = gvt->mmio.mmio_block;
3641 	int num = gvt->mmio.num_mmio_block;
3642 	int i;
3643 
3644 	for (i = 0; i < num; i++, block++) {
3645 		if (!(device & block->device))
3646 			continue;
3647 		if (offset >= i915_mmio_reg_offset(block->offset) &&
3648 		    offset < i915_mmio_reg_offset(block->offset) + block->size)
3649 			return block;
3650 	}
3651 	return NULL;
3652 }
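
/*
 * Illustrative lookup sketch (not part of the driver): an access to the
 * PVINFO page resolves to its entry in the block table, whose read/write
 * hooks (if any) then service the access; "val" and the 4-byte width are
 * assumptions made for this example.
 *
 *	const struct gvt_mmio_block *block =
 *		find_mmio_block(gvt, VGT_PVINFO_PAGE);
 *	u32 val;
 *
 *	if (block && block->read)
 *		block->read(vgpu, VGT_PVINFO_PAGE, &val, 4);
 */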
3653 
3654 /**
3655  * intel_gvt_clean_mmio_info - clean up MMIO information table for GVT device
3656  * @gvt: GVT device
3657  *
3658  * This function is called at the driver unloading stage to clean up the MMIO
3659  * information table of the GVT device.
3660  *
3661  */
3662 void intel_gvt_clean_mmio_info(struct intel_gvt *gvt)
3663 {
3664 	struct hlist_node *tmp;
3665 	struct intel_gvt_mmio_info *e;
3666 	int i;
3667 
3668 	hash_for_each_safe(gvt->mmio.mmio_info_table, i, tmp, e, node)
3669 		kfree(e);
3670 
3671 	vfree(gvt->mmio.mmio_attribute);
3672 	gvt->mmio.mmio_attribute = NULL;
3673 }
3674 
3675 /* Special MMIO blocks. Registers in these block ranges should not be command
3676  * accessible (i.e. should have no F_CMD_ACCESS flag set); otherwise,
3677  * cmd_reg_handler in cmd_parser.c needs to be updated accordingly.
3678  */
3679 static const struct gvt_mmio_block mmio_blocks[] = {
3680 	{D_SKL_PLUS, _MMIO(DMC_MMIO_START_RANGE), 0x3000, NULL, NULL},
3681 	{D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
3682 	{D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE,
3683 		pvinfo_mmio_read, pvinfo_mmio_write},
3684 	{D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL},
3685 	{D_ALL, LGC_PALETTE(PIPE_B, 0), 1024, NULL, NULL},
3686 	{D_ALL, LGC_PALETTE(PIPE_C, 0), 1024, NULL, NULL},
3687 };
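
/*
 * Sketch of extending the table above (the offset is hypothetical, for
 * illustration only): a 4KB range served by the default vreg read/write
 * path would be declared with NULL hooks, and must not overlap any
 * register carrying F_CMD_ACCESS, per the comment above mmio_blocks.
 *
 *	{D_ALL, _MMIO(0x180000), 0x1000, NULL, NULL},
 */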
3688 
3689 /**
3690  * intel_gvt_setup_mmio_info - setup MMIO information table for GVT device
3691  * @gvt: GVT device
3692  *
3693  * This function is called at the initialization stage to set up the MMIO
3694  * information table for the GVT device.
3695  *
3696  * Returns:
3697  * Zero on success, negative error code if failed.
3698  */
3699 int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
3700 {
3701 	struct intel_gvt_device_info *info = &gvt->device_info;
3702 	struct drm_i915_private *i915 = gvt->gt->i915;
3703 	int size = info->mmio_size / 4 * sizeof(*gvt->mmio.mmio_attribute);
3704 	int ret;
3705 
3706 	gvt->mmio.mmio_attribute = vzalloc(size);
3707 	if (!gvt->mmio.mmio_attribute)
3708 		return -ENOMEM;
3709 
3710 	ret = init_generic_mmio_info(gvt);
3711 	if (ret)
3712 		goto err;
3713 
3714 	if (IS_BROADWELL(i915)) {
3715 		ret = init_bdw_mmio_info(gvt);
3716 		if (ret)
3717 			goto err;
3718 	} else if (IS_SKYLAKE(i915) ||
3719 		   IS_KABYLAKE(i915) ||
3720 		   IS_COFFEELAKE(i915) ||
3721 		   IS_COMETLAKE(i915)) {
3722 		ret = init_bdw_mmio_info(gvt);
3723 		if (ret)
3724 			goto err;
3725 		ret = init_skl_mmio_info(gvt);
3726 		if (ret)
3727 			goto err;
3728 	} else if (IS_BROXTON(i915)) {
3729 		ret = init_bdw_mmio_info(gvt);
3730 		if (ret)
3731 			goto err;
3732 		ret = init_skl_mmio_info(gvt);
3733 		if (ret)
3734 			goto err;
3735 		ret = init_bxt_mmio_info(gvt);
3736 		if (ret)
3737 			goto err;
3738 	}
3739 
3740 	gvt->mmio.mmio_block = mmio_blocks;
3741 	gvt->mmio.num_mmio_block = ARRAY_SIZE(mmio_blocks);
3742 
3743 	return 0;
3744 err:
3745 	intel_gvt_clean_mmio_info(gvt);
3746 	return ret;
3747 }
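
/*
 * Illustrative call sequence (a sketch, not the actual caller): the MMIO
 * information table is built once at GVT initialization and torn down on
 * unload; a failed setup cleans up after itself before returning.
 *
 *	ret = intel_gvt_setup_mmio_info(gvt);
 *	if (ret)
 *		return ret;
 *	...
 *	intel_gvt_clean_mmio_info(gvt);
 */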
3748 
3749 /**
3750  * intel_gvt_for_each_tracked_mmio - iterate over each tracked MMIO
3751  * @gvt: a GVT device
3752  * @handler: callback invoked with each tracked MMIO offset
3753  * @data: private data passed through to @handler
3754  *
3755  * Returns:
3756  * Zero on success, negative error code if failed.
3757  */
3758 int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt,
3759 	int (*handler)(struct intel_gvt *gvt, u32 offset, void *data),
3760 	void *data)
3761 {
3762 	const struct gvt_mmio_block *block = gvt->mmio.mmio_block;
3763 	struct intel_gvt_mmio_info *e;
3764 	int i, j, ret;
3765 
3766 	hash_for_each(gvt->mmio.mmio_info_table, i, e, node) {
3767 		ret = handler(gvt, e->offset, data);
3768 		if (ret)
3769 			return ret;
3770 	}
3771 
3772 	for (i = 0; i < gvt->mmio.num_mmio_block; i++, block++) {
3773 		/* pvinfo data doesn't come from hw mmio */
3774 		if (i915_mmio_reg_offset(block->offset) == VGT_PVINFO_PAGE)
3775 			continue;
3776 
3777 		for (j = 0; j < block->size; j += 4) {
3778 			ret = handler(gvt,
3779 				      i915_mmio_reg_offset(block->offset) + j,
3780 				      data);
3781 			if (ret)
3782 				return ret;
3783 		}
3784 	}
3785 	return 0;
3786 }
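
/*
 * Example handler (illustrative only): counting every tracked MMIO
 * offset. "count_tracked_mmio" and its u32 counter are assumptions made
 * for this sketch, not driver code.
 *
 *	static int count_tracked_mmio(struct intel_gvt *gvt, u32 offset,
 *				      void *data)
 *	{
 *		(*(u32 *)data)++;
 *		return 0;
 *	}
 *
 *	u32 nr_tracked = 0;
 *
 *	intel_gvt_for_each_tracked_mmio(gvt, count_tracked_mmio, &nr_tracked);
 */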
3787 
3788 /**
3789  * intel_vgpu_default_mmio_read - default MMIO read handler
3790  * @vgpu: a vGPU
3791  * @offset: access offset
3792  * @p_data: data return buffer
3793  * @bytes: access data length
3794  *
3795  * Returns:
3796  * Zero on success, negative error code if failed.
3797  */
3798 int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
3799 		void *p_data, unsigned int bytes)
3800 {
3801 	read_vreg(vgpu, offset, p_data, bytes);
3802 	return 0;
3803 }
3804 
3805 /**
3806  * intel_vgpu_default_mmio_write - default MMIO write handler
3807  * @vgpu: a vGPU
3808  * @offset: access offset
3809  * @p_data: write data buffer
3810  * @bytes: access data length
3811  *
3812  * Returns:
3813  * Zero on success, negative error code if failed.
3814  */
3815 int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
3816 		void *p_data, unsigned int bytes)
3817 {
3818 	write_vreg(vgpu, offset, p_data, bytes);
3819 	return 0;
3820 }
3821 
3822 /**
3823  * intel_vgpu_mask_mmio_write - emulate a write to a mask-style register
3824  * @vgpu: a vGPU
3825  * @offset: access offset
3826  * @p_data: write data buffer
3827  * @bytes: access data length
3828  *
3829  * Returns:
3830  * Zero on success, negative error code if failed.
3831  */
3832 int intel_vgpu_mask_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
3833 		void *p_data, unsigned int bytes)
3834 {
3835 	u32 mask, old_vreg;
3836 
3837 	old_vreg = vgpu_vreg(vgpu, offset);
3838 	write_vreg(vgpu, offset, p_data, bytes);
3839 	mask = vgpu_vreg(vgpu, offset) >> 16;
3840 	vgpu_vreg(vgpu, offset) = (old_vreg & ~mask) |
3841 				(vgpu_vreg(vgpu, offset) & mask);
3842 
3843 	return 0;
3844 }
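
/*
 * Worked example of the mask semantics above (values illustrative): with
 * old_vreg = 0x000000f0 and a guest write of 0x00ff000a, the mask is
 * 0x00ff, so only the low byte is replaced:
 *
 *	(0x000000f0 & ~0x00ff) | (0x00ff000a & 0x00ff) = 0x0000000a
 *
 * Bits whose mask bit is clear keep their previous value.
 */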
3845 
3846 /**
3847  * intel_gvt_in_force_nonpriv_whitelist - if a mmio is in whitelist to be
3848  * force-nopriv register
3849  *
3850  * @gvt: a GVT device
3851  * @offset: register offset
3852  *
3853  * Returns:
3854  * True if the register is in the force-nonpriv whitelist;
3855  * false otherwise.
3856  */
3857 bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
3858 					  unsigned int offset)
3859 {
3860 	return in_whitelist(offset);
3861 }
3862 
3863 /**
3864  * intel_vgpu_mmio_reg_rw - emulate tracked mmio registers
3865  * @vgpu: a vGPU
3866  * @offset: register offset
3867  * @pdata: data buffer
3868  * @bytes: data length
3869  * @is_read: read or write
3870  *
3871  * Returns:
3872  * Zero on success, negative error code if failed.
3873  */
3874 int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
3875 			   void *pdata, unsigned int bytes, bool is_read)
3876 {
3877 	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
3878 	struct intel_gvt *gvt = vgpu->gvt;
3879 	struct intel_gvt_mmio_info *mmio_info;
3880 	const struct gvt_mmio_block *mmio_block;
3881 	gvt_mmio_func func;
3882 	int ret;
3883 
3884 	if (drm_WARN_ON(&i915->drm, bytes > 8))
3885 		return -EINVAL;
3886 
3887 	/*
3888 	 * Handle special MMIO blocks.
3889 	 */
3890 	mmio_block = find_mmio_block(gvt, offset);
3891 	if (mmio_block) {
3892 		func = is_read ? mmio_block->read : mmio_block->write;
3893 		if (func)
3894 			return func(vgpu, offset, pdata, bytes);
3895 		goto default_rw;
3896 	}
3897 
3898 	/*
3899 	 * Normal tracked MMIOs.
3900 	 */
3901 	mmio_info = intel_gvt_find_mmio_info(gvt, offset);
3902 	if (!mmio_info) {
3903 		gvt_dbg_mmio("untracked MMIO %08x len %d\n", offset, bytes);
3904 		goto default_rw;
3905 	}
3906 
3907 	if (is_read)
3908 		return mmio_info->read(vgpu, offset, pdata, bytes);
3909 	else {
3910 		u64 ro_mask = mmio_info->ro_mask;
3911 		u32 old_vreg = 0;
3912 		u64 data = 0;
3913 
3914 		if (intel_gvt_mmio_has_mode_mask(gvt, mmio_info->offset)) {
3915 			old_vreg = vgpu_vreg(vgpu, offset);
3916 		}
3917 
3918 		if (likely(!ro_mask))
3919 			ret = mmio_info->write(vgpu, offset, pdata, bytes);
3920 		else if (!~ro_mask) {
3921 			gvt_vgpu_err("try to write RO reg %x\n", offset);
3922 			return 0;
3923 		} else {
3924 			/* keep the RO bits in the virtual register */
3925 			memcpy(&data, pdata, bytes);
3926 			data &= ~ro_mask;
3927 			data |= vgpu_vreg(vgpu, offset) & ro_mask;
3928 			ret = mmio_info->write(vgpu, offset, &data, bytes);
3929 		}
3930 
3931 		/* the higher 16 bits of mode ctl regs are mask bits for changes */
3932 		if (intel_gvt_mmio_has_mode_mask(gvt, mmio_info->offset)) {
3933 			u32 mask = vgpu_vreg(vgpu, offset) >> 16;
3934 
3935 			vgpu_vreg(vgpu, offset) = (old_vreg & ~mask)
3936 					| (vgpu_vreg(vgpu, offset) & mask);
3937 		}
3938 	}
3939 
3940 	return ret;
3941 
3942 default_rw:
3943 	return is_read ?
3944 		intel_vgpu_default_mmio_read(vgpu, offset, pdata, bytes) :
3945 		intel_vgpu_default_mmio_write(vgpu, offset, pdata, bytes);
3946 }
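
/*
 * Worked example of the partial read-only path above (values
 * illustrative): with ro_mask = 0x0000ffff, a current vreg of 0x1234abcd
 * and a guest write of 0xffffffff, the value handed to the write handler
 * is
 *
 *	(0xffffffff & ~0x0000ffff) | (0x1234abcd & 0x0000ffff) = 0xffffabcd
 *
 * i.e. the guest may flip only the writable high bits while the RO low
 * bits are preserved from the virtual register.
 */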
3947 
3948 void intel_gvt_restore_fence(struct intel_gvt *gvt)
3949 {
3950 	struct intel_vgpu *vgpu;
3951 	int i, id;
3952 
3953 	idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) {
3954 		mmio_hw_access_pre(gvt->gt);
3955 		for (i = 0; i < vgpu_fence_sz(vgpu); i++)
3956 			intel_vgpu_write_fence(vgpu, i, vgpu_vreg64(vgpu, fence_num_to_offset(i)));
3957 		mmio_hw_access_post(gvt->gt);
3958 	}
3959 }
3960 
3961 static int mmio_pm_restore_handler(struct intel_gvt *gvt, u32 offset, void *data)
3962 {
3963 	struct intel_vgpu *vgpu = data;
3964 	struct drm_i915_private *dev_priv = gvt->gt->i915;
3965 
3966 	if (gvt->mmio.mmio_attribute[offset >> 2] & F_PM_SAVE)
3967 		intel_uncore_write(&dev_priv->uncore, _MMIO(offset), vgpu_vreg(vgpu, offset));
3968 
3969 	return 0;
3970 }
3971 
3972 void intel_gvt_restore_mmio(struct intel_gvt *gvt)
3973 {
3974 	struct intel_vgpu *vgpu;
3975 	int id;
3976 
3977 	idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) {
3978 		mmio_hw_access_pre(gvt->gt);
3979 		intel_gvt_for_each_tracked_mmio(gvt, mmio_pm_restore_handler, vgpu);
3980 		mmio_hw_access_post(gvt->gt);
3981 	}
3982 }
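
/*
 * Resume-path sketch (illustrative; the actual caller lives outside this
 * file): after the host GPU powers back up, fence registers and any
 * F_PM_SAVE-tagged MMIOs are replayed for each vGPU from its saved vregs.
 *
 *	intel_gvt_restore_fence(gvt);
 *	intel_gvt_restore_mmio(gvt);
 */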
3983