/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Kevin Tian <kevin.tian@intel.com>
 *    Eddie Dong <eddie.dong@intel.com>
 *    Zhiyuan Lv <zhiyuan.lv@intel.com>
 *
 * Contributors:
 *    Min He <min.he@intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *    Pei Zhang <pei.zhang@intel.com>
 *    Niu Bing <bing.niu@intel.com>
 *    Ping Gao <ping.a.gao@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */

#include "i915_drv.h"
#include "i915_reg.h"
#include "gvt.h"
#include "i915_pvinfo.h"
#include "display/intel_display_types.h"
#include "display/intel_fbc.h"
#include "gt/intel_gt_regs.h"

/* XXX FIXME i915 has changed PP_XXX definition */
#define PCH_PP_STATUS  _MMIO(0xc7200)
#define PCH_PP_CONTROL _MMIO(0xc7204)
#define PCH_PP_ON_DELAYS _MMIO(0xc7208)
#define PCH_PP_OFF_DELAYS _MMIO(0xc720c)
#define PCH_PP_DIVISOR _MMIO(0xc7210)

unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt)
{
	struct drm_i915_private *i915 = gvt->gt->i915;

	if (IS_BROADWELL(i915))
		return D_BDW;
	else if (IS_SKYLAKE(i915))
		return D_SKL;
	else if (IS_KABYLAKE(i915))
		return D_KBL;
	else if (IS_BROXTON(i915))
		return D_BXT;
	else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915))
		return D_CFL;

	return 0;
}

bool intel_gvt_match_device(struct intel_gvt *gvt,
		unsigned long device)
{
	return intel_gvt_get_device_type(gvt) & device;
}

static void read_vreg(struct intel_vgpu *vgpu, unsigned int offset,
	void *p_data, unsigned int bytes)
{
	memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
}

static void write_vreg(struct intel_vgpu *vgpu, unsigned int offset,
	void *p_data, unsigned int bytes)
{
	memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
}

struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
						  unsigned int offset)
{
	struct intel_gvt_mmio_info *e;

	hash_for_each_possible(gvt->mmio.mmio_info_table, e, node, offset) {
		if (e->offset == offset)
			return e;
	}
	return NULL;
}

static int new_mmio_info(struct intel_gvt *gvt,
		u32 offset, u16 flags, u32 size,
		u32 addr_mask, u32 ro_mask, u32 device,
		gvt_mmio_func read, gvt_mmio_func write)
{
	struct intel_gvt_mmio_info *info, *p;
	u32 start, end, i;

	if (!intel_gvt_match_device(gvt, device))
		return 0;

	if (WARN_ON(!IS_ALIGNED(offset, 4)))
		return -EINVAL;

	start = offset;
	end = offset + size;

	for (i = start; i < end; i += 4) {
		info = kzalloc(sizeof(*info), GFP_KERNEL);
		if (!info)
			return -ENOMEM;

		info->offset = i;
		p = intel_gvt_find_mmio_info(gvt, info->offset);
		if (p) {
			WARN(1, "dup mmio definition offset %x\n",
				info->offset);
			kfree(info);

			/* Return -EEXIST here to make the GVT-g load fail,
			 * so that a duplicated MMIO definition is found as
			 * soon as possible.
			 */
			return -EEXIST;
		}

		info->ro_mask = ro_mask;
		info->device = device;
		info->read = read ? read : intel_vgpu_default_mmio_read;
		info->write = write ? write : intel_vgpu_default_mmio_write;
		gvt->mmio.mmio_attribute[info->offset / 4] = flags;
		INIT_HLIST_NODE(&info->node);
		hash_add(gvt->mmio.mmio_info_table, &info->node, info->offset);
		gvt->mmio.num_tracked_mmio++;
	}
	return 0;
}

/**
 * intel_gvt_render_mmio_to_engine - convert an MMIO offset into its engine
 * @gvt: a GVT device
 * @offset: register offset
 *
 * Returns:
 * The engine containing the offset within its MMIO page.
 */
const struct intel_engine_cs *
intel_gvt_render_mmio_to_engine(struct intel_gvt *gvt, unsigned int offset)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	offset &= ~GENMASK(11, 0);
	for_each_engine(engine, gvt->gt, id)
		if (engine->mmio_base == offset)
			return engine;

	return NULL;
}

#define offset_to_fence_num(offset) \
	((offset - i915_mmio_reg_offset(FENCE_REG_GEN6_LO(0))) >> 3)

#define fence_num_to_offset(num) \
	(num * 8 + i915_mmio_reg_offset(FENCE_REG_GEN6_LO(0)))
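
/*
 * Each GEN6+ fence register is an 8-byte LO/HI pair, so fence N lives at
 * FENCE_REG_GEN6_LO(0) + N * 8; the two macros above are inverses of each
 * other.
 */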

void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason)
{
	switch (reason) {
	case GVT_FAILSAFE_UNSUPPORTED_GUEST:
		pr_err("Detected that the guest driver doesn't support GVT-g.\n");
		break;
	case GVT_FAILSAFE_INSUFFICIENT_RESOURCE:
		pr_err("Insufficient graphics resource for the guest\n");
		break;
	case GVT_FAILSAFE_GUEST_ERR:
		pr_err("GVT internal error for the guest\n");
		break;
	default:
		break;
	}
	pr_err("Now vgpu %d will enter failsafe mode.\n", vgpu->id);
	vgpu->failsafe = true;
}

static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu,
		unsigned int fence_num, void *p_data, unsigned int bytes)
{
	unsigned int max_fence = vgpu_fence_sz(vgpu);

	if (fence_num >= max_fence) {
		gvt_vgpu_err("access oob fence reg %d/%d\n",
			     fence_num, max_fence);

		/* When the guest accesses OOB fence registers without
		 * accessing pv_info first, treat it as a guest that does
		 * not support GVT and let the vGPU enter failsafe mode.
		 */
		if (!vgpu->pv_notified)
			enter_failsafe_mode(vgpu,
					GVT_FAILSAFE_UNSUPPORTED_GUEST);

		memset(p_data, 0, bytes);
		return -EINVAL;
	}
	return 0;
}

static int gamw_echo_dev_rw_ia_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	u32 ips = (*(u32 *)p_data) & GAMW_ECO_ENABLE_64K_IPS_FIELD;

	if (GRAPHICS_VER(vgpu->gvt->gt->i915) <= 10) {
		if (ips == GAMW_ECO_ENABLE_64K_IPS_FIELD)
			gvt_dbg_core("vgpu%d: ips enabled\n", vgpu->id);
		else if (!ips)
			gvt_dbg_core("vgpu%d: ips disabled\n", vgpu->id);
		else {
			/* All engines must be enabled together for vGPU,
			 * since we don't know which engine the ppgtt will
			 * bind to when shadowing.
			 */
			gvt_vgpu_err("Unsupported IPS setting %x, cannot enable 64K gtt.\n",
				     ips);
			return -EINVAL;
		}
	}

	write_vreg(vgpu, offset, p_data, bytes);
	return 0;
}

static int fence_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
		void *p_data, unsigned int bytes)
{
	int ret;

	ret = sanitize_fence_mmio_access(vgpu, offset_to_fence_num(off),
			p_data, bytes);
	if (ret)
		return ret;
	read_vreg(vgpu, off, p_data, bytes);
	return 0;
}

static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
		void *p_data, unsigned int bytes)
{
	struct intel_gvt *gvt = vgpu->gvt;
	unsigned int fence_num = offset_to_fence_num(off);
	int ret;

	ret = sanitize_fence_mmio_access(vgpu, fence_num, p_data, bytes);
	if (ret)
		return ret;
	write_vreg(vgpu, off, p_data, bytes);

	mmio_hw_access_pre(gvt->gt);
	intel_vgpu_write_fence(vgpu, fence_num,
			vgpu_vreg64(vgpu, fence_num_to_offset(fence_num)));
	mmio_hw_access_post(gvt->gt);
	return 0;
}

#define CALC_MODE_MASK_REG(old, new) \
	(((new) & GENMASK(31, 16)) \
	 | ((((old) & GENMASK(15, 0)) & ~((new) >> 16)) \
	 | ((new) & ((new) >> 16))))
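
/*
 * CALC_MODE_MASK_REG implements the masked-bit register convention used by
 * the mode/forcewake registers: the upper 16 bits of a write select which
 * of the lower 16 bits may change. For example, with old == 0, writing
 * 0x00010001 sets bit 0 and writing 0x00010000 clears it, while unmasked
 * bits keep their old value.
 */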

static int mul_force_wake_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	u32 old, new;
	u32 ack_reg_offset;

	old = vgpu_vreg(vgpu, offset);
	new = CALC_MODE_MASK_REG(old, *(u32 *)p_data);

	if (GRAPHICS_VER(vgpu->gvt->gt->i915) >= 9) {
		switch (offset) {
		case FORCEWAKE_RENDER_GEN9_REG:
			ack_reg_offset = FORCEWAKE_ACK_RENDER_GEN9_REG;
			break;
		case FORCEWAKE_GT_GEN9_REG:
			ack_reg_offset = FORCEWAKE_ACK_GT_GEN9_REG;
			break;
		case FORCEWAKE_MEDIA_GEN9_REG:
			ack_reg_offset = FORCEWAKE_ACK_MEDIA_GEN9_REG;
			break;
		default:
			/* should not hit here */
			gvt_vgpu_err("invalid forcewake offset 0x%x\n", offset);
			return -EINVAL;
		}
	} else {
		ack_reg_offset = FORCEWAKE_ACK_HSW_REG;
	}

	vgpu_vreg(vgpu, offset) = new;
	vgpu_vreg(vgpu, ack_reg_offset) = (new & GENMASK(15, 0));
	return 0;
}

static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
			    void *p_data, unsigned int bytes)
{
	intel_engine_mask_t engine_mask = 0;
	u32 data;

	write_vreg(vgpu, offset, p_data, bytes);
	data = vgpu_vreg(vgpu, offset);

	if (data & GEN6_GRDOM_FULL) {
		gvt_dbg_mmio("vgpu%d: request full GPU reset\n", vgpu->id);
		engine_mask = ALL_ENGINES;
	} else {
		if (data & GEN6_GRDOM_RENDER) {
			gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id);
			engine_mask |= BIT(RCS0);
		}
		if (data & GEN6_GRDOM_MEDIA) {
			gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id);
			engine_mask |= BIT(VCS0);
		}
		if (data & GEN6_GRDOM_BLT) {
			gvt_dbg_mmio("vgpu%d: request BCS reset\n", vgpu->id);
			engine_mask |= BIT(BCS0);
		}
		if (data & GEN6_GRDOM_VECS) {
			gvt_dbg_mmio("vgpu%d: request VECS reset\n", vgpu->id);
			engine_mask |= BIT(VECS0);
		}
		if (data & GEN8_GRDOM_MEDIA2) {
			gvt_dbg_mmio("vgpu%d: request VCS2 reset\n", vgpu->id);
			engine_mask |= BIT(VCS1);
		}
		if (data & GEN9_GRDOM_GUC) {
			gvt_dbg_mmio("vgpu%d: request GuC reset\n", vgpu->id);
			vgpu_vreg_t(vgpu, GUC_STATUS) |= GS_MIA_IN_RESET;
		}
		engine_mask &= vgpu->gvt->gt->info.engine_mask;
	}

	/* vgpu_lock is already held by the MMIO r/w emulation path */
	intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask);

	/* sw will wait for the device to ack the reset request */
	vgpu_vreg(vgpu, offset) = 0;

	return 0;
}

static int gmbus_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	return intel_gvt_i2c_handle_gmbus_read(vgpu, offset, p_data, bytes);
}

static int gmbus_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	return intel_gvt_i2c_handle_gmbus_write(vgpu, offset, p_data, bytes);
}

static int pch_pp_control_mmio_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	write_vreg(vgpu, offset, p_data, bytes);

	if (vgpu_vreg(vgpu, offset) & PANEL_POWER_ON) {
		vgpu_vreg_t(vgpu, PCH_PP_STATUS) |= PP_ON;
		vgpu_vreg_t(vgpu, PCH_PP_STATUS) |= PP_SEQUENCE_STATE_ON_IDLE;
		vgpu_vreg_t(vgpu, PCH_PP_STATUS) &= ~PP_SEQUENCE_POWER_DOWN;
		vgpu_vreg_t(vgpu, PCH_PP_STATUS) &= ~PP_CYCLE_DELAY_ACTIVE;

	} else
		vgpu_vreg_t(vgpu, PCH_PP_STATUS) &=
			~(PP_ON | PP_SEQUENCE_POWER_DOWN
					| PP_CYCLE_DELAY_ACTIVE);
	return 0;
}

static int transconf_mmio_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	write_vreg(vgpu, offset, p_data, bytes);

	if (vgpu_vreg(vgpu, offset) & TRANS_ENABLE)
		vgpu_vreg(vgpu, offset) |= TRANS_STATE_ENABLE;
	else
		vgpu_vreg(vgpu, offset) &= ~TRANS_STATE_ENABLE;
	return 0;
}

static int lcpll_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	write_vreg(vgpu, offset, p_data, bytes);

	if (vgpu_vreg(vgpu, offset) & LCPLL_PLL_DISABLE)
		vgpu_vreg(vgpu, offset) &= ~LCPLL_PLL_LOCK;
	else
		vgpu_vreg(vgpu, offset) |= LCPLL_PLL_LOCK;

	if (vgpu_vreg(vgpu, offset) & LCPLL_CD_SOURCE_FCLK)
		vgpu_vreg(vgpu, offset) |= LCPLL_CD_SOURCE_FCLK_DONE;
	else
		vgpu_vreg(vgpu, offset) &= ~LCPLL_CD_SOURCE_FCLK_DONE;

	return 0;
}

static int dpy_reg_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	switch (offset) {
	case 0xe651c:
	case 0xe661c:
	case 0xe671c:
	case 0xe681c:
		vgpu_vreg(vgpu, offset) = 1 << 17;
		break;
	case 0xe6c04:
		vgpu_vreg(vgpu, offset) = 0x3;
		break;
	case 0xe6e1c:
		vgpu_vreg(vgpu, offset) = 0x2f << 16;
		break;
	default:
		return -EINVAL;
	}

	read_vreg(vgpu, offset, p_data, bytes);
	return 0;
}

/*
 * Only PIPE_A is enabled in the current vGPU display and PIPE_A is tied to
 *   TRANSCODER_A in HW. The DDI/PORT could be any PORT_x, depending on
 *   setup_virtual_dp_monitor().
 * emulate_monitor_status_change() sets up the PLL for PORT_x as the initial
 *   enabled DPLL. The guest driver may later set up a different DPLLx when
 *   setting the mode.
 * So the correct sequence to find the DP stream clock is:
 *   Check TRANS_DDI_FUNC_CTL on TRANSCODER_A to get PORT_x.
 *   Check the correct PLLx for PORT_x to get the PLL frequency and DP bitrate.
 * The refresh rate can then be calculated from the following equations:
 *   Pixel clock = h_total * v_total * refresh_rate
 *   stream clock = Pixel clock
 *   ls_clk = DP bitrate
 *   Link M/N = strm_clk / ls_clk
 */
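
/*
 * Worked example (illustrative numbers, not read from real hardware): a
 * 1920x1080@60 mode has a 148.5 MHz pixel clock and
 * h_total * v_total = 2200 * 1125 = 2475000. On an HBR2 link
 * (ls_clk = 540000 kHz) the guest programs Link M/N = 148500 / 540000, so
 * strm_clk = ls_clk * M / N = 148500 kHz and
 * refresh_rate = 148500000 Hz / 2475000 = 60 Hz.
 */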

static u32 bdw_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
{
	u32 dp_br = 0;
	u32 ddi_pll_sel = vgpu_vreg_t(vgpu, PORT_CLK_SEL(port));

	switch (ddi_pll_sel) {
	case PORT_CLK_SEL_LCPLL_2700:
		dp_br = 270000 * 2;
		break;
	case PORT_CLK_SEL_LCPLL_1350:
		dp_br = 135000 * 2;
		break;
	case PORT_CLK_SEL_LCPLL_810:
		dp_br = 81000 * 2;
		break;
	case PORT_CLK_SEL_SPLL:
	{
		switch (vgpu_vreg_t(vgpu, SPLL_CTL) & SPLL_FREQ_MASK) {
		case SPLL_FREQ_810MHz:
			dp_br = 81000 * 2;
			break;
		case SPLL_FREQ_1350MHz:
			dp_br = 135000 * 2;
			break;
		case SPLL_FREQ_2700MHz:
			dp_br = 270000 * 2;
			break;
		default:
			gvt_dbg_dpy("vgpu-%d PORT_%c can't get freq from SPLL 0x%08x\n",
				    vgpu->id, port_name(port), vgpu_vreg_t(vgpu, SPLL_CTL));
			break;
		}
		break;
	}
	case PORT_CLK_SEL_WRPLL1:
	case PORT_CLK_SEL_WRPLL2:
	{
		u32 wrpll_ctl;
		int refclk, n, p, r;

		if (ddi_pll_sel == PORT_CLK_SEL_WRPLL1)
			wrpll_ctl = vgpu_vreg_t(vgpu, WRPLL_CTL(DPLL_ID_WRPLL1));
		else
			wrpll_ctl = vgpu_vreg_t(vgpu, WRPLL_CTL(DPLL_ID_WRPLL2));

		switch (wrpll_ctl & WRPLL_REF_MASK) {
		case WRPLL_REF_PCH_SSC:
			refclk = vgpu->gvt->gt->i915->dpll.ref_clks.ssc;
			break;
		case WRPLL_REF_LCPLL:
			refclk = 2700000;
			break;
		default:
			gvt_dbg_dpy("vgpu-%d PORT_%c WRPLL can't get refclk 0x%08x\n",
				    vgpu->id, port_name(port), wrpll_ctl);
			goto out;
		}

		r = wrpll_ctl & WRPLL_DIVIDER_REF_MASK;
		p = (wrpll_ctl & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
		n = (wrpll_ctl & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;

		dp_br = (refclk * n / 10) / (p * r) * 2;
		break;
	}
	default:
		gvt_dbg_dpy("vgpu-%d PORT_%c has invalid clock select 0x%08x\n",
			    vgpu->id, port_name(port), vgpu_vreg_t(vgpu, PORT_CLK_SEL(port)));
		break;
	}

out:
	return dp_br;
}

static u32 bxt_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
{
	u32 dp_br = 0;
	int refclk = vgpu->gvt->gt->i915->dpll.ref_clks.nssc;
	enum dpio_phy phy = DPIO_PHY0;
	enum dpio_channel ch = DPIO_CH0;
	struct dpll clock = {0};
	u32 temp;

	/* Port to PHY mapping is fixed, see bxt_ddi_phy_info{} */
	switch (port) {
	case PORT_A:
		phy = DPIO_PHY1;
		ch = DPIO_CH0;
		break;
	case PORT_B:
		phy = DPIO_PHY0;
		ch = DPIO_CH0;
		break;
	case PORT_C:
		phy = DPIO_PHY0;
		ch = DPIO_CH1;
		break;
	default:
		gvt_dbg_dpy("vgpu-%d no PHY for PORT_%c\n", vgpu->id, port_name(port));
		goto out;
	}

	temp = vgpu_vreg_t(vgpu, BXT_PORT_PLL_ENABLE(port));
	if (!(temp & PORT_PLL_ENABLE) || !(temp & PORT_PLL_LOCK)) {
		gvt_dbg_dpy("vgpu-%d PORT_%c PLL_ENABLE 0x%08x isn't enabled or locked\n",
			    vgpu->id, port_name(port), temp);
		goto out;
	}

	clock.m1 = 2;
	clock.m2 = (vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 0)) & PORT_PLL_M2_MASK) << 22;
	if (vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 3)) & PORT_PLL_M2_FRAC_ENABLE)
		clock.m2 |= vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 2)) & PORT_PLL_M2_FRAC_MASK;
	clock.n = (vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 1)) & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT;
	clock.p1 = (vgpu_vreg_t(vgpu, BXT_PORT_PLL_EBB_0(phy, ch)) & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT;
	clock.p2 = (vgpu_vreg_t(vgpu, BXT_PORT_PLL_EBB_0(phy, ch)) & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT;
	clock.m = clock.m1 * clock.m2;
	clock.p = clock.p1 * clock.p2;

	if (clock.n == 0 || clock.p == 0) {
		gvt_dbg_dpy("vgpu-%d PORT_%c PLL has invalid divider\n", vgpu->id, port_name(port));
		goto out;
	}

	clock.vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock.m), clock.n << 22);
	clock.dot = DIV_ROUND_CLOSEST(clock.vco, clock.p);

	dp_br = clock.dot / 5;
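
	/*
	 * Note: m2 above is carried in 22.22 fixed point (integer part
	 * shifted left by 22, fractional bits OR'd into the low bits),
	 * which the "clock.n << 22" divisor cancels out. clock.dot is the
	 * port clock in kHz, and dividing it by 5 gives dp_br in the same
	 * units the BDW/SKL helpers return (e.g. 2700000 / 5 = 540000 for
	 * HBR2).
	 */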

out:
	return dp_br;
}

static u32 skl_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
{
	u32 dp_br = 0;
	enum intel_dpll_id dpll_id = DPLL_ID_SKL_DPLL0;

	/* Find the enabled DPLL for the DDI/PORT */
	if (!(vgpu_vreg_t(vgpu, DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_OFF(port)) &&
	    (vgpu_vreg_t(vgpu, DPLL_CTRL2) & DPLL_CTRL2_DDI_SEL_OVERRIDE(port))) {
		dpll_id += (vgpu_vreg_t(vgpu, DPLL_CTRL2) &
			DPLL_CTRL2_DDI_CLK_SEL_MASK(port)) >>
			DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port);
	} else {
		gvt_dbg_dpy("vgpu-%d DPLL for PORT_%c isn't turned on\n",
			    vgpu->id, port_name(port));
		return dp_br;
	}

	/* Find the PLL output frequency from the correct DPLL, and get the bit rate */
	switch ((vgpu_vreg_t(vgpu, DPLL_CTRL1) &
		DPLL_CTRL1_LINK_RATE_MASK(dpll_id)) >>
		DPLL_CTRL1_LINK_RATE_SHIFT(dpll_id)) {
		case DPLL_CTRL1_LINK_RATE_810:
			dp_br = 81000 * 2;
			break;
		case DPLL_CTRL1_LINK_RATE_1080:
			dp_br = 108000 * 2;
			break;
		case DPLL_CTRL1_LINK_RATE_1350:
			dp_br = 135000 * 2;
			break;
		case DPLL_CTRL1_LINK_RATE_1620:
			dp_br = 162000 * 2;
			break;
		case DPLL_CTRL1_LINK_RATE_2160:
			dp_br = 216000 * 2;
			break;
		case DPLL_CTRL1_LINK_RATE_2700:
			dp_br = 270000 * 2;
			break;
		default:
			dp_br = 0;
			gvt_dbg_dpy("vgpu-%d PORT_%c failed to get DPLL-%d freq\n",
				    vgpu->id, port_name(port), dpll_id);
	}

	return dp_br;
}

static void vgpu_update_refresh_rate(struct intel_vgpu *vgpu)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
	enum port port;
	u32 dp_br, link_m, link_n, htotal, vtotal;

	/* Find the DDI/PORT assigned to TRANSCODER_A; PORT_B or PORT_D is expected */
	port = (vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &
		TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
	if (port != PORT_B && port != PORT_D) {
		gvt_dbg_dpy("vgpu-%d unsupported PORT_%c\n", vgpu->id, port_name(port));
		return;
	}

	/* Calculate the DP bitrate from the PLL */
	if (IS_BROADWELL(dev_priv))
		dp_br = bdw_vgpu_get_dp_bitrate(vgpu, port);
	else if (IS_BROXTON(dev_priv))
		dp_br = bxt_vgpu_get_dp_bitrate(vgpu, port);
	else
		dp_br = skl_vgpu_get_dp_bitrate(vgpu, port);

	/* Get the DP link symbol clock M/N */
	link_m = vgpu_vreg_t(vgpu, PIPE_LINK_M1(TRANSCODER_A));
	link_n = vgpu_vreg_t(vgpu, PIPE_LINK_N1(TRANSCODER_A));

	/* Get H/V total from the transcoder timing */
	htotal = (vgpu_vreg_t(vgpu, HTOTAL(TRANSCODER_A)) >> TRANS_HTOTAL_SHIFT);
	vtotal = (vgpu_vreg_t(vgpu, VTOTAL(TRANSCODER_A)) >> TRANS_VTOTAL_SHIFT);

	if (dp_br && link_n && htotal && vtotal) {
		u64 pixel_clk = 0;
		u32 new_rate = 0;
		u32 *old_rate = &(intel_vgpu_port(vgpu, vgpu->display.port_num)->vrefresh_k);

		/* Calculate the pixel clock as (ls_clk * M / N) */
		pixel_clk = div_u64(mul_u32_u32(link_m, dp_br), link_n);
		pixel_clk *= MSEC_PER_SEC;

		/* Calculate the refresh rate as (pixel_clk / (h_total * v_total)) */
		new_rate = DIV64_U64_ROUND_CLOSEST(mul_u64_u32_shr(pixel_clk, MSEC_PER_SEC, 0), mul_u32_u32(htotal + 1, vtotal + 1));

		if (*old_rate != new_rate)
			*old_rate = new_rate;

		gvt_dbg_dpy("vgpu-%d PIPE_%c refresh rate updated to %d\n",
			    vgpu->id, pipe_name(PIPE_A), new_rate);
	}
}

static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	u32 data;

	write_vreg(vgpu, offset, p_data, bytes);
	data = vgpu_vreg(vgpu, offset);

	if (data & PIPECONF_ENABLE) {
		vgpu_vreg(vgpu, offset) |= PIPECONF_STATE_ENABLE;
		vgpu_update_refresh_rate(vgpu);
		vgpu_update_vblank_emulation(vgpu, true);
	} else {
		vgpu_vreg(vgpu, offset) &= ~PIPECONF_STATE_ENABLE;
		vgpu_update_vblank_emulation(vgpu, false);
	}
	return 0;
}

/* sorted in ascending order */
static i915_reg_t force_nonpriv_white_list[] = {
	_MMIO(0xd80),
	GEN9_CS_DEBUG_MODE1, //_MMIO(0x20ec)
	GEN9_CTX_PREEMPT_REG,//_MMIO(0x2248)
	CL_PRIMITIVES_COUNT, //_MMIO(0x2340)
	PS_INVOCATION_COUNT, //_MMIO(0x2348)
	PS_DEPTH_COUNT, //_MMIO(0x2350)
	GEN8_CS_CHICKEN1,//_MMIO(0x2580)
	_MMIO(0x2690),
	_MMIO(0x2694),
	_MMIO(0x2698),
	_MMIO(0x2754),
	_MMIO(0x28a0),
	_MMIO(0x4de0),
	_MMIO(0x4de4),
	_MMIO(0x4dfc),
	GEN7_COMMON_SLICE_CHICKEN1,//_MMIO(0x7010)
	_MMIO(0x7014),
	HDC_CHICKEN0,//_MMIO(0x7300)
	GEN8_HDC_CHICKEN1,//_MMIO(0x7304)
	_MMIO(0x7700),
	_MMIO(0x7704),
	_MMIO(0x7708),
	_MMIO(0x770c),
	_MMIO(0x83a8),
	_MMIO(0xb110),
	GEN8_L3SQCREG4,//_MMIO(0xb118)
	_MMIO(0xe100),
	_MMIO(0xe18c),
	_MMIO(0xe48c),
	_MMIO(0xe5f4),
	_MMIO(0x64844),
};

/* a simple bsearch */
static inline bool in_whitelist(u32 reg)
{
	int left = 0, right = ARRAY_SIZE(force_nonpriv_white_list);
	i915_reg_t *array = force_nonpriv_white_list;

	while (left < right) {
		int mid = (left + right)/2;

		if (reg > array[mid].reg)
			left = mid + 1;
		else if (reg < array[mid].reg)
			right = mid;
		else
			return true;
	}
	return false;
}

static int force_nonpriv_write(struct intel_vgpu *vgpu,
	unsigned int offset, void *p_data, unsigned int bytes)
{
	u32 reg_nonpriv = (*(u32 *)p_data) & REG_GENMASK(25, 2);
	const struct intel_engine_cs *engine =
		intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);

	if (bytes != 4 || !IS_ALIGNED(offset, bytes) || !engine) {
		gvt_err("vgpu(%d) Invalid FORCE_NONPRIV offset %x(%dB)\n",
			vgpu->id, offset, bytes);
		return -EINVAL;
	}

	if (!in_whitelist(reg_nonpriv) &&
	    reg_nonpriv != i915_mmio_reg_offset(RING_NOPID(engine->mmio_base))) {
		gvt_err("vgpu(%d) Invalid FORCE_NONPRIV write %x at offset %x\n",
			vgpu->id, reg_nonpriv, offset);
	} else
		intel_vgpu_default_mmio_write(vgpu, offset, p_data, bytes);

	return 0;
}

static int ddi_buf_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	write_vreg(vgpu, offset, p_data, bytes);

	if (vgpu_vreg(vgpu, offset) & DDI_BUF_CTL_ENABLE) {
		vgpu_vreg(vgpu, offset) &= ~DDI_BUF_IS_IDLE;
	} else {
		vgpu_vreg(vgpu, offset) |= DDI_BUF_IS_IDLE;
		if (offset == i915_mmio_reg_offset(DDI_BUF_CTL(PORT_E)))
			vgpu_vreg_t(vgpu, DP_TP_STATUS(PORT_E))
				&= ~DP_TP_STATUS_AUTOTRAIN_DONE;
	}
	return 0;
}

static int fdi_rx_iir_mmio_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	vgpu_vreg(vgpu, offset) &= ~*(u32 *)p_data;
	return 0;
}

#define FDI_LINK_TRAIN_PATTERN1         0
#define FDI_LINK_TRAIN_PATTERN2         1

static int fdi_auto_training_started(struct intel_vgpu *vgpu)
{
	u32 ddi_buf_ctl = vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_E));
	u32 rx_ctl = vgpu_vreg(vgpu, _FDI_RXA_CTL);
	u32 tx_ctl = vgpu_vreg_t(vgpu, DP_TP_CTL(PORT_E));

	if ((ddi_buf_ctl & DDI_BUF_CTL_ENABLE) &&
			(rx_ctl & FDI_RX_ENABLE) &&
			(rx_ctl & FDI_AUTO_TRAINING) &&
			(tx_ctl & DP_TP_CTL_ENABLE) &&
			(tx_ctl & DP_TP_CTL_FDI_AUTOTRAIN))
		return 1;
	else
		return 0;
}

static int check_fdi_rx_train_status(struct intel_vgpu *vgpu,
		enum pipe pipe, unsigned int train_pattern)
{
	i915_reg_t fdi_rx_imr, fdi_tx_ctl, fdi_rx_ctl;
	unsigned int fdi_rx_check_bits, fdi_tx_check_bits;
	unsigned int fdi_rx_train_bits, fdi_tx_train_bits;
	unsigned int fdi_iir_check_bits;

	fdi_rx_imr = FDI_RX_IMR(pipe);
	fdi_tx_ctl = FDI_TX_CTL(pipe);
	fdi_rx_ctl = FDI_RX_CTL(pipe);

	if (train_pattern == FDI_LINK_TRAIN_PATTERN1) {
		fdi_rx_train_bits = FDI_LINK_TRAIN_PATTERN_1_CPT;
		fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_1;
		fdi_iir_check_bits = FDI_RX_BIT_LOCK;
	} else if (train_pattern == FDI_LINK_TRAIN_PATTERN2) {
		fdi_rx_train_bits = FDI_LINK_TRAIN_PATTERN_2_CPT;
		fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_2;
		fdi_iir_check_bits = FDI_RX_SYMBOL_LOCK;
	} else {
		gvt_vgpu_err("Invalid train pattern %d\n", train_pattern);
		return -EINVAL;
	}

	fdi_rx_check_bits = FDI_RX_ENABLE | fdi_rx_train_bits;
	fdi_tx_check_bits = FDI_TX_ENABLE | fdi_tx_train_bits;

	/* If the IIR bit has been masked in the IMR, report not done */
	if (vgpu_vreg_t(vgpu, fdi_rx_imr) & fdi_iir_check_bits)
		return 0;

	if (((vgpu_vreg_t(vgpu, fdi_tx_ctl) & fdi_tx_check_bits)
			== fdi_tx_check_bits)
		&& ((vgpu_vreg_t(vgpu, fdi_rx_ctl) & fdi_rx_check_bits)
			== fdi_rx_check_bits))
		return 1;
	else
		return 0;
}

#define INVALID_INDEX (~0U)

static unsigned int calc_index(unsigned int offset, unsigned int start,
	unsigned int next, unsigned int end, i915_reg_t i915_end)
{
	unsigned int range = next - start;

	if (!end)
		end = i915_mmio_reg_offset(i915_end);
	if (offset < start || offset > end)
		return INVALID_INDEX;
	offset -= start;
	return offset / range;
}

#define FDI_RX_CTL_TO_PIPE(offset) \
	calc_index(offset, _FDI_RXA_CTL, _FDI_RXB_CTL, 0, FDI_RX_CTL(PIPE_C))

#define FDI_TX_CTL_TO_PIPE(offset) \
	calc_index(offset, _FDI_TXA_CTL, _FDI_TXB_CTL, 0, FDI_TX_CTL(PIPE_C))

#define FDI_RX_IMR_TO_PIPE(offset) \
	calc_index(offset, _FDI_RXA_IMR, _FDI_RXB_IMR, 0, FDI_RX_IMR(PIPE_C))
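
/*
 * Example: the per-pipe registers are one fixed stride apart, so
 * calc_index() maps e.g. FDI_RX_CTL_TO_PIPE(_FDI_RXB_CTL) to 1 (PIPE_B),
 * and any offset outside [_FDI_RXA_CTL, FDI_RX_CTL(PIPE_C)] to
 * INVALID_INDEX.
 */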

static int update_fdi_rx_iir_status(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	i915_reg_t fdi_rx_iir;
	unsigned int index;
	int ret;

	if (FDI_RX_CTL_TO_PIPE(offset) != INVALID_INDEX)
		index = FDI_RX_CTL_TO_PIPE(offset);
	else if (FDI_TX_CTL_TO_PIPE(offset) != INVALID_INDEX)
		index = FDI_TX_CTL_TO_PIPE(offset);
	else if (FDI_RX_IMR_TO_PIPE(offset) != INVALID_INDEX)
		index = FDI_RX_IMR_TO_PIPE(offset);
	else {
		gvt_vgpu_err("Unsupported register %x\n", offset);
		return -EINVAL;
	}

	write_vreg(vgpu, offset, p_data, bytes);

	fdi_rx_iir = FDI_RX_IIR(index);

	ret = check_fdi_rx_train_status(vgpu, index, FDI_LINK_TRAIN_PATTERN1);
	if (ret < 0)
		return ret;
	if (ret)
		vgpu_vreg_t(vgpu, fdi_rx_iir) |= FDI_RX_BIT_LOCK;

	ret = check_fdi_rx_train_status(vgpu, index, FDI_LINK_TRAIN_PATTERN2);
	if (ret < 0)
		return ret;
	if (ret)
		vgpu_vreg_t(vgpu, fdi_rx_iir) |= FDI_RX_SYMBOL_LOCK;

	if (offset == _FDI_RXA_CTL)
		if (fdi_auto_training_started(vgpu))
			vgpu_vreg_t(vgpu, DP_TP_STATUS(PORT_E)) |=
				DP_TP_STATUS_AUTOTRAIN_DONE;
	return 0;
}

#define DP_TP_CTL_TO_PORT(offset) \
	calc_index(offset, _DP_TP_CTL_A, _DP_TP_CTL_B, 0, DP_TP_CTL(PORT_E))

static int dp_tp_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	i915_reg_t status_reg;
	unsigned int index;
	u32 data;

	write_vreg(vgpu, offset, p_data, bytes);

	index = DP_TP_CTL_TO_PORT(offset);
	data = (vgpu_vreg(vgpu, offset) & GENMASK(10, 8)) >> 8;
	if (data == 0x2) {
		status_reg = DP_TP_STATUS(index);
		vgpu_vreg_t(vgpu, status_reg) |= (1 << 25);
	}
	return 0;
}

static int dp_tp_status_mmio_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	u32 reg_val;
	u32 sticky_mask;

	reg_val = *((u32 *)p_data);
	sticky_mask = GENMASK(27, 26) | (1 << 24);

	vgpu_vreg(vgpu, offset) = (reg_val & ~sticky_mask) |
		(vgpu_vreg(vgpu, offset) & sticky_mask);
	vgpu_vreg(vgpu, offset) &= ~(reg_val & sticky_mask);
	return 0;
}

static int pch_adpa_mmio_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	u32 data;

	write_vreg(vgpu, offset, p_data, bytes);
	data = vgpu_vreg(vgpu, offset);

	if (data & ADPA_CRT_HOTPLUG_FORCE_TRIGGER)
		vgpu_vreg(vgpu, offset) &= ~ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
	return 0;
}

static int south_chicken2_mmio_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	u32 data;

	write_vreg(vgpu, offset, p_data, bytes);
	data = vgpu_vreg(vgpu, offset);

	if (data & FDI_MPHY_IOSFSB_RESET_CTL)
		vgpu_vreg(vgpu, offset) |= FDI_MPHY_IOSFSB_RESET_STATUS;
	else
		vgpu_vreg(vgpu, offset) &= ~FDI_MPHY_IOSFSB_RESET_STATUS;
	return 0;
}

#define DSPSURF_TO_PIPE(offset) \
	calc_index(offset, _DSPASURF, _DSPBSURF, 0, DSPSURF(PIPE_C))

static int pri_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
	u32 pipe = DSPSURF_TO_PIPE(offset);
	int event = SKL_FLIP_EVENT(pipe, PLANE_PRIMARY);

	write_vreg(vgpu, offset, p_data, bytes);
	vgpu_vreg_t(vgpu, DSPSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);

	vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++;

	if (vgpu_vreg_t(vgpu, DSPCNTR(pipe)) & PLANE_CTL_ASYNC_FLIP)
		intel_vgpu_trigger_virtual_event(vgpu, event);
	else
		set_bit(event, vgpu->irq.flip_done_event[pipe]);

	return 0;
}

#define SPRSURF_TO_PIPE(offset) \
	calc_index(offset, _SPRA_SURF, _SPRB_SURF, 0, SPRSURF(PIPE_C))

static int spr_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	u32 pipe = SPRSURF_TO_PIPE(offset);
	int event = SKL_FLIP_EVENT(pipe, PLANE_SPRITE0);

	write_vreg(vgpu, offset, p_data, bytes);
	vgpu_vreg_t(vgpu, SPRSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);

	if (vgpu_vreg_t(vgpu, SPRCTL(pipe)) & PLANE_CTL_ASYNC_FLIP)
		intel_vgpu_trigger_virtual_event(vgpu, event);
	else
		set_bit(event, vgpu->irq.flip_done_event[pipe]);

	return 0;
}

static int reg50080_mmio_write(struct intel_vgpu *vgpu,
			       unsigned int offset, void *p_data,
			       unsigned int bytes)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
	enum pipe pipe = REG_50080_TO_PIPE(offset);
	enum plane_id plane = REG_50080_TO_PLANE(offset);
	int event = SKL_FLIP_EVENT(pipe, plane);

	write_vreg(vgpu, offset, p_data, bytes);
	if (plane == PLANE_PRIMARY) {
		vgpu_vreg_t(vgpu, DSPSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
		vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++;
	} else {
		vgpu_vreg_t(vgpu, SPRSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
	}

	if ((vgpu_vreg(vgpu, offset) & REG50080_FLIP_TYPE_MASK) == REG50080_FLIP_TYPE_ASYNC)
		intel_vgpu_trigger_virtual_event(vgpu, event);
	else
		set_bit(event, vgpu->irq.flip_done_event[pipe]);

	return 0;
}

static int trigger_aux_channel_interrupt(struct intel_vgpu *vgpu,
		unsigned int reg)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
	enum intel_gvt_event_type event;

	if (reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_A)))
		event = AUX_CHANNEL_A;
	else if (reg == _PCH_DPB_AUX_CH_CTL ||
		 reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_B)))
		event = AUX_CHANNEL_B;
	else if (reg == _PCH_DPC_AUX_CH_CTL ||
		 reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_C)))
		event = AUX_CHANNEL_C;
	else if (reg == _PCH_DPD_AUX_CH_CTL ||
		 reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_D)))
		event = AUX_CHANNEL_D;
	else {
		drm_WARN_ON(&dev_priv->drm, true);
		return -EINVAL;
	}

	intel_vgpu_trigger_virtual_event(vgpu, event);
	return 0;
}

static int dp_aux_ch_ctl_trans_done(struct intel_vgpu *vgpu, u32 value,
		unsigned int reg, int len, bool data_valid)
{
	/* mark transaction done */
	value |= DP_AUX_CH_CTL_DONE;
	value &= ~DP_AUX_CH_CTL_SEND_BUSY;
	value &= ~DP_AUX_CH_CTL_RECEIVE_ERROR;

	if (data_valid)
		value &= ~DP_AUX_CH_CTL_TIME_OUT_ERROR;
	else
		value |= DP_AUX_CH_CTL_TIME_OUT_ERROR;

	/* message size */
	value &= ~(0xf << 20);
	value |= (len << 20);
	vgpu_vreg(vgpu, reg) = value;

	if (value & DP_AUX_CH_CTL_INTERRUPT)
		return trigger_aux_channel_interrupt(vgpu, reg);
	return 0;
}

static void dp_aux_ch_ctl_link_training(struct intel_vgpu_dpcd_data *dpcd,
		u8 t)
{
	if ((t & DPCD_TRAINING_PATTERN_SET_MASK) == DPCD_TRAINING_PATTERN_1) {
		/* training pattern 1 for CR */
		/* set LANE0_CR_DONE, LANE1_CR_DONE */
		dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_LANES_CR_DONE;
		/* set LANE2_CR_DONE, LANE3_CR_DONE */
		dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_LANES_CR_DONE;
	} else if ((t & DPCD_TRAINING_PATTERN_SET_MASK) ==
			DPCD_TRAINING_PATTERN_2) {
		/* training pattern 2 for EQ */
		/* set CHANNEL_EQ_DONE and SYMBOL_LOCKED for Lane0_1 */
		dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_LANES_EQ_DONE;
		dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_SYMBOL_LOCKED;
		/* set CHANNEL_EQ_DONE and SYMBOL_LOCKED for Lane2_3 */
		dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_LANES_EQ_DONE;
		dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_SYMBOL_LOCKED;
		/* set INTERLANE_ALIGN_DONE */
		dpcd->data[DPCD_LANE_ALIGN_STATUS_UPDATED] |=
			DPCD_INTERLANE_ALIGN_DONE;
	} else if ((t & DPCD_TRAINING_PATTERN_SET_MASK) ==
			DPCD_LINK_TRAINING_DISABLED) {
		/* finish link training */
		/* set sink status as synchronized */
		dpcd->data[DPCD_SINK_STATUS] = DPCD_SINK_IN_SYNC;
	}
}

#define _REG_HSW_DP_AUX_CH_CTL(dp) \
	((dp) ? (_PCH_DPB_AUX_CH_CTL + ((dp)-1)*0x100) : 0x64010)

#define _REG_SKL_DP_AUX_CH_CTL(dp) (0x64010 + (dp) * 0x100)

#define OFFSET_TO_DP_AUX_PORT(offset) (((offset) & 0xF00) >> 8)

#define dpy_is_valid_port(port)	\
		(((port) >= PORT_A) && ((port) < I915_MAX_PORTS))
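
/*
 * The AUX channel control registers are 0x100 apart, so bits [11:8] of the
 * offset give the port index: 0x64010 -> 0 (AUX A), 0x64110 -> 1 (AUX B),
 * and so on.
 */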

static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	struct intel_vgpu_display *display = &vgpu->display;
	int msg, addr, ctrl, op, len;
	int port_index = OFFSET_TO_DP_AUX_PORT(offset);
	struct intel_vgpu_dpcd_data *dpcd = NULL;
	struct intel_vgpu_port *port = NULL;
	u32 data;

	if (!dpy_is_valid_port(port_index)) {
		gvt_vgpu_err("Unsupported DP port access!\n");
		return 0;
	}

	write_vreg(vgpu, offset, p_data, bytes);
	data = vgpu_vreg(vgpu, offset);

	if ((GRAPHICS_VER(vgpu->gvt->gt->i915) >= 9)
		&& offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) {
		/* SKL DPB/C/D aux ctl register changed */
		return 0;
	} else if (IS_BROADWELL(vgpu->gvt->gt->i915) &&
		   offset != _REG_HSW_DP_AUX_CH_CTL(port_index)) {
		/* write to the data registers */
		return 0;
	}

	if (!(data & DP_AUX_CH_CTL_SEND_BUSY)) {
		/* just want to clear the sticky bits */
		vgpu_vreg(vgpu, offset) = 0;
		return 0;
	}

	port = &display->ports[port_index];
	dpcd = port->dpcd;

	/* read out the message from the DATA1 register */
	msg = vgpu_vreg(vgpu, offset + 4);
	addr = (msg >> 8) & 0xffff;
	ctrl = (msg >> 24) & 0xff;
	len = msg & 0xff;
	op = ctrl >> 4;
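
	/*
	 * DATA1 header layout, as decoded above:
	 * [31:24] ctrl (op in its top nibble), [23:8] addr, [7:0] len.
	 */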

	if (op == GVT_AUX_NATIVE_WRITE) {
		int t;
		u8 buf[16];

		if ((addr + len + 1) >= DPCD_SIZE) {
			/*
			 * The write request exceeds what we support.
			 * DPCD spec: When a Source Device is writing a DPCD
			 * address not supported by the Sink Device, the Sink
			 * Device shall reply with AUX NACK and "M" equal to
			 * zero.
			 */

			/* NAK the write */
			vgpu_vreg(vgpu, offset + 4) = AUX_NATIVE_REPLY_NAK;
			dp_aux_ch_ctl_trans_done(vgpu, data, offset, 2, true);
			return 0;
		}

		/*
		 * Write request format: the header (command + address + size)
		 * occupies 4 bytes, followed by (len + 1) bytes of data. See
		 * details at intel_dp_aux_transfer().
		 */
		if ((len + 1 + 4) > AUX_BURST_SIZE) {
			gvt_vgpu_err("dp_aux_header: len %d is too large\n", len);
			return -EINVAL;
		}

		/* unpack the data from vreg to buf */
		for (t = 0; t < 4; t++) {
			u32 r = vgpu_vreg(vgpu, offset + 8 + t * 4);

			buf[t * 4] = (r >> 24) & 0xff;
			buf[t * 4 + 1] = (r >> 16) & 0xff;
			buf[t * 4 + 2] = (r >> 8) & 0xff;
			buf[t * 4 + 3] = r & 0xff;
		}

		/* write to the virtual DPCD */
		if (dpcd && dpcd->data_valid) {
			for (t = 0; t <= len; t++) {
				int p = addr + t;

				dpcd->data[p] = buf[t];
				/* check for link training */
				if (p == DPCD_TRAINING_PATTERN_SET)
					dp_aux_ch_ctl_link_training(dpcd,
							buf[t]);
			}
		}

		/* ACK the write */
		vgpu_vreg(vgpu, offset + 4) = 0;
		dp_aux_ch_ctl_trans_done(vgpu, data, offset, 1,
				dpcd && dpcd->data_valid);
		return 0;
	}

	if (op == GVT_AUX_NATIVE_READ) {
		int idx, i, ret = 0;

		if ((addr + len + 1) >= DPCD_SIZE) {
			/*
			 * The read request exceeds what we support.
			 * DPCD spec: A Sink Device receiving a Native AUX CH
			 * read request for an unsupported DPCD address must
			 * reply with an AUX ACK and read data set equal to
			 * zero instead of replying with AUX NACK.
			 */

			/* ACK the read */
			vgpu_vreg(vgpu, offset + 4) = 0;
			vgpu_vreg(vgpu, offset + 8) = 0;
			vgpu_vreg(vgpu, offset + 12) = 0;
			vgpu_vreg(vgpu, offset + 16) = 0;
			vgpu_vreg(vgpu, offset + 20) = 0;

			dp_aux_ch_ctl_trans_done(vgpu, data, offset, len + 2,
					true);
			return 0;
		}

		for (idx = 1; idx <= 5; idx++) {
			/* clear the data registers */
			vgpu_vreg(vgpu, offset + 4 * idx) = 0;
		}

		/*
		 * Read reply format: ACK (1 byte) plus (len + 1) bytes of data.
		 */
		if ((len + 2) > AUX_BURST_SIZE) {
			gvt_vgpu_err("dp_aux_header: len %d is too large\n", len);
			return -EINVAL;
		}

		/* read from the virtual DPCD to vreg */
		/* first 4 bytes: [ACK][addr][addr+1][addr+2] */
		if (dpcd && dpcd->data_valid) {
			for (i = 1; i <= (len + 1); i++) {
				int t;

				t = dpcd->data[addr + i - 1];
				t <<= (24 - 8 * (i % 4));
				ret |= t;

				if ((i % 4 == 3) || (i == (len + 1))) {
					vgpu_vreg(vgpu, offset +
							(i / 4 + 1) * 4) = ret;
					ret = 0;
				}
			}
		}
		dp_aux_ch_ctl_trans_done(vgpu, data, offset, len + 2,
				dpcd && dpcd->data_valid);
		return 0;
	}

	/* i2c transaction starts */
	intel_gvt_i2c_handle_aux_ch_write(vgpu, port_index, offset, p_data);

	if (data & DP_AUX_CH_CTL_INTERRUPT)
		trigger_aux_channel_interrupt(vgpu, offset);
	return 0;
}

static int mbctl_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	*(u32 *)p_data &= (~GEN6_MBCTL_ENABLE_BOOT_FETCH);
	write_vreg(vgpu, offset, p_data, bytes);
	return 0;
}

static int vga_control_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	bool vga_disable;

	write_vreg(vgpu, offset, p_data, bytes);
	vga_disable = vgpu_vreg(vgpu, offset) & VGA_DISP_DISABLE;

	gvt_dbg_core("vgpu%d: %s VGA mode\n", vgpu->id,
			vga_disable ? "Disable" : "Enable");
	return 0;
}

static u32 read_virtual_sbi_register(struct intel_vgpu *vgpu,
		unsigned int sbi_offset)
{
	struct intel_vgpu_display *display = &vgpu->display;
	int num = display->sbi.number;
	int i;

	for (i = 0; i < num; ++i)
		if (display->sbi.registers[i].offset == sbi_offset)
			break;

	if (i == num)
		return 0;

	return display->sbi.registers[i].value;
}

static void write_virtual_sbi_register(struct intel_vgpu *vgpu,
		unsigned int offset, u32 value)
{
	struct intel_vgpu_display *display = &vgpu->display;
	int num = display->sbi.number;
	int i;

	for (i = 0; i < num; ++i) {
		if (display->sbi.registers[i].offset == offset)
			break;
	}

	if (i == num) {
		if (num == SBI_REG_MAX) {
			gvt_vgpu_err("SBI register cache has reached its maximum size\n");
			return;
		}
		display->sbi.number++;
	}

	display->sbi.registers[i].offset = offset;
	display->sbi.registers[i].value = value;
}

static int sbi_data_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	if (((vgpu_vreg_t(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >>
				SBI_OPCODE_SHIFT) == SBI_CMD_CRRD) {
		unsigned int sbi_offset = (vgpu_vreg_t(vgpu, SBI_ADDR) &
				SBI_ADDR_OFFSET_MASK) >> SBI_ADDR_OFFSET_SHIFT;
		vgpu_vreg(vgpu, offset) = read_virtual_sbi_register(vgpu,
				sbi_offset);
	}
	read_vreg(vgpu, offset, p_data, bytes);
	return 0;
}

static int sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	u32 data;

	write_vreg(vgpu, offset, p_data, bytes);
	data = vgpu_vreg(vgpu, offset);

	data &= ~(SBI_STAT_MASK << SBI_STAT_SHIFT);
	data |= SBI_READY;

	data &= ~(SBI_RESPONSE_MASK << SBI_RESPONSE_SHIFT);
	data |= SBI_RESPONSE_SUCCESS;

	vgpu_vreg(vgpu, offset) = data;

	if (((vgpu_vreg_t(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >>
				SBI_OPCODE_SHIFT) == SBI_CMD_CRWR) {
		unsigned int sbi_offset = (vgpu_vreg_t(vgpu, SBI_ADDR) &
				SBI_ADDR_OFFSET_MASK) >> SBI_ADDR_OFFSET_SHIFT;

		write_virtual_sbi_register(vgpu, sbi_offset,
					   vgpu_vreg_t(vgpu, SBI_DATA));
	}
	return 0;
}

#define _vgtif_reg(x) \
	(VGT_PVINFO_PAGE + offsetof(struct vgt_if, x))
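
/*
 * _vgtif_reg(x) is the MMIO offset of field @x of struct vgt_if within the
 * PVINFO page, e.g. _vgtif_reg(magic) == VGT_PVINFO_PAGE since magic is
 * the first field of struct vgt_if.
 */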

static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	bool invalid_read = false;

	read_vreg(vgpu, offset, p_data, bytes);

	switch (offset) {
	case _vgtif_reg(magic) ... _vgtif_reg(vgt_id):
		if (offset + bytes > _vgtif_reg(vgt_id) + 4)
			invalid_read = true;
		break;
	case _vgtif_reg(avail_rs.mappable_gmadr.base) ...
		_vgtif_reg(avail_rs.fence_num):
		if (offset + bytes >
			_vgtif_reg(avail_rs.fence_num) + 4)
			invalid_read = true;
		break;
	case 0x78010:	/* vgt_caps */
	case 0x7881c:
		break;
	default:
		invalid_read = true;
		break;
	}
	if (invalid_read)
		gvt_vgpu_err("invalid pvinfo read: [%x:%x] = %x\n",
				offset, bytes, *(u32 *)p_data);
	vgpu->pv_notified = true;
	return 0;
}

static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
{
	enum intel_gvt_gtt_type root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
	struct intel_vgpu_mm *mm;
	u64 *pdps;

	pdps = (u64 *)&vgpu_vreg64_t(vgpu, vgtif_reg(pdp[0]));

	switch (notification) {
	case VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE:
		root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
		fallthrough;
	case VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE:
		mm = intel_vgpu_get_ppgtt_mm(vgpu, root_entry_type, pdps);
		return PTR_ERR_OR_ZERO(mm);
	case VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY:
	case VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY:
		return intel_vgpu_put_ppgtt_mm(vgpu, pdps);
	case VGT_G2V_EXECLIST_CONTEXT_CREATE:
	case VGT_G2V_EXECLIST_CONTEXT_DESTROY:
	case 1:	/* Remove this in guest driver. */
		break;
	default:
		gvt_vgpu_err("Invalid PV notification %d\n", notification);
	}
	return 0;
}

static int send_display_ready_uevent(struct intel_vgpu *vgpu, int ready)
{
	struct kobject *kobj = &vgpu->gvt->gt->i915->drm.primary->kdev->kobj;
	char *env[3] = {NULL, NULL, NULL};
	char vmid_str[20];
	char display_ready_str[20];

	snprintf(display_ready_str, 20, "GVT_DISPLAY_READY=%d", ready);
	env[0] = display_ready_str;

	snprintf(vmid_str, 20, "VMID=%d", vgpu->id);
	env[1] = vmid_str;

	return kobject_uevent_env(kobj, KOBJ_ADD, env);
}

static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	u32 data = *(u32 *)p_data;
	bool invalid_write = false;

	switch (offset) {
	case _vgtif_reg(display_ready):
		send_display_ready_uevent(vgpu, data ? 1 : 0);
		break;
	case _vgtif_reg(g2v_notify):
		handle_g2v_notification(vgpu, data);
		break;
	/* add xhot and yhot to the handled list to avoid an error log */
	case _vgtif_reg(cursor_x_hot):
	case _vgtif_reg(cursor_y_hot):
	case _vgtif_reg(pdp[0].lo):
	case _vgtif_reg(pdp[0].hi):
	case _vgtif_reg(pdp[1].lo):
	case _vgtif_reg(pdp[1].hi):
	case _vgtif_reg(pdp[2].lo):
	case _vgtif_reg(pdp[2].hi):
	case _vgtif_reg(pdp[3].lo):
	case _vgtif_reg(pdp[3].hi):
	case _vgtif_reg(execlist_context_descriptor_lo):
	case _vgtif_reg(execlist_context_descriptor_hi):
		break;
	case _vgtif_reg(rsv5[0])..._vgtif_reg(rsv5[3]):
		invalid_write = true;
		enter_failsafe_mode(vgpu, GVT_FAILSAFE_INSUFFICIENT_RESOURCE);
		break;
	default:
		invalid_write = true;
		gvt_vgpu_err("invalid pvinfo write offset %x bytes %x data %x\n",
				offset, bytes, data);
		break;
	}

	if (!invalid_write)
		write_vreg(vgpu, offset, p_data, bytes);

	return 0;
}

static int pf_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
	u32 val = *(u32 *)p_data;

	if ((offset == _PS_1A_CTRL || offset == _PS_2A_CTRL ||
	   offset == _PS_1B_CTRL || offset == _PS_2B_CTRL ||
	   offset == _PS_1C_CTRL) && (val & PS_PLANE_SEL_MASK) != 0) {
		drm_WARN_ONCE(&i915->drm, true,
			      "VM(%d): guest is trying to scale a plane\n",
			      vgpu->id);
		return 0;
	}

	return intel_vgpu_default_mmio_write(vgpu, offset, p_data, bytes);
}

static int power_well_ctl_mmio_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	write_vreg(vgpu, offset, p_data, bytes);

	if (vgpu_vreg(vgpu, offset) &
	    HSW_PWR_WELL_CTL_REQ(HSW_PW_CTL_IDX_GLOBAL))
		vgpu_vreg(vgpu, offset) |=
			HSW_PWR_WELL_CTL_STATE(HSW_PW_CTL_IDX_GLOBAL);
	else
		vgpu_vreg(vgpu, offset) &=
			~HSW_PWR_WELL_CTL_STATE(HSW_PW_CTL_IDX_GLOBAL);
	return 0;
}

static int gen9_dbuf_ctl_mmio_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	write_vreg(vgpu, offset, p_data, bytes);

	if (vgpu_vreg(vgpu, offset) & DBUF_POWER_REQUEST)
		vgpu_vreg(vgpu, offset) |= DBUF_POWER_STATE;
	else
		vgpu_vreg(vgpu, offset) &= ~DBUF_POWER_STATE;

	return 0;
}

static int fpga_dbg_mmio_write(struct intel_vgpu *vgpu,
	unsigned int offset, void *p_data, unsigned int bytes)
{
	write_vreg(vgpu, offset, p_data, bytes);

	if (vgpu_vreg(vgpu, offset) & FPGA_DBG_RM_NOCLAIM)
		vgpu_vreg(vgpu, offset) &= ~FPGA_DBG_RM_NOCLAIM;
	return 0;
}

static int dma_ctrl_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
	u32 mode;

	write_vreg(vgpu, offset, p_data, bytes);
	mode = vgpu_vreg(vgpu, offset);

	if (GFX_MODE_BIT_SET_IN_MASK(mode, START_DMA)) {
		drm_WARN_ONCE(&i915->drm, 1,
				"VM(%d): iGVT-g doesn't support GuC\n",
				vgpu->id);
		return 0;
	}

	return 0;
}

static int gen9_trtte_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
	u32 trtte = *(u32 *)p_data;

	if ((trtte & 1) && (trtte & (1 << 1)) == 0) {
		drm_WARN(&i915->drm, 1,
				"VM(%d): Use physical address for TRTT!\n",
				vgpu->id);
		return -EINVAL;
	}
	write_vreg(vgpu, offset, p_data, bytes);

	return 0;
}

static int gen9_trtt_chicken_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	write_vreg(vgpu, offset, p_data, bytes);
	return 0;
}

static int dpll_status_read(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	u32 v = 0;

	if (vgpu_vreg(vgpu, 0x46010) & (1 << 31))
		v |= (1 << 0);

	if (vgpu_vreg(vgpu, 0x46014) & (1 << 31))
		v |= (1 << 8);

	if (vgpu_vreg(vgpu, 0x46040) & (1 << 31))
		v |= (1 << 16);

	if (vgpu_vreg(vgpu, 0x46060) & (1 << 31))
		v |= (1 << 24);

	vgpu_vreg(vgpu, offset) = v;

	return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
}

static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	u32 value = *(u32 *)p_data;
	u32 cmd = value & 0xff;
	u32 *data0 = &vgpu_vreg_t(vgpu, GEN6_PCODE_DATA);

	switch (cmd) {
	case GEN9_PCODE_READ_MEM_LATENCY:
		if (IS_SKYLAKE(vgpu->gvt->gt->i915) ||
		    IS_KABYLAKE(vgpu->gvt->gt->i915) ||
		    IS_COFFEELAKE(vgpu->gvt->gt->i915) ||
		    IS_COMETLAKE(vgpu->gvt->gt->i915)) {
			/*
			 * "Read memory latency" command on gen9.
			 * The memory latency values below were read
			 * on a Skylake platform.
			 */
			if (!*data0)
				*data0 = 0x1e1a1100;
			else
				*data0 = 0x61514b3d;
		} else if (IS_BROXTON(vgpu->gvt->gt->i915)) {
			/*
			 * "Read memory latency" command on gen9.
			 * The memory latency values below were read
			 * on a Broxton MRB.
			 */
			if (!*data0)
				*data0 = 0x16080707;
			else
				*data0 = 0x16161616;
		}
		break;
	case SKL_PCODE_CDCLK_CONTROL:
		if (IS_SKYLAKE(vgpu->gvt->gt->i915) ||
		    IS_KABYLAKE(vgpu->gvt->gt->i915) ||
		    IS_COFFEELAKE(vgpu->gvt->gt->i915) ||
		    IS_COMETLAKE(vgpu->gvt->gt->i915))
			*data0 = SKL_CDCLK_READY_FOR_CHANGE;
		break;
	case GEN6_PCODE_READ_RC6VIDS:
		*data0 |= 0x1;
		break;
	}

	gvt_dbg_core("VM(%d) write %x to mailbox, return data0 %x\n",
		     vgpu->id, value, *data0);
	/*
	 * PCODE_READY clear means ready for pcode read/write,
	 * PCODE_ERROR_MASK clear means no error happened. In GVT-g we
	 * always emulate as pcode read/write success and ready for access
	 * anytime, since we don't touch real physical registers here.
	 */
	value &= ~(GEN6_PCODE_READY | GEN6_PCODE_ERROR_MASK);
	return intel_vgpu_default_mmio_write(vgpu, offset, &value, bytes);
}

static int hws_pga_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	u32 value = *(u32 *)p_data;
	const struct intel_engine_cs *engine =
		intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);

	if (value != 0 &&
	    !intel_gvt_ggtt_validate_range(vgpu, value, I915_GTT_PAGE_SIZE)) {
		gvt_vgpu_err("write invalid HWSP address, reg:0x%x, value:0x%x\n",
			      offset, value);
		return -EINVAL;
	}

	/*
	 * All HWSP register writes need to be emulated so that the host can
	 * update the VM CSB status correctly. The registers handled here
	 * cover BDW, SKL and other platforms with the same HWSP registers.
	 */
	if (unlikely(!engine)) {
		gvt_vgpu_err("access unknown hardware status page register:0x%x\n",
			     offset);
		return -EINVAL;
	}
	vgpu->hws_pga[engine->id] = value;
	gvt_dbg_mmio("VM(%d) write: 0x%x to HWSP: 0x%x\n",
		     vgpu->id, value, offset);

	return intel_vgpu_default_mmio_write(vgpu, offset, &value, bytes);
}
1767 
1768 static int skl_power_well_ctl_write(struct intel_vgpu *vgpu,
1769 		unsigned int offset, void *p_data, unsigned int bytes)
1770 {
1771 	u32 v = *(u32 *)p_data;
1772 
1773 	if (IS_BROXTON(vgpu->gvt->gt->i915))
1774 		v &= (1 << 31) | (1 << 29);
1775 	else
1776 		v &= (1 << 31) | (1 << 29) | (1 << 9) |
1777 			(1 << 7) | (1 << 5) | (1 << 3) | (1 << 1);
1778 	v |= (v >> 1);
1779 
1780 	return intel_vgpu_default_mmio_write(vgpu, offset, &v, bytes);
1781 }
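
/*
 * Illustrative sketch (not part of the original file): the power well
 * control register pairs each "request" bit with a "state" bit one
 * position below it. skl_power_well_ctl_write() keeps only the request
 * bits (31, 29, 9, 7, 5, 3 and 1; just 31 and 29 on Broxton) and then
 * mirrors them into the state bits with `v |= v >> 1`, so the guest
 * immediately observes the power wells it requested as enabled:
 *
 *	guest writes 0x00000002	// request bit 1
 *	vreg becomes 0x00000003	// request bit 1 + state bit 0
 */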
1782 
1783 static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset,
1784 		void *p_data, unsigned int bytes)
1785 {
1786 	u32 v = *(u32 *)p_data;
1787 
1788 	/* other bits are MBZ. */
1789 	v &= (1 << 31) | (1 << 30);
	if (v & (1 << 31))
		v |= (1 << 30);
	else
		v &= ~(1 << 30);
1791 
1792 	vgpu_vreg(vgpu, offset) = v;
1793 
1794 	return 0;
1795 }
1796 
1797 static int bxt_de_pll_enable_write(struct intel_vgpu *vgpu,
1798 		unsigned int offset, void *p_data, unsigned int bytes)
1799 {
1800 	u32 v = *(u32 *)p_data;
1801 
1802 	if (v & BXT_DE_PLL_PLL_ENABLE)
1803 		v |= BXT_DE_PLL_LOCK;
1804 
1805 	vgpu_vreg(vgpu, offset) = v;
1806 
1807 	return 0;
1808 }
1809 
1810 static int bxt_port_pll_enable_write(struct intel_vgpu *vgpu,
1811 		unsigned int offset, void *p_data, unsigned int bytes)
1812 {
1813 	u32 v = *(u32 *)p_data;
1814 
1815 	if (v & PORT_PLL_ENABLE)
1816 		v |= PORT_PLL_LOCK;
1817 
1818 	vgpu_vreg(vgpu, offset) = v;
1819 
1820 	return 0;
1821 }
1822 
1823 static int bxt_phy_ctl_family_write(struct intel_vgpu *vgpu,
1824 		unsigned int offset, void *p_data, unsigned int bytes)
1825 {
1826 	u32 v = *(u32 *)p_data;
1827 	u32 data = v & COMMON_RESET_DIS ? BXT_PHY_LANE_ENABLED : 0;
1828 
1829 	switch (offset) {
1830 	case _PHY_CTL_FAMILY_EDP:
1831 		vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_A) = data;
1832 		break;
1833 	case _PHY_CTL_FAMILY_DDI:
1834 		vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_B) = data;
1835 		vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_C) = data;
1836 		break;
1837 	}
1838 
1839 	vgpu_vreg(vgpu, offset) = v;
1840 
1841 	return 0;
1842 }
1843 
1844 static int bxt_port_tx_dw3_read(struct intel_vgpu *vgpu,
1845 		unsigned int offset, void *p_data, unsigned int bytes)
1846 {
1847 	u32 v = vgpu_vreg(vgpu, offset);
1848 
1849 	v &= ~UNIQUE_TRANGE_EN_METHOD;
1850 
1851 	vgpu_vreg(vgpu, offset) = v;
1852 
1853 	return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
1854 }
1855 
1856 static int bxt_pcs_dw12_grp_write(struct intel_vgpu *vgpu,
1857 		unsigned int offset, void *p_data, unsigned int bytes)
1858 {
1859 	u32 v = *(u32 *)p_data;
1860 
1861 	if (offset == _PORT_PCS_DW12_GRP_A || offset == _PORT_PCS_DW12_GRP_B) {
1862 		vgpu_vreg(vgpu, offset - 0x600) = v;
1863 		vgpu_vreg(vgpu, offset - 0x800) = v;
1864 	} else {
1865 		vgpu_vreg(vgpu, offset - 0x400) = v;
1866 		vgpu_vreg(vgpu, offset - 0x600) = v;
1867 	}
1868 
1869 	vgpu_vreg(vgpu, offset) = v;
1870 
1871 	return 0;
1872 }
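
/*
 * Illustrative sketch (not part of the original file): BXT PHY "GRP"
 * registers broadcast a write to the per-lane copies of the same field,
 * which sit at fixed offsets below the group register (0x600 and 0x800
 * below for ports A and B, 0x400 and 0x600 below otherwise; which copy
 * belongs to which lane pair is an assumption here). So a hypothetical
 * guest write
 *
 *	bxt_pcs_dw12_grp_write(vgpu, _PORT_PCS_DW12_GRP_A, &val, 4);
 *
 * updates the GRP vreg and both per-lane vregs, keeping later per-lane
 * reads consistent with the group write.
 */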
1873 
1874 static int bxt_gt_disp_pwron_write(struct intel_vgpu *vgpu,
1875 		unsigned int offset, void *p_data, unsigned int bytes)
1876 {
1877 	u32 v = *(u32 *)p_data;
1878 
1879 	if (v & BIT(0)) {
1880 		vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &=
1881 			~PHY_RESERVED;
1882 		vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) |=
1883 			PHY_POWER_GOOD;
1884 	}
1885 
1886 	if (v & BIT(1)) {
1887 		vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) &=
1888 			~PHY_RESERVED;
1889 		vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) |=
1890 			PHY_POWER_GOOD;
	}

1894 	vgpu_vreg(vgpu, offset) = v;
1895 
1896 	return 0;
1897 }
1898 
1899 static int edp_psr_imr_iir_write(struct intel_vgpu *vgpu,
1900 		unsigned int offset, void *p_data, unsigned int bytes)
1901 {
1902 	vgpu_vreg(vgpu, offset) = 0;
1903 	return 0;
1904 }
1905 
/*
 * FixMe:
 * If the guest fills a non-privileged batch buffer on ApolloLake/Broxton
 * the way Mesa i965 did in commit 717e7539124d ("i965: Use a WC map and
 * memcpy for the batch instead of pwrite."), the host GPU hangs on
 * executing these MI_BATCH_BUFFERs, because the batch buffer filled by
 * the VM's vCPU is never flushed.
 * Temporarily work around this by setting the SNOOP bit for PAT3, which
 * is used by the PPGTT PML4 PTE: PAT(0) PCD(1) PWT(1).
 * Performance is still expected to be low; further improvement is needed.
 */
1916 static int bxt_ppat_low_write(struct intel_vgpu *vgpu, unsigned int offset,
1917 			      void *p_data, unsigned int bytes)
1918 {
1919 	u64 pat =
1920 		GEN8_PPAT(0, CHV_PPAT_SNOOP) |
1921 		GEN8_PPAT(1, 0) |
1922 		GEN8_PPAT(2, 0) |
1923 		GEN8_PPAT(3, CHV_PPAT_SNOOP) |
1924 		GEN8_PPAT(4, CHV_PPAT_SNOOP) |
1925 		GEN8_PPAT(5, CHV_PPAT_SNOOP) |
1926 		GEN8_PPAT(6, CHV_PPAT_SNOOP) |
1927 		GEN8_PPAT(7, CHV_PPAT_SNOOP);
1928 
1929 	vgpu_vreg(vgpu, offset) = lower_32_bits(pat);
1930 
1931 	return 0;
1932 }
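
/*
 * Illustrative sketch (not part of the original file): GEN8_PPAT(i, x)
 * places attribute byte x at byte i of the 64-bit PAT, so the value
 * built above has CHV_PPAT_SNOOP in every byte except indices 1 and 2.
 * Assuming CHV_PPAT_SNOOP is bit 6 (0x40), the result is
 *
 *	pat                = 0x4040404040000040
 *	lower_32_bits(pat) = 0x40000040	// what the handler writes
 */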
1933 
1934 static int guc_status_read(struct intel_vgpu *vgpu,
1935 			   unsigned int offset, void *p_data,
1936 			   unsigned int bytes)
1937 {
1938 	/* keep MIA_IN_RESET before clearing */
1939 	read_vreg(vgpu, offset, p_data, bytes);
1940 	vgpu_vreg(vgpu, offset) &= ~GS_MIA_IN_RESET;
1941 	return 0;
1942 }
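
/*
 * Illustrative sketch (not part of the original file): a guest polling
 * GUC_STATUS for the GuC to leave reset sees the stored value once and
 * then the cleared bit, so the poll completes without touching real
 * hardware:
 *
 *	read #1 -> vreg value (possibly including GS_MIA_IN_RESET)
 *	read #2 -> same value with GS_MIA_IN_RESET cleared
 */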
1943 
1944 static int mmio_read_from_hw(struct intel_vgpu *vgpu,
1945 		unsigned int offset, void *p_data, unsigned int bytes)
1946 {
1947 	struct intel_gvt *gvt = vgpu->gvt;
1948 	const struct intel_engine_cs *engine =
1949 		intel_gvt_render_mmio_to_engine(gvt, offset);
1950 
	/*
	 * Read the HW register in the following cases:
	 * a. the offset isn't a ring mmio;
	 * b. the offset's ring is currently running on the hw;
	 * c. the offset is a ring timestamp mmio.
	 */
1957 
1958 	if (!engine ||
1959 	    vgpu == gvt->scheduler.engine_owner[engine->id] ||
1960 	    offset == i915_mmio_reg_offset(RING_TIMESTAMP(engine->mmio_base)) ||
1961 	    offset == i915_mmio_reg_offset(RING_TIMESTAMP_UDW(engine->mmio_base))) {
1962 		mmio_hw_access_pre(gvt->gt);
1963 		vgpu_vreg(vgpu, offset) =
1964 			intel_uncore_read(gvt->gt->uncore, _MMIO(offset));
1965 		mmio_hw_access_post(gvt->gt);
1966 	}
1967 
1968 	return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
1969 }
1970 
1971 static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
1972 		void *p_data, unsigned int bytes)
1973 {
1974 	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
1975 	const struct intel_engine_cs *engine = intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
1976 	struct intel_vgpu_execlist *execlist;
1977 	u32 data = *(u32 *)p_data;
1978 	int ret = 0;
1979 
1980 	if (drm_WARN_ON(&i915->drm, !engine))
1981 		return -EINVAL;
1982 
	/*
	 * d3_entered is used to indicate that PPGTT invalidation should be
	 * skipped on vGPU reset: it is set on a D0->D3 PCI config write and
	 * cleared after the vGPU reset performed during resume.
	 * On S0ix exit the device power state also transitions from D3 to
	 * D0, just as in S3 resume, but without a vGPU reset (which is
	 * triggered by the QEMU device model). After S0ix exit all engines
	 * continue to work, yet d3_entered remains set, which would break
	 * the next vGPU reset logic (the expected PPGTT invalidation would
	 * be missed).
	 * Engines can only work in D0, so the first elsp write gives GVT a
	 * chance to clear d3_entered.
	 */
1995 	if (vgpu->d3_entered)
1996 		vgpu->d3_entered = false;
1997 
1998 	execlist = &vgpu->submission.execlist[engine->id];
1999 
2000 	execlist->elsp_dwords.data[3 - execlist->elsp_dwords.index] = data;
2001 	if (execlist->elsp_dwords.index == 3) {
2002 		ret = intel_vgpu_submit_execlist(vgpu, engine);
		if (ret)
			gvt_vgpu_err("failed to submit workload on ring %s\n",
				     engine->name);
2006 	}
2007 
2008 	++execlist->elsp_dwords.index;
2009 	execlist->elsp_dwords.index &= 0x3;
2010 	return ret;
2011 }
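
/*
 * Illustrative sketch (not part of the original file): a guest submits
 * an execlist by writing four dwords to the ELSP register. With the
 * handler above they accumulate as
 *
 *	write #1 (index 0) -> elsp_dwords.data[3]
 *	write #2 (index 1) -> elsp_dwords.data[2]
 *	write #3 (index 2) -> elsp_dwords.data[1]
 *	write #4 (index 3) -> elsp_dwords.data[0],
 *	                      then intel_vgpu_submit_execlist() runs
 *
 * and the index wraps back to 0 for the next submission. Which context
 * descriptor half each dword carries follows the execlist submit port
 * layout and is not restated here.
 */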
2012 
2013 static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
2014 		void *p_data, unsigned int bytes)
2015 {
2016 	u32 data = *(u32 *)p_data;
2017 	const struct intel_engine_cs *engine =
2018 		intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
2019 	bool enable_execlist;
2020 	int ret;
2021 
2022 	(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(1);
2023 	if (IS_COFFEELAKE(vgpu->gvt->gt->i915) ||
2024 	    IS_COMETLAKE(vgpu->gvt->gt->i915))
2025 		(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(2);
2026 	write_vreg(vgpu, offset, p_data, bytes);
2027 
2028 	if (IS_MASKED_BITS_ENABLED(data, 1)) {
2029 		enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
2030 		return 0;
2031 	}
2032 
2033 	if ((IS_COFFEELAKE(vgpu->gvt->gt->i915) ||
2034 	     IS_COMETLAKE(vgpu->gvt->gt->i915)) &&
2035 	    IS_MASKED_BITS_ENABLED(data, 2)) {
2036 		enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
2037 		return 0;
2038 	}
2039 
	/* When PPGTT mode is enabled, check whether the guest has called
	 * pvinfo. If it hasn't, treat it as a non-GVT-g-aware guest and
	 * stop emulating its cfg space, mmio, gtt, etc.
	 */
2044 	if ((IS_MASKED_BITS_ENABLED(data, GFX_PPGTT_ENABLE) ||
2045 	    IS_MASKED_BITS_ENABLED(data, GFX_RUN_LIST_ENABLE)) &&
2046 	    !vgpu->pv_notified) {
2047 		enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
2048 		return 0;
2049 	}
2050 	if (IS_MASKED_BITS_ENABLED(data, GFX_RUN_LIST_ENABLE) ||
2051 	    IS_MASKED_BITS_DISABLED(data, GFX_RUN_LIST_ENABLE)) {
2052 		enable_execlist = !!(data & GFX_RUN_LIST_ENABLE);
2053 
2054 		gvt_dbg_core("EXECLIST %s on ring %s\n",
2055 			     (enable_execlist ? "enabling" : "disabling"),
2056 			     engine->name);
2057 
2058 		if (!enable_execlist)
2059 			return 0;
2060 
2061 		ret = intel_vgpu_select_submission_ops(vgpu,
2062 						       engine->mask,
2063 						       INTEL_VGPU_EXECLIST_SUBMISSION);
2064 		if (ret)
2065 			return ret;
2066 
2067 		intel_vgpu_start_schedule(vgpu);
2068 	}
2069 	return 0;
2070 }
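
/*
 * Illustrative sketch (not part of the original file): ring mode
 * registers are "masked" registers, where the upper 16 bits of a write
 * select which of the lower 16 bits actually change:
 *
 *	_MASKED_BIT_ENABLE(bit)  == (bit << 16) | bit
 *	_MASKED_BIT_DISABLE(bit) == (bit << 16)
 *
 * so IS_MASKED_BITS_ENABLED(data, bit) above means "the guest wrote the
 * mask bit and set the value to 1", while IS_MASKED_BITS_DISABLED()
 * means "the guest wrote the mask bit with the value 0".
 */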
2071 
2072 static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
2073 		unsigned int offset, void *p_data, unsigned int bytes)
2074 {
2075 	unsigned int id = 0;
2076 
2077 	write_vreg(vgpu, offset, p_data, bytes);
2078 	vgpu_vreg(vgpu, offset) = 0;
2079 
2080 	switch (offset) {
2081 	case 0x4260:
2082 		id = RCS0;
2083 		break;
2084 	case 0x4264:
2085 		id = VCS0;
2086 		break;
2087 	case 0x4268:
2088 		id = VCS1;
2089 		break;
2090 	case 0x426c:
2091 		id = BCS0;
2092 		break;
2093 	case 0x4270:
2094 		id = VECS0;
2095 		break;
2096 	default:
2097 		return -EINVAL;
2098 	}
2099 	set_bit(id, (void *)vgpu->submission.tlb_handle_pending);
2100 
2101 	return 0;
2102 }
2103 
2104 static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
2105 	unsigned int offset, void *p_data, unsigned int bytes)
2106 {
2107 	u32 data;
2108 
2109 	write_vreg(vgpu, offset, p_data, bytes);
2110 	data = vgpu_vreg(vgpu, offset);
2111 
2112 	if (IS_MASKED_BITS_ENABLED(data, RESET_CTL_REQUEST_RESET))
2113 		data |= RESET_CTL_READY_TO_RESET;
2114 	else if (data & _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET))
2115 		data &= ~RESET_CTL_READY_TO_RESET;
2116 
2117 	vgpu_vreg(vgpu, offset) = data;
2118 	return 0;
2119 }
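
/*
 * Illustrative sketch (not part of the original file): the emulated
 * engine reset handshake as seen by the guest.
 *
 *	guest writes _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET)
 *	  -> the vreg gains RESET_CTL_READY_TO_RESET, so the guest's
 *	     poll for "ready to reset" completes immediately;
 *	guest writes _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET)
 *	  -> RESET_CTL_READY_TO_RESET is cleared again.
 */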
2120 
2121 static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu,
2122 				    unsigned int offset, void *p_data,
2123 				    unsigned int bytes)
2124 {
2125 	u32 data = *(u32 *)p_data;
2126 
2127 	(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(0x18);
2128 	write_vreg(vgpu, offset, p_data, bytes);
2129 
2130 	if (IS_MASKED_BITS_ENABLED(data, 0x10) ||
2131 	    IS_MASKED_BITS_ENABLED(data, 0x8))
2132 		enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
2133 
2134 	return 0;
2135 }
2136 
2137 #define MMIO_F(reg, s, f, am, rm, d, r, w) do { \
2138 	ret = new_mmio_info(gvt, i915_mmio_reg_offset(reg), \
2139 		f, s, am, rm, d, r, w); \
2140 	if (ret) \
2141 		return ret; \
2142 } while (0)
2143 
2144 #define MMIO_D(reg, d) \
2145 	MMIO_F(reg, 4, 0, 0, 0, d, NULL, NULL)
2146 
2147 #define MMIO_DH(reg, d, r, w) \
2148 	MMIO_F(reg, 4, 0, 0, 0, d, r, w)
2149 
2150 #define MMIO_DFH(reg, d, f, r, w) \
2151 	MMIO_F(reg, 4, f, 0, 0, d, r, w)
2152 
2153 #define MMIO_GM(reg, d, r, w) \
2154 	MMIO_F(reg, 4, F_GMADR, 0xFFFFF000, 0, d, r, w)
2155 
2156 #define MMIO_GM_RDR(reg, d, r, w) \
2157 	MMIO_F(reg, 4, F_GMADR | F_CMD_ACCESS, 0xFFFFF000, 0, d, r, w)
2158 
2159 #define MMIO_RO(reg, d, f, rm, r, w) \
2160 	MMIO_F(reg, 4, F_RO | f, 0, rm, d, r, w)
2161 
2162 #define MMIO_RING_F(prefix, s, f, am, rm, d, r, w) do { \
2163 	MMIO_F(prefix(RENDER_RING_BASE), s, f, am, rm, d, r, w); \
2164 	MMIO_F(prefix(BLT_RING_BASE), s, f, am, rm, d, r, w); \
2165 	MMIO_F(prefix(GEN6_BSD_RING_BASE), s, f, am, rm, d, r, w); \
2166 	MMIO_F(prefix(VEBOX_RING_BASE), s, f, am, rm, d, r, w); \
2167 	if (HAS_ENGINE(gvt->gt, VCS1)) \
2168 		MMIO_F(prefix(GEN8_BSD2_RING_BASE), s, f, am, rm, d, r, w); \
2169 } while (0)
2170 
2171 #define MMIO_RING_D(prefix, d) \
2172 	MMIO_RING_F(prefix, 4, 0, 0, 0, d, NULL, NULL)
2173 
2174 #define MMIO_RING_DFH(prefix, d, f, r, w) \
2175 	MMIO_RING_F(prefix, 4, f, 0, 0, d, r, w)
2176 
2177 #define MMIO_RING_GM(prefix, d, r, w) \
2178 	MMIO_RING_F(prefix, 4, F_GMADR, 0xFFFF0000, 0, d, r, w)
2179 
2180 #define MMIO_RING_GM_RDR(prefix, d, r, w) \
2181 	MMIO_RING_F(prefix, 4, F_GMADR | F_CMD_ACCESS, 0xFFFF0000, 0, d, r, w)
2182 
2183 #define MMIO_RING_RO(prefix, d, f, rm, r, w) \
2184 	MMIO_RING_F(prefix, 4, F_RO | f, 0, rm, d, r, w)
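
/*
 * Illustrative sketch (not part of the original file): the macros above
 * are thin wrappers around new_mmio_info(). For example,
 *
 *	MMIO_DH(PIPECONF(PIPE_A), D_ALL, NULL, pipeconf_mmio_write);
 *
 * expands through MMIO_F to roughly
 *
 *	ret = new_mmio_info(gvt, i915_mmio_reg_offset(PIPECONF(PIPE_A)),
 *			    0, 4, 0, 0, D_ALL, NULL, pipeconf_mmio_write);
 *	if (ret)
 *		return ret;
 *
 * i.e. a 4-byte register with no flags, no address or read-only mask,
 * valid on all supported platforms, using the default read handler and
 * a custom write handler.
 */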
2185 
2186 static int init_generic_mmio_info(struct intel_gvt *gvt)
2187 {
2188 	struct drm_i915_private *dev_priv = gvt->gt->i915;
2189 	int ret;
2190 
2191 	MMIO_RING_DFH(RING_IMR, D_ALL, 0, NULL,
2192 		intel_vgpu_reg_imr_handler);
2193 
2194 	MMIO_DFH(SDEIMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler);
2195 	MMIO_DFH(SDEIER, D_ALL, 0, NULL, intel_vgpu_reg_ier_handler);
2196 	MMIO_DFH(SDEIIR, D_ALL, 0, NULL, intel_vgpu_reg_iir_handler);
2197 	MMIO_D(SDEISR, D_ALL);
2198 
	MMIO_RING_DFH(RING_HWSTAM, D_ALL, 0, NULL, NULL);

2202 	MMIO_DH(GEN8_GAMW_ECO_DEV_RW_IA, D_BDW_PLUS, NULL,
2203 		gamw_echo_dev_rw_ia_write);
2204 
2205 	MMIO_GM_RDR(BSD_HWS_PGA_GEN7, D_ALL, NULL, NULL);
2206 	MMIO_GM_RDR(BLT_HWS_PGA_GEN7, D_ALL, NULL, NULL);
2207 	MMIO_GM_RDR(VEBOX_HWS_PGA_GEN7, D_ALL, NULL, NULL);
2208 
2209 #define RING_REG(base) _MMIO((base) + 0x28)
2210 	MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL);
2211 #undef RING_REG
2212 
2213 #define RING_REG(base) _MMIO((base) + 0x134)
2214 	MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL);
2215 #undef RING_REG
2216 
2217 #define RING_REG(base) _MMIO((base) + 0x6c)
2218 	MMIO_RING_DFH(RING_REG, D_ALL, 0, mmio_read_from_hw, NULL);
2219 #undef RING_REG
2220 	MMIO_DH(GEN7_SC_INSTDONE, D_BDW_PLUS, mmio_read_from_hw, NULL);
2221 
2222 	MMIO_GM_RDR(_MMIO(0x2148), D_ALL, NULL, NULL);
2223 	MMIO_GM_RDR(CCID(RENDER_RING_BASE), D_ALL, NULL, NULL);
2224 	MMIO_GM_RDR(_MMIO(0x12198), D_ALL, NULL, NULL);
2225 	MMIO_D(GEN7_CXT_SIZE, D_ALL);
2226 
2227 	MMIO_RING_DFH(RING_TAIL, D_ALL, 0, NULL, NULL);
2228 	MMIO_RING_DFH(RING_HEAD, D_ALL, 0, NULL, NULL);
2229 	MMIO_RING_DFH(RING_CTL, D_ALL, 0, NULL, NULL);
2230 	MMIO_RING_DFH(RING_ACTHD, D_ALL, 0, mmio_read_from_hw, NULL);
2231 	MMIO_RING_GM(RING_START, D_ALL, NULL, NULL);
2232 
2233 	/* RING MODE */
2234 #define RING_REG(base) _MMIO((base) + 0x29c)
2235 	MMIO_RING_DFH(RING_REG, D_ALL,
2236 		F_MODE_MASK | F_CMD_ACCESS | F_CMD_WRITE_PATCH, NULL,
2237 		ring_mode_mmio_write);
2238 #undef RING_REG
2239 
2240 	MMIO_RING_DFH(RING_MI_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
2241 		NULL, NULL);
2242 	MMIO_RING_DFH(RING_INSTPM, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
2243 			NULL, NULL);
2244 	MMIO_RING_DFH(RING_TIMESTAMP, D_ALL, F_CMD_ACCESS,
2245 			mmio_read_from_hw, NULL);
2246 	MMIO_RING_DFH(RING_TIMESTAMP_UDW, D_ALL, F_CMD_ACCESS,
2247 			mmio_read_from_hw, NULL);
2248 
2249 	MMIO_DFH(GEN7_GT_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2250 	MMIO_DFH(CACHE_MODE_0_GEN7, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
2251 		NULL, NULL);
2252 	MMIO_DFH(CACHE_MODE_1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2253 	MMIO_DFH(CACHE_MODE_0, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2254 	MMIO_DFH(_MMIO(0x2124), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2255 
2256 	MMIO_DFH(_MMIO(0x20dc), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2257 	MMIO_DFH(_3D_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2258 	MMIO_DFH(_MMIO(0x2088), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2259 	MMIO_DFH(FF_SLICE_CS_CHICKEN2, D_ALL,
2260 		 F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2261 	MMIO_DFH(_MMIO(0x2470), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2262 	MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL);
2263 	MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
2264 		NULL, NULL);
2265 	MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
2266 		 NULL, NULL);
2267 	MMIO_DFH(_MMIO(0x9030), D_ALL, F_CMD_ACCESS, NULL, NULL);
2268 	MMIO_DFH(_MMIO(0x20a0), D_ALL, F_CMD_ACCESS, NULL, NULL);
2269 	MMIO_DFH(_MMIO(0x2420), D_ALL, F_CMD_ACCESS, NULL, NULL);
2270 	MMIO_DFH(_MMIO(0x2430), D_ALL, F_CMD_ACCESS, NULL, NULL);
2271 	MMIO_DFH(_MMIO(0x2434), D_ALL, F_CMD_ACCESS, NULL, NULL);
2272 	MMIO_DFH(_MMIO(0x2438), D_ALL, F_CMD_ACCESS, NULL, NULL);
2273 	MMIO_DFH(_MMIO(0x243c), D_ALL, F_CMD_ACCESS, NULL, NULL);
2274 	MMIO_DFH(_MMIO(0x7018), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2275 	MMIO_DFH(HALF_SLICE_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2276 	MMIO_DFH(GEN7_HALF_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2277 
2278 	/* display */
2279 	MMIO_F(_MMIO(0x60220), 0x20, 0, 0, 0, D_ALL, NULL, NULL);
2280 	MMIO_D(_MMIO(0x602a0), D_ALL);
2281 
2282 	MMIO_D(_MMIO(0x65050), D_ALL);
2283 	MMIO_D(_MMIO(0x650b4), D_ALL);
2284 
2285 	MMIO_D(_MMIO(0xc4040), D_ALL);
2286 	MMIO_D(DERRMR, D_ALL);
2287 
2288 	MMIO_D(PIPEDSL(PIPE_A), D_ALL);
2289 	MMIO_D(PIPEDSL(PIPE_B), D_ALL);
2290 	MMIO_D(PIPEDSL(PIPE_C), D_ALL);
2291 	MMIO_D(PIPEDSL(_PIPE_EDP), D_ALL);
2292 
2293 	MMIO_DH(PIPECONF(PIPE_A), D_ALL, NULL, pipeconf_mmio_write);
2294 	MMIO_DH(PIPECONF(PIPE_B), D_ALL, NULL, pipeconf_mmio_write);
2295 	MMIO_DH(PIPECONF(PIPE_C), D_ALL, NULL, pipeconf_mmio_write);
2296 	MMIO_DH(PIPECONF(_PIPE_EDP), D_ALL, NULL, pipeconf_mmio_write);
2297 
2298 	MMIO_D(PIPESTAT(PIPE_A), D_ALL);
2299 	MMIO_D(PIPESTAT(PIPE_B), D_ALL);
2300 	MMIO_D(PIPESTAT(PIPE_C), D_ALL);
2301 	MMIO_D(PIPESTAT(_PIPE_EDP), D_ALL);
2302 
2303 	MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_A), D_ALL);
2304 	MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_B), D_ALL);
2305 	MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_C), D_ALL);
2306 	MMIO_D(PIPE_FLIPCOUNT_G4X(_PIPE_EDP), D_ALL);
2307 
2308 	MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_A), D_ALL);
2309 	MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_B), D_ALL);
2310 	MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_C), D_ALL);
2311 	MMIO_D(PIPE_FRMCOUNT_G4X(_PIPE_EDP), D_ALL);
2312 
2313 	MMIO_D(CURCNTR(PIPE_A), D_ALL);
2314 	MMIO_D(CURCNTR(PIPE_B), D_ALL);
2315 	MMIO_D(CURCNTR(PIPE_C), D_ALL);
2316 
2317 	MMIO_D(CURPOS(PIPE_A), D_ALL);
2318 	MMIO_D(CURPOS(PIPE_B), D_ALL);
2319 	MMIO_D(CURPOS(PIPE_C), D_ALL);
2320 
2321 	MMIO_D(CURBASE(PIPE_A), D_ALL);
2322 	MMIO_D(CURBASE(PIPE_B), D_ALL);
2323 	MMIO_D(CURBASE(PIPE_C), D_ALL);
2324 
2325 	MMIO_D(CUR_FBC_CTL(PIPE_A), D_ALL);
2326 	MMIO_D(CUR_FBC_CTL(PIPE_B), D_ALL);
2327 	MMIO_D(CUR_FBC_CTL(PIPE_C), D_ALL);
2328 
2329 	MMIO_D(_MMIO(0x700ac), D_ALL);
2330 	MMIO_D(_MMIO(0x710ac), D_ALL);
2331 	MMIO_D(_MMIO(0x720ac), D_ALL);
2332 
2333 	MMIO_D(_MMIO(0x70090), D_ALL);
2334 	MMIO_D(_MMIO(0x70094), D_ALL);
2335 	MMIO_D(_MMIO(0x70098), D_ALL);
2336 	MMIO_D(_MMIO(0x7009c), D_ALL);
2337 
2338 	MMIO_D(DSPCNTR(PIPE_A), D_ALL);
2339 	MMIO_D(DSPADDR(PIPE_A), D_ALL);
2340 	MMIO_D(DSPSTRIDE(PIPE_A), D_ALL);
2341 	MMIO_D(DSPPOS(PIPE_A), D_ALL);
2342 	MMIO_D(DSPSIZE(PIPE_A), D_ALL);
2343 	MMIO_DH(DSPSURF(PIPE_A), D_ALL, NULL, pri_surf_mmio_write);
2344 	MMIO_D(DSPOFFSET(PIPE_A), D_ALL);
2345 	MMIO_D(DSPSURFLIVE(PIPE_A), D_ALL);
2346 	MMIO_DH(REG_50080(PIPE_A, PLANE_PRIMARY), D_ALL, NULL,
2347 		reg50080_mmio_write);
2348 
2349 	MMIO_D(DSPCNTR(PIPE_B), D_ALL);
2350 	MMIO_D(DSPADDR(PIPE_B), D_ALL);
2351 	MMIO_D(DSPSTRIDE(PIPE_B), D_ALL);
2352 	MMIO_D(DSPPOS(PIPE_B), D_ALL);
2353 	MMIO_D(DSPSIZE(PIPE_B), D_ALL);
2354 	MMIO_DH(DSPSURF(PIPE_B), D_ALL, NULL, pri_surf_mmio_write);
2355 	MMIO_D(DSPOFFSET(PIPE_B), D_ALL);
2356 	MMIO_D(DSPSURFLIVE(PIPE_B), D_ALL);
2357 	MMIO_DH(REG_50080(PIPE_B, PLANE_PRIMARY), D_ALL, NULL,
2358 		reg50080_mmio_write);
2359 
2360 	MMIO_D(DSPCNTR(PIPE_C), D_ALL);
2361 	MMIO_D(DSPADDR(PIPE_C), D_ALL);
2362 	MMIO_D(DSPSTRIDE(PIPE_C), D_ALL);
2363 	MMIO_D(DSPPOS(PIPE_C), D_ALL);
2364 	MMIO_D(DSPSIZE(PIPE_C), D_ALL);
2365 	MMIO_DH(DSPSURF(PIPE_C), D_ALL, NULL, pri_surf_mmio_write);
2366 	MMIO_D(DSPOFFSET(PIPE_C), D_ALL);
2367 	MMIO_D(DSPSURFLIVE(PIPE_C), D_ALL);
2368 	MMIO_DH(REG_50080(PIPE_C, PLANE_PRIMARY), D_ALL, NULL,
2369 		reg50080_mmio_write);
2370 
2371 	MMIO_D(SPRCTL(PIPE_A), D_ALL);
2372 	MMIO_D(SPRLINOFF(PIPE_A), D_ALL);
2373 	MMIO_D(SPRSTRIDE(PIPE_A), D_ALL);
2374 	MMIO_D(SPRPOS(PIPE_A), D_ALL);
2375 	MMIO_D(SPRSIZE(PIPE_A), D_ALL);
2376 	MMIO_D(SPRKEYVAL(PIPE_A), D_ALL);
2377 	MMIO_D(SPRKEYMSK(PIPE_A), D_ALL);
2378 	MMIO_DH(SPRSURF(PIPE_A), D_ALL, NULL, spr_surf_mmio_write);
2379 	MMIO_D(SPRKEYMAX(PIPE_A), D_ALL);
2380 	MMIO_D(SPROFFSET(PIPE_A), D_ALL);
2381 	MMIO_D(SPRSCALE(PIPE_A), D_ALL);
2382 	MMIO_D(SPRSURFLIVE(PIPE_A), D_ALL);
2383 	MMIO_DH(REG_50080(PIPE_A, PLANE_SPRITE0), D_ALL, NULL,
2384 		reg50080_mmio_write);
2385 
2386 	MMIO_D(SPRCTL(PIPE_B), D_ALL);
2387 	MMIO_D(SPRLINOFF(PIPE_B), D_ALL);
2388 	MMIO_D(SPRSTRIDE(PIPE_B), D_ALL);
2389 	MMIO_D(SPRPOS(PIPE_B), D_ALL);
2390 	MMIO_D(SPRSIZE(PIPE_B), D_ALL);
2391 	MMIO_D(SPRKEYVAL(PIPE_B), D_ALL);
2392 	MMIO_D(SPRKEYMSK(PIPE_B), D_ALL);
2393 	MMIO_DH(SPRSURF(PIPE_B), D_ALL, NULL, spr_surf_mmio_write);
2394 	MMIO_D(SPRKEYMAX(PIPE_B), D_ALL);
2395 	MMIO_D(SPROFFSET(PIPE_B), D_ALL);
2396 	MMIO_D(SPRSCALE(PIPE_B), D_ALL);
2397 	MMIO_D(SPRSURFLIVE(PIPE_B), D_ALL);
2398 	MMIO_DH(REG_50080(PIPE_B, PLANE_SPRITE0), D_ALL, NULL,
2399 		reg50080_mmio_write);
2400 
2401 	MMIO_D(SPRCTL(PIPE_C), D_ALL);
2402 	MMIO_D(SPRLINOFF(PIPE_C), D_ALL);
2403 	MMIO_D(SPRSTRIDE(PIPE_C), D_ALL);
2404 	MMIO_D(SPRPOS(PIPE_C), D_ALL);
2405 	MMIO_D(SPRSIZE(PIPE_C), D_ALL);
2406 	MMIO_D(SPRKEYVAL(PIPE_C), D_ALL);
2407 	MMIO_D(SPRKEYMSK(PIPE_C), D_ALL);
2408 	MMIO_DH(SPRSURF(PIPE_C), D_ALL, NULL, spr_surf_mmio_write);
2409 	MMIO_D(SPRKEYMAX(PIPE_C), D_ALL);
2410 	MMIO_D(SPROFFSET(PIPE_C), D_ALL);
2411 	MMIO_D(SPRSCALE(PIPE_C), D_ALL);
2412 	MMIO_D(SPRSURFLIVE(PIPE_C), D_ALL);
2413 	MMIO_DH(REG_50080(PIPE_C, PLANE_SPRITE0), D_ALL, NULL,
2414 		reg50080_mmio_write);
2415 
2416 	MMIO_D(HTOTAL(TRANSCODER_A), D_ALL);
2417 	MMIO_D(HBLANK(TRANSCODER_A), D_ALL);
2418 	MMIO_D(HSYNC(TRANSCODER_A), D_ALL);
2419 	MMIO_D(VTOTAL(TRANSCODER_A), D_ALL);
2420 	MMIO_D(VBLANK(TRANSCODER_A), D_ALL);
2421 	MMIO_D(VSYNC(TRANSCODER_A), D_ALL);
2422 	MMIO_D(BCLRPAT(TRANSCODER_A), D_ALL);
2423 	MMIO_D(VSYNCSHIFT(TRANSCODER_A), D_ALL);
2424 	MMIO_D(PIPESRC(TRANSCODER_A), D_ALL);
2425 
2426 	MMIO_D(HTOTAL(TRANSCODER_B), D_ALL);
2427 	MMIO_D(HBLANK(TRANSCODER_B), D_ALL);
2428 	MMIO_D(HSYNC(TRANSCODER_B), D_ALL);
2429 	MMIO_D(VTOTAL(TRANSCODER_B), D_ALL);
2430 	MMIO_D(VBLANK(TRANSCODER_B), D_ALL);
2431 	MMIO_D(VSYNC(TRANSCODER_B), D_ALL);
2432 	MMIO_D(BCLRPAT(TRANSCODER_B), D_ALL);
2433 	MMIO_D(VSYNCSHIFT(TRANSCODER_B), D_ALL);
2434 	MMIO_D(PIPESRC(TRANSCODER_B), D_ALL);
2435 
2436 	MMIO_D(HTOTAL(TRANSCODER_C), D_ALL);
2437 	MMIO_D(HBLANK(TRANSCODER_C), D_ALL);
2438 	MMIO_D(HSYNC(TRANSCODER_C), D_ALL);
2439 	MMIO_D(VTOTAL(TRANSCODER_C), D_ALL);
2440 	MMIO_D(VBLANK(TRANSCODER_C), D_ALL);
2441 	MMIO_D(VSYNC(TRANSCODER_C), D_ALL);
2442 	MMIO_D(BCLRPAT(TRANSCODER_C), D_ALL);
2443 	MMIO_D(VSYNCSHIFT(TRANSCODER_C), D_ALL);
2444 	MMIO_D(PIPESRC(TRANSCODER_C), D_ALL);
2445 
2446 	MMIO_D(HTOTAL(TRANSCODER_EDP), D_ALL);
2447 	MMIO_D(HBLANK(TRANSCODER_EDP), D_ALL);
2448 	MMIO_D(HSYNC(TRANSCODER_EDP), D_ALL);
2449 	MMIO_D(VTOTAL(TRANSCODER_EDP), D_ALL);
2450 	MMIO_D(VBLANK(TRANSCODER_EDP), D_ALL);
2451 	MMIO_D(VSYNC(TRANSCODER_EDP), D_ALL);
2452 	MMIO_D(BCLRPAT(TRANSCODER_EDP), D_ALL);
2453 	MMIO_D(VSYNCSHIFT(TRANSCODER_EDP), D_ALL);
2454 
2455 	MMIO_D(PIPE_DATA_M1(TRANSCODER_A), D_ALL);
2456 	MMIO_D(PIPE_DATA_N1(TRANSCODER_A), D_ALL);
2457 	MMIO_D(PIPE_DATA_M2(TRANSCODER_A), D_ALL);
2458 	MMIO_D(PIPE_DATA_N2(TRANSCODER_A), D_ALL);
2459 	MMIO_D(PIPE_LINK_M1(TRANSCODER_A), D_ALL);
2460 	MMIO_D(PIPE_LINK_N1(TRANSCODER_A), D_ALL);
2461 	MMIO_D(PIPE_LINK_M2(TRANSCODER_A), D_ALL);
2462 	MMIO_D(PIPE_LINK_N2(TRANSCODER_A), D_ALL);
2463 
2464 	MMIO_D(PIPE_DATA_M1(TRANSCODER_B), D_ALL);
2465 	MMIO_D(PIPE_DATA_N1(TRANSCODER_B), D_ALL);
2466 	MMIO_D(PIPE_DATA_M2(TRANSCODER_B), D_ALL);
2467 	MMIO_D(PIPE_DATA_N2(TRANSCODER_B), D_ALL);
2468 	MMIO_D(PIPE_LINK_M1(TRANSCODER_B), D_ALL);
2469 	MMIO_D(PIPE_LINK_N1(TRANSCODER_B), D_ALL);
2470 	MMIO_D(PIPE_LINK_M2(TRANSCODER_B), D_ALL);
2471 	MMIO_D(PIPE_LINK_N2(TRANSCODER_B), D_ALL);
2472 
2473 	MMIO_D(PIPE_DATA_M1(TRANSCODER_C), D_ALL);
2474 	MMIO_D(PIPE_DATA_N1(TRANSCODER_C), D_ALL);
2475 	MMIO_D(PIPE_DATA_M2(TRANSCODER_C), D_ALL);
2476 	MMIO_D(PIPE_DATA_N2(TRANSCODER_C), D_ALL);
2477 	MMIO_D(PIPE_LINK_M1(TRANSCODER_C), D_ALL);
2478 	MMIO_D(PIPE_LINK_N1(TRANSCODER_C), D_ALL);
2479 	MMIO_D(PIPE_LINK_M2(TRANSCODER_C), D_ALL);
2480 	MMIO_D(PIPE_LINK_N2(TRANSCODER_C), D_ALL);
2481 
2482 	MMIO_D(PIPE_DATA_M1(TRANSCODER_EDP), D_ALL);
2483 	MMIO_D(PIPE_DATA_N1(TRANSCODER_EDP), D_ALL);
2484 	MMIO_D(PIPE_DATA_M2(TRANSCODER_EDP), D_ALL);
2485 	MMIO_D(PIPE_DATA_N2(TRANSCODER_EDP), D_ALL);
2486 	MMIO_D(PIPE_LINK_M1(TRANSCODER_EDP), D_ALL);
2487 	MMIO_D(PIPE_LINK_N1(TRANSCODER_EDP), D_ALL);
2488 	MMIO_D(PIPE_LINK_M2(TRANSCODER_EDP), D_ALL);
2489 	MMIO_D(PIPE_LINK_N2(TRANSCODER_EDP), D_ALL);
2490 
2491 	MMIO_D(PF_CTL(PIPE_A), D_ALL);
2492 	MMIO_D(PF_WIN_SZ(PIPE_A), D_ALL);
2493 	MMIO_D(PF_WIN_POS(PIPE_A), D_ALL);
2494 	MMIO_D(PF_VSCALE(PIPE_A), D_ALL);
2495 	MMIO_D(PF_HSCALE(PIPE_A), D_ALL);
2496 
2497 	MMIO_D(PF_CTL(PIPE_B), D_ALL);
2498 	MMIO_D(PF_WIN_SZ(PIPE_B), D_ALL);
2499 	MMIO_D(PF_WIN_POS(PIPE_B), D_ALL);
2500 	MMIO_D(PF_VSCALE(PIPE_B), D_ALL);
2501 	MMIO_D(PF_HSCALE(PIPE_B), D_ALL);
2502 
2503 	MMIO_D(PF_CTL(PIPE_C), D_ALL);
2504 	MMIO_D(PF_WIN_SZ(PIPE_C), D_ALL);
2505 	MMIO_D(PF_WIN_POS(PIPE_C), D_ALL);
2506 	MMIO_D(PF_VSCALE(PIPE_C), D_ALL);
2507 	MMIO_D(PF_HSCALE(PIPE_C), D_ALL);
2508 
2509 	MMIO_D(WM0_PIPE_ILK(PIPE_A), D_ALL);
2510 	MMIO_D(WM0_PIPE_ILK(PIPE_B), D_ALL);
2511 	MMIO_D(WM0_PIPE_ILK(PIPE_C), D_ALL);
2512 	MMIO_D(WM1_LP_ILK, D_ALL);
2513 	MMIO_D(WM2_LP_ILK, D_ALL);
2514 	MMIO_D(WM3_LP_ILK, D_ALL);
2515 	MMIO_D(WM1S_LP_ILK, D_ALL);
2516 	MMIO_D(WM2S_LP_IVB, D_ALL);
2517 	MMIO_D(WM3S_LP_IVB, D_ALL);
2518 
2519 	MMIO_D(BLC_PWM_CPU_CTL2, D_ALL);
2520 	MMIO_D(BLC_PWM_CPU_CTL, D_ALL);
2521 	MMIO_D(BLC_PWM_PCH_CTL1, D_ALL);
2522 	MMIO_D(BLC_PWM_PCH_CTL2, D_ALL);
2523 
2524 	MMIO_D(_MMIO(0x48268), D_ALL);
2525 
2526 	MMIO_F(PCH_GMBUS0, 4 * 4, 0, 0, 0, D_ALL, gmbus_mmio_read,
2527 		gmbus_mmio_write);
2528 	MMIO_F(PCH_GPIO_BASE, 6 * 4, F_UNALIGN, 0, 0, D_ALL, NULL, NULL);
2529 	MMIO_F(_MMIO(0xe4f00), 0x28, 0, 0, 0, D_ALL, NULL, NULL);
2530 
2531 	MMIO_F(_MMIO(_PCH_DPB_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
2532 		dp_aux_ch_ctl_mmio_write);
2533 	MMIO_F(_MMIO(_PCH_DPC_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
2534 		dp_aux_ch_ctl_mmio_write);
2535 	MMIO_F(_MMIO(_PCH_DPD_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
2536 		dp_aux_ch_ctl_mmio_write);
2537 
2538 	MMIO_DH(PCH_ADPA, D_PRE_SKL, NULL, pch_adpa_mmio_write);
2539 
2540 	MMIO_DH(_MMIO(_PCH_TRANSACONF), D_ALL, NULL, transconf_mmio_write);
2541 	MMIO_DH(_MMIO(_PCH_TRANSBCONF), D_ALL, NULL, transconf_mmio_write);
2542 
2543 	MMIO_DH(FDI_RX_IIR(PIPE_A), D_ALL, NULL, fdi_rx_iir_mmio_write);
2544 	MMIO_DH(FDI_RX_IIR(PIPE_B), D_ALL, NULL, fdi_rx_iir_mmio_write);
2545 	MMIO_DH(FDI_RX_IIR(PIPE_C), D_ALL, NULL, fdi_rx_iir_mmio_write);
2546 	MMIO_DH(FDI_RX_IMR(PIPE_A), D_ALL, NULL, update_fdi_rx_iir_status);
2547 	MMIO_DH(FDI_RX_IMR(PIPE_B), D_ALL, NULL, update_fdi_rx_iir_status);
2548 	MMIO_DH(FDI_RX_IMR(PIPE_C), D_ALL, NULL, update_fdi_rx_iir_status);
2549 	MMIO_DH(FDI_RX_CTL(PIPE_A), D_ALL, NULL, update_fdi_rx_iir_status);
2550 	MMIO_DH(FDI_RX_CTL(PIPE_B), D_ALL, NULL, update_fdi_rx_iir_status);
2551 	MMIO_DH(FDI_RX_CTL(PIPE_C), D_ALL, NULL, update_fdi_rx_iir_status);
2552 
2553 	MMIO_D(_MMIO(_PCH_TRANS_HTOTAL_A), D_ALL);
2554 	MMIO_D(_MMIO(_PCH_TRANS_HBLANK_A), D_ALL);
2555 	MMIO_D(_MMIO(_PCH_TRANS_HSYNC_A), D_ALL);
2556 	MMIO_D(_MMIO(_PCH_TRANS_VTOTAL_A), D_ALL);
2557 	MMIO_D(_MMIO(_PCH_TRANS_VBLANK_A), D_ALL);
2558 	MMIO_D(_MMIO(_PCH_TRANS_VSYNC_A), D_ALL);
2559 	MMIO_D(_MMIO(_PCH_TRANS_VSYNCSHIFT_A), D_ALL);
2560 
2561 	MMIO_D(_MMIO(_PCH_TRANS_HTOTAL_B), D_ALL);
2562 	MMIO_D(_MMIO(_PCH_TRANS_HBLANK_B), D_ALL);
2563 	MMIO_D(_MMIO(_PCH_TRANS_HSYNC_B), D_ALL);
2564 	MMIO_D(_MMIO(_PCH_TRANS_VTOTAL_B), D_ALL);
2565 	MMIO_D(_MMIO(_PCH_TRANS_VBLANK_B), D_ALL);
2566 	MMIO_D(_MMIO(_PCH_TRANS_VSYNC_B), D_ALL);
2567 	MMIO_D(_MMIO(_PCH_TRANS_VSYNCSHIFT_B), D_ALL);
2568 
2569 	MMIO_D(_MMIO(_PCH_TRANSA_DATA_M1), D_ALL);
2570 	MMIO_D(_MMIO(_PCH_TRANSA_DATA_N1), D_ALL);
2571 	MMIO_D(_MMIO(_PCH_TRANSA_DATA_M2), D_ALL);
2572 	MMIO_D(_MMIO(_PCH_TRANSA_DATA_N2), D_ALL);
2573 	MMIO_D(_MMIO(_PCH_TRANSA_LINK_M1), D_ALL);
2574 	MMIO_D(_MMIO(_PCH_TRANSA_LINK_N1), D_ALL);
2575 	MMIO_D(_MMIO(_PCH_TRANSA_LINK_M2), D_ALL);
2576 	MMIO_D(_MMIO(_PCH_TRANSA_LINK_N2), D_ALL);
2577 
2578 	MMIO_D(TRANS_DP_CTL(PIPE_A), D_ALL);
2579 	MMIO_D(TRANS_DP_CTL(PIPE_B), D_ALL);
2580 	MMIO_D(TRANS_DP_CTL(PIPE_C), D_ALL);
2581 
2582 	MMIO_D(TVIDEO_DIP_CTL(PIPE_A), D_ALL);
2583 	MMIO_D(TVIDEO_DIP_DATA(PIPE_A), D_ALL);
2584 	MMIO_D(TVIDEO_DIP_GCP(PIPE_A), D_ALL);
2585 
2586 	MMIO_D(TVIDEO_DIP_CTL(PIPE_B), D_ALL);
2587 	MMIO_D(TVIDEO_DIP_DATA(PIPE_B), D_ALL);
2588 	MMIO_D(TVIDEO_DIP_GCP(PIPE_B), D_ALL);
2589 
2590 	MMIO_D(TVIDEO_DIP_CTL(PIPE_C), D_ALL);
2591 	MMIO_D(TVIDEO_DIP_DATA(PIPE_C), D_ALL);
2592 	MMIO_D(TVIDEO_DIP_GCP(PIPE_C), D_ALL);
2593 
2594 	MMIO_D(_MMIO(_FDI_RXA_MISC), D_ALL);
2595 	MMIO_D(_MMIO(_FDI_RXB_MISC), D_ALL);
2596 	MMIO_D(_MMIO(_FDI_RXA_TUSIZE1), D_ALL);
2597 	MMIO_D(_MMIO(_FDI_RXA_TUSIZE2), D_ALL);
2598 	MMIO_D(_MMIO(_FDI_RXB_TUSIZE1), D_ALL);
2599 	MMIO_D(_MMIO(_FDI_RXB_TUSIZE2), D_ALL);
2600 
2601 	MMIO_DH(PCH_PP_CONTROL, D_ALL, NULL, pch_pp_control_mmio_write);
2602 	MMIO_D(PCH_PP_DIVISOR, D_ALL);
	MMIO_D(PCH_PP_STATUS, D_ALL);
2604 	MMIO_D(PCH_LVDS, D_ALL);
2605 	MMIO_D(_MMIO(_PCH_DPLL_A), D_ALL);
2606 	MMIO_D(_MMIO(_PCH_DPLL_B), D_ALL);
2607 	MMIO_D(_MMIO(_PCH_FPA0), D_ALL);
2608 	MMIO_D(_MMIO(_PCH_FPA1), D_ALL);
2609 	MMIO_D(_MMIO(_PCH_FPB0), D_ALL);
2610 	MMIO_D(_MMIO(_PCH_FPB1), D_ALL);
2611 	MMIO_D(PCH_DREF_CONTROL, D_ALL);
2612 	MMIO_D(PCH_RAWCLK_FREQ, D_ALL);
2613 	MMIO_D(PCH_DPLL_SEL, D_ALL);
2614 
2615 	MMIO_D(_MMIO(0x61208), D_ALL);
2616 	MMIO_D(_MMIO(0x6120c), D_ALL);
2617 	MMIO_D(PCH_PP_ON_DELAYS, D_ALL);
2618 	MMIO_D(PCH_PP_OFF_DELAYS, D_ALL);
2619 
2620 	MMIO_DH(_MMIO(0xe651c), D_ALL, dpy_reg_mmio_read, NULL);
2621 	MMIO_DH(_MMIO(0xe661c), D_ALL, dpy_reg_mmio_read, NULL);
2622 	MMIO_DH(_MMIO(0xe671c), D_ALL, dpy_reg_mmio_read, NULL);
2623 	MMIO_DH(_MMIO(0xe681c), D_ALL, dpy_reg_mmio_read, NULL);
2624 	MMIO_DH(_MMIO(0xe6c04), D_ALL, dpy_reg_mmio_read, NULL);
2625 	MMIO_DH(_MMIO(0xe6e1c), D_ALL, dpy_reg_mmio_read, NULL);
2626 
2627 	MMIO_RO(PCH_PORT_HOTPLUG, D_ALL, 0,
2628 		PORTA_HOTPLUG_STATUS_MASK
2629 		| PORTB_HOTPLUG_STATUS_MASK
2630 		| PORTC_HOTPLUG_STATUS_MASK
2631 		| PORTD_HOTPLUG_STATUS_MASK,
2632 		NULL, NULL);
2633 
2634 	MMIO_DH(LCPLL_CTL, D_ALL, NULL, lcpll_ctl_mmio_write);
2635 	MMIO_D(FUSE_STRAP, D_ALL);
2636 	MMIO_D(DIGITAL_PORT_HOTPLUG_CNTRL, D_ALL);
2637 
2638 	MMIO_D(DISP_ARB_CTL, D_ALL);
2639 	MMIO_D(DISP_ARB_CTL2, D_ALL);
2640 
2641 	MMIO_D(ILK_DISPLAY_CHICKEN1, D_ALL);
2642 	MMIO_D(ILK_DISPLAY_CHICKEN2, D_ALL);
2643 	MMIO_D(ILK_DSPCLK_GATE_D, D_ALL);
2644 
2645 	MMIO_D(SOUTH_CHICKEN1, D_ALL);
2646 	MMIO_DH(SOUTH_CHICKEN2, D_ALL, NULL, south_chicken2_mmio_write);
2647 	MMIO_D(_MMIO(_TRANSA_CHICKEN1), D_ALL);
2648 	MMIO_D(_MMIO(_TRANSB_CHICKEN1), D_ALL);
2649 	MMIO_D(SOUTH_DSPCLK_GATE_D, D_ALL);
2650 	MMIO_D(_MMIO(_TRANSA_CHICKEN2), D_ALL);
2651 	MMIO_D(_MMIO(_TRANSB_CHICKEN2), D_ALL);
2652 
2653 	MMIO_D(ILK_DPFC_CB_BASE(INTEL_FBC_A), D_ALL);
2654 	MMIO_D(ILK_DPFC_CONTROL(INTEL_FBC_A), D_ALL);
2655 	MMIO_D(ILK_DPFC_RECOMP_CTL(INTEL_FBC_A), D_ALL);
2656 	MMIO_D(ILK_DPFC_STATUS(INTEL_FBC_A), D_ALL);
2657 	MMIO_D(ILK_DPFC_FENCE_YOFF(INTEL_FBC_A), D_ALL);
2658 	MMIO_D(ILK_DPFC_CHICKEN(INTEL_FBC_A), D_ALL);
2659 	MMIO_D(ILK_FBC_RT_BASE, D_ALL);
2660 
2661 	MMIO_D(IPS_CTL, D_ALL);
2662 
2663 	MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_A), D_ALL);
2664 	MMIO_D(PIPE_CSC_COEFF_BY(PIPE_A), D_ALL);
2665 	MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_A), D_ALL);
2666 	MMIO_D(PIPE_CSC_COEFF_BU(PIPE_A), D_ALL);
2667 	MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_A), D_ALL);
2668 	MMIO_D(PIPE_CSC_COEFF_BV(PIPE_A), D_ALL);
2669 	MMIO_D(PIPE_CSC_MODE(PIPE_A), D_ALL);
2670 	MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_A), D_ALL);
2671 	MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_A), D_ALL);
2672 	MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_A), D_ALL);
2673 	MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_A), D_ALL);
2674 	MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_A), D_ALL);
2675 	MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_A), D_ALL);
2676 
2677 	MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_B), D_ALL);
2678 	MMIO_D(PIPE_CSC_COEFF_BY(PIPE_B), D_ALL);
2679 	MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_B), D_ALL);
2680 	MMIO_D(PIPE_CSC_COEFF_BU(PIPE_B), D_ALL);
2681 	MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_B), D_ALL);
2682 	MMIO_D(PIPE_CSC_COEFF_BV(PIPE_B), D_ALL);
2683 	MMIO_D(PIPE_CSC_MODE(PIPE_B), D_ALL);
2684 	MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_B), D_ALL);
2685 	MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_B), D_ALL);
2686 	MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_B), D_ALL);
2687 	MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_B), D_ALL);
2688 	MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_B), D_ALL);
2689 	MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_B), D_ALL);
2690 
2691 	MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_C), D_ALL);
2692 	MMIO_D(PIPE_CSC_COEFF_BY(PIPE_C), D_ALL);
2693 	MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_C), D_ALL);
2694 	MMIO_D(PIPE_CSC_COEFF_BU(PIPE_C), D_ALL);
2695 	MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_C), D_ALL);
2696 	MMIO_D(PIPE_CSC_COEFF_BV(PIPE_C), D_ALL);
2697 	MMIO_D(PIPE_CSC_MODE(PIPE_C), D_ALL);
2698 	MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_C), D_ALL);
2699 	MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_C), D_ALL);
2700 	MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_C), D_ALL);
2701 	MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_C), D_ALL);
2702 	MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_C), D_ALL);
2703 	MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_C), D_ALL);
2704 
2705 	MMIO_D(PREC_PAL_INDEX(PIPE_A), D_ALL);
2706 	MMIO_D(PREC_PAL_DATA(PIPE_A), D_ALL);
2707 	MMIO_F(PREC_PAL_GC_MAX(PIPE_A, 0), 4 * 3, 0, 0, 0, D_ALL, NULL, NULL);
2708 
2709 	MMIO_D(PREC_PAL_INDEX(PIPE_B), D_ALL);
2710 	MMIO_D(PREC_PAL_DATA(PIPE_B), D_ALL);
2711 	MMIO_F(PREC_PAL_GC_MAX(PIPE_B, 0), 4 * 3, 0, 0, 0, D_ALL, NULL, NULL);
2712 
2713 	MMIO_D(PREC_PAL_INDEX(PIPE_C), D_ALL);
2714 	MMIO_D(PREC_PAL_DATA(PIPE_C), D_ALL);
2715 	MMIO_F(PREC_PAL_GC_MAX(PIPE_C, 0), 4 * 3, 0, 0, 0, D_ALL, NULL, NULL);
2716 
2717 	MMIO_D(_MMIO(0x60110), D_ALL);
2718 	MMIO_D(_MMIO(0x61110), D_ALL);
2719 	MMIO_F(_MMIO(0x70400), 0x40, 0, 0, 0, D_ALL, NULL, NULL);
2720 	MMIO_F(_MMIO(0x71400), 0x40, 0, 0, 0, D_ALL, NULL, NULL);
2721 	MMIO_F(_MMIO(0x72400), 0x40, 0, 0, 0, D_ALL, NULL, NULL);
2722 	MMIO_F(_MMIO(0x70440), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
2723 	MMIO_F(_MMIO(0x71440), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
2724 	MMIO_F(_MMIO(0x72440), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
2725 	MMIO_F(_MMIO(0x7044c), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
2726 	MMIO_F(_MMIO(0x7144c), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
2727 	MMIO_F(_MMIO(0x7244c), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
2728 
2729 	MMIO_D(WM_LINETIME(PIPE_A), D_ALL);
2730 	MMIO_D(WM_LINETIME(PIPE_B), D_ALL);
2731 	MMIO_D(WM_LINETIME(PIPE_C), D_ALL);
2732 	MMIO_D(SPLL_CTL, D_ALL);
2733 	MMIO_D(_MMIO(_WRPLL_CTL1), D_ALL);
2734 	MMIO_D(_MMIO(_WRPLL_CTL2), D_ALL);
2735 	MMIO_D(PORT_CLK_SEL(PORT_A), D_ALL);
2736 	MMIO_D(PORT_CLK_SEL(PORT_B), D_ALL);
2737 	MMIO_D(PORT_CLK_SEL(PORT_C), D_ALL);
2738 	MMIO_D(PORT_CLK_SEL(PORT_D), D_ALL);
2739 	MMIO_D(PORT_CLK_SEL(PORT_E), D_ALL);
2740 	MMIO_D(TRANS_CLK_SEL(TRANSCODER_A), D_ALL);
2741 	MMIO_D(TRANS_CLK_SEL(TRANSCODER_B), D_ALL);
2742 	MMIO_D(TRANS_CLK_SEL(TRANSCODER_C), D_ALL);
2743 
2744 	MMIO_D(HSW_NDE_RSTWRN_OPT, D_ALL);
2745 	MMIO_D(_MMIO(0x46508), D_ALL);
2746 
2747 	MMIO_D(_MMIO(0x49080), D_ALL);
2748 	MMIO_D(_MMIO(0x49180), D_ALL);
2749 	MMIO_D(_MMIO(0x49280), D_ALL);
2750 
2751 	MMIO_F(_MMIO(0x49090), 0x14, 0, 0, 0, D_ALL, NULL, NULL);
2752 	MMIO_F(_MMIO(0x49190), 0x14, 0, 0, 0, D_ALL, NULL, NULL);
2753 	MMIO_F(_MMIO(0x49290), 0x14, 0, 0, 0, D_ALL, NULL, NULL);
2754 
2755 	MMIO_D(GAMMA_MODE(PIPE_A), D_ALL);
2756 	MMIO_D(GAMMA_MODE(PIPE_B), D_ALL);
2757 	MMIO_D(GAMMA_MODE(PIPE_C), D_ALL);
2758 
2759 	MMIO_D(PIPE_MULT(PIPE_A), D_ALL);
2760 	MMIO_D(PIPE_MULT(PIPE_B), D_ALL);
2761 	MMIO_D(PIPE_MULT(PIPE_C), D_ALL);
2762 
2763 	MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_A), D_ALL);
2764 	MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_B), D_ALL);
2765 	MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_C), D_ALL);
2766 
2767 	MMIO_DH(SFUSE_STRAP, D_ALL, NULL, NULL);
2768 	MMIO_D(SBI_ADDR, D_ALL);
2769 	MMIO_DH(SBI_DATA, D_ALL, sbi_data_mmio_read, NULL);
2770 	MMIO_DH(SBI_CTL_STAT, D_ALL, NULL, sbi_ctl_mmio_write);
2771 	MMIO_D(PIXCLK_GATE, D_ALL);
2772 
2773 	MMIO_F(_MMIO(_DPA_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_ALL, NULL,
2774 		dp_aux_ch_ctl_mmio_write);
2775 
2776 	MMIO_DH(DDI_BUF_CTL(PORT_A), D_ALL, NULL, ddi_buf_ctl_mmio_write);
2777 	MMIO_DH(DDI_BUF_CTL(PORT_B), D_ALL, NULL, ddi_buf_ctl_mmio_write);
2778 	MMIO_DH(DDI_BUF_CTL(PORT_C), D_ALL, NULL, ddi_buf_ctl_mmio_write);
2779 	MMIO_DH(DDI_BUF_CTL(PORT_D), D_ALL, NULL, ddi_buf_ctl_mmio_write);
2780 	MMIO_DH(DDI_BUF_CTL(PORT_E), D_ALL, NULL, ddi_buf_ctl_mmio_write);
2781 
2782 	MMIO_DH(DP_TP_CTL(PORT_A), D_ALL, NULL, dp_tp_ctl_mmio_write);
2783 	MMIO_DH(DP_TP_CTL(PORT_B), D_ALL, NULL, dp_tp_ctl_mmio_write);
2784 	MMIO_DH(DP_TP_CTL(PORT_C), D_ALL, NULL, dp_tp_ctl_mmio_write);
2785 	MMIO_DH(DP_TP_CTL(PORT_D), D_ALL, NULL, dp_tp_ctl_mmio_write);
2786 	MMIO_DH(DP_TP_CTL(PORT_E), D_ALL, NULL, dp_tp_ctl_mmio_write);
2787 
2788 	MMIO_DH(DP_TP_STATUS(PORT_A), D_ALL, NULL, dp_tp_status_mmio_write);
2789 	MMIO_DH(DP_TP_STATUS(PORT_B), D_ALL, NULL, dp_tp_status_mmio_write);
2790 	MMIO_DH(DP_TP_STATUS(PORT_C), D_ALL, NULL, dp_tp_status_mmio_write);
2791 	MMIO_DH(DP_TP_STATUS(PORT_D), D_ALL, NULL, dp_tp_status_mmio_write);
2792 	MMIO_DH(DP_TP_STATUS(PORT_E), D_ALL, NULL, NULL);
2793 
2794 	MMIO_F(_MMIO(_DDI_BUF_TRANS_A), 0x50, 0, 0, 0, D_ALL, NULL, NULL);
2795 	MMIO_F(_MMIO(0x64e60), 0x50, 0, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(_MMIO(0x64ec0), 0x50, 0, 0, 0, D_ALL, NULL, NULL);
2797 	MMIO_F(_MMIO(0x64f20), 0x50, 0, 0, 0, D_ALL, NULL, NULL);
2798 	MMIO_F(_MMIO(0x64f80), 0x50, 0, 0, 0, D_ALL, NULL, NULL);
2799 
2800 	MMIO_D(HSW_AUD_CFG(PIPE_A), D_ALL);
2801 	MMIO_D(HSW_AUD_PIN_ELD_CP_VLD, D_ALL);
2802 	MMIO_D(HSW_AUD_MISC_CTRL(PIPE_A), D_ALL);
2803 
2804 	MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_A), D_ALL, NULL, NULL);
2805 	MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_B), D_ALL, NULL, NULL);
2806 	MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_C), D_ALL, NULL, NULL);
2807 	MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_EDP), D_ALL, NULL, NULL);
2808 
2809 	MMIO_D(_MMIO(_TRANSA_MSA_MISC), D_ALL);
2810 	MMIO_D(_MMIO(_TRANSB_MSA_MISC), D_ALL);
2811 	MMIO_D(_MMIO(_TRANSC_MSA_MISC), D_ALL);
2812 	MMIO_D(_MMIO(_TRANS_EDP_MSA_MISC), D_ALL);
2813 
2814 	MMIO_DH(FORCEWAKE, D_ALL, NULL, NULL);
2815 	MMIO_D(FORCEWAKE_ACK, D_ALL);
2816 	MMIO_D(GEN6_GT_CORE_STATUS, D_ALL);
2817 	MMIO_D(GEN6_GT_THREAD_STATUS_REG, D_ALL);
2818 	MMIO_DFH(GTFIFODBG, D_ALL, F_CMD_ACCESS, NULL, NULL);
2819 	MMIO_DFH(GTFIFOCTL, D_ALL, F_CMD_ACCESS, NULL, NULL);
2820 	MMIO_DH(FORCEWAKE_MT, D_PRE_SKL, NULL, mul_force_wake_write);
2821 	MMIO_DH(FORCEWAKE_ACK_HSW, D_BDW, NULL, NULL);
2822 	MMIO_D(ECOBUS, D_ALL);
2823 	MMIO_DH(GEN6_RC_CONTROL, D_ALL, NULL, NULL);
2824 	MMIO_DH(GEN6_RC_STATE, D_ALL, NULL, NULL);
2825 	MMIO_D(GEN6_RPNSWREQ, D_ALL);
2826 	MMIO_D(GEN6_RC_VIDEO_FREQ, D_ALL);
2827 	MMIO_D(GEN6_RP_DOWN_TIMEOUT, D_ALL);
2828 	MMIO_D(GEN6_RP_INTERRUPT_LIMITS, D_ALL);
2829 	MMIO_D(GEN6_RPSTAT1, D_ALL);
2830 	MMIO_D(GEN6_RP_CONTROL, D_ALL);
2831 	MMIO_D(GEN6_RP_UP_THRESHOLD, D_ALL);
2832 	MMIO_D(GEN6_RP_DOWN_THRESHOLD, D_ALL);
2833 	MMIO_D(GEN6_RP_CUR_UP_EI, D_ALL);
2834 	MMIO_D(GEN6_RP_CUR_UP, D_ALL);
2835 	MMIO_D(GEN6_RP_PREV_UP, D_ALL);
2836 	MMIO_D(GEN6_RP_CUR_DOWN_EI, D_ALL);
2837 	MMIO_D(GEN6_RP_CUR_DOWN, D_ALL);
2838 	MMIO_D(GEN6_RP_PREV_DOWN, D_ALL);
2839 	MMIO_D(GEN6_RP_UP_EI, D_ALL);
2840 	MMIO_D(GEN6_RP_DOWN_EI, D_ALL);
2841 	MMIO_D(GEN6_RP_IDLE_HYSTERSIS, D_ALL);
2842 	MMIO_D(GEN6_RC1_WAKE_RATE_LIMIT, D_ALL);
2843 	MMIO_D(GEN6_RC6_WAKE_RATE_LIMIT, D_ALL);
2844 	MMIO_D(GEN6_RC6pp_WAKE_RATE_LIMIT, D_ALL);
2845 	MMIO_D(GEN6_RC_EVALUATION_INTERVAL, D_ALL);
2846 	MMIO_D(GEN6_RC_IDLE_HYSTERSIS, D_ALL);
2847 	MMIO_D(GEN6_RC_SLEEP, D_ALL);
2848 	MMIO_D(GEN6_RC1e_THRESHOLD, D_ALL);
2849 	MMIO_D(GEN6_RC6_THRESHOLD, D_ALL);
2850 	MMIO_D(GEN6_RC6p_THRESHOLD, D_ALL);
2851 	MMIO_D(GEN6_RC6pp_THRESHOLD, D_ALL);
2852 	MMIO_D(GEN6_PMINTRMSK, D_ALL);
2853 	MMIO_DH(HSW_PWR_WELL_CTL1, D_BDW, NULL, power_well_ctl_mmio_write);
2854 	MMIO_DH(HSW_PWR_WELL_CTL2, D_BDW, NULL, power_well_ctl_mmio_write);
2855 	MMIO_DH(HSW_PWR_WELL_CTL3, D_BDW, NULL, power_well_ctl_mmio_write);
2856 	MMIO_DH(HSW_PWR_WELL_CTL4, D_BDW, NULL, power_well_ctl_mmio_write);
2857 	MMIO_DH(HSW_PWR_WELL_CTL5, D_BDW, NULL, power_well_ctl_mmio_write);
2858 	MMIO_DH(HSW_PWR_WELL_CTL6, D_BDW, NULL, power_well_ctl_mmio_write);
2859 
2860 	MMIO_D(RSTDBYCTL, D_ALL);
2861 
2862 	MMIO_DH(GEN6_GDRST, D_ALL, NULL, gdrst_mmio_write);
2863 	MMIO_F(FENCE_REG_GEN6_LO(0), 0x80, 0, 0, 0, D_ALL, fence_mmio_read, fence_mmio_write);
2864 	MMIO_DH(CPU_VGACNTRL, D_ALL, NULL, vga_control_mmio_write);
2865 
2866 	MMIO_D(TILECTL, D_ALL);
2867 
2868 	MMIO_D(GEN6_UCGCTL1, D_ALL);
2869 	MMIO_D(GEN6_UCGCTL2, D_ALL);
2870 
2871 	MMIO_F(_MMIO(0x4f000), 0x90, 0, 0, 0, D_ALL, NULL, NULL);
2872 
2873 	MMIO_D(GEN6_PCODE_DATA, D_ALL);
2874 	MMIO_D(_MMIO(0x13812c), D_ALL);
2875 	MMIO_DH(GEN7_ERR_INT, D_ALL, NULL, NULL);
2876 	MMIO_D(HSW_EDRAM_CAP, D_ALL);
2877 	MMIO_D(HSW_IDICR, D_ALL);
2878 	MMIO_DH(GFX_FLSH_CNTL_GEN6, D_ALL, NULL, NULL);
2879 
2880 	MMIO_D(_MMIO(0x3c), D_ALL);
2881 	MMIO_D(_MMIO(0x860), D_ALL);
2882 	MMIO_D(ECOSKPD(RENDER_RING_BASE), D_ALL);
2883 	MMIO_D(_MMIO(0x121d0), D_ALL);
2884 	MMIO_D(ECOSKPD(BLT_RING_BASE), D_ALL);
2885 	MMIO_D(_MMIO(0x41d0), D_ALL);
2886 	MMIO_D(GAC_ECO_BITS, D_ALL);
2887 	MMIO_D(_MMIO(0x6200), D_ALL);
2888 	MMIO_D(_MMIO(0x6204), D_ALL);
2889 	MMIO_D(_MMIO(0x6208), D_ALL);
2890 	MMIO_D(_MMIO(0x7118), D_ALL);
2891 	MMIO_D(_MMIO(0x7180), D_ALL);
2892 	MMIO_D(_MMIO(0x7408), D_ALL);
2893 	MMIO_D(_MMIO(0x7c00), D_ALL);
2894 	MMIO_DH(GEN6_MBCTL, D_ALL, NULL, mbctl_write);
2895 	MMIO_D(_MMIO(0x911c), D_ALL);
2896 	MMIO_D(_MMIO(0x9120), D_ALL);
2897 	MMIO_DFH(GEN7_UCGCTL4, D_ALL, F_CMD_ACCESS, NULL, NULL);
2898 
2899 	MMIO_D(GAB_CTL, D_ALL);
2900 	MMIO_D(_MMIO(0x48800), D_ALL);
2901 	MMIO_D(_MMIO(0xce044), D_ALL);
2902 	MMIO_D(_MMIO(0xe6500), D_ALL);
2903 	MMIO_D(_MMIO(0xe6504), D_ALL);
2904 	MMIO_D(_MMIO(0xe6600), D_ALL);
2905 	MMIO_D(_MMIO(0xe6604), D_ALL);
2906 	MMIO_D(_MMIO(0xe6700), D_ALL);
2907 	MMIO_D(_MMIO(0xe6704), D_ALL);
2908 	MMIO_D(_MMIO(0xe6800), D_ALL);
2909 	MMIO_D(_MMIO(0xe6804), D_ALL);
2910 	MMIO_D(PCH_GMBUS4, D_ALL);
2911 	MMIO_D(PCH_GMBUS5, D_ALL);
2912 
2913 	MMIO_D(_MMIO(0x902c), D_ALL);
2914 	MMIO_D(_MMIO(0xec008), D_ALL);
2915 	MMIO_D(_MMIO(0xec00c), D_ALL);
2916 	MMIO_D(_MMIO(0xec008 + 0x18), D_ALL);
2917 	MMIO_D(_MMIO(0xec00c + 0x18), D_ALL);
2918 	MMIO_D(_MMIO(0xec008 + 0x18 * 2), D_ALL);
2919 	MMIO_D(_MMIO(0xec00c + 0x18 * 2), D_ALL);
2920 	MMIO_D(_MMIO(0xec008 + 0x18 * 3), D_ALL);
2921 	MMIO_D(_MMIO(0xec00c + 0x18 * 3), D_ALL);
2922 	MMIO_D(_MMIO(0xec408), D_ALL);
2923 	MMIO_D(_MMIO(0xec40c), D_ALL);
2924 	MMIO_D(_MMIO(0xec408 + 0x18), D_ALL);
2925 	MMIO_D(_MMIO(0xec40c + 0x18), D_ALL);
2926 	MMIO_D(_MMIO(0xec408 + 0x18 * 2), D_ALL);
2927 	MMIO_D(_MMIO(0xec40c + 0x18 * 2), D_ALL);
2928 	MMIO_D(_MMIO(0xec408 + 0x18 * 3), D_ALL);
2929 	MMIO_D(_MMIO(0xec40c + 0x18 * 3), D_ALL);
2930 	MMIO_D(_MMIO(0xfc810), D_ALL);
2931 	MMIO_D(_MMIO(0xfc81c), D_ALL);
2932 	MMIO_D(_MMIO(0xfc828), D_ALL);
2933 	MMIO_D(_MMIO(0xfc834), D_ALL);
2934 	MMIO_D(_MMIO(0xfcc00), D_ALL);
2935 	MMIO_D(_MMIO(0xfcc0c), D_ALL);
2936 	MMIO_D(_MMIO(0xfcc18), D_ALL);
2937 	MMIO_D(_MMIO(0xfcc24), D_ALL);
2938 	MMIO_D(_MMIO(0xfd000), D_ALL);
2939 	MMIO_D(_MMIO(0xfd00c), D_ALL);
2940 	MMIO_D(_MMIO(0xfd018), D_ALL);
2941 	MMIO_D(_MMIO(0xfd024), D_ALL);
2942 	MMIO_D(_MMIO(0xfd034), D_ALL);
2943 
2944 	MMIO_DH(FPGA_DBG, D_ALL, NULL, fpga_dbg_mmio_write);
2945 	MMIO_D(_MMIO(0x2054), D_ALL);
2946 	MMIO_D(_MMIO(0x12054), D_ALL);
2947 	MMIO_D(_MMIO(0x22054), D_ALL);
2948 	MMIO_D(_MMIO(0x1a054), D_ALL);
2949 
2950 	MMIO_D(_MMIO(0x44070), D_ALL);
2951 	MMIO_DFH(_MMIO(0x215c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2952 	MMIO_DFH(_MMIO(0x2178), D_ALL, F_CMD_ACCESS, NULL, NULL);
2953 	MMIO_DFH(_MMIO(0x217c), D_ALL, F_CMD_ACCESS, NULL, NULL);
2954 	MMIO_DFH(_MMIO(0x12178), D_ALL, F_CMD_ACCESS, NULL, NULL);
2955 	MMIO_DFH(_MMIO(0x1217c), D_ALL, F_CMD_ACCESS, NULL, NULL);
2956 
2957 	MMIO_F(_MMIO(0x2290), 8, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, NULL, NULL);
2958 	MMIO_D(_MMIO(0x2b00), D_BDW_PLUS);
2959 	MMIO_D(_MMIO(0x2360), D_BDW_PLUS);
2960 	MMIO_F(_MMIO(0x5200), 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2961 	MMIO_F(_MMIO(0x5240), 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2962 	MMIO_F(_MMIO(0x5280), 16, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2963 
2964 	MMIO_DFH(_MMIO(0x1c17c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2965 	MMIO_DFH(_MMIO(0x1c178), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2966 	MMIO_DFH(BCS_SWCTRL, D_ALL, F_CMD_ACCESS, NULL, NULL);
2967 
2968 	MMIO_F(HS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2969 	MMIO_F(DS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2970 	MMIO_F(IA_VERTICES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2971 	MMIO_F(IA_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2972 	MMIO_F(VS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2973 	MMIO_F(GS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2974 	MMIO_F(GS_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2975 	MMIO_F(CL_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2976 	MMIO_F(CL_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2977 	MMIO_F(PS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2978 	MMIO_F(PS_DEPTH_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2979 	MMIO_DH(_MMIO(0x4260), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
2980 	MMIO_DH(_MMIO(0x4264), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
2981 	MMIO_DH(_MMIO(0x4268), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
2982 	MMIO_DH(_MMIO(0x426c), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
2983 	MMIO_DH(_MMIO(0x4270), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
2984 	MMIO_DFH(_MMIO(0x4094), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2985 
2986 	MMIO_DFH(ARB_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2987 	MMIO_RING_GM(RING_BBADDR, D_ALL, NULL, NULL);
2988 	MMIO_DFH(_MMIO(0x2220), D_ALL, F_CMD_ACCESS, NULL, NULL);
2989 	MMIO_DFH(_MMIO(0x12220), D_ALL, F_CMD_ACCESS, NULL, NULL);
2990 	MMIO_DFH(_MMIO(0x22220), D_ALL, F_CMD_ACCESS, NULL, NULL);
2991 	MMIO_RING_DFH(RING_SYNC_1, D_ALL, F_CMD_ACCESS, NULL, NULL);
2992 	MMIO_RING_DFH(RING_SYNC_0, D_ALL, F_CMD_ACCESS, NULL, NULL);
2993 	MMIO_DFH(_MMIO(0x22178), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2994 	MMIO_DFH(_MMIO(0x1a178), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2995 	MMIO_DFH(_MMIO(0x1a17c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2996 	MMIO_DFH(_MMIO(0x2217c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2997 
2998 	MMIO_DH(EDP_PSR_IMR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write);
2999 	MMIO_DH(EDP_PSR_IIR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write);
3000 	MMIO_DH(GUC_STATUS, D_ALL, guc_status_read, NULL);
3001 
3002 	return 0;
3003 }
3004 
3005 static int init_bdw_mmio_info(struct intel_gvt *gvt)
3006 {
3007 	struct drm_i915_private *dev_priv = gvt->gt->i915;
3008 	int ret;
3009 
3010 	MMIO_DH(GEN8_GT_IMR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
3011 	MMIO_DH(GEN8_GT_IER(0), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
3012 	MMIO_DH(GEN8_GT_IIR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
3013 	MMIO_D(GEN8_GT_ISR(0), D_BDW_PLUS);
3014 
3015 	MMIO_DH(GEN8_GT_IMR(1), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
3016 	MMIO_DH(GEN8_GT_IER(1), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
3017 	MMIO_DH(GEN8_GT_IIR(1), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
3018 	MMIO_D(GEN8_GT_ISR(1), D_BDW_PLUS);
3019 
3020 	MMIO_DH(GEN8_GT_IMR(2), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
3021 	MMIO_DH(GEN8_GT_IER(2), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
3022 	MMIO_DH(GEN8_GT_IIR(2), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
3023 	MMIO_D(GEN8_GT_ISR(2), D_BDW_PLUS);
3024 
3025 	MMIO_DH(GEN8_GT_IMR(3), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
3026 	MMIO_DH(GEN8_GT_IER(3), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
3027 	MMIO_DH(GEN8_GT_IIR(3), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
3028 	MMIO_D(GEN8_GT_ISR(3), D_BDW_PLUS);
3029 
3030 	MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_A), D_BDW_PLUS, NULL,
3031 		intel_vgpu_reg_imr_handler);
3032 	MMIO_DH(GEN8_DE_PIPE_IER(PIPE_A), D_BDW_PLUS, NULL,
3033 		intel_vgpu_reg_ier_handler);
3034 	MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_A), D_BDW_PLUS, NULL,
3035 		intel_vgpu_reg_iir_handler);
3036 	MMIO_D(GEN8_DE_PIPE_ISR(PIPE_A), D_BDW_PLUS);
3037 
3038 	MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_B), D_BDW_PLUS, NULL,
3039 		intel_vgpu_reg_imr_handler);
3040 	MMIO_DH(GEN8_DE_PIPE_IER(PIPE_B), D_BDW_PLUS, NULL,
3041 		intel_vgpu_reg_ier_handler);
3042 	MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_B), D_BDW_PLUS, NULL,
3043 		intel_vgpu_reg_iir_handler);
3044 	MMIO_D(GEN8_DE_PIPE_ISR(PIPE_B), D_BDW_PLUS);
3045 
3046 	MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_C), D_BDW_PLUS, NULL,
3047 		intel_vgpu_reg_imr_handler);
3048 	MMIO_DH(GEN8_DE_PIPE_IER(PIPE_C), D_BDW_PLUS, NULL,
3049 		intel_vgpu_reg_ier_handler);
3050 	MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_C), D_BDW_PLUS, NULL,
3051 		intel_vgpu_reg_iir_handler);
3052 	MMIO_D(GEN8_DE_PIPE_ISR(PIPE_C), D_BDW_PLUS);
3053 
3054 	MMIO_DH(GEN8_DE_PORT_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
3055 	MMIO_DH(GEN8_DE_PORT_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
3056 	MMIO_DH(GEN8_DE_PORT_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
3057 	MMIO_D(GEN8_DE_PORT_ISR, D_BDW_PLUS);
3058 
3059 	MMIO_DH(GEN8_DE_MISC_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
3060 	MMIO_DH(GEN8_DE_MISC_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
3061 	MMIO_DH(GEN8_DE_MISC_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
3062 	MMIO_D(GEN8_DE_MISC_ISR, D_BDW_PLUS);
3063 
3064 	MMIO_DH(GEN8_PCU_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
3065 	MMIO_DH(GEN8_PCU_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
3066 	MMIO_DH(GEN8_PCU_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
3067 	MMIO_D(GEN8_PCU_ISR, D_BDW_PLUS);
3068 
3069 	MMIO_DH(GEN8_MASTER_IRQ, D_BDW_PLUS, NULL,
3070 		intel_vgpu_reg_master_irq_handler);
3071 
3072 	MMIO_RING_DFH(RING_ACTHD_UDW, D_BDW_PLUS, 0,
3073 		mmio_read_from_hw, NULL);
3074 
3075 #define RING_REG(base) _MMIO((base) + 0xd0)
3076 	MMIO_RING_F(RING_REG, 4, F_RO, 0,
3077 		~_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET), D_BDW_PLUS, NULL,
3078 		ring_reset_ctl_write);
3079 #undef RING_REG
3080 
3081 #define RING_REG(base) _MMIO((base) + 0x230)
3082 	MMIO_RING_DFH(RING_REG, D_BDW_PLUS, 0, NULL, elsp_mmio_write);
3083 #undef RING_REG
3084 
3085 #define RING_REG(base) _MMIO((base) + 0x234)
3086 	MMIO_RING_F(RING_REG, 8, F_RO, 0, ~0, D_BDW_PLUS,
3087 		NULL, NULL);
3088 #undef RING_REG
3089 
3090 #define RING_REG(base) _MMIO((base) + 0x244)
3091 	MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
3092 #undef RING_REG
3093 
3094 #define RING_REG(base) _MMIO((base) + 0x370)
3095 	MMIO_RING_F(RING_REG, 48, F_RO, 0, ~0, D_BDW_PLUS, NULL, NULL);
3096 #undef RING_REG
3097 
3098 #define RING_REG(base) _MMIO((base) + 0x3a0)
3099 	MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
3100 #undef RING_REG
3101 
3102 	MMIO_D(PIPEMISC(PIPE_A), D_BDW_PLUS);
3103 	MMIO_D(PIPEMISC(PIPE_B), D_BDW_PLUS);
3104 	MMIO_D(PIPEMISC(PIPE_C), D_BDW_PLUS);
3105 	MMIO_D(_MMIO(0x1c1d0), D_BDW_PLUS);
3106 	MMIO_D(GEN6_MBCUNIT_SNPCR, D_BDW_PLUS);
3107 	MMIO_D(GEN7_MISCCPCTL, D_BDW_PLUS);
3108 	MMIO_D(_MMIO(0x1c054), D_BDW_PLUS);
3109 
3110 	MMIO_DH(GEN6_PCODE_MAILBOX, D_BDW_PLUS, NULL, mailbox_write);
3111 
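	/*
	 * Device masks are plain bitmasks and can be combined: the entry
	 * below is registered for every BDW+ platform except Broxton,
	 * which presumably installs its own PAT_LO handler (see
	 * bxt_ppat_low_write() above) in its platform-specific init, not
	 * shown in this section.
	 */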
3112 	MMIO_D(GEN8_PRIVATE_PAT_LO, D_BDW_PLUS & ~D_BXT);
3113 	MMIO_D(GEN8_PRIVATE_PAT_HI, D_BDW_PLUS);
3114 
3115 	MMIO_D(GAMTARBMODE, D_BDW_PLUS);
3116 
3117 #define RING_REG(base) _MMIO((base) + 0x270)
3118 	MMIO_RING_F(RING_REG, 32, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, NULL, NULL);
3119 #undef RING_REG
3120 
3121 	MMIO_RING_GM(RING_HWS_PGA, D_BDW_PLUS, NULL, hws_pga_write);
3122 
3123 	MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
3124 
3125 	MMIO_D(CHICKEN_PIPESL_1(PIPE_A), D_BDW_PLUS);
3126 	MMIO_D(CHICKEN_PIPESL_1(PIPE_B), D_BDW_PLUS);
3127 	MMIO_D(CHICKEN_PIPESL_1(PIPE_C), D_BDW_PLUS);
3128 
3129 	MMIO_D(WM_MISC, D_BDW);
3130 	MMIO_D(_MMIO(_SRD_CTL_EDP), D_BDW);
3131 
3132 	MMIO_D(_MMIO(0x6671c), D_BDW_PLUS);
3133 	MMIO_D(_MMIO(0x66c00), D_BDW_PLUS);
3134 	MMIO_D(_MMIO(0x66c04), D_BDW_PLUS);
3135 
3136 	MMIO_D(HSW_GTT_CACHE_EN, D_BDW_PLUS);
3137 
3138 	MMIO_D(GEN8_EU_DISABLE0, D_BDW_PLUS);
3139 	MMIO_D(GEN8_EU_DISABLE1, D_BDW_PLUS);
3140 	MMIO_D(GEN8_EU_DISABLE2, D_BDW_PLUS);
3141 
3142 	MMIO_D(_MMIO(0xfdc), D_BDW_PLUS);
3143 	MMIO_DFH(GEN8_ROW_CHICKEN, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS,
3144 		NULL, NULL);
3145 	MMIO_DFH(GEN7_ROW_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS,
3146 		NULL, NULL);
3147 	MMIO_DFH(GEN8_UCGCTL6, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
3148 
3149 	MMIO_DFH(_MMIO(0xb1f0), D_BDW, F_CMD_ACCESS, NULL, NULL);
3150 	MMIO_DFH(_MMIO(0xb1c0), D_BDW, F_CMD_ACCESS, NULL, NULL);
3151 	MMIO_DFH(GEN8_L3SQCREG4, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
3152 	MMIO_DFH(_MMIO(0xb100), D_BDW, F_CMD_ACCESS, NULL, NULL);
3153 	MMIO_DFH(_MMIO(0xb10c), D_BDW, F_CMD_ACCESS, NULL, NULL);
3154 	MMIO_D(_MMIO(0xb110), D_BDW);
3155 	MMIO_D(GEN9_SCRATCH_LNCF1, D_BDW_PLUS);
3156 
3157 	MMIO_F(_MMIO(0x24d0), 48, F_CMD_ACCESS | F_CMD_WRITE_PATCH, 0, 0,
3158 		D_BDW_PLUS, NULL, force_nonpriv_write);
3159 
3160 	MMIO_D(_MMIO(0x44484), D_BDW_PLUS);
3161 	MMIO_D(_MMIO(0x4448c), D_BDW_PLUS);
3162 
3163 	MMIO_DFH(_MMIO(0x83a4), D_BDW, F_CMD_ACCESS, NULL, NULL);
3164 	MMIO_D(GEN8_L3_LRA_1_GPGPU, D_BDW_PLUS);
3165 
3166 	MMIO_DFH(_MMIO(0x8430), D_BDW, F_CMD_ACCESS, NULL, NULL);
3167 
3168 	MMIO_D(_MMIO(0x110000), D_BDW_PLUS);
3169 
3170 	MMIO_D(_MMIO(0x48400), D_BDW_PLUS);
3171 
3172 	MMIO_D(_MMIO(0x6e570), D_BDW_PLUS);
3173 	MMIO_D(_MMIO(0x65f10), D_BDW_PLUS);
3174 
3175 	MMIO_DFH(_MMIO(0xe194), D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
3176 	MMIO_DFH(_MMIO(0xe188), D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
3177 	MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
3178 	MMIO_DFH(_MMIO(0x2580), D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
3179 
3180 	MMIO_DFH(_MMIO(0x2248), D_BDW, F_CMD_ACCESS, NULL, NULL);
3181 
3182 	MMIO_DFH(_MMIO(0xe220), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
3183 	MMIO_DFH(_MMIO(0xe230), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
3184 	MMIO_DFH(_MMIO(0xe240), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
3185 	MMIO_DFH(_MMIO(0xe260), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
3186 	MMIO_DFH(_MMIO(0xe270), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
3187 	MMIO_DFH(_MMIO(0xe280), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
3188 	MMIO_DFH(_MMIO(0xe2a0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
3189 	MMIO_DFH(_MMIO(0xe2b0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
3190 	MMIO_DFH(_MMIO(0xe2c0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
3191 	MMIO_DFH(_MMIO(0x21f0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
3192 	return 0;
3193 }
3194 
3195 static int init_skl_mmio_info(struct intel_gvt *gvt)
3196 {
3197 	struct drm_i915_private *dev_priv = gvt->gt->i915;
3198 	int ret;
3199 
3200 	MMIO_DH(FORCEWAKE_RENDER_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
3201 	MMIO_DH(FORCEWAKE_ACK_RENDER_GEN9, D_SKL_PLUS, NULL, NULL);
3202 	MMIO_DH(FORCEWAKE_GT_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
3203 	MMIO_DH(FORCEWAKE_ACK_GT_GEN9, D_SKL_PLUS, NULL, NULL);
3204 	MMIO_DH(FORCEWAKE_MEDIA_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
3205 	MMIO_DH(FORCEWAKE_ACK_MEDIA_GEN9, D_SKL_PLUS, NULL, NULL);
3206 
3207 	MMIO_F(DP_AUX_CH_CTL(AUX_CH_B), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
3208 						dp_aux_ch_ctl_mmio_write);
3209 	MMIO_F(DP_AUX_CH_CTL(AUX_CH_C), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
3210 						dp_aux_ch_ctl_mmio_write);
3211 	MMIO_F(DP_AUX_CH_CTL(AUX_CH_D), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
3212 						dp_aux_ch_ctl_mmio_write);
3213 
3214 	MMIO_D(HSW_PWR_WELL_CTL1, D_SKL_PLUS);
3215 	MMIO_DH(HSW_PWR_WELL_CTL2, D_SKL_PLUS, NULL, skl_power_well_ctl_write);
3216 
3217 	MMIO_DH(DBUF_CTL_S(0), D_SKL_PLUS, NULL, gen9_dbuf_ctl_mmio_write);
3218 
3219 	MMIO_D(GEN9_PG_ENABLE, D_SKL_PLUS);
3220 	MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
3221 	MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
3222 	MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
3223 	MMIO_DFH(MMCD_MISC_CTRL, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
3224 	MMIO_DH(CHICKEN_PAR1_1, D_SKL_PLUS, NULL, NULL);
3225 	MMIO_D(DC_STATE_EN, D_SKL_PLUS);
3226 	MMIO_D(DC_STATE_DEBUG, D_SKL_PLUS);
3227 	MMIO_D(CDCLK_CTL, D_SKL_PLUS);
3228 	MMIO_DH(LCPLL1_CTL, D_SKL_PLUS, NULL, skl_lcpll_write);
3229 	MMIO_DH(LCPLL2_CTL, D_SKL_PLUS, NULL, skl_lcpll_write);
3230 	MMIO_D(_MMIO(_DPLL1_CFGCR1), D_SKL_PLUS);
3231 	MMIO_D(_MMIO(_DPLL2_CFGCR1), D_SKL_PLUS);
3232 	MMIO_D(_MMIO(_DPLL3_CFGCR1), D_SKL_PLUS);
3233 	MMIO_D(_MMIO(_DPLL1_CFGCR2), D_SKL_PLUS);
3234 	MMIO_D(_MMIO(_DPLL2_CFGCR2), D_SKL_PLUS);
3235 	MMIO_D(_MMIO(_DPLL3_CFGCR2), D_SKL_PLUS);
3236 	MMIO_D(DPLL_CTRL1, D_SKL_PLUS);
3237 	MMIO_D(DPLL_CTRL2, D_SKL_PLUS);
3238 	MMIO_DH(DPLL_STATUS, D_SKL_PLUS, dpll_status_read, NULL);
3239 
3240 	MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
3241 	MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
3242 	MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write);
3243 	MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write);
3244 	MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write);
3245 	MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write);
3246 
3247 	MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
3248 	MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
3249 	MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write);
3250 	MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write);
3251 	MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write);
3252 	MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write);
3253 
3254 	MMIO_DH(SKL_PS_CTRL(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
3255 	MMIO_DH(SKL_PS_CTRL(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
3256 	MMIO_DH(SKL_PS_CTRL(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write);
3257 	MMIO_DH(SKL_PS_CTRL(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write);
3258 	MMIO_DH(SKL_PS_CTRL(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write);
3259 	MMIO_DH(SKL_PS_CTRL(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write);
3260 
3261 	MMIO_DH(PLANE_BUF_CFG(PIPE_A, 0), D_SKL_PLUS, NULL, NULL);
3262 	MMIO_DH(PLANE_BUF_CFG(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
3263 	MMIO_DH(PLANE_BUF_CFG(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
3264 	MMIO_DH(PLANE_BUF_CFG(PIPE_A, 3), D_SKL_PLUS, NULL, NULL);
3265 
3266 	MMIO_DH(PLANE_BUF_CFG(PIPE_B, 0), D_SKL_PLUS, NULL, NULL);
3267 	MMIO_DH(PLANE_BUF_CFG(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
3268 	MMIO_DH(PLANE_BUF_CFG(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
3269 	MMIO_DH(PLANE_BUF_CFG(PIPE_B, 3), D_SKL_PLUS, NULL, NULL);
3270 
3271 	MMIO_DH(PLANE_BUF_CFG(PIPE_C, 0), D_SKL_PLUS, NULL, NULL);
3272 	MMIO_DH(PLANE_BUF_CFG(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
3273 	MMIO_DH(PLANE_BUF_CFG(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
3274 	MMIO_DH(PLANE_BUF_CFG(PIPE_C, 3), D_SKL_PLUS, NULL, NULL);
3275 
3276 	MMIO_DH(CUR_BUF_CFG(PIPE_A), D_SKL_PLUS, NULL, NULL);
3277 	MMIO_DH(CUR_BUF_CFG(PIPE_B), D_SKL_PLUS, NULL, NULL);
3278 	MMIO_DH(CUR_BUF_CFG(PIPE_C), D_SKL_PLUS, NULL, NULL);
3279 
3280 	MMIO_F(PLANE_WM(PIPE_A, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
3281 	MMIO_F(PLANE_WM(PIPE_A, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
3282 	MMIO_F(PLANE_WM(PIPE_A, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
3283 
3284 	MMIO_F(PLANE_WM(PIPE_B, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
3285 	MMIO_F(PLANE_WM(PIPE_B, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
3286 	MMIO_F(PLANE_WM(PIPE_B, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
3287 
3288 	MMIO_F(PLANE_WM(PIPE_C, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
3289 	MMIO_F(PLANE_WM(PIPE_C, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
3290 	MMIO_F(PLANE_WM(PIPE_C, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
3291 
3292 	MMIO_F(CUR_WM(PIPE_A, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
3293 	MMIO_F(CUR_WM(PIPE_B, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
3294 	MMIO_F(CUR_WM(PIPE_C, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
3295 
3296 	MMIO_DH(PLANE_WM_TRANS(PIPE_A, 0), D_SKL_PLUS, NULL, NULL);
3297 	MMIO_DH(PLANE_WM_TRANS(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
3298 	MMIO_DH(PLANE_WM_TRANS(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
3299 
3300 	MMIO_DH(PLANE_WM_TRANS(PIPE_B, 0), D_SKL_PLUS, NULL, NULL);
3301 	MMIO_DH(PLANE_WM_TRANS(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
3302 	MMIO_DH(PLANE_WM_TRANS(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
3303 
3304 	MMIO_DH(PLANE_WM_TRANS(PIPE_C, 0), D_SKL_PLUS, NULL, NULL);
3305 	MMIO_DH(PLANE_WM_TRANS(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
3306 	MMIO_DH(PLANE_WM_TRANS(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
3307 
3308 	MMIO_DH(CUR_WM_TRANS(PIPE_A), D_SKL_PLUS, NULL, NULL);
3309 	MMIO_DH(CUR_WM_TRANS(PIPE_B), D_SKL_PLUS, NULL, NULL);
3310 	MMIO_DH(CUR_WM_TRANS(PIPE_C), D_SKL_PLUS, NULL, NULL);
3311 
3312 	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 0), D_SKL_PLUS, NULL, NULL);
3313 	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
3314 	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
3315 	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 3), D_SKL_PLUS, NULL, NULL);
3316 
3317 	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 0), D_SKL_PLUS, NULL, NULL);
3318 	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
3319 	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
3320 	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 3), D_SKL_PLUS, NULL, NULL);
3321 
3322 	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 0), D_SKL_PLUS, NULL, NULL);
3323 	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
3324 	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
3325 	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 3), D_SKL_PLUS, NULL, NULL);
3326 
3327 	MMIO_DH(_MMIO(_REG_701C0(PIPE_A, 1)), D_SKL_PLUS, NULL, NULL);
3328 	MMIO_DH(_MMIO(_REG_701C0(PIPE_A, 2)), D_SKL_PLUS, NULL, NULL);
3329 	MMIO_DH(_MMIO(_REG_701C0(PIPE_A, 3)), D_SKL_PLUS, NULL, NULL);
3330 	MMIO_DH(_MMIO(_REG_701C0(PIPE_A, 4)), D_SKL_PLUS, NULL, NULL);
3331 
3332 	MMIO_DH(_MMIO(_REG_701C0(PIPE_B, 1)), D_SKL_PLUS, NULL, NULL);
3333 	MMIO_DH(_MMIO(_REG_701C0(PIPE_B, 2)), D_SKL_PLUS, NULL, NULL);
3334 	MMIO_DH(_MMIO(_REG_701C0(PIPE_B, 3)), D_SKL_PLUS, NULL, NULL);
3335 	MMIO_DH(_MMIO(_REG_701C0(PIPE_B, 4)), D_SKL_PLUS, NULL, NULL);
3336 
3337 	MMIO_DH(_MMIO(_REG_701C0(PIPE_C, 1)), D_SKL_PLUS, NULL, NULL);
3338 	MMIO_DH(_MMIO(_REG_701C0(PIPE_C, 2)), D_SKL_PLUS, NULL, NULL);
3339 	MMIO_DH(_MMIO(_REG_701C0(PIPE_C, 3)), D_SKL_PLUS, NULL, NULL);
3340 	MMIO_DH(_MMIO(_REG_701C0(PIPE_C, 4)), D_SKL_PLUS, NULL, NULL);
3341 
3342 	MMIO_DH(_MMIO(_REG_701C4(PIPE_A, 1)), D_SKL_PLUS, NULL, NULL);
3343 	MMIO_DH(_MMIO(_REG_701C4(PIPE_A, 2)), D_SKL_PLUS, NULL, NULL);
3344 	MMIO_DH(_MMIO(_REG_701C4(PIPE_A, 3)), D_SKL_PLUS, NULL, NULL);
3345 	MMIO_DH(_MMIO(_REG_701C4(PIPE_A, 4)), D_SKL_PLUS, NULL, NULL);
3346 
3347 	MMIO_DH(_MMIO(_REG_701C4(PIPE_B, 1)), D_SKL_PLUS, NULL, NULL);
3348 	MMIO_DH(_MMIO(_REG_701C4(PIPE_B, 2)), D_SKL_PLUS, NULL, NULL);
3349 	MMIO_DH(_MMIO(_REG_701C4(PIPE_B, 3)), D_SKL_PLUS, NULL, NULL);
3350 	MMIO_DH(_MMIO(_REG_701C4(PIPE_B, 4)), D_SKL_PLUS, NULL, NULL);
3351 
3352 	MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 1)), D_SKL_PLUS, NULL, NULL);
3353 	MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 2)), D_SKL_PLUS, NULL, NULL);
3354 	MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 3)), D_SKL_PLUS, NULL, NULL);
3355 	MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 4)), D_SKL_PLUS, NULL, NULL);
3356 
3357 	MMIO_D(_MMIO(_PLANE_CTL_3_A), D_SKL_PLUS);
3358 	MMIO_D(_MMIO(_PLANE_CTL_3_B), D_SKL_PLUS);
3359 	MMIO_D(_MMIO(0x72380), D_SKL_PLUS);
3360 	MMIO_D(_MMIO(0x7239c), D_SKL_PLUS);
3361 	MMIO_D(_MMIO(_PLANE_SURF_3_A), D_SKL_PLUS);
3362 	MMIO_D(_MMIO(_PLANE_SURF_3_B), D_SKL_PLUS);
3363 
3364 	MMIO_D(DMC_SSP_BASE, D_SKL_PLUS);
3365 	MMIO_D(DMC_HTP_SKL, D_SKL_PLUS);
3366 	MMIO_D(DMC_LAST_WRITE, D_SKL_PLUS);
3367 
3368 	MMIO_DFH(BDW_SCRATCH1, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
3369 
3370 	MMIO_D(SKL_DFSM, D_SKL_PLUS);
3371 	MMIO_D(DISPIO_CR_TX_BMU_CR0, D_SKL_PLUS);
3372 
3373 	MMIO_F(GEN9_GFX_MOCS(0), 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
3374 		NULL, NULL);
3375 	MMIO_F(GEN7_L3CNTLREG2, 0x80, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
3376 		NULL, NULL);
3377 
3378 	MMIO_D(RPM_CONFIG0, D_SKL_PLUS);
3379 	MMIO_D(_MMIO(0xd08), D_SKL_PLUS);
3380 	MMIO_D(RC6_LOCATION, D_SKL_PLUS);
3381 	MMIO_DFH(GEN7_FF_SLICE_CS_CHICKEN1, D_SKL_PLUS,
3382 		 F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
3383 	MMIO_DFH(GEN9_CS_DEBUG_MODE1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
3384 		NULL, NULL);
3385 
3386 	/* TRTT */
3387 	MMIO_DFH(TRVATTL3PTRDW(0), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
3388 	MMIO_DFH(TRVATTL3PTRDW(1), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
3389 	MMIO_DFH(TRVATTL3PTRDW(2), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
3390 	MMIO_DFH(TRVATTL3PTRDW(3), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
3391 	MMIO_DFH(TRVADR, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
3392 	MMIO_DFH(TRTTE, D_SKL_PLUS, F_CMD_ACCESS | F_PM_SAVE,
3393 		 NULL, gen9_trtte_write);
3394 	MMIO_DFH(_MMIO(0x4dfc), D_SKL_PLUS, F_PM_SAVE,
3395 		 NULL, gen9_trtt_chicken_write);
3396 
3397 	MMIO_D(_MMIO(0x46430), D_SKL_PLUS);
3398 
3399 	MMIO_D(_MMIO(0x46520), D_SKL_PLUS);
3400 
3401 	MMIO_D(_MMIO(0xc403c), D_SKL_PLUS);
3402 	MMIO_DFH(GEN8_GARBCNTL, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
3403 	MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write);
3404 
3405 	MMIO_D(_MMIO(0x65900), D_SKL_PLUS);
3406 	MMIO_D(GEN6_STOLEN_RESERVED, D_SKL_PLUS);
3407 	MMIO_D(_MMIO(0x4068), D_SKL_PLUS);
3408 	MMIO_D(_MMIO(0x67054), D_SKL_PLUS);
3409 	MMIO_D(_MMIO(0x6e560), D_SKL_PLUS);
3410 	MMIO_D(_MMIO(0x6e554), D_SKL_PLUS);
3411 	MMIO_D(_MMIO(0x2b20), D_SKL_PLUS);
3412 	MMIO_D(_MMIO(0x65f00), D_SKL_PLUS);
3413 	MMIO_D(_MMIO(0x65f08), D_SKL_PLUS);
3414 	MMIO_D(_MMIO(0x320f0), D_SKL_PLUS);
3415 
3416 	MMIO_D(_MMIO(0x70034), D_SKL_PLUS);
3417 	MMIO_D(_MMIO(0x71034), D_SKL_PLUS);
3418 	MMIO_D(_MMIO(0x72034), D_SKL_PLUS);
3419 
3420 	MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_A)), D_SKL_PLUS);
3421 	MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_B)), D_SKL_PLUS);
3422 	MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_C)), D_SKL_PLUS);
3423 	MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_A)), D_SKL_PLUS);
3424 	MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_B)), D_SKL_PLUS);
3425 	MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_C)), D_SKL_PLUS);
3426 	MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_A)), D_SKL_PLUS);
3427 	MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_B)), D_SKL_PLUS);
3428 	MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_C)), D_SKL_PLUS);
3429 
3430 	MMIO_D(_MMIO(0x44500), D_SKL_PLUS);
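/* CSFE_CHICKEN1 sits at a fixed 0xd4 offset from each engine's MMIO base,
 * so register it per ring through a local helper macro.
 */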
3431 #define CSFE_CHICKEN1_REG(base) _MMIO((base) + 0xD4)
3432 	MMIO_RING_DFH(CSFE_CHICKEN1_REG, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
3433 		      NULL, csfe_chicken1_mmio_write);
3434 #undef CSFE_CHICKEN1_REG
3435 	MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
3436 		 NULL, NULL);
3437 	MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
3438 		 NULL, NULL);
3439 
3440 	MMIO_DFH(GAMT_CHKN_BIT_REG, D_KBL | D_CFL, F_CMD_ACCESS, NULL, NULL);
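	/* BXT registers GEN9_CTX_PREEMPT_REG itself (with F_CMD_ACCESS) in
	 * init_bxt_mmio_info(), so exclude it from the SKL+ set here.
	 */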
3441 	MMIO_D(GEN9_CTX_PREEMPT_REG, D_SKL_PLUS & ~D_BXT);
3442 	MMIO_DFH(_MMIO(0xe4cc), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
3443 
3444 	return 0;
3445 }
3446 
3447 static int init_bxt_mmio_info(struct intel_gvt *gvt)
3448 {
3449 	struct drm_i915_private *dev_priv = gvt->gt->i915;
3450 	int ret;
3451 
3452 	MMIO_F(_MMIO(0x80000), 0x3000, 0, 0, 0, D_BXT, NULL, NULL);
3453 
3454 	MMIO_D(GEN7_SAMPLER_INSTDONE, D_BXT);
3455 	MMIO_D(GEN7_ROW_INSTDONE, D_BXT);
3456 	MMIO_D(GEN8_FAULT_TLB_DATA0, D_BXT);
3457 	MMIO_D(GEN8_FAULT_TLB_DATA1, D_BXT);
3458 	MMIO_D(ERROR_GEN6, D_BXT);
3459 	MMIO_D(DONE_REG, D_BXT);
3460 	MMIO_D(EIR, D_BXT);
3461 	MMIO_D(PGTBL_ER, D_BXT);
3462 	MMIO_D(_MMIO(0x4194), D_BXT);
3463 	MMIO_D(_MMIO(0x4294), D_BXT);
3464 	MMIO_D(_MMIO(0x4494), D_BXT);
3465 
3466 	MMIO_RING_D(RING_PSMI_CTL, D_BXT);
3467 	MMIO_RING_D(RING_DMA_FADD, D_BXT);
3468 	MMIO_RING_D(RING_DMA_FADD_UDW, D_BXT);
3469 	MMIO_RING_D(RING_IPEHR, D_BXT);
3470 	MMIO_RING_D(RING_INSTPS, D_BXT);
3471 	MMIO_RING_D(RING_BBADDR_UDW, D_BXT);
3472 	MMIO_RING_D(RING_BBSTATE, D_BXT);
3473 	MMIO_RING_D(RING_IPEIR, D_BXT);
3474 
3475 	MMIO_F(SOFT_SCRATCH(0), 16 * 4, 0, 0, 0, D_BXT, NULL, NULL);
3476 
3477 	MMIO_DH(BXT_P_CR_GT_DISP_PWRON, D_BXT, NULL, bxt_gt_disp_pwron_write);
3478 	MMIO_D(BXT_RP_STATE_CAP, D_BXT);
3479 	MMIO_DH(BXT_PHY_CTL_FAMILY(DPIO_PHY0), D_BXT,
3480 		NULL, bxt_phy_ctl_family_write);
3481 	MMIO_DH(BXT_PHY_CTL_FAMILY(DPIO_PHY1), D_BXT,
3482 		NULL, bxt_phy_ctl_family_write);
3483 	MMIO_D(BXT_PHY_CTL(PORT_A), D_BXT);
3484 	MMIO_D(BXT_PHY_CTL(PORT_B), D_BXT);
3485 	MMIO_D(BXT_PHY_CTL(PORT_C), D_BXT);
3486 	MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_A), D_BXT,
3487 		NULL, bxt_port_pll_enable_write);
3488 	MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_B), D_BXT,
3489 		NULL, bxt_port_pll_enable_write);
3490 	MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_C), D_BXT, NULL,
3491 		bxt_port_pll_enable_write);
3492 
3493 	MMIO_D(BXT_PORT_CL1CM_DW0(DPIO_PHY0), D_BXT);
3494 	MMIO_D(BXT_PORT_CL1CM_DW9(DPIO_PHY0), D_BXT);
3495 	MMIO_D(BXT_PORT_CL1CM_DW10(DPIO_PHY0), D_BXT);
3496 	MMIO_D(BXT_PORT_CL1CM_DW28(DPIO_PHY0), D_BXT);
3497 	MMIO_D(BXT_PORT_CL1CM_DW30(DPIO_PHY0), D_BXT);
3498 	MMIO_D(BXT_PORT_CL2CM_DW6(DPIO_PHY0), D_BXT);
3499 	MMIO_D(BXT_PORT_REF_DW3(DPIO_PHY0), D_BXT);
3500 	MMIO_D(BXT_PORT_REF_DW6(DPIO_PHY0), D_BXT);
3501 	MMIO_D(BXT_PORT_REF_DW8(DPIO_PHY0), D_BXT);
3502 
3503 	MMIO_D(BXT_PORT_CL1CM_DW0(DPIO_PHY1), D_BXT);
3504 	MMIO_D(BXT_PORT_CL1CM_DW9(DPIO_PHY1), D_BXT);
3505 	MMIO_D(BXT_PORT_CL1CM_DW10(DPIO_PHY1), D_BXT);
3506 	MMIO_D(BXT_PORT_CL1CM_DW28(DPIO_PHY1), D_BXT);
3507 	MMIO_D(BXT_PORT_CL1CM_DW30(DPIO_PHY1), D_BXT);
3508 	MMIO_D(BXT_PORT_CL2CM_DW6(DPIO_PHY1), D_BXT);
3509 	MMIO_D(BXT_PORT_REF_DW3(DPIO_PHY1), D_BXT);
3510 	MMIO_D(BXT_PORT_REF_DW6(DPIO_PHY1), D_BXT);
3511 	MMIO_D(BXT_PORT_REF_DW8(DPIO_PHY1), D_BXT);
3512 
3513 	MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY0, DPIO_CH0), D_BXT);
3514 	MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY0, DPIO_CH0), D_BXT);
3515 	MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY0, DPIO_CH0), D_BXT);
3516 	MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY0, DPIO_CH0), D_BXT);
3517 	MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY0, DPIO_CH0), D_BXT);
3518 	MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY0, DPIO_CH0), D_BXT);
3519 	MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH0), D_BXT,
3520 		NULL, bxt_pcs_dw12_grp_write);
3521 	MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY0, DPIO_CH0), D_BXT);
3522 	MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY0, DPIO_CH0), D_BXT);
3523 	MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH0), D_BXT,
3524 		bxt_port_tx_dw3_read, NULL);
3525 	MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY0, DPIO_CH0), D_BXT);
3526 	MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY0, DPIO_CH0), D_BXT);
3527 	MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY0, DPIO_CH0), D_BXT);
3528 	MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 0), D_BXT);
3529 	MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 1), D_BXT);
3530 	MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 2), D_BXT);
3531 	MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 3), D_BXT);
3532 	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 0), D_BXT);
3533 	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 1), D_BXT);
3534 	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 2), D_BXT);
3535 	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 3), D_BXT);
3536 	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 6), D_BXT);
3537 	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 8), D_BXT);
3538 	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 9), D_BXT);
3539 	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 10), D_BXT);
3540 
3541 	MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY0, DPIO_CH1), D_BXT);
3542 	MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY0, DPIO_CH1), D_BXT);
3543 	MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY0, DPIO_CH1), D_BXT);
3544 	MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY0, DPIO_CH1), D_BXT);
3545 	MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY0, DPIO_CH1), D_BXT);
3546 	MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY0, DPIO_CH1), D_BXT);
3547 	MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH1), D_BXT,
3548 		NULL, bxt_pcs_dw12_grp_write);
3549 	MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY0, DPIO_CH1), D_BXT);
3550 	MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY0, DPIO_CH1), D_BXT);
3551 	MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH1), D_BXT,
3552 		bxt_port_tx_dw3_read, NULL);
3553 	MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY0, DPIO_CH1), D_BXT);
3554 	MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY0, DPIO_CH1), D_BXT);
3555 	MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY0, DPIO_CH1), D_BXT);
3556 	MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 0), D_BXT);
3557 	MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 1), D_BXT);
3558 	MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 2), D_BXT);
3559 	MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 3), D_BXT);
3560 	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 0), D_BXT);
3561 	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 1), D_BXT);
3562 	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 2), D_BXT);
3563 	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 3), D_BXT);
3564 	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 6), D_BXT);
3565 	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 8), D_BXT);
3566 	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 9), D_BXT);
3567 	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 10), D_BXT);
3568 
3569 	MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY1, DPIO_CH0), D_BXT);
3570 	MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY1, DPIO_CH0), D_BXT);
3571 	MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY1, DPIO_CH0), D_BXT);
3572 	MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY1, DPIO_CH0), D_BXT);
3573 	MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY1, DPIO_CH0), D_BXT);
3574 	MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY1, DPIO_CH0), D_BXT);
3575 	MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY1, DPIO_CH0), D_BXT,
3576 		NULL, bxt_pcs_dw12_grp_write);
3577 	MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY1, DPIO_CH0), D_BXT);
3578 	MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY1, DPIO_CH0), D_BXT);
3579 	MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY1, DPIO_CH0), D_BXT,
3580 		bxt_port_tx_dw3_read, NULL);
3581 	MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY1, DPIO_CH0), D_BXT);
3582 	MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY1, DPIO_CH0), D_BXT);
3583 	MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY1, DPIO_CH0), D_BXT);
3584 	MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 0), D_BXT);
3585 	MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 1), D_BXT);
3586 	MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 2), D_BXT);
3587 	MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 3), D_BXT);
3588 	MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 0), D_BXT);
3589 	MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 1), D_BXT);
3590 	MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 2), D_BXT);
3591 	MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 3), D_BXT);
3592 	MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 6), D_BXT);
3593 	MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 8), D_BXT);
3594 	MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 9), D_BXT);
3595 	MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 10), D_BXT);
3596 
3597 	MMIO_D(BXT_DE_PLL_CTL, D_BXT);
3598 	MMIO_DH(BXT_DE_PLL_ENABLE, D_BXT, NULL, bxt_de_pll_enable_write);
3599 	MMIO_D(BXT_DSI_PLL_CTL, D_BXT);
3600 	MMIO_D(BXT_DSI_PLL_ENABLE, D_BXT);
3601 
3602 	MMIO_D(GEN9_CLKGATE_DIS_0, D_BXT);
3603 	MMIO_D(GEN9_CLKGATE_DIS_4, D_BXT);
3604 
3605 	MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_A), D_BXT);
3606 	MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B), D_BXT);
3607 	MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_C), D_BXT);
3608 
3609 	MMIO_D(RC6_CTX_BASE, D_BXT);
3610 
3611 	MMIO_D(GEN8_PUSHBUS_CONTROL, D_BXT);
3612 	MMIO_D(GEN8_PUSHBUS_ENABLE, D_BXT);
3613 	MMIO_D(GEN8_PUSHBUS_SHIFT, D_BXT);
3614 	MMIO_D(GEN6_GFXPAUSE, D_BXT);
3615 	MMIO_DFH(GEN8_L3SQCREG1, D_BXT, F_CMD_ACCESS, NULL, NULL);
3616 	MMIO_DFH(GEN8_L3CNTLREG, D_BXT, F_CMD_ACCESS, NULL, NULL);
3617 	MMIO_DFH(_MMIO(0x20D8), D_BXT, F_CMD_ACCESS, NULL, NULL);
3618 	MMIO_F(GEN8_RING_CS_GPR(RENDER_RING_BASE, 0), 0x40, F_CMD_ACCESS,
3619 	       0, 0, D_BXT, NULL, NULL);
3620 	MMIO_F(GEN8_RING_CS_GPR(GEN6_BSD_RING_BASE, 0), 0x40, F_CMD_ACCESS,
3621 	       0, 0, D_BXT, NULL, NULL);
3622 	MMIO_F(GEN8_RING_CS_GPR(BLT_RING_BASE, 0), 0x40, F_CMD_ACCESS,
3623 	       0, 0, D_BXT, NULL, NULL);
3624 	MMIO_F(GEN8_RING_CS_GPR(VEBOX_RING_BASE, 0), 0x40, F_CMD_ACCESS,
3625 	       0, 0, D_BXT, NULL, NULL);
3626 
3627 	MMIO_DFH(GEN9_CTX_PREEMPT_REG, D_BXT, F_CMD_ACCESS, NULL, NULL);
3628 
3629 	MMIO_DH(GEN8_PRIVATE_PAT_LO, D_BXT, NULL, bxt_ppat_low_write);
3630 
3631 	return 0;
3632 }
3633 
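/* Return the special MMIO block that covers @offset on this device, if any. */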
3634 static const struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt,
3635 						    unsigned int offset)
3636 {
3637 	unsigned long device = intel_gvt_get_device_type(gvt);
3638 	const struct gvt_mmio_block *block = gvt->mmio.mmio_block;
3639 	int num = gvt->mmio.num_mmio_block;
3640 	int i;
3641 
3642 	for (i = 0; i < num; i++, block++) {
3643 		if (!(device & block->device))
3644 			continue;
3645 		if (offset >= i915_mmio_reg_offset(block->offset) &&
3646 		    offset < i915_mmio_reg_offset(block->offset) + block->size)
3647 			return block;
3648 	}
3649 	return NULL;
3650 }
3651 
3652 /**
3653  * intel_gvt_clean_mmio_info - clean up MMIO information table for GVT device
3654  * @gvt: GVT device
3655  *
3656  * This function is called at the driver unloading stage to clean up the MMIO
3657  * information table of the GVT device.
3658  *
3659  */
3660 void intel_gvt_clean_mmio_info(struct intel_gvt *gvt)
3661 {
3662 	struct hlist_node *tmp;
3663 	struct intel_gvt_mmio_info *e;
3664 	int i;
3665 
3666 	hash_for_each_safe(gvt->mmio.mmio_info_table, i, tmp, e, node)
3667 		kfree(e);
3668 
3669 	vfree(gvt->mmio.mmio_attribute);
3670 	gvt->mmio.mmio_attribute = NULL;
3671 }
3672 
3673 /* Special MMIO blocks. Registers in these block ranges must not be command
3674  * accessible (i.e. must not carry the F_CMD_ACCESS flag); otherwise,
3675  * cmd_reg_handler() in cmd_parser.c needs to be updated as well.
3676  */
3677 static const struct gvt_mmio_block mmio_blocks[] = {
3678 	{D_SKL_PLUS, _MMIO(DMC_MMIO_START_RANGE), 0x3000, NULL, NULL},
3679 	{D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
3680 	{D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE,
3681 		pvinfo_mmio_read, pvinfo_mmio_write},
3682 	{D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL},
3683 	{D_ALL, LGC_PALETTE(PIPE_B, 0), 1024, NULL, NULL},
3684 	{D_ALL, LGC_PALETTE(PIPE_C, 0), 1024, NULL, NULL},
3685 };
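/* intel_vgpu_mmio_reg_rw() consults these blocks before the per-register
 * mmio_info table, so a block's handlers take precedence for its range.
 */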
3686 
3687 /**
3688  * intel_gvt_setup_mmio_info - setup MMIO information table for GVT device
3689  * @gvt: GVT device
3690  *
3691  * This function is called at the initialization stage to set up the MMIO
3692  * information table for the GVT device.
3693  *
3694  * Returns:
3695  * Zero on success, negative error code if failed.
3696  */
3697 int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
3698 {
3699 	struct intel_gvt_device_info *info = &gvt->device_info;
3700 	struct drm_i915_private *i915 = gvt->gt->i915;
3701 	int size = info->mmio_size / 4 * sizeof(*gvt->mmio.mmio_attribute);
3702 	int ret;
3703 
3704 	gvt->mmio.mmio_attribute = vzalloc(size);
3705 	if (!gvt->mmio.mmio_attribute)
3706 		return -ENOMEM;
3707 
3708 	ret = init_generic_mmio_info(gvt);
3709 	if (ret)
3710 		goto err;
3711 
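	/*
	 * Platform tables stack on top of the generic one: BDW uses its own
	 * table; SKL/KBL/CFL/CML layer the SKL table over BDW's; BXT layers
	 * its own table over both.
	 */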
3712 	if (IS_BROADWELL(i915)) {
3713 		ret = init_bdw_mmio_info(gvt);
3714 		if (ret)
3715 			goto err;
3716 	} else if (IS_SKYLAKE(i915) ||
3717 		   IS_KABYLAKE(i915) ||
3718 		   IS_COFFEELAKE(i915) ||
3719 		   IS_COMETLAKE(i915)) {
3720 		ret = init_bdw_mmio_info(gvt);
3721 		if (ret)
3722 			goto err;
3723 		ret = init_skl_mmio_info(gvt);
3724 		if (ret)
3725 			goto err;
3726 	} else if (IS_BROXTON(i915)) {
3727 		ret = init_bdw_mmio_info(gvt);
3728 		if (ret)
3729 			goto err;
3730 		ret = init_skl_mmio_info(gvt);
3731 		if (ret)
3732 			goto err;
3733 		ret = init_bxt_mmio_info(gvt);
3734 		if (ret)
3735 			goto err;
3736 	}
3737 
3738 	gvt->mmio.mmio_block = mmio_blocks;
3739 	gvt->mmio.num_mmio_block = ARRAY_SIZE(mmio_blocks);
3740 
3741 	return 0;
3742 err:
3743 	intel_gvt_clean_mmio_info(gvt);
3744 	return ret;
3745 }
3746 
3747 /**
3748  * intel_gvt_for_each_tracked_mmio - iterate each tracked mmio
3749  * @gvt: a GVT device
3750  * @handler: the handler
3751  * @data: private data given to handler
3752  *
3753  * Returns:
3754  * Zero on success, negative error code if failed.
3755  */
3756 int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt,
3757 	int (*handler)(struct intel_gvt *gvt, u32 offset, void *data),
3758 	void *data)
3759 {
3760 	const struct gvt_mmio_block *block = gvt->mmio.mmio_block;
3761 	struct intel_gvt_mmio_info *e;
3762 	int i, j, ret;
3763 
3764 	hash_for_each(gvt->mmio.mmio_info_table, i, e, node) {
3765 		ret = handler(gvt, e->offset, data);
3766 		if (ret)
3767 			return ret;
3768 	}
3769 
3770 	for (i = 0; i < gvt->mmio.num_mmio_block; i++, block++) {
3771 		/* pvinfo data doesn't come from hw mmio */
3772 		if (i915_mmio_reg_offset(block->offset) == VGT_PVINFO_PAGE)
3773 			continue;
3774 
3775 		for (j = 0; j < block->size; j += 4) {
3776 			ret = handler(gvt,
3777 				      i915_mmio_reg_offset(block->offset) + j,
3778 				      data);
3779 			if (ret)
3780 				return ret;
3781 		}
3782 	}
3783 	return 0;
3784 }
3785 
3786 /**
3787  * intel_vgpu_default_mmio_read - default MMIO read handler
3788  * @vgpu: a vGPU
3789  * @offset: access offset
3790  * @p_data: data return buffer
3791  * @bytes: access data length
3792  *
3793  * Returns:
3794  * Zero on success, negative error code if failed.
3795  */
3796 int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
3797 		void *p_data, unsigned int bytes)
3798 {
3799 	read_vreg(vgpu, offset, p_data, bytes);
3800 	return 0;
3801 }
3802 
3803 /**
3804  * intel_vgpu_default_mmio_write - default MMIO write handler
3805  * @vgpu: a vGPU
3806  * @offset: access offset
3807  * @p_data: write data buffer
3808  * @bytes: access data length
3809  *
3810  * Returns:
3811  * Zero on success, negative error code if failed.
3812  */
3813 int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
3814 		void *p_data, unsigned int bytes)
3815 {
3816 	write_vreg(vgpu, offset, p_data, bytes);
3817 	return 0;
3818 }
3819 
3820 /**
3821  * intel_vgpu_mask_mmio_write - write mask register
3822  * @vgpu: a vGPU
3823  * @offset: access offset
3824  * @p_data: write data buffer
3825  * @bytes: access data length
3826  *
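 * The upper 16 bits of a mask register select which of its lower 16 bits
 * the write may change; unselected bits keep their previous value. For
 * example, writing 0x00010001 sets bit 0 while writing 0x00010000 clears
 * it, leaving every other bit untouched.
 *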
3827  * Returns:
3828  * Zero on success, negative error code if failed.
3829  */
3830 int intel_vgpu_mask_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
3831 		void *p_data, unsigned int bytes)
3832 {
3833 	u32 mask, old_vreg;
3834 
3835 	old_vreg = vgpu_vreg(vgpu, offset);
3836 	write_vreg(vgpu, offset, p_data, bytes);
3837 	mask = vgpu_vreg(vgpu, offset) >> 16;
3838 	vgpu_vreg(vgpu, offset) = (old_vreg & ~mask) |
3839 				(vgpu_vreg(vgpu, offset) & mask);
3840 
3841 	return 0;
3842 }
3843 
3844 /**
3845  * intel_gvt_in_force_nonpriv_whitelist - check whether an MMIO register is
3846  * in the force-nonpriv whitelist
3847  *
3848  * @gvt: a GVT device
3849  * @offset: register offset
3850  *
3851  * Returns:
3852  * True if the register is in the force-nonpriv whitelist;
3853  * false otherwise.
3854  */
3855 bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
3856 					  unsigned int offset)
3857 {
3858 	return in_whitelist(offset);
3859 }
3860 
3861 /**
3862  * intel_vgpu_mmio_reg_rw - emulate tracked mmio registers
3863  * @vgpu: a vGPU
3864  * @offset: register offset
3865  * @pdata: data buffer
3866  * @bytes: data length
3867  * @is_read: read or write
3868  *
3869  * Returns:
3870  * Zero on success, negative error code if failed.
3871  */
3872 int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
3873 			   void *pdata, unsigned int bytes, bool is_read)
3874 {
3875 	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
3876 	struct intel_gvt *gvt = vgpu->gvt;
3877 	struct intel_gvt_mmio_info *mmio_info;
3878 	const struct gvt_mmio_block *mmio_block;
3879 	gvt_mmio_func func;
3880 	int ret;
3881 
3882 	if (drm_WARN_ON(&i915->drm, bytes > 8))
3883 		return -EINVAL;
3884 
3885 	/*
3886 	 * Handle special MMIO blocks.
3887 	 */
3888 	mmio_block = find_mmio_block(gvt, offset);
3889 	if (mmio_block) {
3890 		func = is_read ? mmio_block->read : mmio_block->write;
3891 		if (func)
3892 			return func(vgpu, offset, pdata, bytes);
3893 		goto default_rw;
3894 	}
3895 
3896 	/*
3897 	 * Normal tracked MMIOs.
3898 	 */
3899 	mmio_info = intel_gvt_find_mmio_info(gvt, offset);
3900 	if (!mmio_info) {
3901 		gvt_dbg_mmio("untracked MMIO %08x len %d\n", offset, bytes);
3902 		goto default_rw;
3903 	}
3904 
3905 	if (is_read) {
3906 		return mmio_info->read(vgpu, offset, pdata, bytes);
3907 	} else {
3908 		u64 ro_mask = mmio_info->ro_mask;
3909 		u32 old_vreg = 0;
3910 		u64 data = 0;
3911 
3912 		if (intel_gvt_mmio_has_mode_mask(gvt, mmio_info->offset))
3913 			old_vreg = vgpu_vreg(vgpu, offset);
3915 
3916 		if (likely(!ro_mask))
3917 			ret = mmio_info->write(vgpu, offset, pdata, bytes);
3918 		else if (!~ro_mask) {
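			/* a ro_mask of all ones means the whole register is read-only */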
3919 			gvt_vgpu_err("try to write RO reg %x\n", offset);
3920 			return 0;
3921 		} else {
3922 			/* keep the RO bits in the virtual register */
3923 			memcpy(&data, pdata, bytes);
3924 			data &= ~ro_mask;
3925 			data |= vgpu_vreg(vgpu, offset) & ro_mask;
3926 			ret = mmio_info->write(vgpu, offset, &data, bytes);
3927 		}
3928 
3929 		/* the upper 16 bits of mode ctl regs mask which lower bits change */
3930 		if (intel_gvt_mmio_has_mode_mask(gvt, mmio_info->offset)) {
3931 			u32 mask = vgpu_vreg(vgpu, offset) >> 16;
3932 
3933 			vgpu_vreg(vgpu, offset) = (old_vreg & ~mask)
3934 					| (vgpu_vreg(vgpu, offset) & mask);
3935 		}
3936 	}
3937 
3938 	return ret;
3939 
3940 default_rw:
3941 	return is_read ?
3942 		intel_vgpu_default_mmio_read(vgpu, offset, pdata, bytes) :
3943 		intel_vgpu_default_mmio_write(vgpu, offset, pdata, bytes);
3944 }
3945 
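/* Replay each vGPU's fence registers from its vreg copies back to hardware. */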
3946 void intel_gvt_restore_fence(struct intel_gvt *gvt)
3947 {
3948 	struct intel_vgpu *vgpu;
3949 	int i, id;
3950 
3951 	idr_for_each_entry(&gvt->vgpu_idr, vgpu, id) {
3952 		mmio_hw_access_pre(gvt->gt);
3953 		for (i = 0; i < vgpu_fence_sz(vgpu); i++)
3954 			intel_vgpu_write_fence(vgpu, i, vgpu_vreg64(vgpu, fence_num_to_offset(i)));
3955 		mmio_hw_access_post(gvt->gt);
3956 	}
3957 }
3958 
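/* Write a vGPU's vreg value back to hardware for registers flagged
 * F_PM_SAVE (e.g. the TRTT registers), so that their state is restored
 * after a power transition.
 */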
3959 static int mmio_pm_restore_handler(struct intel_gvt *gvt, u32 offset, void *data)
3960 {
3961 	struct intel_vgpu *vgpu = data;
3962 	struct drm_i915_private *dev_priv = gvt->gt->i915;
3963 
3964 	if (gvt->mmio.mmio_attribute[offset >> 2] & F_PM_SAVE)
3965 		intel_uncore_write(&dev_priv->uncore, _MMIO(offset), vgpu_vreg(vgpu, offset));
3966 
3967 	return 0;
3968 }
3969 
3970 void intel_gvt_restore_mmio(struct intel_gvt *gvt)
3971 {
3972 	struct intel_vgpu *vgpu;
3973 	int id;
3974 
3975 	idr_for_each_entry(&gvt->vgpu_idr, vgpu, id) {
3976 		mmio_hw_access_pre(gvt->gt);
3977 		intel_gvt_for_each_tracked_mmio(gvt, mmio_pm_restore_handler, vgpu);
3978 		mmio_hw_access_post(gvt->gt);
3979 	}
3980 }
3981