/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Kevin Tian <kevin.tian@intel.com>
 *    Eddie Dong <eddie.dong@intel.com>
 *
 * Contributors:
 *    Niu Bing <bing.niu@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */

#ifndef _GVT_H_
#define _GVT_H_

#include "debug.h"
#include "hypercall.h"
#include "mmio.h"
#include "reg.h"
#include "interrupt.h"
#include "gtt.h"
#include "display.h"
#include "edid.h"
#include "execlist.h"
#include "scheduler.h"
#include "sched_policy.h"
#include "mmio_context.h"
#include "cmd_parser.h"
#include "fb_decoder.h"
#include "dmabuf.h"
#include "page_track.h"

#define GVT_MAX_VGPU 8

enum {
	INTEL_GVT_HYPERVISOR_XEN = 0,
	INTEL_GVT_HYPERVISOR_KVM,
};

struct intel_gvt_host {
	bool initialized;
	int hypervisor_type;
	struct intel_gvt_mpt *mpt;
};

extern struct intel_gvt_host intel_gvt_host;

/* Describe per-platform limitations. */
struct intel_gvt_device_info {
	u32 max_support_vgpus;
	u32 cfg_space_size;
	u32 mmio_size;
	u32 mmio_bar;
	unsigned long msi_cap_offset;
	u32 gtt_start_offset;
	u32 gtt_entry_size;
	u32 gtt_entry_size_shift;
	int gmadr_bytes_in_cmd;
	u32 max_surface_size;
};

/* GM resources owned by a vGPU */
struct intel_vgpu_gm {
	u64 aperture_sz;
	u64 hidden_sz;
	struct drm_mm_node low_gm_node;
	struct drm_mm_node high_gm_node;
};

#define INTEL_GVT_MAX_NUM_FENCES 32

/* Fences owned by a vGPU */
struct intel_vgpu_fence {
	struct drm_i915_fence_reg *regs[INTEL_GVT_MAX_NUM_FENCES];
	u32 base;
	u32 size;
};

struct intel_vgpu_mmio {
	void *vreg;
	void *sreg;
};

#define INTEL_GVT_MAX_BAR_NUM 4

struct intel_vgpu_pci_bar {
	u64 size;
	bool tracked;
};

struct intel_vgpu_cfg_space {
	unsigned char virtual_cfg_space[PCI_CFG_SPACE_EXP_SIZE];
	struct intel_vgpu_pci_bar bar[INTEL_GVT_MAX_BAR_NUM];
};

#define vgpu_cfg_space(vgpu) ((vgpu)->cfg_space.virtual_cfg_space)
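
/*
 * Usage sketch (illustrative only, not a call site in this file): peeking
 * at a field of the virtual configuration space presented to the guest.
 * PCI_VENDOR_ID is the standard offset from <uapi/linux/pci_regs.h>;
 * real accesses normally go through intel_vgpu_emulate_cfg_read().
 *
 *	u16 vendor = *(u16 *)(vgpu_cfg_space(vgpu) + PCI_VENDOR_ID);
 */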

#define INTEL_GVT_MAX_PIPE 4

struct intel_vgpu_irq {
	bool irq_warn_once[INTEL_GVT_EVENT_MAX];
	DECLARE_BITMAP(flip_done_event[INTEL_GVT_MAX_PIPE],
		       INTEL_GVT_EVENT_MAX);
};

struct intel_vgpu_opregion {
	bool mapped;
	void *va;
	u32 gfn[INTEL_GVT_OPREGION_PAGES];
};

#define vgpu_opregion(vgpu) (&(vgpu->opregion))

struct intel_vgpu_display {
	struct intel_vgpu_i2c_edid i2c_edid;
	struct intel_vgpu_port ports[I915_MAX_PORTS];
	struct intel_vgpu_sbi sbi;
};

struct vgpu_sched_ctl {
	int weight;
};

enum {
	INTEL_VGPU_EXECLIST_SUBMISSION = 1,
	INTEL_VGPU_GUC_SUBMISSION,
};

struct intel_vgpu_submission_ops {
	const char *name;
	int (*init)(struct intel_vgpu *vgpu, unsigned long engine_mask);
	void (*clean)(struct intel_vgpu *vgpu, unsigned long engine_mask);
	void (*reset)(struct intel_vgpu *vgpu, unsigned long engine_mask);
};

struct intel_vgpu_submission {
	struct intel_vgpu_execlist execlist[I915_NUM_ENGINES];
	struct list_head workload_q_head[I915_NUM_ENGINES];
	struct kmem_cache *workloads;
	atomic_t running_workload_num;
	struct i915_gem_context *shadow_ctx;
	DECLARE_BITMAP(shadow_ctx_desc_updated, I915_NUM_ENGINES);
	DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
	void *ring_scan_buffer[I915_NUM_ENGINES];
	int ring_scan_buffer_size[I915_NUM_ENGINES];
	const struct intel_vgpu_submission_ops *ops;
	int virtual_submission_interface;
	bool active;
};

struct intel_vgpu {
	struct intel_gvt *gvt;
	int id;
	unsigned long handle; /* vGPU handle used by hypervisor MPT modules */
	bool active;
	bool pv_notified;
	bool failsafe;
	unsigned int resetting_eng;
	void *sched_data;
	struct vgpu_sched_ctl sched_ctl;

	struct intel_vgpu_fence fence;
	struct intel_vgpu_gm gm;
	struct intel_vgpu_cfg_space cfg_space;
	struct intel_vgpu_mmio mmio;
	struct intel_vgpu_irq irq;
	struct intel_vgpu_gtt gtt;
	struct intel_vgpu_opregion opregion;
	struct intel_vgpu_display display;
	struct intel_vgpu_submission submission;
	struct radix_tree_root page_track_tree;
	u32 hws_pga[I915_NUM_ENGINES];

	struct dentry *debugfs;

#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
	struct {
		struct mdev_device *mdev;
		struct vfio_region *region;
		int num_regions;
		struct eventfd_ctx *intx_trigger;
		struct eventfd_ctx *msi_trigger;

		/*
		 * Two caches are used to avoid mapping duplicated pages (e.g.
		 * scratch pages). This helps to reduce DMA setup overhead.
		 */
		struct rb_root gfn_cache;
		struct rb_root dma_addr_cache;
		unsigned long nr_cache_entries;
		struct mutex cache_lock;

		struct notifier_block iommu_notifier;
		struct notifier_block group_notifier;
		struct kvm *kvm;
		struct work_struct release_work;
		atomic_t released;
		struct vfio_device *vfio_device;
	} vdev;
#endif

	struct list_head dmabuf_obj_list_head;
	struct mutex dmabuf_lock;
	struct idr object_idr;

	struct completion vblank_done;

	u32 scan_nonprivbb;
};

/* Check whether a GM access error indicates an unhealthy VM */
#define vgpu_is_vm_unhealthy(ret_val) \
	(((ret_val) == -EBADRQC) || ((ret_val) == -EFAULT))
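
/*
 * Typical caller pattern (a sketch; the workload scheduler uses this
 * exact shape): when an emulation path returns one of the errors above,
 * the VM is considered unhealthy and the vGPU is dropped into failsafe
 * mode via enter_failsafe_mode(), declared later in this file.
 *
 *	if (vgpu_is_vm_unhealthy(ret))
 *		enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
 */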

struct intel_gvt_gm {
	unsigned long vgpu_allocated_low_gm_size;
	unsigned long vgpu_allocated_high_gm_size;
};

struct intel_gvt_fence {
	unsigned long vgpu_allocated_fence_num;
};

/* Special MMIO blocks. */
struct gvt_mmio_block {
	unsigned int device;
	i915_reg_t   offset;
	unsigned int size;
	gvt_mmio_func read;
	gvt_mmio_func write;
};

#define INTEL_GVT_MMIO_HASH_BITS 11

struct intel_gvt_mmio {
	u8 *mmio_attribute;
/* Register contains RO bits */
#define F_RO		(1 << 0)
/* Register contains graphics address */
#define F_GMADR		(1 << 1)
/* Mode mask registers with high 16 bits as the mask bits */
#define F_MODE_MASK	(1 << 2)
/* This reg can be accessed by GPU commands */
#define F_CMD_ACCESS	(1 << 3)
/* This reg has been accessed by a VM */
#define F_ACCESSED	(1 << 4)
/* This reg has been accessed through GPU commands */
#define F_CMD_ACCESSED	(1 << 5)
/* This reg could be accessed by unaligned address */
#define F_UNALIGN	(1 << 6)
/* This reg is saved/restored in context */
#define F_IN_CTX	(1 << 7)

	struct gvt_mmio_block *mmio_block;
	unsigned int num_mmio_block;

	DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
	unsigned long num_tracked_mmio;
};

struct intel_gvt_firmware {
	void *cfg_space;
	void *mmio;
	bool firmware_loaded;
};

#define NR_MAX_INTEL_VGPU_TYPES 20
struct intel_vgpu_type {
	char name[16];
	unsigned int avail_instance;
	unsigned int low_gm_size;
	unsigned int high_gm_size;
	unsigned int fence;
	unsigned int weight;
	enum intel_vgpu_edid resolution;
};

struct intel_gvt {
	struct mutex lock;
	struct drm_i915_private *dev_priv;
	struct idr vgpu_idr;	/* vGPU IDR pool */

	struct intel_gvt_device_info device_info;
	struct intel_gvt_gm gm;
	struct intel_gvt_fence fence;
	struct intel_gvt_mmio mmio;
	struct intel_gvt_firmware firmware;
	struct intel_gvt_irq irq;
	struct intel_gvt_gtt gtt;
	struct intel_gvt_workload_scheduler scheduler;
	struct notifier_block shadow_ctx_notifier_block[I915_NUM_ENGINES];
	DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
	struct intel_vgpu_type *types;
	unsigned int num_types;
	struct intel_vgpu *idle_vgpu;

	struct task_struct *service_thread;
	wait_queue_head_t service_thread_wq;
	unsigned long service_request;

	struct {
		struct engine_mmio *mmio;
		int ctx_mmio_count[I915_NUM_ENGINES];
	} engine_mmio_list;

	struct dentry *debugfs_root;
};

static inline struct intel_gvt *to_gvt(struct drm_i915_private *i915)
{
	return i915->gvt;
}

enum {
	INTEL_GVT_REQUEST_EMULATE_VBLANK = 0,

	/* Scheduling triggered by timer */
	INTEL_GVT_REQUEST_SCHED = 1,

	/* Scheduling triggered by event */
	INTEL_GVT_REQUEST_EVENT_SCHED = 2,
};

static inline void intel_gvt_request_service(struct intel_gvt *gvt,
		int service)
{
	set_bit(service, (void *)&gvt->service_request);
	wake_up(&gvt->service_thread_wq);
}
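
/*
 * Usage sketch (illustrative): a timer callback or event handler asks the
 * GVT service thread to perform one of the request types defined above.
 * The thread clears the bit and does the work outside of the caller's
 * (possibly atomic) context.
 *
 *	intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_SCHED);
 */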

void intel_gvt_free_firmware(struct intel_gvt *gvt);
int intel_gvt_load_firmware(struct intel_gvt *gvt);

/* Aperture/GM space definitions for GVT device */
#define MB_TO_BYTES(mb) ((mb) << 20ULL)
#define BYTES_TO_MB(b) ((b) >> 20ULL)

#define HOST_LOW_GM_SIZE MB_TO_BYTES(128)
#define HOST_HIGH_GM_SIZE MB_TO_BYTES(384)
#define HOST_FENCE 4

/* Aperture/GM space definitions for GVT device */
#define gvt_aperture_sz(gvt)	  (gvt->dev_priv->ggtt.mappable_end)
#define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.gmadr.start)

#define gvt_ggtt_gm_sz(gvt)	  (gvt->dev_priv->ggtt.base.total)
#define gvt_ggtt_sz(gvt) \
	((gvt->dev_priv->ggtt.base.total >> PAGE_SHIFT) << 3)
#define gvt_hidden_sz(gvt)	  (gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))

#define gvt_aperture_gmadr_base(gvt) (0)
#define gvt_aperture_gmadr_end(gvt) (gvt_aperture_gmadr_base(gvt) \
				     + gvt_aperture_sz(gvt) - 1)

#define gvt_hidden_gmadr_base(gvt) (gvt_aperture_gmadr_base(gvt) \
				    + gvt_aperture_sz(gvt))
#define gvt_hidden_gmadr_end(gvt) (gvt_hidden_gmadr_base(gvt) \
				   + gvt_hidden_sz(gvt) - 1)
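
/*
 * Resulting global graphics memory (GM) address-space layout, derived
 * directly from the macros above (informational diagram only):
 *
 *	0                     aperture_sz                ggtt_gm_sz
 *	+-------------------------+-------------------------+
 *	|  aperture (low GM,      |  hidden (high GM,       |
 *	|  CPU-mappable)          |  GPU access only)       |
 *	+-------------------------+-------------------------+
 */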

#define gvt_fence_sz(gvt) (gvt->dev_priv->num_fence_regs)

/* Aperture/GM space definitions for vGPU */
#define vgpu_aperture_offset(vgpu)	((vgpu)->gm.low_gm_node.start)
#define vgpu_hidden_offset(vgpu)	((vgpu)->gm.high_gm_node.start)
#define vgpu_aperture_sz(vgpu)		((vgpu)->gm.aperture_sz)
#define vgpu_hidden_sz(vgpu)		((vgpu)->gm.hidden_sz)

#define vgpu_aperture_pa_base(vgpu) \
	(gvt_aperture_pa_base(vgpu->gvt) + vgpu_aperture_offset(vgpu))

#define vgpu_ggtt_gm_sz(vgpu) ((vgpu)->gm.aperture_sz + (vgpu)->gm.hidden_sz)

#define vgpu_aperture_pa_end(vgpu) \
	(vgpu_aperture_pa_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)

#define vgpu_aperture_gmadr_base(vgpu) (vgpu_aperture_offset(vgpu))
#define vgpu_aperture_gmadr_end(vgpu) \
	(vgpu_aperture_gmadr_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)

#define vgpu_hidden_gmadr_base(vgpu) (vgpu_hidden_offset(vgpu))
#define vgpu_hidden_gmadr_end(vgpu) \
	(vgpu_hidden_gmadr_base(vgpu) + vgpu_hidden_sz(vgpu) - 1)

#define vgpu_fence_base(vgpu) (vgpu->fence.base)
#define vgpu_fence_sz(vgpu) (vgpu->fence.size)

struct intel_vgpu_creation_params {
	__u64 handle;
	__u64 low_gm_sz;  /* in MB */
	__u64 high_gm_sz; /* in MB */
	__u64 fence_sz;
	__u64 resolution;
	__s32 primary;
	__u64 vgpu_id;

	__u32 weight;
};

int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
			      struct intel_vgpu_creation_params *param);
void intel_vgpu_reset_resource(struct intel_vgpu *vgpu);
void intel_vgpu_free_resource(struct intel_vgpu *vgpu);
void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
	u32 fence, u64 value);

/*
 * Macros for easily accessing vGPU virtual/shadow registers.
 * Explicitly separate uses for a typed MMIO reg and a raw offset.
 */
#define vgpu_vreg_t(vgpu, reg) \
	(*(u32 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg)))
#define vgpu_vreg(vgpu, offset) \
	(*(u32 *)(vgpu->mmio.vreg + (offset)))
#define vgpu_vreg64_t(vgpu, reg) \
	(*(u64 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg)))
#define vgpu_vreg64(vgpu, offset) \
	(*(u64 *)(vgpu->mmio.vreg + (offset)))
#define vgpu_sreg_t(vgpu, reg) \
	(*(u32 *)(vgpu->mmio.sreg + i915_mmio_reg_offset(reg)))
#define vgpu_sreg(vgpu, offset) \
	(*(u32 *)(vgpu->mmio.sreg + (offset)))
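
/*
 * Usage sketch (illustrative): an MMIO write handler updating the virtual
 * register image that the guest sees. The *_t variants take a typed
 * i915_reg_t; the plain variants take a raw byte offset.
 *
 *	vgpu_vreg(vgpu, offset) = *(u32 *)p_data;
 *	vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= I965_PIPECONF_ACTIVE;
 */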

#define for_each_active_vgpu(gvt, vgpu, id) \
	idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \
		for_each_if(vgpu->active)
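
/*
 * Usage sketch (illustrative): walking every active vGPU, e.g. to emulate
 * a vblank on each of them; do_per_vgpu_work() stands in for whatever the
 * caller does per vGPU. Hold gvt->lock so the IDR cannot change under the
 * walk.
 *
 *	struct intel_vgpu *vgpu;
 *	int id;
 *
 *	mutex_lock(&gvt->lock);
 *	for_each_active_vgpu(gvt, vgpu, id)
 *		do_per_vgpu_work(vgpu);
 *	mutex_unlock(&gvt->lock);
 */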

static inline void intel_vgpu_write_pci_bar(struct intel_vgpu *vgpu,
					    u32 offset, u32 val, bool low)
{
	u32 *pval;

	/* BAR offset should be 32-bit aligned */
	offset = rounddown(offset, 4);
	pval = (u32 *)(vgpu_cfg_space(vgpu) + offset);

	if (low) {
		/*
		 * only update bit 31 - bit 4,
		 * leave the bit 3 - bit 0 unchanged.
		 */
		*pval = (val & GENMASK(31, 4)) | (*pval & GENMASK(3, 0));
	} else {
		*pval = val;
	}
}
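
/*
 * Usage sketch (illustrative): emulating a guest write to a 64-bit BAR,
 * which arrives as two 32-bit config-space writes. The low dword keeps
 * its type bits (bits 3:0); the high dword is stored verbatim.
 *
 *	intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_0, lo, true);
 *	intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_0 + 4, hi, false);
 */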

int intel_gvt_init_vgpu_types(struct intel_gvt *gvt);
void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt);

struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt);
void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu);
struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
					 struct intel_vgpu_type *type);
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
				 unsigned int engine_mask);
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu);

/* GM address validation helpers */
#define vgpu_gmadr_is_aperture(vgpu, gmadr) \
	((gmadr >= vgpu_aperture_gmadr_base(vgpu)) && \
	 (gmadr <= vgpu_aperture_gmadr_end(vgpu)))

#define vgpu_gmadr_is_hidden(vgpu, gmadr) \
	((gmadr >= vgpu_hidden_gmadr_base(vgpu)) && \
	 (gmadr <= vgpu_hidden_gmadr_end(vgpu)))

#define vgpu_gmadr_is_valid(vgpu, gmadr) \
	 ((vgpu_gmadr_is_aperture(vgpu, gmadr) || \
	  (vgpu_gmadr_is_hidden(vgpu, gmadr))))

#define gvt_gmadr_is_aperture(gvt, gmadr) \
	 ((gmadr >= gvt_aperture_gmadr_base(gvt)) && \
	  (gmadr <= gvt_aperture_gmadr_end(gvt)))

#define gvt_gmadr_is_hidden(gvt, gmadr) \
	  ((gmadr >= gvt_hidden_gmadr_base(gvt)) && \
	   (gmadr <= gvt_hidden_gmadr_end(gvt)))

#define gvt_gmadr_is_valid(gvt, gmadr) \
	  (gvt_gmadr_is_aperture(gvt, gmadr) || \
	    gvt_gmadr_is_hidden(gvt, gmadr))

bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size);
int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr);
int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr);
int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
			     unsigned long *h_index);
int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
			     unsigned long *g_index);
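
/*
 * Usage sketch (illustrative): validating and translating a guest graphics
 * memory address before using it on the host side. A failed translation is
 * reported as -EFAULT, matching the unhealthy-VM convention defined above.
 *
 *	u64 h_addr;
 *
 *	if (!intel_gvt_ggtt_validate_range(vgpu, g_addr, size))
 *		return -EFAULT;
 *	if (intel_gvt_ggtt_gmadr_g2h(vgpu, g_addr, &h_addr))
 *		return -EFAULT;
 */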

void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
		bool primary);
void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu);

int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes);

int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes);

static inline u64 intel_vgpu_get_bar_gpa(struct intel_vgpu *vgpu, int bar)
{
	/* The vGPU exposes 64-bit BARs, so read the full 64-bit value. */
	return (*(u64 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
			PCI_BASE_ADDRESS_MEM_MASK;
}
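
/*
 * Usage sketch (illustrative): resolving the guest physical address the
 * guest programmed into BAR0. The argument is a config-space byte offset
 * such as PCI_BASE_ADDRESS_0, not a BAR index.
 *
 *	u64 gpa = intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_0);
 */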

void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu);
int intel_vgpu_init_opregion(struct intel_vgpu *vgpu);
int intel_vgpu_opregion_base_write_handler(struct intel_vgpu *vgpu, u32 gpa);

int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
void populate_pvinfo_page(struct intel_vgpu *vgpu);

int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload);
void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason);

struct intel_gvt_ops {
	int (*emulate_cfg_read)(struct intel_vgpu *, unsigned int, void *,
				unsigned int);
	int (*emulate_cfg_write)(struct intel_vgpu *, unsigned int, void *,
				unsigned int);
	int (*emulate_mmio_read)(struct intel_vgpu *, u64, void *,
				unsigned int);
	int (*emulate_mmio_write)(struct intel_vgpu *, u64, void *,
				unsigned int);
	struct intel_vgpu *(*vgpu_create)(struct intel_gvt *,
				struct intel_vgpu_type *);
	void (*vgpu_destroy)(struct intel_vgpu *);
	void (*vgpu_reset)(struct intel_vgpu *);
	void (*vgpu_activate)(struct intel_vgpu *);
	void (*vgpu_deactivate)(struct intel_vgpu *);
	struct intel_vgpu_type *(*gvt_find_vgpu_type)(struct intel_gvt *gvt,
			const char *name);
	bool (*get_gvt_attrs)(struct attribute ***type_attrs,
			struct attribute_group ***intel_vgpu_type_groups);
	int (*vgpu_query_plane)(struct intel_vgpu *vgpu, void *);
	int (*vgpu_get_dmabuf)(struct intel_vgpu *vgpu, unsigned int);
	int (*write_protect_handler)(struct intel_vgpu *, u64, void *,
				     unsigned int);
};


enum {
	GVT_FAILSAFE_UNSUPPORTED_GUEST,
	GVT_FAILSAFE_INSUFFICIENT_RESOURCE,
	GVT_FAILSAFE_GUEST_ERR,
};

static inline void mmio_hw_access_pre(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_get(dev_priv);
}

static inline void mmio_hw_access_post(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_put(dev_priv);
}
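
/*
 * Usage sketch (illustrative): bracketing a direct hardware MMIO access so
 * the device is guaranteed to be runtime-resumed while it is touched.
 *
 *	mmio_hw_access_pre(dev_priv);
 *	val = I915_READ(reg);
 *	mmio_hw_access_post(dev_priv);
 */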

/**
 * intel_gvt_mmio_set_accessed - mark an MMIO register as accessed
 * @gvt: a GVT device
 * @offset: register offset
 *
 */
static inline void intel_gvt_mmio_set_accessed(
			struct intel_gvt *gvt, unsigned int offset)
{
	gvt->mmio.mmio_attribute[offset >> 2] |= F_ACCESSED;
}

/**
 * intel_gvt_mmio_is_cmd_access - check if an MMIO register can be accessed by GPU commands
 * @gvt: a GVT device
 * @offset: register offset
 *
 */
static inline bool intel_gvt_mmio_is_cmd_access(
			struct intel_gvt *gvt, unsigned int offset)
{
	return gvt->mmio.mmio_attribute[offset >> 2] & F_CMD_ACCESS;
}

/**
 * intel_gvt_mmio_is_unalign - check if an MMIO register can be accessed unaligned
 * @gvt: a GVT device
 * @offset: register offset
 *
 */
static inline bool intel_gvt_mmio_is_unalign(
			struct intel_gvt *gvt, unsigned int offset)
{
	return gvt->mmio.mmio_attribute[offset >> 2] & F_UNALIGN;
}

/**
 * intel_gvt_mmio_set_cmd_accessed - mark an MMIO register as accessed by GPU commands
 * @gvt: a GVT device
 * @offset: register offset
 *
 */
static inline void intel_gvt_mmio_set_cmd_accessed(
			struct intel_gvt *gvt, unsigned int offset)
{
	gvt->mmio.mmio_attribute[offset >> 2] |= F_CMD_ACCESSED;
}

/**
 * intel_gvt_mmio_has_mode_mask - check if an MMIO register has a mode mask
 * @gvt: a GVT device
 * @offset: register offset
 *
 * Returns:
 * True if the MMIO register has a mode mask in its higher 16 bits, false otherwise.
 *
 */
static inline bool intel_gvt_mmio_has_mode_mask(
			struct intel_gvt *gvt, unsigned int offset)
{
	return gvt->mmio.mmio_attribute[offset >> 2] & F_MODE_MASK;
}
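
/*
 * Illustrative note on F_MODE_MASK registers: the upper 16 bits of a write
 * select which of the lower 16 bits actually change, so software can update
 * individual bits without a read-modify-write. For example, writing
 * 0x00010001 sets bit 0 and leaves bits 1-15 untouched.
 */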

/**
 * intel_gvt_mmio_is_in_ctx - check if an MMIO register is saved/restored in context
 * @gvt: a GVT device
 * @offset: register offset
 *
 * Returns:
 * True if the MMIO register has the in-context mask set, false otherwise.
 *
 */
static inline bool intel_gvt_mmio_is_in_ctx(
			struct intel_gvt *gvt, unsigned int offset)
{
	return gvt->mmio.mmio_attribute[offset >> 2] & F_IN_CTX;
}

/**
 * intel_gvt_mmio_set_in_ctx - mark an MMIO register as saved/restored in the logical context
 * @gvt: a GVT device
 * @offset: register offset
 *
 */
static inline void intel_gvt_mmio_set_in_ctx(
			struct intel_gvt *gvt, unsigned int offset)
{
	gvt->mmio.mmio_attribute[offset >> 2] |= F_IN_CTX;
}

int intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu);
int intel_gvt_debugfs_init(struct intel_gvt *gvt);
void intel_gvt_debugfs_clean(struct intel_gvt *gvt);


#include "trace.h"
#include "mpt.h"

#endif