/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Kevin Tian <kevin.tian@intel.com>
 *    Eddie Dong <eddie.dong@intel.com>
 *
 * Contributors:
 *    Niu Bing <bing.niu@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */

#ifndef _GVT_H_
#define _GVT_H_

#include "debug.h"
#include "hypercall.h"
#include "mmio.h"
#include "reg.h"
#include "interrupt.h"
#include "gtt.h"
#include "display.h"
#include "edid.h"
#include "execlist.h"
#include "scheduler.h"
#include "sched_policy.h"
#include "mmio_context.h"
#include "cmd_parser.h"
#include "fb_decoder.h"
#include "dmabuf.h"

#define GVT_MAX_VGPU 8

enum {
	INTEL_GVT_HYPERVISOR_XEN = 0,
	INTEL_GVT_HYPERVISOR_KVM,
};

struct intel_gvt_host {
	bool initialized;
	int hypervisor_type;
	struct intel_gvt_mpt *mpt;
};

extern struct intel_gvt_host intel_gvt_host;

/* Describe per-platform limitations. */
struct intel_gvt_device_info {
	u32 max_support_vgpus;
	u32 cfg_space_size;
	u32 mmio_size;
	u32 mmio_bar;
	unsigned long msi_cap_offset;
	u32 gtt_start_offset;
	u32 gtt_entry_size;
	u32 gtt_entry_size_shift;
	int gmadr_bytes_in_cmd;
	u32 max_surface_size;
};

/* GM resources owned by a vGPU */
struct intel_vgpu_gm {
	u64 aperture_sz;
	u64 hidden_sz;
	struct drm_mm_node low_gm_node;
	struct drm_mm_node high_gm_node;
};

#define INTEL_GVT_MAX_NUM_FENCES 32

/* Fences owned by a vGPU */
struct intel_vgpu_fence {
	struct drm_i915_fence_reg *regs[INTEL_GVT_MAX_NUM_FENCES];
	u32 base;
	u32 size;
};

struct intel_vgpu_mmio {
	void *vreg;
	void *sreg;
	bool disable_warn_untrack;
};

#define INTEL_GVT_MAX_BAR_NUM 4

struct intel_vgpu_pci_bar {
	u64 size;
	bool tracked;
};

struct intel_vgpu_cfg_space {
	unsigned char virtual_cfg_space[PCI_CFG_SPACE_EXP_SIZE];
	struct intel_vgpu_pci_bar bar[INTEL_GVT_MAX_BAR_NUM];
};

#define vgpu_cfg_space(vgpu) ((vgpu)->cfg_space.virtual_cfg_space)

#define INTEL_GVT_MAX_PIPE 4

struct intel_vgpu_irq {
	bool irq_warn_once[INTEL_GVT_EVENT_MAX];
	DECLARE_BITMAP(flip_done_event[INTEL_GVT_MAX_PIPE],
		       INTEL_GVT_EVENT_MAX);
};

struct intel_vgpu_opregion {
	bool mapped;
	void *va;
	u32 gfn[INTEL_GVT_OPREGION_PAGES];
};

#define vgpu_opregion(vgpu) (&(vgpu->opregion))

#define INTEL_GVT_MAX_PORT 5

struct intel_vgpu_display {
	struct intel_vgpu_i2c_edid i2c_edid;
	struct intel_vgpu_port ports[INTEL_GVT_MAX_PORT];
	struct intel_vgpu_sbi sbi;
};

struct vgpu_sched_ctl {
	int weight;
};

enum {
	INTEL_VGPU_EXECLIST_SUBMISSION = 1,
	INTEL_VGPU_GUC_SUBMISSION,
};

struct intel_vgpu_submission_ops {
	const char *name;
	int (*init)(struct intel_vgpu *vgpu, unsigned long engine_mask);
	void (*clean)(struct intel_vgpu *vgpu, unsigned long engine_mask);
	void (*reset)(struct intel_vgpu *vgpu, unsigned long engine_mask);
};

struct intel_vgpu_submission {
	struct intel_vgpu_execlist execlist[I915_NUM_ENGINES];
	struct list_head workload_q_head[I915_NUM_ENGINES];
	struct kmem_cache *workloads;
	atomic_t running_workload_num;
	struct i915_gem_context *shadow_ctx;
	DECLARE_BITMAP(shadow_ctx_desc_updated, I915_NUM_ENGINES);
	DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
	void *ring_scan_buffer[I915_NUM_ENGINES];
	int ring_scan_buffer_size[I915_NUM_ENGINES];
	const struct intel_vgpu_submission_ops *ops;
	int virtual_submission_interface;
	bool active;
};

struct intel_vgpu {
	struct intel_gvt *gvt;
	int id;
	unsigned long handle; /* vGPU handle used by hypervisor MPT modules */
	bool active;
	bool pv_notified;
	bool failsafe;
	unsigned int resetting_eng;
	void *sched_data;
	struct vgpu_sched_ctl sched_ctl;

	struct intel_vgpu_fence fence;
	struct intel_vgpu_gm gm;
	struct intel_vgpu_cfg_space cfg_space;
	struct intel_vgpu_mmio mmio;
	struct intel_vgpu_irq irq;
	struct intel_vgpu_gtt gtt;
	struct intel_vgpu_opregion opregion;
	struct intel_vgpu_display display;
	struct intel_vgpu_submission submission;
	u32 hws_pga[I915_NUM_ENGINES];

	struct dentry *debugfs;

#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
	struct {
		struct mdev_device *mdev;
		struct vfio_region *region;
		int num_regions;
		struct eventfd_ctx *intx_trigger;
		struct eventfd_ctx *msi_trigger;
		struct rb_root cache;
		struct mutex cache_lock;
		struct notifier_block iommu_notifier;
		struct notifier_block group_notifier;
		struct kvm *kvm;
		struct work_struct release_work;
		atomic_t released;
		struct vfio_device *vfio_device;
	} vdev;
#endif

	struct list_head dmabuf_obj_list_head;
	struct mutex dmabuf_lock;
	struct idr object_idr;

	struct completion vblank_done;
};

/* validating GM healthy status */
#define vgpu_is_vm_unhealthy(ret_val) \
	(((ret_val) == -EBADRQC) || ((ret_val) == -EFAULT))
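
/*
 * Illustrative sketch (not part of the original header), assuming @ret
 * came from a GM/GGTT access path: a caller that detects an unhealthy VM
 * is expected to push the vGPU into failsafe mode, e.g.
 *
 *	if (vgpu_is_vm_unhealthy(ret))
 *		enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
 */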

struct intel_gvt_gm {
	unsigned long vgpu_allocated_low_gm_size;
	unsigned long vgpu_allocated_high_gm_size;
};

struct intel_gvt_fence {
	unsigned long vgpu_allocated_fence_num;
};

/* Special MMIO blocks. */
struct gvt_mmio_block {
	unsigned int device;
	i915_reg_t   offset;
	unsigned int size;
	gvt_mmio_func read;
	gvt_mmio_func write;
};

#define INTEL_GVT_MMIO_HASH_BITS 11

struct intel_gvt_mmio {
	u8 *mmio_attribute;
/* Register contains RO bits */
#define F_RO		(1 << 0)
/* Register contains graphics address */
#define F_GMADR		(1 << 1)
/* Mode mask registers with high 16 bits as the mask bits */
#define F_MODE_MASK	(1 << 2)
/* This reg can be accessed by GPU commands */
#define F_CMD_ACCESS	(1 << 3)
/* This reg has been accessed by a VM */
#define F_ACCESSED	(1 << 4)
/* This reg has been accessed through GPU commands */
#define F_CMD_ACCESSED	(1 << 5)
/* This reg could be accessed by unaligned address */
#define F_UNALIGN	(1 << 6)

	struct gvt_mmio_block *mmio_block;
	unsigned int num_mmio_block;

	DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
	unsigned long num_tracked_mmio;
};

struct intel_gvt_firmware {
	void *cfg_space;
	void *mmio;
	bool firmware_loaded;
};

#define NR_MAX_INTEL_VGPU_TYPES 20
struct intel_vgpu_type {
	char name[16];
	unsigned int avail_instance;
	unsigned int low_gm_size;
	unsigned int high_gm_size;
	unsigned int fence;
	unsigned int weight;
	enum intel_vgpu_edid resolution;
};

struct intel_gvt {
	struct mutex lock;
	struct drm_i915_private *dev_priv;
	struct idr vgpu_idr;	/* vGPU IDR pool */

	struct intel_gvt_device_info device_info;
	struct intel_gvt_gm gm;
	struct intel_gvt_fence fence;
	struct intel_gvt_mmio mmio;
	struct intel_gvt_firmware firmware;
	struct intel_gvt_irq irq;
	struct intel_gvt_gtt gtt;
	struct intel_gvt_workload_scheduler scheduler;
	struct notifier_block shadow_ctx_notifier_block[I915_NUM_ENGINES];
	DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
	struct intel_vgpu_type *types;
	unsigned int num_types;
	struct intel_vgpu *idle_vgpu;

	struct task_struct *service_thread;
	wait_queue_head_t service_thread_wq;
	unsigned long service_request;

	struct engine_mmio *engine_mmio_list;

	struct dentry *debugfs_root;
};

static inline struct intel_gvt *to_gvt(struct drm_i915_private *i915)
{
	return i915->gvt;
}

enum {
	INTEL_GVT_REQUEST_EMULATE_VBLANK = 0,

	/* Scheduling triggered by a timer */
	INTEL_GVT_REQUEST_SCHED = 1,

	/* Scheduling triggered by an event */
	INTEL_GVT_REQUEST_EVENT_SCHED = 2,
};

static inline void intel_gvt_request_service(struct intel_gvt *gvt,
		int service)
{
	set_bit(service, (void *)&gvt->service_request);
	wake_up(&gvt->service_thread_wq);
}
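
/*
 * Illustrative sketch (not part of the original header): a caller that
 * wants the service thread to run a scheduling pass raises the request
 * bit and wakes the thread, e.g.
 *
 *	intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_SCHED);
 *
 * The service thread is then expected to test and clear the bit in
 * gvt->service_request before acting on it.
 */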

void intel_gvt_free_firmware(struct intel_gvt *gvt);
int intel_gvt_load_firmware(struct intel_gvt *gvt);

/* Conversion between MB and bytes */
#define MB_TO_BYTES(mb) ((mb) << 20ULL)
#define BYTES_TO_MB(b) ((b) >> 20ULL)

#define HOST_LOW_GM_SIZE MB_TO_BYTES(128)
#define HOST_HIGH_GM_SIZE MB_TO_BYTES(384)
#define HOST_FENCE 4

/* Aperture/GM space definitions for GVT device */
#define gvt_aperture_sz(gvt)	  (gvt->dev_priv->ggtt.mappable_end)
#define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.gmadr.start)

#define gvt_ggtt_gm_sz(gvt)	  (gvt->dev_priv->ggtt.base.total)
#define gvt_ggtt_sz(gvt) \
	((gvt->dev_priv->ggtt.base.total >> PAGE_SHIFT) << 3)
#define gvt_hidden_sz(gvt)	  (gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))

#define gvt_aperture_gmadr_base(gvt) (0)
#define gvt_aperture_gmadr_end(gvt) (gvt_aperture_gmadr_base(gvt) \
				     + gvt_aperture_sz(gvt) - 1)

#define gvt_hidden_gmadr_base(gvt) (gvt_aperture_gmadr_base(gvt) \
				    + gvt_aperture_sz(gvt))
#define gvt_hidden_gmadr_end(gvt) (gvt_hidden_gmadr_base(gvt) \
				   + gvt_hidden_sz(gvt) - 1)

#define gvt_fence_sz(gvt) (gvt->dev_priv->num_fence_regs)

/* Aperture/GM space definitions for vGPU */
#define vgpu_aperture_offset(vgpu)	((vgpu)->gm.low_gm_node.start)
#define vgpu_hidden_offset(vgpu)	((vgpu)->gm.high_gm_node.start)
#define vgpu_aperture_sz(vgpu)		((vgpu)->gm.aperture_sz)
#define vgpu_hidden_sz(vgpu)		((vgpu)->gm.hidden_sz)

#define vgpu_aperture_pa_base(vgpu) \
	(gvt_aperture_pa_base(vgpu->gvt) + vgpu_aperture_offset(vgpu))

#define vgpu_ggtt_gm_sz(vgpu) ((vgpu)->gm.aperture_sz + (vgpu)->gm.hidden_sz)

#define vgpu_aperture_pa_end(vgpu) \
	(vgpu_aperture_pa_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)

#define vgpu_aperture_gmadr_base(vgpu) (vgpu_aperture_offset(vgpu))
#define vgpu_aperture_gmadr_end(vgpu) \
	(vgpu_aperture_gmadr_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)

#define vgpu_hidden_gmadr_base(vgpu) (vgpu_hidden_offset(vgpu))
#define vgpu_hidden_gmadr_end(vgpu) \
	(vgpu_hidden_gmadr_base(vgpu) + vgpu_hidden_sz(vgpu) - 1)

#define vgpu_fence_base(vgpu) (vgpu->fence.base)
#define vgpu_fence_sz(vgpu) (vgpu->fence.size)

struct intel_vgpu_creation_params {
	__u64 handle;
	__u64 low_gm_sz;  /* in MB */
	__u64 high_gm_sz; /* in MB */
	__u64 fence_sz;
	__u64 resolution;
	__s32 primary;
	__u64 vgpu_id;

	__u32 weight;
};

int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
			      struct intel_vgpu_creation_params *param);
void intel_vgpu_reset_resource(struct intel_vgpu *vgpu);
void intel_vgpu_free_resource(struct intel_vgpu *vgpu);
void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
	u32 fence, u64 value);

/*
 * Macros for easily accessing vGPU virtual/shadow registers.
 * Explicitly separate use for a typed MMIO reg or a raw offset.
 */
#define vgpu_vreg_t(vgpu, reg) \
	(*(u32 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg)))
#define vgpu_vreg(vgpu, offset) \
	(*(u32 *)(vgpu->mmio.vreg + (offset)))
#define vgpu_vreg64_t(vgpu, reg) \
	(*(u64 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg)))
#define vgpu_vreg64(vgpu, offset) \
	(*(u64 *)(vgpu->mmio.vreg + (offset)))
#define vgpu_sreg_t(vgpu, reg) \
	(*(u32 *)(vgpu->mmio.sreg + i915_mmio_reg_offset(reg)))
#define vgpu_sreg(vgpu, offset) \
	(*(u32 *)(vgpu->mmio.sreg + (offset)))
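
/*
 * Illustrative sketch (not part of the original header): a hypothetical
 * write handler that latches the guest value into the virtual register
 * file using the raw-offset form:
 *
 *	static int sample_mmio_write(struct intel_vgpu *vgpu,
 *				     unsigned int offset, void *p_data,
 *				     unsigned int bytes)
 *	{
 *		vgpu_vreg(vgpu, offset) = *(u32 *)p_data;
 *		return 0;
 *	}
 *
 * With a typed register, vgpu_vreg_t(vgpu, reg) is used instead.
 */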

#define for_each_active_vgpu(gvt, vgpu, id) \
	idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \
		for_each_if(vgpu->active)
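
/*
 * Illustrative sketch (not part of the original header), assuming the
 * caller holds gvt->lock while walking the IDR; do_something() is a
 * hypothetical per-vGPU operation:
 *
 *	struct intel_vgpu *vgpu;
 *	int id;
 *
 *	mutex_lock(&gvt->lock);
 *	for_each_active_vgpu(gvt, vgpu, id)
 *		do_something(vgpu);
 *	mutex_unlock(&gvt->lock);
 */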

static inline void intel_vgpu_write_pci_bar(struct intel_vgpu *vgpu,
					    u32 offset, u32 val, bool low)
{
	u32 *pval;

	/* BAR offset should be 32-bit aligned */
	offset = rounddown(offset, 4);
	pval = (u32 *)(vgpu_cfg_space(vgpu) + offset);

	if (low) {
		/*
		 * Only update bits 31..4 and
		 * leave bits 3..0 unchanged.
		 */
		*pval = (val & GENMASK(31, 4)) | (*pval & GENMASK(3, 0));
	} else {
		*pval = val;
	}
}
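
/*
 * Illustrative sketch (not part of the original header): a 64-bit BAR is
 * programmed by the guest as two 32-bit config writes. Assuming @bar is
 * the config-space offset of the low dword, the low write preserves the
 * BAR type bits (3:0) while the high dword is stored verbatim:
 *
 *	intel_vgpu_write_pci_bar(vgpu, bar, lo_val, true);
 *	intel_vgpu_write_pci_bar(vgpu, bar + 4, hi_val, false);
 */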

int intel_gvt_init_vgpu_types(struct intel_gvt *gvt);
void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt);

struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt);
void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu);
struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
					 struct intel_vgpu_type *type);
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
				 unsigned int engine_mask);
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu);

/* validating GM functions */
#define vgpu_gmadr_is_aperture(vgpu, gmadr) \
	((gmadr >= vgpu_aperture_gmadr_base(vgpu)) && \
	 (gmadr <= vgpu_aperture_gmadr_end(vgpu)))

#define vgpu_gmadr_is_hidden(vgpu, gmadr) \
	((gmadr >= vgpu_hidden_gmadr_base(vgpu)) && \
	 (gmadr <= vgpu_hidden_gmadr_end(vgpu)))

#define vgpu_gmadr_is_valid(vgpu, gmadr) \
	((vgpu_gmadr_is_aperture(vgpu, gmadr) || \
	  (vgpu_gmadr_is_hidden(vgpu, gmadr))))

#define gvt_gmadr_is_aperture(gvt, gmadr) \
	((gmadr >= gvt_aperture_gmadr_base(gvt)) && \
	 (gmadr <= gvt_aperture_gmadr_end(gvt)))

#define gvt_gmadr_is_hidden(gvt, gmadr) \
	((gmadr >= gvt_hidden_gmadr_base(gvt)) && \
	 (gmadr <= gvt_hidden_gmadr_end(gvt)))

#define gvt_gmadr_is_valid(gvt, gmadr) \
	(gvt_gmadr_is_aperture(gvt, gmadr) || \
	 gvt_gmadr_is_hidden(gvt, gmadr))

bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size);
int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr);
int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr);
int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
			     unsigned long *h_index);
int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
			     unsigned long *g_index);
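
/*
 * Illustrative sketch (not part of the original header): translating a
 * guest graphics memory address to its host counterpart after checking
 * that it falls inside the vGPU's aperture or hidden range:
 *
 *	u64 h_addr;
 *
 *	if (!vgpu_gmadr_is_valid(vgpu, g_addr))
 *		return -EINVAL;
 *	if (intel_gvt_ggtt_gmadr_g2h(vgpu, g_addr, &h_addr))
 *		return -EINVAL;
 */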

void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
		bool primary);
void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu);

int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes);

int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes);

static inline u64 intel_vgpu_get_bar_gpa(struct intel_vgpu *vgpu, int bar)
{
	/* The BARs are emulated as 64-bit, so read the full 64-bit value. */
	return (*(u64 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
			PCI_BASE_ADDRESS_MEM_MASK;
}

void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu);
int intel_vgpu_init_opregion(struct intel_vgpu *vgpu);
int intel_vgpu_opregion_base_write_handler(struct intel_vgpu *vgpu, u32 gpa);

int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
void populate_pvinfo_page(struct intel_vgpu *vgpu);

int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload);
void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason);

struct intel_gvt_ops {
	int (*emulate_cfg_read)(struct intel_vgpu *, unsigned int, void *,
				unsigned int);
	int (*emulate_cfg_write)(struct intel_vgpu *, unsigned int, void *,
				unsigned int);
	int (*emulate_mmio_read)(struct intel_vgpu *, u64, void *,
				unsigned int);
	int (*emulate_mmio_write)(struct intel_vgpu *, u64, void *,
				unsigned int);
	struct intel_vgpu *(*vgpu_create)(struct intel_gvt *,
				struct intel_vgpu_type *);
	void (*vgpu_destroy)(struct intel_vgpu *);
	void (*vgpu_reset)(struct intel_vgpu *);
	void (*vgpu_activate)(struct intel_vgpu *);
	void (*vgpu_deactivate)(struct intel_vgpu *);
	struct intel_vgpu_type *(*gvt_find_vgpu_type)(struct intel_gvt *gvt,
			const char *name);
	bool (*get_gvt_attrs)(struct attribute ***type_attrs,
			struct attribute_group ***intel_vgpu_type_groups);
	int (*vgpu_query_plane)(struct intel_vgpu *vgpu, void *);
	int (*vgpu_get_dmabuf)(struct intel_vgpu *vgpu, unsigned int);
	int (*write_protect_handler)(struct intel_vgpu *, u64, void *,
				     unsigned int);
};

enum {
	GVT_FAILSAFE_UNSUPPORTED_GUEST,
	GVT_FAILSAFE_INSUFFICIENT_RESOURCE,
	GVT_FAILSAFE_GUEST_ERR,
};

static inline void mmio_hw_access_pre(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_get(dev_priv);
}

static inline void mmio_hw_access_post(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_put(dev_priv);
}
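
/*
 * Illustrative sketch (not part of the original header): paths that touch
 * physical MMIO bracket the access with a runtime PM reference, e.g.
 *
 *	mmio_hw_access_pre(dev_priv);
 *	val = I915_READ(reg);
 *	mmio_hw_access_post(dev_priv);
 *
 * where I915_READ() stands in for whatever hardware access is needed.
 */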

/**
 * intel_gvt_mmio_set_accessed - mark an MMIO as having been accessed
 * @gvt: a GVT device
 * @offset: register offset
 *
 */
static inline void intel_gvt_mmio_set_accessed(
			struct intel_gvt *gvt, unsigned int offset)
{
	gvt->mmio.mmio_attribute[offset >> 2] |= F_ACCESSED;
}

/**
 * intel_gvt_mmio_is_cmd_access - check if an MMIO could be accessed by GPU commands
 * @gvt: a GVT device
 * @offset: register offset
 *
 */
static inline bool intel_gvt_mmio_is_cmd_access(
			struct intel_gvt *gvt, unsigned int offset)
{
	return gvt->mmio.mmio_attribute[offset >> 2] & F_CMD_ACCESS;
}

/**
 * intel_gvt_mmio_is_unalign - check if an MMIO could be accessed by an unaligned address
 * @gvt: a GVT device
 * @offset: register offset
 *
 */
static inline bool intel_gvt_mmio_is_unalign(
			struct intel_gvt *gvt, unsigned int offset)
{
	return gvt->mmio.mmio_attribute[offset >> 2] & F_UNALIGN;
}

/**
 * intel_gvt_mmio_set_cmd_accessed - mark an MMIO as having been accessed by GPU commands
 * @gvt: a GVT device
 * @offset: register offset
 *
 */
static inline void intel_gvt_mmio_set_cmd_accessed(
			struct intel_gvt *gvt, unsigned int offset)
{
	gvt->mmio.mmio_attribute[offset >> 2] |= F_CMD_ACCESSED;
}

/**
 * intel_gvt_mmio_has_mode_mask - check if an MMIO has a mode mask
 * @gvt: a GVT device
 * @offset: register offset
 *
 * Returns:
 * True if the MMIO has a mode mask in its higher 16 bits, false otherwise.
 *
 */
static inline bool intel_gvt_mmio_has_mode_mask(
			struct intel_gvt *gvt, unsigned int offset)
{
	return gvt->mmio.mmio_attribute[offset >> 2] & F_MODE_MASK;
}
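
/*
 * Illustrative sketch (not part of the original header): for a mode-mask
 * register, the high 16 bits of a write select which of the low 16 bits
 * take effect. A hypothetical helper composing such a write:
 *
 *	static inline u32 mode_mask_write(u16 mask, u16 value)
 *	{
 *		return ((u32)mask << 16) | value;
 *	}
 *
 * e.g. mode_mask_write(BIT(3), BIT(3)) sets bit 3 and leaves the rest of
 * the register's low 16 bits untouched.
 */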

int intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu);
int intel_gvt_debugfs_init(struct intel_gvt *gvt);
void intel_gvt_debugfs_clean(struct intel_gvt *gvt);

#include "trace.h"
#include "mpt.h"

#endif