/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Kevin Tian <kevin.tian@intel.com>
 *    Eddie Dong <eddie.dong@intel.com>
 *
 * Contributors:
 *    Niu Bing <bing.niu@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */

#ifndef _GVT_H_
#define _GVT_H_

#include "debug.h"
#include "hypercall.h"
#include "mmio.h"
#include "reg.h"
#include "interrupt.h"
#include "gtt.h"
#include "display.h"
#include "edid.h"
#include "execlist.h"
#include "scheduler.h"
#include "sched_policy.h"
#include "render.h"
#include "cmd_parser.h"

#define GVT_MAX_VGPU 8

enum {
	INTEL_GVT_HYPERVISOR_XEN = 0,
	INTEL_GVT_HYPERVISOR_KVM,
};

struct intel_gvt_host {
	bool initialized;
	int hypervisor_type;
	struct intel_gvt_mpt *mpt;
};

extern struct intel_gvt_host intel_gvt_host;

/* Describe per-platform limitations. */
struct intel_gvt_device_info {
	u32 max_support_vgpus;
	u32 cfg_space_size;
	u32 mmio_size;
	u32 mmio_bar;
	unsigned long msi_cap_offset;
	u32 gtt_start_offset;
	u32 gtt_entry_size;
	u32 gtt_entry_size_shift;
	int gmadr_bytes_in_cmd;
	u32 max_surface_size;
};

/* GM resources owned by a vGPU */
struct intel_vgpu_gm {
	u64 aperture_sz;
	u64 hidden_sz;
	struct drm_mm_node low_gm_node;
	struct drm_mm_node high_gm_node;
};

#define INTEL_GVT_MAX_NUM_FENCES 32

/* Fences owned by a vGPU */
struct intel_vgpu_fence {
	struct drm_i915_fence_reg *regs[INTEL_GVT_MAX_NUM_FENCES];
	u32 base;
	u32 size;
};
struct intel_vgpu_mmio {
	void *vreg;	/* virtual register state */
	void *sreg;	/* shadow register state */
	bool disable_warn_untrack;
};

#define INTEL_GVT_MAX_CFG_SPACE_SZ 256
#define INTEL_GVT_MAX_BAR_NUM 4

struct intel_vgpu_pci_bar {
	u64 size;
	bool tracked;
};

struct intel_vgpu_cfg_space {
	unsigned char virtual_cfg_space[INTEL_GVT_MAX_CFG_SPACE_SZ];
	struct intel_vgpu_pci_bar bar[INTEL_GVT_MAX_BAR_NUM];
};

#define vgpu_cfg_space(vgpu) ((vgpu)->cfg_space.virtual_cfg_space)

#define INTEL_GVT_MAX_PIPE 4

struct intel_vgpu_irq {
	bool irq_warn_once[INTEL_GVT_EVENT_MAX];
	DECLARE_BITMAP(flip_done_event[INTEL_GVT_MAX_PIPE],
		       INTEL_GVT_EVENT_MAX);
};
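
/*
 * Sketch of the producer side (hypothetical, for illustration): a
 * page-flip handler queues a flip-done event so that a later vblank
 * emulation pass can inject it into the guest:
 *
 *	set_bit(PRIMARY_A_FLIP_DONE, vgpu->irq.flip_done_event[PIPE_A]);
 *
 * The event and pipe names above are assumptions about the surrounding
 * code, not definitions made in this header.
 */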

struct intel_vgpu_opregion {
	void *va;
	u32 gfn[INTEL_GVT_OPREGION_PAGES];
	struct page *pages[INTEL_GVT_OPREGION_PAGES];
};

#define vgpu_opregion(vgpu) (&(vgpu->opregion))

#define INTEL_GVT_MAX_PORT 5

struct intel_vgpu_display {
	struct intel_vgpu_i2c_edid i2c_edid;
	struct intel_vgpu_port ports[INTEL_GVT_MAX_PORT];
	struct intel_vgpu_sbi sbi;
};

struct intel_vgpu {
	struct intel_gvt *gvt;
	int id;
	unsigned long handle; /* vGPU handle used by hypervisor MPT modules */
	bool active;
	bool resetting;
	void *sched_data;

	struct intel_vgpu_fence fence;
	struct intel_vgpu_gm gm;
	struct intel_vgpu_cfg_space cfg_space;
	struct intel_vgpu_mmio mmio;
	struct intel_vgpu_irq irq;
	struct intel_vgpu_gtt gtt;
	struct intel_vgpu_opregion opregion;
	struct intel_vgpu_display display;
	struct intel_vgpu_execlist execlist[I915_NUM_ENGINES];
	struct list_head workload_q_head[I915_NUM_ENGINES];
	struct kmem_cache *workloads;
	atomic_t running_workload_num;
	DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
	struct i915_gem_context *shadow_ctx;
	struct notifier_block shadow_ctx_notifier_block;

#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
	struct {
		struct mdev_device *mdev;
		struct vfio_region *region;
		int num_regions;
		struct eventfd_ctx *intx_trigger;
		struct eventfd_ctx *msi_trigger;
		struct rb_root cache;
		struct mutex cache_lock;
		struct notifier_block iommu_notifier;
		struct notifier_block group_notifier;
		struct kvm *kvm;
		struct work_struct release_work;
	} vdev;
#endif
};

struct intel_gvt_gm {
	unsigned long vgpu_allocated_low_gm_size;
	unsigned long vgpu_allocated_high_gm_size;
};

struct intel_gvt_fence {
	unsigned long vgpu_allocated_fence_num;
};

#define INTEL_GVT_MMIO_HASH_BITS 9

struct intel_gvt_mmio {
	u32 *mmio_attribute;
	DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
};

struct intel_gvt_firmware {
	void *cfg_space;
	void *mmio;
	bool firmware_loaded;
};

struct intel_gvt_opregion {
	void __iomem *opregion_va;
	u32 opregion_pa;
};

#define NR_MAX_INTEL_VGPU_TYPES 20
struct intel_vgpu_type {
	char name[16];
	unsigned int max_instance;
	unsigned int avail_instance;
	unsigned int low_gm_size;
	unsigned int high_gm_size;
	unsigned int fence;
};

struct intel_gvt {
	struct mutex lock;
	struct drm_i915_private *dev_priv;
	struct idr vgpu_idr;	/* vGPU IDR pool */

	struct intel_gvt_device_info device_info;
	struct intel_gvt_gm gm;
	struct intel_gvt_fence fence;
	struct intel_gvt_mmio mmio;
	struct intel_gvt_firmware firmware;
	struct intel_gvt_irq irq;
	struct intel_gvt_gtt gtt;
	struct intel_gvt_opregion opregion;
	struct intel_gvt_workload_scheduler scheduler;
	DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
	struct intel_vgpu_type *types;
	unsigned int num_types;

	struct task_struct *service_thread;
	wait_queue_head_t service_thread_wq;
	unsigned long service_request;
};

static inline struct intel_gvt *to_gvt(struct drm_i915_private *i915)
{
	return i915->gvt;
}

enum {
	INTEL_GVT_REQUEST_EMULATE_VBLANK = 0,
};

static inline void intel_gvt_request_service(struct intel_gvt *gvt,
		int service)
{
	set_bit(service, (void *)&gvt->service_request);
	wake_up(&gvt->service_thread_wq);
}
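
/*
 * The service thread is the consumer of service_request.  A minimal
 * sketch of that loop (illustrative only; the real implementation lives
 * outside this header):
 *
 *	while (!kthread_should_stop()) {
 *		wait_event_interruptible(gvt->service_thread_wq,
 *				kthread_should_stop() ||
 *				gvt->service_request);
 *
 *		if (test_and_clear_bit(INTEL_GVT_REQUEST_EMULATE_VBLANK,
 *				(void *)&gvt->service_request))
 *			handle_vblank_emulation(gvt);  (placeholder name)
 *	}
 */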

void intel_gvt_free_firmware(struct intel_gvt *gvt);
int intel_gvt_load_firmware(struct intel_gvt *gvt);

/* MB/byte conversion helpers used by the GM definitions below */
#define MB_TO_BYTES(mb) ((mb) << 20ULL)
#define BYTES_TO_MB(b) ((b) >> 20ULL)
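
/* Worked example: MB_TO_BYTES(128) == 128 << 20 == 0x8000000 bytes
 * (134217728), and BYTES_TO_MB(0x8000000) == 128.
 */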

#define HOST_LOW_GM_SIZE MB_TO_BYTES(128)
#define HOST_HIGH_GM_SIZE MB_TO_BYTES(384)
#define HOST_FENCE 4

/* Aperture/GM space definitions for GVT device */
#define gvt_aperture_sz(gvt)	  (gvt->dev_priv->ggtt.mappable_end)
#define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.mappable_base)

#define gvt_ggtt_gm_sz(gvt)	  (gvt->dev_priv->ggtt.base.total)
/* Size in bytes of the GGTT page table itself: one 8-byte PTE per 4KB page */
#define gvt_ggtt_sz(gvt) \
	((gvt->dev_priv->ggtt.base.total >> PAGE_SHIFT) << 3)
#define gvt_hidden_sz(gvt)	  (gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))

#define gvt_aperture_gmadr_base(gvt) (0)
#define gvt_aperture_gmadr_end(gvt) (gvt_aperture_gmadr_base(gvt) \
				     + gvt_aperture_sz(gvt) - 1)

#define gvt_hidden_gmadr_base(gvt) (gvt_aperture_gmadr_base(gvt) \
				    + gvt_aperture_sz(gvt))
#define gvt_hidden_gmadr_end(gvt) (gvt_hidden_gmadr_base(gvt) \
				   + gvt_hidden_sz(gvt) - 1)
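
/*
 * Example layout (assuming a 256MB aperture within a 4GB GGTT): the
 * aperture covers gmadr [0x00000000, 0x0fffffff] and the hidden (high,
 * non-mappable) range covers [0x10000000, 0xffffffff].
 */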

#define gvt_fence_sz(gvt) (gvt->dev_priv->num_fence_regs)

/* Aperture/GM space definitions for vGPU */
#define vgpu_aperture_offset(vgpu)	((vgpu)->gm.low_gm_node.start)
#define vgpu_hidden_offset(vgpu)	((vgpu)->gm.high_gm_node.start)
#define vgpu_aperture_sz(vgpu)		((vgpu)->gm.aperture_sz)
#define vgpu_hidden_sz(vgpu)		((vgpu)->gm.hidden_sz)

#define vgpu_aperture_pa_base(vgpu) \
	(gvt_aperture_pa_base(vgpu->gvt) + vgpu_aperture_offset(vgpu))

#define vgpu_ggtt_gm_sz(vgpu) ((vgpu)->gm.aperture_sz + (vgpu)->gm.hidden_sz)

#define vgpu_aperture_pa_end(vgpu) \
	(vgpu_aperture_pa_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)

#define vgpu_aperture_gmadr_base(vgpu) (vgpu_aperture_offset(vgpu))
#define vgpu_aperture_gmadr_end(vgpu) \
	(vgpu_aperture_gmadr_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)

#define vgpu_hidden_gmadr_base(vgpu) (vgpu_hidden_offset(vgpu))
#define vgpu_hidden_gmadr_end(vgpu) \
	(vgpu_hidden_gmadr_base(vgpu) + vgpu_hidden_sz(vgpu) - 1)

#define vgpu_fence_base(vgpu) (vgpu->fence.base)
#define vgpu_fence_sz(vgpu) (vgpu->fence.size)

struct intel_vgpu_creation_params {
	__u64 handle;
	__u64 low_gm_sz;  /* in MB */
	__u64 high_gm_sz; /* in MB */
	__u64 fence_sz;
	__s32 primary;
	__u64 vgpu_id;
};

int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
			      struct intel_vgpu_creation_params *param);
void intel_vgpu_free_resource(struct intel_vgpu *vgpu);
void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
	u32 fence, u64 value);

/* Macros for easily accessing vGPU virtual/shadow registers */
#define vgpu_vreg(vgpu, reg) \
	(*(u32 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_vreg8(vgpu, reg) \
	(*(u8 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_vreg16(vgpu, reg) \
	(*(u16 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_vreg64(vgpu, reg) \
	(*(u64 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_sreg(vgpu, reg) \
	(*(u32 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_sreg8(vgpu, reg) \
	(*(u8 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_sreg16(vgpu, reg) \
	(*(u16 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_sreg64(vgpu, reg) \
	(*(u64 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
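
/*
 * Illustrative use in an MMIO handler (hypothetical): mirror a guest
 * write into the virtual register file, then read it back:
 *
 *	vgpu_vreg(vgpu, offset) = *(u32 *)p_data;
 *	val = vgpu_vreg(vgpu, offset);
 *
 * The vreg copy holds guest-visible state; the sreg variants access the
 * shadow copy kept by GVT.
 */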

#define for_each_active_vgpu(gvt, vgpu, id) \
	idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \
		for_each_if(vgpu->active)
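
/*
 * Usage sketch: visit every active vGPU (callers are expected to hold
 * gvt->lock so the IDR is stable):
 *
 *	struct intel_vgpu *vgpu;
 *	int id;
 *
 *	for_each_active_vgpu(gvt, vgpu, id)
 *		do_per_vgpu_work(vgpu);
 *
 * do_per_vgpu_work() is a placeholder, not an API declared here.
 */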

static inline void intel_vgpu_write_pci_bar(struct intel_vgpu *vgpu,
					    u32 offset, u32 val, bool low)
{
	u32 *pval;

	/* BAR offset should be 32-bit aligned */
	offset = rounddown(offset, 4);
	pval = (u32 *)(vgpu_cfg_space(vgpu) + offset);

	if (low) {
		/*
		 * Only update bits 31-4; leave bits 3-0 (the BAR
		 * type/prefetch flags) unchanged.
		 */
		*pval = (val & GENMASK(31, 4)) | (*pval & GENMASK(3, 0));
	} else {
		*pval = val;
	}
}
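
/*
 * Context for the masking above: during the PCI BAR sizing handshake a
 * guest writes all 1s to a BAR and reads it back; the zeroed low bits of
 * the returned value encode the BAR size (e.g. a 16MB memory BAR reads
 * back as 0xff000000 plus its flag bits).  Preserving bits 3-0 keeps the
 * BAR's type/prefetch flags intact through that handshake.
 */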

int intel_gvt_init_vgpu_types(struct intel_gvt *gvt);
void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt);

struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
					 struct intel_vgpu_type *type);
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);

/* Helpers for validating guest graphics memory (GM) addresses */
#define vgpu_gmadr_is_aperture(vgpu, gmadr) \
	((gmadr >= vgpu_aperture_gmadr_base(vgpu)) && \
	 (gmadr <= vgpu_aperture_gmadr_end(vgpu)))

#define vgpu_gmadr_is_hidden(vgpu, gmadr) \
	((gmadr >= vgpu_hidden_gmadr_base(vgpu)) && \
	 (gmadr <= vgpu_hidden_gmadr_end(vgpu)))

#define vgpu_gmadr_is_valid(vgpu, gmadr) \
	(vgpu_gmadr_is_aperture(vgpu, gmadr) || \
	 vgpu_gmadr_is_hidden(vgpu, gmadr))

#define gvt_gmadr_is_aperture(gvt, gmadr) \
	((gmadr >= gvt_aperture_gmadr_base(gvt)) && \
	 (gmadr <= gvt_aperture_gmadr_end(gvt)))

#define gvt_gmadr_is_hidden(gvt, gmadr) \
	((gmadr >= gvt_hidden_gmadr_base(gvt)) && \
	 (gmadr <= gvt_hidden_gmadr_end(gvt)))

#define gvt_gmadr_is_valid(gvt, gmadr) \
	(gvt_gmadr_is_aperture(gvt, gmadr) || \
	 gvt_gmadr_is_hidden(gvt, gmadr))
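
/*
 * Typical guard before translating a guest graphics address (sketch):
 *
 *	if (!vgpu_gmadr_is_valid(vgpu, g_addr))
 *		return -EINVAL;
 */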

bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size);
int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr);
int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr);
int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
			     unsigned long *h_index);
int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
			     unsigned long *g_index);

int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes);

int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes);

void intel_gvt_clean_opregion(struct intel_gvt *gvt);
int intel_gvt_init_opregion(struct intel_gvt *gvt);

void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu);
int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa);

int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
int setup_vgpu_mmio(struct intel_vgpu *vgpu);
void populate_pvinfo_page(struct intel_vgpu *vgpu);

struct intel_gvt_ops {
	int (*emulate_cfg_read)(struct intel_vgpu *, unsigned int, void *,
				unsigned int);
	int (*emulate_cfg_write)(struct intel_vgpu *, unsigned int, void *,
				unsigned int);
	int (*emulate_mmio_read)(struct intel_vgpu *, u64, void *,
				unsigned int);
	int (*emulate_mmio_write)(struct intel_vgpu *, u64, void *,
				unsigned int);
	struct intel_vgpu *(*vgpu_create)(struct intel_gvt *,
				struct intel_vgpu_type *);
	void (*vgpu_destroy)(struct intel_vgpu *);
	void (*vgpu_reset)(struct intel_vgpu *);
};
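
/*
 * This ops table is handed to the hypervisor-specific (MPT) modules,
 * which call back into GVT through it.  Hypothetical dispatch from such
 * a module:
 *
 *	const struct intel_gvt_ops *ops;  (set up at host initialization)
 *	ret = ops->emulate_mmio_read(vgpu, gpa, buf, len);
 */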

#include "mpt.h"

#endif /* _GVT_H_ */