xref: /openbmc/linux/drivers/gpu/drm/i915/gvt/opregion.c (revision 82e6fdd6)
/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/acpi.h>
#include "i915_drv.h"
#include "gvt.h"

/*
 * Note: defining _INTEL_BIOS_PRIVATE here is only for GVT-g virtual VBT
 * generation; other code must not use the private VBT definitions this way.
 */
#define _INTEL_BIOS_PRIVATE
#include "intel_vbt_defs.h"

#define OPREGION_SIGNATURE "IntelGraphicsMem"
#define MBOX_VBT      (1<<3)

/* device handle */
#define DEVICE_TYPE_CRT    0x01
#define DEVICE_TYPE_EFP1   0x04
#define DEVICE_TYPE_EFP2   0x40
#define DEVICE_TYPE_EFP3   0x20
#define DEVICE_TYPE_EFP4   0x10

#define DEV_SIZE	38

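/*
 * OpRegion header exposed to the guest. GVT-g fills in only the signature,
 * size, OpRegion version and the VBT mailbox flag (see
 * intel_vgpu_init_opregion() below).
 */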
struct opregion_header {
	u8 signature[16];
	u32 size;
	u32 opregion_ver;
	u8 bios_ver[32];
	u8 vbios_ver[16];
	u8 driver_ver[16];
	u32 mboxes;
	u32 driver_model;
	u32 pcon;
	u8 dver[32];
	u8 rsvd[124];
} __packed;

struct bdb_data_header {
	u8 id;
	u16 size; /* data size */
} __packed;

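/*
 * Child device config entry for an EFP (external display) port. The numeric
 * comments note the BDB version that introduced each field.
 */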
struct efp_child_device_config {
	u16 handle;
	u16 device_type;
	u16 device_class;
	u8 i2c_speed;
	u8 dp_onboard_redriver; /* 158 */
	u8 dp_ondock_redriver; /* 158 */
	u8 hdmi_level_shifter_value:4; /* 169 */
	u8 hdmi_max_data_rate:4; /* 204 */
	u16 dtd_buf_ptr; /* 161 */
	u8 edidless_efp:1; /* 161 */
	u8 compression_enable:1; /* 198 */
	u8 compression_method:1; /* 198 */
	u8 ganged_edp:1; /* 202 */
	u8 skip0:4;
	u8 compression_structure_index:4; /* 198 */
	u8 skip1:4;
	u8 slave_port; /* 202 */
	u8 skip2;
	u8 dvo_port;
	u8 i2c_pin; /* for add-in card */
	u8 slave_addr; /* for add-in card */
	u8 ddc_pin;
	u16 edid_ptr;
	u8 dvo_config;
	u8 efp_docked_port:1; /* 158 */
	u8 lane_reversal:1; /* 184 */
	u8 onboard_lspcon:1; /* 192 */
	u8 iboost_enable:1; /* 196 */
	u8 hpd_invert:1; /* BXT 196 */
	u8 slip3:3;
	u8 hdmi_compat:1;
	u8 dp_compat:1;
	u8 tmds_compat:1;
	u8 skip4:5;
	u8 aux_channel;
	u8 dongle_detect;
	u8 pipe_cap:2;
	u8 sdvo_stall:1; /* 158 */
	u8 hpd_status:2;
	u8 integrated_encoder:1;
	u8 skip5:2;
	u8 dvo_wiring;
	u8 mipi_bridge_type; /* 171 */
	u16 device_class_ext;
	u8 dvo_function;
	u8 dp_usb_type_c:1; /* 195 */
	u8 skip6:7;
	u8 dp_usb_type_c_2x_gpio_index; /* 195 */
	u16 dp_usb_type_c_2x_gpio_pin; /* 195 */
	u8 iboost_dp:4; /* 196 */
	u8 iboost_hdmi:4; /* 196 */
} __packed;

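/*
 * Complete virtual VBT image that gets copied into the VBT mailbox of the
 * emulated OpRegion.
 */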
struct vbt {
	/* header->bdb_offset points to the bdb_header offset */
	struct vbt_header header;
	struct bdb_header bdb_header;

	struct bdb_data_header general_features_header;
	struct bdb_general_features general_features;

	struct bdb_data_header general_definitions_header;
	struct bdb_general_definitions general_definitions;

	struct efp_child_device_config child0;
	struct efp_child_device_config child1;
	struct efp_child_device_config child2;
	struct efp_child_device_config child3;

	struct bdb_data_header driver_features_header;
	struct bdb_driver_features driver_features;
};

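/*
 * Build a minimal virtual VBT: general features with CRT/TV support disabled,
 * general definitions carrying one DP child device per port (A-D), and driver
 * features with LVDS disabled.
 */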
static void virt_vbt_generation(struct vbt *v)
{
	int num_child;

	memset(v, 0, sizeof(struct vbt));

	v->header.signature[0] = '$';
	v->header.signature[1] = 'V';
	v->header.signature[2] = 'B';
	v->header.signature[3] = 'T';

	/* some features depend on the VBT version */
	v->header.version = 155;
	v->header.header_size = sizeof(v->header);
	v->header.vbt_size = sizeof(struct vbt) - sizeof(v->header);
	v->header.bdb_offset = offsetof(struct vbt, bdb_header);

	strcpy(&v->bdb_header.signature[0], "BIOS_DATA_BLOCK");
	v->bdb_header.version = 186; /* child_dev_size = 38 */
	v->bdb_header.header_size = sizeof(v->bdb_header);

	v->bdb_header.bdb_size = sizeof(struct vbt) - sizeof(struct vbt_header)
		- sizeof(struct bdb_header);

	/* general features */
	v->general_features_header.id = BDB_GENERAL_FEATURES;
	v->general_features_header.size = sizeof(struct bdb_general_features);
	v->general_features.int_crt_support = 0;
	v->general_features.int_tv_support = 0;

	/* child device */
	num_child = 4; /* each port has one child */
	v->general_definitions_header.id = BDB_GENERAL_DEFINITIONS;
	/* size will include child devices */
	v->general_definitions_header.size =
		sizeof(struct bdb_general_definitions) + num_child * DEV_SIZE;
	v->general_definitions.child_dev_size = DEV_SIZE;

	/* portA */
	v->child0.handle = DEVICE_TYPE_EFP1;
	v->child0.device_type = DEVICE_TYPE_DP;
	v->child0.dvo_port = DVO_PORT_DPA;
	v->child0.aux_channel = DP_AUX_A;
	v->child0.dp_compat = true;
	v->child0.integrated_encoder = true;

	/* portB */
	v->child1.handle = DEVICE_TYPE_EFP2;
	v->child1.device_type = DEVICE_TYPE_DP;
	v->child1.dvo_port = DVO_PORT_DPB;
	v->child1.aux_channel = DP_AUX_B;
	v->child1.dp_compat = true;
	v->child1.integrated_encoder = true;

	/* portC */
	v->child2.handle = DEVICE_TYPE_EFP3;
	v->child2.device_type = DEVICE_TYPE_DP;
	v->child2.dvo_port = DVO_PORT_DPC;
	v->child2.aux_channel = DP_AUX_C;
	v->child2.dp_compat = true;
	v->child2.integrated_encoder = true;

	/* portD */
	v->child3.handle = DEVICE_TYPE_EFP4;
	v->child3.device_type = DEVICE_TYPE_DP;
	v->child3.dvo_port = DVO_PORT_DPD;
	v->child3.aux_channel = DP_AUX_D;
	v->child3.dp_compat = true;
	v->child3.integrated_encoder = true;

	/* driver features */
	v->driver_features_header.id = BDB_DRIVER_FEATURES;
	v->driver_features_header.size = sizeof(struct bdb_driver_features);
	v->driver_features.lvds_config = BDB_DRIVER_FEATURE_NO_LVDS;
}

/**
 * intel_vgpu_init_opregion - initialize the data used to emulate the OpRegion
 * @vgpu: a vGPU
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_init_opregion(struct intel_vgpu *vgpu)
{
	u8 *buf;
	struct opregion_header *header;
	struct vbt v;
	const char opregion_signature[16] = OPREGION_SIGNATURE;

	gvt_dbg_core("init vgpu%d opregion\n", vgpu->id);
	vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_KERNEL |
			__GFP_ZERO,
			get_order(INTEL_GVT_OPREGION_SIZE));
	if (!vgpu_opregion(vgpu)->va) {
		gvt_err("fail to get memory for vgpu virt opregion\n");
		return -ENOMEM;
	}

	/* emulated opregion with VBT mailbox only */
	buf = (u8 *)vgpu_opregion(vgpu)->va;
	header = (struct opregion_header *)buf;
	memcpy(header->signature, opregion_signature,
	       sizeof(opregion_signature));
	header->size = 0x8;
	header->opregion_ver = 0x02000000;
	header->mboxes = MBOX_VBT;

	/*
	 * For an unknown reason the value in the LID field is incorrect,
	 * which blocks the Windows guest, so work around it by forcing the
	 * lid state to "OPEN".
	 */
	buf[INTEL_GVT_OPREGION_CLID] = 0x3;

	/* emulated vbt from virt vbt generation */
	virt_vbt_generation(&v);
	memcpy(buf + INTEL_GVT_OPREGION_VBT_OFFSET, &v, sizeof(struct vbt));

	return 0;
}

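/*
 * Map (map == true) or unmap the host pages backing the virtual OpRegion
 * into the guest at the GFNs recorded in vgpu_opregion()->gfn[]. Only used
 * on the Xen path.
 */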
static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
{
	u64 mfn;
	int i, ret;

	for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) {
		mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)->va
			+ i * PAGE_SIZE);
		if (mfn == INTEL_GVT_INVALID_ADDR) {
			gvt_vgpu_err("fail to get MFN from VA\n");
			return -EINVAL;
		}
		ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu,
				vgpu_opregion(vgpu)->gfn[i],
				mfn, 1, map);
		if (ret) {
			gvt_vgpu_err("fail to map GFN to MFN, errno: %d\n",
				ret);
			return ret;
		}
	}

	vgpu_opregion(vgpu)->mapped = map;

	return 0;
}

/**
 * intel_vgpu_opregion_base_write_handler - OpRegion base register write handler
 *
 * @vgpu: a vGPU
 * @gpa: guest physical address of opregion
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_opregion_base_write_handler(struct intel_vgpu *vgpu, u32 gpa)
{
	int i, ret = 0;

	gvt_dbg_core("emulate opregion from kernel\n");

	switch (intel_gvt_host.hypervisor_type) {
	case INTEL_GVT_HYPERVISOR_KVM:
		for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
			vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
		break;
	case INTEL_GVT_HYPERVISOR_XEN:
		/*
		 * A Windows guest on XenGT writes this register twice: once
		 * by the Xen hvmloader and once by the Windows graphics
		 * driver, so drop any existing mapping before remapping.
		 */
		if (vgpu_opregion(vgpu)->mapped)
			map_vgpu_opregion(vgpu, false);

		for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
			vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;

		ret = map_vgpu_opregion(vgpu, true);
		break;
	default:
		ret = -EINVAL;
		gvt_vgpu_err("not supported hypervisor\n");
	}

	return ret;
}

/**
 * intel_vgpu_clean_opregion - clean up the data used to emulate the OpRegion
 * @vgpu: a vGPU
 */
void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu)
{
	gvt_dbg_core("vgpu%d: clean vgpu opregion\n", vgpu->id);

	if (!vgpu_opregion(vgpu)->va)
		return;

	if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) {
		if (vgpu_opregion(vgpu)->mapped)
			map_vgpu_opregion(vgpu, false);
	} else if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_KVM) {
		/* Guest opregion is released by VFIO */
	}
	free_pages((unsigned long)vgpu_opregion(vgpu)->va,
		   get_order(INTEL_GVT_OPREGION_SIZE));

	vgpu_opregion(vgpu)->va = NULL;
}

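/*
 * Extract the function and subfunction codes from the SCIC mailbox value.
 */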
#define GVT_OPREGION_FUNC(scic)					\
	({							\
	 u32 __ret;						\
	 __ret = (scic & OPREGION_SCIC_FUNC_MASK) >>		\
	 OPREGION_SCIC_FUNC_SHIFT;				\
	 __ret;							\
	 })

#define GVT_OPREGION_SUBFUNC(scic)				\
	({							\
	 u32 __ret;						\
	 __ret = (scic & OPREGION_SCIC_SUBFUNC_MASK) >>		\
	 OPREGION_SCIC_SUBFUNC_SHIFT;				\
	 __ret;							\
	 })

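/*
 * Human-readable names for the SCI function and subfunction codes, used in
 * the log messages below.
 */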
static const char *opregion_func_name(u32 func)
{
	const char *name = NULL;

	switch (func) {
	case 0 ... 3:
	case 5:
	case 7 ... 15:
		name = "Reserved";
		break;

	case 4:
		name = "Get BIOS Data";
		break;

	case 6:
		name = "System BIOS Callbacks";
		break;

	default:
		name = "Unknown";
		break;
	}
	return name;
}

static const char *opregion_subfunc_name(u32 subfunc)
{
	const char *name = NULL;

	switch (subfunc) {
	case 0:
		name = "Supported Calls";
		break;

	case 1:
		name = "Requested Callbacks";
		break;

	case 2 ... 3:
	case 8 ... 9:
		name = "Reserved";
		break;

	case 5:
		name = "Boot Display";
		break;

	case 6:
		name = "TV-Standard/Video-Connector";
		break;

	case 7:
		name = "Internal Graphics";
		break;

	case 10:
		name = "Spread Spectrum Clocks";
		break;

	case 11:
		name = "Get AKSV";
		break;

	default:
		name = "Unknown";
		break;
	}
	return name;
}

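/*
 * Return true if the SCI request only queries capabilities: "Get BIOS Data"
 * supported calls or requested callbacks, or "System BIOS Callbacks"
 * supported calls.
 */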
static bool querying_capabilities(u32 scic)
{
	u32 func, subfunc;

	func = GVT_OPREGION_FUNC(scic);
	subfunc = GVT_OPREGION_SUBFUNC(scic);

	if ((func == INTEL_GVT_OPREGION_SCIC_F_GETBIOSDATA &&
		subfunc == INTEL_GVT_OPREGION_SCIC_SF_SUPPRTEDCALLS)
		|| (func == INTEL_GVT_OPREGION_SCIC_F_GETBIOSDATA &&
		 subfunc == INTEL_GVT_OPREGION_SCIC_SF_REQEUSTEDCALLBACKS)
		|| (func == INTEL_GVT_OPREGION_SCIC_F_GETBIOSCALLBACKS &&
		 subfunc == INTEL_GVT_OPREGION_SCIC_SF_SUPPRTEDCALLS)) {
		return true;
	}
	return false;
}

/**
 * intel_vgpu_emulate_opregion_request - emulate an OpRegion SCI request
 * @vgpu: a vGPU
 * @swsci: SWSCI request
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
{
	u32 scic, parm;
	u32 func, subfunc;
	u64 scic_pa = 0, parm_pa = 0;
	int ret;

	switch (intel_gvt_host.hypervisor_type) {
	case INTEL_GVT_HYPERVISOR_XEN:
		scic = *((u32 *)vgpu_opregion(vgpu)->va +
					INTEL_GVT_OPREGION_SCIC);
		parm = *((u32 *)vgpu_opregion(vgpu)->va +
					INTEL_GVT_OPREGION_PARM);
		break;
	case INTEL_GVT_HYPERVISOR_KVM:
		scic_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) +
					INTEL_GVT_OPREGION_SCIC;
		parm_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) +
					INTEL_GVT_OPREGION_PARM;

		ret = intel_gvt_hypervisor_read_gpa(vgpu, scic_pa,
						    &scic, sizeof(scic));
		if (ret) {
			gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n",
				ret, scic_pa, sizeof(scic));
			return ret;
		}

		ret = intel_gvt_hypervisor_read_gpa(vgpu, parm_pa,
						    &parm, sizeof(parm));
		if (ret) {
			gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n",
				ret, parm_pa, sizeof(parm));
			return ret;
		}

		break;
	default:
		gvt_vgpu_err("not supported hypervisor\n");
		return -EINVAL;
	}

	if (!(swsci & SWSCI_SCI_SELECT)) {
		gvt_vgpu_err("requesting SMI service\n");
		return 0;
	}
	/* ignore non 0->1 transitions */
	if ((vgpu_cfg_space(vgpu)[INTEL_GVT_PCI_SWSCI]
				& SWSCI_SCI_TRIGGER) ||
			!(swsci & SWSCI_SCI_TRIGGER)) {
		return 0;
	}

	func = GVT_OPREGION_FUNC(scic);
	subfunc = GVT_OPREGION_SUBFUNC(scic);
	if (!querying_capabilities(scic)) {
		gvt_vgpu_err("requesting runtime service: func \"%s\","
				" subfunc \"%s\"\n",
				opregion_func_name(func),
				opregion_subfunc_name(subfunc));
		/*
		 * emulate exit status of function call, '0' means
		 * "failure, generic, unsupported or unknown cause"
		 */
		scic &= ~OPREGION_SCIC_EXIT_MASK;
		goto out;
	}

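	/* Capability query: report an empty set of supported calls/callbacks. */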
	scic = 0;
	parm = 0;

out:
	switch (intel_gvt_host.hypervisor_type) {
	case INTEL_GVT_HYPERVISOR_XEN:
		*((u32 *)vgpu_opregion(vgpu)->va +
					INTEL_GVT_OPREGION_SCIC) = scic;
		*((u32 *)vgpu_opregion(vgpu)->va +
					INTEL_GVT_OPREGION_PARM) = parm;
		break;
	case INTEL_GVT_HYPERVISOR_KVM:
		ret = intel_gvt_hypervisor_write_gpa(vgpu, scic_pa,
						    &scic, sizeof(scic));
		if (ret) {
			gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n",
				ret, scic_pa, sizeof(scic));
			return ret;
		}

		ret = intel_gvt_hypervisor_write_gpa(vgpu, parm_pa,
						    &parm, sizeof(parm));
		if (ret) {
			gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n",
				ret, parm_pa, sizeof(parm));
			return ret;
		}

		break;
	default:
		gvt_vgpu_err("not supported hypervisor\n");
		return -EINVAL;
	}

	return 0;
}