// SPDX-License-Identifier: GPL-2.0-only
/*
 * VFIO PCI Intel Graphics support
 *
 * Copyright (C) 2016 Red Hat, Inc.  All rights reserved.
 *	Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * Register a device specific region through which to provide read-only
 * access to the Intel IGD opregion.  The register defining the opregion
 * address is also virtualized to prevent user modification.
 */

#include <linux/io.h>
#include <linux/pci.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>

#include <linux/vfio_pci_core.h>

#define OPREGION_SIGNATURE	"IntelGraphicsMem"
#define OPREGION_SIZE		(8 * 1024)
#define OPREGION_PCI_ADDR	0xfc	/* ASLS register in IGD config space */

/* Offsets into the OpRegion */
#define OPREGION_RVDA		0x3ba	/* Raw VBT data address (2.0: absolute; 2.1+: relative) */
#define OPREGION_RVDS		0x3c2	/* Raw VBT data size */
#define OPREGION_VERSION	0x16	/* OpRegion version (major.minor) */

struct igd_opregion_vbt {
	void *opregion;		/* Mapping of the 8KB OpRegion */
	void *vbt_ex;		/* Mapping of the extended VBT, NULL if absent */
};

/**
 * igd_opregion_shift_copy() - Copy an OpRegion chunk to the user buffer and shift position.
 * @dst: User buffer ptr to copy to.
 * @off: Offset into the user buffer. Increased by @bytes on return.
 * @src: Source buffer to copy from.
 * @pos: Current position within the virtual OpRegion. Increased by @bytes on return.
 * @remaining: Bytes left to copy. Decreased by @bytes on return.
 * @bytes: Number of bytes to copy and to adjust @off, @pos and @remaining by.
 *
 * Copy a chunk of the OpRegion from the given source and shift the offsets.
 *
 * Return: 0 on success, -EFAULT otherwise.
 */
static inline unsigned long igd_opregion_shift_copy(char __user *dst,
						    loff_t *off,
						    void *src,
						    loff_t *pos,
						    size_t *remaining,
						    size_t bytes)
{
	if (copy_to_user(dst + (*off), src, bytes))
		return -EFAULT;

	*off += bytes;
	*pos += bytes;
	*remaining -= bytes;

	return 0;
}

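/*
 * Read handler for the virtual OpRegion region.  The read is assembled from
 * up to five segments so that the version and RVDA fields can be patched on
 * the fly: the bytes before the version field, the (possibly patched)
 * version itself, the bytes up to RVDA, the (possibly rewritten) RVDA, and
 * the remainder of the OpRegion followed by the extended VBT.
 */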
static ssize_t vfio_pci_igd_rw(struct vfio_pci_core_device *vdev,
			       char __user *buf, size_t count, loff_t *ppos,
			       bool iswrite)
{
	unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS;
	struct igd_opregion_vbt *opregionvbt = vdev->region[i].data;
	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK, off = 0;
	size_t remaining;

	if (pos >= vdev->region[i].size || iswrite)
		return -EINVAL;

	count = min_t(size_t, count, vdev->region[i].size - pos);
	remaining = count;

	/* Copy until OpRegion version */
	if (remaining && pos < OPREGION_VERSION) {
		size_t bytes = min_t(size_t, remaining, OPREGION_VERSION - pos);

		if (igd_opregion_shift_copy(buf, &off,
					    opregionvbt->opregion + pos, &pos,
					    &remaining, bytes))
			return -EFAULT;
	}

	/* Copy patched (if necessary) OpRegion version */
	if (remaining && pos < OPREGION_VERSION + sizeof(__le16)) {
		size_t bytes = min_t(size_t, remaining,
				     OPREGION_VERSION + sizeof(__le16) - pos);
		__le16 version = *(__le16 *)(opregionvbt->opregion +
					     OPREGION_VERSION);

		/* Patch to 2.1 if OpRegion 2.0 has extended VBT */
		if (le16_to_cpu(version) == 0x0200 && opregionvbt->vbt_ex)
			version = cpu_to_le16(0x0201);

		/* Cast to u8 * so a partial read indexes bytes, not __le16s */
		if (igd_opregion_shift_copy(buf, &off,
					    (u8 *)&version + (pos - OPREGION_VERSION),
					    &pos, &remaining, bytes))
			return -EFAULT;
	}

	/* Copy until RVDA */
	if (remaining && pos < OPREGION_RVDA) {
		size_t bytes = min_t(size_t, remaining, OPREGION_RVDA - pos);

		if (igd_opregion_shift_copy(buf, &off,
					    opregionvbt->opregion + pos, &pos,
					    &remaining, bytes))
			return -EFAULT;
	}

	/* Copy modified (if necessary) RVDA */
	if (remaining && pos < OPREGION_RVDA + sizeof(__le64)) {
		size_t bytes = min_t(size_t, remaining,
				     OPREGION_RVDA + sizeof(__le64) - pos);
		__le64 rvda = cpu_to_le64(opregionvbt->vbt_ex ?
					  OPREGION_SIZE : 0);

		/* Cast to u8 * so a partial read indexes bytes, not __le64s */
		if (igd_opregion_shift_copy(buf, &off,
					    (u8 *)&rvda + (pos - OPREGION_RVDA),
					    &pos, &remaining, bytes))
			return -EFAULT;
	}

	/* Copy the rest of OpRegion */
	if (remaining && pos < OPREGION_SIZE) {
		size_t bytes = min_t(size_t, remaining, OPREGION_SIZE - pos);

		if (igd_opregion_shift_copy(buf, &off,
					    opregionvbt->opregion + pos, &pos,
					    &remaining, bytes))
			return -EFAULT;
	}

	/* Copy the extended VBT, if it exists */
	if (remaining &&
	    copy_to_user(buf + off, opregionvbt->vbt_ex + (pos - OPREGION_SIZE),
			 remaining))
		return -EFAULT;

	*ppos += count;

	return count;
}

static void vfio_pci_igd_release(struct vfio_pci_core_device *vdev,
				 struct vfio_pci_region *region)
{
	struct igd_opregion_vbt *opregionvbt = region->data;

	if (opregionvbt->vbt_ex)
		memunmap(opregionvbt->vbt_ex);

	memunmap(opregionvbt->opregion);
	kfree(opregionvbt);
}

static const struct vfio_pci_regops vfio_pci_igd_regops = {
	.rw		= vfio_pci_igd_rw,
	.release	= vfio_pci_igd_release,
};

static int vfio_pci_igd_opregion_init(struct vfio_pci_core_device *vdev)
{
	__le32 *dwordp = (__le32 *)(vdev->vconfig + OPREGION_PCI_ADDR);
	u32 addr, size;
	struct igd_opregion_vbt *opregionvbt;
	int ret;
	u16 version;

	ret = pci_read_config_dword(vdev->pdev, OPREGION_PCI_ADDR, &addr);
	if (ret)
		return ret;

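	/* The OpRegion address (ASLS) is unprogrammed (0) or invalid (all ones) */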
	if (!addr || !(~addr))
		return -ENODEV;

	opregionvbt = kzalloc(sizeof(*opregionvbt), GFP_KERNEL);
	if (!opregionvbt)
		return -ENOMEM;

	opregionvbt->opregion = memremap(addr, OPREGION_SIZE, MEMREMAP_WB);
	if (!opregionvbt->opregion) {
		kfree(opregionvbt);
		return -ENOMEM;
	}

	if (memcmp(opregionvbt->opregion, OPREGION_SIGNATURE, 16)) {
		memunmap(opregionvbt->opregion);
		kfree(opregionvbt);
		return -EINVAL;
	}

	size = le32_to_cpu(*(__le32 *)(opregionvbt->opregion + 16));
	if (!size) {
		memunmap(opregionvbt->opregion);
		kfree(opregionvbt);
		return -EINVAL;
	}

	size *= 1024; /* The size field of the header is in KB */

	/*
	 * OpRegion and VBT:
	 * When the VBT data doesn't exceed 6KB, it is stored in Mailbox #4.
	 * When the VBT data exceeds 6KB, Mailbox #4 is no longer large enough
	 * to hold it, so OpRegion 2.0 introduced the Extended VBT region,
	 * along with RVDA/RVDS to define the extended VBT data location and
	 * size.
	 * OpRegion 2.0: RVDA defines the absolute physical address of the
	 *   extended VBT data, RVDS defines the VBT data size.
	 * OpRegion 2.1 and above: RVDA defines the address of the extended
	 *   VBT data relative to the OpRegion base, RVDS defines the VBT data
	 *   size.
	 *
	 * Since the RVDA definition is the only difference between 2.0 and
	 * 2.1, exposing the OpRegion and VBT as one contiguous range makes it
	 * possible to support a non-contiguous VBT through a single vfio
	 * region. From the r/w ops' view, only a contiguous VBT following the
	 * OpRegion with version 2.1+ is exposed, regardless of whether the
	 * host OpRegion is 2.0 or a non-contiguous 2.1+. The r/w ops shift
	 * the offset into the VBT on the fly so that data at the correct
	 * position is returned to the requester.
	 */
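	/*
	 * Example: a host OpRegion 2.0 with a 4KB extended VBT is exposed to
	 * the user as a single 12KB region: the 8KB OpRegion (version patched
	 * to 2.1, RVDA rewritten to 0x2000, i.e. relative to the OpRegion
	 * base) immediately followed by the 4KB VBT data.
	 */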
	version = le16_to_cpu(*(__le16 *)(opregionvbt->opregion +
					  OPREGION_VERSION));
	if (version >= 0x0200) {
		u64 rvda = le64_to_cpu(*(__le64 *)(opregionvbt->opregion +
						   OPREGION_RVDA));
		u32 rvds = le32_to_cpu(*(__le32 *)(opregionvbt->opregion +
						   OPREGION_RVDS));

		/* The extended VBT is valid only when RVDA/RVDS are non-zero */
		if (rvda && rvds) {
			u64 vbt_addr;

			size += rvds;

			/*
			 * Extended VBT location by RVDA:
			 * Absolute physical addr for 2.0.
			 * Relative addr to OpRegion header for 2.1+.
			 * Keep @addr intact: the vconfig fill below must
			 * preserve the hw ASLS value.
			 */
			if (version == 0x0200)
				vbt_addr = rvda;
			else
				vbt_addr = addr + rvda;

			opregionvbt->vbt_ex = memremap(vbt_addr, rvds,
						       MEMREMAP_WB);
			if (!opregionvbt->vbt_ex) {
				memunmap(opregionvbt->opregion);
				kfree(opregionvbt);
				return -ENOMEM;
			}
		}
	}

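	/* Expose the OpRegion and any extended VBT as one read-only region */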
	ret = vfio_pci_register_dev_region(vdev,
		PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
		VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION, &vfio_pci_igd_regops,
		size, VFIO_REGION_INFO_FLAG_READ, opregionvbt);
	if (ret) {
		if (opregionvbt->vbt_ex)
			memunmap(opregionvbt->vbt_ex);

		memunmap(opregionvbt->opregion);
		kfree(opregionvbt);
		return ret;
	}

	/*
	 * Fill vconfig with the hw value and virtualize the register: user
	 * reads are serviced from vconfig and user writes never reach the
	 * hardware.
	 */
	*dwordp = cpu_to_le32(addr);
	memset(vdev->pci_config_map + OPREGION_PCI_ADDR,
	       PCI_CAP_ID_INVALID_VIRT, 4);

	return ret;
}

static ssize_t vfio_pci_igd_cfg_rw(struct vfio_pci_core_device *vdev,
				   char __user *buf, size_t count, loff_t *ppos,
				   bool iswrite)
{
	unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS;
	struct pci_dev *pdev = vdev->region[i].data;
	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
	size_t size;
	int ret;

	if (pos >= vdev->region[i].size || iswrite)
		return -EINVAL;

	size = count = min(count, (size_t)(vdev->region[i].size - pos));

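	/*
	 * Config space is read with naturally aligned accesses: a leading
	 * byte and/or word to reach dword alignment, dwords for the bulk,
	 * then a trailing word and/or byte for what remains.
	 */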
	if ((pos & 1) && size) {
		u8 val;

		ret = pci_user_read_config_byte(pdev, pos, &val);
		if (ret)
			return ret;

		if (copy_to_user(buf + count - size, &val, 1))
			return -EFAULT;

		pos++;
		size--;
	}

	if ((pos & 3) && size > 2) {
		u16 val;
		__le16 lval;

		ret = pci_user_read_config_word(pdev, pos, &val);
		if (ret)
			return ret;

		lval = cpu_to_le16(val);
		if (copy_to_user(buf + count - size, &lval, 2))
			return -EFAULT;

		pos += 2;
		size -= 2;
	}

	while (size > 3) {
		u32 val;
		__le32 lval;

		ret = pci_user_read_config_dword(pdev, pos, &val);
		if (ret)
			return ret;

		lval = cpu_to_le32(val);
		if (copy_to_user(buf + count - size, &lval, 4))
			return -EFAULT;

		pos += 4;
		size -= 4;
	}

	while (size >= 2) {
		u16 val;
		__le16 lval;

		ret = pci_user_read_config_word(pdev, pos, &val);
		if (ret)
			return ret;

		lval = cpu_to_le16(val);
		if (copy_to_user(buf + count - size, &lval, 2))
			return -EFAULT;

		pos += 2;
		size -= 2;
	}

	while (size) {
		u8 val;

		ret = pci_user_read_config_byte(pdev, pos, &val);
		if (ret)
			return ret;

		if (copy_to_user(buf + count - size, &val, 1))
			return -EFAULT;

		pos++;
		size--;
	}

	*ppos += count;

	return count;
}

static void vfio_pci_igd_cfg_release(struct vfio_pci_core_device *vdev,
				     struct vfio_pci_region *region)
{
	struct pci_dev *pdev = region->data;

	pci_dev_put(pdev);
}

static const struct vfio_pci_regops vfio_pci_igd_cfg_regops = {
	.rw		= vfio_pci_igd_cfg_rw,
	.release	= vfio_pci_igd_cfg_release,
};

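/*
 * The guest i915 driver is expected to read config space of the host bridge
 * (00:00.0) and the LPC/ISA bridge (00:1f.0) at their fixed PCI addresses,
 * so expose read-only views of both as device regions.
 */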
static int vfio_pci_igd_cfg_init(struct vfio_pci_core_device *vdev)
{
	struct pci_dev *host_bridge, *lpc_bridge;
	int ret;

	host_bridge = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
	if (!host_bridge)
		return -ENODEV;

	if (host_bridge->vendor != PCI_VENDOR_ID_INTEL ||
	    host_bridge->class != (PCI_CLASS_BRIDGE_HOST << 8)) {
		pci_dev_put(host_bridge);
		return -EINVAL;
	}

	ret = vfio_pci_register_dev_region(vdev,
		PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
		VFIO_REGION_SUBTYPE_INTEL_IGD_HOST_CFG,
		&vfio_pci_igd_cfg_regops, host_bridge->cfg_size,
		VFIO_REGION_INFO_FLAG_READ, host_bridge);
	if (ret) {
		pci_dev_put(host_bridge);
		return ret;
	}

	lpc_bridge = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0x1f, 0));
	if (!lpc_bridge)
		return -ENODEV;

	if (lpc_bridge->vendor != PCI_VENDOR_ID_INTEL ||
	    lpc_bridge->class != (PCI_CLASS_BRIDGE_ISA << 8)) {
		pci_dev_put(lpc_bridge);
		return -EINVAL;
	}

	ret = vfio_pci_register_dev_region(vdev,
		PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
		VFIO_REGION_SUBTYPE_INTEL_IGD_LPC_CFG,
		&vfio_pci_igd_cfg_regops, lpc_bridge->cfg_size,
		VFIO_REGION_INFO_FLAG_READ, lpc_bridge);
	if (ret) {
		pci_dev_put(lpc_bridge);
		return ret;
	}

	return 0;
}
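/*
 * Set up all IGD-specific regions: the virtual OpRegion plus the host and
 * LPC bridge config views that the guest driver expects.
 */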
int vfio_pci_igd_init(struct vfio_pci_core_device *vdev)
{
	int ret;

	ret = vfio_pci_igd_opregion_init(vdev);
	if (ret)
		return ret;

	ret = vfio_pci_igd_cfg_init(vdev);
	if (ret)
		return ret;

	return 0;
}