// SPDX-License-Identifier: GPL-2.0-only
/*
 * VFIO PCI Intel Graphics support
 *
 * Copyright (C) 2016 Red Hat, Inc.  All rights reserved.
 *	Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * Register a device specific region through which to provide read-only
 * access to the Intel IGD opregion.  The register defining the opregion
 * address is also virtualized to prevent user modification.
 */
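
/*
 * Userspace discovers these device specific regions by walking region
 * indexes past VFIO_PCI_NUM_REGIONS with VFIO_DEVICE_GET_REGION_INFO
 * and matching the Intel vendor type and subtype advertised through
 * the VFIO_REGION_INFO_CAP_TYPE capability.
 */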

#include <linux/io.h>
#include <linux/pci.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>

#include "vfio_pci_private.h"

#define OPREGION_SIGNATURE	"IntelGraphicsMem"
#define OPREGION_SIZE		(8 * 1024)
#define OPREGION_PCI_ADDR	0xfc

#define OPREGION_RVDA		0x3ba
#define OPREGION_RVDS		0x3c2
#define OPREGION_VERSION	0x16

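/*
 * Read handler for the opregion device specific region.  The opregion
 * is mapped into kernel memory at init time and exposed to the user as
 * a read-only region; writes are rejected.
 */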
static size_t vfio_pci_igd_rw(struct vfio_pci_device *vdev, char __user *buf,
			      size_t count, loff_t *ppos, bool iswrite)
{
	unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS;
	void *base = vdev->region[i].data;
	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;

	if (pos >= vdev->region[i].size || iswrite)
		return -EINVAL;

	count = min(count, (size_t)(vdev->region[i].size - pos));

	if (copy_to_user(buf, base + pos, count))
		return -EFAULT;

	*ppos += count;

	return count;
}

static void vfio_pci_igd_release(struct vfio_pci_device *vdev,
				 struct vfio_pci_region *region)
{
	memunmap(region->data);
}

static const struct vfio_pci_regops vfio_pci_igd_regops = {
	.rw		= vfio_pci_igd_rw,
	.release	= vfio_pci_igd_release,
};

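/*
 * The opregion lives in system memory; its physical address is
 * published through the ASLS dword at config offset 0xfc.  Map it,
 * validate it, and register it as a read-only device specific region.
 */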
static int vfio_pci_igd_opregion_init(struct vfio_pci_device *vdev)
{
	__le32 *dwordp = (__le32 *)(vdev->vconfig + OPREGION_PCI_ADDR);
	u32 addr, size;
	void *base;
	int ret;
	u16 version;

	ret = pci_read_config_dword(vdev->pdev, OPREGION_PCI_ADDR, &addr);
	if (ret)
		return ret;

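	/* An ASLS value of zero or all ones means no usable opregion. */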
	if (!addr || !(~addr))
		return -ENODEV;

	base = memremap(addr, OPREGION_SIZE, MEMREMAP_WB);
	if (!base)
		return -ENOMEM;

	if (memcmp(base, OPREGION_SIGNATURE, 16)) {
		memunmap(base);
		return -EINVAL;
	}

	size = le32_to_cpu(*(__le32 *)(base + 16));
	if (!size) {
		memunmap(base);
		return -EINVAL;
	}

	size *= 1024; /* opregion size field is in KB */

	/*
	 * Support opregion v2.1+.
	 * When the VBT data exceeds 6KB and cannot fit within mailbox #4,
	 * an Extended VBT region placed after the opregion is used to hold
	 * the VBT data.  The RVDA (Relative Address of VBT Data from
	 * Opregion Base) and RVDS (Raw VBT Data Size) opregion fields
	 * record its offset from the opregion base and its size.
	 * RVDA/RVDS are not defined before opregion 2.0.
	 *
	 * opregion 2.1+: RVDA is an unsigned offset relative to the
	 * opregion base and must point to the end of the opregion;
	 * otherwise we would expose everything between the opregion and
	 * the VBT to userspace, which is not safe.  RVDS is the VBT data
	 * size in bytes.
	 *
	 * opregion 2.0: RVDA is the physical address of the VBT.  A host
	 * physical address cannot be used directly in a guest and should
	 * not be available to the end user, so v2.0 with an extended VBT
	 * is not supported.
	 */
	version = le16_to_cpu(*(__le16 *)(base + OPREGION_VERSION));
	if (version >= 0x0200) {
		u64 rvda;
		u32 rvds;

		rvda = le64_to_cpu(*(__le64 *)(base + OPREGION_RVDA));
		rvds = le32_to_cpu(*(__le32 *)(base + OPREGION_RVDS));
		if (rvda && rvds) {
			/* no support for opregion v2.0 with a physical VBT address */
			if (version == 0x0200) {
				memunmap(base);
				pci_err(vdev->pdev,
					"IGD assignment does not support opregion v2.0 with an extended VBT region\n");
				return -EINVAL;
			}

			if (rvda != size) {
				memunmap(base);
				pci_err(vdev->pdev,
					"Extended VBT does not follow opregion on version 0x%04x\n",
					version);
				return -EINVAL;
			}

			/* region size for opregion v2.1+: opregion plus VBT size. */
			size += rvds;
		}
	}

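	/*
	 * The opregion was initially mapped at the fixed 8KB size; remap
	 * if the reported size, possibly including the extended VBT,
	 * differs.
	 */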
	if (size != OPREGION_SIZE) {
		memunmap(base);
		base = memremap(addr, size, MEMREMAP_WB);
		if (!base)
			return -ENOMEM;
	}

	ret = vfio_pci_register_dev_region(vdev,
		PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
		VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION,
		&vfio_pci_igd_regops, size, VFIO_REGION_INFO_FLAG_READ, base);
	if (ret) {
		memunmap(base);
		return ret;
	}

	/*
	 * Fill vconfig with the hw value and virtualize the register so
	 * that userspace reads the real address while writes never reach
	 * the hardware register.
	 */
	*dwordp = cpu_to_le32(addr);
	memset(vdev->pci_config_map + OPREGION_PCI_ADDR,
	       PCI_CAP_ID_INVALID_VIRT, 4);

	return ret;
}

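/*
 * Read handler for the host/LPC bridge config space regions.  Reads are
 * serviced from the live device via the pci_user_* accessors, split on
 * natural alignment boundaries so that each config access is valid.
 */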
static size_t vfio_pci_igd_cfg_rw(struct vfio_pci_device *vdev,
				  char __user *buf, size_t count, loff_t *ppos,
				  bool iswrite)
{
	unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS;
	struct pci_dev *pdev = vdev->region[i].data;
	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
	size_t size;
	int ret;

	if (pos >= vdev->region[i].size || iswrite)
		return -EINVAL;

	size = count = min(count, (size_t)(vdev->region[i].size - pos));

	if ((pos & 1) && size) {
		u8 val;

		ret = pci_user_read_config_byte(pdev, pos, &val);
		if (ret)
			return ret;

		if (copy_to_user(buf + count - size, &val, 1))
			return -EFAULT;

		pos++;
		size--;
	}

	if ((pos & 3) && size > 2) {
		u16 val;

		ret = pci_user_read_config_word(pdev, pos, &val);
		if (ret)
			return ret;

		val = cpu_to_le16(val);
		if (copy_to_user(buf + count - size, &val, 2))
			return -EFAULT;

		pos += 2;
		size -= 2;
	}

	while (size > 3) {
		u32 val;

		ret = pci_user_read_config_dword(pdev, pos, &val);
		if (ret)
			return ret;

		val = cpu_to_le32(val);
		if (copy_to_user(buf + count - size, &val, 4))
			return -EFAULT;

		pos += 4;
		size -= 4;
	}

	while (size >= 2) {
		u16 val;

		ret = pci_user_read_config_word(pdev, pos, &val);
		if (ret)
			return ret;

		val = cpu_to_le16(val);
		if (copy_to_user(buf + count - size, &val, 2))
			return -EFAULT;

		pos += 2;
		size -= 2;
	}

	while (size) {
		u8 val;

		ret = pci_user_read_config_byte(pdev, pos, &val);
		if (ret)
			return ret;

		if (copy_to_user(buf + count - size, &val, 1))
			return -EFAULT;

		pos++;
		size--;
	}

	*ppos += count;

	return count;
}

static void vfio_pci_igd_cfg_release(struct vfio_pci_device *vdev,
				     struct vfio_pci_region *region)
{
	struct pci_dev *pdev = region->data;

	pci_dev_put(pdev);
}

static const struct vfio_pci_regops vfio_pci_igd_cfg_regops = {
	.rw		= vfio_pci_igd_cfg_rw,
	.release	= vfio_pci_igd_cfg_release,
};

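/*
 * Expose the config space of the platform's host bridge (00:00.0) and
 * LPC bridge (00:1f.0) as read-only device specific regions, which IGD
 * drivers probe to identify the platform.
 */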
static int vfio_pci_igd_cfg_init(struct vfio_pci_device *vdev)
{
	struct pci_dev *host_bridge, *lpc_bridge;
	int ret;

	host_bridge = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
	if (!host_bridge)
		return -ENODEV;

	if (host_bridge->vendor != PCI_VENDOR_ID_INTEL ||
	    host_bridge->class != (PCI_CLASS_BRIDGE_HOST << 8)) {
		pci_dev_put(host_bridge);
		return -EINVAL;
	}

	ret = vfio_pci_register_dev_region(vdev,
		PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
		VFIO_REGION_SUBTYPE_INTEL_IGD_HOST_CFG,
		&vfio_pci_igd_cfg_regops, host_bridge->cfg_size,
		VFIO_REGION_INFO_FLAG_READ, host_bridge);
	if (ret) {
		pci_dev_put(host_bridge);
		return ret;
	}

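	/* Intel platforms place the LPC/ISA bridge at function 00:1f.0. */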
	lpc_bridge = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0x1f, 0));
	if (!lpc_bridge)
		return -ENODEV;

	if (lpc_bridge->vendor != PCI_VENDOR_ID_INTEL ||
	    lpc_bridge->class != (PCI_CLASS_BRIDGE_ISA << 8)) {
		pci_dev_put(lpc_bridge);
		return -EINVAL;
	}

	ret = vfio_pci_register_dev_region(vdev,
		PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
		VFIO_REGION_SUBTYPE_INTEL_IGD_LPC_CFG,
		&vfio_pci_igd_cfg_regops, lpc_bridge->cfg_size,
		VFIO_REGION_INFO_FLAG_READ, lpc_bridge);
	if (ret) {
		pci_dev_put(lpc_bridge);
		return ret;
	}

	return 0;
}

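/*
 * Entry point for IGD device specific region setup: register the
 * opregion region, then the host and LPC bridge config regions.
 * Registered regions are torn down through their release callbacks.
 */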
int vfio_pci_igd_init(struct vfio_pci_device *vdev)
{
	int ret;

	ret = vfio_pci_igd_opregion_init(vdev);
	if (ret)
		return ret;

	ret = vfio_pci_igd_cfg_init(vdev);
	if (ret)
		return ret;

	return 0;
}