// SPDX-License-Identifier: GPL-2.0-only
/*
 * VFIO PCI Intel Graphics support
 *
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 * Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * Register a device specific region through which to provide read-only
 * access to the Intel IGD opregion. The register defining the opregion
 * address is also virtualized to prevent user modification.
 */

#include <linux/io.h>
#include <linux/pci.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>

#include <linux/vfio_pci_core.h>

#define OPREGION_SIGNATURE	"IntelGraphicsMem"
#define OPREGION_SIZE		(8 * 1024)
#define OPREGION_PCI_ADDR	0xfc

#define OPREGION_RVDA		0x3ba
#define OPREGION_RVDS		0x3c2
#define OPREGION_VERSION	0x16

struct igd_opregion_vbt {
	void *opregion;
	void *vbt_ex;
};

/**
 * igd_opregion_shift_copy() - Copy OpRegion to user buffer and shift position.
 * @dst: User buffer to copy to.
 * @off: Offset into the user buffer. Increased by @bytes on return.
 * @src: Source buffer to copy from.
 * @pos: Current position in the virtual OpRegion. Increased by @bytes on
 *	 return.
 * @remaining: Bytes remaining to copy. Decreased by @bytes on return.
 * @bytes: Number of bytes to copy.
 *
 * Copy @bytes from @src into the user buffer at @off and advance @off, @pos
 * and @remaining accordingly.
 *
 * Return: 0 on success, -EFAULT otherwise.
 */
static inline unsigned long igd_opregion_shift_copy(char __user *dst,
						    loff_t *off,
						    void *src,
						    loff_t *pos,
						    size_t *remaining,
						    size_t bytes)
{
	if (copy_to_user(dst + (*off), src, bytes))
		return -EFAULT;

	*off += bytes;
	*pos += bytes;
	*remaining -= bytes;

	return 0;
}
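/*
 * Layout of the virtual OpRegion that the read handler below presents to the
 * user (a summary of the copy steps in vfio_pci_igd_rw(), not an additional
 * data structure; offsets are relative to the start of the device specific
 * region):
 *
 *   [0x000, OPREGION_VERSION)      header, copied verbatim from the host
 *   [OPREGION_VERSION, +2)         version, patched to 2.1 when a 2.0
 *                                  OpRegion carries an extended VBT
 *   [..., OPREGION_RVDA)           copied verbatim from the host
 *   [OPREGION_RVDA, +8)            RVDA, rewritten to OPREGION_SIZE (VBT
 *                                  appended below) or 0 (no extended VBT)
 *   [..., OPREGION_SIZE)           remainder of the OpRegion, verbatim
 *   [OPREGION_SIZE, region size)   extended VBT, if present
 */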
static ssize_t vfio_pci_igd_rw(struct vfio_pci_core_device *vdev,
			       char __user *buf, size_t count, loff_t *ppos,
			       bool iswrite)
{
	unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS;
	struct igd_opregion_vbt *opregionvbt = vdev->region[i].data;
	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK, off = 0;
	size_t remaining;

	if (pos >= vdev->region[i].size || iswrite)
		return -EINVAL;

	count = min_t(size_t, count, vdev->region[i].size - pos);
	remaining = count;

	/* Copy until OpRegion version */
	if (remaining && pos < OPREGION_VERSION) {
		size_t bytes = min_t(size_t, remaining, OPREGION_VERSION - pos);

		if (igd_opregion_shift_copy(buf, &off,
					    opregionvbt->opregion + pos, &pos,
					    &remaining, bytes))
			return -EFAULT;
	}

	/* Copy patched (if necessary) OpRegion version */
	if (remaining && pos < OPREGION_VERSION + sizeof(__le16)) {
		size_t bytes = min_t(size_t, remaining,
				     OPREGION_VERSION + sizeof(__le16) - pos);
		__le16 version = *(__le16 *)(opregionvbt->opregion +
					     OPREGION_VERSION);

		/* Patch to 2.1 if OpRegion 2.0 has extended VBT */
		if (le16_to_cpu(version) == 0x0200 && opregionvbt->vbt_ex)
			version = cpu_to_le16(0x0201);

		if (igd_opregion_shift_copy(buf, &off,
					    (u8 *)&version +
					    (pos - OPREGION_VERSION),
					    &pos, &remaining, bytes))
			return -EFAULT;
	}

	/* Copy until RVDA */
	if (remaining && pos < OPREGION_RVDA) {
		size_t bytes = min_t(size_t, remaining, OPREGION_RVDA - pos);

		if (igd_opregion_shift_copy(buf, &off,
					    opregionvbt->opregion + pos, &pos,
					    &remaining, bytes))
			return -EFAULT;
	}

	/* Copy modified (if necessary) RVDA */
	if (remaining && pos < OPREGION_RVDA + sizeof(__le64)) {
		size_t bytes = min_t(size_t, remaining,
				     OPREGION_RVDA + sizeof(__le64) - pos);
		__le64 rvda = cpu_to_le64(opregionvbt->vbt_ex ?
					  OPREGION_SIZE : 0);

		if (igd_opregion_shift_copy(buf, &off,
					    (u8 *)&rvda + (pos - OPREGION_RVDA),
					    &pos, &remaining, bytes))
			return -EFAULT;
	}

	/* Copy the rest of the OpRegion */
	if (remaining && pos < OPREGION_SIZE) {
		size_t bytes = min_t(size_t, remaining, OPREGION_SIZE - pos);

		if (igd_opregion_shift_copy(buf, &off,
					    opregionvbt->opregion + pos, &pos,
					    &remaining, bytes))
			return -EFAULT;
	}

	/* Copy the extended VBT, if it exists */
	if (remaining &&
	    copy_to_user(buf + off, opregionvbt->vbt_ex + (pos - OPREGION_SIZE),
			 remaining))
		return -EFAULT;

	*ppos += count;

	return count;
}

static void vfio_pci_igd_release(struct vfio_pci_core_device *vdev,
				 struct vfio_pci_region *region)
{
	struct igd_opregion_vbt *opregionvbt = region->data;

	if (opregionvbt->vbt_ex)
		memunmap(opregionvbt->vbt_ex);

	memunmap(opregionvbt->opregion);
	kfree(opregionvbt);
}

static const struct vfio_pci_regops vfio_pci_igd_regops = {
	.rw		= vfio_pci_igd_rw,
	.release	= vfio_pci_igd_release,
};
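/*
 * From userspace the OpRegion is consumed like any other read-only vfio
 * device region. A minimal sketch, assuming "device_fd" is an open vfio
 * device and "index" was discovered by matching the Intel vendor type and
 * OPREGION subtype (both variable names here are hypothetical):
 *
 *	struct vfio_region_info info = {
 *		.argsz = sizeof(info),
 *		.index = index,
 *	};
 *
 *	ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, &info);
 *	pread(device_fd, buf, info.size, info.offset);
 *
 * Writes are rejected with -EINVAL by vfio_pci_igd_rw() above.
 */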
static int vfio_pci_igd_opregion_init(struct vfio_pci_core_device *vdev)
{
	__le32 *dwordp = (__le32 *)(vdev->vconfig + OPREGION_PCI_ADDR);
	u32 addr, size;
	struct igd_opregion_vbt *opregionvbt;
	int ret;
	u16 version;

	ret = pci_read_config_dword(vdev->pdev, OPREGION_PCI_ADDR, &addr);
	if (ret)
		return ret;

	/* No OpRegion if the register is unset or reads as all ones */
	if (!addr || !(~addr))
		return -ENODEV;

	opregionvbt = kzalloc(sizeof(*opregionvbt), GFP_KERNEL);
	if (!opregionvbt)
		return -ENOMEM;

	opregionvbt->opregion = memremap(addr, OPREGION_SIZE, MEMREMAP_WB);
	if (!opregionvbt->opregion) {
		kfree(opregionvbt);
		return -ENOMEM;
	}

	if (memcmp(opregionvbt->opregion, OPREGION_SIGNATURE, 16)) {
		memunmap(opregionvbt->opregion);
		kfree(opregionvbt);
		return -EINVAL;
	}

	size = le32_to_cpu(*(__le32 *)(opregionvbt->opregion + 16));
	if (!size) {
		memunmap(opregionvbt->opregion);
		kfree(opregionvbt);
		return -EINVAL;
	}

	size *= 1024; /* The size field is in KB */

	/*
	 * OpRegion and VBT:
	 * When the VBT data doesn't exceed 6KB, it's stored in Mailbox #4.
	 * When the VBT data exceeds 6KB, Mailbox #4 is no longer large enough
	 * to hold it, so OpRegion 2.0 introduced the Extended VBT region,
	 * with RVDA/RVDS defining the extended VBT data location and size.
	 * OpRegion 2.0: RVDA holds the absolute physical address of the
	 * extended VBT data, RVDS its size.
	 * OpRegion 2.1 and above: RVDA holds the address of the extended VBT
	 * data relative to the OpRegion base, RVDS its size.
	 *
	 * The RVDA definition is the only difference between 2.0 and 2.1.
	 * Exposing the OpRegion and VBT as one contiguous range therefore
	 * makes it possible to support a non-contiguous VBT through a single
	 * vfio region: the r/w ops always expose a contiguous VBT following
	 * the OpRegion with version 2.1+, regardless of whether the host
	 * OpRegion is 2.0 or a non-contiguous 2.1+. The r/w ops shift the
	 * offset into the VBT on the fly so that data at the correct position
	 * is returned to the requester.
	 */
	version = le16_to_cpu(*(__le16 *)(opregionvbt->opregion +
					  OPREGION_VERSION));
	if (version >= 0x0200) {
		u64 rvda = le64_to_cpu(*(__le64 *)(opregionvbt->opregion +
						   OPREGION_RVDA));
		u32 rvds = le32_to_cpu(*(__le32 *)(opregionvbt->opregion +
						   OPREGION_RVDS));

		/* The extended VBT is valid only when RVDA/RVDS are non-zero */
		if (rvda && rvds) {
			size += rvds;

			/*
			 * Extended VBT location by RVDA:
			 * Absolute physical addr for 2.0.
			 * Relative addr to OpRegion header for 2.1+.
			 */
			if (version == 0x0200)
				addr = rvda;
			else
				addr += rvda;

			opregionvbt->vbt_ex = memremap(addr, rvds, MEMREMAP_WB);
			if (!opregionvbt->vbt_ex) {
				memunmap(opregionvbt->opregion);
				kfree(opregionvbt);
				return -ENOMEM;
			}
		}
	}

	ret = vfio_pci_register_dev_region(vdev,
		PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
		VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION, &vfio_pci_igd_regops,
		size, VFIO_REGION_INFO_FLAG_READ, opregionvbt);
	if (ret) {
		if (opregionvbt->vbt_ex)
			memunmap(opregionvbt->vbt_ex);

		memunmap(opregionvbt->opregion);
		kfree(opregionvbt);
		return ret;
	}

	/* Fill vconfig with the hw value and virtualize register */
	*dwordp = cpu_to_le32(addr);
	memset(vdev->pci_config_map + OPREGION_PCI_ADDR,
	       PCI_CAP_ID_INVALID_VIRT, 4);

	return ret;
}
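/*
 * Read handler for the host and LPC bridge config space regions registered
 * in vfio_pci_igd_cfg_init() below. Reads are serviced from the live config
 * space of the host's bridge devices, split into naturally aligned byte,
 * word and dword accesses; writes are rejected.
 */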
static ssize_t vfio_pci_igd_cfg_rw(struct vfio_pci_core_device *vdev,
				   char __user *buf, size_t count, loff_t *ppos,
				   bool iswrite)
{
	unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS;
	struct pci_dev *pdev = vdev->region[i].data;
	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
	size_t size;
	int ret;

	if (pos >= vdev->region[i].size || iswrite)
		return -EINVAL;

	size = count = min(count, (size_t)(vdev->region[i].size - pos));

	/* Align to a word boundary with a leading byte read */
	if ((pos & 1) && size) {
		u8 val;

		ret = pci_user_read_config_byte(pdev, pos, &val);
		if (ret)
			return ret;

		if (copy_to_user(buf + count - size, &val, 1))
			return -EFAULT;

		pos++;
		size--;
	}

	/* Align to a dword boundary with a word read */
	if ((pos & 3) && size > 2) {
		u16 val;
		__le16 lval;

		ret = pci_user_read_config_word(pdev, pos, &val);
		if (ret)
			return ret;

		lval = cpu_to_le16(val);
		if (copy_to_user(buf + count - size, &lval, 2))
			return -EFAULT;

		pos += 2;
		size -= 2;
	}

	/* Read the aligned bulk as dwords */
	while (size > 3) {
		u32 val;
		__le32 lval;

		ret = pci_user_read_config_dword(pdev, pos, &val);
		if (ret)
			return ret;

		lval = cpu_to_le32(val);
		if (copy_to_user(buf + count - size, &lval, 4))
			return -EFAULT;

		pos += 4;
		size -= 4;
	}

	/* Mop up any trailing word and byte */
	while (size >= 2) {
		u16 val;
		__le16 lval;

		ret = pci_user_read_config_word(pdev, pos, &val);
		if (ret)
			return ret;

		lval = cpu_to_le16(val);
		if (copy_to_user(buf + count - size, &lval, 2))
			return -EFAULT;

		pos += 2;
		size -= 2;
	}

	while (size) {
		u8 val;

		ret = pci_user_read_config_byte(pdev, pos, &val);
		if (ret)
			return ret;

		if (copy_to_user(buf + count - size, &val, 1))
			return -EFAULT;

		pos++;
		size--;
	}

	*ppos += count;

	return count;
}

static void vfio_pci_igd_cfg_release(struct vfio_pci_core_device *vdev,
				     struct vfio_pci_region *region)
{
	struct pci_dev *pdev = region->data;

	pci_dev_put(pdev);
}

static const struct vfio_pci_regops vfio_pci_igd_cfg_regops = {
	.rw		= vfio_pci_igd_cfg_rw,
	.release	= vfio_pci_igd_cfg_release,
};

static int vfio_pci_igd_cfg_init(struct vfio_pci_core_device *vdev)
{
	struct pci_dev *host_bridge, *lpc_bridge;
	int ret;

	/* Mirror the host bridge (00:00.0) config space */
	host_bridge = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
	if (!host_bridge)
		return -ENODEV;

	if (host_bridge->vendor != PCI_VENDOR_ID_INTEL ||
	    host_bridge->class != (PCI_CLASS_BRIDGE_HOST << 8)) {
		pci_dev_put(host_bridge);
		return -EINVAL;
	}

	ret = vfio_pci_register_dev_region(vdev,
		PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
		VFIO_REGION_SUBTYPE_INTEL_IGD_HOST_CFG,
		&vfio_pci_igd_cfg_regops, host_bridge->cfg_size,
		VFIO_REGION_INFO_FLAG_READ, host_bridge);
	if (ret) {
		pci_dev_put(host_bridge);
		return ret;
	}

	/* Mirror the ISA/LPC bridge (00:1f.0) config space */
	lpc_bridge = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0x1f, 0));
	if (!lpc_bridge)
		return -ENODEV;

	if (lpc_bridge->vendor != PCI_VENDOR_ID_INTEL ||
	    lpc_bridge->class != (PCI_CLASS_BRIDGE_ISA << 8)) {
		pci_dev_put(lpc_bridge);
		return -EINVAL;
	}

	ret = vfio_pci_register_dev_region(vdev,
		PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
		VFIO_REGION_SUBTYPE_INTEL_IGD_LPC_CFG,
		&vfio_pci_igd_cfg_regops, lpc_bridge->cfg_size,
		VFIO_REGION_INFO_FLAG_READ, lpc_bridge);
	if (ret) {
		pci_dev_put(lpc_bridge);
		return ret;
	}

	return 0;
}

int vfio_pci_igd_init(struct vfio_pci_core_device *vdev)
{
	int ret;

	ret = vfio_pci_igd_opregion_init(vdev);
	if (ret)
		return ret;

	ret = vfio_pci_igd_cfg_init(vdev);
	if (ret)
		return ret;

	return 0;
}