1 /* 2 * VFIO PCI config space virtualization 3 * 4 * Copyright (C) 2012 Red Hat, Inc. All rights reserved. 5 * Author: Alex Williamson <alex.williamson@redhat.com> 6 * 7 * This program is free software; you can redistribute it and/or modify 8 * it under the terms of the GNU General Public License version 2 as 9 * published by the Free Software Foundation. 10 * 11 * Derived from original vfio: 12 * Copyright 2010 Cisco Systems, Inc. All rights reserved. 13 * Author: Tom Lyon, pugs@cisco.com 14 */ 15 16 /* 17 * This code handles reading and writing of PCI configuration registers. 18 * This is hairy because we want to allow a lot of flexibility to the 19 * user driver, but cannot trust it with all of the config fields. 20 * Tables determine which fields can be read and written, as well as 21 * which fields are 'virtualized' - special actions and translations to 22 * make it appear to the user that he has control, when in fact things 23 * must be negotiated with the underlying OS. 24 */ 25 26 #include <linux/fs.h> 27 #include <linux/pci.h> 28 #include <linux/uaccess.h> 29 #include <linux/vfio.h> 30 #include <linux/slab.h> 31 32 #include "vfio_pci_private.h" 33 34 #define PCI_CFG_SPACE_SIZE 256 35 36 /* Fake capability ID for standard config space */ 37 #define PCI_CAP_ID_BASIC 0 38 39 #define is_bar(offset) \ 40 ((offset >= PCI_BASE_ADDRESS_0 && offset < PCI_BASE_ADDRESS_5 + 4) || \ 41 (offset >= PCI_ROM_ADDRESS && offset < PCI_ROM_ADDRESS + 4)) 42 43 /* 44 * Lengths of PCI Config Capabilities 45 * 0: Removed from the user visible capability list 46 * FF: Variable length 47 */ 48 static const u8 pci_cap_length[PCI_CAP_ID_MAX + 1] = { 49 [PCI_CAP_ID_BASIC] = PCI_STD_HEADER_SIZEOF, /* pci config header */ 50 [PCI_CAP_ID_PM] = PCI_PM_SIZEOF, 51 [PCI_CAP_ID_AGP] = PCI_AGP_SIZEOF, 52 [PCI_CAP_ID_VPD] = PCI_CAP_VPD_SIZEOF, 53 [PCI_CAP_ID_SLOTID] = 0, /* bridge - don't care */ 54 [PCI_CAP_ID_MSI] = 0xFF, /* 10, 14, 20, or 24 */ 55 [PCI_CAP_ID_CHSWP] = 0, /* cpci - not yet */ 56 [PCI_CAP_ID_PCIX] = 0xFF, /* 8 or 24 */ 57 [PCI_CAP_ID_HT] = 0xFF, /* hypertransport */ 58 [PCI_CAP_ID_VNDR] = 0xFF, /* variable */ 59 [PCI_CAP_ID_DBG] = 0, /* debug - don't care */ 60 [PCI_CAP_ID_CCRC] = 0, /* cpci - not yet */ 61 [PCI_CAP_ID_SHPC] = 0, /* hotswap - not yet */ 62 [PCI_CAP_ID_SSVID] = 0, /* bridge - don't care */ 63 [PCI_CAP_ID_AGP3] = 0, /* AGP8x - not yet */ 64 [PCI_CAP_ID_SECDEV] = 0, /* secure device not yet */ 65 [PCI_CAP_ID_EXP] = 0xFF, /* 20 or 44 */ 66 [PCI_CAP_ID_MSIX] = PCI_CAP_MSIX_SIZEOF, 67 [PCI_CAP_ID_SATA] = 0xFF, 68 [PCI_CAP_ID_AF] = PCI_CAP_AF_SIZEOF, 69 }; 70 71 /* 72 * Lengths of PCIe/PCI-X Extended Config Capabilities 73 * 0: Removed or masked from the user visible capabilty list 74 * FF: Variable length 75 */ 76 static const u16 pci_ext_cap_length[PCI_EXT_CAP_ID_MAX + 1] = { 77 [PCI_EXT_CAP_ID_ERR] = PCI_ERR_ROOT_COMMAND, 78 [PCI_EXT_CAP_ID_VC] = 0xFF, 79 [PCI_EXT_CAP_ID_DSN] = PCI_EXT_CAP_DSN_SIZEOF, 80 [PCI_EXT_CAP_ID_PWR] = PCI_EXT_CAP_PWR_SIZEOF, 81 [PCI_EXT_CAP_ID_RCLD] = 0, /* root only - don't care */ 82 [PCI_EXT_CAP_ID_RCILC] = 0, /* root only - don't care */ 83 [PCI_EXT_CAP_ID_RCEC] = 0, /* root only - don't care */ 84 [PCI_EXT_CAP_ID_MFVC] = 0xFF, 85 [PCI_EXT_CAP_ID_VC9] = 0xFF, /* same as CAP_ID_VC */ 86 [PCI_EXT_CAP_ID_RCRB] = 0, /* root only - don't care */ 87 [PCI_EXT_CAP_ID_VNDR] = 0xFF, 88 [PCI_EXT_CAP_ID_CAC] = 0, /* obsolete */ 89 [PCI_EXT_CAP_ID_ACS] = 0xFF, 90 [PCI_EXT_CAP_ID_ARI] = PCI_EXT_CAP_ARI_SIZEOF, 91 [PCI_EXT_CAP_ID_ATS] = PCI_EXT_CAP_ATS_SIZEOF, 92 [PCI_EXT_CAP_ID_SRIOV] = PCI_EXT_CAP_SRIOV_SIZEOF, 93 [PCI_EXT_CAP_ID_MRIOV] = 0, /* not yet */ 94 [PCI_EXT_CAP_ID_MCAST] = PCI_EXT_CAP_MCAST_ENDPOINT_SIZEOF, 95 [PCI_EXT_CAP_ID_PRI] = PCI_EXT_CAP_PRI_SIZEOF, 96 [PCI_EXT_CAP_ID_AMD_XXX] = 0, /* not yet */ 97 [PCI_EXT_CAP_ID_REBAR] = 0xFF, 98 [PCI_EXT_CAP_ID_DPA] = 0xFF, 99 [PCI_EXT_CAP_ID_TPH] = 0xFF, 100 [PCI_EXT_CAP_ID_LTR] = PCI_EXT_CAP_LTR_SIZEOF, 101 [PCI_EXT_CAP_ID_SECPCI] = 0, /* not yet */ 102 [PCI_EXT_CAP_ID_PMUX] = 0, /* not yet */ 103 [PCI_EXT_CAP_ID_PASID] = 0, /* not yet */ 104 }; 105 106 /* 107 * Read/Write Permission Bits - one bit for each bit in capability 108 * Any field can be read if it exists, but what is read depends on 109 * whether the field is 'virtualized', or just pass thru to the 110 * hardware. Any virtualized field is also virtualized for writes. 111 * Writes are only permitted if they have a 1 bit here. 112 */ 113 struct perm_bits { 114 u8 *virt; /* read/write virtual data, not hw */ 115 u8 *write; /* writeable bits */ 116 int (*readfn)(struct vfio_pci_device *vdev, int pos, int count, 117 struct perm_bits *perm, int offset, __le32 *val); 118 int (*writefn)(struct vfio_pci_device *vdev, int pos, int count, 119 struct perm_bits *perm, int offset, __le32 val); 120 }; 121 122 #define NO_VIRT 0 123 #define ALL_VIRT 0xFFFFFFFFU 124 #define NO_WRITE 0 125 #define ALL_WRITE 0xFFFFFFFFU 126 127 static int vfio_user_config_read(struct pci_dev *pdev, int offset, 128 __le32 *val, int count) 129 { 130 int ret = -EINVAL; 131 u32 tmp_val = 0; 132 133 switch (count) { 134 case 1: 135 { 136 u8 tmp; 137 ret = pci_user_read_config_byte(pdev, offset, &tmp); 138 tmp_val = tmp; 139 break; 140 } 141 case 2: 142 { 143 u16 tmp; 144 ret = pci_user_read_config_word(pdev, offset, &tmp); 145 tmp_val = tmp; 146 break; 147 } 148 case 4: 149 ret = pci_user_read_config_dword(pdev, offset, &tmp_val); 150 break; 151 } 152 153 *val = cpu_to_le32(tmp_val); 154 155 return pcibios_err_to_errno(ret); 156 } 157 158 static int vfio_user_config_write(struct pci_dev *pdev, int offset, 159 __le32 val, int count) 160 { 161 int ret = -EINVAL; 162 u32 tmp_val = le32_to_cpu(val); 163 164 switch (count) { 165 case 1: 166 ret = pci_user_write_config_byte(pdev, offset, tmp_val); 167 break; 168 case 2: 169 ret = pci_user_write_config_word(pdev, offset, tmp_val); 170 break; 171 case 4: 172 ret = pci_user_write_config_dword(pdev, offset, tmp_val); 173 break; 174 } 175 176 return pcibios_err_to_errno(ret); 177 } 178 179 static int vfio_default_config_read(struct vfio_pci_device *vdev, int pos, 180 int count, struct perm_bits *perm, 181 int offset, __le32 *val) 182 { 183 __le32 virt = 0; 184 185 memcpy(val, vdev->vconfig + pos, count); 186 187 memcpy(&virt, perm->virt + offset, count); 188 189 /* Any non-virtualized bits? */ 190 if (cpu_to_le32(~0U >> (32 - (count * 8))) != virt) { 191 struct pci_dev *pdev = vdev->pdev; 192 __le32 phys_val = 0; 193 int ret; 194 195 ret = vfio_user_config_read(pdev, pos, &phys_val, count); 196 if (ret) 197 return ret; 198 199 *val = (phys_val & ~virt) | (*val & virt); 200 } 201 202 return count; 203 } 204 205 static int vfio_default_config_write(struct vfio_pci_device *vdev, int pos, 206 int count, struct perm_bits *perm, 207 int offset, __le32 val) 208 { 209 __le32 virt = 0, write = 0; 210 211 memcpy(&write, perm->write + offset, count); 212 213 if (!write) 214 return count; /* drop, no writable bits */ 215 216 memcpy(&virt, perm->virt + offset, count); 217 218 /* Virtualized and writable bits go to vconfig */ 219 if (write & virt) { 220 __le32 virt_val = 0; 221 222 memcpy(&virt_val, vdev->vconfig + pos, count); 223 224 virt_val &= ~(write & virt); 225 virt_val |= (val & (write & virt)); 226 227 memcpy(vdev->vconfig + pos, &virt_val, count); 228 } 229 230 /* Non-virtualzed and writable bits go to hardware */ 231 if (write & ~virt) { 232 struct pci_dev *pdev = vdev->pdev; 233 __le32 phys_val = 0; 234 int ret; 235 236 ret = vfio_user_config_read(pdev, pos, &phys_val, count); 237 if (ret) 238 return ret; 239 240 phys_val &= ~(write & ~virt); 241 phys_val |= (val & (write & ~virt)); 242 243 ret = vfio_user_config_write(pdev, pos, phys_val, count); 244 if (ret) 245 return ret; 246 } 247 248 return count; 249 } 250 251 /* Allow direct read from hardware, except for capability next pointer */ 252 static int vfio_direct_config_read(struct vfio_pci_device *vdev, int pos, 253 int count, struct perm_bits *perm, 254 int offset, __le32 *val) 255 { 256 int ret; 257 258 ret = vfio_user_config_read(vdev->pdev, pos, val, count); 259 if (ret) 260 return pcibios_err_to_errno(ret); 261 262 if (pos >= PCI_CFG_SPACE_SIZE) { /* Extended cap header mangling */ 263 if (offset < 4) 264 memcpy(val, vdev->vconfig + pos, count); 265 } else if (pos >= PCI_STD_HEADER_SIZEOF) { /* Std cap mangling */ 266 if (offset == PCI_CAP_LIST_ID && count > 1) 267 memcpy(val, vdev->vconfig + pos, 268 min(PCI_CAP_FLAGS, count)); 269 else if (offset == PCI_CAP_LIST_NEXT) 270 memcpy(val, vdev->vconfig + pos, 1); 271 } 272 273 return count; 274 } 275 276 /* Raw access skips any kind of virtualization */ 277 static int vfio_raw_config_write(struct vfio_pci_device *vdev, int pos, 278 int count, struct perm_bits *perm, 279 int offset, __le32 val) 280 { 281 int ret; 282 283 ret = vfio_user_config_write(vdev->pdev, pos, val, count); 284 if (ret) 285 return ret; 286 287 return count; 288 } 289 290 static int vfio_raw_config_read(struct vfio_pci_device *vdev, int pos, 291 int count, struct perm_bits *perm, 292 int offset, __le32 *val) 293 { 294 int ret; 295 296 ret = vfio_user_config_read(vdev->pdev, pos, val, count); 297 if (ret) 298 return pcibios_err_to_errno(ret); 299 300 return count; 301 } 302 303 /* Virt access uses only virtualization */ 304 static int vfio_virt_config_write(struct vfio_pci_device *vdev, int pos, 305 int count, struct perm_bits *perm, 306 int offset, __le32 val) 307 { 308 memcpy(vdev->vconfig + pos, &val, count); 309 return count; 310 } 311 312 static int vfio_virt_config_read(struct vfio_pci_device *vdev, int pos, 313 int count, struct perm_bits *perm, 314 int offset, __le32 *val) 315 { 316 memcpy(val, vdev->vconfig + pos, count); 317 return count; 318 } 319 320 /* Default capability regions to read-only, no-virtualization */ 321 static struct perm_bits cap_perms[PCI_CAP_ID_MAX + 1] = { 322 [0 ... PCI_CAP_ID_MAX] = { .readfn = vfio_direct_config_read } 323 }; 324 static struct perm_bits ecap_perms[PCI_EXT_CAP_ID_MAX + 1] = { 325 [0 ... PCI_EXT_CAP_ID_MAX] = { .readfn = vfio_direct_config_read } 326 }; 327 /* 328 * Default unassigned regions to raw read-write access. Some devices 329 * require this to function as they hide registers between the gaps in 330 * config space (be2net). Like MMIO and I/O port registers, we have 331 * to trust the hardware isolation. 332 */ 333 static struct perm_bits unassigned_perms = { 334 .readfn = vfio_raw_config_read, 335 .writefn = vfio_raw_config_write 336 }; 337 338 static struct perm_bits virt_perms = { 339 .readfn = vfio_virt_config_read, 340 .writefn = vfio_virt_config_write 341 }; 342 343 static void free_perm_bits(struct perm_bits *perm) 344 { 345 kfree(perm->virt); 346 kfree(perm->write); 347 perm->virt = NULL; 348 perm->write = NULL; 349 } 350 351 static int alloc_perm_bits(struct perm_bits *perm, int size) 352 { 353 /* 354 * Round up all permission bits to the next dword, this lets us 355 * ignore whether a read/write exceeds the defined capability 356 * structure. We can do this because: 357 * - Standard config space is already dword aligned 358 * - Capabilities are all dword alinged (bits 0:1 of next reserved) 359 * - Express capabilities defined as dword aligned 360 */ 361 size = round_up(size, 4); 362 363 /* 364 * Zero state is 365 * - All Readable, None Writeable, None Virtualized 366 */ 367 perm->virt = kzalloc(size, GFP_KERNEL); 368 perm->write = kzalloc(size, GFP_KERNEL); 369 if (!perm->virt || !perm->write) { 370 free_perm_bits(perm); 371 return -ENOMEM; 372 } 373 374 perm->readfn = vfio_default_config_read; 375 perm->writefn = vfio_default_config_write; 376 377 return 0; 378 } 379 380 /* 381 * Helper functions for filling in permission tables 382 */ 383 static inline void p_setb(struct perm_bits *p, int off, u8 virt, u8 write) 384 { 385 p->virt[off] = virt; 386 p->write[off] = write; 387 } 388 389 /* Handle endian-ness - pci and tables are little-endian */ 390 static inline void p_setw(struct perm_bits *p, int off, u16 virt, u16 write) 391 { 392 *(__le16 *)(&p->virt[off]) = cpu_to_le16(virt); 393 *(__le16 *)(&p->write[off]) = cpu_to_le16(write); 394 } 395 396 /* Handle endian-ness - pci and tables are little-endian */ 397 static inline void p_setd(struct perm_bits *p, int off, u32 virt, u32 write) 398 { 399 *(__le32 *)(&p->virt[off]) = cpu_to_le32(virt); 400 *(__le32 *)(&p->write[off]) = cpu_to_le32(write); 401 } 402 403 /* 404 * Restore the *real* BARs after we detect a FLR or backdoor reset. 405 * (backdoor = some device specific technique that we didn't catch) 406 */ 407 static void vfio_bar_restore(struct vfio_pci_device *vdev) 408 { 409 struct pci_dev *pdev = vdev->pdev; 410 u32 *rbar = vdev->rbar; 411 int i; 412 413 if (pdev->is_virtfn) 414 return; 415 416 pr_info("%s: %s reset recovery - restoring bars\n", 417 __func__, dev_name(&pdev->dev)); 418 419 for (i = PCI_BASE_ADDRESS_0; i <= PCI_BASE_ADDRESS_5; i += 4, rbar++) 420 pci_user_write_config_dword(pdev, i, *rbar); 421 422 pci_user_write_config_dword(pdev, PCI_ROM_ADDRESS, *rbar); 423 } 424 425 static __le32 vfio_generate_bar_flags(struct pci_dev *pdev, int bar) 426 { 427 unsigned long flags = pci_resource_flags(pdev, bar); 428 u32 val; 429 430 if (flags & IORESOURCE_IO) 431 return cpu_to_le32(PCI_BASE_ADDRESS_SPACE_IO); 432 433 val = PCI_BASE_ADDRESS_SPACE_MEMORY; 434 435 if (flags & IORESOURCE_PREFETCH) 436 val |= PCI_BASE_ADDRESS_MEM_PREFETCH; 437 438 if (flags & IORESOURCE_MEM_64) 439 val |= PCI_BASE_ADDRESS_MEM_TYPE_64; 440 441 return cpu_to_le32(val); 442 } 443 444 /* 445 * Pretend we're hardware and tweak the values of the *virtual* PCI BARs 446 * to reflect the hardware capabilities. This implements BAR sizing. 447 */ 448 static void vfio_bar_fixup(struct vfio_pci_device *vdev) 449 { 450 struct pci_dev *pdev = vdev->pdev; 451 int i; 452 __le32 *bar; 453 u64 mask; 454 455 bar = (__le32 *)&vdev->vconfig[PCI_BASE_ADDRESS_0]; 456 457 for (i = PCI_STD_RESOURCES; i <= PCI_STD_RESOURCE_END; i++, bar++) { 458 if (!pci_resource_start(pdev, i)) { 459 *bar = 0; /* Unmapped by host = unimplemented to user */ 460 continue; 461 } 462 463 mask = ~(pci_resource_len(pdev, i) - 1); 464 465 *bar &= cpu_to_le32((u32)mask); 466 *bar |= vfio_generate_bar_flags(pdev, i); 467 468 if (*bar & cpu_to_le32(PCI_BASE_ADDRESS_MEM_TYPE_64)) { 469 bar++; 470 *bar &= cpu_to_le32((u32)(mask >> 32)); 471 i++; 472 } 473 } 474 475 bar = (__le32 *)&vdev->vconfig[PCI_ROM_ADDRESS]; 476 477 /* 478 * NB. REGION_INFO will have reported zero size if we weren't able 479 * to read the ROM, but we still return the actual BAR size here if 480 * it exists (or the shadow ROM space). 481 */ 482 if (pci_resource_start(pdev, PCI_ROM_RESOURCE)) { 483 mask = ~(pci_resource_len(pdev, PCI_ROM_RESOURCE) - 1); 484 mask |= PCI_ROM_ADDRESS_ENABLE; 485 *bar &= cpu_to_le32((u32)mask); 486 } else if (pdev->resource[PCI_ROM_RESOURCE].flags & 487 IORESOURCE_ROM_SHADOW) { 488 mask = ~(0x20000 - 1); 489 mask |= PCI_ROM_ADDRESS_ENABLE; 490 *bar &= cpu_to_le32((u32)mask); 491 } else 492 *bar = 0; 493 494 vdev->bardirty = false; 495 } 496 497 static int vfio_basic_config_read(struct vfio_pci_device *vdev, int pos, 498 int count, struct perm_bits *perm, 499 int offset, __le32 *val) 500 { 501 if (is_bar(offset)) /* pos == offset for basic config */ 502 vfio_bar_fixup(vdev); 503 504 count = vfio_default_config_read(vdev, pos, count, perm, offset, val); 505 506 /* Mask in virtual memory enable for SR-IOV devices */ 507 if (offset == PCI_COMMAND && vdev->pdev->is_virtfn) { 508 u16 cmd = le16_to_cpu(*(__le16 *)&vdev->vconfig[PCI_COMMAND]); 509 u32 tmp_val = le32_to_cpu(*val); 510 511 tmp_val |= cmd & PCI_COMMAND_MEMORY; 512 *val = cpu_to_le32(tmp_val); 513 } 514 515 return count; 516 } 517 518 static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos, 519 int count, struct perm_bits *perm, 520 int offset, __le32 val) 521 { 522 struct pci_dev *pdev = vdev->pdev; 523 __le16 *virt_cmd; 524 u16 new_cmd = 0; 525 int ret; 526 527 virt_cmd = (__le16 *)&vdev->vconfig[PCI_COMMAND]; 528 529 if (offset == PCI_COMMAND) { 530 bool phys_mem, virt_mem, new_mem, phys_io, virt_io, new_io; 531 u16 phys_cmd; 532 533 ret = pci_user_read_config_word(pdev, PCI_COMMAND, &phys_cmd); 534 if (ret) 535 return ret; 536 537 new_cmd = le32_to_cpu(val); 538 539 phys_mem = !!(phys_cmd & PCI_COMMAND_MEMORY); 540 virt_mem = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_MEMORY); 541 new_mem = !!(new_cmd & PCI_COMMAND_MEMORY); 542 543 phys_io = !!(phys_cmd & PCI_COMMAND_IO); 544 virt_io = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_IO); 545 new_io = !!(new_cmd & PCI_COMMAND_IO); 546 547 /* 548 * If the user is writing mem/io enable (new_mem/io) and we 549 * think it's already enabled (virt_mem/io), but the hardware 550 * shows it disabled (phys_mem/io, then the device has 551 * undergone some kind of backdoor reset and needs to be 552 * restored before we allow it to enable the bars. 553 * SR-IOV devices will trigger this, but we catch them later 554 */ 555 if ((new_mem && virt_mem && !phys_mem) || 556 (new_io && virt_io && !phys_io)) 557 vfio_bar_restore(vdev); 558 } 559 560 count = vfio_default_config_write(vdev, pos, count, perm, offset, val); 561 if (count < 0) 562 return count; 563 564 /* 565 * Save current memory/io enable bits in vconfig to allow for 566 * the test above next time. 567 */ 568 if (offset == PCI_COMMAND) { 569 u16 mask = PCI_COMMAND_MEMORY | PCI_COMMAND_IO; 570 571 *virt_cmd &= cpu_to_le16(~mask); 572 *virt_cmd |= cpu_to_le16(new_cmd & mask); 573 } 574 575 /* Emulate INTx disable */ 576 if (offset >= PCI_COMMAND && offset <= PCI_COMMAND + 1) { 577 bool virt_intx_disable; 578 579 virt_intx_disable = !!(le16_to_cpu(*virt_cmd) & 580 PCI_COMMAND_INTX_DISABLE); 581 582 if (virt_intx_disable && !vdev->virq_disabled) { 583 vdev->virq_disabled = true; 584 vfio_pci_intx_mask(vdev); 585 } else if (!virt_intx_disable && vdev->virq_disabled) { 586 vdev->virq_disabled = false; 587 vfio_pci_intx_unmask(vdev); 588 } 589 } 590 591 if (is_bar(offset)) 592 vdev->bardirty = true; 593 594 return count; 595 } 596 597 /* Permissions for the Basic PCI Header */ 598 static int __init init_pci_cap_basic_perm(struct perm_bits *perm) 599 { 600 if (alloc_perm_bits(perm, PCI_STD_HEADER_SIZEOF)) 601 return -ENOMEM; 602 603 perm->readfn = vfio_basic_config_read; 604 perm->writefn = vfio_basic_config_write; 605 606 /* Virtualized for SR-IOV functions, which just have FFFF */ 607 p_setw(perm, PCI_VENDOR_ID, (u16)ALL_VIRT, NO_WRITE); 608 p_setw(perm, PCI_DEVICE_ID, (u16)ALL_VIRT, NO_WRITE); 609 610 /* 611 * Virtualize INTx disable, we use it internally for interrupt 612 * control and can emulate it for non-PCI 2.3 devices. 613 */ 614 p_setw(perm, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE, (u16)ALL_WRITE); 615 616 /* Virtualize capability list, we might want to skip/disable */ 617 p_setw(perm, PCI_STATUS, PCI_STATUS_CAP_LIST, NO_WRITE); 618 619 /* No harm to write */ 620 p_setb(perm, PCI_CACHE_LINE_SIZE, NO_VIRT, (u8)ALL_WRITE); 621 p_setb(perm, PCI_LATENCY_TIMER, NO_VIRT, (u8)ALL_WRITE); 622 p_setb(perm, PCI_BIST, NO_VIRT, (u8)ALL_WRITE); 623 624 /* Virtualize all bars, can't touch the real ones */ 625 p_setd(perm, PCI_BASE_ADDRESS_0, ALL_VIRT, ALL_WRITE); 626 p_setd(perm, PCI_BASE_ADDRESS_1, ALL_VIRT, ALL_WRITE); 627 p_setd(perm, PCI_BASE_ADDRESS_2, ALL_VIRT, ALL_WRITE); 628 p_setd(perm, PCI_BASE_ADDRESS_3, ALL_VIRT, ALL_WRITE); 629 p_setd(perm, PCI_BASE_ADDRESS_4, ALL_VIRT, ALL_WRITE); 630 p_setd(perm, PCI_BASE_ADDRESS_5, ALL_VIRT, ALL_WRITE); 631 p_setd(perm, PCI_ROM_ADDRESS, ALL_VIRT, ALL_WRITE); 632 633 /* Allow us to adjust capability chain */ 634 p_setb(perm, PCI_CAPABILITY_LIST, (u8)ALL_VIRT, NO_WRITE); 635 636 /* Sometimes used by sw, just virtualize */ 637 p_setb(perm, PCI_INTERRUPT_LINE, (u8)ALL_VIRT, (u8)ALL_WRITE); 638 639 /* Virtualize interrupt pin to allow hiding INTx */ 640 p_setb(perm, PCI_INTERRUPT_PIN, (u8)ALL_VIRT, (u8)NO_WRITE); 641 642 return 0; 643 } 644 645 static int vfio_pm_config_write(struct vfio_pci_device *vdev, int pos, 646 int count, struct perm_bits *perm, 647 int offset, __le32 val) 648 { 649 count = vfio_default_config_write(vdev, pos, count, perm, offset, val); 650 if (count < 0) 651 return count; 652 653 if (offset == PCI_PM_CTRL) { 654 pci_power_t state; 655 656 switch (le32_to_cpu(val) & PCI_PM_CTRL_STATE_MASK) { 657 case 0: 658 state = PCI_D0; 659 break; 660 case 1: 661 state = PCI_D1; 662 break; 663 case 2: 664 state = PCI_D2; 665 break; 666 case 3: 667 state = PCI_D3hot; 668 break; 669 } 670 671 pci_set_power_state(vdev->pdev, state); 672 } 673 674 return count; 675 } 676 677 /* Permissions for the Power Management capability */ 678 static int __init init_pci_cap_pm_perm(struct perm_bits *perm) 679 { 680 if (alloc_perm_bits(perm, pci_cap_length[PCI_CAP_ID_PM])) 681 return -ENOMEM; 682 683 perm->writefn = vfio_pm_config_write; 684 685 /* 686 * We always virtualize the next field so we can remove 687 * capabilities from the chain if we want to. 688 */ 689 p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE); 690 691 /* 692 * Power management is defined *per function*, so we can let 693 * the user change power state, but we trap and initiate the 694 * change ourselves, so the state bits are read-only. 695 */ 696 p_setd(perm, PCI_PM_CTRL, NO_VIRT, ~PCI_PM_CTRL_STATE_MASK); 697 return 0; 698 } 699 700 static int vfio_vpd_config_write(struct vfio_pci_device *vdev, int pos, 701 int count, struct perm_bits *perm, 702 int offset, __le32 val) 703 { 704 struct pci_dev *pdev = vdev->pdev; 705 __le16 *paddr = (__le16 *)(vdev->vconfig + pos - offset + PCI_VPD_ADDR); 706 __le32 *pdata = (__le32 *)(vdev->vconfig + pos - offset + PCI_VPD_DATA); 707 u16 addr; 708 u32 data; 709 710 /* 711 * Write through to emulation. If the write includes the upper byte 712 * of PCI_VPD_ADDR, then the PCI_VPD_ADDR_F bit is written and we 713 * have work to do. 714 */ 715 count = vfio_default_config_write(vdev, pos, count, perm, offset, val); 716 if (count < 0 || offset > PCI_VPD_ADDR + 1 || 717 offset + count <= PCI_VPD_ADDR + 1) 718 return count; 719 720 addr = le16_to_cpu(*paddr); 721 722 if (addr & PCI_VPD_ADDR_F) { 723 data = le32_to_cpu(*pdata); 724 if (pci_write_vpd(pdev, addr & ~PCI_VPD_ADDR_F, 4, &data) != 4) 725 return count; 726 } else { 727 if (pci_read_vpd(pdev, addr, 4, &data) != 4) 728 return count; 729 *pdata = cpu_to_le32(data); 730 } 731 732 /* 733 * Toggle PCI_VPD_ADDR_F in the emulated PCI_VPD_ADDR register to 734 * signal completion. If an error occurs above, we assume that not 735 * toggling this bit will induce a driver timeout. 736 */ 737 addr ^= PCI_VPD_ADDR_F; 738 *paddr = cpu_to_le16(addr); 739 740 return count; 741 } 742 743 /* Permissions for Vital Product Data capability */ 744 static int __init init_pci_cap_vpd_perm(struct perm_bits *perm) 745 { 746 if (alloc_perm_bits(perm, pci_cap_length[PCI_CAP_ID_VPD])) 747 return -ENOMEM; 748 749 perm->writefn = vfio_vpd_config_write; 750 751 /* 752 * We always virtualize the next field so we can remove 753 * capabilities from the chain if we want to. 754 */ 755 p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE); 756 757 /* 758 * Both the address and data registers are virtualized to 759 * enable access through the pci_vpd_read/write functions 760 */ 761 p_setw(perm, PCI_VPD_ADDR, (u16)ALL_VIRT, (u16)ALL_WRITE); 762 p_setd(perm, PCI_VPD_DATA, ALL_VIRT, ALL_WRITE); 763 764 return 0; 765 } 766 767 /* Permissions for PCI-X capability */ 768 static int __init init_pci_cap_pcix_perm(struct perm_bits *perm) 769 { 770 /* Alloc 24, but only 8 are used in v0 */ 771 if (alloc_perm_bits(perm, PCI_CAP_PCIX_SIZEOF_V2)) 772 return -ENOMEM; 773 774 p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE); 775 776 p_setw(perm, PCI_X_CMD, NO_VIRT, (u16)ALL_WRITE); 777 p_setd(perm, PCI_X_ECC_CSR, NO_VIRT, ALL_WRITE); 778 return 0; 779 } 780 781 /* Permissions for PCI Express capability */ 782 static int __init init_pci_cap_exp_perm(struct perm_bits *perm) 783 { 784 /* Alloc larger of two possible sizes */ 785 if (alloc_perm_bits(perm, PCI_CAP_EXP_ENDPOINT_SIZEOF_V2)) 786 return -ENOMEM; 787 788 p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE); 789 790 /* 791 * Allow writes to device control fields (includes FLR!) 792 * but not to devctl_phantom which could confuse IOMMU 793 * or to the ARI bit in devctl2 which is set at probe time 794 */ 795 p_setw(perm, PCI_EXP_DEVCTL, NO_VIRT, ~PCI_EXP_DEVCTL_PHANTOM); 796 p_setw(perm, PCI_EXP_DEVCTL2, NO_VIRT, ~PCI_EXP_DEVCTL2_ARI); 797 return 0; 798 } 799 800 /* Permissions for Advanced Function capability */ 801 static int __init init_pci_cap_af_perm(struct perm_bits *perm) 802 { 803 if (alloc_perm_bits(perm, pci_cap_length[PCI_CAP_ID_AF])) 804 return -ENOMEM; 805 806 p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE); 807 p_setb(perm, PCI_AF_CTRL, NO_VIRT, PCI_AF_CTRL_FLR); 808 return 0; 809 } 810 811 /* Permissions for Advanced Error Reporting extended capability */ 812 static int __init init_pci_ext_cap_err_perm(struct perm_bits *perm) 813 { 814 u32 mask; 815 816 if (alloc_perm_bits(perm, pci_ext_cap_length[PCI_EXT_CAP_ID_ERR])) 817 return -ENOMEM; 818 819 /* 820 * Virtualize the first dword of all express capabilities 821 * because it includes the next pointer. This lets us later 822 * remove capabilities from the chain if we need to. 823 */ 824 p_setd(perm, 0, ALL_VIRT, NO_WRITE); 825 826 /* Writable bits mask */ 827 mask = PCI_ERR_UNC_UND | /* Undefined */ 828 PCI_ERR_UNC_DLP | /* Data Link Protocol */ 829 PCI_ERR_UNC_SURPDN | /* Surprise Down */ 830 PCI_ERR_UNC_POISON_TLP | /* Poisoned TLP */ 831 PCI_ERR_UNC_FCP | /* Flow Control Protocol */ 832 PCI_ERR_UNC_COMP_TIME | /* Completion Timeout */ 833 PCI_ERR_UNC_COMP_ABORT | /* Completer Abort */ 834 PCI_ERR_UNC_UNX_COMP | /* Unexpected Completion */ 835 PCI_ERR_UNC_RX_OVER | /* Receiver Overflow */ 836 PCI_ERR_UNC_MALF_TLP | /* Malformed TLP */ 837 PCI_ERR_UNC_ECRC | /* ECRC Error Status */ 838 PCI_ERR_UNC_UNSUP | /* Unsupported Request */ 839 PCI_ERR_UNC_ACSV | /* ACS Violation */ 840 PCI_ERR_UNC_INTN | /* internal error */ 841 PCI_ERR_UNC_MCBTLP | /* MC blocked TLP */ 842 PCI_ERR_UNC_ATOMEG | /* Atomic egress blocked */ 843 PCI_ERR_UNC_TLPPRE; /* TLP prefix blocked */ 844 p_setd(perm, PCI_ERR_UNCOR_STATUS, NO_VIRT, mask); 845 p_setd(perm, PCI_ERR_UNCOR_MASK, NO_VIRT, mask); 846 p_setd(perm, PCI_ERR_UNCOR_SEVER, NO_VIRT, mask); 847 848 mask = PCI_ERR_COR_RCVR | /* Receiver Error Status */ 849 PCI_ERR_COR_BAD_TLP | /* Bad TLP Status */ 850 PCI_ERR_COR_BAD_DLLP | /* Bad DLLP Status */ 851 PCI_ERR_COR_REP_ROLL | /* REPLAY_NUM Rollover */ 852 PCI_ERR_COR_REP_TIMER | /* Replay Timer Timeout */ 853 PCI_ERR_COR_ADV_NFAT | /* Advisory Non-Fatal */ 854 PCI_ERR_COR_INTERNAL | /* Corrected Internal */ 855 PCI_ERR_COR_LOG_OVER; /* Header Log Overflow */ 856 p_setd(perm, PCI_ERR_COR_STATUS, NO_VIRT, mask); 857 p_setd(perm, PCI_ERR_COR_MASK, NO_VIRT, mask); 858 859 mask = PCI_ERR_CAP_ECRC_GENE | /* ECRC Generation Enable */ 860 PCI_ERR_CAP_ECRC_CHKE; /* ECRC Check Enable */ 861 p_setd(perm, PCI_ERR_CAP, NO_VIRT, mask); 862 return 0; 863 } 864 865 /* Permissions for Power Budgeting extended capability */ 866 static int __init init_pci_ext_cap_pwr_perm(struct perm_bits *perm) 867 { 868 if (alloc_perm_bits(perm, pci_ext_cap_length[PCI_EXT_CAP_ID_PWR])) 869 return -ENOMEM; 870 871 p_setd(perm, 0, ALL_VIRT, NO_WRITE); 872 873 /* Writing the data selector is OK, the info is still read-only */ 874 p_setb(perm, PCI_PWR_DATA, NO_VIRT, (u8)ALL_WRITE); 875 return 0; 876 } 877 878 /* 879 * Initialize the shared permission tables 880 */ 881 void vfio_pci_uninit_perm_bits(void) 882 { 883 free_perm_bits(&cap_perms[PCI_CAP_ID_BASIC]); 884 885 free_perm_bits(&cap_perms[PCI_CAP_ID_PM]); 886 free_perm_bits(&cap_perms[PCI_CAP_ID_VPD]); 887 free_perm_bits(&cap_perms[PCI_CAP_ID_PCIX]); 888 free_perm_bits(&cap_perms[PCI_CAP_ID_EXP]); 889 free_perm_bits(&cap_perms[PCI_CAP_ID_AF]); 890 891 free_perm_bits(&ecap_perms[PCI_EXT_CAP_ID_ERR]); 892 free_perm_bits(&ecap_perms[PCI_EXT_CAP_ID_PWR]); 893 } 894 895 int __init vfio_pci_init_perm_bits(void) 896 { 897 int ret; 898 899 /* Basic config space */ 900 ret = init_pci_cap_basic_perm(&cap_perms[PCI_CAP_ID_BASIC]); 901 902 /* Capabilities */ 903 ret |= init_pci_cap_pm_perm(&cap_perms[PCI_CAP_ID_PM]); 904 ret |= init_pci_cap_vpd_perm(&cap_perms[PCI_CAP_ID_VPD]); 905 ret |= init_pci_cap_pcix_perm(&cap_perms[PCI_CAP_ID_PCIX]); 906 cap_perms[PCI_CAP_ID_VNDR].writefn = vfio_raw_config_write; 907 ret |= init_pci_cap_exp_perm(&cap_perms[PCI_CAP_ID_EXP]); 908 ret |= init_pci_cap_af_perm(&cap_perms[PCI_CAP_ID_AF]); 909 910 /* Extended capabilities */ 911 ret |= init_pci_ext_cap_err_perm(&ecap_perms[PCI_EXT_CAP_ID_ERR]); 912 ret |= init_pci_ext_cap_pwr_perm(&ecap_perms[PCI_EXT_CAP_ID_PWR]); 913 ecap_perms[PCI_EXT_CAP_ID_VNDR].writefn = vfio_raw_config_write; 914 915 if (ret) 916 vfio_pci_uninit_perm_bits(); 917 918 return ret; 919 } 920 921 static int vfio_find_cap_start(struct vfio_pci_device *vdev, int pos) 922 { 923 u8 cap; 924 int base = (pos >= PCI_CFG_SPACE_SIZE) ? PCI_CFG_SPACE_SIZE : 925 PCI_STD_HEADER_SIZEOF; 926 cap = vdev->pci_config_map[pos]; 927 928 if (cap == PCI_CAP_ID_BASIC) 929 return 0; 930 931 /* XXX Can we have to abutting capabilities of the same type? */ 932 while (pos - 1 >= base && vdev->pci_config_map[pos - 1] == cap) 933 pos--; 934 935 return pos; 936 } 937 938 static int vfio_msi_config_read(struct vfio_pci_device *vdev, int pos, 939 int count, struct perm_bits *perm, 940 int offset, __le32 *val) 941 { 942 /* Update max available queue size from msi_qmax */ 943 if (offset <= PCI_MSI_FLAGS && offset + count >= PCI_MSI_FLAGS) { 944 __le16 *flags; 945 int start; 946 947 start = vfio_find_cap_start(vdev, pos); 948 949 flags = (__le16 *)&vdev->vconfig[start]; 950 951 *flags &= cpu_to_le16(~PCI_MSI_FLAGS_QMASK); 952 *flags |= cpu_to_le16(vdev->msi_qmax << 1); 953 } 954 955 return vfio_default_config_read(vdev, pos, count, perm, offset, val); 956 } 957 958 static int vfio_msi_config_write(struct vfio_pci_device *vdev, int pos, 959 int count, struct perm_bits *perm, 960 int offset, __le32 val) 961 { 962 count = vfio_default_config_write(vdev, pos, count, perm, offset, val); 963 if (count < 0) 964 return count; 965 966 /* Fixup and write configured queue size and enable to hardware */ 967 if (offset <= PCI_MSI_FLAGS && offset + count >= PCI_MSI_FLAGS) { 968 __le16 *pflags; 969 u16 flags; 970 int start, ret; 971 972 start = vfio_find_cap_start(vdev, pos); 973 974 pflags = (__le16 *)&vdev->vconfig[start + PCI_MSI_FLAGS]; 975 976 flags = le16_to_cpu(*pflags); 977 978 /* MSI is enabled via ioctl */ 979 if (!is_msi(vdev)) 980 flags &= ~PCI_MSI_FLAGS_ENABLE; 981 982 /* Check queue size */ 983 if ((flags & PCI_MSI_FLAGS_QSIZE) >> 4 > vdev->msi_qmax) { 984 flags &= ~PCI_MSI_FLAGS_QSIZE; 985 flags |= vdev->msi_qmax << 4; 986 } 987 988 /* Write back to virt and to hardware */ 989 *pflags = cpu_to_le16(flags); 990 ret = pci_user_write_config_word(vdev->pdev, 991 start + PCI_MSI_FLAGS, 992 flags); 993 if (ret) 994 return pcibios_err_to_errno(ret); 995 } 996 997 return count; 998 } 999 1000 /* 1001 * MSI determination is per-device, so this routine gets used beyond 1002 * initialization time. Don't add __init 1003 */ 1004 static int init_pci_cap_msi_perm(struct perm_bits *perm, int len, u16 flags) 1005 { 1006 if (alloc_perm_bits(perm, len)) 1007 return -ENOMEM; 1008 1009 perm->readfn = vfio_msi_config_read; 1010 perm->writefn = vfio_msi_config_write; 1011 1012 p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE); 1013 1014 /* 1015 * The upper byte of the control register is reserved, 1016 * just setup the lower byte. 1017 */ 1018 p_setb(perm, PCI_MSI_FLAGS, (u8)ALL_VIRT, (u8)ALL_WRITE); 1019 p_setd(perm, PCI_MSI_ADDRESS_LO, ALL_VIRT, ALL_WRITE); 1020 if (flags & PCI_MSI_FLAGS_64BIT) { 1021 p_setd(perm, PCI_MSI_ADDRESS_HI, ALL_VIRT, ALL_WRITE); 1022 p_setw(perm, PCI_MSI_DATA_64, (u16)ALL_VIRT, (u16)ALL_WRITE); 1023 if (flags & PCI_MSI_FLAGS_MASKBIT) { 1024 p_setd(perm, PCI_MSI_MASK_64, NO_VIRT, ALL_WRITE); 1025 p_setd(perm, PCI_MSI_PENDING_64, NO_VIRT, ALL_WRITE); 1026 } 1027 } else { 1028 p_setw(perm, PCI_MSI_DATA_32, (u16)ALL_VIRT, (u16)ALL_WRITE); 1029 if (flags & PCI_MSI_FLAGS_MASKBIT) { 1030 p_setd(perm, PCI_MSI_MASK_32, NO_VIRT, ALL_WRITE); 1031 p_setd(perm, PCI_MSI_PENDING_32, NO_VIRT, ALL_WRITE); 1032 } 1033 } 1034 return 0; 1035 } 1036 1037 /* Determine MSI CAP field length; initialize msi_perms on 1st call per vdev */ 1038 static int vfio_msi_cap_len(struct vfio_pci_device *vdev, u8 pos) 1039 { 1040 struct pci_dev *pdev = vdev->pdev; 1041 int len, ret; 1042 u16 flags; 1043 1044 ret = pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &flags); 1045 if (ret) 1046 return pcibios_err_to_errno(ret); 1047 1048 len = 10; /* Minimum size */ 1049 if (flags & PCI_MSI_FLAGS_64BIT) 1050 len += 4; 1051 if (flags & PCI_MSI_FLAGS_MASKBIT) 1052 len += 10; 1053 1054 if (vdev->msi_perm) 1055 return len; 1056 1057 vdev->msi_perm = kmalloc(sizeof(struct perm_bits), GFP_KERNEL); 1058 if (!vdev->msi_perm) 1059 return -ENOMEM; 1060 1061 ret = init_pci_cap_msi_perm(vdev->msi_perm, len, flags); 1062 if (ret) 1063 return ret; 1064 1065 return len; 1066 } 1067 1068 /* Determine extended capability length for VC (2 & 9) and MFVC */ 1069 static int vfio_vc_cap_len(struct vfio_pci_device *vdev, u16 pos) 1070 { 1071 struct pci_dev *pdev = vdev->pdev; 1072 u32 tmp; 1073 int ret, evcc, phases, vc_arb; 1074 int len = PCI_CAP_VC_BASE_SIZEOF; 1075 1076 ret = pci_read_config_dword(pdev, pos + PCI_VC_PORT_CAP1, &tmp); 1077 if (ret) 1078 return pcibios_err_to_errno(ret); 1079 1080 evcc = tmp & PCI_VC_CAP1_EVCC; /* extended vc count */ 1081 ret = pci_read_config_dword(pdev, pos + PCI_VC_PORT_CAP2, &tmp); 1082 if (ret) 1083 return pcibios_err_to_errno(ret); 1084 1085 if (tmp & PCI_VC_CAP2_128_PHASE) 1086 phases = 128; 1087 else if (tmp & PCI_VC_CAP2_64_PHASE) 1088 phases = 64; 1089 else if (tmp & PCI_VC_CAP2_32_PHASE) 1090 phases = 32; 1091 else 1092 phases = 0; 1093 1094 vc_arb = phases * 4; 1095 1096 /* 1097 * Port arbitration tables are root & switch only; 1098 * function arbitration tables are function 0 only. 1099 * In either case, we'll never let user write them so 1100 * we don't care how big they are 1101 */ 1102 len += (1 + evcc) * PCI_CAP_VC_PER_VC_SIZEOF; 1103 if (vc_arb) { 1104 len = round_up(len, 16); 1105 len += vc_arb / 8; 1106 } 1107 return len; 1108 } 1109 1110 static int vfio_cap_len(struct vfio_pci_device *vdev, u8 cap, u8 pos) 1111 { 1112 struct pci_dev *pdev = vdev->pdev; 1113 u32 dword; 1114 u16 word; 1115 u8 byte; 1116 int ret; 1117 1118 switch (cap) { 1119 case PCI_CAP_ID_MSI: 1120 return vfio_msi_cap_len(vdev, pos); 1121 case PCI_CAP_ID_PCIX: 1122 ret = pci_read_config_word(pdev, pos + PCI_X_CMD, &word); 1123 if (ret) 1124 return pcibios_err_to_errno(ret); 1125 1126 if (PCI_X_CMD_VERSION(word)) { 1127 /* Test for extended capabilities */ 1128 pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE, &dword); 1129 vdev->extended_caps = (dword != 0); 1130 return PCI_CAP_PCIX_SIZEOF_V2; 1131 } else 1132 return PCI_CAP_PCIX_SIZEOF_V0; 1133 case PCI_CAP_ID_VNDR: 1134 /* length follows next field */ 1135 ret = pci_read_config_byte(pdev, pos + PCI_CAP_FLAGS, &byte); 1136 if (ret) 1137 return pcibios_err_to_errno(ret); 1138 1139 return byte; 1140 case PCI_CAP_ID_EXP: 1141 /* Test for extended capabilities */ 1142 pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE, &dword); 1143 vdev->extended_caps = (dword != 0); 1144 1145 /* length based on version */ 1146 if ((pcie_caps_reg(pdev) & PCI_EXP_FLAGS_VERS) == 1) 1147 return PCI_CAP_EXP_ENDPOINT_SIZEOF_V1; 1148 else 1149 return PCI_CAP_EXP_ENDPOINT_SIZEOF_V2; 1150 case PCI_CAP_ID_HT: 1151 ret = pci_read_config_byte(pdev, pos + 3, &byte); 1152 if (ret) 1153 return pcibios_err_to_errno(ret); 1154 1155 return (byte & HT_3BIT_CAP_MASK) ? 1156 HT_CAP_SIZEOF_SHORT : HT_CAP_SIZEOF_LONG; 1157 case PCI_CAP_ID_SATA: 1158 ret = pci_read_config_byte(pdev, pos + PCI_SATA_REGS, &byte); 1159 if (ret) 1160 return pcibios_err_to_errno(ret); 1161 1162 byte &= PCI_SATA_REGS_MASK; 1163 if (byte == PCI_SATA_REGS_INLINE) 1164 return PCI_SATA_SIZEOF_LONG; 1165 else 1166 return PCI_SATA_SIZEOF_SHORT; 1167 default: 1168 pr_warn("%s: %s unknown length for pci cap 0x%x@0x%x\n", 1169 dev_name(&pdev->dev), __func__, cap, pos); 1170 } 1171 1172 return 0; 1173 } 1174 1175 static int vfio_ext_cap_len(struct vfio_pci_device *vdev, u16 ecap, u16 epos) 1176 { 1177 struct pci_dev *pdev = vdev->pdev; 1178 u8 byte; 1179 u32 dword; 1180 int ret; 1181 1182 switch (ecap) { 1183 case PCI_EXT_CAP_ID_VNDR: 1184 ret = pci_read_config_dword(pdev, epos + PCI_VSEC_HDR, &dword); 1185 if (ret) 1186 return pcibios_err_to_errno(ret); 1187 1188 return dword >> PCI_VSEC_HDR_LEN_SHIFT; 1189 case PCI_EXT_CAP_ID_VC: 1190 case PCI_EXT_CAP_ID_VC9: 1191 case PCI_EXT_CAP_ID_MFVC: 1192 return vfio_vc_cap_len(vdev, epos); 1193 case PCI_EXT_CAP_ID_ACS: 1194 ret = pci_read_config_byte(pdev, epos + PCI_ACS_CAP, &byte); 1195 if (ret) 1196 return pcibios_err_to_errno(ret); 1197 1198 if (byte & PCI_ACS_EC) { 1199 int bits; 1200 1201 ret = pci_read_config_byte(pdev, 1202 epos + PCI_ACS_EGRESS_BITS, 1203 &byte); 1204 if (ret) 1205 return pcibios_err_to_errno(ret); 1206 1207 bits = byte ? round_up(byte, 32) : 256; 1208 return 8 + (bits / 8); 1209 } 1210 return 8; 1211 1212 case PCI_EXT_CAP_ID_REBAR: 1213 ret = pci_read_config_byte(pdev, epos + PCI_REBAR_CTRL, &byte); 1214 if (ret) 1215 return pcibios_err_to_errno(ret); 1216 1217 byte &= PCI_REBAR_CTRL_NBAR_MASK; 1218 byte >>= PCI_REBAR_CTRL_NBAR_SHIFT; 1219 1220 return 4 + (byte * 8); 1221 case PCI_EXT_CAP_ID_DPA: 1222 ret = pci_read_config_byte(pdev, epos + PCI_DPA_CAP, &byte); 1223 if (ret) 1224 return pcibios_err_to_errno(ret); 1225 1226 byte &= PCI_DPA_CAP_SUBSTATE_MASK; 1227 return PCI_DPA_BASE_SIZEOF + byte + 1; 1228 case PCI_EXT_CAP_ID_TPH: 1229 ret = pci_read_config_dword(pdev, epos + PCI_TPH_CAP, &dword); 1230 if (ret) 1231 return pcibios_err_to_errno(ret); 1232 1233 if ((dword & PCI_TPH_CAP_LOC_MASK) == PCI_TPH_LOC_CAP) { 1234 int sts; 1235 1236 sts = dword & PCI_TPH_CAP_ST_MASK; 1237 sts >>= PCI_TPH_CAP_ST_SHIFT; 1238 return PCI_TPH_BASE_SIZEOF + (sts * 2) + 2; 1239 } 1240 return PCI_TPH_BASE_SIZEOF; 1241 default: 1242 pr_warn("%s: %s unknown length for pci ecap 0x%x@0x%x\n", 1243 dev_name(&pdev->dev), __func__, ecap, epos); 1244 } 1245 1246 return 0; 1247 } 1248 1249 static int vfio_fill_vconfig_bytes(struct vfio_pci_device *vdev, 1250 int offset, int size) 1251 { 1252 struct pci_dev *pdev = vdev->pdev; 1253 int ret = 0; 1254 1255 /* 1256 * We try to read physical config space in the largest chunks 1257 * we can, assuming that all of the fields support dword access. 1258 * pci_save_state() makes this same assumption and seems to do ok. 1259 */ 1260 while (size) { 1261 int filled; 1262 1263 if (size >= 4 && !(offset % 4)) { 1264 __le32 *dwordp = (__le32 *)&vdev->vconfig[offset]; 1265 u32 dword; 1266 1267 ret = pci_read_config_dword(pdev, offset, &dword); 1268 if (ret) 1269 return ret; 1270 *dwordp = cpu_to_le32(dword); 1271 filled = 4; 1272 } else if (size >= 2 && !(offset % 2)) { 1273 __le16 *wordp = (__le16 *)&vdev->vconfig[offset]; 1274 u16 word; 1275 1276 ret = pci_read_config_word(pdev, offset, &word); 1277 if (ret) 1278 return ret; 1279 *wordp = cpu_to_le16(word); 1280 filled = 2; 1281 } else { 1282 u8 *byte = &vdev->vconfig[offset]; 1283 ret = pci_read_config_byte(pdev, offset, byte); 1284 if (ret) 1285 return ret; 1286 filled = 1; 1287 } 1288 1289 offset += filled; 1290 size -= filled; 1291 } 1292 1293 return ret; 1294 } 1295 1296 static int vfio_cap_init(struct vfio_pci_device *vdev) 1297 { 1298 struct pci_dev *pdev = vdev->pdev; 1299 u8 *map = vdev->pci_config_map; 1300 u16 status; 1301 u8 pos, *prev, cap; 1302 int loops, ret, caps = 0; 1303 1304 /* Any capabilities? */ 1305 ret = pci_read_config_word(pdev, PCI_STATUS, &status); 1306 if (ret) 1307 return ret; 1308 1309 if (!(status & PCI_STATUS_CAP_LIST)) 1310 return 0; /* Done */ 1311 1312 ret = pci_read_config_byte(pdev, PCI_CAPABILITY_LIST, &pos); 1313 if (ret) 1314 return ret; 1315 1316 /* Mark the previous position in case we want to skip a capability */ 1317 prev = &vdev->vconfig[PCI_CAPABILITY_LIST]; 1318 1319 /* We can bound our loop, capabilities are dword aligned */ 1320 loops = (PCI_CFG_SPACE_SIZE - PCI_STD_HEADER_SIZEOF) / PCI_CAP_SIZEOF; 1321 while (pos && loops--) { 1322 u8 next; 1323 int i, len = 0; 1324 1325 ret = pci_read_config_byte(pdev, pos, &cap); 1326 if (ret) 1327 return ret; 1328 1329 ret = pci_read_config_byte(pdev, 1330 pos + PCI_CAP_LIST_NEXT, &next); 1331 if (ret) 1332 return ret; 1333 1334 if (cap <= PCI_CAP_ID_MAX) { 1335 len = pci_cap_length[cap]; 1336 if (len == 0xFF) { /* Variable length */ 1337 len = vfio_cap_len(vdev, cap, pos); 1338 if (len < 0) 1339 return len; 1340 } 1341 } 1342 1343 if (!len) { 1344 pr_info("%s: %s hiding cap 0x%x\n", 1345 __func__, dev_name(&pdev->dev), cap); 1346 *prev = next; 1347 pos = next; 1348 continue; 1349 } 1350 1351 /* Sanity check, do we overlap other capabilities? */ 1352 for (i = 0; i < len; i++) { 1353 if (likely(map[pos + i] == PCI_CAP_ID_INVALID)) 1354 continue; 1355 1356 pr_warn("%s: %s pci config conflict @0x%x, was cap 0x%x now cap 0x%x\n", 1357 __func__, dev_name(&pdev->dev), 1358 pos + i, map[pos + i], cap); 1359 } 1360 1361 BUILD_BUG_ON(PCI_CAP_ID_MAX >= PCI_CAP_ID_INVALID_VIRT); 1362 1363 memset(map + pos, cap, len); 1364 ret = vfio_fill_vconfig_bytes(vdev, pos, len); 1365 if (ret) 1366 return ret; 1367 1368 prev = &vdev->vconfig[pos + PCI_CAP_LIST_NEXT]; 1369 pos = next; 1370 caps++; 1371 } 1372 1373 /* If we didn't fill any capabilities, clear the status flag */ 1374 if (!caps) { 1375 __le16 *vstatus = (__le16 *)&vdev->vconfig[PCI_STATUS]; 1376 *vstatus &= ~cpu_to_le16(PCI_STATUS_CAP_LIST); 1377 } 1378 1379 return 0; 1380 } 1381 1382 static int vfio_ecap_init(struct vfio_pci_device *vdev) 1383 { 1384 struct pci_dev *pdev = vdev->pdev; 1385 u8 *map = vdev->pci_config_map; 1386 u16 epos; 1387 __le32 *prev = NULL; 1388 int loops, ret, ecaps = 0; 1389 1390 if (!vdev->extended_caps) 1391 return 0; 1392 1393 epos = PCI_CFG_SPACE_SIZE; 1394 1395 loops = (pdev->cfg_size - PCI_CFG_SPACE_SIZE) / PCI_CAP_SIZEOF; 1396 1397 while (loops-- && epos >= PCI_CFG_SPACE_SIZE) { 1398 u32 header; 1399 u16 ecap; 1400 int i, len = 0; 1401 bool hidden = false; 1402 1403 ret = pci_read_config_dword(pdev, epos, &header); 1404 if (ret) 1405 return ret; 1406 1407 ecap = PCI_EXT_CAP_ID(header); 1408 1409 if (ecap <= PCI_EXT_CAP_ID_MAX) { 1410 len = pci_ext_cap_length[ecap]; 1411 if (len == 0xFF) { 1412 len = vfio_ext_cap_len(vdev, ecap, epos); 1413 if (len < 0) 1414 return ret; 1415 } 1416 } 1417 1418 if (!len) { 1419 pr_info("%s: %s hiding ecap 0x%x@0x%x\n", 1420 __func__, dev_name(&pdev->dev), ecap, epos); 1421 1422 /* If not the first in the chain, we can skip over it */ 1423 if (prev) { 1424 u32 val = epos = PCI_EXT_CAP_NEXT(header); 1425 *prev &= cpu_to_le32(~(0xffcU << 20)); 1426 *prev |= cpu_to_le32(val << 20); 1427 continue; 1428 } 1429 1430 /* 1431 * Otherwise, fill in a placeholder, the direct 1432 * readfn will virtualize this automatically 1433 */ 1434 len = PCI_CAP_SIZEOF; 1435 hidden = true; 1436 } 1437 1438 for (i = 0; i < len; i++) { 1439 if (likely(map[epos + i] == PCI_CAP_ID_INVALID)) 1440 continue; 1441 1442 pr_warn("%s: %s pci config conflict @0x%x, was ecap 0x%x now ecap 0x%x\n", 1443 __func__, dev_name(&pdev->dev), 1444 epos + i, map[epos + i], ecap); 1445 } 1446 1447 /* 1448 * Even though ecap is 2 bytes, we're currently a long way 1449 * from exceeding 1 byte capabilities. If we ever make it 1450 * up to 0xFE we'll need to up this to a two-byte, byte map. 1451 */ 1452 BUILD_BUG_ON(PCI_EXT_CAP_ID_MAX >= PCI_CAP_ID_INVALID_VIRT); 1453 1454 memset(map + epos, ecap, len); 1455 ret = vfio_fill_vconfig_bytes(vdev, epos, len); 1456 if (ret) 1457 return ret; 1458 1459 /* 1460 * If we're just using this capability to anchor the list, 1461 * hide the real ID. Only count real ecaps. XXX PCI spec 1462 * indicates to use cap id = 0, version = 0, next = 0 if 1463 * ecaps are absent, hope users check all the way to next. 1464 */ 1465 if (hidden) 1466 *(__le32 *)&vdev->vconfig[epos] &= 1467 cpu_to_le32((0xffcU << 20)); 1468 else 1469 ecaps++; 1470 1471 prev = (__le32 *)&vdev->vconfig[epos]; 1472 epos = PCI_EXT_CAP_NEXT(header); 1473 } 1474 1475 if (!ecaps) 1476 *(u32 *)&vdev->vconfig[PCI_CFG_SPACE_SIZE] = 0; 1477 1478 return 0; 1479 } 1480 1481 /* 1482 * For each device we allocate a pci_config_map that indicates the 1483 * capability occupying each dword and thus the struct perm_bits we 1484 * use for read and write. We also allocate a virtualized config 1485 * space which tracks reads and writes to bits that we emulate for 1486 * the user. Initial values filled from device. 1487 * 1488 * Using shared stuct perm_bits between all vfio-pci devices saves 1489 * us from allocating cfg_size buffers for virt and write for every 1490 * device. We could remove vconfig and allocate individual buffers 1491 * for each area requring emulated bits, but the array of pointers 1492 * would be comparable in size (at least for standard config space). 1493 */ 1494 int vfio_config_init(struct vfio_pci_device *vdev) 1495 { 1496 struct pci_dev *pdev = vdev->pdev; 1497 u8 *map, *vconfig; 1498 int ret; 1499 1500 /* 1501 * Config space, caps and ecaps are all dword aligned, so we could 1502 * use one byte per dword to record the type. However, there are 1503 * no requiremenst on the length of a capability, so the gap between 1504 * capabilities needs byte granularity. 1505 */ 1506 map = kmalloc(pdev->cfg_size, GFP_KERNEL); 1507 if (!map) 1508 return -ENOMEM; 1509 1510 vconfig = kmalloc(pdev->cfg_size, GFP_KERNEL); 1511 if (!vconfig) { 1512 kfree(map); 1513 return -ENOMEM; 1514 } 1515 1516 vdev->pci_config_map = map; 1517 vdev->vconfig = vconfig; 1518 1519 memset(map, PCI_CAP_ID_BASIC, PCI_STD_HEADER_SIZEOF); 1520 memset(map + PCI_STD_HEADER_SIZEOF, PCI_CAP_ID_INVALID, 1521 pdev->cfg_size - PCI_STD_HEADER_SIZEOF); 1522 1523 ret = vfio_fill_vconfig_bytes(vdev, 0, PCI_STD_HEADER_SIZEOF); 1524 if (ret) 1525 goto out; 1526 1527 vdev->bardirty = true; 1528 1529 /* 1530 * XXX can we just pci_load_saved_state/pci_restore_state? 1531 * may need to rebuild vconfig after that 1532 */ 1533 1534 /* For restore after reset */ 1535 vdev->rbar[0] = le32_to_cpu(*(__le32 *)&vconfig[PCI_BASE_ADDRESS_0]); 1536 vdev->rbar[1] = le32_to_cpu(*(__le32 *)&vconfig[PCI_BASE_ADDRESS_1]); 1537 vdev->rbar[2] = le32_to_cpu(*(__le32 *)&vconfig[PCI_BASE_ADDRESS_2]); 1538 vdev->rbar[3] = le32_to_cpu(*(__le32 *)&vconfig[PCI_BASE_ADDRESS_3]); 1539 vdev->rbar[4] = le32_to_cpu(*(__le32 *)&vconfig[PCI_BASE_ADDRESS_4]); 1540 vdev->rbar[5] = le32_to_cpu(*(__le32 *)&vconfig[PCI_BASE_ADDRESS_5]); 1541 vdev->rbar[6] = le32_to_cpu(*(__le32 *)&vconfig[PCI_ROM_ADDRESS]); 1542 1543 if (pdev->is_virtfn) { 1544 *(__le16 *)&vconfig[PCI_VENDOR_ID] = cpu_to_le16(pdev->vendor); 1545 *(__le16 *)&vconfig[PCI_DEVICE_ID] = cpu_to_le16(pdev->device); 1546 } 1547 1548 if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX)) 1549 vconfig[PCI_INTERRUPT_PIN] = 0; 1550 1551 ret = vfio_cap_init(vdev); 1552 if (ret) 1553 goto out; 1554 1555 ret = vfio_ecap_init(vdev); 1556 if (ret) 1557 goto out; 1558 1559 return 0; 1560 1561 out: 1562 kfree(map); 1563 vdev->pci_config_map = NULL; 1564 kfree(vconfig); 1565 vdev->vconfig = NULL; 1566 return pcibios_err_to_errno(ret); 1567 } 1568 1569 void vfio_config_free(struct vfio_pci_device *vdev) 1570 { 1571 kfree(vdev->vconfig); 1572 vdev->vconfig = NULL; 1573 kfree(vdev->pci_config_map); 1574 vdev->pci_config_map = NULL; 1575 kfree(vdev->msi_perm); 1576 vdev->msi_perm = NULL; 1577 } 1578 1579 /* 1580 * Find the remaining number of bytes in a dword that match the given 1581 * position. Stop at either the end of the capability or the dword boundary. 1582 */ 1583 static size_t vfio_pci_cap_remaining_dword(struct vfio_pci_device *vdev, 1584 loff_t pos) 1585 { 1586 u8 cap = vdev->pci_config_map[pos]; 1587 size_t i; 1588 1589 for (i = 1; (pos + i) % 4 && vdev->pci_config_map[pos + i] == cap; i++) 1590 /* nop */; 1591 1592 return i; 1593 } 1594 1595 static ssize_t vfio_config_do_rw(struct vfio_pci_device *vdev, char __user *buf, 1596 size_t count, loff_t *ppos, bool iswrite) 1597 { 1598 struct pci_dev *pdev = vdev->pdev; 1599 struct perm_bits *perm; 1600 __le32 val = 0; 1601 int cap_start = 0, offset; 1602 u8 cap_id; 1603 ssize_t ret; 1604 1605 if (*ppos < 0 || *ppos >= pdev->cfg_size || 1606 *ppos + count > pdev->cfg_size) 1607 return -EFAULT; 1608 1609 /* 1610 * Chop accesses into aligned chunks containing no more than a 1611 * single capability. Caller increments to the next chunk. 1612 */ 1613 count = min(count, vfio_pci_cap_remaining_dword(vdev, *ppos)); 1614 if (count >= 4 && !(*ppos % 4)) 1615 count = 4; 1616 else if (count >= 2 && !(*ppos % 2)) 1617 count = 2; 1618 else 1619 count = 1; 1620 1621 ret = count; 1622 1623 cap_id = vdev->pci_config_map[*ppos]; 1624 1625 if (cap_id == PCI_CAP_ID_INVALID) { 1626 perm = &unassigned_perms; 1627 cap_start = *ppos; 1628 } else if (cap_id == PCI_CAP_ID_INVALID_VIRT) { 1629 perm = &virt_perms; 1630 cap_start = *ppos; 1631 } else { 1632 if (*ppos >= PCI_CFG_SPACE_SIZE) { 1633 WARN_ON(cap_id > PCI_EXT_CAP_ID_MAX); 1634 1635 perm = &ecap_perms[cap_id]; 1636 cap_start = vfio_find_cap_start(vdev, *ppos); 1637 } else { 1638 WARN_ON(cap_id > PCI_CAP_ID_MAX); 1639 1640 perm = &cap_perms[cap_id]; 1641 1642 if (cap_id == PCI_CAP_ID_MSI) 1643 perm = vdev->msi_perm; 1644 1645 if (cap_id > PCI_CAP_ID_BASIC) 1646 cap_start = vfio_find_cap_start(vdev, *ppos); 1647 } 1648 } 1649 1650 WARN_ON(!cap_start && cap_id != PCI_CAP_ID_BASIC); 1651 WARN_ON(cap_start > *ppos); 1652 1653 offset = *ppos - cap_start; 1654 1655 if (iswrite) { 1656 if (!perm->writefn) 1657 return ret; 1658 1659 if (copy_from_user(&val, buf, count)) 1660 return -EFAULT; 1661 1662 ret = perm->writefn(vdev, *ppos, count, perm, offset, val); 1663 } else { 1664 if (perm->readfn) { 1665 ret = perm->readfn(vdev, *ppos, count, 1666 perm, offset, &val); 1667 if (ret < 0) 1668 return ret; 1669 } 1670 1671 if (copy_to_user(buf, &val, count)) 1672 return -EFAULT; 1673 } 1674 1675 return ret; 1676 } 1677 1678 ssize_t vfio_pci_config_rw(struct vfio_pci_device *vdev, char __user *buf, 1679 size_t count, loff_t *ppos, bool iswrite) 1680 { 1681 size_t done = 0; 1682 int ret = 0; 1683 loff_t pos = *ppos; 1684 1685 pos &= VFIO_PCI_OFFSET_MASK; 1686 1687 while (count) { 1688 ret = vfio_config_do_rw(vdev, buf, count, &pos, iswrite); 1689 if (ret < 0) 1690 return ret; 1691 1692 count -= ret; 1693 done += ret; 1694 buf += ret; 1695 pos += ret; 1696 } 1697 1698 *ppos += done; 1699 1700 return done; 1701 } 1702