1 /* 2 * Copyright (c) 2007, Intel Corporation. 3 * 4 * This work is licensed under the terms of the GNU GPL, version 2. See 5 * the COPYING file in the top-level directory. 6 * 7 * Jiang Yunhong <yunhong.jiang@intel.com> 8 * 9 * This file implements direct PCI assignment to a HVM guest 10 */ 11 12 #include <sys/mman.h> 13 14 #include "hw/xen/xen_backend.h" 15 #include "xen_pt.h" 16 #include "hw/i386/apic-msidef.h" 17 18 19 #define XEN_PT_AUTO_ASSIGN -1 20 21 /* shift count for gflags */ 22 #define XEN_PT_GFLAGS_SHIFT_DEST_ID 0 23 #define XEN_PT_GFLAGS_SHIFT_RH 8 24 #define XEN_PT_GFLAGS_SHIFT_DM 9 25 #define XEN_PT_GFLAGSSHIFT_DELIV_MODE 12 26 #define XEN_PT_GFLAGSSHIFT_TRG_MODE 15 27 28 29 /* 30 * Helpers 31 */ 32 33 static inline uint8_t msi_vector(uint32_t data) 34 { 35 return (data & MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT; 36 } 37 38 static inline uint8_t msi_dest_id(uint32_t addr) 39 { 40 return (addr & MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT; 41 } 42 43 static inline uint32_t msi_ext_dest_id(uint32_t addr_hi) 44 { 45 return addr_hi & 0xffffff00; 46 } 47 48 static uint32_t msi_gflags(uint32_t data, uint64_t addr) 49 { 50 uint32_t result = 0; 51 int rh, dm, dest_id, deliv_mode, trig_mode; 52 53 rh = (addr >> MSI_ADDR_REDIRECTION_SHIFT) & 0x1; 54 dm = (addr >> MSI_ADDR_DEST_MODE_SHIFT) & 0x1; 55 dest_id = msi_dest_id(addr); 56 deliv_mode = (data >> MSI_DATA_DELIVERY_MODE_SHIFT) & 0x7; 57 trig_mode = (data >> MSI_DATA_TRIGGER_SHIFT) & 0x1; 58 59 result = dest_id | (rh << XEN_PT_GFLAGS_SHIFT_RH) 60 | (dm << XEN_PT_GFLAGS_SHIFT_DM) 61 | (deliv_mode << XEN_PT_GFLAGSSHIFT_DELIV_MODE) 62 | (trig_mode << XEN_PT_GFLAGSSHIFT_TRG_MODE); 63 64 return result; 65 } 66 67 static inline uint64_t msi_addr64(XenPTMSI *msi) 68 { 69 return (uint64_t)msi->addr_hi << 32 | msi->addr_lo; 70 } 71 72 static int msi_msix_enable(XenPCIPassthroughState *s, 73 uint32_t address, 74 uint16_t flag, 75 bool enable) 76 { 77 uint16_t val = 0; 78 int rc; 79 80 if (!address) { 81 return -1; 82 } 83 84 rc = xen_host_pci_get_word(&s->real_device, address, &val); 85 if (rc) { 86 XEN_PT_ERR(&s->dev, "Failed to read MSI/MSI-X register (0x%x), rc:%d\n", 87 address, rc); 88 return rc; 89 } 90 if (enable) { 91 val |= flag; 92 } else { 93 val &= ~flag; 94 } 95 rc = xen_host_pci_set_word(&s->real_device, address, val); 96 if (rc) { 97 XEN_PT_ERR(&s->dev, "Failed to write MSI/MSI-X register (0x%x), rc:%d\n", 98 address, rc); 99 } 100 return rc; 101 } 102 103 static int msi_msix_setup(XenPCIPassthroughState *s, 104 uint64_t addr, 105 uint32_t data, 106 int *ppirq, 107 bool is_msix, 108 int msix_entry, 109 bool is_not_mapped) 110 { 111 uint8_t gvec = msi_vector(data); 112 int rc = 0; 113 114 assert((!is_msix && msix_entry == 0) || is_msix); 115 116 if (gvec == 0) { 117 /* if gvec is 0, the guest is asking for a particular pirq that 118 * is passed as dest_id */ 119 *ppirq = msi_ext_dest_id(addr >> 32) | msi_dest_id(addr); 120 if (!*ppirq) { 121 /* this probably identifies an misconfiguration of the guest, 122 * try the emulated path */ 123 *ppirq = XEN_PT_UNASSIGNED_PIRQ; 124 } else { 125 XEN_PT_LOG(&s->dev, "requested pirq %d for MSI%s" 126 " (vec: %#x, entry: %#x)\n", 127 *ppirq, is_msix ? "-X" : "", gvec, msix_entry); 128 } 129 } 130 131 if (is_not_mapped) { 132 uint64_t table_base = 0; 133 134 if (is_msix) { 135 table_base = s->msix->table_base; 136 } 137 138 rc = xc_physdev_map_pirq_msi(xen_xc, xen_domid, XEN_PT_AUTO_ASSIGN, 139 ppirq, PCI_DEVFN(s->real_device.dev, 140 s->real_device.func), 141 s->real_device.bus, 142 msix_entry, table_base); 143 if (rc) { 144 XEN_PT_ERR(&s->dev, 145 "Mapping of MSI%s (err: %i, vec: %#x, entry %#x)\n", 146 is_msix ? "-X" : "", errno, gvec, msix_entry); 147 return rc; 148 } 149 } 150 151 return 0; 152 } 153 static int msi_msix_update(XenPCIPassthroughState *s, 154 uint64_t addr, 155 uint32_t data, 156 int pirq, 157 bool is_msix, 158 int msix_entry, 159 int *old_pirq) 160 { 161 PCIDevice *d = &s->dev; 162 uint8_t gvec = msi_vector(data); 163 uint32_t gflags = msi_gflags(data, addr); 164 int rc = 0; 165 uint64_t table_addr = 0; 166 167 XEN_PT_LOG(d, "Updating MSI%s with pirq %d gvec %#x gflags %#x" 168 " (entry: %#x)\n", 169 is_msix ? "-X" : "", pirq, gvec, gflags, msix_entry); 170 171 if (is_msix) { 172 table_addr = s->msix->mmio_base_addr; 173 } 174 175 rc = xc_domain_update_msi_irq(xen_xc, xen_domid, gvec, 176 pirq, gflags, table_addr); 177 178 if (rc) { 179 XEN_PT_ERR(d, "Updating of MSI%s failed. (err: %d)\n", 180 is_msix ? "-X" : "", errno); 181 182 if (xc_physdev_unmap_pirq(xen_xc, xen_domid, *old_pirq)) { 183 XEN_PT_ERR(d, "Unmapping of MSI%s pirq %d failed. (err: %d)\n", 184 is_msix ? "-X" : "", *old_pirq, errno); 185 } 186 *old_pirq = XEN_PT_UNASSIGNED_PIRQ; 187 } 188 return rc; 189 } 190 191 static int msi_msix_disable(XenPCIPassthroughState *s, 192 uint64_t addr, 193 uint32_t data, 194 int pirq, 195 bool is_msix, 196 bool is_binded) 197 { 198 PCIDevice *d = &s->dev; 199 uint8_t gvec = msi_vector(data); 200 uint32_t gflags = msi_gflags(data, addr); 201 int rc = 0; 202 203 if (pirq == XEN_PT_UNASSIGNED_PIRQ) { 204 return 0; 205 } 206 207 if (is_binded) { 208 XEN_PT_LOG(d, "Unbind MSI%s with pirq %d, gvec %#x\n", 209 is_msix ? "-X" : "", pirq, gvec); 210 rc = xc_domain_unbind_msi_irq(xen_xc, xen_domid, gvec, pirq, gflags); 211 if (rc) { 212 XEN_PT_ERR(d, "Unbinding of MSI%s failed. (err: %d, pirq: %d, gvec: %#x)\n", 213 is_msix ? "-X" : "", errno, pirq, gvec); 214 return rc; 215 } 216 } 217 218 XEN_PT_LOG(d, "Unmap MSI%s pirq %d\n", is_msix ? "-X" : "", pirq); 219 rc = xc_physdev_unmap_pirq(xen_xc, xen_domid, pirq); 220 if (rc) { 221 XEN_PT_ERR(d, "Unmapping of MSI%s pirq %d failed. (err: %i)\n", 222 is_msix ? "-X" : "", pirq, errno); 223 return rc; 224 } 225 226 return 0; 227 } 228 229 /* 230 * MSI virtualization functions 231 */ 232 233 static int xen_pt_msi_set_enable(XenPCIPassthroughState *s, bool enable) 234 { 235 XEN_PT_LOG(&s->dev, "%s MSI.\n", enable ? "enabling" : "disabling"); 236 237 if (!s->msi) { 238 return -1; 239 } 240 241 return msi_msix_enable(s, s->msi->ctrl_offset, PCI_MSI_FLAGS_ENABLE, 242 enable); 243 } 244 245 /* setup physical msi, but don't enable it */ 246 int xen_pt_msi_setup(XenPCIPassthroughState *s) 247 { 248 int pirq = XEN_PT_UNASSIGNED_PIRQ; 249 int rc = 0; 250 XenPTMSI *msi = s->msi; 251 252 if (msi->initialized) { 253 XEN_PT_ERR(&s->dev, 254 "Setup physical MSI when it has been properly initialized.\n"); 255 return -1; 256 } 257 258 rc = msi_msix_setup(s, msi_addr64(msi), msi->data, &pirq, false, 0, true); 259 if (rc) { 260 return rc; 261 } 262 263 if (pirq < 0) { 264 XEN_PT_ERR(&s->dev, "Invalid pirq number: %d.\n", pirq); 265 return -1; 266 } 267 268 msi->pirq = pirq; 269 XEN_PT_LOG(&s->dev, "MSI mapped with pirq %d.\n", pirq); 270 271 return 0; 272 } 273 274 int xen_pt_msi_update(XenPCIPassthroughState *s) 275 { 276 XenPTMSI *msi = s->msi; 277 return msi_msix_update(s, msi_addr64(msi), msi->data, msi->pirq, 278 false, 0, &msi->pirq); 279 } 280 281 void xen_pt_msi_disable(XenPCIPassthroughState *s) 282 { 283 XenPTMSI *msi = s->msi; 284 285 if (!msi) { 286 return; 287 } 288 289 (void)xen_pt_msi_set_enable(s, false); 290 291 msi_msix_disable(s, msi_addr64(msi), msi->data, msi->pirq, false, 292 msi->initialized); 293 294 /* clear msi info */ 295 msi->flags &= ~PCI_MSI_FLAGS_ENABLE; 296 msi->initialized = false; 297 msi->mapped = false; 298 msi->pirq = XEN_PT_UNASSIGNED_PIRQ; 299 } 300 301 /* 302 * MSI-X virtualization functions 303 */ 304 305 static int msix_set_enable(XenPCIPassthroughState *s, bool enabled) 306 { 307 XEN_PT_LOG(&s->dev, "%s MSI-X.\n", enabled ? "enabling" : "disabling"); 308 309 if (!s->msix) { 310 return -1; 311 } 312 313 return msi_msix_enable(s, s->msix->ctrl_offset, PCI_MSIX_FLAGS_ENABLE, 314 enabled); 315 } 316 317 static int xen_pt_msix_update_one(XenPCIPassthroughState *s, int entry_nr) 318 { 319 XenPTMSIXEntry *entry = NULL; 320 int pirq; 321 int rc; 322 323 if (entry_nr < 0 || entry_nr >= s->msix->total_entries) { 324 return -EINVAL; 325 } 326 327 entry = &s->msix->msix_entry[entry_nr]; 328 329 if (!entry->updated) { 330 return 0; 331 } 332 333 pirq = entry->pirq; 334 335 rc = msi_msix_setup(s, entry->addr, entry->data, &pirq, true, entry_nr, 336 entry->pirq == XEN_PT_UNASSIGNED_PIRQ); 337 if (rc) { 338 return rc; 339 } 340 if (entry->pirq == XEN_PT_UNASSIGNED_PIRQ) { 341 entry->pirq = pirq; 342 } 343 344 rc = msi_msix_update(s, entry->addr, entry->data, pirq, true, 345 entry_nr, &entry->pirq); 346 347 if (!rc) { 348 entry->updated = false; 349 } 350 351 return rc; 352 } 353 354 int xen_pt_msix_update(XenPCIPassthroughState *s) 355 { 356 XenPTMSIX *msix = s->msix; 357 int i; 358 359 for (i = 0; i < msix->total_entries; i++) { 360 xen_pt_msix_update_one(s, i); 361 } 362 363 return 0; 364 } 365 366 void xen_pt_msix_disable(XenPCIPassthroughState *s) 367 { 368 int i = 0; 369 370 msix_set_enable(s, false); 371 372 for (i = 0; i < s->msix->total_entries; i++) { 373 XenPTMSIXEntry *entry = &s->msix->msix_entry[i]; 374 375 msi_msix_disable(s, entry->addr, entry->data, entry->pirq, true, true); 376 377 /* clear MSI-X info */ 378 entry->pirq = XEN_PT_UNASSIGNED_PIRQ; 379 entry->updated = false; 380 } 381 } 382 383 int xen_pt_msix_update_remap(XenPCIPassthroughState *s, int bar_index) 384 { 385 XenPTMSIXEntry *entry; 386 int i, ret; 387 388 if (!(s->msix && s->msix->bar_index == bar_index)) { 389 return 0; 390 } 391 392 for (i = 0; i < s->msix->total_entries; i++) { 393 entry = &s->msix->msix_entry[i]; 394 if (entry->pirq != XEN_PT_UNASSIGNED_PIRQ) { 395 ret = xc_domain_unbind_pt_irq(xen_xc, xen_domid, entry->pirq, 396 PT_IRQ_TYPE_MSI, 0, 0, 0, 0); 397 if (ret) { 398 XEN_PT_ERR(&s->dev, "unbind MSI-X entry %d failed (err: %d)\n", 399 entry->pirq, errno); 400 } 401 entry->updated = true; 402 } 403 } 404 return xen_pt_msix_update(s); 405 } 406 407 static uint32_t get_entry_value(XenPTMSIXEntry *e, int offset) 408 { 409 switch (offset) { 410 case PCI_MSIX_ENTRY_LOWER_ADDR: 411 return e->addr & UINT32_MAX; 412 case PCI_MSIX_ENTRY_UPPER_ADDR: 413 return e->addr >> 32; 414 case PCI_MSIX_ENTRY_DATA: 415 return e->data; 416 case PCI_MSIX_ENTRY_VECTOR_CTRL: 417 return e->vector_ctrl; 418 default: 419 return 0; 420 } 421 } 422 423 static void set_entry_value(XenPTMSIXEntry *e, int offset, uint32_t val) 424 { 425 switch (offset) { 426 case PCI_MSIX_ENTRY_LOWER_ADDR: 427 e->addr = (e->addr & ((uint64_t)UINT32_MAX << 32)) | val; 428 break; 429 case PCI_MSIX_ENTRY_UPPER_ADDR: 430 e->addr = (uint64_t)val << 32 | (e->addr & UINT32_MAX); 431 break; 432 case PCI_MSIX_ENTRY_DATA: 433 e->data = val; 434 break; 435 case PCI_MSIX_ENTRY_VECTOR_CTRL: 436 e->vector_ctrl = val; 437 break; 438 } 439 } 440 441 static void pci_msix_write(void *opaque, hwaddr addr, 442 uint64_t val, unsigned size) 443 { 444 XenPCIPassthroughState *s = opaque; 445 XenPTMSIX *msix = s->msix; 446 XenPTMSIXEntry *entry; 447 unsigned int entry_nr, offset; 448 449 entry_nr = addr / PCI_MSIX_ENTRY_SIZE; 450 if (entry_nr >= msix->total_entries) { 451 return; 452 } 453 entry = &msix->msix_entry[entry_nr]; 454 offset = addr % PCI_MSIX_ENTRY_SIZE; 455 456 if (offset != PCI_MSIX_ENTRY_VECTOR_CTRL) { 457 const volatile uint32_t *vec_ctrl; 458 459 if (get_entry_value(entry, offset) == val 460 && entry->pirq != XEN_PT_UNASSIGNED_PIRQ) { 461 return; 462 } 463 464 /* 465 * If Xen intercepts the mask bit access, entry->vec_ctrl may not be 466 * up-to-date. Read from hardware directly. 467 */ 468 vec_ctrl = s->msix->phys_iomem_base + entry_nr * PCI_MSIX_ENTRY_SIZE 469 + PCI_MSIX_ENTRY_VECTOR_CTRL; 470 471 if (msix->enabled && !(*vec_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT)) { 472 if (!entry->warned) { 473 entry->warned = true; 474 XEN_PT_ERR(&s->dev, "Can't update msix entry %d since MSI-X is" 475 " already enabled.\n", entry_nr); 476 } 477 return; 478 } 479 480 entry->updated = true; 481 } 482 483 set_entry_value(entry, offset, val); 484 485 if (offset == PCI_MSIX_ENTRY_VECTOR_CTRL) { 486 if (msix->enabled && !(val & PCI_MSIX_ENTRY_CTRL_MASKBIT)) { 487 xen_pt_msix_update_one(s, entry_nr); 488 } 489 } 490 } 491 492 static uint64_t pci_msix_read(void *opaque, hwaddr addr, 493 unsigned size) 494 { 495 XenPCIPassthroughState *s = opaque; 496 XenPTMSIX *msix = s->msix; 497 int entry_nr, offset; 498 499 entry_nr = addr / PCI_MSIX_ENTRY_SIZE; 500 if (entry_nr < 0) { 501 XEN_PT_ERR(&s->dev, "asked MSI-X entry '%i' invalid!\n", entry_nr); 502 return 0; 503 } 504 505 offset = addr % PCI_MSIX_ENTRY_SIZE; 506 507 if (addr < msix->total_entries * PCI_MSIX_ENTRY_SIZE) { 508 return get_entry_value(&msix->msix_entry[entry_nr], offset); 509 } else { 510 /* Pending Bit Array (PBA) */ 511 return *(uint32_t *)(msix->phys_iomem_base + addr); 512 } 513 } 514 515 static const MemoryRegionOps pci_msix_ops = { 516 .read = pci_msix_read, 517 .write = pci_msix_write, 518 .endianness = DEVICE_NATIVE_ENDIAN, 519 .valid = { 520 .min_access_size = 4, 521 .max_access_size = 4, 522 .unaligned = false, 523 }, 524 }; 525 526 int xen_pt_msix_init(XenPCIPassthroughState *s, uint32_t base) 527 { 528 uint8_t id = 0; 529 uint16_t control = 0; 530 uint32_t table_off = 0; 531 int i, total_entries, bar_index; 532 XenHostPCIDevice *hd = &s->real_device; 533 PCIDevice *d = &s->dev; 534 int fd = -1; 535 XenPTMSIX *msix = NULL; 536 int rc = 0; 537 538 rc = xen_host_pci_get_byte(hd, base + PCI_CAP_LIST_ID, &id); 539 if (rc) { 540 return rc; 541 } 542 543 if (id != PCI_CAP_ID_MSIX) { 544 XEN_PT_ERR(d, "Invalid id %#x base %#x\n", id, base); 545 return -1; 546 } 547 548 xen_host_pci_get_word(hd, base + PCI_MSIX_FLAGS, &control); 549 total_entries = control & PCI_MSIX_FLAGS_QSIZE; 550 total_entries += 1; 551 552 s->msix = g_malloc0(sizeof (XenPTMSIX) 553 + total_entries * sizeof (XenPTMSIXEntry)); 554 msix = s->msix; 555 556 msix->total_entries = total_entries; 557 for (i = 0; i < total_entries; i++) { 558 msix->msix_entry[i].pirq = XEN_PT_UNASSIGNED_PIRQ; 559 } 560 561 memory_region_init_io(&msix->mmio, OBJECT(s), &pci_msix_ops, 562 s, "xen-pci-pt-msix", 563 (total_entries * PCI_MSIX_ENTRY_SIZE 564 + XC_PAGE_SIZE - 1) 565 & XC_PAGE_MASK); 566 567 xen_host_pci_get_long(hd, base + PCI_MSIX_TABLE, &table_off); 568 bar_index = msix->bar_index = table_off & PCI_MSIX_FLAGS_BIRMASK; 569 table_off = table_off & ~PCI_MSIX_FLAGS_BIRMASK; 570 msix->table_base = s->real_device.io_regions[bar_index].base_addr; 571 XEN_PT_LOG(d, "get MSI-X table BAR base 0x%"PRIx64"\n", msix->table_base); 572 573 fd = open("/dev/mem", O_RDWR); 574 if (fd == -1) { 575 rc = -errno; 576 XEN_PT_ERR(d, "Can't open /dev/mem: %s\n", strerror(errno)); 577 goto error_out; 578 } 579 XEN_PT_LOG(d, "table_off = %#x, total_entries = %d\n", 580 table_off, total_entries); 581 msix->table_offset_adjust = table_off & 0x0fff; 582 msix->phys_iomem_base = 583 mmap(NULL, 584 total_entries * PCI_MSIX_ENTRY_SIZE + msix->table_offset_adjust, 585 PROT_READ, 586 MAP_SHARED | MAP_LOCKED, 587 fd, 588 msix->table_base + table_off - msix->table_offset_adjust); 589 close(fd); 590 if (msix->phys_iomem_base == MAP_FAILED) { 591 rc = -errno; 592 XEN_PT_ERR(d, "Can't map physical MSI-X table: %s\n", strerror(errno)); 593 goto error_out; 594 } 595 msix->phys_iomem_base = (char *)msix->phys_iomem_base 596 + msix->table_offset_adjust; 597 598 XEN_PT_LOG(d, "mapping physical MSI-X table to %p\n", 599 msix->phys_iomem_base); 600 601 memory_region_add_subregion_overlap(&s->bar[bar_index], table_off, 602 &msix->mmio, 603 2); /* Priority: pci default + 1 */ 604 605 return 0; 606 607 error_out: 608 g_free(s->msix); 609 s->msix = NULL; 610 return rc; 611 } 612 613 void xen_pt_msix_unmap(XenPCIPassthroughState *s) 614 { 615 XenPTMSIX *msix = s->msix; 616 617 if (!msix) { 618 return; 619 } 620 621 /* unmap the MSI-X memory mapped register area */ 622 if (msix->phys_iomem_base) { 623 XEN_PT_LOG(&s->dev, "unmapping physical MSI-X table from %p\n", 624 msix->phys_iomem_base); 625 munmap(msix->phys_iomem_base, msix->total_entries * PCI_MSIX_ENTRY_SIZE 626 + msix->table_offset_adjust); 627 } 628 629 memory_region_del_subregion(&s->bar[msix->bar_index], &msix->mmio); 630 } 631 632 void xen_pt_msix_delete(XenPCIPassthroughState *s) 633 { 634 XenPTMSIX *msix = s->msix; 635 636 if (!msix) { 637 return; 638 } 639 640 object_unparent(OBJECT(&msix->mmio)); 641 642 g_free(s->msix); 643 s->msix = NULL; 644 } 645