/*
 * Copyright (c) 2007, Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Jiang Yunhong <yunhong.jiang@intel.com>
 *
 * This file implements direct PCI assignment to a HVM guest
 */

#include "qemu/osdep.h"

#include "hw/xen/xen_backend.h"
#include "xen_pt.h"
#include "hw/i386/apic-msidef.h"


#define XEN_PT_AUTO_ASSIGN -1

/* shift count for gflags */
#define XEN_PT_GFLAGS_SHIFT_DEST_ID       0
#define XEN_PT_GFLAGS_SHIFT_RH            8
#define XEN_PT_GFLAGS_SHIFT_DM            9
#define XEN_PT_GFLAGSSHIFT_DELIV_MODE    12
#define XEN_PT_GFLAGSSHIFT_TRG_MODE      15

#define latch(fld) latch[PCI_MSIX_ENTRY_##fld / sizeof(uint32_t)]

/*
 * Helpers
 */

static inline uint8_t msi_vector(uint32_t data)
{
    return (data & MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
}

static inline uint8_t msi_dest_id(uint32_t addr)
{
    return (addr & MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
}

static inline uint32_t msi_ext_dest_id(uint32_t addr_hi)
{
    return addr_hi & 0xffffff00;
}

static uint32_t msi_gflags(uint32_t data, uint64_t addr)
{
    uint32_t result = 0;
    int rh, dm, dest_id, deliv_mode, trig_mode;

    rh = (addr >> MSI_ADDR_REDIRECTION_SHIFT) & 0x1;
    dm = (addr >> MSI_ADDR_DEST_MODE_SHIFT) & 0x1;
    dest_id = msi_dest_id(addr);
    deliv_mode = (data >> MSI_DATA_DELIVERY_MODE_SHIFT) & 0x7;
    trig_mode = (data >> MSI_DATA_TRIGGER_SHIFT) & 0x1;

    result = dest_id | (rh << XEN_PT_GFLAGS_SHIFT_RH)
        | (dm << XEN_PT_GFLAGS_SHIFT_DM)
        | (deliv_mode << XEN_PT_GFLAGSSHIFT_DELIV_MODE)
        | (trig_mode << XEN_PT_GFLAGSSHIFT_TRG_MODE);

    return result;
}

static inline uint64_t msi_addr64(XenPTMSI *msi)
{
    return (uint64_t)msi->addr_hi << 32 | msi->addr_lo;
}

static int msi_msix_enable(XenPCIPassthroughState *s,
                           uint32_t address,
                           uint16_t flag,
                           bool enable)
{
    uint16_t val = 0;
    int rc;

    if (!address) {
        return -1;
    }

    rc = xen_host_pci_get_word(&s->real_device, address, &val);
    if (rc) {
        XEN_PT_ERR(&s->dev, "Failed to read MSI/MSI-X register (0x%x), rc:%d\n",
                   address, rc);
        return rc;
    }
    if (enable) {
        val |= flag;
    } else {
        val &= ~flag;
    }
    rc = xen_host_pci_set_word(&s->real_device, address, val);
    if (rc) {
        XEN_PT_ERR(&s->dev, "Failed to write MSI/MSI-X register (0x%x), rc:%d\n",
                   address, rc);
    }
    return rc;
}

/*
 * Map a pirq for an MSI or MSI-X vector.  If the guest encoded a pirq in
 * the MSI address/data (xen_is_pirq_msi), reuse it; otherwise, when the
 * vector is not mapped yet, ask Xen to allocate one via
 * xc_physdev_map_pirq_msi.  The interrupt is neither bound nor enabled here.
 */
static int msi_msix_setup(XenPCIPassthroughState *s,
                          uint64_t addr,
                          uint32_t data,
                          int *ppirq,
                          bool is_msix,
                          int msix_entry,
                          bool is_not_mapped)
{
    uint8_t gvec = msi_vector(data);
    int rc = 0;

    assert((!is_msix && msix_entry == 0) || is_msix);

    if (xen_is_pirq_msi(data)) {
        *ppirq = msi_ext_dest_id(addr >> 32) | msi_dest_id(addr);
        if (!*ppirq) {
            /* this probably identifies a misconfiguration of the guest,
             * try the emulated path */
            *ppirq = XEN_PT_UNASSIGNED_PIRQ;
        } else {
            XEN_PT_LOG(&s->dev, "requested pirq %d for MSI%s"
                       " (vec: %#x, entry: %#x)\n",
                       *ppirq, is_msix ? "-X" : "", gvec, msix_entry);
        }
    }

    if (is_not_mapped) {
        uint64_t table_base = 0;

        if (is_msix) {
            table_base = s->msix->table_base;
        }

        rc = xc_physdev_map_pirq_msi(xen_xc, xen_domid, XEN_PT_AUTO_ASSIGN,
                                     ppirq, PCI_DEVFN(s->real_device.dev,
                                                      s->real_device.func),
                                     s->real_device.bus,
                                     msix_entry, table_base);
        if (rc) {
            XEN_PT_ERR(&s->dev,
                       "Mapping of MSI%s (err: %i, vec: %#x, entry %#x)\n",
                       is_msix ? "-X" : "", errno, gvec, msix_entry);
            return rc;
        }
    }

    return 0;
}

/*
 * Bind a mapped pirq to the guest vector and flags derived from the MSI
 * address/data.  On failure the pirq is unmapped and *old_pirq is reset to
 * XEN_PT_UNASSIGNED_PIRQ.
 */
static int msi_msix_update(XenPCIPassthroughState *s,
                           uint64_t addr,
                           uint32_t data,
                           int pirq,
                           bool is_msix,
                           int msix_entry,
                           int *old_pirq)
{
    PCIDevice *d = &s->dev;
    uint8_t gvec = msi_vector(data);
    uint32_t gflags = msi_gflags(data, addr);
    int rc = 0;
    uint64_t table_addr = 0;

    XEN_PT_LOG(d, "Updating MSI%s with pirq %d gvec %#x gflags %#x"
               " (entry: %#x)\n",
               is_msix ? "-X" : "", pirq, gvec, gflags, msix_entry);

    if (is_msix) {
        table_addr = s->msix->mmio_base_addr;
    }

    rc = xc_domain_update_msi_irq(xen_xc, xen_domid, gvec,
                                  pirq, gflags, table_addr);

    if (rc) {
        XEN_PT_ERR(d, "Updating of MSI%s failed. (err: %d)\n",
                   is_msix ? "-X" : "", errno);

        if (xc_physdev_unmap_pirq(xen_xc, xen_domid, *old_pirq)) {
            XEN_PT_ERR(d, "Unmapping of MSI%s pirq %d failed. (err: %d)\n",
                       is_msix ? "-X" : "", *old_pirq, errno);
        }
        *old_pirq = XEN_PT_UNASSIGNED_PIRQ;
    }
    return rc;
}

static int msi_msix_disable(XenPCIPassthroughState *s,
                            uint64_t addr,
                            uint32_t data,
                            int pirq,
                            bool is_msix,
                            bool is_binded)
{
    PCIDevice *d = &s->dev;
    uint8_t gvec = msi_vector(data);
    uint32_t gflags = msi_gflags(data, addr);
    int rc = 0;

    if (pirq == XEN_PT_UNASSIGNED_PIRQ) {
        return 0;
    }

    if (is_binded) {
        XEN_PT_LOG(d, "Unbind MSI%s with pirq %d, gvec %#x\n",
                   is_msix ? "-X" : "", pirq, gvec);
        rc = xc_domain_unbind_msi_irq(xen_xc, xen_domid, gvec, pirq, gflags);
        if (rc) {
            XEN_PT_ERR(d, "Unbinding of MSI%s failed. (err: %d, pirq: %d, gvec: %#x)\n",
                       is_msix ? "-X" : "", errno, pirq, gvec);
            return rc;
        }
    }

    XEN_PT_LOG(d, "Unmap MSI%s pirq %d\n", is_msix ? "-X" : "", pirq);
    rc = xc_physdev_unmap_pirq(xen_xc, xen_domid, pirq);
    if (rc) {
        XEN_PT_ERR(d, "Unmapping of MSI%s pirq %d failed. (err: %i)\n",
                   is_msix ? "-X" : "", pirq, errno);
        return rc;
    }

    return 0;
}

/*
 * MSI virtualization functions
 */

static int xen_pt_msi_set_enable(XenPCIPassthroughState *s, bool enable)
{
    XEN_PT_LOG(&s->dev, "%s MSI.\n", enable ? "enabling" : "disabling");

    if (!s->msi) {
        return -1;
    }

    return msi_msix_enable(s, s->msi->ctrl_offset, PCI_MSI_FLAGS_ENABLE,
                           enable);
}

/* setup physical msi, but don't enable it */
int xen_pt_msi_setup(XenPCIPassthroughState *s)
{
    int pirq = XEN_PT_UNASSIGNED_PIRQ;
    int rc = 0;
    XenPTMSI *msi = s->msi;

    if (msi->initialized) {
        XEN_PT_ERR(&s->dev,
                   "Setup physical MSI when it has been properly initialized.\n");
        return -1;
    }

    rc = msi_msix_setup(s, msi_addr64(msi), msi->data, &pirq, false, 0, true);
    if (rc) {
        return rc;
    }

    if (pirq < 0) {
        XEN_PT_ERR(&s->dev, "Invalid pirq number: %d.\n", pirq);
        return -1;
    }

    msi->pirq = pirq;
    XEN_PT_LOG(&s->dev, "MSI mapped with pirq %d.\n", pirq);

    return 0;
}

int xen_pt_msi_update(XenPCIPassthroughState *s)
{
    XenPTMSI *msi = s->msi;
    return msi_msix_update(s, msi_addr64(msi), msi->data, msi->pirq,
                           false, 0, &msi->pirq);
}

void xen_pt_msi_disable(XenPCIPassthroughState *s)
{
    XenPTMSI *msi = s->msi;

    if (!msi) {
        return;
    }

    (void)xen_pt_msi_set_enable(s, false);

    msi_msix_disable(s, msi_addr64(msi), msi->data, msi->pirq, false,
                     msi->initialized);

    /* clear msi info */
    msi->flags &= ~PCI_MSI_FLAGS_ENABLE;
    msi->initialized = false;
    msi->mapped = false;
    msi->pirq = XEN_PT_UNASSIGNED_PIRQ;
}

/*
 * MSI-X virtualization functions
 */

static int msix_set_enable(XenPCIPassthroughState *s, bool enabled)
{
    XEN_PT_LOG(&s->dev, "%s MSI-X.\n", enabled ? "enabling" : "disabling");

    if (!s->msix) {
        return -1;
    }

    return msi_msix_enable(s, s->msix->ctrl_offset, PCI_MSIX_FLAGS_ENABLE,
                           enabled);
}

/*
 * Push one MSI-X table entry to Xen: commit the latched address/data when
 * the entry (or the whole function, via maskall) is masked, map a pirq on
 * first use and rebind it with the new vector and flags.
 */
static int xen_pt_msix_update_one(XenPCIPassthroughState *s, int entry_nr,
                                  uint32_t vec_ctrl)
{
    XenPTMSIXEntry *entry = NULL;
    int pirq;
    int rc;

    if (entry_nr < 0 || entry_nr >= s->msix->total_entries) {
        return -EINVAL;
    }

    entry = &s->msix->msix_entry[entry_nr];

    if (!entry->updated) {
        return 0;
    }

    pirq = entry->pirq;

    /*
     * Update the entry addr and data to the latest values only when the
     * entry is masked or they are all masked, as required by the spec.
     * Addr and data changes while the MSI-X entry is unmasked get deferred
     * until the next masked -> unmasked transition.
     */
    if (pirq == XEN_PT_UNASSIGNED_PIRQ || s->msix->maskall ||
        (vec_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT)) {
        entry->addr = entry->latch(LOWER_ADDR) |
                      ((uint64_t)entry->latch(UPPER_ADDR) << 32);
        entry->data = entry->latch(DATA);
    }

    rc = msi_msix_setup(s, entry->addr, entry->data, &pirq, true, entry_nr,
                        entry->pirq == XEN_PT_UNASSIGNED_PIRQ);
    if (rc) {
        return rc;
    }
    if (entry->pirq == XEN_PT_UNASSIGNED_PIRQ) {
        entry->pirq = pirq;
    }

    rc = msi_msix_update(s, entry->addr, entry->data, pirq, true,
                         entry_nr, &entry->pirq);

    if (!rc) {
        entry->updated = false;
    }

    return rc;
}

int xen_pt_msix_update(XenPCIPassthroughState *s)
{
    XenPTMSIX *msix = s->msix;
    int i;

    for (i = 0; i < msix->total_entries; i++) {
        xen_pt_msix_update_one(s, i, msix->msix_entry[i].latch(VECTOR_CTRL));
    }

    return 0;
}

void xen_pt_msix_disable(XenPCIPassthroughState *s)
{
    int i = 0;

    msix_set_enable(s, false);

    for (i = 0; i < s->msix->total_entries; i++) {
        XenPTMSIXEntry *entry = &s->msix->msix_entry[i];

        msi_msix_disable(s, entry->addr, entry->data, entry->pirq, true, true);

        /* clear MSI-X info */
        entry->pirq = XEN_PT_UNASSIGNED_PIRQ;
        entry->updated = false;
    }
}

int xen_pt_msix_update_remap(XenPCIPassthroughState *s, int bar_index)
{
    XenPTMSIXEntry *entry;
    int i, ret;

    if (!(s->msix && s->msix->bar_index == bar_index)) {
        return 0;
    }

    for (i = 0; i < s->msix->total_entries; i++) {
        entry = &s->msix->msix_entry[i];
        if (entry->pirq != XEN_PT_UNASSIGNED_PIRQ) {
            ret = xc_domain_unbind_pt_irq(xen_xc, xen_domid, entry->pirq,
                                          PT_IRQ_TYPE_MSI, 0, 0, 0, 0);
            if (ret) {
                XEN_PT_ERR(&s->dev, "unbind MSI-X entry %d failed (err: %d)\n",
                           entry->pirq, errno);
            }
            entry->updated = true;
        }
    }
    return xen_pt_msix_update(s);
}

static uint32_t get_entry_value(XenPTMSIXEntry *e, int offset)
{
    assert(!(offset % sizeof(*e->latch)));
    return e->latch[offset / sizeof(*e->latch)];
}

static void set_entry_value(XenPTMSIXEntry *e, int offset, uint32_t val)
{
    assert(!(offset % sizeof(*e->latch)));
    e->latch[offset / sizeof(*e->latch)] = val;
}
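/*
 * MMIO write handler for the emulated MSI-X table.  Address/data writes are
 * only latched and flagged as pending; unmasking a pending entry while
 * MSI-X is enabled triggers the actual update, using the vector control
 * word read back from the physical table.
 */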
static void pci_msix_write(void *opaque, hwaddr addr,
                           uint64_t val, unsigned size)
{
    XenPCIPassthroughState *s = opaque;
    XenPTMSIX *msix = s->msix;
    XenPTMSIXEntry *entry;
    unsigned int entry_nr, offset;

    entry_nr = addr / PCI_MSIX_ENTRY_SIZE;
    if (entry_nr >= msix->total_entries) {
        return;
    }
    entry = &msix->msix_entry[entry_nr];
    offset = addr % PCI_MSIX_ENTRY_SIZE;

    if (offset != PCI_MSIX_ENTRY_VECTOR_CTRL) {
        if (get_entry_value(entry, offset) == val
            && entry->pirq != XEN_PT_UNASSIGNED_PIRQ) {
            return;
        }

        entry->updated = true;
    } else if (msix->enabled && entry->updated &&
               !(val & PCI_MSIX_ENTRY_CTRL_MASKBIT)) {
        const volatile uint32_t *vec_ctrl;

        /*
         * If Xen intercepts the mask bit access, entry->vec_ctrl may not be
         * up-to-date. Read from hardware directly.
         */
        vec_ctrl = s->msix->phys_iomem_base + entry_nr * PCI_MSIX_ENTRY_SIZE
            + PCI_MSIX_ENTRY_VECTOR_CTRL;
        xen_pt_msix_update_one(s, entry_nr, *vec_ctrl);
    }

    set_entry_value(entry, offset, val);
}

static uint64_t pci_msix_read(void *opaque, hwaddr addr,
                              unsigned size)
{
    XenPCIPassthroughState *s = opaque;
    XenPTMSIX *msix = s->msix;
    int entry_nr, offset;

    entry_nr = addr / PCI_MSIX_ENTRY_SIZE;
    if (entry_nr < 0) {
        XEN_PT_ERR(&s->dev, "asked MSI-X entry '%i' invalid!\n", entry_nr);
        return 0;
    }

    offset = addr % PCI_MSIX_ENTRY_SIZE;

    if (addr < msix->total_entries * PCI_MSIX_ENTRY_SIZE) {
        return get_entry_value(&msix->msix_entry[entry_nr], offset);
    } else {
        /* Pending Bit Array (PBA) */
        return *(uint32_t *)(msix->phys_iomem_base + addr);
    }
}

static bool pci_msix_accepts(void *opaque, hwaddr addr,
                             unsigned size, bool is_write)
{
    return !(addr & (size - 1));
}

static const MemoryRegionOps pci_msix_ops = {
    .read = pci_msix_read,
    .write = pci_msix_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
        .unaligned = false,
        .accepts = pci_msix_accepts
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
        .unaligned = false
    }
};
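/*
 * Parse the MSI-X capability at 'base', allocate per-entry state, map the
 * physical MSI-X table from /dev/mem, and register the emulated table as an
 * MMIO subregion overlapping the corresponding BAR.
 */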
int xen_pt_msix_init(XenPCIPassthroughState *s, uint32_t base)
{
    uint8_t id = 0;
    uint16_t control = 0;
    uint32_t table_off = 0;
    int i, total_entries, bar_index;
    XenHostPCIDevice *hd = &s->real_device;
    PCIDevice *d = &s->dev;
    int fd = -1;
    XenPTMSIX *msix = NULL;
    int rc = 0;

    rc = xen_host_pci_get_byte(hd, base + PCI_CAP_LIST_ID, &id);
    if (rc) {
        return rc;
    }

    if (id != PCI_CAP_ID_MSIX) {
        XEN_PT_ERR(d, "Invalid id %#x base %#x\n", id, base);
        return -1;
    }

    xen_host_pci_get_word(hd, base + PCI_MSIX_FLAGS, &control);
    total_entries = control & PCI_MSIX_FLAGS_QSIZE;
    total_entries += 1;

    s->msix = g_malloc0(sizeof (XenPTMSIX)
                        + total_entries * sizeof (XenPTMSIXEntry));
    msix = s->msix;

    msix->total_entries = total_entries;
    for (i = 0; i < total_entries; i++) {
        msix->msix_entry[i].pirq = XEN_PT_UNASSIGNED_PIRQ;
    }

    memory_region_init_io(&msix->mmio, OBJECT(s), &pci_msix_ops,
                          s, "xen-pci-pt-msix",
                          (total_entries * PCI_MSIX_ENTRY_SIZE
                           + XC_PAGE_SIZE - 1)
                          & XC_PAGE_MASK);

    xen_host_pci_get_long(hd, base + PCI_MSIX_TABLE, &table_off);
    bar_index = msix->bar_index = table_off & PCI_MSIX_FLAGS_BIRMASK;
    table_off = table_off & ~PCI_MSIX_FLAGS_BIRMASK;
    msix->table_base = s->real_device.io_regions[bar_index].base_addr;
    XEN_PT_LOG(d, "get MSI-X table BAR base 0x%"PRIx64"\n", msix->table_base);

    fd = open("/dev/mem", O_RDWR);
    if (fd == -1) {
        rc = -errno;
        XEN_PT_ERR(d, "Can't open /dev/mem: %s\n", strerror(errno));
        goto error_out;
    }
    XEN_PT_LOG(d, "table_off = %#x, total_entries = %d\n",
               table_off, total_entries);
    msix->table_offset_adjust = table_off & 0x0fff;
    msix->phys_iomem_base =
        mmap(NULL,
             total_entries * PCI_MSIX_ENTRY_SIZE + msix->table_offset_adjust,
             PROT_READ,
             MAP_SHARED | MAP_LOCKED,
             fd,
             msix->table_base + table_off - msix->table_offset_adjust);
    close(fd);
    if (msix->phys_iomem_base == MAP_FAILED) {
        rc = -errno;
        XEN_PT_ERR(d, "Can't map physical MSI-X table: %s\n", strerror(errno));
        goto error_out;
    }
    msix->phys_iomem_base = (char *)msix->phys_iomem_base
        + msix->table_offset_adjust;

    XEN_PT_LOG(d, "mapping physical MSI-X table to %p\n",
               msix->phys_iomem_base);

    memory_region_add_subregion_overlap(&s->bar[bar_index], table_off,
                                        &msix->mmio,
                                        2); /* Priority: pci default + 1 */

    return 0;

error_out:
    g_free(s->msix);
    s->msix = NULL;
    return rc;
}

void xen_pt_msix_unmap(XenPCIPassthroughState *s)
{
    XenPTMSIX *msix = s->msix;

    if (!msix) {
        return;
    }

    /* unmap the MSI-X memory mapped register area */
    if (msix->phys_iomem_base) {
        XEN_PT_LOG(&s->dev, "unmapping physical MSI-X table from %p\n",
                   msix->phys_iomem_base);
        munmap(msix->phys_iomem_base, msix->total_entries * PCI_MSIX_ENTRY_SIZE
               + msix->table_offset_adjust);
    }

    memory_region_del_subregion(&s->bar[msix->bar_index], &msix->mmio);
}

void xen_pt_msix_delete(XenPCIPassthroughState *s)
{
    XenPTMSIX *msix = s->msix;

    if (!msix) {
        return;
    }

    object_unparent(OBJECT(&msix->mmio));

    g_free(s->msix);
    s->msix = NULL;
}