/*
 * Copyright (c) 2007, Neocleus Corporation.
 * Copyright (c) 2007, Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Alex Novik <alex@neocleus.com>
 * Allen Kay <allen.m.kay@intel.com>
 * Guy Zana <guy@neocleus.com>
 *
 * This file implements direct PCI assignment to an HVM guest
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/timer.h"
#include "hw/xen/xen_pt.h"
#include "hw/xen/xen_igd.h"
#include "hw/xen/xen-legacy-backend.h"

#define XEN_PT_MERGE_VALUE(value, data, val_mask) \
    (((value) & (val_mask)) | ((data) & ~(val_mask)))

#define XEN_PT_INVALID_REG 0xFFFFFFFF /* invalid register value */
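/*
 * XEN_PT_MERGE_VALUE takes the bits selected by val_mask from 'value' and
 * the remaining bits from 'data'. Worked example:
 *     value = 0xABCD, data = 0x1234, val_mask = 0xFF00
 *     (0xABCD & 0xFF00) | (0x1234 & ~0xFF00) = 0xAB00 | 0x0034 = 0xAB34
 * All of the read and write emulation below is built out of this merge.
 */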
/* prototype */

static int xen_pt_ptr_reg_init(XenPCIPassthroughState *s, XenPTRegInfo *reg,
                               uint32_t real_offset, uint32_t *data);


/* helper */

/* A return value of 1 means the capability should NOT be exposed to the guest. */
static int xen_pt_hide_dev_cap(const XenHostPCIDevice *d, uint8_t grp_id)
{
    switch (grp_id) {
    case PCI_CAP_ID_EXP:
        /* Per 'Table 9.7. VF PCIe Configuration Space' of the Intel 82599
         * 10GbE Controller datasheet, the PCI Express Capability Structure
         * of the VF is trivial: the PCI Express Capabilities Register is 0,
         * so the Capability Version is 0 and xen_pt_pcie_size_init() would
         * fail. We should not try to expose it to the guest.
         *
         * The datasheet is available at
         * http://download.intel.com/design/network/datashts/82599_datasheet.pdf
         */
        if (d->vendor_id == PCI_VENDOR_ID_INTEL &&
            d->device_id == PCI_DEVICE_ID_INTEL_82599_SFP_VF) {
            return 1;
        }
        break;
    }
    return 0;
}

/* find emulated register group entry */
XenPTRegGroup *xen_pt_find_reg_grp(XenPCIPassthroughState *s, uint32_t address)
{
    XenPTRegGroup *entry = NULL;

    /* find register group entry */
    QLIST_FOREACH(entry, &s->reg_grps, entries) {
        /* check address */
        if ((entry->base_offset <= address)
            && ((entry->base_offset + entry->size) > address)) {
            return entry;
        }
    }

    /* group entry not found */
    return NULL;
}

/* find emulated register entry */
XenPTReg *xen_pt_find_reg(XenPTRegGroup *reg_grp, uint32_t address)
{
    XenPTReg *reg_entry = NULL;
    XenPTRegInfo *reg = NULL;
    uint32_t real_offset = 0;

    /* find register entry */
    QLIST_FOREACH(reg_entry, &reg_grp->reg_tbl_list, entries) {
        reg = reg_entry->reg;
        real_offset = reg_grp->base_offset + reg->offset;
        /* check address */
        if ((real_offset <= address)
            && ((real_offset + reg->size) > address)) {
            return reg_entry;
        }
    }

    return NULL;
}

static uint32_t get_throughable_mask(const XenPCIPassthroughState *s,
                                     XenPTRegInfo *reg, uint32_t valid_mask)
{
    uint32_t throughable_mask = ~(reg->emu_mask | reg->ro_mask);

    if (!s->permissive) {
        throughable_mask &= ~reg->res_mask;
    }

    return throughable_mask & valid_mask;
}
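/*
 * Worked example: for the Command register entry below (res_mask = 0xF880,
 * emu_mask = 0x0743, ro_mask = 0), a non-permissive guest write may only
 * pass bits ~(0x0743 | 0xF880) = 0x003C through to the device, i.e. the
 * Bus Master, Special Cycles, Memory Write and Invalidate, and VGA Palette
 * Snoop bits. In permissive mode the reserved bits 0xF880 pass through too.
 */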
/****************
 * general register functions
 */

/* register initialization function */

static int xen_pt_common_reg_init(XenPCIPassthroughState *s,
                                  XenPTRegInfo *reg, uint32_t real_offset,
                                  uint32_t *data)
{
    *data = reg->init_val;
    return 0;
}

/* Read register functions */

static int xen_pt_byte_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                                uint8_t *value, uint8_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint8_t valid_emu_mask = 0;
    uint8_t *data = cfg_entry->ptr.byte;

    /* emulate byte register */
    valid_emu_mask = reg->emu_mask & valid_mask;
    *value = XEN_PT_MERGE_VALUE(*value, *data, ~valid_emu_mask);

    return 0;
}
static int xen_pt_word_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                                uint16_t *value, uint16_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint16_t valid_emu_mask = 0;
    uint16_t *data = cfg_entry->ptr.half_word;

    /* emulate word register */
    valid_emu_mask = reg->emu_mask & valid_mask;
    *value = XEN_PT_MERGE_VALUE(*value, *data, ~valid_emu_mask);

    return 0;
}
static int xen_pt_long_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                                uint32_t *value, uint32_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint32_t valid_emu_mask = 0;
    uint32_t *data = cfg_entry->ptr.word;

    /* emulate long register */
    valid_emu_mask = reg->emu_mask & valid_mask;
    *value = XEN_PT_MERGE_VALUE(*value, *data, ~valid_emu_mask);

    return 0;
}

/* Write register functions */

static int xen_pt_byte_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                                 uint8_t *val, uint8_t dev_value,
                                 uint8_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint8_t writable_mask = 0;
    uint8_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
    uint8_t *data = cfg_entry->ptr.byte;

    /* modify emulated register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value & ~reg->rw1c_mask,
                              throughable_mask);

    return 0;
}
static int xen_pt_word_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                                 uint16_t *val, uint16_t dev_value,
                                 uint16_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint16_t writable_mask = 0;
    uint16_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
    uint16_t *data = cfg_entry->ptr.half_word;

    /* modify emulated register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value & ~reg->rw1c_mask,
                              throughable_mask);

    return 0;
}
static int xen_pt_long_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                                 uint32_t *val, uint32_t dev_value,
                                 uint32_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint32_t writable_mask = 0;
    uint32_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
    uint32_t *data = cfg_entry->ptr.word;

    /* modify emulated register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value & ~reg->rw1c_mask,
                              throughable_mask);

    return 0;
}
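/*
 * Worked example for the write helpers above: take an 8-bit register with
 * emu_mask = 0x0F, ro_mask = 0x03, rw1c_mask = 0, res_mask = 0,
 * valid_mask = 0xFF, a guest write of *val = 0xFF and dev_value = 0x55.
 *   writable_mask    = 0x0F & ~0x03 = 0x0C: the emulated copy takes bits
 *                      2-3 from the guest and keeps its read-only bits 0-1;
 *   throughable_mask = ~(0x0F | 0x03) = 0xF0: only the high nibble reaches
 *                      the device, *val = (0xFF & 0xF0) | (0x55 & 0x0F) = 0xF5.
 * Masking dev_value with ~rw1c_mask keeps write-1-to-clear bits that read
 * back as 1 from being written back and clearing themselves.
 */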
/* XenPTRegInfo declaration
 * - only for emulated registers (either partially or fully emulated).
 * - for pass-through registers that need special behavior (like interacting
 *   with other components), set emu_mask to all 0s and specify the r/w
 *   functions properly.
 * - do NOT use all Fs for init_val, otherwise the table entry will not be
 *   registered.
 */

/********************
 * Header Type0
 */

static int xen_pt_vendor_reg_init(XenPCIPassthroughState *s,
                                  XenPTRegInfo *reg, uint32_t real_offset,
                                  uint32_t *data)
{
    *data = s->real_device.vendor_id;
    return 0;
}
static int xen_pt_device_reg_init(XenPCIPassthroughState *s,
                                  XenPTRegInfo *reg, uint32_t real_offset,
                                  uint32_t *data)
{
    *data = s->real_device.device_id;
    return 0;
}
static int xen_pt_status_reg_init(XenPCIPassthroughState *s,
                                  XenPTRegInfo *reg, uint32_t real_offset,
                                  uint32_t *data)
{
    XenPTRegGroup *reg_grp_entry = NULL;
    XenPTReg *reg_entry = NULL;
    uint32_t reg_field = 0;

    /* find Header register group */
    reg_grp_entry = xen_pt_find_reg_grp(s, PCI_CAPABILITY_LIST);
    if (reg_grp_entry) {
        /* find Capabilities Pointer register */
        reg_entry = xen_pt_find_reg(reg_grp_entry, PCI_CAPABILITY_LIST);
        if (reg_entry) {
            /* check Capabilities Pointer register */
            if (*reg_entry->ptr.half_word) {
                reg_field |= PCI_STATUS_CAP_LIST;
            } else {
                reg_field &= ~PCI_STATUS_CAP_LIST;
            }
        } else {
            xen_shutdown_fatal_error("Internal error: Couldn't find XenPTReg*"
                                     " for Capabilities Pointer register."
                                     " (%s)\n", __func__);
            return -1;
        }
    } else {
        xen_shutdown_fatal_error("Internal error: Couldn't find XenPTRegGroup"
                                 " for Header. (%s)\n", __func__);
        return -1;
    }

    *data = reg_field;
    return 0;
}
static int xen_pt_header_type_reg_init(XenPCIPassthroughState *s,
                                       XenPTRegInfo *reg, uint32_t real_offset,
                                       uint32_t *data)
{
    /* read PCI_HEADER_TYPE */
    *data = reg->init_val;
    if ((PCI_DEVICE(s)->cap_present & QEMU_PCI_CAP_MULTIFUNCTION)) {
        *data |= PCI_HEADER_TYPE_MULTI_FUNCTION;
    }
    return 0;
}

/* initialize Interrupt Pin register */
static int xen_pt_irqpin_reg_init(XenPCIPassthroughState *s,
                                  XenPTRegInfo *reg, uint32_t real_offset,
                                  uint32_t *data)
{
    if (s->real_device.irq) {
        *data = xen_pt_pci_read_intx(s);
    }
    return 0;
}

/* Command register */
static int xen_pt_cmd_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                                uint16_t *val, uint16_t dev_value,
                                uint16_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint16_t writable_mask = 0;
    uint16_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
    uint16_t *data = cfg_entry->ptr.half_word;

    /* modify emulated register */
    writable_mask = ~reg->ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);

    /* create value for writing to I/O device register */
    if (*val & PCI_COMMAND_INTX_DISABLE) {
        throughable_mask |= PCI_COMMAND_INTX_DISABLE;
    } else {
        if (s->machine_irq) {
            throughable_mask |= PCI_COMMAND_INTX_DISABLE;
        }
    }

    *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);

    return 0;
}
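/*
 * The PCI_COMMAND_INTX_DISABLE handling above forwards the bit to the
 * device whenever the guest sets it, and also whenever Xen holds an INTx
 * binding (s->machine_irq), so that a guest clearing the bit can actually
 * re-enable the line. Without a binding, a cleared bit is not forwarded
 * and the device keeps its current setting.
 */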
/* BAR */
#define XEN_PT_BAR_MEM_RO_MASK   0x0000000F  /* BAR ReadOnly mask(Memory) */
#define XEN_PT_BAR_MEM_EMU_MASK  0xFFFFFFF0  /* BAR emul mask(Memory) */
#define XEN_PT_BAR_IO_RO_MASK    0x00000003  /* BAR ReadOnly mask(I/O) */
#define XEN_PT_BAR_IO_EMU_MASK   0xFFFFFFFC  /* BAR emul mask(I/O) */

static bool is_64bit_bar(PCIIORegion *r)
{
    return !!(r->type & PCI_BASE_ADDRESS_MEM_TYPE_64);
}

static uint64_t xen_pt_get_bar_size(PCIIORegion *r)
{
    if (is_64bit_bar(r)) {
        uint64_t size64;
        size64 = (r + 1)->size;
        size64 <<= 32;
        size64 += r->size;
        return size64;
    }
    return r->size;
}

static XenPTBarFlag xen_pt_bar_reg_parse(XenPCIPassthroughState *s,
                                         int index)
{
    PCIDevice *d = PCI_DEVICE(s);
    XenPTRegion *region = NULL;
    PCIIORegion *r;

    /* check 64bit BAR */
    if ((0 < index) && (index < PCI_ROM_SLOT)) {
        int type = s->real_device.io_regions[index - 1].type;

        if ((type & XEN_HOST_PCI_REGION_TYPE_MEM)
            && (type & XEN_HOST_PCI_REGION_TYPE_MEM_64)) {
            region = &s->bases[index - 1];
            if (region->bar_flag != XEN_PT_BAR_FLAG_UPPER) {
                return XEN_PT_BAR_FLAG_UPPER;
            }
        }
    }

    /* check unused BAR */
    r = &d->io_regions[index];
    if (!xen_pt_get_bar_size(r)) {
        return XEN_PT_BAR_FLAG_UNUSED;
    }

    /* for ExpROM BAR */
    if (index == PCI_ROM_SLOT) {
        return XEN_PT_BAR_FLAG_MEM;
    }

    /* check BAR I/O indicator */
    if (s->real_device.io_regions[index].type & XEN_HOST_PCI_REGION_TYPE_IO) {
        return XEN_PT_BAR_FLAG_IO;
    } else {
        return XEN_PT_BAR_FLAG_MEM;
    }
}

static inline uint32_t base_address_with_flags(XenHostPCIIORegion *hr)
{
    if (hr->type & XEN_HOST_PCI_REGION_TYPE_IO) {
        return hr->base_addr | (hr->bus_flags & ~PCI_BASE_ADDRESS_IO_MASK);
    } else {
        return hr->base_addr | (hr->bus_flags & ~PCI_BASE_ADDRESS_MEM_MASK);
    }
}

static int xen_pt_bar_reg_init(XenPCIPassthroughState *s, XenPTRegInfo *reg,
                               uint32_t real_offset, uint32_t *data)
{
    uint32_t reg_field = 0;
    int index;

    index = xen_pt_bar_offset_to_index(reg->offset);
    if (index < 0 || index >= PCI_NUM_REGIONS) {
        XEN_PT_ERR(&s->dev, "Internal error: Invalid BAR index [%d].\n", index);
        return -1;
    }

    /* set BAR flag */
    s->bases[index].bar_flag = xen_pt_bar_reg_parse(s, index);
    if (s->bases[index].bar_flag == XEN_PT_BAR_FLAG_UNUSED) {
        reg_field = XEN_PT_INVALID_REG;
    }

    *data = reg_field;
    return 0;
}
static int xen_pt_bar_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                               uint32_t *value, uint32_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint32_t valid_emu_mask = 0;
    uint32_t bar_emu_mask = 0;
    int index;

    /* get BAR index */
    index = xen_pt_bar_offset_to_index(reg->offset);
    if (index < 0 || index >= PCI_NUM_REGIONS - 1) {
        XEN_PT_ERR(&s->dev, "Internal error: Invalid BAR index [%d].\n", index);
        return -1;
    }

    /* use fixed-up value from kernel sysfs */
    *value = base_address_with_flags(&s->real_device.io_regions[index]);

    /* set emulate mask depending on BAR flag */
    switch (s->bases[index].bar_flag) {
    case XEN_PT_BAR_FLAG_MEM:
        bar_emu_mask = XEN_PT_BAR_MEM_EMU_MASK;
        break;
    case XEN_PT_BAR_FLAG_IO:
        bar_emu_mask = XEN_PT_BAR_IO_EMU_MASK;
        break;
    case XEN_PT_BAR_FLAG_UPPER:
        bar_emu_mask = XEN_PT_BAR_ALLF;
        break;
    default:
        break;
    }

    /* emulate BAR */
    valid_emu_mask = bar_emu_mask & valid_mask;
    *value = XEN_PT_MERGE_VALUE(*value, *cfg_entry->ptr.word, ~valid_emu_mask);

    return 0;
}
static int xen_pt_bar_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                                uint32_t *val, uint32_t dev_value,
                                uint32_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    XenPTRegion *base = NULL;
    PCIDevice *d = PCI_DEVICE(s);
    const PCIIORegion *r;
    uint32_t writable_mask = 0;
    uint32_t bar_emu_mask = 0;
    uint32_t bar_ro_mask = 0;
    uint32_t r_size = 0;
    int index = 0;
    uint32_t *data = cfg_entry->ptr.word;

    index = xen_pt_bar_offset_to_index(reg->offset);
    if (index < 0 || index >= PCI_NUM_REGIONS) {
        XEN_PT_ERR(d, "Internal error: Invalid BAR index [%d].\n", index);
        return -1;
    }

    r = &d->io_regions[index];
    base = &s->bases[index];
    r_size = xen_pt_get_emul_size(base->bar_flag, r->size);

    /* set emulate mask and read-only mask values depending on the BAR flag */
    switch (s->bases[index].bar_flag) {
    case XEN_PT_BAR_FLAG_MEM:
        bar_emu_mask = XEN_PT_BAR_MEM_EMU_MASK;
        if (!r_size) {
            /* low 32 bits mask for 64 bit bars */
            bar_ro_mask = XEN_PT_BAR_ALLF;
        } else {
            bar_ro_mask = XEN_PT_BAR_MEM_RO_MASK | (r_size - 1);
        }
        break;
    case XEN_PT_BAR_FLAG_IO:
        bar_emu_mask = XEN_PT_BAR_IO_EMU_MASK;
        bar_ro_mask = XEN_PT_BAR_IO_RO_MASK | (r_size - 1);
        break;
    case XEN_PT_BAR_FLAG_UPPER:
        assert(index > 0);
        r_size = d->io_regions[index - 1].size >> 32;
        bar_emu_mask = XEN_PT_BAR_ALLF;
        bar_ro_mask = r_size ? r_size - 1 : 0;
        break;
    default:
        break;
    }

    /* modify emulated register */
    writable_mask = bar_emu_mask & ~bar_ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);

    /* no BAR-type-specific handling is needed for the virtual region
     * address here: remapping happens when the emulated config space
     * changes */

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, 0);

    return 0;
}
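/*
 * Illustrative BAR sizing example: for a 4KiB memory BAR (r_size = 0x1000),
 * bar_ro_mask = XEN_PT_BAR_MEM_RO_MASK | (r_size - 1) = 0x0FFF. When the
 * guest writes 0xFFFFFFFF to probe the size, only bits above the size limit
 * stick in the emulated copy, so the BAR reads back as 0xFFFFF000 plus the
 * type flags and the guest decodes a 4KiB region, while the device itself
 * never sees the probe write (the merge above forwards nothing).
 */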
/* write Exp ROM BAR */
static int xen_pt_exp_rom_bar_reg_write(XenPCIPassthroughState *s,
                                        XenPTReg *cfg_entry, uint32_t *val,
                                        uint32_t dev_value, uint32_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    XenPTRegion *base = NULL;
    PCIDevice *d = PCI_DEVICE(s);
    uint32_t writable_mask = 0;
    uint32_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
    pcibus_t r_size = 0;
    uint32_t bar_ro_mask = 0;
    uint32_t *data = cfg_entry->ptr.word;

    r_size = d->io_regions[PCI_ROM_SLOT].size;
    base = &s->bases[PCI_ROM_SLOT];
    /* align memory type resource size */
    r_size = xen_pt_get_emul_size(base->bar_flag, r_size);

    /* set emulate mask and read-only mask */
    bar_ro_mask = (reg->ro_mask | (r_size - 1)) & ~PCI_ROM_ADDRESS_ENABLE;

    /* modify emulated register */
    writable_mask = ~bar_ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);

    return 0;
}

static int xen_pt_intel_opregion_read(XenPCIPassthroughState *s,
                                      XenPTReg *cfg_entry,
                                      uint32_t *value, uint32_t valid_mask)
{
    *value = igd_read_opregion(s);
    return 0;
}

static int xen_pt_intel_opregion_write(XenPCIPassthroughState *s,
                                       XenPTReg *cfg_entry, uint32_t *value,
                                       uint32_t dev_value, uint32_t valid_mask)
{
    igd_write_opregion(s, *value);
    return 0;
}
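/*
 * How to read the tables below, using the Command register entry as a
 * worked example: emu_mask = 0x0743 means bits 0, 1, 6 and 8-10 (I/O Space,
 * Memory Space, Parity Error Response, SERR#, Fast Back-to-Back, INTx
 * Disable) live in the emulated copy; res_mask = 0xF880 marks the reserved
 * bits, which only pass through in permissive mode; everything else goes
 * straight to the device. A fully emulated, read-only register such as
 * Vendor ID sets both ro_mask and emu_mask to all Fs, so the guest always
 * sees init_val and its writes are discarded.
 */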
/* Header Type0 reg static information table */
static XenPTRegInfo xen_pt_emu_reg_header0[] = {
    /* Vendor ID reg */
    {
        .offset     = PCI_VENDOR_ID,
        .size       = 2,
        .init_val   = 0x0000,
        .ro_mask    = 0xFFFF,
        .emu_mask   = 0xFFFF,
        .init       = xen_pt_vendor_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* Device ID reg */
    {
        .offset     = PCI_DEVICE_ID,
        .size       = 2,
        .init_val   = 0x0000,
        .ro_mask    = 0xFFFF,
        .emu_mask   = 0xFFFF,
        .init       = xen_pt_device_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* Command reg */
    {
        .offset     = PCI_COMMAND,
        .size       = 2,
        .init_val   = 0x0000,
        .res_mask   = 0xF880,
        .emu_mask   = 0x0743,
        .init       = xen_pt_common_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_cmd_reg_write,
    },
    /* Capabilities Pointer reg */
    {
        .offset     = PCI_CAPABILITY_LIST,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0xFF,
        .emu_mask   = 0xFF,
        .init       = xen_pt_ptr_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Status reg */
    /* use emulated Cap Ptr value to initialize,
     * so it must be declared after the Cap Ptr reg
     */
    {
        .offset     = PCI_STATUS,
        .size       = 2,
        .init_val   = 0x0000,
        .res_mask   = 0x0007,
        .ro_mask    = 0x06F8,
        .rw1c_mask  = 0xF900,
        .emu_mask   = 0x0010,
        .init       = xen_pt_status_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* Cache Line Size reg */
    {
        .offset     = PCI_CACHE_LINE_SIZE,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0x00,
        .emu_mask   = 0xFF,
        .init       = xen_pt_common_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Latency Timer reg */
    {
        .offset     = PCI_LATENCY_TIMER,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0x00,
        .emu_mask   = 0xFF,
        .init       = xen_pt_common_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Header Type reg */
    {
        .offset     = PCI_HEADER_TYPE,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0xFF,
        .emu_mask   = PCI_HEADER_TYPE_MULTI_FUNCTION,
        .init       = xen_pt_header_type_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Interrupt Line reg */
    {
        .offset     = PCI_INTERRUPT_LINE,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0x00,
        .emu_mask   = 0xFF,
        .init       = xen_pt_common_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Interrupt Pin reg */
    {
        .offset     = PCI_INTERRUPT_PIN,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0xFF,
        .emu_mask   = 0xFF,
        .init       = xen_pt_irqpin_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* BAR 0 reg */
    /* the BAR's mask is decided later, depending on its IO/MEM type */
    {
        .offset     = PCI_BASE_ADDRESS_0,
        .size       = 4,
        .init_val   = 0x00000000,
        .init       = xen_pt_bar_reg_init,
        .u.dw.read  = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* BAR 1 reg */
    {
        .offset     = PCI_BASE_ADDRESS_1,
        .size       = 4,
        .init_val   = 0x00000000,
        .init       = xen_pt_bar_reg_init,
        .u.dw.read  = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* BAR 2 reg */
    {
        .offset     = PCI_BASE_ADDRESS_2,
        .size       = 4,
        .init_val   = 0x00000000,
        .init       = xen_pt_bar_reg_init,
        .u.dw.read  = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* BAR 3 reg */
    {
        .offset     = PCI_BASE_ADDRESS_3,
        .size       = 4,
        .init_val   = 0x00000000,
        .init       = xen_pt_bar_reg_init,
        .u.dw.read  = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* BAR 4 reg */
    {
        .offset     = PCI_BASE_ADDRESS_4,
        .size       = 4,
        .init_val   = 0x00000000,
        .init       = xen_pt_bar_reg_init,
        .u.dw.read  = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* BAR 5 reg */
    {
        .offset     = PCI_BASE_ADDRESS_5,
        .size       = 4,
        .init_val   = 0x00000000,
        .init       = xen_pt_bar_reg_init,
        .u.dw.read  = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* Expansion ROM BAR reg */
    {
        .offset     = PCI_ROM_ADDRESS,
        .size       = 4,
        .init_val   = 0x00000000,
        .ro_mask    = ~PCI_ROM_ADDRESS_MASK & ~PCI_ROM_ADDRESS_ENABLE,
        .emu_mask   = (uint32_t)PCI_ROM_ADDRESS_MASK,
        .init       = xen_pt_bar_reg_init,
        .u.dw.read  = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_exp_rom_bar_reg_write,
    },
    {
        .size = 0,
    },
};


/*********************************
 * Vital Product Data Capability
 */

/* Vital Product Data Capability Structure reg static information table */
static XenPTRegInfo xen_pt_emu_reg_vpd[] = {
    {
        .offset     = PCI_CAP_LIST_NEXT,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0xFF,
        .emu_mask   = 0xFF,
        .init       = xen_pt_ptr_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    {
        .offset     = PCI_VPD_ADDR,
        .size       = 2,
        .ro_mask    = 0x0003,
        .emu_mask   = 0x0003,
        .init       = xen_pt_common_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    {
        .size = 0,
    },
};


/**************************************
 * Vendor Specific Capability
 */

/* Vendor Specific Capability Structure reg static information table */
static XenPTRegInfo xen_pt_emu_reg_vendor[] = {
    {
        .offset     = PCI_CAP_LIST_NEXT,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0xFF,
        .emu_mask   = 0xFF,
        .init       = xen_pt_ptr_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    {
        .size = 0,
    },
};


/*****************************
 * PCI Express Capability
 */

static inline uint8_t get_capability_version(XenPCIPassthroughState *s,
                                             uint32_t offset)
{
    uint8_t flag;
    if (xen_host_pci_get_byte(&s->real_device, offset + PCI_EXP_FLAGS, &flag)) {
        return 0;
    }
    return flag & PCI_EXP_FLAGS_VERS;
}

static inline uint8_t get_device_type(XenPCIPassthroughState *s,
                                      uint32_t offset)
{
    uint8_t flag;
    if (xen_host_pci_get_byte(&s->real_device, offset + PCI_EXP_FLAGS, &flag)) {
        return 0;
    }
    return (flag & PCI_EXP_FLAGS_TYPE) >> 4;
}
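/*
 * Illustrative example: the low byte of the PCI Express Capabilities
 * Register holds the Capability Version in bits 0-3 and the Device/Port
 * Type in bits 4-7. A value of 0x42 therefore decodes as capability
 * version 2 with device/port type 0x4 (Root Port); an Endpoint with a v1
 * capability reads as 0x01.
 */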
/* initialize Link Control register */
static int xen_pt_linkctrl_reg_init(XenPCIPassthroughState *s,
                                    XenPTRegInfo *reg, uint32_t real_offset,
                                    uint32_t *data)
{
    uint8_t cap_ver = get_capability_version(s, real_offset - reg->offset);
    uint8_t dev_type = get_device_type(s, real_offset - reg->offset);

    /* no need to initialize in case of Root Complex Integrated Endpoint
     * with cap_ver 1.x
     */
    if ((dev_type == PCI_EXP_TYPE_RC_END) && (cap_ver == 1)) {
        *data = XEN_PT_INVALID_REG;
        return 0;
    }

    *data = reg->init_val;
    return 0;
}
/* initialize Device Control 2 register */
static int xen_pt_devctrl2_reg_init(XenPCIPassthroughState *s,
                                    XenPTRegInfo *reg, uint32_t real_offset,
                                    uint32_t *data)
{
    uint8_t cap_ver = get_capability_version(s, real_offset - reg->offset);

    /* no need to initialize in case of cap_ver 1.x */
    if (cap_ver == 1) {
        *data = XEN_PT_INVALID_REG;
        return 0;
    }

    *data = reg->init_val;
    return 0;
}
/* initialize Link Control 2 register */
static int xen_pt_linkctrl2_reg_init(XenPCIPassthroughState *s,
                                     XenPTRegInfo *reg, uint32_t real_offset,
                                     uint32_t *data)
{
    uint8_t cap_ver = get_capability_version(s, real_offset - reg->offset);
    uint32_t reg_field = 0;

    /* no need to initialize in case of cap_ver 1.x */
    if (cap_ver == 1) {
        reg_field = XEN_PT_INVALID_REG;
    } else {
        /* set Supported Link Speed */
        uint8_t lnkcap;
        int rc;
        rc = xen_host_pci_get_byte(&s->real_device,
                                   real_offset - reg->offset + PCI_EXP_LNKCAP,
                                   &lnkcap);
        if (rc) {
            return rc;
        }
        reg_field |= PCI_EXP_LNKCAP_SLS & lnkcap;
    }

    *data = reg_field;
    return 0;
}
/* PCI Express Capability Structure reg static information table */
static XenPTRegInfo xen_pt_emu_reg_pcie[] = {
    /* Next Pointer reg */
    {
        .offset     = PCI_CAP_LIST_NEXT,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0xFF,
        .emu_mask   = 0xFF,
        .init       = xen_pt_ptr_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Device Capabilities reg */
    {
        .offset     = PCI_EXP_DEVCAP,
        .size       = 4,
        .init_val   = 0x00000000,
        .ro_mask    = 0xFFFFFFFF,
        .emu_mask   = 0x10000000,
        .init       = xen_pt_common_reg_init,
        .u.dw.read  = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_long_reg_write,
    },
    /* Device Control reg */
    {
        .offset     = PCI_EXP_DEVCTL,
        .size       = 2,
        .init_val   = 0x2810,
        .ro_mask    = 0x8400,
        .emu_mask   = 0xFFFF,
        .init       = xen_pt_common_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* Device Status reg */
    {
        .offset     = PCI_EXP_DEVSTA,
        .size       = 2,
        .res_mask   = 0xFFC0,
        .ro_mask    = 0x0030,
        .rw1c_mask  = 0x000F,
        .init       = xen_pt_common_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* Link Control reg */
    {
        .offset     = PCI_EXP_LNKCTL,
        .size       = 2,
        .init_val   = 0x0000,
        .ro_mask    = 0xFC34,
        .emu_mask   = 0xFFFF,
        .init       = xen_pt_linkctrl_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* Link Status reg */
    {
        .offset     = PCI_EXP_LNKSTA,
        .size       = 2,
        .ro_mask    = 0x3FFF,
        .rw1c_mask  = 0xC000,
        .init       = xen_pt_common_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* Device Control 2 reg */
    {
        .offset     = 0x28,
        .size       = 2,
        .init_val   = 0x0000,
        .ro_mask    = 0xFFA0,
        .emu_mask   = 0xFFBF,
        .init       = xen_pt_devctrl2_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* Link Control 2 reg */
    {
        .offset     = 0x30,
        .size       = 2,
        .init_val   = 0x0000,
        .ro_mask    = 0xE040,
        .emu_mask   = 0xFFFF,
        .init       = xen_pt_linkctrl2_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    {
        .size = 0,
    },
};


/*********************************
 * Power Management Capability
 */

/* Power Management Capability reg static information table */
static XenPTRegInfo xen_pt_emu_reg_pm[] = {
    /* Next Pointer reg */
    {
        .offset     = PCI_CAP_LIST_NEXT,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0xFF,
        .emu_mask   = 0xFF,
        .init       = xen_pt_ptr_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Power Management Capabilities reg */
    {
        .offset     = PCI_CAP_FLAGS,
        .size       = 2,
        .init_val   = 0x0000,
        .ro_mask    = 0xFFFF,
        .emu_mask   = 0xF9C8,
        .init       = xen_pt_common_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* PCI Power Management Control/Status reg */
    {
        .offset     = PCI_PM_CTRL,
        .size       = 2,
        .init_val   = 0x0008,
        .res_mask   = 0x00F0,
        .ro_mask    = 0x610C,
        .rw1c_mask  = 0x8000,
        .emu_mask   = 0x810B,
        .init       = xen_pt_common_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    {
        .size = 0,
    },
};


/********************************
 * MSI Capability
 */

/* Helper */
#define xen_pt_msi_check_type(offset, flags, what) \
    ((offset) == ((flags) & PCI_MSI_FLAGS_64BIT ? \
                  PCI_MSI_##what##_64 : PCI_MSI_##what##_32))
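/*
 * Illustrative expansion: with 64-bit MSI (PCI_MSI_FLAGS_64BIT set),
 * xen_pt_msi_check_type(offset, flags, DATA) compares offset against
 * PCI_MSI_DATA_64 (0x0C); without it, against PCI_MSI_DATA_32 (0x08).
 * This lets one init/write handler serve both layouts by accepting only
 * the offset that matches the device's actual MSI format.
 */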
/* Message Control register */
static int xen_pt_msgctrl_reg_init(XenPCIPassthroughState *s,
                                   XenPTRegInfo *reg, uint32_t real_offset,
                                   uint32_t *data)
{
    XenPTMSI *msi = s->msi;
    uint16_t reg_field;
    int rc;

    /* use I/O device register's value as initial value */
    rc = xen_host_pci_get_word(&s->real_device, real_offset, &reg_field);
    if (rc) {
        return rc;
    }
    if (reg_field & PCI_MSI_FLAGS_ENABLE) {
        XEN_PT_LOG(&s->dev, "MSI already enabled, disabling it first\n");
        xen_host_pci_set_word(&s->real_device, real_offset,
                              reg_field & ~PCI_MSI_FLAGS_ENABLE);
    }
    msi->flags |= reg_field;
    msi->ctrl_offset = real_offset;
    msi->initialized = false;
    msi->mapped = false;

    *data = reg->init_val;
    return 0;
}
static int xen_pt_msgctrl_reg_write(XenPCIPassthroughState *s,
                                    XenPTReg *cfg_entry, uint16_t *val,
                                    uint16_t dev_value, uint16_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    XenPTMSI *msi = s->msi;
    uint16_t writable_mask = 0;
    uint16_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
    uint16_t *data = cfg_entry->ptr.half_word;

    /* Currently no support for multi-vector */
    if (*val & PCI_MSI_FLAGS_QSIZE) {
        XEN_PT_WARN(&s->dev, "Tries to set more than 1 vector ctrl %x\n", *val);
    }

    /* modify emulated register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);
    msi->flags |= *data & ~PCI_MSI_FLAGS_ENABLE;

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);

    /* update MSI */
    if (*val & PCI_MSI_FLAGS_ENABLE) {
        /* setup MSI pirq for the first time */
        if (!msi->initialized) {
            /* Init physical one */
            XEN_PT_LOG(&s->dev, "setup MSI (register: %x).\n", *val);
            if (xen_pt_msi_setup(s)) {
                /* We do not broadcast the error to the framework code, so
                 * that MSI errors are contained in MSI emulation code and
                 * QEMU can go on running.
                 * Guest MSI simply will not work.
                 */
                *val &= ~PCI_MSI_FLAGS_ENABLE;
                XEN_PT_WARN(&s->dev, "Can not map MSI (register: %x)!\n", *val);
                return 0;
            }
            if (xen_pt_msi_update(s)) {
                *val &= ~PCI_MSI_FLAGS_ENABLE;
                XEN_PT_WARN(&s->dev, "Can not bind MSI (register: %x)!\n", *val);
                return 0;
            }
            msi->initialized = true;
            msi->mapped = true;
        }
        msi->flags |= PCI_MSI_FLAGS_ENABLE;
    } else if (msi->mapped) {
        xen_pt_msi_disable(s);
    }

    return 0;
}
/* initialize Message Upper Address register */
static int xen_pt_msgaddr64_reg_init(XenPCIPassthroughState *s,
                                     XenPTRegInfo *reg, uint32_t real_offset,
                                     uint32_t *data)
{
    /* no need to initialize in case of 32 bit type */
    if (!(s->msi->flags & PCI_MSI_FLAGS_64BIT)) {
        *data = XEN_PT_INVALID_REG;
    } else {
        *data = reg->init_val;
    }

    return 0;
}
/* this function will be called twice (for 32 bit and 64 bit type) */
/* initialize Message Data register */
static int xen_pt_msgdata_reg_init(XenPCIPassthroughState *s,
                                   XenPTRegInfo *reg, uint32_t real_offset,
                                   uint32_t *data)
{
    uint32_t flags = s->msi->flags;
    uint32_t offset = reg->offset;

    /* check whether the offset matches the type */
    if (xen_pt_msi_check_type(offset, flags, DATA)) {
        *data = reg->init_val;
    } else {
        *data = XEN_PT_INVALID_REG;
    }
    return 0;
}

/* this function will be called twice (for 32 bit and 64 bit type) */
/* initialize Mask register */
static int xen_pt_mask_reg_init(XenPCIPassthroughState *s,
                                XenPTRegInfo *reg, uint32_t real_offset,
                                uint32_t *data)
{
    uint32_t flags = s->msi->flags;

    /* check whether the offset matches the type */
    if (!(flags & PCI_MSI_FLAGS_MASKBIT)) {
        *data = XEN_PT_INVALID_REG;
    } else if (xen_pt_msi_check_type(reg->offset, flags, MASK)) {
        *data = reg->init_val;
    } else {
        *data = XEN_PT_INVALID_REG;
    }
    return 0;
}

/* this function will be called twice (for 32 bit and 64 bit type) */
/* initialize Pending register */
static int xen_pt_pending_reg_init(XenPCIPassthroughState *s,
                                   XenPTRegInfo *reg, uint32_t real_offset,
                                   uint32_t *data)
{
    uint32_t flags = s->msi->flags;

    /* check whether the offset matches the type */
    if (!(flags & PCI_MSI_FLAGS_MASKBIT)) {
        *data = XEN_PT_INVALID_REG;
    } else if (xen_pt_msi_check_type(reg->offset, flags, PENDING)) {
        *data = reg->init_val;
    } else {
        *data = XEN_PT_INVALID_REG;
    }
    return 0;
}
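/*
 * MSI capability layout recap for the handlers above and below (offsets
 * relative to the capability base): 0x02 Message Control, 0x04 Message
 * Address, then for 32-bit devices 0x08 Data / 0x0C Mask / 0x10 Pending,
 * and for 64-bit devices 0x08 Address Hi / 0x0C Data / 0x10 Mask /
 * 0x14 Pending. Registering both variants and invalidating the one whose
 * offset does not match the device keeps a single static table workable.
 */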
/* write Message Address register */
static int xen_pt_msgaddr32_reg_write(XenPCIPassthroughState *s,
                                      XenPTReg *cfg_entry, uint32_t *val,
                                      uint32_t dev_value, uint32_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint32_t writable_mask = 0;
    uint32_t old_addr = *cfg_entry->ptr.word;
    uint32_t *data = cfg_entry->ptr.word;

    /* modify emulated register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);
    s->msi->addr_lo = *data;

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, 0);

    /* update MSI */
    if (*data != old_addr) {
        if (s->msi->mapped) {
            xen_pt_msi_update(s);
        }
    }

    return 0;
}
/* write Message Upper Address register */
static int xen_pt_msgaddr64_reg_write(XenPCIPassthroughState *s,
                                      XenPTReg *cfg_entry, uint32_t *val,
                                      uint32_t dev_value, uint32_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint32_t writable_mask = 0;
    uint32_t old_addr = *cfg_entry->ptr.word;
    uint32_t *data = cfg_entry->ptr.word;

    /* check whether the type is 64 bit or not */
    if (!(s->msi->flags & PCI_MSI_FLAGS_64BIT)) {
        XEN_PT_ERR(&s->dev,
                   "Can't write to the upper address without 64 bit support\n");
        return -1;
    }

    /* modify emulated register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);
    /* update the msi_info too */
    s->msi->addr_hi = *data;

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, 0);

    /* update MSI */
    if (*data != old_addr) {
        if (s->msi->mapped) {
            xen_pt_msi_update(s);
        }
    }

    return 0;
}


/* this function will be called twice (for 32 bit and 64 bit type) */
/* write Message Data register */
static int xen_pt_msgdata_reg_write(XenPCIPassthroughState *s,
                                    XenPTReg *cfg_entry, uint16_t *val,
                                    uint16_t dev_value, uint16_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    XenPTMSI *msi = s->msi;
    uint16_t writable_mask = 0;
    uint16_t old_data = *cfg_entry->ptr.half_word;
    uint32_t offset = reg->offset;
    uint16_t *data = cfg_entry->ptr.half_word;

    /* check whether the offset matches the type */
    if (!xen_pt_msi_check_type(offset, msi->flags, DATA)) {
        /* exit I/O emulator */
        XEN_PT_ERR(&s->dev, "the offset does not match the 32/64 bit type!\n");
        return -1;
    }

    /* modify emulated register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);
    /* update the msi_info too */
    msi->data = *data;

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, 0);

    /* update MSI */
    if (*data != old_data) {
        if (msi->mapped) {
            xen_pt_msi_update(s);
        }
    }

    return 0;
}

static int xen_pt_mask_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                                 uint32_t *val, uint32_t dev_value,
                                 uint32_t valid_mask)
{
    int rc;

    rc = xen_pt_long_reg_write(s, cfg_entry, val, dev_value, valid_mask);
    if (rc) {
        return rc;
    }

    s->msi->mask = *val;

    return 0;
}
/* MSI Capability Structure reg static information table */
static XenPTRegInfo xen_pt_emu_reg_msi[] = {
    /* Next Pointer reg */
    {
        .offset     = PCI_CAP_LIST_NEXT,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0xFF,
        .emu_mask   = 0xFF,
        .init       = xen_pt_ptr_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Message Control reg */
    {
        .offset     = PCI_MSI_FLAGS,
        .size       = 2,
        .init_val   = 0x0000,
        .res_mask   = 0xFE00,
        .ro_mask    = 0x018E,
        .emu_mask   = 0x017E,
        .init       = xen_pt_msgctrl_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_msgctrl_reg_write,
    },
    /* Message Address reg */
    {
        .offset     = PCI_MSI_ADDRESS_LO,
        .size       = 4,
        .init_val   = 0x00000000,
        .ro_mask    = 0x00000003,
        .emu_mask   = 0xFFFFFFFF,
        .init       = xen_pt_common_reg_init,
        .u.dw.read  = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_msgaddr32_reg_write,
    },
    /* Message Upper Address reg (if PCI_MSI_FLAGS_64BIT set) */
    {
        .offset     = PCI_MSI_ADDRESS_HI,
        .size       = 4,
        .init_val   = 0x00000000,
        .ro_mask    = 0x00000000,
        .emu_mask   = 0xFFFFFFFF,
        .init       = xen_pt_msgaddr64_reg_init,
        .u.dw.read  = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_msgaddr64_reg_write,
    },
    /* Message Data reg (16 bits of data for 32-bit devices) */
    {
        .offset     = PCI_MSI_DATA_32,
        .size       = 2,
        .init_val   = 0x0000,
        .ro_mask    = 0x0000,
        .emu_mask   = 0xFFFF,
        .init       = xen_pt_msgdata_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_msgdata_reg_write,
    },
    /* Message Data reg (16 bits of data for 64-bit devices) */
    {
        .offset     = PCI_MSI_DATA_64,
        .size       = 2,
        .init_val   = 0x0000,
        .ro_mask    = 0x0000,
        .emu_mask   = 0xFFFF,
        .init       = xen_pt_msgdata_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_msgdata_reg_write,
    },
    /* Mask reg (if PCI_MSI_FLAGS_MASKBIT set, for 32-bit devices) */
    {
        .offset     = PCI_MSI_MASK_32,
        .size       = 4,
        .init_val   = 0x00000000,
        .ro_mask    = 0xFFFFFFFF,
        .emu_mask   = 0xFFFFFFFF,
        .init       = xen_pt_mask_reg_init,
        .u.dw.read  = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_mask_reg_write,
    },
    /* Mask reg (if PCI_MSI_FLAGS_MASKBIT set, for 64-bit devices) */
    {
        .offset     = PCI_MSI_MASK_64,
        .size       = 4,
        .init_val   = 0x00000000,
        .ro_mask    = 0xFFFFFFFF,
        .emu_mask   = 0xFFFFFFFF,
        .init       = xen_pt_mask_reg_init,
        .u.dw.read  = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_mask_reg_write,
    },
    /* Pending reg (if PCI_MSI_FLAGS_MASKBIT set, for 32-bit devices) */
    {
        .offset     = PCI_MSI_MASK_32 + 4,
        .size       = 4,
        .init_val   = 0x00000000,
        .ro_mask    = 0xFFFFFFFF,
        .emu_mask   = 0x00000000,
        .init       = xen_pt_pending_reg_init,
        .u.dw.read  = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_long_reg_write,
    },
    /* Pending reg (if PCI_MSI_FLAGS_MASKBIT set, for 64-bit devices) */
    {
        .offset     = PCI_MSI_MASK_64 + 4,
        .size       = 4,
        .init_val   = 0x00000000,
        .ro_mask    = 0xFFFFFFFF,
        .emu_mask   = 0x00000000,
        .init       = xen_pt_pending_reg_init,
        .u.dw.read  = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_long_reg_write,
    },
    {
        .size = 0,
    },
};


/**************************************
 * MSI-X Capability
 */

/* Message Control register for MSI-X */
static int xen_pt_msixctrl_reg_init(XenPCIPassthroughState *s,
                                    XenPTRegInfo *reg, uint32_t real_offset,
                                    uint32_t *data)
{
    uint16_t reg_field;
    int rc;

    /* use I/O device register's value as initial value */
    rc = xen_host_pci_get_word(&s->real_device, real_offset, &reg_field);
    if (rc) {
        return rc;
    }
    if (reg_field & PCI_MSIX_FLAGS_ENABLE) {
        XEN_PT_LOG(&s->dev, "MSIX already enabled, disabling it first\n");
        xen_host_pci_set_word(&s->real_device, real_offset,
                              reg_field & ~PCI_MSIX_FLAGS_ENABLE);
    }

    s->msix->ctrl_offset = real_offset;

    *data = reg->init_val;
    return 0;
}
static int xen_pt_msixctrl_reg_write(XenPCIPassthroughState *s,
                                     XenPTReg *cfg_entry, uint16_t *val,
                                     uint16_t dev_value, uint16_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint16_t writable_mask = 0;
    uint16_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
    int debug_msix_enabled_old;
    uint16_t *data = cfg_entry->ptr.half_word;

    /* modify emulated register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);

    /* update MSI-X */
    if ((*val & PCI_MSIX_FLAGS_ENABLE)
        && !(*val & PCI_MSIX_FLAGS_MASKALL)) {
        xen_pt_msix_update(s);
    } else if (!(*val & PCI_MSIX_FLAGS_ENABLE) && s->msix->enabled) {
        xen_pt_msix_disable(s);
    }

    s->msix->maskall = *val & PCI_MSIX_FLAGS_MASKALL;

    debug_msix_enabled_old = s->msix->enabled;
    s->msix->enabled = !!(*val & PCI_MSIX_FLAGS_ENABLE);
    if (s->msix->enabled != debug_msix_enabled_old) {
        XEN_PT_LOG(&s->dev, "%s MSI-X\n",
                   s->msix->enabled ? "enable" : "disable");
    }

    return 0;
}

/* MSI-X Capability Structure reg static information table */
static XenPTRegInfo xen_pt_emu_reg_msix[] = {
    /* Next Pointer reg */
    {
        .offset     = PCI_CAP_LIST_NEXT,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0xFF,
        .emu_mask   = 0xFF,
        .init       = xen_pt_ptr_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Message Control reg */
    {
        .offset     = PCI_MSI_FLAGS,
        .size       = 2,
        .init_val   = 0x0000,
        .res_mask   = 0x3800,
        .ro_mask    = 0x07FF,
        .emu_mask   = 0x0000,
        .init       = xen_pt_msixctrl_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_msixctrl_reg_write,
    },
    {
        .size = 0,
    },
};

static XenPTRegInfo xen_pt_emu_reg_igd_opregion[] = {
    /* Intel IGFX OpRegion reg */
    {
        .offset     = 0x0,
        .size       = 4,
        .init_val   = 0,
        .emu_mask   = 0xFFFFFFFF,
        .u.dw.read  = xen_pt_intel_opregion_read,
        .u.dw.write = xen_pt_intel_opregion_write,
    },
    {
        .size = 0,
    },
};
/****************************
 * Capabilities
 */

/* capability structure register group size functions */

static int xen_pt_reg_grp_size_init(XenPCIPassthroughState *s,
                                    const XenPTRegGroupInfo *grp_reg,
                                    uint32_t base_offset, uint8_t *size)
{
    *size = grp_reg->grp_size;
    return 0;
}
/* get Vendor Specific Capability Structure register group size */
static int xen_pt_vendor_size_init(XenPCIPassthroughState *s,
                                   const XenPTRegGroupInfo *grp_reg,
                                   uint32_t base_offset, uint8_t *size)
{
    return xen_host_pci_get_byte(&s->real_device, base_offset + 0x02, size);
}
/* get PCI Express Capability Structure register group size */
static int xen_pt_pcie_size_init(XenPCIPassthroughState *s,
                                 const XenPTRegGroupInfo *grp_reg,
                                 uint32_t base_offset, uint8_t *size)
{
    PCIDevice *d = PCI_DEVICE(s);
    uint8_t version = get_capability_version(s, base_offset);
    uint8_t type = get_device_type(s, base_offset);
    uint8_t pcie_size = 0;

    /* calculate size depending on capability version and device/port type */
    /* in case of PCI Express Base Specification Rev 1.x */
    if (version == 1) {
        /* The PCI Express Capabilities, Device Capabilities, and Device
         * Status/Control registers are required for all PCI Express devices.
         * The Link Capabilities and Link Status/Control are required for all
         * Endpoints that are not Root Complex Integrated Endpoints. Endpoints
         * are not required to implement registers other than those listed
         * above and terminate the capability structure.
         */
        switch (type) {
        case PCI_EXP_TYPE_ENDPOINT:
        case PCI_EXP_TYPE_LEG_END:
            pcie_size = 0x14;
            break;
        case PCI_EXP_TYPE_RC_END:
            /* has no link */
            pcie_size = 0x0C;
            break;
        /* only EndPoint passthrough is supported */
        case PCI_EXP_TYPE_ROOT_PORT:
        case PCI_EXP_TYPE_UPSTREAM:
        case PCI_EXP_TYPE_DOWNSTREAM:
        case PCI_EXP_TYPE_PCI_BRIDGE:
        case PCI_EXP_TYPE_PCIE_BRIDGE:
        case PCI_EXP_TYPE_RC_EC:
        default:
            XEN_PT_ERR(d, "Unsupported device/port type 0x%x.\n", type);
            return -1;
        }
    }
    /* in case of PCI Express Base Specification Rev 2.0 */
    else if (version == 2) {
        switch (type) {
        case PCI_EXP_TYPE_ENDPOINT:
        case PCI_EXP_TYPE_LEG_END:
        case PCI_EXP_TYPE_RC_END:
            /* For Functions that do not implement the registers,
             * these spaces must be hardwired to 0b.
             */
            pcie_size = 0x3C;
            break;
        /* only EndPoint passthrough is supported */
        case PCI_EXP_TYPE_ROOT_PORT:
        case PCI_EXP_TYPE_UPSTREAM:
        case PCI_EXP_TYPE_DOWNSTREAM:
        case PCI_EXP_TYPE_PCI_BRIDGE:
        case PCI_EXP_TYPE_PCIE_BRIDGE:
        case PCI_EXP_TYPE_RC_EC:
        default:
            XEN_PT_ERR(d, "Unsupported device/port type 0x%x.\n", type);
            return -1;
        }
    } else {
        XEN_PT_ERR(d, "Unsupported capability version 0x%x.\n", version);
        return -1;
    }

    *size = pcie_size;
    return 0;
}
/* get MSI Capability Structure register group size */
static int xen_pt_msi_size_init(XenPCIPassthroughState *s,
                                const XenPTRegGroupInfo *grp_reg,
                                uint32_t base_offset, uint8_t *size)
{
    uint16_t msg_ctrl = 0;
    uint8_t msi_size = 0xa;
    int rc;

    rc = xen_host_pci_get_word(&s->real_device, base_offset + PCI_MSI_FLAGS,
                               &msg_ctrl);
    if (rc) {
        return rc;
    }
    /* check if 64-bit address capable and per-vector masking capable */
    if (msg_ctrl & PCI_MSI_FLAGS_64BIT) {
        msi_size += 4;
    }
    if (msg_ctrl & PCI_MSI_FLAGS_MASKBIT) {
        msi_size += 10;
    }

    s->msi = g_new0(XenPTMSI, 1);
    s->msi->pirq = XEN_PT_UNASSIGNED_PIRQ;

    *size = msi_size;
    return 0;
}
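/*
 * Worked example for xen_pt_msi_size_init() above: a function advertising
 * both 64-bit addressing and per-vector masking gets
 * 0x0A + 4 + 10 = 0x18 bytes, i.e. control and both address words plus the
 * data, mask and pending registers; a minimal 32-bit MSI function stays at
 * 0x0A bytes.
 */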
/* get MSI-X Capability Structure register group size */
static int xen_pt_msix_size_init(XenPCIPassthroughState *s,
                                 const XenPTRegGroupInfo *grp_reg,
                                 uint32_t base_offset, uint8_t *size)
{
    int rc = 0;

    rc = xen_pt_msix_init(s, base_offset);

    if (rc < 0) {
        XEN_PT_ERR(&s->dev, "Internal error: Invalid xen_pt_msix_init.\n");
        return rc;
    }

    *size = grp_reg->grp_size;
    return 0;
}


static const XenPTRegGroupInfo xen_pt_emu_reg_grps[] = {
    /* Header Type0 reg group */
    {
        .grp_id     = 0xFF,
        .grp_type   = XEN_PT_GRP_TYPE_EMU,
        .grp_size   = 0x40,
        .size_init  = xen_pt_reg_grp_size_init,
        .emu_regs   = xen_pt_emu_reg_header0,
    },
    /* PCI PowerManagement Capability reg group */
    {
        .grp_id     = PCI_CAP_ID_PM,
        .grp_type   = XEN_PT_GRP_TYPE_EMU,
        .grp_size   = PCI_PM_SIZEOF,
        .size_init  = xen_pt_reg_grp_size_init,
        .emu_regs   = xen_pt_emu_reg_pm,
    },
    /* AGP Capability Structure reg group */
    {
        .grp_id     = PCI_CAP_ID_AGP,
        .grp_type   = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size   = 0x30,
        .size_init  = xen_pt_reg_grp_size_init,
    },
    /* Vital Product Data Capability Structure reg group */
    {
        .grp_id     = PCI_CAP_ID_VPD,
        .grp_type   = XEN_PT_GRP_TYPE_EMU,
        .grp_size   = 0x08,
        .size_init  = xen_pt_reg_grp_size_init,
        .emu_regs   = xen_pt_emu_reg_vpd,
    },
    /* Slot Identification reg group */
    {
        .grp_id     = PCI_CAP_ID_SLOTID,
        .grp_type   = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size   = 0x04,
        .size_init  = xen_pt_reg_grp_size_init,
    },
    /* MSI Capability Structure reg group */
    {
        .grp_id     = PCI_CAP_ID_MSI,
        .grp_type   = XEN_PT_GRP_TYPE_EMU,
        .grp_size   = 0xFF,
        .size_init  = xen_pt_msi_size_init,
        .emu_regs   = xen_pt_emu_reg_msi,
    },
    /* PCI-X Capabilities List Item reg group */
    {
        .grp_id     = PCI_CAP_ID_PCIX,
        .grp_type   = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size   = 0x18,
        .size_init  = xen_pt_reg_grp_size_init,
    },
    /* Vendor Specific Capability Structure reg group */
    {
        .grp_id     = PCI_CAP_ID_VNDR,
        .grp_type   = XEN_PT_GRP_TYPE_EMU,
        .grp_size   = 0xFF,
        .size_init  = xen_pt_vendor_size_init,
        .emu_regs   = xen_pt_emu_reg_vendor,
    },
    /* SHPC Capability List Item reg group */
    {
        .grp_id     = PCI_CAP_ID_SHPC,
        .grp_type   = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size   = 0x08,
        .size_init  = xen_pt_reg_grp_size_init,
    },
    /* Subsystem ID and Subsystem Vendor ID Capability List Item reg group */
    {
        .grp_id     = PCI_CAP_ID_SSVID,
        .grp_type   = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size   = 0x08,
        .size_init  = xen_pt_reg_grp_size_init,
    },
    /* AGP 8x Capability Structure reg group */
    {
        .grp_id     = PCI_CAP_ID_AGP3,
        .grp_type   = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size   = 0x30,
        .size_init  = xen_pt_reg_grp_size_init,
    },
    /* PCI Express Capability Structure reg group */
    {
        .grp_id     = PCI_CAP_ID_EXP,
        .grp_type   = XEN_PT_GRP_TYPE_EMU,
        .grp_size   = 0xFF,
        .size_init  = xen_pt_pcie_size_init,
        .emu_regs   = xen_pt_emu_reg_pcie,
    },
    /* MSI-X Capability Structure reg group */
    {
        .grp_id     = PCI_CAP_ID_MSIX,
        .grp_type   = XEN_PT_GRP_TYPE_EMU,
        .grp_size   = 0x0C,
        .size_init  = xen_pt_msix_size_init,
        .emu_regs   = xen_pt_emu_reg_msix,
    },
    /* Intel IGD Opregion group */
    {
        .grp_id     = XEN_PCI_INTEL_OPREGION,
        .grp_type   = XEN_PT_GRP_TYPE_EMU,
        .grp_size   = 0x4,
        .size_init  = xen_pt_reg_grp_size_init,
        .emu_regs   = xen_pt_emu_reg_igd_opregion,
    },
    {
        .grp_size = 0,
    },
};
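/*
 * Group types at a glance: XEN_PT_GRP_TYPE_EMU groups get per-register
 * emulation from their emu_regs table, while XEN_PT_GRP_TYPE_HARDWIRED
 * groups (e.g. AGP, Slot Identification, PCI-X, SHPC and SSVID above) are
 * hidden from the guest: their space is hardwired to 0 and
 * xen_pt_ptr_reg_init() below splices them out of the capability chain.
 */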
/* initialize Capabilities Pointer or Next Pointer register */
static int xen_pt_ptr_reg_init(XenPCIPassthroughState *s,
                               XenPTRegInfo *reg, uint32_t real_offset,
                               uint32_t *data)
{
    int i, rc;
    uint8_t reg_field;
    uint8_t cap_id = 0;

    rc = xen_host_pci_get_byte(&s->real_device, real_offset, &reg_field);
    if (rc) {
        return rc;
    }
    /* find capability offset */
    while (reg_field) {
        for (i = 0; xen_pt_emu_reg_grps[i].grp_size != 0; i++) {
            if (xen_pt_hide_dev_cap(&s->real_device,
                                    xen_pt_emu_reg_grps[i].grp_id)) {
                continue;
            }

            rc = xen_host_pci_get_byte(&s->real_device,
                                       reg_field + PCI_CAP_LIST_ID, &cap_id);
            if (rc) {
                XEN_PT_ERR(&s->dev, "Failed to read capability @0x%x (rc:%d)\n",
                           reg_field + PCI_CAP_LIST_ID, rc);
                return rc;
            }
            if (xen_pt_emu_reg_grps[i].grp_id == cap_id) {
                if (xen_pt_emu_reg_grps[i].grp_type == XEN_PT_GRP_TYPE_EMU) {
                    goto out;
                }
                /* this capability is hardwired to 0, look for the next one */
                break;
            }
        }

        /* next capability */
        rc = xen_host_pci_get_byte(&s->real_device,
                                   reg_field + PCI_CAP_LIST_NEXT, &reg_field);
        if (rc) {
            return rc;
        }
    }

out:
    *data = reg_field;
    return 0;
}


/*************
 * Main
 */

static uint8_t find_cap_offset(XenPCIPassthroughState *s, uint8_t cap)
{
    uint8_t id;
    unsigned max_cap = XEN_PCI_CAP_MAX;
    uint8_t pos = PCI_CAPABILITY_LIST;
    uint8_t status = 0;

    if (xen_host_pci_get_byte(&s->real_device, PCI_STATUS, &status)) {
        return 0;
    }
    if ((status & PCI_STATUS_CAP_LIST) == 0) {
        return 0;
    }

    while (max_cap--) {
        if (xen_host_pci_get_byte(&s->real_device, pos, &pos)) {
            break;
        }
        if (pos < PCI_CONFIG_HEADER_SIZE) {
            break;
        }

        pos &= ~3;
        if (xen_host_pci_get_byte(&s->real_device,
                                  pos + PCI_CAP_LIST_ID, &id)) {
            break;
        }

        if (id == 0xff) {
            break;
        }
        if (id == cap) {
            return pos;
        }

        pos += PCI_CAP_LIST_NEXT;
    }
    return 0;
}
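/*
 * Worked example for the sync logic in xen_pt_config_reg_init() below:
 * for a 2-byte register, size_mask = 0xFFFFFFFF >> ((4 - 2) << 3) = 0xFFFF.
 * With the Status register's emu_mask = 0x0010, host_mask becomes 0xFFEF:
 * if the init handler's value and the host's value disagree in any of those
 * bits, the mismatch is logged and the merged result keeps the host's value
 * for the non-emulated bits and the handler's value for the emulated ones.
 */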
static void xen_pt_config_reg_init(XenPCIPassthroughState *s,
                                   XenPTRegGroup *reg_grp, XenPTRegInfo *reg,
                                   Error **errp)
{
    XenPTReg *reg_entry;
    uint32_t data = 0;
    int rc = 0;

    reg_entry = g_new0(XenPTReg, 1);
    reg_entry->reg = reg;

    if (reg->init) {
        uint32_t host_mask, size_mask;
        unsigned int offset;
        uint32_t val = 0;

        /* initialize emulated register */
        rc = reg->init(s, reg_entry->reg,
                       reg_grp->base_offset + reg->offset, &data);
        if (rc < 0) {
            g_free(reg_entry);
            error_setg(errp, "Failed to initialize emulated register");
            return;
        }
        if (data == XEN_PT_INVALID_REG) {
            /* free unused BAR register entry */
            g_free(reg_entry);
            return;
        }
        /* Sync up the data to dev.config */
        offset = reg_grp->base_offset + reg->offset;
        size_mask = 0xFFFFFFFF >> ((4 - reg->size) << 3);

        switch (reg->size) {
        case 1: rc = xen_host_pci_get_byte(&s->real_device, offset, (uint8_t *)&val);
                break;
        case 2: rc = xen_host_pci_get_word(&s->real_device, offset, (uint16_t *)&val);
                break;
        case 4: rc = xen_host_pci_get_long(&s->real_device, offset, &val);
                break;
        default: abort();
        }
        if (rc) {
            /* Serious issues when we cannot read the host values! */
            g_free(reg_entry);
            error_setg(errp, "Cannot read host values");
            return;
        }
        /* Bits set in emu_mask are the ones we emulate. dev.config shall
         * contain the emulated view of the guest - therefore we flip the
         * mask to mask out the host values (which dev.config initially
         * has). */
        host_mask = size_mask & ~reg->emu_mask;

        if ((data & host_mask) != (val & host_mask)) {
            uint32_t new_val;
            /*
             * Merge the emulated bits (data) with the host bits (val)
             * and mask out the bits past size to enable restoration
             * of the proper value for logging below.
             */
            new_val = XEN_PT_MERGE_VALUE(val, data, host_mask) & size_mask;
            /* Leave the host and emulated values past the size intact - even
             * though we do not care as we write per reg->size granularity,
             * for the logging below let's have the proper value. */
            new_val |= ((val | data)) & ~size_mask;
            XEN_PT_LOG(&s->dev, "Offset 0x%04x mismatch! Emulated=0x%04x, host=0x%04x, syncing to 0x%04x.\n",
                       offset, data, val, new_val);
            val = new_val;
        } else {
            val = data;
        }

        if (val & ~size_mask) {
            error_setg(errp, "Offset 0x%04x:0x%04x expands past"
                       " register size (%d)", offset, val, reg->size);
            g_free(reg_entry);
            return;
        }
        /* This could be just pci_set_long as we don't modify the bits
         * past reg->size, but in case this routine is run in parallel or the
         * init value is larger, we do not want to overwrite registers. */
        switch (reg->size) {
        case 1: pci_set_byte(s->dev.config + offset, (uint8_t)val);
                break;
        case 2: pci_set_word(s->dev.config + offset, (uint16_t)val);
                break;
        case 4: pci_set_long(s->dev.config + offset, val);
                break;
        default: abort();
        }
        /* set register value pointer to the data. */
        reg_entry->ptr.byte = s->dev.config + offset;

    }
    /* list add register entry */
    QLIST_INSERT_HEAD(&reg_grp->reg_tbl_list, reg_entry, entries);
}
void xen_pt_config_init(XenPCIPassthroughState *s, Error **errp)
{
    ERRP_GUARD();
    int i, rc;

    QLIST_INIT(&s->reg_grps);

    for (i = 0; xen_pt_emu_reg_grps[i].grp_size != 0; i++) {
        uint32_t reg_grp_offset = 0;
        XenPTRegGroup *reg_grp_entry = NULL;

        if (xen_pt_emu_reg_grps[i].grp_id != 0xFF
            && xen_pt_emu_reg_grps[i].grp_id != XEN_PCI_INTEL_OPREGION) {
            if (xen_pt_hide_dev_cap(&s->real_device,
                                    xen_pt_emu_reg_grps[i].grp_id)) {
                continue;
            }

            reg_grp_offset = find_cap_offset(s, xen_pt_emu_reg_grps[i].grp_id);

            if (!reg_grp_offset) {
                continue;
            }
        }

        if (xen_pt_emu_reg_grps[i].grp_id == XEN_PCI_INTEL_OPREGION) {
            if (!is_igd_vga_passthrough(&s->real_device) ||
                s->real_device.vendor_id != PCI_VENDOR_ID_INTEL) {
                continue;
            }
            /*
             * By default we will trap up to 0x40 in the cfg space.
             * If an Intel device is passed through, we need to trap 0xfc,
             * therefore the size should be 0xff.
             */
            reg_grp_offset = XEN_PCI_INTEL_OPREGION;
        }

        reg_grp_entry = g_new0(XenPTRegGroup, 1);
        QLIST_INIT(&reg_grp_entry->reg_tbl_list);
        QLIST_INSERT_HEAD(&s->reg_grps, reg_grp_entry, entries);

        reg_grp_entry->base_offset = reg_grp_offset;
        reg_grp_entry->reg_grp = xen_pt_emu_reg_grps + i;
        if (xen_pt_emu_reg_grps[i].size_init) {
            /* get register group size */
            rc = xen_pt_emu_reg_grps[i].size_init(s, reg_grp_entry->reg_grp,
                                                  reg_grp_offset,
                                                  &reg_grp_entry->size);
            if (rc < 0) {
                error_setg(errp, "Failed to initialize %d/%zu, type = 0x%x,"
                           " rc: %d", i, ARRAY_SIZE(xen_pt_emu_reg_grps),
                           xen_pt_emu_reg_grps[i].grp_type, rc);
                xen_pt_config_delete(s);
                return;
            }
        }

        if (xen_pt_emu_reg_grps[i].grp_type == XEN_PT_GRP_TYPE_EMU) {
            if (xen_pt_emu_reg_grps[i].emu_regs) {
                int j = 0;
                XenPTRegInfo *regs = xen_pt_emu_reg_grps[i].emu_regs;

                /* initialize capability register */
                for (j = 0; regs->size != 0; j++, regs++) {
                    xen_pt_config_reg_init(s, reg_grp_entry, regs, errp);
                    if (*errp) {
                        error_append_hint(errp, "Failed to init register %d"
                                          " offsets 0x%x in grp_type = 0x%x (%d/%zu)",
                                          j,
                                          regs->offset,
                                          xen_pt_emu_reg_grps[i].grp_type,
                                          i, ARRAY_SIZE(xen_pt_emu_reg_grps));
                        xen_pt_config_delete(s);
                        return;
                    }
                }
            }
        }
    }
}

/* delete all emulated registers */
void xen_pt_config_delete(XenPCIPassthroughState *s)
{
    struct XenPTRegGroup *reg_group, *next_grp;
    struct XenPTReg *reg, *next_reg;

    /* free MSI/MSI-X info table */
    if (s->msix) {
        xen_pt_msix_unmap(s);
    }
    g_free(s->msi);

    /* free all register group entries */
    QLIST_FOREACH_SAFE(reg_group, &s->reg_grps, entries, next_grp) {
        /* free all register entries */
        QLIST_FOREACH_SAFE(reg, &reg_group->reg_tbl_list, entries, next_reg) {
            QLIST_REMOVE(reg, entries);
            g_free(reg);
        }

        QLIST_REMOVE(reg_group, entries);
        g_free(reg_group);
    }
}