1 /* 2 * Copyright (c) 2007, Neocleus Corporation. 3 * Copyright (c) 2007, Intel Corporation. 4 * 5 * This work is licensed under the terms of the GNU GPL, version 2. See 6 * the COPYING file in the top-level directory. 7 * 8 * Alex Novik <alex@neocleus.com> 9 * Allen Kay <allen.m.kay@intel.com> 10 * Guy Zana <guy@neocleus.com> 11 * 12 * This file implements direct PCI assignment to a HVM guest 13 */ 14 15 #include "qemu/osdep.h" 16 #include "qapi/error.h" 17 #include "qemu/timer.h" 18 #include "hw/xen/xen_pt.h" 19 #include "hw/xen/xen_igd.h" 20 #include "hw/xen/xen-legacy-backend.h" 21 22 #define XEN_PT_MERGE_VALUE(value, data, val_mask) \ 23 (((value) & (val_mask)) | ((data) & ~(val_mask))) 24 25 #define XEN_PT_INVALID_REG 0xFFFFFFFF /* invalid register value */ 26 27 /* prototype */ 28 29 static int xen_pt_ptr_reg_init(XenPCIPassthroughState *s, XenPTRegInfo *reg, 30 uint32_t real_offset, uint32_t *data); 31 32 33 /* helper */ 34 35 /* A return value of 1 means the capability should NOT be exposed to guest. */ 36 static int xen_pt_hide_dev_cap(const XenHostPCIDevice *d, uint8_t grp_id) 37 { 38 switch (grp_id) { 39 case PCI_CAP_ID_EXP: 40 /* The PCI Express Capability Structure of the VF of Intel 82599 10GbE 41 * Controller looks trivial, e.g., the PCI Express Capabilities 42 * Register is 0. We should not try to expose it to guest. 43 * 44 * The datasheet is available at 45 * http://download.intel.com/design/network/datashts/82599_datasheet.pdf 46 * 47 * See 'Table 9.7. VF PCIe Configuration Space' of the datasheet, the 48 * PCI Express Capability Structure of the VF of Intel 82599 10GbE 49 * Controller looks trivial, e.g., the PCI Express Capabilities 50 * Register is 0, so the Capability Version is 0 and 51 * xen_pt_pcie_size_init() would fail. 52 */ 53 if (d->vendor_id == PCI_VENDOR_ID_INTEL && 54 d->device_id == PCI_DEVICE_ID_INTEL_82599_SFP_VF) { 55 return 1; 56 } 57 break; 58 } 59 return 0; 60 } 61 62 /* find emulate register group entry */ 63 XenPTRegGroup *xen_pt_find_reg_grp(XenPCIPassthroughState *s, uint32_t address) 64 { 65 XenPTRegGroup *entry = NULL; 66 67 /* find register group entry */ 68 QLIST_FOREACH(entry, &s->reg_grps, entries) { 69 /* check address */ 70 if ((entry->base_offset <= address) 71 && ((entry->base_offset + entry->size) > address)) { 72 return entry; 73 } 74 } 75 76 /* group entry not found */ 77 return NULL; 78 } 79 80 /* find emulate register entry */ 81 XenPTReg *xen_pt_find_reg(XenPTRegGroup *reg_grp, uint32_t address) 82 { 83 XenPTReg *reg_entry = NULL; 84 XenPTRegInfo *reg = NULL; 85 uint32_t real_offset = 0; 86 87 /* find register entry */ 88 QLIST_FOREACH(reg_entry, ®_grp->reg_tbl_list, entries) { 89 reg = reg_entry->reg; 90 real_offset = reg_grp->base_offset + reg->offset; 91 /* check address */ 92 if ((real_offset <= address) 93 && ((real_offset + reg->size) > address)) { 94 return reg_entry; 95 } 96 } 97 98 return NULL; 99 } 100 101 static uint32_t get_throughable_mask(const XenPCIPassthroughState *s, 102 XenPTRegInfo *reg, uint32_t valid_mask) 103 { 104 uint32_t throughable_mask = ~(reg->emu_mask | reg->ro_mask); 105 106 if (!s->permissive) { 107 throughable_mask &= ~reg->res_mask; 108 } 109 110 return throughable_mask & valid_mask; 111 } 112 113 /**************** 114 * general register functions 115 */ 116 117 /* register initialization function */ 118 119 static int xen_pt_common_reg_init(XenPCIPassthroughState *s, 120 XenPTRegInfo *reg, uint32_t real_offset, 121 uint32_t *data) 122 { 123 *data = reg->init_val; 124 return 0; 125 } 126 127 /* Read register functions */ 128 129 static int xen_pt_byte_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry, 130 uint8_t *value, uint8_t valid_mask) 131 { 132 XenPTRegInfo *reg = cfg_entry->reg; 133 uint8_t valid_emu_mask = 0; 134 uint8_t *data = cfg_entry->ptr.byte; 135 136 /* emulate byte register */ 137 valid_emu_mask = reg->emu_mask & valid_mask; 138 *value = XEN_PT_MERGE_VALUE(*value, *data, ~valid_emu_mask); 139 140 return 0; 141 } 142 static int xen_pt_word_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry, 143 uint16_t *value, uint16_t valid_mask) 144 { 145 XenPTRegInfo *reg = cfg_entry->reg; 146 uint16_t valid_emu_mask = 0; 147 uint16_t *data = cfg_entry->ptr.half_word; 148 149 /* emulate word register */ 150 valid_emu_mask = reg->emu_mask & valid_mask; 151 *value = XEN_PT_MERGE_VALUE(*value, *data, ~valid_emu_mask); 152 153 return 0; 154 } 155 static int xen_pt_long_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry, 156 uint32_t *value, uint32_t valid_mask) 157 { 158 XenPTRegInfo *reg = cfg_entry->reg; 159 uint32_t valid_emu_mask = 0; 160 uint32_t *data = cfg_entry->ptr.word; 161 162 /* emulate long register */ 163 valid_emu_mask = reg->emu_mask & valid_mask; 164 *value = XEN_PT_MERGE_VALUE(*value, *data, ~valid_emu_mask); 165 166 return 0; 167 } 168 169 /* Write register functions */ 170 171 static int xen_pt_byte_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry, 172 uint8_t *val, uint8_t dev_value, 173 uint8_t valid_mask) 174 { 175 XenPTRegInfo *reg = cfg_entry->reg; 176 uint8_t writable_mask = 0; 177 uint8_t throughable_mask = get_throughable_mask(s, reg, valid_mask); 178 uint8_t *data = cfg_entry->ptr.byte; 179 180 /* modify emulate register */ 181 writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask; 182 *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask); 183 184 /* create value for writing to I/O device register */ 185 *val = XEN_PT_MERGE_VALUE(*val, dev_value & ~reg->rw1c_mask, 186 throughable_mask); 187 188 return 0; 189 } 190 static int xen_pt_word_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry, 191 uint16_t *val, uint16_t dev_value, 192 uint16_t valid_mask) 193 { 194 XenPTRegInfo *reg = cfg_entry->reg; 195 uint16_t writable_mask = 0; 196 uint16_t throughable_mask = get_throughable_mask(s, reg, valid_mask); 197 uint16_t *data = cfg_entry->ptr.half_word; 198 199 /* modify emulate register */ 200 writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask; 201 *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask); 202 203 /* create value for writing to I/O device register */ 204 *val = XEN_PT_MERGE_VALUE(*val, dev_value & ~reg->rw1c_mask, 205 throughable_mask); 206 207 return 0; 208 } 209 static int xen_pt_long_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry, 210 uint32_t *val, uint32_t dev_value, 211 uint32_t valid_mask) 212 { 213 XenPTRegInfo *reg = cfg_entry->reg; 214 uint32_t writable_mask = 0; 215 uint32_t throughable_mask = get_throughable_mask(s, reg, valid_mask); 216 uint32_t *data = cfg_entry->ptr.word; 217 218 /* modify emulate register */ 219 writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask; 220 *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask); 221 222 /* create value for writing to I/O device register */ 223 *val = XEN_PT_MERGE_VALUE(*val, dev_value & ~reg->rw1c_mask, 224 throughable_mask); 225 226 return 0; 227 } 228 229 230 /* XenPTRegInfo declaration 231 * - only for emulated register (either a part or whole bit). 232 * - for passthrough register that need special behavior (like interacting with 233 * other component), set emu_mask to all 0 and specify r/w func properly. 234 * - do NOT use ALL F for init_val, otherwise the tbl will not be registered. 235 */ 236 237 /******************** 238 * Header Type0 239 */ 240 241 static int xen_pt_vendor_reg_init(XenPCIPassthroughState *s, 242 XenPTRegInfo *reg, uint32_t real_offset, 243 uint32_t *data) 244 { 245 *data = s->real_device.vendor_id; 246 return 0; 247 } 248 static int xen_pt_device_reg_init(XenPCIPassthroughState *s, 249 XenPTRegInfo *reg, uint32_t real_offset, 250 uint32_t *data) 251 { 252 *data = s->real_device.device_id; 253 return 0; 254 } 255 static int xen_pt_status_reg_init(XenPCIPassthroughState *s, 256 XenPTRegInfo *reg, uint32_t real_offset, 257 uint32_t *data) 258 { 259 XenPTRegGroup *reg_grp_entry = NULL; 260 XenPTReg *reg_entry = NULL; 261 uint32_t reg_field = 0; 262 263 /* find Header register group */ 264 reg_grp_entry = xen_pt_find_reg_grp(s, PCI_CAPABILITY_LIST); 265 if (reg_grp_entry) { 266 /* find Capabilities Pointer register */ 267 reg_entry = xen_pt_find_reg(reg_grp_entry, PCI_CAPABILITY_LIST); 268 if (reg_entry) { 269 /* check Capabilities Pointer register */ 270 if (*reg_entry->ptr.half_word) { 271 reg_field |= PCI_STATUS_CAP_LIST; 272 } else { 273 reg_field &= ~PCI_STATUS_CAP_LIST; 274 } 275 } else { 276 xen_shutdown_fatal_error("Internal error: Couldn't find XenPTReg*" 277 " for Capabilities Pointer register." 278 " (%s)\n", __func__); 279 return -1; 280 } 281 } else { 282 xen_shutdown_fatal_error("Internal error: Couldn't find XenPTRegGroup" 283 " for Header. (%s)\n", __func__); 284 return -1; 285 } 286 287 *data = reg_field; 288 return 0; 289 } 290 static int xen_pt_header_type_reg_init(XenPCIPassthroughState *s, 291 XenPTRegInfo *reg, uint32_t real_offset, 292 uint32_t *data) 293 { 294 /* read PCI_HEADER_TYPE */ 295 *data = reg->init_val | 0x80; 296 return 0; 297 } 298 299 /* initialize Interrupt Pin register */ 300 static int xen_pt_irqpin_reg_init(XenPCIPassthroughState *s, 301 XenPTRegInfo *reg, uint32_t real_offset, 302 uint32_t *data) 303 { 304 if (s->real_device.irq) { 305 *data = xen_pt_pci_read_intx(s); 306 } 307 return 0; 308 } 309 310 /* Command register */ 311 static int xen_pt_cmd_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry, 312 uint16_t *val, uint16_t dev_value, 313 uint16_t valid_mask) 314 { 315 XenPTRegInfo *reg = cfg_entry->reg; 316 uint16_t writable_mask = 0; 317 uint16_t throughable_mask = get_throughable_mask(s, reg, valid_mask); 318 uint16_t *data = cfg_entry->ptr.half_word; 319 320 /* modify emulate register */ 321 writable_mask = ~reg->ro_mask & valid_mask; 322 *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask); 323 324 /* create value for writing to I/O device register */ 325 if (*val & PCI_COMMAND_INTX_DISABLE) { 326 throughable_mask |= PCI_COMMAND_INTX_DISABLE; 327 } else { 328 if (s->machine_irq) { 329 throughable_mask |= PCI_COMMAND_INTX_DISABLE; 330 } 331 } 332 333 *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask); 334 335 return 0; 336 } 337 338 /* BAR */ 339 #define XEN_PT_BAR_MEM_RO_MASK 0x0000000F /* BAR ReadOnly mask(Memory) */ 340 #define XEN_PT_BAR_MEM_EMU_MASK 0xFFFFFFF0 /* BAR emul mask(Memory) */ 341 #define XEN_PT_BAR_IO_RO_MASK 0x00000003 /* BAR ReadOnly mask(I/O) */ 342 #define XEN_PT_BAR_IO_EMU_MASK 0xFFFFFFFC /* BAR emul mask(I/O) */ 343 344 static bool is_64bit_bar(PCIIORegion *r) 345 { 346 return !!(r->type & PCI_BASE_ADDRESS_MEM_TYPE_64); 347 } 348 349 static uint64_t xen_pt_get_bar_size(PCIIORegion *r) 350 { 351 if (is_64bit_bar(r)) { 352 uint64_t size64; 353 size64 = (r + 1)->size; 354 size64 <<= 32; 355 size64 += r->size; 356 return size64; 357 } 358 return r->size; 359 } 360 361 static XenPTBarFlag xen_pt_bar_reg_parse(XenPCIPassthroughState *s, 362 int index) 363 { 364 PCIDevice *d = PCI_DEVICE(s); 365 XenPTRegion *region = NULL; 366 PCIIORegion *r; 367 368 /* check 64bit BAR */ 369 if ((0 < index) && (index < PCI_ROM_SLOT)) { 370 int type = s->real_device.io_regions[index - 1].type; 371 372 if ((type & XEN_HOST_PCI_REGION_TYPE_MEM) 373 && (type & XEN_HOST_PCI_REGION_TYPE_MEM_64)) { 374 region = &s->bases[index - 1]; 375 if (region->bar_flag != XEN_PT_BAR_FLAG_UPPER) { 376 return XEN_PT_BAR_FLAG_UPPER; 377 } 378 } 379 } 380 381 /* check unused BAR */ 382 r = &d->io_regions[index]; 383 if (!xen_pt_get_bar_size(r)) { 384 return XEN_PT_BAR_FLAG_UNUSED; 385 } 386 387 /* for ExpROM BAR */ 388 if (index == PCI_ROM_SLOT) { 389 return XEN_PT_BAR_FLAG_MEM; 390 } 391 392 /* check BAR I/O indicator */ 393 if (s->real_device.io_regions[index].type & XEN_HOST_PCI_REGION_TYPE_IO) { 394 return XEN_PT_BAR_FLAG_IO; 395 } else { 396 return XEN_PT_BAR_FLAG_MEM; 397 } 398 } 399 400 static inline uint32_t base_address_with_flags(XenHostPCIIORegion *hr) 401 { 402 if (hr->type & XEN_HOST_PCI_REGION_TYPE_IO) { 403 return hr->base_addr | (hr->bus_flags & ~PCI_BASE_ADDRESS_IO_MASK); 404 } else { 405 return hr->base_addr | (hr->bus_flags & ~PCI_BASE_ADDRESS_MEM_MASK); 406 } 407 } 408 409 static int xen_pt_bar_reg_init(XenPCIPassthroughState *s, XenPTRegInfo *reg, 410 uint32_t real_offset, uint32_t *data) 411 { 412 uint32_t reg_field = 0; 413 int index; 414 415 index = xen_pt_bar_offset_to_index(reg->offset); 416 if (index < 0 || index >= PCI_NUM_REGIONS) { 417 XEN_PT_ERR(&s->dev, "Internal error: Invalid BAR index [%d].\n", index); 418 return -1; 419 } 420 421 /* set BAR flag */ 422 s->bases[index].bar_flag = xen_pt_bar_reg_parse(s, index); 423 if (s->bases[index].bar_flag == XEN_PT_BAR_FLAG_UNUSED) { 424 reg_field = XEN_PT_INVALID_REG; 425 } 426 427 *data = reg_field; 428 return 0; 429 } 430 static int xen_pt_bar_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry, 431 uint32_t *value, uint32_t valid_mask) 432 { 433 XenPTRegInfo *reg = cfg_entry->reg; 434 uint32_t valid_emu_mask = 0; 435 uint32_t bar_emu_mask = 0; 436 int index; 437 438 /* get BAR index */ 439 index = xen_pt_bar_offset_to_index(reg->offset); 440 if (index < 0 || index >= PCI_NUM_REGIONS - 1) { 441 XEN_PT_ERR(&s->dev, "Internal error: Invalid BAR index [%d].\n", index); 442 return -1; 443 } 444 445 /* use fixed-up value from kernel sysfs */ 446 *value = base_address_with_flags(&s->real_device.io_regions[index]); 447 448 /* set emulate mask depend on BAR flag */ 449 switch (s->bases[index].bar_flag) { 450 case XEN_PT_BAR_FLAG_MEM: 451 bar_emu_mask = XEN_PT_BAR_MEM_EMU_MASK; 452 break; 453 case XEN_PT_BAR_FLAG_IO: 454 bar_emu_mask = XEN_PT_BAR_IO_EMU_MASK; 455 break; 456 case XEN_PT_BAR_FLAG_UPPER: 457 bar_emu_mask = XEN_PT_BAR_ALLF; 458 break; 459 default: 460 break; 461 } 462 463 /* emulate BAR */ 464 valid_emu_mask = bar_emu_mask & valid_mask; 465 *value = XEN_PT_MERGE_VALUE(*value, *cfg_entry->ptr.word, ~valid_emu_mask); 466 467 return 0; 468 } 469 static int xen_pt_bar_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry, 470 uint32_t *val, uint32_t dev_value, 471 uint32_t valid_mask) 472 { 473 XenPTRegInfo *reg = cfg_entry->reg; 474 XenPTRegion *base = NULL; 475 PCIDevice *d = PCI_DEVICE(s); 476 const PCIIORegion *r; 477 uint32_t writable_mask = 0; 478 uint32_t bar_emu_mask = 0; 479 uint32_t bar_ro_mask = 0; 480 uint32_t r_size = 0; 481 int index = 0; 482 uint32_t *data = cfg_entry->ptr.word; 483 484 index = xen_pt_bar_offset_to_index(reg->offset); 485 if (index < 0 || index >= PCI_NUM_REGIONS) { 486 XEN_PT_ERR(d, "Internal error: Invalid BAR index [%d].\n", index); 487 return -1; 488 } 489 490 r = &d->io_regions[index]; 491 base = &s->bases[index]; 492 r_size = xen_pt_get_emul_size(base->bar_flag, r->size); 493 494 /* set emulate mask and read-only mask values depend on the BAR flag */ 495 switch (s->bases[index].bar_flag) { 496 case XEN_PT_BAR_FLAG_MEM: 497 bar_emu_mask = XEN_PT_BAR_MEM_EMU_MASK; 498 if (!r_size) { 499 /* low 32 bits mask for 64 bit bars */ 500 bar_ro_mask = XEN_PT_BAR_ALLF; 501 } else { 502 bar_ro_mask = XEN_PT_BAR_MEM_RO_MASK | (r_size - 1); 503 } 504 break; 505 case XEN_PT_BAR_FLAG_IO: 506 bar_emu_mask = XEN_PT_BAR_IO_EMU_MASK; 507 bar_ro_mask = XEN_PT_BAR_IO_RO_MASK | (r_size - 1); 508 break; 509 case XEN_PT_BAR_FLAG_UPPER: 510 assert(index > 0); 511 r_size = d->io_regions[index - 1].size >> 32; 512 bar_emu_mask = XEN_PT_BAR_ALLF; 513 bar_ro_mask = r_size ? r_size - 1 : 0; 514 break; 515 default: 516 break; 517 } 518 519 /* modify emulate register */ 520 writable_mask = bar_emu_mask & ~bar_ro_mask & valid_mask; 521 *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask); 522 523 /* check whether we need to update the virtual region address or not */ 524 switch (s->bases[index].bar_flag) { 525 case XEN_PT_BAR_FLAG_UPPER: 526 case XEN_PT_BAR_FLAG_MEM: 527 /* nothing to do */ 528 break; 529 case XEN_PT_BAR_FLAG_IO: 530 /* nothing to do */ 531 break; 532 default: 533 break; 534 } 535 536 /* create value for writing to I/O device register */ 537 *val = XEN_PT_MERGE_VALUE(*val, dev_value, 0); 538 539 return 0; 540 } 541 542 /* write Exp ROM BAR */ 543 static int xen_pt_exp_rom_bar_reg_write(XenPCIPassthroughState *s, 544 XenPTReg *cfg_entry, uint32_t *val, 545 uint32_t dev_value, uint32_t valid_mask) 546 { 547 XenPTRegInfo *reg = cfg_entry->reg; 548 XenPTRegion *base = NULL; 549 PCIDevice *d = PCI_DEVICE(s); 550 uint32_t writable_mask = 0; 551 uint32_t throughable_mask = get_throughable_mask(s, reg, valid_mask); 552 pcibus_t r_size = 0; 553 uint32_t bar_ro_mask = 0; 554 uint32_t *data = cfg_entry->ptr.word; 555 556 r_size = d->io_regions[PCI_ROM_SLOT].size; 557 base = &s->bases[PCI_ROM_SLOT]; 558 /* align memory type resource size */ 559 r_size = xen_pt_get_emul_size(base->bar_flag, r_size); 560 561 /* set emulate mask and read-only mask */ 562 bar_ro_mask = (reg->ro_mask | (r_size - 1)) & ~PCI_ROM_ADDRESS_ENABLE; 563 564 /* modify emulate register */ 565 writable_mask = ~bar_ro_mask & valid_mask; 566 *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask); 567 568 /* create value for writing to I/O device register */ 569 *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask); 570 571 return 0; 572 } 573 574 static int xen_pt_intel_opregion_read(XenPCIPassthroughState *s, 575 XenPTReg *cfg_entry, 576 uint32_t *value, uint32_t valid_mask) 577 { 578 *value = igd_read_opregion(s); 579 return 0; 580 } 581 582 static int xen_pt_intel_opregion_write(XenPCIPassthroughState *s, 583 XenPTReg *cfg_entry, uint32_t *value, 584 uint32_t dev_value, uint32_t valid_mask) 585 { 586 igd_write_opregion(s, *value); 587 return 0; 588 } 589 590 /* Header Type0 reg static information table */ 591 static XenPTRegInfo xen_pt_emu_reg_header0[] = { 592 /* Vendor ID reg */ 593 { 594 .offset = PCI_VENDOR_ID, 595 .size = 2, 596 .init_val = 0x0000, 597 .ro_mask = 0xFFFF, 598 .emu_mask = 0xFFFF, 599 .init = xen_pt_vendor_reg_init, 600 .u.w.read = xen_pt_word_reg_read, 601 .u.w.write = xen_pt_word_reg_write, 602 }, 603 /* Device ID reg */ 604 { 605 .offset = PCI_DEVICE_ID, 606 .size = 2, 607 .init_val = 0x0000, 608 .ro_mask = 0xFFFF, 609 .emu_mask = 0xFFFF, 610 .init = xen_pt_device_reg_init, 611 .u.w.read = xen_pt_word_reg_read, 612 .u.w.write = xen_pt_word_reg_write, 613 }, 614 /* Command reg */ 615 { 616 .offset = PCI_COMMAND, 617 .size = 2, 618 .init_val = 0x0000, 619 .res_mask = 0xF880, 620 .emu_mask = 0x0743, 621 .init = xen_pt_common_reg_init, 622 .u.w.read = xen_pt_word_reg_read, 623 .u.w.write = xen_pt_cmd_reg_write, 624 }, 625 /* Capabilities Pointer reg */ 626 { 627 .offset = PCI_CAPABILITY_LIST, 628 .size = 1, 629 .init_val = 0x00, 630 .ro_mask = 0xFF, 631 .emu_mask = 0xFF, 632 .init = xen_pt_ptr_reg_init, 633 .u.b.read = xen_pt_byte_reg_read, 634 .u.b.write = xen_pt_byte_reg_write, 635 }, 636 /* Status reg */ 637 /* use emulated Cap Ptr value to initialize, 638 * so need to be declared after Cap Ptr reg 639 */ 640 { 641 .offset = PCI_STATUS, 642 .size = 2, 643 .init_val = 0x0000, 644 .res_mask = 0x0007, 645 .ro_mask = 0x06F8, 646 .rw1c_mask = 0xF900, 647 .emu_mask = 0x0010, 648 .init = xen_pt_status_reg_init, 649 .u.w.read = xen_pt_word_reg_read, 650 .u.w.write = xen_pt_word_reg_write, 651 }, 652 /* Cache Line Size reg */ 653 { 654 .offset = PCI_CACHE_LINE_SIZE, 655 .size = 1, 656 .init_val = 0x00, 657 .ro_mask = 0x00, 658 .emu_mask = 0xFF, 659 .init = xen_pt_common_reg_init, 660 .u.b.read = xen_pt_byte_reg_read, 661 .u.b.write = xen_pt_byte_reg_write, 662 }, 663 /* Latency Timer reg */ 664 { 665 .offset = PCI_LATENCY_TIMER, 666 .size = 1, 667 .init_val = 0x00, 668 .ro_mask = 0x00, 669 .emu_mask = 0xFF, 670 .init = xen_pt_common_reg_init, 671 .u.b.read = xen_pt_byte_reg_read, 672 .u.b.write = xen_pt_byte_reg_write, 673 }, 674 /* Header Type reg */ 675 { 676 .offset = PCI_HEADER_TYPE, 677 .size = 1, 678 .init_val = 0x00, 679 .ro_mask = 0xFF, 680 .emu_mask = 0x00, 681 .init = xen_pt_header_type_reg_init, 682 .u.b.read = xen_pt_byte_reg_read, 683 .u.b.write = xen_pt_byte_reg_write, 684 }, 685 /* Interrupt Line reg */ 686 { 687 .offset = PCI_INTERRUPT_LINE, 688 .size = 1, 689 .init_val = 0x00, 690 .ro_mask = 0x00, 691 .emu_mask = 0xFF, 692 .init = xen_pt_common_reg_init, 693 .u.b.read = xen_pt_byte_reg_read, 694 .u.b.write = xen_pt_byte_reg_write, 695 }, 696 /* Interrupt Pin reg */ 697 { 698 .offset = PCI_INTERRUPT_PIN, 699 .size = 1, 700 .init_val = 0x00, 701 .ro_mask = 0xFF, 702 .emu_mask = 0xFF, 703 .init = xen_pt_irqpin_reg_init, 704 .u.b.read = xen_pt_byte_reg_read, 705 .u.b.write = xen_pt_byte_reg_write, 706 }, 707 /* BAR 0 reg */ 708 /* mask of BAR need to be decided later, depends on IO/MEM type */ 709 { 710 .offset = PCI_BASE_ADDRESS_0, 711 .size = 4, 712 .init_val = 0x00000000, 713 .init = xen_pt_bar_reg_init, 714 .u.dw.read = xen_pt_bar_reg_read, 715 .u.dw.write = xen_pt_bar_reg_write, 716 }, 717 /* BAR 1 reg */ 718 { 719 .offset = PCI_BASE_ADDRESS_1, 720 .size = 4, 721 .init_val = 0x00000000, 722 .init = xen_pt_bar_reg_init, 723 .u.dw.read = xen_pt_bar_reg_read, 724 .u.dw.write = xen_pt_bar_reg_write, 725 }, 726 /* BAR 2 reg */ 727 { 728 .offset = PCI_BASE_ADDRESS_2, 729 .size = 4, 730 .init_val = 0x00000000, 731 .init = xen_pt_bar_reg_init, 732 .u.dw.read = xen_pt_bar_reg_read, 733 .u.dw.write = xen_pt_bar_reg_write, 734 }, 735 /* BAR 3 reg */ 736 { 737 .offset = PCI_BASE_ADDRESS_3, 738 .size = 4, 739 .init_val = 0x00000000, 740 .init = xen_pt_bar_reg_init, 741 .u.dw.read = xen_pt_bar_reg_read, 742 .u.dw.write = xen_pt_bar_reg_write, 743 }, 744 /* BAR 4 reg */ 745 { 746 .offset = PCI_BASE_ADDRESS_4, 747 .size = 4, 748 .init_val = 0x00000000, 749 .init = xen_pt_bar_reg_init, 750 .u.dw.read = xen_pt_bar_reg_read, 751 .u.dw.write = xen_pt_bar_reg_write, 752 }, 753 /* BAR 5 reg */ 754 { 755 .offset = PCI_BASE_ADDRESS_5, 756 .size = 4, 757 .init_val = 0x00000000, 758 .init = xen_pt_bar_reg_init, 759 .u.dw.read = xen_pt_bar_reg_read, 760 .u.dw.write = xen_pt_bar_reg_write, 761 }, 762 /* Expansion ROM BAR reg */ 763 { 764 .offset = PCI_ROM_ADDRESS, 765 .size = 4, 766 .init_val = 0x00000000, 767 .ro_mask = ~PCI_ROM_ADDRESS_MASK & ~PCI_ROM_ADDRESS_ENABLE, 768 .emu_mask = (uint32_t)PCI_ROM_ADDRESS_MASK, 769 .init = xen_pt_bar_reg_init, 770 .u.dw.read = xen_pt_long_reg_read, 771 .u.dw.write = xen_pt_exp_rom_bar_reg_write, 772 }, 773 { 774 .size = 0, 775 }, 776 }; 777 778 779 /********************************* 780 * Vital Product Data Capability 781 */ 782 783 /* Vital Product Data Capability Structure reg static information table */ 784 static XenPTRegInfo xen_pt_emu_reg_vpd[] = { 785 { 786 .offset = PCI_CAP_LIST_NEXT, 787 .size = 1, 788 .init_val = 0x00, 789 .ro_mask = 0xFF, 790 .emu_mask = 0xFF, 791 .init = xen_pt_ptr_reg_init, 792 .u.b.read = xen_pt_byte_reg_read, 793 .u.b.write = xen_pt_byte_reg_write, 794 }, 795 { 796 .offset = PCI_VPD_ADDR, 797 .size = 2, 798 .ro_mask = 0x0003, 799 .emu_mask = 0x0003, 800 .init = xen_pt_common_reg_init, 801 .u.w.read = xen_pt_word_reg_read, 802 .u.w.write = xen_pt_word_reg_write, 803 }, 804 { 805 .size = 0, 806 }, 807 }; 808 809 810 /************************************** 811 * Vendor Specific Capability 812 */ 813 814 /* Vendor Specific Capability Structure reg static information table */ 815 static XenPTRegInfo xen_pt_emu_reg_vendor[] = { 816 { 817 .offset = PCI_CAP_LIST_NEXT, 818 .size = 1, 819 .init_val = 0x00, 820 .ro_mask = 0xFF, 821 .emu_mask = 0xFF, 822 .init = xen_pt_ptr_reg_init, 823 .u.b.read = xen_pt_byte_reg_read, 824 .u.b.write = xen_pt_byte_reg_write, 825 }, 826 { 827 .size = 0, 828 }, 829 }; 830 831 832 /***************************** 833 * PCI Express Capability 834 */ 835 836 static inline uint8_t get_capability_version(XenPCIPassthroughState *s, 837 uint32_t offset) 838 { 839 uint8_t flag; 840 if (xen_host_pci_get_byte(&s->real_device, offset + PCI_EXP_FLAGS, &flag)) { 841 return 0; 842 } 843 return flag & PCI_EXP_FLAGS_VERS; 844 } 845 846 static inline uint8_t get_device_type(XenPCIPassthroughState *s, 847 uint32_t offset) 848 { 849 uint8_t flag; 850 if (xen_host_pci_get_byte(&s->real_device, offset + PCI_EXP_FLAGS, &flag)) { 851 return 0; 852 } 853 return (flag & PCI_EXP_FLAGS_TYPE) >> 4; 854 } 855 856 /* initialize Link Control register */ 857 static int xen_pt_linkctrl_reg_init(XenPCIPassthroughState *s, 858 XenPTRegInfo *reg, uint32_t real_offset, 859 uint32_t *data) 860 { 861 uint8_t cap_ver = get_capability_version(s, real_offset - reg->offset); 862 uint8_t dev_type = get_device_type(s, real_offset - reg->offset); 863 864 /* no need to initialize in case of Root Complex Integrated Endpoint 865 * with cap_ver 1.x 866 */ 867 if ((dev_type == PCI_EXP_TYPE_RC_END) && (cap_ver == 1)) { 868 *data = XEN_PT_INVALID_REG; 869 } 870 871 *data = reg->init_val; 872 return 0; 873 } 874 /* initialize Device Control 2 register */ 875 static int xen_pt_devctrl2_reg_init(XenPCIPassthroughState *s, 876 XenPTRegInfo *reg, uint32_t real_offset, 877 uint32_t *data) 878 { 879 uint8_t cap_ver = get_capability_version(s, real_offset - reg->offset); 880 881 /* no need to initialize in case of cap_ver 1.x */ 882 if (cap_ver == 1) { 883 *data = XEN_PT_INVALID_REG; 884 } 885 886 *data = reg->init_val; 887 return 0; 888 } 889 /* initialize Link Control 2 register */ 890 static int xen_pt_linkctrl2_reg_init(XenPCIPassthroughState *s, 891 XenPTRegInfo *reg, uint32_t real_offset, 892 uint32_t *data) 893 { 894 uint8_t cap_ver = get_capability_version(s, real_offset - reg->offset); 895 uint32_t reg_field = 0; 896 897 /* no need to initialize in case of cap_ver 1.x */ 898 if (cap_ver == 1) { 899 reg_field = XEN_PT_INVALID_REG; 900 } else { 901 /* set Supported Link Speed */ 902 uint8_t lnkcap; 903 int rc; 904 rc = xen_host_pci_get_byte(&s->real_device, 905 real_offset - reg->offset + PCI_EXP_LNKCAP, 906 &lnkcap); 907 if (rc) { 908 return rc; 909 } 910 reg_field |= PCI_EXP_LNKCAP_SLS & lnkcap; 911 } 912 913 *data = reg_field; 914 return 0; 915 } 916 917 /* PCI Express Capability Structure reg static information table */ 918 static XenPTRegInfo xen_pt_emu_reg_pcie[] = { 919 /* Next Pointer reg */ 920 { 921 .offset = PCI_CAP_LIST_NEXT, 922 .size = 1, 923 .init_val = 0x00, 924 .ro_mask = 0xFF, 925 .emu_mask = 0xFF, 926 .init = xen_pt_ptr_reg_init, 927 .u.b.read = xen_pt_byte_reg_read, 928 .u.b.write = xen_pt_byte_reg_write, 929 }, 930 /* Device Capabilities reg */ 931 { 932 .offset = PCI_EXP_DEVCAP, 933 .size = 4, 934 .init_val = 0x00000000, 935 .ro_mask = 0xFFFFFFFF, 936 .emu_mask = 0x10000000, 937 .init = xen_pt_common_reg_init, 938 .u.dw.read = xen_pt_long_reg_read, 939 .u.dw.write = xen_pt_long_reg_write, 940 }, 941 /* Device Control reg */ 942 { 943 .offset = PCI_EXP_DEVCTL, 944 .size = 2, 945 .init_val = 0x2810, 946 .ro_mask = 0x8400, 947 .emu_mask = 0xFFFF, 948 .init = xen_pt_common_reg_init, 949 .u.w.read = xen_pt_word_reg_read, 950 .u.w.write = xen_pt_word_reg_write, 951 }, 952 /* Device Status reg */ 953 { 954 .offset = PCI_EXP_DEVSTA, 955 .size = 2, 956 .res_mask = 0xFFC0, 957 .ro_mask = 0x0030, 958 .rw1c_mask = 0x000F, 959 .init = xen_pt_common_reg_init, 960 .u.w.read = xen_pt_word_reg_read, 961 .u.w.write = xen_pt_word_reg_write, 962 }, 963 /* Link Control reg */ 964 { 965 .offset = PCI_EXP_LNKCTL, 966 .size = 2, 967 .init_val = 0x0000, 968 .ro_mask = 0xFC34, 969 .emu_mask = 0xFFFF, 970 .init = xen_pt_linkctrl_reg_init, 971 .u.w.read = xen_pt_word_reg_read, 972 .u.w.write = xen_pt_word_reg_write, 973 }, 974 /* Link Status reg */ 975 { 976 .offset = PCI_EXP_LNKSTA, 977 .size = 2, 978 .ro_mask = 0x3FFF, 979 .rw1c_mask = 0xC000, 980 .init = xen_pt_common_reg_init, 981 .u.w.read = xen_pt_word_reg_read, 982 .u.w.write = xen_pt_word_reg_write, 983 }, 984 /* Device Control 2 reg */ 985 { 986 .offset = 0x28, 987 .size = 2, 988 .init_val = 0x0000, 989 .ro_mask = 0xFFA0, 990 .emu_mask = 0xFFBF, 991 .init = xen_pt_devctrl2_reg_init, 992 .u.w.read = xen_pt_word_reg_read, 993 .u.w.write = xen_pt_word_reg_write, 994 }, 995 /* Link Control 2 reg */ 996 { 997 .offset = 0x30, 998 .size = 2, 999 .init_val = 0x0000, 1000 .ro_mask = 0xE040, 1001 .emu_mask = 0xFFFF, 1002 .init = xen_pt_linkctrl2_reg_init, 1003 .u.w.read = xen_pt_word_reg_read, 1004 .u.w.write = xen_pt_word_reg_write, 1005 }, 1006 { 1007 .size = 0, 1008 }, 1009 }; 1010 1011 1012 /********************************* 1013 * Power Management Capability 1014 */ 1015 1016 /* Power Management Capability reg static information table */ 1017 static XenPTRegInfo xen_pt_emu_reg_pm[] = { 1018 /* Next Pointer reg */ 1019 { 1020 .offset = PCI_CAP_LIST_NEXT, 1021 .size = 1, 1022 .init_val = 0x00, 1023 .ro_mask = 0xFF, 1024 .emu_mask = 0xFF, 1025 .init = xen_pt_ptr_reg_init, 1026 .u.b.read = xen_pt_byte_reg_read, 1027 .u.b.write = xen_pt_byte_reg_write, 1028 }, 1029 /* Power Management Capabilities reg */ 1030 { 1031 .offset = PCI_CAP_FLAGS, 1032 .size = 2, 1033 .init_val = 0x0000, 1034 .ro_mask = 0xFFFF, 1035 .emu_mask = 0xF9C8, 1036 .init = xen_pt_common_reg_init, 1037 .u.w.read = xen_pt_word_reg_read, 1038 .u.w.write = xen_pt_word_reg_write, 1039 }, 1040 /* PCI Power Management Control/Status reg */ 1041 { 1042 .offset = PCI_PM_CTRL, 1043 .size = 2, 1044 .init_val = 0x0008, 1045 .res_mask = 0x00F0, 1046 .ro_mask = 0x610C, 1047 .rw1c_mask = 0x8000, 1048 .emu_mask = 0x810B, 1049 .init = xen_pt_common_reg_init, 1050 .u.w.read = xen_pt_word_reg_read, 1051 .u.w.write = xen_pt_word_reg_write, 1052 }, 1053 { 1054 .size = 0, 1055 }, 1056 }; 1057 1058 1059 /******************************** 1060 * MSI Capability 1061 */ 1062 1063 /* Helper */ 1064 #define xen_pt_msi_check_type(offset, flags, what) \ 1065 ((offset) == ((flags) & PCI_MSI_FLAGS_64BIT ? \ 1066 PCI_MSI_##what##_64 : PCI_MSI_##what##_32)) 1067 1068 /* Message Control register */ 1069 static int xen_pt_msgctrl_reg_init(XenPCIPassthroughState *s, 1070 XenPTRegInfo *reg, uint32_t real_offset, 1071 uint32_t *data) 1072 { 1073 XenPTMSI *msi = s->msi; 1074 uint16_t reg_field; 1075 int rc; 1076 1077 /* use I/O device register's value as initial value */ 1078 rc = xen_host_pci_get_word(&s->real_device, real_offset, ®_field); 1079 if (rc) { 1080 return rc; 1081 } 1082 if (reg_field & PCI_MSI_FLAGS_ENABLE) { 1083 XEN_PT_LOG(&s->dev, "MSI already enabled, disabling it first\n"); 1084 xen_host_pci_set_word(&s->real_device, real_offset, 1085 reg_field & ~PCI_MSI_FLAGS_ENABLE); 1086 } 1087 msi->flags |= reg_field; 1088 msi->ctrl_offset = real_offset; 1089 msi->initialized = false; 1090 msi->mapped = false; 1091 1092 *data = reg->init_val; 1093 return 0; 1094 } 1095 static int xen_pt_msgctrl_reg_write(XenPCIPassthroughState *s, 1096 XenPTReg *cfg_entry, uint16_t *val, 1097 uint16_t dev_value, uint16_t valid_mask) 1098 { 1099 XenPTRegInfo *reg = cfg_entry->reg; 1100 XenPTMSI *msi = s->msi; 1101 uint16_t writable_mask = 0; 1102 uint16_t throughable_mask = get_throughable_mask(s, reg, valid_mask); 1103 uint16_t *data = cfg_entry->ptr.half_word; 1104 1105 /* Currently no support for multi-vector */ 1106 if (*val & PCI_MSI_FLAGS_QSIZE) { 1107 XEN_PT_WARN(&s->dev, "Tries to set more than 1 vector ctrl %x\n", *val); 1108 } 1109 1110 /* modify emulate register */ 1111 writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask; 1112 *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask); 1113 msi->flags |= *data & ~PCI_MSI_FLAGS_ENABLE; 1114 1115 /* create value for writing to I/O device register */ 1116 *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask); 1117 1118 /* update MSI */ 1119 if (*val & PCI_MSI_FLAGS_ENABLE) { 1120 /* setup MSI pirq for the first time */ 1121 if (!msi->initialized) { 1122 /* Init physical one */ 1123 XEN_PT_LOG(&s->dev, "setup MSI (register: %x).\n", *val); 1124 if (xen_pt_msi_setup(s)) { 1125 /* We do not broadcast the error to the framework code, so 1126 * that MSI errors are contained in MSI emulation code and 1127 * QEMU can go on running. 1128 * Guest MSI would be actually not working. 1129 */ 1130 *val &= ~PCI_MSI_FLAGS_ENABLE; 1131 XEN_PT_WARN(&s->dev, "Can not map MSI (register: %x)!\n", *val); 1132 return 0; 1133 } 1134 if (xen_pt_msi_update(s)) { 1135 *val &= ~PCI_MSI_FLAGS_ENABLE; 1136 XEN_PT_WARN(&s->dev, "Can not bind MSI (register: %x)!\n", *val); 1137 return 0; 1138 } 1139 msi->initialized = true; 1140 msi->mapped = true; 1141 } 1142 msi->flags |= PCI_MSI_FLAGS_ENABLE; 1143 } else if (msi->mapped) { 1144 xen_pt_msi_disable(s); 1145 } 1146 1147 return 0; 1148 } 1149 1150 /* initialize Message Upper Address register */ 1151 static int xen_pt_msgaddr64_reg_init(XenPCIPassthroughState *s, 1152 XenPTRegInfo *reg, uint32_t real_offset, 1153 uint32_t *data) 1154 { 1155 /* no need to initialize in case of 32 bit type */ 1156 if (!(s->msi->flags & PCI_MSI_FLAGS_64BIT)) { 1157 *data = XEN_PT_INVALID_REG; 1158 } else { 1159 *data = reg->init_val; 1160 } 1161 1162 return 0; 1163 } 1164 /* this function will be called twice (for 32 bit and 64 bit type) */ 1165 /* initialize Message Data register */ 1166 static int xen_pt_msgdata_reg_init(XenPCIPassthroughState *s, 1167 XenPTRegInfo *reg, uint32_t real_offset, 1168 uint32_t *data) 1169 { 1170 uint32_t flags = s->msi->flags; 1171 uint32_t offset = reg->offset; 1172 1173 /* check the offset whether matches the type or not */ 1174 if (xen_pt_msi_check_type(offset, flags, DATA)) { 1175 *data = reg->init_val; 1176 } else { 1177 *data = XEN_PT_INVALID_REG; 1178 } 1179 return 0; 1180 } 1181 1182 /* this function will be called twice (for 32 bit and 64 bit type) */ 1183 /* initialize Mask register */ 1184 static int xen_pt_mask_reg_init(XenPCIPassthroughState *s, 1185 XenPTRegInfo *reg, uint32_t real_offset, 1186 uint32_t *data) 1187 { 1188 uint32_t flags = s->msi->flags; 1189 1190 /* check the offset whether matches the type or not */ 1191 if (!(flags & PCI_MSI_FLAGS_MASKBIT)) { 1192 *data = XEN_PT_INVALID_REG; 1193 } else if (xen_pt_msi_check_type(reg->offset, flags, MASK)) { 1194 *data = reg->init_val; 1195 } else { 1196 *data = XEN_PT_INVALID_REG; 1197 } 1198 return 0; 1199 } 1200 1201 /* this function will be called twice (for 32 bit and 64 bit type) */ 1202 /* initialize Pending register */ 1203 static int xen_pt_pending_reg_init(XenPCIPassthroughState *s, 1204 XenPTRegInfo *reg, uint32_t real_offset, 1205 uint32_t *data) 1206 { 1207 uint32_t flags = s->msi->flags; 1208 1209 /* check the offset whether matches the type or not */ 1210 if (!(flags & PCI_MSI_FLAGS_MASKBIT)) { 1211 *data = XEN_PT_INVALID_REG; 1212 } else if (xen_pt_msi_check_type(reg->offset, flags, PENDING)) { 1213 *data = reg->init_val; 1214 } else { 1215 *data = XEN_PT_INVALID_REG; 1216 } 1217 return 0; 1218 } 1219 1220 /* write Message Address register */ 1221 static int xen_pt_msgaddr32_reg_write(XenPCIPassthroughState *s, 1222 XenPTReg *cfg_entry, uint32_t *val, 1223 uint32_t dev_value, uint32_t valid_mask) 1224 { 1225 XenPTRegInfo *reg = cfg_entry->reg; 1226 uint32_t writable_mask = 0; 1227 uint32_t old_addr = *cfg_entry->ptr.word; 1228 uint32_t *data = cfg_entry->ptr.word; 1229 1230 /* modify emulate register */ 1231 writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask; 1232 *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask); 1233 s->msi->addr_lo = *data; 1234 1235 /* create value for writing to I/O device register */ 1236 *val = XEN_PT_MERGE_VALUE(*val, dev_value, 0); 1237 1238 /* update MSI */ 1239 if (*data != old_addr) { 1240 if (s->msi->mapped) { 1241 xen_pt_msi_update(s); 1242 } 1243 } 1244 1245 return 0; 1246 } 1247 /* write Message Upper Address register */ 1248 static int xen_pt_msgaddr64_reg_write(XenPCIPassthroughState *s, 1249 XenPTReg *cfg_entry, uint32_t *val, 1250 uint32_t dev_value, uint32_t valid_mask) 1251 { 1252 XenPTRegInfo *reg = cfg_entry->reg; 1253 uint32_t writable_mask = 0; 1254 uint32_t old_addr = *cfg_entry->ptr.word; 1255 uint32_t *data = cfg_entry->ptr.word; 1256 1257 /* check whether the type is 64 bit or not */ 1258 if (!(s->msi->flags & PCI_MSI_FLAGS_64BIT)) { 1259 XEN_PT_ERR(&s->dev, 1260 "Can't write to the upper address without 64 bit support\n"); 1261 return -1; 1262 } 1263 1264 /* modify emulate register */ 1265 writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask; 1266 *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask); 1267 /* update the msi_info too */ 1268 s->msi->addr_hi = *data; 1269 1270 /* create value for writing to I/O device register */ 1271 *val = XEN_PT_MERGE_VALUE(*val, dev_value, 0); 1272 1273 /* update MSI */ 1274 if (*data != old_addr) { 1275 if (s->msi->mapped) { 1276 xen_pt_msi_update(s); 1277 } 1278 } 1279 1280 return 0; 1281 } 1282 1283 1284 /* this function will be called twice (for 32 bit and 64 bit type) */ 1285 /* write Message Data register */ 1286 static int xen_pt_msgdata_reg_write(XenPCIPassthroughState *s, 1287 XenPTReg *cfg_entry, uint16_t *val, 1288 uint16_t dev_value, uint16_t valid_mask) 1289 { 1290 XenPTRegInfo *reg = cfg_entry->reg; 1291 XenPTMSI *msi = s->msi; 1292 uint16_t writable_mask = 0; 1293 uint16_t old_data = *cfg_entry->ptr.half_word; 1294 uint32_t offset = reg->offset; 1295 uint16_t *data = cfg_entry->ptr.half_word; 1296 1297 /* check the offset whether matches the type or not */ 1298 if (!xen_pt_msi_check_type(offset, msi->flags, DATA)) { 1299 /* exit I/O emulator */ 1300 XEN_PT_ERR(&s->dev, "the offset does not match the 32/64 bit type!\n"); 1301 return -1; 1302 } 1303 1304 /* modify emulate register */ 1305 writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask; 1306 *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask); 1307 /* update the msi_info too */ 1308 msi->data = *data; 1309 1310 /* create value for writing to I/O device register */ 1311 *val = XEN_PT_MERGE_VALUE(*val, dev_value, 0); 1312 1313 /* update MSI */ 1314 if (*data != old_data) { 1315 if (msi->mapped) { 1316 xen_pt_msi_update(s); 1317 } 1318 } 1319 1320 return 0; 1321 } 1322 1323 static int xen_pt_mask_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry, 1324 uint32_t *val, uint32_t dev_value, 1325 uint32_t valid_mask) 1326 { 1327 int rc; 1328 1329 rc = xen_pt_long_reg_write(s, cfg_entry, val, dev_value, valid_mask); 1330 if (rc) { 1331 return rc; 1332 } 1333 1334 s->msi->mask = *val; 1335 1336 return 0; 1337 } 1338 1339 /* MSI Capability Structure reg static information table */ 1340 static XenPTRegInfo xen_pt_emu_reg_msi[] = { 1341 /* Next Pointer reg */ 1342 { 1343 .offset = PCI_CAP_LIST_NEXT, 1344 .size = 1, 1345 .init_val = 0x00, 1346 .ro_mask = 0xFF, 1347 .emu_mask = 0xFF, 1348 .init = xen_pt_ptr_reg_init, 1349 .u.b.read = xen_pt_byte_reg_read, 1350 .u.b.write = xen_pt_byte_reg_write, 1351 }, 1352 /* Message Control reg */ 1353 { 1354 .offset = PCI_MSI_FLAGS, 1355 .size = 2, 1356 .init_val = 0x0000, 1357 .res_mask = 0xFE00, 1358 .ro_mask = 0x018E, 1359 .emu_mask = 0x017E, 1360 .init = xen_pt_msgctrl_reg_init, 1361 .u.w.read = xen_pt_word_reg_read, 1362 .u.w.write = xen_pt_msgctrl_reg_write, 1363 }, 1364 /* Message Address reg */ 1365 { 1366 .offset = PCI_MSI_ADDRESS_LO, 1367 .size = 4, 1368 .init_val = 0x00000000, 1369 .ro_mask = 0x00000003, 1370 .emu_mask = 0xFFFFFFFF, 1371 .init = xen_pt_common_reg_init, 1372 .u.dw.read = xen_pt_long_reg_read, 1373 .u.dw.write = xen_pt_msgaddr32_reg_write, 1374 }, 1375 /* Message Upper Address reg (if PCI_MSI_FLAGS_64BIT set) */ 1376 { 1377 .offset = PCI_MSI_ADDRESS_HI, 1378 .size = 4, 1379 .init_val = 0x00000000, 1380 .ro_mask = 0x00000000, 1381 .emu_mask = 0xFFFFFFFF, 1382 .init = xen_pt_msgaddr64_reg_init, 1383 .u.dw.read = xen_pt_long_reg_read, 1384 .u.dw.write = xen_pt_msgaddr64_reg_write, 1385 }, 1386 /* Message Data reg (16 bits of data for 32-bit devices) */ 1387 { 1388 .offset = PCI_MSI_DATA_32, 1389 .size = 2, 1390 .init_val = 0x0000, 1391 .ro_mask = 0x0000, 1392 .emu_mask = 0xFFFF, 1393 .init = xen_pt_msgdata_reg_init, 1394 .u.w.read = xen_pt_word_reg_read, 1395 .u.w.write = xen_pt_msgdata_reg_write, 1396 }, 1397 /* Message Data reg (16 bits of data for 64-bit devices) */ 1398 { 1399 .offset = PCI_MSI_DATA_64, 1400 .size = 2, 1401 .init_val = 0x0000, 1402 .ro_mask = 0x0000, 1403 .emu_mask = 0xFFFF, 1404 .init = xen_pt_msgdata_reg_init, 1405 .u.w.read = xen_pt_word_reg_read, 1406 .u.w.write = xen_pt_msgdata_reg_write, 1407 }, 1408 /* Mask reg (if PCI_MSI_FLAGS_MASKBIT set, for 32-bit devices) */ 1409 { 1410 .offset = PCI_MSI_MASK_32, 1411 .size = 4, 1412 .init_val = 0x00000000, 1413 .ro_mask = 0xFFFFFFFF, 1414 .emu_mask = 0xFFFFFFFF, 1415 .init = xen_pt_mask_reg_init, 1416 .u.dw.read = xen_pt_long_reg_read, 1417 .u.dw.write = xen_pt_mask_reg_write, 1418 }, 1419 /* Mask reg (if PCI_MSI_FLAGS_MASKBIT set, for 64-bit devices) */ 1420 { 1421 .offset = PCI_MSI_MASK_64, 1422 .size = 4, 1423 .init_val = 0x00000000, 1424 .ro_mask = 0xFFFFFFFF, 1425 .emu_mask = 0xFFFFFFFF, 1426 .init = xen_pt_mask_reg_init, 1427 .u.dw.read = xen_pt_long_reg_read, 1428 .u.dw.write = xen_pt_mask_reg_write, 1429 }, 1430 /* Pending reg (if PCI_MSI_FLAGS_MASKBIT set, for 32-bit devices) */ 1431 { 1432 .offset = PCI_MSI_MASK_32 + 4, 1433 .size = 4, 1434 .init_val = 0x00000000, 1435 .ro_mask = 0xFFFFFFFF, 1436 .emu_mask = 0x00000000, 1437 .init = xen_pt_pending_reg_init, 1438 .u.dw.read = xen_pt_long_reg_read, 1439 .u.dw.write = xen_pt_long_reg_write, 1440 }, 1441 /* Pending reg (if PCI_MSI_FLAGS_MASKBIT set, for 64-bit devices) */ 1442 { 1443 .offset = PCI_MSI_MASK_64 + 4, 1444 .size = 4, 1445 .init_val = 0x00000000, 1446 .ro_mask = 0xFFFFFFFF, 1447 .emu_mask = 0x00000000, 1448 .init = xen_pt_pending_reg_init, 1449 .u.dw.read = xen_pt_long_reg_read, 1450 .u.dw.write = xen_pt_long_reg_write, 1451 }, 1452 { 1453 .size = 0, 1454 }, 1455 }; 1456 1457 1458 /************************************** 1459 * MSI-X Capability 1460 */ 1461 1462 /* Message Control register for MSI-X */ 1463 static int xen_pt_msixctrl_reg_init(XenPCIPassthroughState *s, 1464 XenPTRegInfo *reg, uint32_t real_offset, 1465 uint32_t *data) 1466 { 1467 uint16_t reg_field; 1468 int rc; 1469 1470 /* use I/O device register's value as initial value */ 1471 rc = xen_host_pci_get_word(&s->real_device, real_offset, ®_field); 1472 if (rc) { 1473 return rc; 1474 } 1475 if (reg_field & PCI_MSIX_FLAGS_ENABLE) { 1476 XEN_PT_LOG(&s->dev, "MSIX already enabled, disabling it first\n"); 1477 xen_host_pci_set_word(&s->real_device, real_offset, 1478 reg_field & ~PCI_MSIX_FLAGS_ENABLE); 1479 } 1480 1481 s->msix->ctrl_offset = real_offset; 1482 1483 *data = reg->init_val; 1484 return 0; 1485 } 1486 static int xen_pt_msixctrl_reg_write(XenPCIPassthroughState *s, 1487 XenPTReg *cfg_entry, uint16_t *val, 1488 uint16_t dev_value, uint16_t valid_mask) 1489 { 1490 XenPTRegInfo *reg = cfg_entry->reg; 1491 uint16_t writable_mask = 0; 1492 uint16_t throughable_mask = get_throughable_mask(s, reg, valid_mask); 1493 int debug_msix_enabled_old; 1494 uint16_t *data = cfg_entry->ptr.half_word; 1495 1496 /* modify emulate register */ 1497 writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask; 1498 *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask); 1499 1500 /* create value for writing to I/O device register */ 1501 *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask); 1502 1503 /* update MSI-X */ 1504 if ((*val & PCI_MSIX_FLAGS_ENABLE) 1505 && !(*val & PCI_MSIX_FLAGS_MASKALL)) { 1506 xen_pt_msix_update(s); 1507 } else if (!(*val & PCI_MSIX_FLAGS_ENABLE) && s->msix->enabled) { 1508 xen_pt_msix_disable(s); 1509 } 1510 1511 s->msix->maskall = *val & PCI_MSIX_FLAGS_MASKALL; 1512 1513 debug_msix_enabled_old = s->msix->enabled; 1514 s->msix->enabled = !!(*val & PCI_MSIX_FLAGS_ENABLE); 1515 if (s->msix->enabled != debug_msix_enabled_old) { 1516 XEN_PT_LOG(&s->dev, "%s MSI-X\n", 1517 s->msix->enabled ? "enable" : "disable"); 1518 } 1519 1520 return 0; 1521 } 1522 1523 /* MSI-X Capability Structure reg static information table */ 1524 static XenPTRegInfo xen_pt_emu_reg_msix[] = { 1525 /* Next Pointer reg */ 1526 { 1527 .offset = PCI_CAP_LIST_NEXT, 1528 .size = 1, 1529 .init_val = 0x00, 1530 .ro_mask = 0xFF, 1531 .emu_mask = 0xFF, 1532 .init = xen_pt_ptr_reg_init, 1533 .u.b.read = xen_pt_byte_reg_read, 1534 .u.b.write = xen_pt_byte_reg_write, 1535 }, 1536 /* Message Control reg */ 1537 { 1538 .offset = PCI_MSI_FLAGS, 1539 .size = 2, 1540 .init_val = 0x0000, 1541 .res_mask = 0x3800, 1542 .ro_mask = 0x07FF, 1543 .emu_mask = 0x0000, 1544 .init = xen_pt_msixctrl_reg_init, 1545 .u.w.read = xen_pt_word_reg_read, 1546 .u.w.write = xen_pt_msixctrl_reg_write, 1547 }, 1548 { 1549 .size = 0, 1550 }, 1551 }; 1552 1553 static XenPTRegInfo xen_pt_emu_reg_igd_opregion[] = { 1554 /* Intel IGFX OpRegion reg */ 1555 { 1556 .offset = 0x0, 1557 .size = 4, 1558 .init_val = 0, 1559 .emu_mask = 0xFFFFFFFF, 1560 .u.dw.read = xen_pt_intel_opregion_read, 1561 .u.dw.write = xen_pt_intel_opregion_write, 1562 }, 1563 { 1564 .size = 0, 1565 }, 1566 }; 1567 1568 /**************************** 1569 * Capabilities 1570 */ 1571 1572 /* capability structure register group size functions */ 1573 1574 static int xen_pt_reg_grp_size_init(XenPCIPassthroughState *s, 1575 const XenPTRegGroupInfo *grp_reg, 1576 uint32_t base_offset, uint8_t *size) 1577 { 1578 *size = grp_reg->grp_size; 1579 return 0; 1580 } 1581 /* get Vendor Specific Capability Structure register group size */ 1582 static int xen_pt_vendor_size_init(XenPCIPassthroughState *s, 1583 const XenPTRegGroupInfo *grp_reg, 1584 uint32_t base_offset, uint8_t *size) 1585 { 1586 return xen_host_pci_get_byte(&s->real_device, base_offset + 0x02, size); 1587 } 1588 /* get PCI Express Capability Structure register group size */ 1589 static int xen_pt_pcie_size_init(XenPCIPassthroughState *s, 1590 const XenPTRegGroupInfo *grp_reg, 1591 uint32_t base_offset, uint8_t *size) 1592 { 1593 PCIDevice *d = PCI_DEVICE(s); 1594 uint8_t version = get_capability_version(s, base_offset); 1595 uint8_t type = get_device_type(s, base_offset); 1596 uint8_t pcie_size = 0; 1597 1598 1599 /* calculate size depend on capability version and device/port type */ 1600 /* in case of PCI Express Base Specification Rev 1.x */ 1601 if (version == 1) { 1602 /* The PCI Express Capabilities, Device Capabilities, and Device 1603 * Status/Control registers are required for all PCI Express devices. 1604 * The Link Capabilities and Link Status/Control are required for all 1605 * Endpoints that are not Root Complex Integrated Endpoints. Endpoints 1606 * are not required to implement registers other than those listed 1607 * above and terminate the capability structure. 1608 */ 1609 switch (type) { 1610 case PCI_EXP_TYPE_ENDPOINT: 1611 case PCI_EXP_TYPE_LEG_END: 1612 pcie_size = 0x14; 1613 break; 1614 case PCI_EXP_TYPE_RC_END: 1615 /* has no link */ 1616 pcie_size = 0x0C; 1617 break; 1618 /* only EndPoint passthrough is supported */ 1619 case PCI_EXP_TYPE_ROOT_PORT: 1620 case PCI_EXP_TYPE_UPSTREAM: 1621 case PCI_EXP_TYPE_DOWNSTREAM: 1622 case PCI_EXP_TYPE_PCI_BRIDGE: 1623 case PCI_EXP_TYPE_PCIE_BRIDGE: 1624 case PCI_EXP_TYPE_RC_EC: 1625 default: 1626 XEN_PT_ERR(d, "Unsupported device/port type 0x%x.\n", type); 1627 return -1; 1628 } 1629 } 1630 /* in case of PCI Express Base Specification Rev 2.0 */ 1631 else if (version == 2) { 1632 switch (type) { 1633 case PCI_EXP_TYPE_ENDPOINT: 1634 case PCI_EXP_TYPE_LEG_END: 1635 case PCI_EXP_TYPE_RC_END: 1636 /* For Functions that do not implement the registers, 1637 * these spaces must be hardwired to 0b. 1638 */ 1639 pcie_size = 0x3C; 1640 break; 1641 /* only EndPoint passthrough is supported */ 1642 case PCI_EXP_TYPE_ROOT_PORT: 1643 case PCI_EXP_TYPE_UPSTREAM: 1644 case PCI_EXP_TYPE_DOWNSTREAM: 1645 case PCI_EXP_TYPE_PCI_BRIDGE: 1646 case PCI_EXP_TYPE_PCIE_BRIDGE: 1647 case PCI_EXP_TYPE_RC_EC: 1648 default: 1649 XEN_PT_ERR(d, "Unsupported device/port type 0x%x.\n", type); 1650 return -1; 1651 } 1652 } else { 1653 XEN_PT_ERR(d, "Unsupported capability version 0x%x.\n", version); 1654 return -1; 1655 } 1656 1657 *size = pcie_size; 1658 return 0; 1659 } 1660 /* get MSI Capability Structure register group size */ 1661 static int xen_pt_msi_size_init(XenPCIPassthroughState *s, 1662 const XenPTRegGroupInfo *grp_reg, 1663 uint32_t base_offset, uint8_t *size) 1664 { 1665 uint16_t msg_ctrl = 0; 1666 uint8_t msi_size = 0xa; 1667 int rc; 1668 1669 rc = xen_host_pci_get_word(&s->real_device, base_offset + PCI_MSI_FLAGS, 1670 &msg_ctrl); 1671 if (rc) { 1672 return rc; 1673 } 1674 /* check if 64-bit address is capable of per-vector masking */ 1675 if (msg_ctrl & PCI_MSI_FLAGS_64BIT) { 1676 msi_size += 4; 1677 } 1678 if (msg_ctrl & PCI_MSI_FLAGS_MASKBIT) { 1679 msi_size += 10; 1680 } 1681 1682 s->msi = g_new0(XenPTMSI, 1); 1683 s->msi->pirq = XEN_PT_UNASSIGNED_PIRQ; 1684 1685 *size = msi_size; 1686 return 0; 1687 } 1688 /* get MSI-X Capability Structure register group size */ 1689 static int xen_pt_msix_size_init(XenPCIPassthroughState *s, 1690 const XenPTRegGroupInfo *grp_reg, 1691 uint32_t base_offset, uint8_t *size) 1692 { 1693 int rc = 0; 1694 1695 rc = xen_pt_msix_init(s, base_offset); 1696 1697 if (rc < 0) { 1698 XEN_PT_ERR(&s->dev, "Internal error: Invalid xen_pt_msix_init.\n"); 1699 return rc; 1700 } 1701 1702 *size = grp_reg->grp_size; 1703 return 0; 1704 } 1705 1706 1707 static const XenPTRegGroupInfo xen_pt_emu_reg_grps[] = { 1708 /* Header Type0 reg group */ 1709 { 1710 .grp_id = 0xFF, 1711 .grp_type = XEN_PT_GRP_TYPE_EMU, 1712 .grp_size = 0x40, 1713 .size_init = xen_pt_reg_grp_size_init, 1714 .emu_regs = xen_pt_emu_reg_header0, 1715 }, 1716 /* PCI PowerManagement Capability reg group */ 1717 { 1718 .grp_id = PCI_CAP_ID_PM, 1719 .grp_type = XEN_PT_GRP_TYPE_EMU, 1720 .grp_size = PCI_PM_SIZEOF, 1721 .size_init = xen_pt_reg_grp_size_init, 1722 .emu_regs = xen_pt_emu_reg_pm, 1723 }, 1724 /* AGP Capability Structure reg group */ 1725 { 1726 .grp_id = PCI_CAP_ID_AGP, 1727 .grp_type = XEN_PT_GRP_TYPE_HARDWIRED, 1728 .grp_size = 0x30, 1729 .size_init = xen_pt_reg_grp_size_init, 1730 }, 1731 /* Vital Product Data Capability Structure reg group */ 1732 { 1733 .grp_id = PCI_CAP_ID_VPD, 1734 .grp_type = XEN_PT_GRP_TYPE_EMU, 1735 .grp_size = 0x08, 1736 .size_init = xen_pt_reg_grp_size_init, 1737 .emu_regs = xen_pt_emu_reg_vpd, 1738 }, 1739 /* Slot Identification reg group */ 1740 { 1741 .grp_id = PCI_CAP_ID_SLOTID, 1742 .grp_type = XEN_PT_GRP_TYPE_HARDWIRED, 1743 .grp_size = 0x04, 1744 .size_init = xen_pt_reg_grp_size_init, 1745 }, 1746 /* MSI Capability Structure reg group */ 1747 { 1748 .grp_id = PCI_CAP_ID_MSI, 1749 .grp_type = XEN_PT_GRP_TYPE_EMU, 1750 .grp_size = 0xFF, 1751 .size_init = xen_pt_msi_size_init, 1752 .emu_regs = xen_pt_emu_reg_msi, 1753 }, 1754 /* PCI-X Capabilities List Item reg group */ 1755 { 1756 .grp_id = PCI_CAP_ID_PCIX, 1757 .grp_type = XEN_PT_GRP_TYPE_HARDWIRED, 1758 .grp_size = 0x18, 1759 .size_init = xen_pt_reg_grp_size_init, 1760 }, 1761 /* Vendor Specific Capability Structure reg group */ 1762 { 1763 .grp_id = PCI_CAP_ID_VNDR, 1764 .grp_type = XEN_PT_GRP_TYPE_EMU, 1765 .grp_size = 0xFF, 1766 .size_init = xen_pt_vendor_size_init, 1767 .emu_regs = xen_pt_emu_reg_vendor, 1768 }, 1769 /* SHPC Capability List Item reg group */ 1770 { 1771 .grp_id = PCI_CAP_ID_SHPC, 1772 .grp_type = XEN_PT_GRP_TYPE_HARDWIRED, 1773 .grp_size = 0x08, 1774 .size_init = xen_pt_reg_grp_size_init, 1775 }, 1776 /* Subsystem ID and Subsystem Vendor ID Capability List Item reg group */ 1777 { 1778 .grp_id = PCI_CAP_ID_SSVID, 1779 .grp_type = XEN_PT_GRP_TYPE_HARDWIRED, 1780 .grp_size = 0x08, 1781 .size_init = xen_pt_reg_grp_size_init, 1782 }, 1783 /* AGP 8x Capability Structure reg group */ 1784 { 1785 .grp_id = PCI_CAP_ID_AGP3, 1786 .grp_type = XEN_PT_GRP_TYPE_HARDWIRED, 1787 .grp_size = 0x30, 1788 .size_init = xen_pt_reg_grp_size_init, 1789 }, 1790 /* PCI Express Capability Structure reg group */ 1791 { 1792 .grp_id = PCI_CAP_ID_EXP, 1793 .grp_type = XEN_PT_GRP_TYPE_EMU, 1794 .grp_size = 0xFF, 1795 .size_init = xen_pt_pcie_size_init, 1796 .emu_regs = xen_pt_emu_reg_pcie, 1797 }, 1798 /* MSI-X Capability Structure reg group */ 1799 { 1800 .grp_id = PCI_CAP_ID_MSIX, 1801 .grp_type = XEN_PT_GRP_TYPE_EMU, 1802 .grp_size = 0x0C, 1803 .size_init = xen_pt_msix_size_init, 1804 .emu_regs = xen_pt_emu_reg_msix, 1805 }, 1806 /* Intel IGD Opregion group */ 1807 { 1808 .grp_id = XEN_PCI_INTEL_OPREGION, 1809 .grp_type = XEN_PT_GRP_TYPE_EMU, 1810 .grp_size = 0x4, 1811 .size_init = xen_pt_reg_grp_size_init, 1812 .emu_regs = xen_pt_emu_reg_igd_opregion, 1813 }, 1814 { 1815 .grp_size = 0, 1816 }, 1817 }; 1818 1819 /* initialize Capabilities Pointer or Next Pointer register */ 1820 static int xen_pt_ptr_reg_init(XenPCIPassthroughState *s, 1821 XenPTRegInfo *reg, uint32_t real_offset, 1822 uint32_t *data) 1823 { 1824 int i, rc; 1825 uint8_t reg_field; 1826 uint8_t cap_id = 0; 1827 1828 rc = xen_host_pci_get_byte(&s->real_device, real_offset, ®_field); 1829 if (rc) { 1830 return rc; 1831 } 1832 /* find capability offset */ 1833 while (reg_field) { 1834 for (i = 0; xen_pt_emu_reg_grps[i].grp_size != 0; i++) { 1835 if (xen_pt_hide_dev_cap(&s->real_device, 1836 xen_pt_emu_reg_grps[i].grp_id)) { 1837 continue; 1838 } 1839 1840 rc = xen_host_pci_get_byte(&s->real_device, 1841 reg_field + PCI_CAP_LIST_ID, &cap_id); 1842 if (rc) { 1843 XEN_PT_ERR(&s->dev, "Failed to read capability @0x%x (rc:%d)\n", 1844 reg_field + PCI_CAP_LIST_ID, rc); 1845 return rc; 1846 } 1847 if (xen_pt_emu_reg_grps[i].grp_id == cap_id) { 1848 if (xen_pt_emu_reg_grps[i].grp_type == XEN_PT_GRP_TYPE_EMU) { 1849 goto out; 1850 } 1851 /* ignore the 0 hardwired capability, find next one */ 1852 break; 1853 } 1854 } 1855 1856 /* next capability */ 1857 rc = xen_host_pci_get_byte(&s->real_device, 1858 reg_field + PCI_CAP_LIST_NEXT, ®_field); 1859 if (rc) { 1860 return rc; 1861 } 1862 } 1863 1864 out: 1865 *data = reg_field; 1866 return 0; 1867 } 1868 1869 1870 /************* 1871 * Main 1872 */ 1873 1874 static uint8_t find_cap_offset(XenPCIPassthroughState *s, uint8_t cap) 1875 { 1876 uint8_t id; 1877 unsigned max_cap = XEN_PCI_CAP_MAX; 1878 uint8_t pos = PCI_CAPABILITY_LIST; 1879 uint8_t status = 0; 1880 1881 if (xen_host_pci_get_byte(&s->real_device, PCI_STATUS, &status)) { 1882 return 0; 1883 } 1884 if ((status & PCI_STATUS_CAP_LIST) == 0) { 1885 return 0; 1886 } 1887 1888 while (max_cap--) { 1889 if (xen_host_pci_get_byte(&s->real_device, pos, &pos)) { 1890 break; 1891 } 1892 if (pos < PCI_CONFIG_HEADER_SIZE) { 1893 break; 1894 } 1895 1896 pos &= ~3; 1897 if (xen_host_pci_get_byte(&s->real_device, 1898 pos + PCI_CAP_LIST_ID, &id)) { 1899 break; 1900 } 1901 1902 if (id == 0xff) { 1903 break; 1904 } 1905 if (id == cap) { 1906 return pos; 1907 } 1908 1909 pos += PCI_CAP_LIST_NEXT; 1910 } 1911 return 0; 1912 } 1913 1914 static void xen_pt_config_reg_init(XenPCIPassthroughState *s, 1915 XenPTRegGroup *reg_grp, XenPTRegInfo *reg, 1916 Error **errp) 1917 { 1918 XenPTReg *reg_entry; 1919 uint32_t data = 0; 1920 int rc = 0; 1921 1922 reg_entry = g_new0(XenPTReg, 1); 1923 reg_entry->reg = reg; 1924 1925 if (reg->init) { 1926 uint32_t host_mask, size_mask; 1927 unsigned int offset; 1928 uint32_t val = 0; 1929 1930 /* initialize emulate register */ 1931 rc = reg->init(s, reg_entry->reg, 1932 reg_grp->base_offset + reg->offset, &data); 1933 if (rc < 0) { 1934 g_free(reg_entry); 1935 error_setg(errp, "Init emulate register fail"); 1936 return; 1937 } 1938 if (data == XEN_PT_INVALID_REG) { 1939 /* free unused BAR register entry */ 1940 g_free(reg_entry); 1941 return; 1942 } 1943 /* Sync up the data to dev.config */ 1944 offset = reg_grp->base_offset + reg->offset; 1945 size_mask = 0xFFFFFFFF >> ((4 - reg->size) << 3); 1946 1947 switch (reg->size) { 1948 case 1: rc = xen_host_pci_get_byte(&s->real_device, offset, (uint8_t *)&val); 1949 break; 1950 case 2: rc = xen_host_pci_get_word(&s->real_device, offset, (uint16_t *)&val); 1951 break; 1952 case 4: rc = xen_host_pci_get_long(&s->real_device, offset, &val); 1953 break; 1954 default: abort(); 1955 } 1956 if (rc) { 1957 /* Serious issues when we cannot read the host values! */ 1958 g_free(reg_entry); 1959 error_setg(errp, "Cannot read host values"); 1960 return; 1961 } 1962 /* Set bits in emu_mask are the ones we emulate. The dev.config shall 1963 * contain the emulated view of the guest - therefore we flip the mask 1964 * to mask out the host values (which dev.config initially has) . */ 1965 host_mask = size_mask & ~reg->emu_mask; 1966 1967 if ((data & host_mask) != (val & host_mask)) { 1968 uint32_t new_val; 1969 /* 1970 * Merge the emulated bits (data) with the host bits (val) 1971 * and mask out the bits past size to enable restoration 1972 * of the proper value for logging below. 1973 */ 1974 new_val = XEN_PT_MERGE_VALUE(val, data, host_mask) & size_mask; 1975 /* Leave intact host and emulated values past the size - even though 1976 * we do not care as we write per reg->size granularity, but for the 1977 * logging below lets have the proper value. */ 1978 new_val |= ((val | data)) & ~size_mask; 1979 XEN_PT_LOG(&s->dev,"Offset 0x%04x mismatch! Emulated=0x%04x, host=0x%04x, syncing to 0x%04x.\n", 1980 offset, data, val, new_val); 1981 val = new_val; 1982 } else 1983 val = data; 1984 1985 if (val & ~size_mask) { 1986 error_setg(errp, "Offset 0x%04x:0x%04x expands past" 1987 " register size (%d)", offset, val, reg->size); 1988 g_free(reg_entry); 1989 return; 1990 } 1991 /* This could be just pci_set_long as we don't modify the bits 1992 * past reg->size, but in case this routine is run in parallel or the 1993 * init value is larger, we do not want to over-write registers. */ 1994 switch (reg->size) { 1995 case 1: pci_set_byte(s->dev.config + offset, (uint8_t)val); 1996 break; 1997 case 2: pci_set_word(s->dev.config + offset, (uint16_t)val); 1998 break; 1999 case 4: pci_set_long(s->dev.config + offset, val); 2000 break; 2001 default: abort(); 2002 } 2003 /* set register value pointer to the data. */ 2004 reg_entry->ptr.byte = s->dev.config + offset; 2005 2006 } 2007 /* list add register entry */ 2008 QLIST_INSERT_HEAD(®_grp->reg_tbl_list, reg_entry, entries); 2009 } 2010 2011 void xen_pt_config_init(XenPCIPassthroughState *s, Error **errp) 2012 { 2013 ERRP_GUARD(); 2014 int i, rc; 2015 2016 QLIST_INIT(&s->reg_grps); 2017 2018 for (i = 0; xen_pt_emu_reg_grps[i].grp_size != 0; i++) { 2019 uint32_t reg_grp_offset = 0; 2020 XenPTRegGroup *reg_grp_entry = NULL; 2021 2022 if (xen_pt_emu_reg_grps[i].grp_id != 0xFF 2023 && xen_pt_emu_reg_grps[i].grp_id != XEN_PCI_INTEL_OPREGION) { 2024 if (xen_pt_hide_dev_cap(&s->real_device, 2025 xen_pt_emu_reg_grps[i].grp_id)) { 2026 continue; 2027 } 2028 2029 reg_grp_offset = find_cap_offset(s, xen_pt_emu_reg_grps[i].grp_id); 2030 2031 if (!reg_grp_offset) { 2032 continue; 2033 } 2034 } 2035 2036 if (xen_pt_emu_reg_grps[i].grp_id == XEN_PCI_INTEL_OPREGION) { 2037 if (!is_igd_vga_passthrough(&s->real_device) || 2038 s->real_device.vendor_id != PCI_VENDOR_ID_INTEL) { 2039 continue; 2040 } 2041 /* 2042 * By default we will trap up to 0x40 in the cfg space. 2043 * If an intel device is pass through we need to trap 0xfc, 2044 * therefore the size should be 0xff. 2045 */ 2046 reg_grp_offset = XEN_PCI_INTEL_OPREGION; 2047 } 2048 2049 reg_grp_entry = g_new0(XenPTRegGroup, 1); 2050 QLIST_INIT(®_grp_entry->reg_tbl_list); 2051 QLIST_INSERT_HEAD(&s->reg_grps, reg_grp_entry, entries); 2052 2053 reg_grp_entry->base_offset = reg_grp_offset; 2054 reg_grp_entry->reg_grp = xen_pt_emu_reg_grps + i; 2055 if (xen_pt_emu_reg_grps[i].size_init) { 2056 /* get register group size */ 2057 rc = xen_pt_emu_reg_grps[i].size_init(s, reg_grp_entry->reg_grp, 2058 reg_grp_offset, 2059 ®_grp_entry->size); 2060 if (rc < 0) { 2061 error_setg(errp, "Failed to initialize %d/%zu, type = 0x%x," 2062 " rc: %d", i, ARRAY_SIZE(xen_pt_emu_reg_grps), 2063 xen_pt_emu_reg_grps[i].grp_type, rc); 2064 xen_pt_config_delete(s); 2065 return; 2066 } 2067 } 2068 2069 if (xen_pt_emu_reg_grps[i].grp_type == XEN_PT_GRP_TYPE_EMU) { 2070 if (xen_pt_emu_reg_grps[i].emu_regs) { 2071 int j = 0; 2072 XenPTRegInfo *regs = xen_pt_emu_reg_grps[i].emu_regs; 2073 2074 /* initialize capability register */ 2075 for (j = 0; regs->size != 0; j++, regs++) { 2076 xen_pt_config_reg_init(s, reg_grp_entry, regs, errp); 2077 if (*errp) { 2078 error_append_hint(errp, "Failed to init register %d" 2079 " offsets 0x%x in grp_type = 0x%x (%d/%zu)", 2080 j, 2081 regs->offset, 2082 xen_pt_emu_reg_grps[i].grp_type, 2083 i, ARRAY_SIZE(xen_pt_emu_reg_grps)); 2084 xen_pt_config_delete(s); 2085 return; 2086 } 2087 } 2088 } 2089 } 2090 } 2091 } 2092 2093 /* delete all emulate register */ 2094 void xen_pt_config_delete(XenPCIPassthroughState *s) 2095 { 2096 struct XenPTRegGroup *reg_group, *next_grp; 2097 struct XenPTReg *reg, *next_reg; 2098 2099 /* free MSI/MSI-X info table */ 2100 if (s->msix) { 2101 xen_pt_msix_unmap(s); 2102 } 2103 g_free(s->msi); 2104 2105 /* free all register group entry */ 2106 QLIST_FOREACH_SAFE(reg_group, &s->reg_grps, entries, next_grp) { 2107 /* free all register entry */ 2108 QLIST_FOREACH_SAFE(reg, ®_group->reg_tbl_list, entries, next_reg) { 2109 QLIST_REMOVE(reg, entries); 2110 g_free(reg); 2111 } 2112 2113 QLIST_REMOVE(reg_group, entries); 2114 g_free(reg_group); 2115 } 2116 } 2117