/*
 * Copyright (c) 2007, Neocleus Corporation.
 * Copyright (c) 2007, Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Alex Novik <alex@neocleus.com>
 * Allen Kay <allen.m.kay@intel.com>
 * Guy Zana <guy@neocleus.com>
 *
 * This file implements direct PCI assignment to a HVM guest
 */

#include "qemu/osdep.h"
#include "qemu/timer.h"
#include "hw/xen/xen_backend.h"
#include "xen_pt.h"

#define XEN_PT_MERGE_VALUE(value, data, val_mask) \
    (((value) & (val_mask)) | ((data) & ~(val_mask)))

#define XEN_PT_INVALID_REG 0xFFFFFFFF /* invalid register value */

/* prototype */

static int xen_pt_ptr_reg_init(XenPCIPassthroughState *s, XenPTRegInfo *reg,
                               uint32_t real_offset, uint32_t *data);


/* helper */

/* A return value of 1 means the capability should NOT be exposed to guest. */
static int xen_pt_hide_dev_cap(const XenHostPCIDevice *d, uint8_t grp_id)
{
    switch (grp_id) {
    case PCI_CAP_ID_EXP:
        /* The PCI Express Capability Structure of the VF of an Intel 82599
         * 10GbE Controller is trivial: the PCI Express Capabilities Register
         * is 0, so the Capability Version is 0 and xen_pt_pcie_size_init()
         * would fail. Do not try to expose it to the guest.
         *
         * See 'Table 9.7. VF PCIe Configuration Space' in the datasheet:
         * http://download.intel.com/design/network/datashts/82599_datasheet.pdf
         */
        if (d->vendor_id == PCI_VENDOR_ID_INTEL &&
            d->device_id == PCI_DEVICE_ID_INTEL_82599_SFP_VF) {
            return 1;
        }
        break;
    }
    return 0;
}

/* find emulated register group entry */
XenPTRegGroup *xen_pt_find_reg_grp(XenPCIPassthroughState *s, uint32_t address)
{
    XenPTRegGroup *entry = NULL;

    /* find register group entry */
    QLIST_FOREACH(entry, &s->reg_grps, entries) {
        /* check address */
        if ((entry->base_offset <= address)
            && ((entry->base_offset + entry->size) > address)) {
            return entry;
        }
    }

    /* group entry not found */
    return NULL;
}

/* find emulated register entry */
XenPTReg *xen_pt_find_reg(XenPTRegGroup *reg_grp, uint32_t address)
{
    XenPTReg *reg_entry = NULL;
    XenPTRegInfo *reg = NULL;
    uint32_t real_offset = 0;

    /* find register entry */
    QLIST_FOREACH(reg_entry, &reg_grp->reg_tbl_list, entries) {
        reg = reg_entry->reg;
        real_offset = reg_grp->base_offset + reg->offset;
        /* check address */
        if ((real_offset <= address)
            && ((real_offset + reg->size) > address)) {
            return reg_entry;
        }
    }

    return NULL;
}

static uint32_t get_throughable_mask(const XenPCIPassthroughState *s,
                                     XenPTRegInfo *reg, uint32_t valid_mask)
{
    uint32_t throughable_mask = ~(reg->emu_mask | reg->ro_mask);

    if (!s->permissive) {
        throughable_mask &= ~reg->res_mask;
    }

    return throughable_mask & valid_mask;
}

/****************
 * general register functions
 */

/* register initialization function */

static int xen_pt_common_reg_init(XenPCIPassthroughState *s,
                                  XenPTRegInfo *reg, uint32_t real_offset,
                                  uint32_t *data)
{
    *data = reg->init_val;
    return 0;
}
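/*
 * Worked example: XEN_PT_MERGE_VALUE(value, data, val_mask) keeps the
 * val_mask bits of 'value' and takes the remaining bits from 'data'.
 * With value = 0x12, data = 0xAB and val_mask = 0x0F:
 *
 *   (0x12 & 0x0F) | (0xAB & ~0x0F) = 0x02 | 0xA0 = 0xA2
 *
 * The handlers below use the macro both ways: reads merge emulated bits
 * into the value read from the host device, writes merge guest-written
 * bits into the emulated copy.
 */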
/* Read register functions */

static int xen_pt_byte_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                                uint8_t *value, uint8_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint8_t valid_emu_mask = 0;
    uint8_t *data = cfg_entry->ptr.byte;

    /* emulate byte register */
    valid_emu_mask = reg->emu_mask & valid_mask;
    *value = XEN_PT_MERGE_VALUE(*value, *data, ~valid_emu_mask);

    return 0;
}
static int xen_pt_word_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                                uint16_t *value, uint16_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint16_t valid_emu_mask = 0;
    uint16_t *data = cfg_entry->ptr.half_word;

    /* emulate word register */
    valid_emu_mask = reg->emu_mask & valid_mask;
    *value = XEN_PT_MERGE_VALUE(*value, *data, ~valid_emu_mask);

    return 0;
}
static int xen_pt_long_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                                uint32_t *value, uint32_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint32_t valid_emu_mask = 0;
    uint32_t *data = cfg_entry->ptr.word;

    /* emulate long register */
    valid_emu_mask = reg->emu_mask & valid_mask;
    *value = XEN_PT_MERGE_VALUE(*value, *data, ~valid_emu_mask);

    return 0;
}

/* Write register functions */

static int xen_pt_byte_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                                 uint8_t *val, uint8_t dev_value,
                                 uint8_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint8_t writable_mask = 0;
    uint8_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
    uint8_t *data = cfg_entry->ptr.byte;

    /* modify emulated register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value & ~reg->rw1c_mask,
                              throughable_mask);

    return 0;
}
static int xen_pt_word_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                                 uint16_t *val, uint16_t dev_value,
                                 uint16_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint16_t writable_mask = 0;
    uint16_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
    uint16_t *data = cfg_entry->ptr.half_word;

    /* modify emulated register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value & ~reg->rw1c_mask,
                              throughable_mask);

    return 0;
}
static int xen_pt_long_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                                 uint32_t *val, uint32_t dev_value,
                                 uint32_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint32_t writable_mask = 0;
    uint32_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
    uint32_t *data = cfg_entry->ptr.word;

    /* modify emulated register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value & ~reg->rw1c_mask,
                              throughable_mask);

    return 0;
}
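/*
 * Worked example for the write path, using the Status register entry in
 * the Header Type0 table below (ro_mask = 0x06F8, rw1c_mask = 0xF900,
 * emu_mask = 0x0010): writable_mask comes out as 0x0010, so only that
 * emulated, non-read-only bit can change the emulated copy. RW1C bits
 * are stripped from dev_value before the merge, so writing back the 1s
 * just read from the device cannot accidentally clear them.
 */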
/* XenPTRegInfo declaration
 * - only for emulated registers (partially or fully emulated).
 * - for pass-through registers that need special behavior (like interacting
 *   with other components), set emu_mask to all 0 and specify the r/w
 *   functions properly.
 * - do NOT use all Fs for init_val, otherwise the register will not be
 *   registered (XEN_PT_INVALID_REG marks an entry as unused).
 */

/********************
 * Header Type0
 */

static int xen_pt_vendor_reg_init(XenPCIPassthroughState *s,
                                  XenPTRegInfo *reg, uint32_t real_offset,
                                  uint32_t *data)
{
    *data = s->real_device.vendor_id;
    return 0;
}
static int xen_pt_device_reg_init(XenPCIPassthroughState *s,
                                  XenPTRegInfo *reg, uint32_t real_offset,
                                  uint32_t *data)
{
    *data = s->real_device.device_id;
    return 0;
}
static int xen_pt_status_reg_init(XenPCIPassthroughState *s,
                                  XenPTRegInfo *reg, uint32_t real_offset,
                                  uint32_t *data)
{
    XenPTRegGroup *reg_grp_entry = NULL;
    XenPTReg *reg_entry = NULL;
    uint32_t reg_field = 0;

    /* find Header register group */
    reg_grp_entry = xen_pt_find_reg_grp(s, PCI_CAPABILITY_LIST);
    if (reg_grp_entry) {
        /* find Capabilities Pointer register */
        reg_entry = xen_pt_find_reg(reg_grp_entry, PCI_CAPABILITY_LIST);
        if (reg_entry) {
            /* check Capabilities Pointer register */
            if (*reg_entry->ptr.half_word) {
                reg_field |= PCI_STATUS_CAP_LIST;
            } else {
                reg_field &= ~PCI_STATUS_CAP_LIST;
            }
        } else {
            xen_shutdown_fatal_error("Internal error: Couldn't find XenPTReg*"
                                     " for Capabilities Pointer register."
                                     " (%s)\n", __func__);
            return -1;
        }
    } else {
        xen_shutdown_fatal_error("Internal error: Couldn't find XenPTRegGroup"
                                 " for Header. (%s)\n", __func__);
        return -1;
    }

    *data = reg_field;
    return 0;
}
static int xen_pt_header_type_reg_init(XenPCIPassthroughState *s,
                                       XenPTRegInfo *reg, uint32_t real_offset,
                                       uint32_t *data)
{
    /* use the initial PCI_HEADER_TYPE value, always forcing the
     * multi-function bit */
    *data = reg->init_val | 0x80;
    return 0;
}

/* initialize Interrupt Pin register */
static int xen_pt_irqpin_reg_init(XenPCIPassthroughState *s,
                                  XenPTRegInfo *reg, uint32_t real_offset,
                                  uint32_t *data)
{
    *data = xen_pt_pci_read_intx(s);
    return 0;
}

/* Command register */
static int xen_pt_cmd_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                                uint16_t *val, uint16_t dev_value,
                                uint16_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint16_t writable_mask = 0;
    uint16_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
    uint16_t *data = cfg_entry->ptr.half_word;

    /* modify emulated register */
    writable_mask = ~reg->ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);

    /* create value for writing to I/O device register */
    if ((*val & PCI_COMMAND_INTX_DISABLE) || s->machine_irq) {
        throughable_mask |= PCI_COMMAND_INTX_DISABLE;
    }

    *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);

    return 0;
}

/* BAR */
#define XEN_PT_BAR_MEM_RO_MASK   0x0000000F /* BAR ReadOnly mask (Memory) */
#define XEN_PT_BAR_MEM_EMU_MASK  0xFFFFFFF0 /* BAR emulation mask (Memory) */
#define XEN_PT_BAR_IO_RO_MASK    0x00000003 /* BAR ReadOnly mask (I/O) */
#define XEN_PT_BAR_IO_EMU_MASK   0xFFFFFFFC /* BAR emulation mask (I/O) */

static bool is_64bit_bar(PCIIORegion *r)
{
    return !!(r->type & PCI_BASE_ADDRESS_MEM_TYPE_64);
}
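/*
 * A 64-bit memory BAR occupies two consecutive 32-bit BAR slots; the
 * helper below reconstructs the full region size from both slots
 * ((r + 1)->size for the upper half, r->size for the lower half).
 */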
static uint64_t xen_pt_get_bar_size(PCIIORegion *r)
{
    if (is_64bit_bar(r)) {
        uint64_t size64;
        size64 = (r + 1)->size;
        size64 <<= 32;
        size64 += r->size;
        return size64;
    }
    return r->size;
}

static XenPTBarFlag xen_pt_bar_reg_parse(XenPCIPassthroughState *s,
                                         int index)
{
    PCIDevice *d = &s->dev;
    XenPTRegion *region = NULL;
    PCIIORegion *r;

    /* check 64bit BAR */
    if ((0 < index) && (index < PCI_ROM_SLOT)) {
        int type = s->real_device.io_regions[index - 1].type;

        if ((type & XEN_HOST_PCI_REGION_TYPE_MEM)
            && (type & XEN_HOST_PCI_REGION_TYPE_MEM_64)) {
            region = &s->bases[index - 1];
            if (region->bar_flag != XEN_PT_BAR_FLAG_UPPER) {
                return XEN_PT_BAR_FLAG_UPPER;
            }
        }
    }

    /* check unused BAR */
    r = &d->io_regions[index];
    if (!xen_pt_get_bar_size(r)) {
        return XEN_PT_BAR_FLAG_UNUSED;
    }

    /* for ExpROM BAR */
    if (index == PCI_ROM_SLOT) {
        return XEN_PT_BAR_FLAG_MEM;
    }

    /* check BAR I/O indicator */
    if (s->real_device.io_regions[index].type & XEN_HOST_PCI_REGION_TYPE_IO) {
        return XEN_PT_BAR_FLAG_IO;
    } else {
        return XEN_PT_BAR_FLAG_MEM;
    }
}

static inline uint32_t base_address_with_flags(XenHostPCIIORegion *hr)
{
    if (hr->type & XEN_HOST_PCI_REGION_TYPE_IO) {
        return hr->base_addr | (hr->bus_flags & ~PCI_BASE_ADDRESS_IO_MASK);
    } else {
        return hr->base_addr | (hr->bus_flags & ~PCI_BASE_ADDRESS_MEM_MASK);
    }
}

static int xen_pt_bar_reg_init(XenPCIPassthroughState *s, XenPTRegInfo *reg,
                               uint32_t real_offset, uint32_t *data)
{
    uint32_t reg_field = 0;
    int index;

    index = xen_pt_bar_offset_to_index(reg->offset);
    if (index < 0 || index >= PCI_NUM_REGIONS) {
        XEN_PT_ERR(&s->dev, "Internal error: Invalid BAR index [%d].\n", index);
        return -1;
    }

    /* set BAR flag */
    s->bases[index].bar_flag = xen_pt_bar_reg_parse(s, index);
    if (s->bases[index].bar_flag == XEN_PT_BAR_FLAG_UNUSED) {
        reg_field = XEN_PT_INVALID_REG;
    }

    *data = reg_field;
    return 0;
}
static int xen_pt_bar_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                               uint32_t *value, uint32_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint32_t valid_emu_mask = 0;
    uint32_t bar_emu_mask = 0;
    int index;

    /* get BAR index */
    index = xen_pt_bar_offset_to_index(reg->offset);
    if (index < 0 || index >= PCI_NUM_REGIONS - 1) {
        XEN_PT_ERR(&s->dev, "Internal error: Invalid BAR index [%d].\n", index);
        return -1;
    }

    /* use the fixed-up value from kernel sysfs */
    *value = base_address_with_flags(&s->real_device.io_regions[index]);

    /* set the emulation mask depending on the BAR flag */
    switch (s->bases[index].bar_flag) {
    case XEN_PT_BAR_FLAG_MEM:
        bar_emu_mask = XEN_PT_BAR_MEM_EMU_MASK;
        break;
    case XEN_PT_BAR_FLAG_IO:
        bar_emu_mask = XEN_PT_BAR_IO_EMU_MASK;
        break;
    case XEN_PT_BAR_FLAG_UPPER:
        bar_emu_mask = XEN_PT_BAR_ALLF;
        break;
    default:
        break;
    }

    /* emulate BAR */
    valid_emu_mask = bar_emu_mask & valid_mask;
    *value = XEN_PT_MERGE_VALUE(*value, *cfg_entry->ptr.word, ~valid_emu_mask);

    return 0;
}
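/*
 * Background for the write handler below: guests size a BAR by writing
 * 0xFFFFFFFF and reading the value back; bits that stay zero encode the
 * region size. Writes to the emulated copy are masked with
 * bar_emu_mask & ~bar_ro_mask, where bar_ro_mask covers the flag bits
 * plus (r_size - 1), so a probe reads back the aligned size while the
 * physical BAR is rewritten with its current value and stays untouched.
 */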
static int xen_pt_bar_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                                uint32_t *val, uint32_t dev_value,
                                uint32_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    XenPTRegion *base = NULL;
    PCIDevice *d = &s->dev;
    const PCIIORegion *r;
    uint32_t writable_mask = 0;
    uint32_t bar_emu_mask = 0;
    uint32_t bar_ro_mask = 0;
    uint32_t r_size = 0;
    int index = 0;
    uint32_t *data = cfg_entry->ptr.word;

    index = xen_pt_bar_offset_to_index(reg->offset);
    if (index < 0 || index >= PCI_NUM_REGIONS) {
        XEN_PT_ERR(d, "Internal error: Invalid BAR index [%d].\n", index);
        return -1;
    }

    r = &d->io_regions[index];
    base = &s->bases[index];
    r_size = xen_pt_get_emul_size(base->bar_flag, r->size);

    /* set the emulation and read-only masks depending on the BAR flag */
    switch (s->bases[index].bar_flag) {
    case XEN_PT_BAR_FLAG_MEM:
        bar_emu_mask = XEN_PT_BAR_MEM_EMU_MASK;
        if (!r_size) {
            /* low 32 bits mask for 64 bit bars */
            bar_ro_mask = XEN_PT_BAR_ALLF;
        } else {
            bar_ro_mask = XEN_PT_BAR_MEM_RO_MASK | (r_size - 1);
        }
        break;
    case XEN_PT_BAR_FLAG_IO:
        bar_emu_mask = XEN_PT_BAR_IO_EMU_MASK;
        bar_ro_mask = XEN_PT_BAR_IO_RO_MASK | (r_size - 1);
        break;
    case XEN_PT_BAR_FLAG_UPPER:
        bar_emu_mask = XEN_PT_BAR_ALLF;
        bar_ro_mask = r_size ? r_size - 1 : 0;
        break;
    default:
        break;
    }

    /* modify emulated register */
    writable_mask = bar_emu_mask & ~bar_ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);

    /* check whether we need to update the virtual region address or not */
    switch (s->bases[index].bar_flag) {
    case XEN_PT_BAR_FLAG_UPPER:
    case XEN_PT_BAR_FLAG_MEM:
    case XEN_PT_BAR_FLAG_IO:
        /* nothing to do */
        break;
    default:
        break;
    }

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, 0);

    return 0;
}

/* write Exp ROM BAR */
static int xen_pt_exp_rom_bar_reg_write(XenPCIPassthroughState *s,
                                        XenPTReg *cfg_entry, uint32_t *val,
                                        uint32_t dev_value, uint32_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    XenPTRegion *base = NULL;
    PCIDevice *d = &s->dev;
    uint32_t writable_mask = 0;
    uint32_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
    pcibus_t r_size = 0;
    uint32_t bar_ro_mask = 0;
    uint32_t *data = cfg_entry->ptr.word;

    r_size = d->io_regions[PCI_ROM_SLOT].size;
    base = &s->bases[PCI_ROM_SLOT];
    /* align memory type resource size */
    r_size = xen_pt_get_emul_size(base->bar_flag, r_size);

    /* set the emulation and read-only masks */
    bar_ro_mask = (reg->ro_mask | (r_size - 1)) & ~PCI_ROM_ADDRESS_ENABLE;

    /* modify emulated register */
    writable_mask = ~bar_ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);

    return 0;
}

static int xen_pt_intel_opregion_read(XenPCIPassthroughState *s,
                                      XenPTReg *cfg_entry,
                                      uint32_t *value, uint32_t valid_mask)
{
    *value = igd_read_opregion(s);
    return 0;
}

static int xen_pt_intel_opregion_write(XenPCIPassthroughState *s,
                                       XenPTReg *cfg_entry, uint32_t *value,
                                       uint32_t dev_value, uint32_t valid_mask)
{
    igd_write_opregion(s, *value);
    return 0;
}
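/*
 * Reading aid for the tables below: each entry wires one config-space
 * register to its masks and handlers. ro_mask marks bits the guest
 * cannot change, emu_mask marks bits backed by the emulated copy rather
 * than the device, res_mask marks reserved bits (never passed through
 * to the device unless permissive mode is on), and rw1c_mask marks
 * write-1-to-clear bits.
 */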
/* Header Type0 reg static information table */
static XenPTRegInfo xen_pt_emu_reg_header0[] = {
    /* Vendor ID reg */
    {
        .offset     = PCI_VENDOR_ID,
        .size       = 2,
        .init_val   = 0x0000,
        .ro_mask    = 0xFFFF,
        .emu_mask   = 0xFFFF,
        .init       = xen_pt_vendor_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* Device ID reg */
    {
        .offset     = PCI_DEVICE_ID,
        .size       = 2,
        .init_val   = 0x0000,
        .ro_mask    = 0xFFFF,
        .emu_mask   = 0xFFFF,
        .init       = xen_pt_device_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* Command reg */
    {
        .offset     = PCI_COMMAND,
        .size       = 2,
        .init_val   = 0x0000,
        .res_mask   = 0xF880,
        .emu_mask   = 0x0743,
        .init       = xen_pt_common_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_cmd_reg_write,
    },
    /* Capabilities Pointer reg */
    {
        .offset     = PCI_CAPABILITY_LIST,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0xFF,
        .emu_mask   = 0xFF,
        .init       = xen_pt_ptr_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Status reg */
    /* initialized from the emulated Cap Ptr value, so it needs to be
     * declared after the Cap Ptr reg
     */
    {
        .offset     = PCI_STATUS,
        .size       = 2,
        .init_val   = 0x0000,
        .res_mask   = 0x0007,
        .ro_mask    = 0x06F8,
        .rw1c_mask  = 0xF900,
        .emu_mask   = 0x0010,
        .init       = xen_pt_status_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* Cache Line Size reg */
    {
        .offset     = PCI_CACHE_LINE_SIZE,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0x00,
        .emu_mask   = 0xFF,
        .init       = xen_pt_common_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Latency Timer reg */
    {
        .offset     = PCI_LATENCY_TIMER,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0x00,
        .emu_mask   = 0xFF,
        .init       = xen_pt_common_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Header Type reg */
    {
        .offset     = PCI_HEADER_TYPE,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0xFF,
        .emu_mask   = 0x00,
        .init       = xen_pt_header_type_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Interrupt Line reg */
    {
        .offset     = PCI_INTERRUPT_LINE,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0x00,
        .emu_mask   = 0xFF,
        .init       = xen_pt_common_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Interrupt Pin reg */
    {
        .offset     = PCI_INTERRUPT_PIN,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0xFF,
        .emu_mask   = 0xFF,
        .init       = xen_pt_irqpin_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* BAR 0 reg */
    /* the masks of the BARs are decided at runtime, depending on
     * I/O vs memory type */
    {
        .offset     = PCI_BASE_ADDRESS_0,
        .size       = 4,
        .init_val   = 0x00000000,
        .init       = xen_pt_bar_reg_init,
        .u.dw.read  = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* BAR 1 reg */
    {
        .offset     = PCI_BASE_ADDRESS_1,
        .size       = 4,
        .init_val   = 0x00000000,
        .init       = xen_pt_bar_reg_init,
        .u.dw.read  = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* BAR 2 reg */
    {
        .offset     = PCI_BASE_ADDRESS_2,
        .size       = 4,
        .init_val   = 0x00000000,
        .init       = xen_pt_bar_reg_init,
        .u.dw.read  = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* BAR 3 reg */
    {
        .offset     = PCI_BASE_ADDRESS_3,
        .size       = 4,
        .init_val   = 0x00000000,
        .init       = xen_pt_bar_reg_init,
        .u.dw.read  = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* BAR 4 reg */
    {
        .offset     = PCI_BASE_ADDRESS_4,
        .size       = 4,
        .init_val   = 0x00000000,
        .init       = xen_pt_bar_reg_init,
        .u.dw.read  = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* BAR 5 reg */
    {
        .offset     = PCI_BASE_ADDRESS_5,
        .size       = 4,
        .init_val   = 0x00000000,
        .init       = xen_pt_bar_reg_init,
        .u.dw.read  = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* Expansion ROM BAR reg */
    {
        .offset     = PCI_ROM_ADDRESS,
        .size       = 4,
        .init_val   = 0x00000000,
        .ro_mask    = ~PCI_ROM_ADDRESS_MASK & ~PCI_ROM_ADDRESS_ENABLE,
        .emu_mask   = (uint32_t)PCI_ROM_ADDRESS_MASK,
        .init       = xen_pt_bar_reg_init,
        .u.dw.read  = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_exp_rom_bar_reg_write,
    },
    {
        .size = 0,
    },
};


/*********************************
 * Vital Product Data Capability
 */

/* Vital Product Data Capability Structure reg static information table */
static XenPTRegInfo xen_pt_emu_reg_vpd[] = {
    {
        .offset     = PCI_CAP_LIST_NEXT,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0xFF,
        .emu_mask   = 0xFF,
        .init       = xen_pt_ptr_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    {
        .offset     = PCI_VPD_ADDR,
        .size       = 2,
        .ro_mask    = 0x0003,
        .emu_mask   = 0x0003,
        .init       = xen_pt_common_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    {
        .size = 0,
    },
};


/**************************************
 * Vendor Specific Capability
 */

/* Vendor Specific Capability Structure reg static information table */
static XenPTRegInfo xen_pt_emu_reg_vendor[] = {
    {
        .offset     = PCI_CAP_LIST_NEXT,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0xFF,
        .emu_mask   = 0xFF,
        .init       = xen_pt_ptr_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    {
        .size = 0,
    },
};


/*****************************
 * PCI Express Capability
 */

static inline uint8_t get_capability_version(XenPCIPassthroughState *s,
                                             uint32_t offset)
{
    uint8_t flag;
    if (xen_host_pci_get_byte(&s->real_device, offset + PCI_EXP_FLAGS, &flag)) {
        return 0;
    }
    return flag & PCI_EXP_FLAGS_VERS;
}

static inline uint8_t get_device_type(XenPCIPassthroughState *s,
                                      uint32_t offset)
{
    uint8_t flag;
    if (xen_host_pci_get_byte(&s->real_device, offset + PCI_EXP_FLAGS, &flag)) {
        return 0;
    }
    return (flag & PCI_EXP_FLAGS_TYPE) >> 4;
}

/* initialize Link Control register */
static int xen_pt_linkctrl_reg_init(XenPCIPassthroughState *s,
                                    XenPTRegInfo *reg, uint32_t real_offset,
                                    uint32_t *data)
{
    uint8_t cap_ver = get_capability_version(s, real_offset - reg->offset);
    uint8_t dev_type = get_device_type(s, real_offset - reg->offset);

    /* no need to initialize in case of a Root Complex Integrated Endpoint
     * with cap_ver 1.x
     */
    if ((dev_type == PCI_EXP_TYPE_RC_END) && (cap_ver == 1)) {
        *data = XEN_PT_INVALID_REG;
        return 0;
    }

    *data = reg->init_val;
    return 0;
}
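/*
 * Device Control 2 (offset 0x28) and Link Control 2 (offset 0x30) only
 * exist in version 2 of the PCI Express capability; a cap_ver 1.x
 * structure ends earlier (see xen_pt_pcie_size_init() below), so the
 * next two init functions mark those registers invalid for v1 devices.
 */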
/* initialize Device Control 2 register */
static int xen_pt_devctrl2_reg_init(XenPCIPassthroughState *s,
                                    XenPTRegInfo *reg, uint32_t real_offset,
                                    uint32_t *data)
{
    uint8_t cap_ver = get_capability_version(s, real_offset - reg->offset);

    /* no need to initialize in case of cap_ver 1.x */
    if (cap_ver == 1) {
        *data = XEN_PT_INVALID_REG;
        return 0;
    }

    *data = reg->init_val;
    return 0;
}
/* initialize Link Control 2 register */
static int xen_pt_linkctrl2_reg_init(XenPCIPassthroughState *s,
                                     XenPTRegInfo *reg, uint32_t real_offset,
                                     uint32_t *data)
{
    uint8_t cap_ver = get_capability_version(s, real_offset - reg->offset);
    uint32_t reg_field = 0;

    /* no need to initialize in case of cap_ver 1.x */
    if (cap_ver == 1) {
        reg_field = XEN_PT_INVALID_REG;
    } else {
        /* set Supported Link Speed */
        uint8_t lnkcap;
        int rc;
        rc = xen_host_pci_get_byte(&s->real_device,
                                   real_offset - reg->offset + PCI_EXP_LNKCAP,
                                   &lnkcap);
        if (rc) {
            return rc;
        }
        reg_field |= PCI_EXP_LNKCAP_SLS & lnkcap;
    }

    *data = reg_field;
    return 0;
}

/* PCI Express Capability Structure reg static information table */
static XenPTRegInfo xen_pt_emu_reg_pcie[] = {
    /* Next Pointer reg */
    {
        .offset     = PCI_CAP_LIST_NEXT,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0xFF,
        .emu_mask   = 0xFF,
        .init       = xen_pt_ptr_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Device Capabilities reg */
    {
        .offset     = PCI_EXP_DEVCAP,
        .size       = 4,
        .init_val   = 0x00000000,
        .ro_mask    = 0xFFFFFFFF,
        .emu_mask   = 0x10000000,
        .init       = xen_pt_common_reg_init,
        .u.dw.read  = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_long_reg_write,
    },
    /* Device Control reg */
    {
        .offset     = PCI_EXP_DEVCTL,
        .size       = 2,
        .init_val   = 0x2810,
        .ro_mask    = 0x8400,
        .emu_mask   = 0xFFFF,
        .init       = xen_pt_common_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* Device Status reg */
    {
        .offset     = PCI_EXP_DEVSTA,
        .size       = 2,
        .res_mask   = 0xFFC0,
        .ro_mask    = 0x0030,
        .rw1c_mask  = 0x000F,
        .init       = xen_pt_common_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* Link Control reg */
    {
        .offset     = PCI_EXP_LNKCTL,
        .size       = 2,
        .init_val   = 0x0000,
        .ro_mask    = 0xFC34,
        .emu_mask   = 0xFFFF,
        .init       = xen_pt_linkctrl_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* Link Status reg */
    {
        .offset     = PCI_EXP_LNKSTA,
        .size       = 2,
        .ro_mask    = 0x3FFF,
        .rw1c_mask  = 0xC000,
        .init       = xen_pt_common_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* Device Control 2 reg */
    {
        .offset     = 0x28,
        .size       = 2,
        .init_val   = 0x0000,
        .ro_mask    = 0xFFE0,
        .emu_mask   = 0xFFFF,
        .init       = xen_pt_devctrl2_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* Link Control 2 reg */
    {
        .offset     = 0x30,
        .size       = 2,
        .init_val   = 0x0000,
        .ro_mask    = 0xE040,
        .emu_mask   = 0xFFFF,
        .init       = xen_pt_linkctrl2_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    {
        .size = 0,
    },
};

/*********************************
 * Power Management Capability
 */

/* Power Management Capability reg static information table */
static XenPTRegInfo xen_pt_emu_reg_pm[] = {
    /* Next Pointer reg */
    {
        .offset     = PCI_CAP_LIST_NEXT,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0xFF,
        .emu_mask   = 0xFF,
        .init       = xen_pt_ptr_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Power Management Capabilities reg */
    {
        .offset     = PCI_CAP_FLAGS,
        .size       = 2,
        .init_val   = 0x0000,
        .ro_mask    = 0xFFFF,
        .emu_mask   = 0xF9C8,
        .init       = xen_pt_common_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* PCI Power Management Control/Status reg */
    {
        .offset     = PCI_PM_CTRL,
        .size       = 2,
        .init_val   = 0x0008,
        .res_mask   = 0x00F0,
        .ro_mask    = 0x610C,
        .rw1c_mask  = 0x8000,
        .emu_mask   = 0x810B,
        .init       = xen_pt_common_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    {
        .size = 0,
    },
};


/********************************
 * MSI Capability
 */

/* Helper */
#define xen_pt_msi_check_type(offset, flags, what) \
    ((offset) == ((flags) & PCI_MSI_FLAGS_64BIT ? \
                  PCI_MSI_##what##_64 : PCI_MSI_##what##_32))

/* Message Control register */
static int xen_pt_msgctrl_reg_init(XenPCIPassthroughState *s,
                                   XenPTRegInfo *reg, uint32_t real_offset,
                                   uint32_t *data)
{
    XenPTMSI *msi = s->msi;
    uint16_t reg_field;
    int rc;

    /* use I/O device register's value as initial value */
    rc = xen_host_pci_get_word(&s->real_device, real_offset, &reg_field);
    if (rc) {
        return rc;
    }
    if (reg_field & PCI_MSI_FLAGS_ENABLE) {
        XEN_PT_LOG(&s->dev, "MSI already enabled, disabling it first\n");
        xen_host_pci_set_word(&s->real_device, real_offset,
                              reg_field & ~PCI_MSI_FLAGS_ENABLE);
    }
    msi->flags |= reg_field;
    msi->ctrl_offset = real_offset;
    msi->initialized = false;
    msi->mapped = false;

    *data = reg->init_val;
    return 0;
}
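/*
 * The write handler below drives the actual MSI state machine: the
 * first time the guest sets PCI_MSI_FLAGS_ENABLE it maps a pirq
 * (xen_pt_msi_setup()) and binds it (xen_pt_msi_update()); clearing the
 * bit afterwards only disables the already-mapped MSI. Setup errors are
 * swallowed on purpose so that a guest with broken MSI keeps running,
 * just with MSI disabled.
 */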
static int xen_pt_msgctrl_reg_write(XenPCIPassthroughState *s,
                                    XenPTReg *cfg_entry, uint16_t *val,
                                    uint16_t dev_value, uint16_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    XenPTMSI *msi = s->msi;
    uint16_t writable_mask = 0;
    uint16_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
    uint16_t *data = cfg_entry->ptr.half_word;

    /* Currently no support for multi-vector */
    if (*val & PCI_MSI_FLAGS_QSIZE) {
        XEN_PT_WARN(&s->dev, "Guest tries to set more than 1 vector, ctrl: %x\n",
                    *val);
    }

    /* modify emulated register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);
    msi->flags |= *data & ~PCI_MSI_FLAGS_ENABLE;

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);

    /* update MSI */
    if (*val & PCI_MSI_FLAGS_ENABLE) {
        /* setup MSI pirq for the first time */
        if (!msi->initialized) {
            /* Init physical one */
            XEN_PT_LOG(&s->dev, "setup MSI (register: %x).\n", *val);
            if (xen_pt_msi_setup(s)) {
                /* We do not broadcast the error to the framework code, so
                 * that MSI errors are contained in MSI emulation code and
                 * QEMU can go on running.
                 * Guest MSI would be actually not working.
                 */
                *val &= ~PCI_MSI_FLAGS_ENABLE;
                XEN_PT_WARN(&s->dev, "Can not map MSI (register: %x)!\n", *val);
                return 0;
            }
            if (xen_pt_msi_update(s)) {
                *val &= ~PCI_MSI_FLAGS_ENABLE;
                XEN_PT_WARN(&s->dev, "Can not bind MSI (register: %x)!\n",
                            *val);
                return 0;
            }
            msi->initialized = true;
            msi->mapped = true;
        }
        msi->flags |= PCI_MSI_FLAGS_ENABLE;
    } else if (msi->mapped) {
        xen_pt_msi_disable(s);
    }

    return 0;
}

/* initialize Message Upper Address register */
static int xen_pt_msgaddr64_reg_init(XenPCIPassthroughState *s,
                                     XenPTRegInfo *reg, uint32_t real_offset,
                                     uint32_t *data)
{
    /* no need to initialize in case of 32 bit type */
    if (!(s->msi->flags & PCI_MSI_FLAGS_64BIT)) {
        *data = XEN_PT_INVALID_REG;
    } else {
        *data = reg->init_val;
    }

    return 0;
}
/* this function will be called twice (for the 32 bit and 64 bit type) */
/* initialize Message Data register */
static int xen_pt_msgdata_reg_init(XenPCIPassthroughState *s,
                                   XenPTRegInfo *reg, uint32_t real_offset,
                                   uint32_t *data)
{
    uint32_t flags = s->msi->flags;
    uint32_t offset = reg->offset;

    /* check whether the offset matches the 32/64-bit type */
    if (xen_pt_msi_check_type(offset, flags, DATA)) {
        *data = reg->init_val;
    } else {
        *data = XEN_PT_INVALID_REG;
    }
    return 0;
}

/* this function will be called twice (for the 32 bit and 64 bit type) */
/* initialize Mask register */
static int xen_pt_mask_reg_init(XenPCIPassthroughState *s,
                                XenPTRegInfo *reg, uint32_t real_offset,
                                uint32_t *data)
{
    uint32_t flags = s->msi->flags;

    /* check whether the offset matches the 32/64-bit type */
    if (!(flags & PCI_MSI_FLAGS_MASKBIT)) {
        *data = XEN_PT_INVALID_REG;
    } else if (xen_pt_msi_check_type(reg->offset, flags, MASK)) {
        *data = reg->init_val;
    } else {
        *data = XEN_PT_INVALID_REG;
    }
    return 0;
}

/* this function will be called twice (for the 32 bit and 64 bit type) */
/* initialize Pending register */
static int xen_pt_pending_reg_init(XenPCIPassthroughState *s,
                                   XenPTRegInfo *reg, uint32_t real_offset,
                                   uint32_t *data)
{
    uint32_t flags = s->msi->flags;

    /* check whether the offset matches the 32/64-bit type */
    if (!(flags & PCI_MSI_FLAGS_MASKBIT)) {
        *data = XEN_PT_INVALID_REG;
    } else if (xen_pt_msi_check_type(reg->offset, flags, PENDING)) {
        *data = reg->init_val;
    } else {
        *data = XEN_PT_INVALID_REG;
    }
    return 0;
}

/* write Message Address register */
static int xen_pt_msgaddr32_reg_write(XenPCIPassthroughState *s,
                                      XenPTReg *cfg_entry, uint32_t *val,
                                      uint32_t dev_value, uint32_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint32_t writable_mask = 0;
    uint32_t old_addr = *cfg_entry->ptr.word;
    uint32_t *data = cfg_entry->ptr.word;

    /* modify emulated register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);
    s->msi->addr_lo = *data;

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, 0);

    /* update MSI */
    if (*data != old_addr) {
        if (s->msi->mapped) {
            xen_pt_msi_update(s);
        }
    }

    return 0;
}
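/*
 * Note on the address/data write handlers above and below: the value
 * forwarded to the device is XEN_PT_MERGE_VALUE(*val, dev_value, 0),
 * i.e. the physical register is rewritten with its current value. The
 * guest-visible address/data live only in the emulated copy; the real
 * vector is programmed through Xen via xen_pt_msi_update().
 */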
/* write Message Upper Address register */
static int xen_pt_msgaddr64_reg_write(XenPCIPassthroughState *s,
                                      XenPTReg *cfg_entry, uint32_t *val,
                                      uint32_t dev_value, uint32_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint32_t writable_mask = 0;
    uint32_t old_addr = *cfg_entry->ptr.word;
    uint32_t *data = cfg_entry->ptr.word;

    /* check whether the type is 64 bit or not */
    if (!(s->msi->flags & PCI_MSI_FLAGS_64BIT)) {
        XEN_PT_ERR(&s->dev,
                   "Can't write to the upper address without 64 bit support\n");
        return -1;
    }

    /* modify emulated register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);
    /* update the msi_info too */
    s->msi->addr_hi = *data;

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, 0);

    /* update MSI */
    if (*data != old_addr) {
        if (s->msi->mapped) {
            xen_pt_msi_update(s);
        }
    }

    return 0;
}


/* this function will be called twice (for the 32 bit and 64 bit type) */
/* write Message Data register */
static int xen_pt_msgdata_reg_write(XenPCIPassthroughState *s,
                                    XenPTReg *cfg_entry, uint16_t *val,
                                    uint16_t dev_value, uint16_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    XenPTMSI *msi = s->msi;
    uint16_t writable_mask = 0;
    uint16_t old_data = *cfg_entry->ptr.half_word;
    uint32_t offset = reg->offset;
    uint16_t *data = cfg_entry->ptr.half_word;

    /* bail out if the offset does not match the 32/64-bit layout */
    if (!xen_pt_msi_check_type(offset, msi->flags, DATA)) {
        XEN_PT_ERR(&s->dev, "the offset does not match the 32/64 bit type!\n");
        return -1;
    }

    /* modify emulated register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);
    /* update the msi_info too */
    msi->data = *data;

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, 0);

    /* update MSI */
    if (*data != old_data) {
        if (msi->mapped) {
            xen_pt_msi_update(s);
        }
    }

    return 0;
}
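/*
 * The MSI table below deliberately contains both the 32-bit and the
 * 64-bit variants of the Data, Mask and Pending registers (e.g.
 * PCI_MSI_DATA_32 at offset 0x8 and PCI_MSI_DATA_64 at 0xC): the init
 * functions probe PCI_MSI_FLAGS and mark whichever variant does not
 * match the device as XEN_PT_INVALID_REG, so only one of each pair is
 * actually registered.
 */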
/* MSI Capability Structure reg static information table */
static XenPTRegInfo xen_pt_emu_reg_msi[] = {
    /* Next Pointer reg */
    {
        .offset     = PCI_CAP_LIST_NEXT,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0xFF,
        .emu_mask   = 0xFF,
        .init       = xen_pt_ptr_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Message Control reg */
    {
        .offset     = PCI_MSI_FLAGS,
        .size       = 2,
        .init_val   = 0x0000,
        .res_mask   = 0xFE00,
        .ro_mask    = 0x018E,
        .emu_mask   = 0x017E,
        .init       = xen_pt_msgctrl_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_msgctrl_reg_write,
    },
    /* Message Address reg */
    {
        .offset     = PCI_MSI_ADDRESS_LO,
        .size       = 4,
        .init_val   = 0x00000000,
        .ro_mask    = 0x00000003,
        .emu_mask   = 0xFFFFFFFF,
        .init       = xen_pt_common_reg_init,
        .u.dw.read  = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_msgaddr32_reg_write,
    },
    /* Message Upper Address reg (if PCI_MSI_FLAGS_64BIT set) */
    {
        .offset     = PCI_MSI_ADDRESS_HI,
        .size       = 4,
        .init_val   = 0x00000000,
        .ro_mask    = 0x00000000,
        .emu_mask   = 0xFFFFFFFF,
        .init       = xen_pt_msgaddr64_reg_init,
        .u.dw.read  = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_msgaddr64_reg_write,
    },
    /* Message Data reg (16 bits of data for 32-bit devices) */
    {
        .offset     = PCI_MSI_DATA_32,
        .size       = 2,
        .init_val   = 0x0000,
        .ro_mask    = 0x0000,
        .emu_mask   = 0xFFFF,
        .init       = xen_pt_msgdata_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_msgdata_reg_write,
    },
    /* Message Data reg (16 bits of data for 64-bit devices) */
    {
        .offset     = PCI_MSI_DATA_64,
        .size       = 2,
        .init_val   = 0x0000,
        .ro_mask    = 0x0000,
        .emu_mask   = 0xFFFF,
        .init       = xen_pt_msgdata_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_msgdata_reg_write,
    },
    /* Mask reg (if PCI_MSI_FLAGS_MASKBIT set, for 32-bit devices) */
    {
        .offset     = PCI_MSI_MASK_32,
        .size       = 4,
        .init_val   = 0x00000000,
        .ro_mask    = 0xFFFFFFFF,
        .emu_mask   = 0xFFFFFFFF,
        .init       = xen_pt_mask_reg_init,
        .u.dw.read  = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_long_reg_write,
    },
    /* Mask reg (if PCI_MSI_FLAGS_MASKBIT set, for 64-bit devices) */
    {
        .offset     = PCI_MSI_MASK_64,
        .size       = 4,
        .init_val   = 0x00000000,
        .ro_mask    = 0xFFFFFFFF,
        .emu_mask   = 0xFFFFFFFF,
        .init       = xen_pt_mask_reg_init,
        .u.dw.read  = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_long_reg_write,
    },
    /* Pending reg (if PCI_MSI_FLAGS_MASKBIT set, for 32-bit devices) */
    {
        .offset     = PCI_MSI_MASK_32 + 4,
        .size       = 4,
        .init_val   = 0x00000000,
        .ro_mask    = 0xFFFFFFFF,
        .emu_mask   = 0x00000000,
        .init       = xen_pt_pending_reg_init,
        .u.dw.read  = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_long_reg_write,
    },
    /* Pending reg (if PCI_MSI_FLAGS_MASKBIT set, for 64-bit devices) */
    {
        .offset     = PCI_MSI_MASK_64 + 4,
        .size       = 4,
        .init_val   = 0x00000000,
        .ro_mask    = 0xFFFFFFFF,
        .emu_mask   = 0x00000000,
        .init       = xen_pt_pending_reg_init,
        .u.dw.read  = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_long_reg_write,
    },
    {
        .size = 0,
    },
};


/**************************************
 * MSI-X Capability
 */

/* Message Control register for MSI-X */
static int xen_pt_msixctrl_reg_init(XenPCIPassthroughState *s,
                                    XenPTRegInfo *reg, uint32_t real_offset,
                                    uint32_t *data)
{
    uint16_t reg_field;
    int rc;

    /* use I/O device register's value as initial value */
    rc = xen_host_pci_get_word(&s->real_device, real_offset, &reg_field);
    if (rc) {
        return rc;
    }
    if (reg_field & PCI_MSIX_FLAGS_ENABLE) {
        XEN_PT_LOG(&s->dev, "MSI-X already enabled, disabling it first\n");
        xen_host_pci_set_word(&s->real_device, real_offset,
                              reg_field & ~PCI_MSIX_FLAGS_ENABLE);
    }

    s->msix->ctrl_offset = real_offset;

    *data = reg->init_val;
    return 0;
}
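/*
 * For MSI-X only the control register is emulated here; the vector
 * table and PBA live in memory space and are handled outside config
 * space emulation. The write handler below updates the bindings
 * whenever the guest enables MSI-X with MASKALL clear, and tears them
 * down when it disables MSI-X.
 */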
static int xen_pt_msixctrl_reg_write(XenPCIPassthroughState *s,
                                     XenPTReg *cfg_entry, uint16_t *val,
                                     uint16_t dev_value, uint16_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint16_t writable_mask = 0;
    uint16_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
    int debug_msix_enabled_old;
    uint16_t *data = cfg_entry->ptr.half_word;

    /* modify emulated register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);

    /* update MSI-X */
    if ((*val & PCI_MSIX_FLAGS_ENABLE)
        && !(*val & PCI_MSIX_FLAGS_MASKALL)) {
        xen_pt_msix_update(s);
    } else if (!(*val & PCI_MSIX_FLAGS_ENABLE) && s->msix->enabled) {
        xen_pt_msix_disable(s);
    }

    s->msix->maskall = *val & PCI_MSIX_FLAGS_MASKALL;

    debug_msix_enabled_old = s->msix->enabled;
    s->msix->enabled = !!(*val & PCI_MSIX_FLAGS_ENABLE);
    if (s->msix->enabled != debug_msix_enabled_old) {
        XEN_PT_LOG(&s->dev, "%s MSI-X\n",
                   s->msix->enabled ? "enable" : "disable");
    }

    return 0;
}

/* MSI-X Capability Structure reg static information table */
static XenPTRegInfo xen_pt_emu_reg_msix[] = {
    /* Next Pointer reg */
    {
        .offset     = PCI_CAP_LIST_NEXT,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0xFF,
        .emu_mask   = 0xFF,
        .init       = xen_pt_ptr_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Message Control reg */
    {
        .offset     = PCI_MSI_FLAGS,
        .size       = 2,
        .init_val   = 0x0000,
        .res_mask   = 0x3800,
        .ro_mask    = 0x07FF,
        .emu_mask   = 0x0000,
        .init       = xen_pt_msixctrl_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_msixctrl_reg_write,
    },
    {
        .size = 0,
    },
};

static XenPTRegInfo xen_pt_emu_reg_igd_opregion[] = {
    /* Intel IGFX OpRegion reg */
    {
        .offset     = 0x0,
        .size       = 4,
        .init_val   = 0,
        .u.dw.read  = xen_pt_intel_opregion_read,
        .u.dw.write = xen_pt_intel_opregion_write,
    },
    {
        .size = 0,
    },
};

/****************************
 * Capabilities
 */

/* capability structure register group size functions */

static int xen_pt_reg_grp_size_init(XenPCIPassthroughState *s,
                                    const XenPTRegGroupInfo *grp_reg,
                                    uint32_t base_offset, uint8_t *size)
{
    *size = grp_reg->grp_size;
    return 0;
}
/* get Vendor Specific Capability Structure register group size */
static int xen_pt_vendor_size_init(XenPCIPassthroughState *s,
                                   const XenPTRegGroupInfo *grp_reg,
                                   uint32_t base_offset, uint8_t *size)
{
    return xen_host_pci_get_byte(&s->real_device, base_offset + 0x02, size);
}
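/*
 * The Vendor Specific capability carries its own length byte right
 * after the cap ID and next pointer, which is why the size callback
 * above simply reads base_offset + 0x02 from the device.
 */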
/* get PCI Express Capability Structure register group size */
static int xen_pt_pcie_size_init(XenPCIPassthroughState *s,
                                 const XenPTRegGroupInfo *grp_reg,
                                 uint32_t base_offset, uint8_t *size)
{
    PCIDevice *d = &s->dev;
    uint8_t version = get_capability_version(s, base_offset);
    uint8_t type = get_device_type(s, base_offset);
    uint8_t pcie_size = 0;


    /* calculate the size depending on capability version and
     * device/port type */
    /* in case of PCI Express Base Specification Rev 1.x */
    if (version == 1) {
        /* The PCI Express Capabilities, Device Capabilities, and Device
         * Status/Control registers are required for all PCI Express devices.
         * The Link Capabilities and Link Status/Control are required for all
         * Endpoints that are not Root Complex Integrated Endpoints. Endpoints
         * are not required to implement registers other than those listed
         * above and terminate the capability structure.
         */
        switch (type) {
        case PCI_EXP_TYPE_ENDPOINT:
        case PCI_EXP_TYPE_LEG_END:
            pcie_size = 0x14;
            break;
        case PCI_EXP_TYPE_RC_END:
            /* has no link */
            pcie_size = 0x0C;
            break;
        /* only Endpoint passthrough is supported */
        case PCI_EXP_TYPE_ROOT_PORT:
        case PCI_EXP_TYPE_UPSTREAM:
        case PCI_EXP_TYPE_DOWNSTREAM:
        case PCI_EXP_TYPE_PCI_BRIDGE:
        case PCI_EXP_TYPE_PCIE_BRIDGE:
        case PCI_EXP_TYPE_RC_EC:
        default:
            XEN_PT_ERR(d, "Unsupported device/port type %#x.\n", type);
            return -1;
        }
    }
    /* in case of PCI Express Base Specification Rev 2.0 */
    else if (version == 2) {
        switch (type) {
        case PCI_EXP_TYPE_ENDPOINT:
        case PCI_EXP_TYPE_LEG_END:
        case PCI_EXP_TYPE_RC_END:
            /* For Functions that do not implement the registers,
             * these spaces must be hardwired to 0b.
             */
            pcie_size = 0x3C;
            break;
        /* only Endpoint passthrough is supported */
        case PCI_EXP_TYPE_ROOT_PORT:
        case PCI_EXP_TYPE_UPSTREAM:
        case PCI_EXP_TYPE_DOWNSTREAM:
        case PCI_EXP_TYPE_PCI_BRIDGE:
        case PCI_EXP_TYPE_PCIE_BRIDGE:
        case PCI_EXP_TYPE_RC_EC:
        default:
            XEN_PT_ERR(d, "Unsupported device/port type %#x.\n", type);
            return -1;
        }
    } else {
        XEN_PT_ERR(d, "Unsupported capability version %#x.\n", version);
        return -1;
    }

    *size = pcie_size;
    return 0;
}
/* get MSI Capability Structure register group size */
static int xen_pt_msi_size_init(XenPCIPassthroughState *s,
                                const XenPTRegGroupInfo *grp_reg,
                                uint32_t base_offset, uint8_t *size)
{
    uint16_t msg_ctrl = 0;
    uint8_t msi_size = 0xa;
    int rc;

    rc = xen_host_pci_get_word(&s->real_device, base_offset + PCI_MSI_FLAGS,
                               &msg_ctrl);
    if (rc) {
        return rc;
    }
    /* 64-bit capable? room for the upper address dword */
    if (msg_ctrl & PCI_MSI_FLAGS_64BIT) {
        msi_size += 4;
    }
    /* per-vector masking capable? room up to the pending dword */
    if (msg_ctrl & PCI_MSI_FLAGS_MASKBIT) {
        msi_size += 10;
    }

    s->msi = g_new0(XenPTMSI, 1);
    s->msi->pirq = XEN_PT_UNASSIGNED_PIRQ;

    *size = msi_size;
    return 0;
}
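/*
 * Example: a device advertising both PCI_MSI_FLAGS_64BIT and
 * PCI_MSI_FLAGS_MASKBIT gets 0x0A + 4 + 10 = 0x18 bytes: the base
 * structure, one extra dword for the upper address, then the padding
 * after the 16-bit data register plus the mask and pending dwords.
 */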
/* get MSI-X Capability Structure register group size */
static int xen_pt_msix_size_init(XenPCIPassthroughState *s,
                                 const XenPTRegGroupInfo *grp_reg,
                                 uint32_t base_offset, uint8_t *size)
{
    int rc = 0;

    rc = xen_pt_msix_init(s, base_offset);

    if (rc < 0) {
        XEN_PT_ERR(&s->dev, "Internal error: xen_pt_msix_init failed.\n");
        return rc;
    }

    *size = grp_reg->grp_size;
    return 0;
}


static const XenPTRegGroupInfo xen_pt_emu_reg_grps[] = {
    /* Header Type0 reg group */
    {
        .grp_id     = 0xFF,
        .grp_type   = XEN_PT_GRP_TYPE_EMU,
        .grp_size   = 0x40,
        .size_init  = xen_pt_reg_grp_size_init,
        .emu_regs   = xen_pt_emu_reg_header0,
    },
    /* PCI PowerManagement Capability reg group */
    {
        .grp_id     = PCI_CAP_ID_PM,
        .grp_type   = XEN_PT_GRP_TYPE_EMU,
        .grp_size   = PCI_PM_SIZEOF,
        .size_init  = xen_pt_reg_grp_size_init,
        .emu_regs   = xen_pt_emu_reg_pm,
    },
    /* AGP Capability Structure reg group */
    {
        .grp_id     = PCI_CAP_ID_AGP,
        .grp_type   = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size   = 0x30,
        .size_init  = xen_pt_reg_grp_size_init,
    },
    /* Vital Product Data Capability Structure reg group */
    {
        .grp_id     = PCI_CAP_ID_VPD,
        .grp_type   = XEN_PT_GRP_TYPE_EMU,
        .grp_size   = 0x08,
        .size_init  = xen_pt_reg_grp_size_init,
        .emu_regs   = xen_pt_emu_reg_vpd,
    },
    /* Slot Identification reg group */
    {
        .grp_id     = PCI_CAP_ID_SLOTID,
        .grp_type   = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size   = 0x04,
        .size_init  = xen_pt_reg_grp_size_init,
    },
    /* MSI Capability Structure reg group */
    {
        .grp_id     = PCI_CAP_ID_MSI,
        .grp_type   = XEN_PT_GRP_TYPE_EMU,
        .grp_size   = 0xFF,
        .size_init  = xen_pt_msi_size_init,
        .emu_regs   = xen_pt_emu_reg_msi,
    },
    /* PCI-X Capabilities List Item reg group */
    {
        .grp_id     = PCI_CAP_ID_PCIX,
        .grp_type   = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size   = 0x18,
        .size_init  = xen_pt_reg_grp_size_init,
    },
    /* Vendor Specific Capability Structure reg group */
    {
        .grp_id     = PCI_CAP_ID_VNDR,
        .grp_type   = XEN_PT_GRP_TYPE_EMU,
        .grp_size   = 0xFF,
        .size_init  = xen_pt_vendor_size_init,
        .emu_regs   = xen_pt_emu_reg_vendor,
    },
    /* SHPC Capability List Item reg group */
    {
        .grp_id     = PCI_CAP_ID_SHPC,
        .grp_type   = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size   = 0x08,
        .size_init  = xen_pt_reg_grp_size_init,
    },
    /* Subsystem ID and Subsystem Vendor ID Capability List Item reg group */
    {
        .grp_id     = PCI_CAP_ID_SSVID,
        .grp_type   = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size   = 0x08,
        .size_init  = xen_pt_reg_grp_size_init,
    },
    /* AGP 8x Capability Structure reg group */
    {
        .grp_id     = PCI_CAP_ID_AGP3,
        .grp_type   = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size   = 0x30,
        .size_init  = xen_pt_reg_grp_size_init,
    },
    /* PCI Express Capability Structure reg group */
    {
        .grp_id     = PCI_CAP_ID_EXP,
        .grp_type   = XEN_PT_GRP_TYPE_EMU,
        .grp_size   = 0xFF,
        .size_init  = xen_pt_pcie_size_init,
        .emu_regs   = xen_pt_emu_reg_pcie,
    },
    /* MSI-X Capability Structure reg group */
    {
        .grp_id     = PCI_CAP_ID_MSIX,
        .grp_type   = XEN_PT_GRP_TYPE_EMU,
        .grp_size   = 0x0C,
        .size_init  = xen_pt_msix_size_init,
        .emu_regs   = xen_pt_emu_reg_msix,
    },
    /* Intel IGD Opregion group */
    {
        .grp_id     = XEN_PCI_INTEL_OPREGION,
        .grp_type   = XEN_PT_GRP_TYPE_EMU,
        .grp_size   = 0x4,
        .size_init  = xen_pt_reg_grp_size_init,
        .emu_regs   = xen_pt_emu_reg_igd_opregion,
    },
    {
        .grp_size = 0,
    },
};
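/*
 * Capability chain virtualization: the Capabilities Pointer and every
 * Next Pointer register are emulated so that the chain the guest walks
 * skips capabilities that are hidden (xen_pt_hide_dev_cap()) or
 * hardwired to zero. The init function below resolves each pointer to
 * the next capability that is actually exposed.
 */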
/* initialize Capabilities Pointer or Next Pointer register */
static int xen_pt_ptr_reg_init(XenPCIPassthroughState *s,
                               XenPTRegInfo *reg, uint32_t real_offset,
                               uint32_t *data)
{
    int i, rc;
    uint8_t reg_field;
    uint8_t cap_id = 0;

    rc = xen_host_pci_get_byte(&s->real_device, real_offset, &reg_field);
    if (rc) {
        return rc;
    }
    /* find capability offset */
    while (reg_field) {
        for (i = 0; xen_pt_emu_reg_grps[i].grp_size != 0; i++) {
            if (xen_pt_hide_dev_cap(&s->real_device,
                                    xen_pt_emu_reg_grps[i].grp_id)) {
                continue;
            }

            rc = xen_host_pci_get_byte(&s->real_device,
                                       reg_field + PCI_CAP_LIST_ID, &cap_id);
            if (rc) {
                XEN_PT_ERR(&s->dev, "Failed to read capability @0x%x (rc:%d)\n",
                           reg_field + PCI_CAP_LIST_ID, rc);
                return rc;
            }
            if (xen_pt_emu_reg_grps[i].grp_id == cap_id) {
                if (xen_pt_emu_reg_grps[i].grp_type == XEN_PT_GRP_TYPE_EMU) {
                    goto out;
                }
                /* ignore the 0 hardwired capability, find the next one */
                break;
            }
        }

        /* next capability */
        rc = xen_host_pci_get_byte(&s->real_device,
                                   reg_field + PCI_CAP_LIST_NEXT, &reg_field);
        if (rc) {
            return rc;
        }
    }

out:
    *data = reg_field;
    return 0;
}


/*************
 * Main
 */

static uint8_t find_cap_offset(XenPCIPassthroughState *s, uint8_t cap)
{
    uint8_t id;
    unsigned max_cap = XEN_PCI_CAP_MAX;
    uint8_t pos = PCI_CAPABILITY_LIST;
    uint8_t status = 0;

    if (xen_host_pci_get_byte(&s->real_device, PCI_STATUS, &status)) {
        return 0;
    }
    if ((status & PCI_STATUS_CAP_LIST) == 0) {
        return 0;
    }

    while (max_cap--) {
        if (xen_host_pci_get_byte(&s->real_device, pos, &pos)) {
            break;
        }
        if (pos < PCI_CONFIG_HEADER_SIZE) {
            break;
        }

        pos &= ~3;
        if (xen_host_pci_get_byte(&s->real_device,
                                  pos + PCI_CAP_LIST_ID, &id)) {
            break;
        }

        if (id == 0xff) {
            break;
        }
        if (id == cap) {
            return pos;
        }

        pos += PCI_CAP_LIST_NEXT;
    }
    return 0;
}
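/*
 * Illustration for the sync logic below: for the Command register,
 * emu_mask is 0x0743 and size_mask is 0xFFFF, so host_mask comes out
 * as 0xF8BC. dev.config keeps the host's value for those bits and the
 * emulated init value for the 0x0743 bits; a disagreement on the
 * host-owned bits is logged and reconciled before the result is stored.
 */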
static void xen_pt_config_reg_init(XenPCIPassthroughState *s,
                                   XenPTRegGroup *reg_grp, XenPTRegInfo *reg,
                                   Error **errp)
{
    XenPTReg *reg_entry;
    uint32_t data = 0;
    int rc = 0;

    reg_entry = g_new0(XenPTReg, 1);
    reg_entry->reg = reg;

    if (reg->init) {
        uint32_t host_mask, size_mask;
        unsigned int offset;
        uint32_t val;

        /* initialize emulated register */
        rc = reg->init(s, reg_entry->reg,
                       reg_grp->base_offset + reg->offset, &data);
        if (rc < 0) {
            g_free(reg_entry);
            error_setg(errp, "Failed to initialize emulated register");
            return;
        }
        if (data == XEN_PT_INVALID_REG) {
            /* free unused register entry */
            g_free(reg_entry);
            return;
        }
        /* Sync up the data to dev.config */
        offset = reg_grp->base_offset + reg->offset;
        size_mask = 0xFFFFFFFF >> ((4 - reg->size) << 3);

        switch (reg->size) {
        case 1: rc = xen_host_pci_get_byte(&s->real_device, offset,
                                           (uint8_t *)&val);
                break;
        case 2: rc = xen_host_pci_get_word(&s->real_device, offset,
                                           (uint16_t *)&val);
                break;
        case 4: rc = xen_host_pci_get_long(&s->real_device, offset, &val);
                break;
        default: abort();
        }
        if (rc) {
            /* Serious issues when we cannot read the host values! */
            g_free(reg_entry);
            error_setg(errp, "Cannot read host values");
            return;
        }
        /* Bits set in emu_mask are the ones we emulate. dev.config shall
         * contain the emulated view of the guest - therefore we flip the
         * mask to mask out the host values (which dev.config initially
         * holds). */
        host_mask = size_mask & ~reg->emu_mask;

        if ((data & host_mask) != (val & host_mask)) {
            uint32_t new_val;

            /* Take the host bits. */
            new_val = val & host_mask;
            /* OR in the host-owned bits the init function provided. */
            new_val |= data & host_mask;
            /* Leave host and emulated values past the size intact - we do
             * not care, as we write per reg->size granularity, but it gives
             * the proper value for the logging below. */
            new_val |= (val | data) & ~size_mask;
            XEN_PT_LOG(&s->dev, "Offset 0x%04x mismatch! Emulated=0x%04x, "
                       "host=0x%04x, syncing to 0x%04x.\n",
                       offset, data, val, new_val);
            val = new_val;
        } else {
            val = data;
        }

        if (val & ~size_mask) {
            error_setg(errp, "Offset 0x%04x:0x%04x expands past"
                       " register size (%d)", offset, val, reg->size);
            g_free(reg_entry);
            return;
        }
        /* This could be just pci_set_long as we don't modify the bits
         * past reg->size, but in case this routine is run in parallel or
         * the init value is larger, we do not want to over-write other
         * registers. */
        switch (reg->size) {
        case 1: pci_set_byte(s->dev.config + offset, (uint8_t)val);
                break;
        case 2: pci_set_word(s->dev.config + offset, (uint16_t)val);
                break;
        case 4: pci_set_long(s->dev.config + offset, val);
                break;
        default: abort();
        }
        /* set the register value pointer to the data */
        reg_entry->ptr.byte = s->dev.config + offset;

    }
    /* add the register entry to the list */
    QLIST_INSERT_HEAD(&reg_grp->reg_tbl_list, reg_entry, entries);
}
void xen_pt_config_init(XenPCIPassthroughState *s, Error **errp)
{
    int i, rc;
    Error *err = NULL;

    QLIST_INIT(&s->reg_grps);

    for (i = 0; xen_pt_emu_reg_grps[i].grp_size != 0; i++) {
        uint32_t reg_grp_offset = 0;
        XenPTRegGroup *reg_grp_entry = NULL;

        if (xen_pt_emu_reg_grps[i].grp_id != 0xFF
            && xen_pt_emu_reg_grps[i].grp_id != XEN_PCI_INTEL_OPREGION) {
            if (xen_pt_hide_dev_cap(&s->real_device,
                                    xen_pt_emu_reg_grps[i].grp_id)) {
                continue;
            }

            reg_grp_offset = find_cap_offset(s, xen_pt_emu_reg_grps[i].grp_id);

            if (!reg_grp_offset) {
                continue;
            }
        }

        /*
         * By default we will trap up to 0x40 in the cfg space.
         * If an Intel device is passed through, we need to trap 0xfc,
         * therefore the size should be 0xff.
         */
        if (xen_pt_emu_reg_grps[i].grp_id == XEN_PCI_INTEL_OPREGION) {
            reg_grp_offset = XEN_PCI_INTEL_OPREGION;
        }

        reg_grp_entry = g_new0(XenPTRegGroup, 1);
        QLIST_INIT(&reg_grp_entry->reg_tbl_list);
        QLIST_INSERT_HEAD(&s->reg_grps, reg_grp_entry, entries);

        reg_grp_entry->base_offset = reg_grp_offset;
        reg_grp_entry->reg_grp = xen_pt_emu_reg_grps + i;
        if (xen_pt_emu_reg_grps[i].size_init) {
            /* get register group size */
            rc = xen_pt_emu_reg_grps[i].size_init(s, reg_grp_entry->reg_grp,
                                                  reg_grp_offset,
                                                  &reg_grp_entry->size);
            if (rc < 0) {
                error_setg(&err, "Failed to initialize %d/%zu, type = 0x%x,"
                           " rc: %d", i, ARRAY_SIZE(xen_pt_emu_reg_grps),
                           xen_pt_emu_reg_grps[i].grp_type, rc);
                error_propagate(errp, err);
                xen_pt_config_delete(s);
                return;
            }
        }

        if (xen_pt_emu_reg_grps[i].grp_type == XEN_PT_GRP_TYPE_EMU) {
            if (xen_pt_emu_reg_grps[i].emu_regs) {
                int j = 0;
                XenPTRegInfo *regs = xen_pt_emu_reg_grps[i].emu_regs;

                /* initialize capability register */
                for (j = 0; regs->size != 0; j++, regs++) {
                    xen_pt_config_reg_init(s, reg_grp_entry, regs, &err);
                    if (err) {
                        error_append_hint(&err, "Failed to initialize %d/%zu"
                                          " reg 0x%x in grp_type = 0x%x"
                                          " (%d/%zu)", j,
                                          ARRAY_SIZE(xen_pt_emu_reg_grps[i].emu_regs),
                                          regs->offset,
                                          xen_pt_emu_reg_grps[i].grp_type,
                                          i, ARRAY_SIZE(xen_pt_emu_reg_grps));
                        error_propagate(errp, err);
                        xen_pt_config_delete(s);
                        return;
                    }
                }
            }
        }
    }
}

/* delete all emulated registers */
void xen_pt_config_delete(XenPCIPassthroughState *s)
{
    struct XenPTRegGroup *reg_group, *next_grp;
    struct XenPTReg *reg, *next_reg;

    /* free MSI/MSI-X info table */
    if (s->msix) {
        xen_pt_msix_unmap(s);
    }
    g_free(s->msi);

    /* free all register group entries */
    QLIST_FOREACH_SAFE(reg_group, &s->reg_grps, entries, next_grp) {
        /* free all register entries */
        QLIST_FOREACH_SAFE(reg, &reg_group->reg_tbl_list, entries, next_reg) {
            QLIST_REMOVE(reg, entries);
            g_free(reg);
        }

        QLIST_REMOVE(reg_group, entries);
        g_free(reg_group);
    }
}