/*
 * Copyright (c) 2007, Neocleus Corporation.
 * Copyright (c) 2007, Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Alex Novik <alex@neocleus.com>
 * Allen Kay <allen.m.kay@intel.com>
 * Guy Zana <guy@neocleus.com>
 *
 * This file implements direct PCI assignment to a HVM guest
 */

#include "qemu/timer.h"
#include "hw/xen/xen_backend.h"
#include "xen_pt.h"

#define XEN_PT_MERGE_VALUE(value, data, val_mask) \
    (((value) & (val_mask)) | ((data) & ~(val_mask)))

#define XEN_PT_INVALID_REG 0xFFFFFFFF      /* invalid register value */

/* prototype */

static int xen_pt_ptr_reg_init(XenPCIPassthroughState *s, XenPTRegInfo *reg,
                               uint32_t real_offset, uint32_t *data);


/* helper */

/* A return value of 1 means the capability should NOT be exposed to guest. */
static int xen_pt_hide_dev_cap(const XenHostPCIDevice *d, uint8_t grp_id)
{
    switch (grp_id) {
    case PCI_CAP_ID_EXP:
        /* The PCI Express Capability Structure of the VF of the Intel 82599
         * 10GbE Controller looks trivial, e.g., the PCI Express Capabilities
         * Register is 0, so the Capability Version is 0 and
         * xen_pt_pcie_size_init() would fail. We should not try to expose
         * it to the guest.
         *
         * The datasheet is available at
         * http://download.intel.com/design/network/datashts/82599_datasheet.pdf
         * See 'Table 9.7. VF PCIe Configuration Space' of the datasheet.
         */
        if (d->vendor_id == PCI_VENDOR_ID_INTEL &&
            d->device_id == PCI_DEVICE_ID_INTEL_82599_SFP_VF) {
            return 1;
        }
        break;
    }
    return 0;
}

/* find emulated register group entry */
XenPTRegGroup *xen_pt_find_reg_grp(XenPCIPassthroughState *s, uint32_t address)
{
    XenPTRegGroup *entry = NULL;

    /* find register group entry */
    QLIST_FOREACH(entry, &s->reg_grps, entries) {
        /* check address */
        if ((entry->base_offset <= address)
            && ((entry->base_offset + entry->size) > address)) {
            return entry;
        }
    }

    /* group entry not found */
    return NULL;
}

/* find emulated register entry */
XenPTReg *xen_pt_find_reg(XenPTRegGroup *reg_grp, uint32_t address)
{
    XenPTReg *reg_entry = NULL;
    XenPTRegInfo *reg = NULL;
    uint32_t real_offset = 0;

    /* find register entry */
    QLIST_FOREACH(reg_entry, &reg_grp->reg_tbl_list, entries) {
        reg = reg_entry->reg;
        real_offset = reg_grp->base_offset + reg->offset;
        /* check address */
        if ((real_offset <= address)
            && ((real_offset + reg->size) > address)) {
            return reg_entry;
        }
    }

    return NULL;
}

static uint32_t get_throughable_mask(const XenPCIPassthroughState *s,
                                     XenPTRegInfo *reg, uint32_t valid_mask)
{
    uint32_t throughable_mask = ~(reg->emu_mask | reg->ro_mask);

    if (!s->permissive) {
        throughable_mask &= ~reg->res_mask;
    }

    return throughable_mask & valid_mask;
}
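/* A worked example of the merge/mask machinery above (illustrative only):
 * XEN_PT_MERGE_VALUE(0xAB, 0xCD, 0xF0) == (0xAB & 0xF0) | (0xCD & 0x0F)
 *                                      == 0xA0 | 0x0D == 0xAD,
 * i.e. bits set in val_mask are taken from 'value', the rest from 'data'.
 * Likewise, for a register with emu_mask 0x0743, ro_mask 0x0000 and
 * res_mask 0xF880 (the Command register entry further below), a domain
 * without the permissive flag gets
 *   throughable_mask = ~(0x0743 | 0x0000) & ~0xF880 & valid_mask
 *                    = 0x003C & valid_mask:
 * only those bits of a guest write ever reach the physical device.
 */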
/****************
 * general register functions
 */

/* register initialization function */

static int xen_pt_common_reg_init(XenPCIPassthroughState *s,
                                  XenPTRegInfo *reg, uint32_t real_offset,
                                  uint32_t *data)
{
    *data = reg->init_val;
    return 0;
}

/* Read register functions */

static int xen_pt_byte_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                                uint8_t *value, uint8_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint8_t valid_emu_mask = 0;
    uint8_t *data = cfg_entry->ptr.byte;

    /* emulate byte register */
    valid_emu_mask = reg->emu_mask & valid_mask;
    *value = XEN_PT_MERGE_VALUE(*value, *data, ~valid_emu_mask);

    return 0;
}
static int xen_pt_word_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                                uint16_t *value, uint16_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint16_t valid_emu_mask = 0;
    uint16_t *data = cfg_entry->ptr.half_word;

    /* emulate word register */
    valid_emu_mask = reg->emu_mask & valid_mask;
    *value = XEN_PT_MERGE_VALUE(*value, *data, ~valid_emu_mask);

    return 0;
}
static int xen_pt_long_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                                uint32_t *value, uint32_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint32_t valid_emu_mask = 0;
    uint32_t *data = cfg_entry->ptr.word;

    /* emulate long register */
    valid_emu_mask = reg->emu_mask & valid_mask;
    *value = XEN_PT_MERGE_VALUE(*value, *data, ~valid_emu_mask);

    return 0;
}

/* Write register functions */

static int xen_pt_byte_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                                 uint8_t *val, uint8_t dev_value,
                                 uint8_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint8_t writable_mask = 0;
    uint8_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
    uint8_t *data = cfg_entry->ptr.byte;

    /* modify emulated register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);

    return 0;
}
static int xen_pt_word_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                                 uint16_t *val, uint16_t dev_value,
                                 uint16_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint16_t writable_mask = 0;
    uint16_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
    uint16_t *data = cfg_entry->ptr.half_word;

    /* modify emulated register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);

    return 0;
}
static int xen_pt_long_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                                 uint32_t *val, uint32_t dev_value,
                                 uint32_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint32_t writable_mask = 0;
    uint32_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
    uint32_t *data = cfg_entry->ptr.word;

    /* modify emulated register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);

    return 0;
}
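/* A sketch of how a guest write flows through the helpers above, assuming
 * a 16-bit register with emu_mask 0xFFFF, ro_mask 0xF800 and a full
 * byte-enable (valid_mask 0xFFFF):
 *   writable_mask    = 0xFFFF & ~0xF800 & 0xFFFF = 0x07FF
 *     -> guest bits 0..10 land in the emulated copy (dev.config);
 *   throughable_mask = ~(emu_mask | ro_mask) & valid_mask = 0x0000
 *     -> nothing of the guest value reaches the device; *val is rebuilt
 *        entirely from dev_value, so the hardware register is written
 *        back unchanged.
 */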
/* XenPTRegInfo declaration
 * - only for emulated registers (fully or partially emulated).
 * - for passthrough registers that need special behavior (like interacting
 *   with other components), set emu_mask to all 0s and specify the r/w
 *   functions properly.
 * - do NOT use all-Fs for init_val, otherwise the register will not be
 *   registered in the table (all-Fs is XEN_PT_INVALID_REG, the "skip this
 *   register" marker).
 */

/********************
 * Header Type0
 */

static int xen_pt_vendor_reg_init(XenPCIPassthroughState *s,
                                  XenPTRegInfo *reg, uint32_t real_offset,
                                  uint32_t *data)
{
    *data = s->real_device.vendor_id;
    return 0;
}
static int xen_pt_device_reg_init(XenPCIPassthroughState *s,
                                  XenPTRegInfo *reg, uint32_t real_offset,
                                  uint32_t *data)
{
    *data = s->real_device.device_id;
    return 0;
}
static int xen_pt_status_reg_init(XenPCIPassthroughState *s,
                                  XenPTRegInfo *reg, uint32_t real_offset,
                                  uint32_t *data)
{
    XenPTRegGroup *reg_grp_entry = NULL;
    XenPTReg *reg_entry = NULL;
    uint32_t reg_field = 0;

    /* find Header register group */
    reg_grp_entry = xen_pt_find_reg_grp(s, PCI_CAPABILITY_LIST);
    if (reg_grp_entry) {
        /* find Capabilities Pointer register */
        reg_entry = xen_pt_find_reg(reg_grp_entry, PCI_CAPABILITY_LIST);
        if (reg_entry) {
            /* check Capabilities Pointer register */
            if (*reg_entry->ptr.half_word) {
                reg_field |= PCI_STATUS_CAP_LIST;
            } else {
                reg_field &= ~PCI_STATUS_CAP_LIST;
            }
        } else {
            xen_shutdown_fatal_error("Internal error: Couldn't find XenPTReg*"
                                     " for Capabilities Pointer register."
                                     " (%s)\n", __func__);
            return -1;
        }
    } else {
        xen_shutdown_fatal_error("Internal error: Couldn't find XenPTRegGroup"
                                 " for Header. (%s)\n", __func__);
        return -1;
    }

    *data = reg_field;
    return 0;
}
static int xen_pt_header_type_reg_init(XenPCIPassthroughState *s,
                                       XenPTRegInfo *reg, uint32_t real_offset,
                                       uint32_t *data)
{
    /* use the emulated Header Type with the Multi-Function bit forced on */
    *data = reg->init_val | 0x80;
    return 0;
}

/* initialize Interrupt Pin register */
static int xen_pt_irqpin_reg_init(XenPCIPassthroughState *s,
                                  XenPTRegInfo *reg, uint32_t real_offset,
                                  uint32_t *data)
{
    *data = xen_pt_pci_read_intx(s);
    return 0;
}

/* Command register */
static int xen_pt_cmd_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                                uint16_t *val, uint16_t dev_value,
                                uint16_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint16_t writable_mask = 0;
    uint16_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
    uint16_t *data = cfg_entry->ptr.half_word;

    /* modify emulated register */
    writable_mask = ~reg->ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);

    /* create value for writing to I/O device register */
    if (*val & PCI_COMMAND_INTX_DISABLE) {
        throughable_mask |= PCI_COMMAND_INTX_DISABLE;
    } else {
        if (s->machine_irq) {
            throughable_mask |= PCI_COMMAND_INTX_DISABLE;
        }
    }

    *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);

    return 0;
}

/* BAR */
#define XEN_PT_BAR_MEM_RO_MASK   0x0000000F  /* BAR ReadOnly mask(Memory) */
#define XEN_PT_BAR_MEM_EMU_MASK  0xFFFFFFF0  /* BAR emul mask(Memory) */
#define XEN_PT_BAR_IO_RO_MASK    0x00000003  /* BAR ReadOnly mask(I/O) */
#define XEN_PT_BAR_IO_EMU_MASK   0xFFFFFFFC  /* BAR emul mask(I/O) */

static bool is_64bit_bar(PCIIORegion *r)
{
    return !!(r->type & PCI_BASE_ADDRESS_MEM_TYPE_64);
}

static uint64_t xen_pt_get_bar_size(PCIIORegion *r)
{
    if (is_64bit_bar(r)) {
        uint64_t size64;
        size64 = (r + 1)->size;
        size64 <<= 32;
        size64 += r->size;
        return size64;
    }
    return r->size;
}
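/* Illustration (not from the original source): the arithmetic above
 * assumes a 64-bit BAR spans two consecutive region slots, the low dword
 * in io_regions[n] and the high dword in io_regions[n + 1]. With
 * (r + 1)->size == 0x1 and r->size == 0x0, xen_pt_get_bar_size() returns
 * 0x100000000, i.e. a 4 GiB region whose size does not fit in the low
 * dword alone.
 */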
static XenPTBarFlag xen_pt_bar_reg_parse(XenPCIPassthroughState *s,
                                         int index)
{
    PCIDevice *d = &s->dev;
    XenPTRegion *region = NULL;
    PCIIORegion *r;

    /* check 64bit BAR */
    if ((0 < index) && (index < PCI_ROM_SLOT)) {
        int type = s->real_device.io_regions[index - 1].type;

        if ((type & XEN_HOST_PCI_REGION_TYPE_MEM)
            && (type & XEN_HOST_PCI_REGION_TYPE_MEM_64)) {
            region = &s->bases[index - 1];
            if (region->bar_flag != XEN_PT_BAR_FLAG_UPPER) {
                return XEN_PT_BAR_FLAG_UPPER;
            }
        }
    }

    /* check unused BAR */
    r = &d->io_regions[index];
    if (!xen_pt_get_bar_size(r)) {
        return XEN_PT_BAR_FLAG_UNUSED;
    }

    /* for ExpROM BAR */
    if (index == PCI_ROM_SLOT) {
        return XEN_PT_BAR_FLAG_MEM;
    }

    /* check BAR I/O indicator */
    if (s->real_device.io_regions[index].type & XEN_HOST_PCI_REGION_TYPE_IO) {
        return XEN_PT_BAR_FLAG_IO;
    } else {
        return XEN_PT_BAR_FLAG_MEM;
    }
}

static inline uint32_t base_address_with_flags(XenHostPCIIORegion *hr)
{
    if (hr->type & XEN_HOST_PCI_REGION_TYPE_IO) {
        return hr->base_addr | (hr->bus_flags & ~PCI_BASE_ADDRESS_IO_MASK);
    } else {
        return hr->base_addr | (hr->bus_flags & ~PCI_BASE_ADDRESS_MEM_MASK);
    }
}

static int xen_pt_bar_reg_init(XenPCIPassthroughState *s, XenPTRegInfo *reg,
                               uint32_t real_offset, uint32_t *data)
{
    uint32_t reg_field = 0;
    int index;

    index = xen_pt_bar_offset_to_index(reg->offset);
    if (index < 0 || index >= PCI_NUM_REGIONS) {
        XEN_PT_ERR(&s->dev, "Internal error: Invalid BAR index [%d].\n", index);
        return -1;
    }

    /* set BAR flag */
    s->bases[index].bar_flag = xen_pt_bar_reg_parse(s, index);
    if (s->bases[index].bar_flag == XEN_PT_BAR_FLAG_UNUSED) {
        reg_field = XEN_PT_INVALID_REG;
    }

    *data = reg_field;
    return 0;
}
static int xen_pt_bar_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                               uint32_t *value, uint32_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint32_t valid_emu_mask = 0;
    uint32_t bar_emu_mask = 0;
    int index;

    /* get BAR index */
    index = xen_pt_bar_offset_to_index(reg->offset);
    if (index < 0 || index >= PCI_NUM_REGIONS - 1) {
        XEN_PT_ERR(&s->dev, "Internal error: Invalid BAR index [%d].\n", index);
        return -1;
    }

    /* use fixed-up value from kernel sysfs */
    *value = base_address_with_flags(&s->real_device.io_regions[index]);

    /* set emulation mask depending on BAR flag */
    switch (s->bases[index].bar_flag) {
    case XEN_PT_BAR_FLAG_MEM:
        bar_emu_mask = XEN_PT_BAR_MEM_EMU_MASK;
        break;
    case XEN_PT_BAR_FLAG_IO:
        bar_emu_mask = XEN_PT_BAR_IO_EMU_MASK;
        break;
    case XEN_PT_BAR_FLAG_UPPER:
        bar_emu_mask = XEN_PT_BAR_ALLF;
        break;
    default:
        break;
    }

    /* emulate BAR */
    valid_emu_mask = bar_emu_mask & valid_mask;
    *value = XEN_PT_MERGE_VALUE(*value, *cfg_entry->ptr.word, ~valid_emu_mask);

    return 0;
}
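/* Background for the write handler below (a sketch of standard PCI BAR
 * sizing, not new logic): a guest probes a BAR's size by writing all-1s
 * and reading the value back; address bits below the region size must
 * read as 0. That is what bar_ro_mask implements: for a 4 KiB memory
 * BAR, r_size = 0x1000 and
 *   bar_ro_mask = XEN_PT_BAR_MEM_RO_MASK | (r_size - 1) = 0x0FFF,
 * so the low 12 bits (including the type bits) stay read-only in the
 * emulated BAR.
 */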
static int xen_pt_bar_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                                uint32_t *val, uint32_t dev_value,
                                uint32_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    XenPTRegion *base = NULL;
    PCIDevice *d = &s->dev;
    const PCIIORegion *r;
    uint32_t writable_mask = 0;
    uint32_t bar_emu_mask = 0;
    uint32_t bar_ro_mask = 0;
    uint32_t r_size = 0;
    int index = 0;
    uint32_t *data = cfg_entry->ptr.word;

    index = xen_pt_bar_offset_to_index(reg->offset);
    if (index < 0 || index >= PCI_NUM_REGIONS) {
        XEN_PT_ERR(d, "Internal error: Invalid BAR index [%d].\n", index);
        return -1;
    }

    r = &d->io_regions[index];
    base = &s->bases[index];
    r_size = xen_pt_get_emul_size(base->bar_flag, r->size);

    /* set emulation mask and read-only mask values depending on the BAR flag */
    switch (s->bases[index].bar_flag) {
    case XEN_PT_BAR_FLAG_MEM:
        bar_emu_mask = XEN_PT_BAR_MEM_EMU_MASK;
        if (!r_size) {
            /* low 32 bits mask for 64 bit bars */
            bar_ro_mask = XEN_PT_BAR_ALLF;
        } else {
            bar_ro_mask = XEN_PT_BAR_MEM_RO_MASK | (r_size - 1);
        }
        break;
    case XEN_PT_BAR_FLAG_IO:
        bar_emu_mask = XEN_PT_BAR_IO_EMU_MASK;
        bar_ro_mask = XEN_PT_BAR_IO_RO_MASK | (r_size - 1);
        break;
    case XEN_PT_BAR_FLAG_UPPER:
        bar_emu_mask = XEN_PT_BAR_ALLF;
        bar_ro_mask = r_size ? r_size - 1 : 0;
        break;
    default:
        break;
    }

    /* modify emulated register */
    writable_mask = bar_emu_mask & ~bar_ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);

    /* check whether we need to update the virtual region address or not */
    switch (s->bases[index].bar_flag) {
    case XEN_PT_BAR_FLAG_UPPER:
    case XEN_PT_BAR_FLAG_MEM:
        /* nothing to do */
        break;
    case XEN_PT_BAR_FLAG_IO:
        /* nothing to do */
        break;
    default:
        break;
    }

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, 0);

    return 0;
}

/* write Exp ROM BAR */
static int xen_pt_exp_rom_bar_reg_write(XenPCIPassthroughState *s,
                                        XenPTReg *cfg_entry, uint32_t *val,
                                        uint32_t dev_value, uint32_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    XenPTRegion *base = NULL;
    PCIDevice *d = &s->dev;
    uint32_t writable_mask = 0;
    uint32_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
    pcibus_t r_size = 0;
    uint32_t bar_ro_mask = 0;
    uint32_t *data = cfg_entry->ptr.word;

    r_size = d->io_regions[PCI_ROM_SLOT].size;
    base = &s->bases[PCI_ROM_SLOT];
    /* align memory type resource size */
    r_size = xen_pt_get_emul_size(base->bar_flag, r_size);

    /* set emulation mask and read-only mask */
    bar_ro_mask = (reg->ro_mask | (r_size - 1)) & ~PCI_ROM_ADDRESS_ENABLE;

    /* modify emulated register */
    writable_mask = ~bar_ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);

    return 0;
}

static int xen_pt_intel_opregion_read(XenPCIPassthroughState *s,
                                      XenPTReg *cfg_entry,
                                      uint32_t *value, uint32_t valid_mask)
{
    *value = igd_read_opregion(s);
    return 0;
}

static int xen_pt_intel_opregion_write(XenPCIPassthroughState *s,
                                       XenPTReg *cfg_entry, uint32_t *value,
                                       uint32_t dev_value, uint32_t valid_mask)
{
    igd_write_opregion(s, *value);
    return 0;
}
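/* How to read the tables that follow, using the Vendor ID entry as an
 * example (a restatement of the XenPTRegInfo rules above, not new
 * behavior): ro_mask = emu_mask = 0xFFFF means every bit is emulated and
 * read-only, and xen_pt_vendor_reg_init() seeds the emulated copy from
 * the real device, so the guest always sees the host's vendor ID but can
 * never write it through to hardware.
 */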
/* Header Type0 reg static information table */
static XenPTRegInfo xen_pt_emu_reg_header0[] = {
    /* Vendor ID reg */
    {
        .offset = PCI_VENDOR_ID,
        .size = 2,
        .init_val = 0x0000,
        .ro_mask = 0xFFFF,
        .emu_mask = 0xFFFF,
        .init = xen_pt_vendor_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_word_reg_write,
    },
    /* Device ID reg */
    {
        .offset = PCI_DEVICE_ID,
        .size = 2,
        .init_val = 0x0000,
        .ro_mask = 0xFFFF,
        .emu_mask = 0xFFFF,
        .init = xen_pt_device_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_word_reg_write,
    },
    /* Command reg */
    {
        .offset = PCI_COMMAND,
        .size = 2,
        .init_val = 0x0000,
        .res_mask = 0xF880,
        .emu_mask = 0x0743,
        .init = xen_pt_common_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_cmd_reg_write,
    },
    /* Capabilities Pointer reg */
    {
        .offset = PCI_CAPABILITY_LIST,
        .size = 1,
        .init_val = 0x00,
        .ro_mask = 0xFF,
        .emu_mask = 0xFF,
        .init = xen_pt_ptr_reg_init,
        .u.b.read = xen_pt_byte_reg_read,
        .u.b.write = xen_pt_byte_reg_write,
    },
    /* Status reg */
    /* use emulated Cap Ptr value to initialize,
     * so it needs to be declared after the Cap Ptr reg
     */
    {
        .offset = PCI_STATUS,
        .size = 2,
        .init_val = 0x0000,
        .res_mask = 0x0007,
        .ro_mask = 0x06F8,
        .emu_mask = 0x0010,
        .init = xen_pt_status_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_word_reg_write,
    },
    /* Cache Line Size reg */
    {
        .offset = PCI_CACHE_LINE_SIZE,
        .size = 1,
        .init_val = 0x00,
        .ro_mask = 0x00,
        .emu_mask = 0xFF,
        .init = xen_pt_common_reg_init,
        .u.b.read = xen_pt_byte_reg_read,
        .u.b.write = xen_pt_byte_reg_write,
    },
    /* Latency Timer reg */
    {
        .offset = PCI_LATENCY_TIMER,
        .size = 1,
        .init_val = 0x00,
        .ro_mask = 0x00,
        .emu_mask = 0xFF,
        .init = xen_pt_common_reg_init,
        .u.b.read = xen_pt_byte_reg_read,
        .u.b.write = xen_pt_byte_reg_write,
    },
    /* Header Type reg */
    {
        .offset = PCI_HEADER_TYPE,
        .size = 1,
        .init_val = 0x00,
        .ro_mask = 0xFF,
        .emu_mask = 0x00,
        .init = xen_pt_header_type_reg_init,
        .u.b.read = xen_pt_byte_reg_read,
        .u.b.write = xen_pt_byte_reg_write,
    },
    /* Interrupt Line reg */
    {
        .offset = PCI_INTERRUPT_LINE,
        .size = 1,
        .init_val = 0x00,
        .ro_mask = 0x00,
        .emu_mask = 0xFF,
        .init = xen_pt_common_reg_init,
        .u.b.read = xen_pt_byte_reg_read,
        .u.b.write = xen_pt_byte_reg_write,
    },
    /* Interrupt Pin reg */
    {
        .offset = PCI_INTERRUPT_PIN,
        .size = 1,
        .init_val = 0x00,
        .ro_mask = 0xFF,
        .emu_mask = 0xFF,
        .init = xen_pt_irqpin_reg_init,
        .u.b.read = xen_pt_byte_reg_read,
        .u.b.write = xen_pt_byte_reg_write,
    },
    /* BAR 0 reg */
    /* masks of the BARs need to be decided later, depending on IO/MEM type */
    {
        .offset = PCI_BASE_ADDRESS_0,
        .size = 4,
        .init_val = 0x00000000,
        .init = xen_pt_bar_reg_init,
        .u.dw.read = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* BAR 1 reg */
    {
        .offset = PCI_BASE_ADDRESS_1,
        .size = 4,
        .init_val = 0x00000000,
        .init = xen_pt_bar_reg_init,
        .u.dw.read = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* BAR 2 reg */
    {
        .offset = PCI_BASE_ADDRESS_2,
        .size = 4,
        .init_val = 0x00000000,
        .init = xen_pt_bar_reg_init,
        .u.dw.read = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* BAR 3 reg */
    {
        .offset = PCI_BASE_ADDRESS_3,
        .size = 4,
        .init_val = 0x00000000,
        .init = xen_pt_bar_reg_init,
        .u.dw.read = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* BAR 4 reg */
    {
        .offset = PCI_BASE_ADDRESS_4,
        .size = 4,
        .init_val = 0x00000000,
        .init = xen_pt_bar_reg_init,
        .u.dw.read = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* BAR 5 reg */
    {
        .offset = PCI_BASE_ADDRESS_5,
        .size = 4,
        .init_val = 0x00000000,
        .init = xen_pt_bar_reg_init,
        .u.dw.read = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* Expansion ROM BAR reg */
    {
        .offset = PCI_ROM_ADDRESS,
        .size = 4,
        .init_val = 0x00000000,
        .ro_mask = ~PCI_ROM_ADDRESS_MASK & ~PCI_ROM_ADDRESS_ENABLE,
        .emu_mask = (uint32_t)PCI_ROM_ADDRESS_MASK,
        .init = xen_pt_bar_reg_init,
        .u.dw.read = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_exp_rom_bar_reg_write,
    },
    {
        .size = 0,
    },
};


/*********************************
 * Vital Product Data Capability
 */

/* Vital Product Data Capability Structure reg static information table */
static XenPTRegInfo xen_pt_emu_reg_vpd[] = {
    {
        .offset = PCI_CAP_LIST_NEXT,
        .size = 1,
        .init_val = 0x00,
        .ro_mask = 0xFF,
        .emu_mask = 0xFF,
        .init = xen_pt_ptr_reg_init,
        .u.b.read = xen_pt_byte_reg_read,
        .u.b.write = xen_pt_byte_reg_write,
    },
    {
        .offset = PCI_VPD_ADDR,
        .size = 2,
        .ro_mask = 0x0003,
        .emu_mask = 0x0003,
        .init = xen_pt_common_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_word_reg_write,
    },
    {
        .size = 0,
    },
};


/**************************************
 * Vendor Specific Capability
 */

/* Vendor Specific Capability Structure reg static information table */
static XenPTRegInfo xen_pt_emu_reg_vendor[] = {
    {
        .offset = PCI_CAP_LIST_NEXT,
        .size = 1,
        .init_val = 0x00,
        .ro_mask = 0xFF,
        .emu_mask = 0xFF,
        .init = xen_pt_ptr_reg_init,
        .u.b.read = xen_pt_byte_reg_read,
        .u.b.write = xen_pt_byte_reg_write,
    },
    {
        .size = 0,
    },
};


/*****************************
 * PCI Express Capability
 */

static inline uint8_t get_capability_version(XenPCIPassthroughState *s,
                                             uint32_t offset)
{
    uint8_t flag;
    if (xen_host_pci_get_byte(&s->real_device, offset + PCI_EXP_FLAGS, &flag)) {
        return 0;
    }
    return flag & PCI_EXP_FLAGS_VERS;
}

static inline uint8_t get_device_type(XenPCIPassthroughState *s,
                                      uint32_t offset)
{
    uint8_t flag;
    if (xen_host_pci_get_byte(&s->real_device, offset + PCI_EXP_FLAGS, &flag)) {
        return 0;
    }
    return (flag & PCI_EXP_FLAGS_TYPE) >> 4;
}
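/* Worked example for the two helpers above: the low byte of the PCIe
 * Capabilities register packs the device/port type in bits 7:4 and the
 * capability version in bits 3:0 (PCI_EXP_FLAGS_VERS is 0x000f,
 * PCI_EXP_FLAGS_TYPE is 0x00f0). For a v2 legacy endpoint the byte reads
 * 0x12: get_capability_version() returns 0x12 & 0x0f = 2, and
 * get_device_type() returns (0x12 & 0xf0) >> 4 = 1 (PCI_EXP_TYPE_LEG_END).
 */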
/* initialize Link Control register */
static int xen_pt_linkctrl_reg_init(XenPCIPassthroughState *s,
                                    XenPTRegInfo *reg, uint32_t real_offset,
                                    uint32_t *data)
{
    uint8_t cap_ver = get_capability_version(s, real_offset - reg->offset);
    uint8_t dev_type = get_device_type(s, real_offset - reg->offset);

    /* no need to initialize in case of Root Complex Integrated Endpoint
     * with cap_ver 1.x; otherwise the unconditional assignment below would
     * clobber the XEN_PT_INVALID_REG marker and the register would never
     * be skipped
     */
    if ((dev_type == PCI_EXP_TYPE_RC_END) && (cap_ver == 1)) {
        *data = XEN_PT_INVALID_REG;
    } else {
        *data = reg->init_val;
    }
    return 0;
}
/* initialize Device Control 2 register */
static int xen_pt_devctrl2_reg_init(XenPCIPassthroughState *s,
                                    XenPTRegInfo *reg, uint32_t real_offset,
                                    uint32_t *data)
{
    uint8_t cap_ver = get_capability_version(s, real_offset - reg->offset);

    /* no need to initialize in case of cap_ver 1.x */
    if (cap_ver == 1) {
        *data = XEN_PT_INVALID_REG;
    } else {
        *data = reg->init_val;
    }
    return 0;
}
/* initialize Link Control 2 register */
static int xen_pt_linkctrl2_reg_init(XenPCIPassthroughState *s,
                                     XenPTRegInfo *reg, uint32_t real_offset,
                                     uint32_t *data)
{
    uint8_t cap_ver = get_capability_version(s, real_offset - reg->offset);
    uint32_t reg_field = 0;

    /* no need to initialize in case of cap_ver 1.x */
    if (cap_ver == 1) {
        reg_field = XEN_PT_INVALID_REG;
    } else {
        /* set Supported Link Speed */
        uint8_t lnkcap;
        int rc;
        rc = xen_host_pci_get_byte(&s->real_device,
                                   real_offset - reg->offset + PCI_EXP_LNKCAP,
                                   &lnkcap);
        if (rc) {
            return rc;
        }
        reg_field |= PCI_EXP_LNKCAP_SLS & lnkcap;
    }

    *data = reg_field;
    return 0;
}

/* PCI Express Capability Structure reg static information table */
static XenPTRegInfo xen_pt_emu_reg_pcie[] = {
    /* Next Pointer reg */
    {
        .offset = PCI_CAP_LIST_NEXT,
        .size = 1,
        .init_val = 0x00,
        .ro_mask = 0xFF,
        .emu_mask = 0xFF,
        .init = xen_pt_ptr_reg_init,
        .u.b.read = xen_pt_byte_reg_read,
        .u.b.write = xen_pt_byte_reg_write,
    },
    /* Device Capabilities reg */
    {
        .offset = PCI_EXP_DEVCAP,
        .size = 4,
        .init_val = 0x00000000,
        .ro_mask = 0xFFFFFFFF,
        .emu_mask = 0x10000000,
        .init = xen_pt_common_reg_init,
        .u.dw.read = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_long_reg_write,
    },
    /* Device Control reg */
    {
        .offset = PCI_EXP_DEVCTL,
        .size = 2,
        .init_val = 0x2810,
        .ro_mask = 0x8400,
        .emu_mask = 0xFFFF,
        .init = xen_pt_common_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_word_reg_write,
    },
    /* Device Status reg */
    {
        .offset = PCI_EXP_DEVSTA,
        .size = 2,
        .res_mask = 0xFFC0,
        .ro_mask = 0x0030,
        .init = xen_pt_common_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_word_reg_write,
    },
    /* Link Control reg */
    {
        .offset = PCI_EXP_LNKCTL,
        .size = 2,
        .init_val = 0x0000,
        .ro_mask = 0xFC34,
        .emu_mask = 0xFFFF,
        .init = xen_pt_linkctrl_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_word_reg_write,
    },
    /* Link Status reg */
    {
        .offset = PCI_EXP_LNKSTA,
        .size = 2,
        .ro_mask = 0x3FFF,
        .init = xen_pt_common_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_word_reg_write,
    },
    /* Device Control 2 reg */
    {
        .offset = 0x28,
        .size = 2,
        .init_val = 0x0000,
        .ro_mask = 0xFFE0,
        .emu_mask = 0xFFFF,
        .init = xen_pt_devctrl2_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_word_reg_write,
    },
    /* Link Control 2 reg */
    {
        .offset = 0x30,
        .size = 2,
        .init_val = 0x0000,
        .ro_mask = 0xE040,
        .emu_mask = 0xFFFF,
        .init = xen_pt_linkctrl2_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_word_reg_write,
    },
    {
        .size = 0,
    },
};


/*********************************
 * Power Management Capability
 */
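/* Note on the handler below: PCI_PM_CTRL_PME_STATUS is a write-1-to-clear
 * bit. If the value read back from the device were merged in unmasked, a
 * guest read-modify-write cycle would write the bit back as 1 and clear a
 * pending PME status by accident; masking it out of dev_value avoids that.
 */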
/* write Power Management Control/Status register */
static int xen_pt_pmcsr_reg_write(XenPCIPassthroughState *s,
                                  XenPTReg *cfg_entry, uint16_t *val,
                                  uint16_t dev_value, uint16_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint16_t writable_mask = 0;
    uint16_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
    uint16_t *data = cfg_entry->ptr.half_word;

    /* modify emulated register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value & ~PCI_PM_CTRL_PME_STATUS,
                              throughable_mask);

    return 0;
}

/* Power Management Capability reg static information table */
static XenPTRegInfo xen_pt_emu_reg_pm[] = {
    /* Next Pointer reg */
    {
        .offset = PCI_CAP_LIST_NEXT,
        .size = 1,
        .init_val = 0x00,
        .ro_mask = 0xFF,
        .emu_mask = 0xFF,
        .init = xen_pt_ptr_reg_init,
        .u.b.read = xen_pt_byte_reg_read,
        .u.b.write = xen_pt_byte_reg_write,
    },
    /* Power Management Capabilities reg */
    {
        .offset = PCI_CAP_FLAGS,
        .size = 2,
        .init_val = 0x0000,
        .ro_mask = 0xFFFF,
        .emu_mask = 0xF9C8,
        .init = xen_pt_common_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_word_reg_write,
    },
    /* PCI Power Management Control/Status reg */
    {
        .offset = PCI_PM_CTRL,
        .size = 2,
        .init_val = 0x0008,
        .res_mask = 0x00F0,
        .ro_mask = 0xE10C,
        .emu_mask = 0x810B,
        .init = xen_pt_common_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_pmcsr_reg_write,
    },
    {
        .size = 0,
    },
};


/********************************
 * MSI Capability
 */

/* Helper */
#define xen_pt_msi_check_type(offset, flags, what) \
        ((offset) == ((flags) & PCI_MSI_FLAGS_64BIT ? \
                      PCI_MSI_##what##_64 : PCI_MSI_##what##_32))
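/* Example of the helper above (illustrative): the Message Data register
 * sits at a different offset depending on whether the function has a
 * 64-bit address, so for flags with PCI_MSI_FLAGS_64BIT set,
 * xen_pt_msi_check_type(PCI_MSI_DATA_64, flags, DATA) is true while
 * xen_pt_msi_check_type(PCI_MSI_DATA_32, flags, DATA) is false. The init
 * functions below use this to mark whichever copy of a register does not
 * exist on this device as XEN_PT_INVALID_REG.
 */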
/* Message Control register */
static int xen_pt_msgctrl_reg_init(XenPCIPassthroughState *s,
                                   XenPTRegInfo *reg, uint32_t real_offset,
                                   uint32_t *data)
{
    XenPTMSI *msi = s->msi;
    uint16_t reg_field;
    int rc;

    /* use I/O device register's value as initial value */
    rc = xen_host_pci_get_word(&s->real_device, real_offset, &reg_field);
    if (rc) {
        return rc;
    }
    if (reg_field & PCI_MSI_FLAGS_ENABLE) {
        XEN_PT_LOG(&s->dev, "MSI already enabled, disabling it first\n");
        xen_host_pci_set_word(&s->real_device, real_offset,
                              reg_field & ~PCI_MSI_FLAGS_ENABLE);
    }
    msi->flags |= reg_field;
    msi->ctrl_offset = real_offset;
    msi->initialized = false;
    msi->mapped = false;

    *data = reg->init_val;
    return 0;
}
static int xen_pt_msgctrl_reg_write(XenPCIPassthroughState *s,
                                    XenPTReg *cfg_entry, uint16_t *val,
                                    uint16_t dev_value, uint16_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    XenPTMSI *msi = s->msi;
    uint16_t writable_mask = 0;
    uint16_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
    uint16_t *data = cfg_entry->ptr.half_word;

    /* Currently no support for multi-vector */
    if (*val & PCI_MSI_FLAGS_QSIZE) {
        XEN_PT_WARN(&s->dev, "Guest tries to set more than 1 vector, ctrl: %x\n",
                    *val);
    }

    /* modify emulated register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);
    msi->flags |= *data & ~PCI_MSI_FLAGS_ENABLE;

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);

    /* update MSI */
    if (*val & PCI_MSI_FLAGS_ENABLE) {
        /* setup MSI pirq for the first time */
        if (!msi->initialized) {
            /* init the physical one */
            XEN_PT_LOG(&s->dev, "setup MSI (register: %x).\n", *val);
            if (xen_pt_msi_setup(s)) {
                /* We do not broadcast the error to the framework code, so
                 * that MSI errors are contained in MSI emulation code and
                 * QEMU can go on running. Guest MSI just won't work in
                 * that case.
                 */
                *val &= ~PCI_MSI_FLAGS_ENABLE;
                XEN_PT_WARN(&s->dev, "Cannot map MSI (register: %x)!\n", *val);
                return 0;
            }
            if (xen_pt_msi_update(s)) {
                *val &= ~PCI_MSI_FLAGS_ENABLE;
                XEN_PT_WARN(&s->dev, "Cannot bind MSI (register: %x)!\n", *val);
                return 0;
            }
            msi->initialized = true;
            msi->mapped = true;
        }
        msi->flags |= PCI_MSI_FLAGS_ENABLE;
    } else if (msi->mapped) {
        xen_pt_msi_disable(s);
    }

    return 0;
}

/* initialize Message Upper Address register */
static int xen_pt_msgaddr64_reg_init(XenPCIPassthroughState *s,
                                     XenPTRegInfo *reg, uint32_t real_offset,
                                     uint32_t *data)
{
    /* no need to initialize in case of 32 bit type */
    if (!(s->msi->flags & PCI_MSI_FLAGS_64BIT)) {
        *data = XEN_PT_INVALID_REG;
    } else {
        *data = reg->init_val;
    }

    return 0;
}
/* this function will be called twice (for 32 bit and 64 bit type) */
/* initialize Message Data register */
static int xen_pt_msgdata_reg_init(XenPCIPassthroughState *s,
                                   XenPTRegInfo *reg, uint32_t real_offset,
                                   uint32_t *data)
{
    uint32_t flags = s->msi->flags;
    uint32_t offset = reg->offset;

    /* check whether the offset matches the type */
    if (xen_pt_msi_check_type(offset, flags, DATA)) {
        *data = reg->init_val;
    } else {
        *data = XEN_PT_INVALID_REG;
    }
    return 0;
}

/* this function will be called twice (for 32 bit and 64 bit type) */
/* initialize Mask register */
static int xen_pt_mask_reg_init(XenPCIPassthroughState *s,
                                XenPTRegInfo *reg, uint32_t real_offset,
                                uint32_t *data)
{
    uint32_t flags = s->msi->flags;

    /* check whether the offset matches the type */
    if (!(flags & PCI_MSI_FLAGS_MASKBIT)) {
        *data = XEN_PT_INVALID_REG;
    } else if (xen_pt_msi_check_type(reg->offset, flags, MASK)) {
        *data = reg->init_val;
    } else {
        *data = XEN_PT_INVALID_REG;
    }
    return 0;
}

/* this function will be called twice (for 32 bit and 64 bit type) */
/* initialize Pending register */
static int xen_pt_pending_reg_init(XenPCIPassthroughState *s,
                                   XenPTRegInfo *reg, uint32_t real_offset,
                                   uint32_t *data)
{
    uint32_t flags = s->msi->flags;

    /* check whether the offset matches the type */
    if (!(flags & PCI_MSI_FLAGS_MASKBIT)) {
        *data = XEN_PT_INVALID_REG;
    } else if (xen_pt_msi_check_type(reg->offset, flags, PENDING)) {
        *data = reg->init_val;
    } else {
        *data = XEN_PT_INVALID_REG;
    }
    return 0;
}
/* write Message Address register */
static int xen_pt_msgaddr32_reg_write(XenPCIPassthroughState *s,
                                      XenPTReg *cfg_entry, uint32_t *val,
                                      uint32_t dev_value, uint32_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint32_t writable_mask = 0;
    uint32_t old_addr = *cfg_entry->ptr.word;
    uint32_t *data = cfg_entry->ptr.word;

    /* modify emulated register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);
    s->msi->addr_lo = *data;

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, 0);

    /* update MSI */
    if (*data != old_addr) {
        if (s->msi->mapped) {
            xen_pt_msi_update(s);
        }
    }

    return 0;
}
/* write Message Upper Address register */
static int xen_pt_msgaddr64_reg_write(XenPCIPassthroughState *s,
                                      XenPTReg *cfg_entry, uint32_t *val,
                                      uint32_t dev_value, uint32_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint32_t writable_mask = 0;
    uint32_t old_addr = *cfg_entry->ptr.word;
    uint32_t *data = cfg_entry->ptr.word;

    /* check whether the type is 64 bit or not */
    if (!(s->msi->flags & PCI_MSI_FLAGS_64BIT)) {
        XEN_PT_ERR(&s->dev,
                   "Can't write to the upper address without 64 bit support\n");
        return -1;
    }

    /* modify emulated register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);
    /* update the msi_info too */
    s->msi->addr_hi = *data;

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, 0);

    /* update MSI */
    if (*data != old_addr) {
        if (s->msi->mapped) {
            xen_pt_msi_update(s);
        }
    }

    return 0;
}


/* this function will be called twice (for 32 bit and 64 bit type) */
/* write Message Data register */
static int xen_pt_msgdata_reg_write(XenPCIPassthroughState *s,
                                    XenPTReg *cfg_entry, uint16_t *val,
                                    uint16_t dev_value, uint16_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    XenPTMSI *msi = s->msi;
    uint16_t writable_mask = 0;
    uint16_t old_data = *cfg_entry->ptr.half_word;
    uint32_t offset = reg->offset;
    uint16_t *data = cfg_entry->ptr.half_word;

    /* check whether the offset matches the type */
    if (!xen_pt_msi_check_type(offset, msi->flags, DATA)) {
        /* exit I/O emulator */
        XEN_PT_ERR(&s->dev, "the offset does not match the 32/64 bit type!\n");
        return -1;
    }

    /* modify emulated register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);
    /* update the msi_info too */
    msi->data = *data;

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, 0);

    /* update MSI */
    if (*data != old_data) {
        if (msi->mapped) {
            xen_pt_msi_update(s);
        }
    }

    return 0;
}
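/* The net effect of the three write handlers above (a summary, not new
 * logic): the guest-visible address/data pair is kept only in dev.config
 * and in s->msi; nothing is passed through to the device (throughable
 * mask 0), because the real vector is routed through a Xen pirq set up
 * by xen_pt_msi_setup()/xen_pt_msi_update(). A typical guest sequence is
 * to write the address (and upper address), write the data, then set
 * PCI_MSI_FLAGS_ENABLE in Message Control, which triggers the actual
 * binding in xen_pt_msgctrl_reg_write().
 */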
/* MSI Capability Structure reg static information table */
static XenPTRegInfo xen_pt_emu_reg_msi[] = {
    /* Next Pointer reg */
    {
        .offset = PCI_CAP_LIST_NEXT,
        .size = 1,
        .init_val = 0x00,
        .ro_mask = 0xFF,
        .emu_mask = 0xFF,
        .init = xen_pt_ptr_reg_init,
        .u.b.read = xen_pt_byte_reg_read,
        .u.b.write = xen_pt_byte_reg_write,
    },
    /* Message Control reg */
    {
        .offset = PCI_MSI_FLAGS,
        .size = 2,
        .init_val = 0x0000,
        .res_mask = 0xFE00,
        .ro_mask = 0x018E,
        .emu_mask = 0x017E,
        .init = xen_pt_msgctrl_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_msgctrl_reg_write,
    },
    /* Message Address reg */
    {
        .offset = PCI_MSI_ADDRESS_LO,
        .size = 4,
        .init_val = 0x00000000,
        .ro_mask = 0x00000003,
        .emu_mask = 0xFFFFFFFF,
        .init = xen_pt_common_reg_init,
        .u.dw.read = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_msgaddr32_reg_write,
    },
    /* Message Upper Address reg (if PCI_MSI_FLAGS_64BIT set) */
    {
        .offset = PCI_MSI_ADDRESS_HI,
        .size = 4,
        .init_val = 0x00000000,
        .ro_mask = 0x00000000,
        .emu_mask = 0xFFFFFFFF,
        .init = xen_pt_msgaddr64_reg_init,
        .u.dw.read = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_msgaddr64_reg_write,
    },
    /* Message Data reg (16 bits of data for 32-bit devices) */
    {
        .offset = PCI_MSI_DATA_32,
        .size = 2,
        .init_val = 0x0000,
        .ro_mask = 0x0000,
        .emu_mask = 0xFFFF,
        .init = xen_pt_msgdata_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_msgdata_reg_write,
    },
    /* Message Data reg (16 bits of data for 64-bit devices) */
    {
        .offset = PCI_MSI_DATA_64,
        .size = 2,
        .init_val = 0x0000,
        .ro_mask = 0x0000,
        .emu_mask = 0xFFFF,
        .init = xen_pt_msgdata_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_msgdata_reg_write,
    },
    /* Mask reg (if PCI_MSI_FLAGS_MASKBIT set, for 32-bit devices) */
    {
        .offset = PCI_MSI_MASK_32,
        .size = 4,
        .init_val = 0x00000000,
        .ro_mask = 0xFFFFFFFF,
        .emu_mask = 0xFFFFFFFF,
        .init = xen_pt_mask_reg_init,
        .u.dw.read = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_long_reg_write,
    },
    /* Mask reg (if PCI_MSI_FLAGS_MASKBIT set, for 64-bit devices) */
    {
        .offset = PCI_MSI_MASK_64,
        .size = 4,
        .init_val = 0x00000000,
        .ro_mask = 0xFFFFFFFF,
        .emu_mask = 0xFFFFFFFF,
        .init = xen_pt_mask_reg_init,
        .u.dw.read = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_long_reg_write,
    },
    /* Pending reg (if PCI_MSI_FLAGS_MASKBIT set, for 32-bit devices) */
    {
        .offset = PCI_MSI_MASK_32 + 4,
        .size = 4,
        .init_val = 0x00000000,
        .ro_mask = 0xFFFFFFFF,
        .emu_mask = 0x00000000,
        .init = xen_pt_pending_reg_init,
        .u.dw.read = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_long_reg_write,
    },
    /* Pending reg (if PCI_MSI_FLAGS_MASKBIT set, for 64-bit devices) */
    {
        .offset = PCI_MSI_MASK_64 + 4,
        .size = 4,
        .init_val = 0x00000000,
        .ro_mask = 0xFFFFFFFF,
        .emu_mask = 0x00000000,
        .init = xen_pt_pending_reg_init,
        .u.dw.read = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_long_reg_write,
    },
    {
        .size = 0,
    },
};


/**************************************
 * MSI-X Capability
 */

/* Message Control register for MSI-X */
static int xen_pt_msixctrl_reg_init(XenPCIPassthroughState *s,
                                    XenPTRegInfo *reg, uint32_t real_offset,
                                    uint32_t *data)
{
    uint16_t reg_field;
    int rc;

    /* use I/O device register's value as initial value */
    rc = xen_host_pci_get_word(&s->real_device, real_offset, &reg_field);
    if (rc) {
        return rc;
    }
    if (reg_field & PCI_MSIX_FLAGS_ENABLE) {
        XEN_PT_LOG(&s->dev, "MSI-X already enabled, disabling it first\n");
        xen_host_pci_set_word(&s->real_device, real_offset,
                              reg_field & ~PCI_MSIX_FLAGS_ENABLE);
    }

    s->msix->ctrl_offset = real_offset;

    *data = reg->init_val;
    return 0;
}
static int xen_pt_msixctrl_reg_write(XenPCIPassthroughState *s,
                                     XenPTReg *cfg_entry, uint16_t *val,
                                     uint16_t dev_value, uint16_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint16_t writable_mask = 0;
    uint16_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
    int debug_msix_enabled_old;
    uint16_t *data = cfg_entry->ptr.half_word;

    /* modify emulated register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);

    /* update MSI-X */
    if ((*val & PCI_MSIX_FLAGS_ENABLE)
        && !(*val & PCI_MSIX_FLAGS_MASKALL)) {
        xen_pt_msix_update(s);
    } else if (!(*val & PCI_MSIX_FLAGS_ENABLE) && s->msix->enabled) {
        xen_pt_msix_disable(s);
    }

    debug_msix_enabled_old = s->msix->enabled;
    s->msix->enabled = !!(*val & PCI_MSIX_FLAGS_ENABLE);
    if (s->msix->enabled != debug_msix_enabled_old) {
        XEN_PT_LOG(&s->dev, "%s MSI-X\n",
                   s->msix->enabled ? "enable" : "disable");
    }

    return 0;
}

/* MSI-X Capability Structure reg static information table */
static XenPTRegInfo xen_pt_emu_reg_msix[] = {
    /* Next Pointer reg */
    {
        .offset = PCI_CAP_LIST_NEXT,
        .size = 1,
        .init_val = 0x00,
        .ro_mask = 0xFF,
        .emu_mask = 0xFF,
        .init = xen_pt_ptr_reg_init,
        .u.b.read = xen_pt_byte_reg_read,
        .u.b.write = xen_pt_byte_reg_write,
    },
    /* Message Control reg */
    {
        .offset = PCI_MSI_FLAGS,
        .size = 2,
        .init_val = 0x0000,
        .res_mask = 0x3800,
        .ro_mask = 0x07FF,
        .emu_mask = 0x0000,
        .init = xen_pt_msixctrl_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_msixctrl_reg_write,
    },
    {
        .size = 0,
    },
};

static XenPTRegInfo xen_pt_emu_reg_igd_opregion[] = {
    /* Intel IGFX OpRegion reg */
    {
        .offset = 0x0,
        .size = 4,
        .init_val = 0,
        .u.dw.read = xen_pt_intel_opregion_read,
        .u.dw.write = xen_pt_intel_opregion_write,
    },
    {
        .size = 0,
    },
};

/****************************
 * Capabilities
 */

/* capability structure register group size functions */

static int xen_pt_reg_grp_size_init(XenPCIPassthroughState *s,
                                    const XenPTRegGroupInfo *grp_reg,
                                    uint32_t base_offset, uint8_t *size)
{
    *size = grp_reg->grp_size;
    return 0;
}
/* get Vendor Specific Capability Structure register group size */
static int xen_pt_vendor_size_init(XenPCIPassthroughState *s,
                                   const XenPTRegGroupInfo *grp_reg,
                                   uint32_t base_offset, uint8_t *size)
{
    return xen_host_pci_get_byte(&s->real_device, base_offset + 0x02, size);
}
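/* Note (per the PCI Local Bus spec, stated here for context): a Vendor
 * Specific capability carries its own structure length in the byte at
 * capability offset 0x02, which is why the group size above is read from
 * base_offset + 0x02 rather than taken from a fixed table entry.
 */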
/* get PCI Express Capability Structure register group size */
static int xen_pt_pcie_size_init(XenPCIPassthroughState *s,
                                 const XenPTRegGroupInfo *grp_reg,
                                 uint32_t base_offset, uint8_t *size)
{
    PCIDevice *d = &s->dev;
    uint8_t version = get_capability_version(s, base_offset);
    uint8_t type = get_device_type(s, base_offset);
    uint8_t pcie_size = 0;


    /* calculate size depending on capability version and device/port type */
    /* in case of PCI Express Base Specification Rev 1.x */
    if (version == 1) {
        /* The PCI Express Capabilities, Device Capabilities, and Device
         * Status/Control registers are required for all PCI Express devices.
         * The Link Capabilities and Link Status/Control are required for all
         * Endpoints that are not Root Complex Integrated Endpoints. Endpoints
         * are not required to implement registers other than those listed
         * above and may terminate the capability structure there.
         */
        switch (type) {
        case PCI_EXP_TYPE_ENDPOINT:
        case PCI_EXP_TYPE_LEG_END:
            pcie_size = 0x14;
            break;
        case PCI_EXP_TYPE_RC_END:
            /* has no link */
            pcie_size = 0x0C;
            break;
        /* only Endpoint passthrough is supported */
        case PCI_EXP_TYPE_ROOT_PORT:
        case PCI_EXP_TYPE_UPSTREAM:
        case PCI_EXP_TYPE_DOWNSTREAM:
        case PCI_EXP_TYPE_PCI_BRIDGE:
        case PCI_EXP_TYPE_PCIE_BRIDGE:
        case PCI_EXP_TYPE_RC_EC:
        default:
            XEN_PT_ERR(d, "Unsupported device/port type %#x.\n", type);
            return -1;
        }
    }
    /* in case of PCI Express Base Specification Rev 2.0 */
    else if (version == 2) {
        switch (type) {
        case PCI_EXP_TYPE_ENDPOINT:
        case PCI_EXP_TYPE_LEG_END:
        case PCI_EXP_TYPE_RC_END:
            /* For Functions that do not implement the registers,
             * these spaces must be hardwired to 0b.
             */
            pcie_size = 0x3C;
            break;
        /* only Endpoint passthrough is supported */
        case PCI_EXP_TYPE_ROOT_PORT:
        case PCI_EXP_TYPE_UPSTREAM:
        case PCI_EXP_TYPE_DOWNSTREAM:
        case PCI_EXP_TYPE_PCI_BRIDGE:
        case PCI_EXP_TYPE_PCIE_BRIDGE:
        case PCI_EXP_TYPE_RC_EC:
        default:
            XEN_PT_ERR(d, "Unsupported device/port type %#x.\n", type);
            return -1;
        }
    } else {
        XEN_PT_ERR(d, "Unsupported capability version %#x.\n", version);
        return -1;
    }

    *size = pcie_size;
    return 0;
}
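/* Layout background for the function below (standard MSI capability
 * format, summarized here for context): the base structure is 0x0a bytes
 * (ID, Next, Message Control, Message Address, Message Data). A 64-bit
 * function inserts a 4-byte Message Upper Address, and per-vector masking
 * appends 2 bytes of padding after Message Data plus 4-byte Mask and
 * 4-byte Pending registers, hence the "+ 4" and "+ 10" adjustments.
 */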
/* get MSI Capability Structure register group size */
static int xen_pt_msi_size_init(XenPCIPassthroughState *s,
                                const XenPTRegGroupInfo *grp_reg,
                                uint32_t base_offset, uint8_t *size)
{
    uint16_t msg_ctrl = 0;
    uint8_t msi_size = 0xa;
    int rc;

    rc = xen_host_pci_get_word(&s->real_device, base_offset + PCI_MSI_FLAGS,
                               &msg_ctrl);
    if (rc) {
        return rc;
    }
    /* check whether the device is 64-bit capable and/or supports
     * per-vector masking, and size the capability accordingly
     */
    if (msg_ctrl & PCI_MSI_FLAGS_64BIT) {
        msi_size += 4;
    }
    if (msg_ctrl & PCI_MSI_FLAGS_MASKBIT) {
        msi_size += 10;
    }

    s->msi = g_new0(XenPTMSI, 1);
    s->msi->pirq = XEN_PT_UNASSIGNED_PIRQ;

    *size = msi_size;
    return 0;
}
/* get MSI-X Capability Structure register group size */
static int xen_pt_msix_size_init(XenPCIPassthroughState *s,
                                 const XenPTRegGroupInfo *grp_reg,
                                 uint32_t base_offset, uint8_t *size)
{
    int rc = 0;

    rc = xen_pt_msix_init(s, base_offset);

    if (rc < 0) {
        XEN_PT_ERR(&s->dev, "Internal error: xen_pt_msix_init failed.\n");
        return rc;
    }

    *size = grp_reg->grp_size;
    return 0;
}


static const XenPTRegGroupInfo xen_pt_emu_reg_grps[] = {
    /* Header Type0 reg group */
    {
        .grp_id = 0xFF,
        .grp_type = XEN_PT_GRP_TYPE_EMU,
        .grp_size = 0x40,
        .size_init = xen_pt_reg_grp_size_init,
        .emu_regs = xen_pt_emu_reg_header0,
    },
    /* PCI PowerManagement Capability reg group */
    {
        .grp_id = PCI_CAP_ID_PM,
        .grp_type = XEN_PT_GRP_TYPE_EMU,
        .grp_size = PCI_PM_SIZEOF,
        .size_init = xen_pt_reg_grp_size_init,
        .emu_regs = xen_pt_emu_reg_pm,
    },
    /* AGP Capability Structure reg group */
    {
        .grp_id = PCI_CAP_ID_AGP,
        .grp_type = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size = 0x30,
        .size_init = xen_pt_reg_grp_size_init,
    },
    /* Vital Product Data Capability Structure reg group */
    {
        .grp_id = PCI_CAP_ID_VPD,
        .grp_type = XEN_PT_GRP_TYPE_EMU,
        .grp_size = 0x08,
        .size_init = xen_pt_reg_grp_size_init,
        .emu_regs = xen_pt_emu_reg_vpd,
    },
    /* Slot Identification reg group */
    {
        .grp_id = PCI_CAP_ID_SLOTID,
        .grp_type = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size = 0x04,
        .size_init = xen_pt_reg_grp_size_init,
    },
    /* MSI Capability Structure reg group */
    {
        .grp_id = PCI_CAP_ID_MSI,
        .grp_type = XEN_PT_GRP_TYPE_EMU,
        .grp_size = 0xFF,
        .size_init = xen_pt_msi_size_init,
        .emu_regs = xen_pt_emu_reg_msi,
    },
    /* PCI-X Capabilities List Item reg group */
    {
        .grp_id = PCI_CAP_ID_PCIX,
        .grp_type = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size = 0x18,
        .size_init = xen_pt_reg_grp_size_init,
    },
    /* Vendor Specific Capability Structure reg group */
    {
        .grp_id = PCI_CAP_ID_VNDR,
        .grp_type = XEN_PT_GRP_TYPE_EMU,
        .grp_size = 0xFF,
        .size_init = xen_pt_vendor_size_init,
        .emu_regs = xen_pt_emu_reg_vendor,
    },
    /* SHPC Capability List Item reg group */
    {
        .grp_id = PCI_CAP_ID_SHPC,
        .grp_type = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size = 0x08,
        .size_init = xen_pt_reg_grp_size_init,
    },
    /* Subsystem ID and Subsystem Vendor ID Capability List Item reg group */
    {
        .grp_id = PCI_CAP_ID_SSVID,
        .grp_type = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size = 0x08,
        .size_init = xen_pt_reg_grp_size_init,
    },
    /* AGP 8x Capability Structure reg group */
    {
        .grp_id = PCI_CAP_ID_AGP3,
        .grp_type = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size = 0x30,
        .size_init = xen_pt_reg_grp_size_init,
    },
    /* PCI Express Capability Structure reg group */
    {
        .grp_id = PCI_CAP_ID_EXP,
        .grp_type = XEN_PT_GRP_TYPE_EMU,
        .grp_size = 0xFF,
        .size_init = xen_pt_pcie_size_init,
        .emu_regs = xen_pt_emu_reg_pcie,
    },
    /* MSI-X Capability Structure reg group */
    {
        .grp_id = PCI_CAP_ID_MSIX,
        .grp_type = XEN_PT_GRP_TYPE_EMU,
        .grp_size = 0x0C,
        .size_init = xen_pt_msix_size_init,
        .emu_regs = xen_pt_emu_reg_msix,
    },
    /* Intel IGD Opregion group */
    {
        .grp_id = XEN_PCI_INTEL_OPREGION,
        .grp_type = XEN_PT_GRP_TYPE_EMU,
        .grp_size = 0x4,
        .size_init = xen_pt_reg_grp_size_init,
        .emu_regs = xen_pt_emu_reg_igd_opregion,
    },
    {
        .grp_size = 0,
    },
};
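/* The function below rewrites the guest-visible capability linked list.
 * Hardwired groups (XEN_PT_GRP_TYPE_HARDWIRED) and capabilities hidden by
 * xen_pt_hide_dev_cap() are skipped over, so the emulated Capabilities
 * Pointer / Next Pointer simply links past them and the guest never sees
 * those structures at all.
 */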
/* initialize Capabilities Pointer or Next Pointer register */
static int xen_pt_ptr_reg_init(XenPCIPassthroughState *s,
                               XenPTRegInfo *reg, uint32_t real_offset,
                               uint32_t *data)
{
    int i, rc;
    uint8_t reg_field;
    uint8_t cap_id = 0;

    rc = xen_host_pci_get_byte(&s->real_device, real_offset, &reg_field);
    if (rc) {
        return rc;
    }
    /* find capability offset */
    while (reg_field) {
        for (i = 0; xen_pt_emu_reg_grps[i].grp_size != 0; i++) {
            if (xen_pt_hide_dev_cap(&s->real_device,
                                    xen_pt_emu_reg_grps[i].grp_id)) {
                continue;
            }

            rc = xen_host_pci_get_byte(&s->real_device,
                                       reg_field + PCI_CAP_LIST_ID, &cap_id);
            if (rc) {
                XEN_PT_ERR(&s->dev, "Failed to read capability @0x%x (rc:%d)\n",
                           reg_field + PCI_CAP_LIST_ID, rc);
                return rc;
            }
            if (xen_pt_emu_reg_grps[i].grp_id == cap_id) {
                if (xen_pt_emu_reg_grps[i].grp_type == XEN_PT_GRP_TYPE_EMU) {
                    goto out;
                }
                /* ignore the hardwired capability, find the next one */
                break;
            }
        }

        /* next capability */
        rc = xen_host_pci_get_byte(&s->real_device,
                                   reg_field + PCI_CAP_LIST_NEXT, &reg_field);
        if (rc) {
            return rc;
        }
    }

out:
    *data = reg_field;
    return 0;
}


/*************
 * Main
 */

static uint8_t find_cap_offset(XenPCIPassthroughState *s, uint8_t cap)
{
    uint8_t id;
    unsigned max_cap = XEN_PCI_CAP_MAX;
    uint8_t pos = PCI_CAPABILITY_LIST;
    uint8_t status = 0;

    if (xen_host_pci_get_byte(&s->real_device, PCI_STATUS, &status)) {
        return 0;
    }
    if ((status & PCI_STATUS_CAP_LIST) == 0) {
        return 0;
    }

    while (max_cap--) {
        if (xen_host_pci_get_byte(&s->real_device, pos, &pos)) {
            break;
        }
        if (pos < PCI_CONFIG_HEADER_SIZE) {
            break;
        }

        pos &= ~3;
        if (xen_host_pci_get_byte(&s->real_device,
                                  pos + PCI_CAP_LIST_ID, &id)) {
            break;
        }

        if (id == 0xff) {
            break;
        }
        if (id == cap) {
            return pos;
        }

        pos += PCI_CAP_LIST_NEXT;
    }
    return 0;
}
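/* A worked example for the sync logic in xen_pt_config_reg_init() below
 * (numbers are illustrative): take the Status register entry, size 2 and
 * emu_mask 0x0010. Then size_mask = 0xFFFF and host_mask = 0xFFEF, i.e.
 * every bit except the emulated Capabilities List bit. If the init value
 * ('data') and the host value ('val') disagree in any host_mask bit, the
 * two are merged and the discrepancy is logged; otherwise the emulated
 * init value wins. The result is what dev.config ends up holding.
 */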
static int xen_pt_config_reg_init(XenPCIPassthroughState *s,
                                  XenPTRegGroup *reg_grp, XenPTRegInfo *reg)
{
    XenPTReg *reg_entry;
    uint32_t data = 0;
    int rc = 0;

    reg_entry = g_new0(XenPTReg, 1);
    reg_entry->reg = reg;

    if (reg->init) {
        uint32_t host_mask, size_mask;
        unsigned int offset;
        uint32_t val;

        /* initialize emulated register */
        rc = reg->init(s, reg_entry->reg,
                       reg_grp->base_offset + reg->offset, &data);
        if (rc < 0) {
            g_free(reg_entry);
            return rc;
        }
        if (data == XEN_PT_INVALID_REG) {
            /* free unused BAR register entry */
            g_free(reg_entry);
            return 0;
        }
        /* Sync up the data to dev.config */
        offset = reg_grp->base_offset + reg->offset;
        size_mask = 0xFFFFFFFF >> ((4 - reg->size) << 3);

        switch (reg->size) {
        case 1: rc = xen_host_pci_get_byte(&s->real_device, offset,
                                           (uint8_t *)&val);
                break;
        case 2: rc = xen_host_pci_get_word(&s->real_device, offset,
                                           (uint16_t *)&val);
                break;
        case 4: rc = xen_host_pci_get_long(&s->real_device, offset, &val);
                break;
        default: abort(); /* reg->size is always 1, 2 or 4 here */
        }
        if (rc) {
            /* Serious issues when we cannot read the host values! */
            g_free(reg_entry);
            return rc;
        }
        /* Bits set in emu_mask are the ones we emulate. The dev.config shall
         * contain the emulated view of the guest - therefore we flip the mask
         * to mask out the host values (which dev.config initially has).
         */
        host_mask = size_mask & ~reg->emu_mask;

        if ((data & host_mask) != (val & host_mask)) {
            uint32_t new_val;

            /* Mask out host (including past size). */
            new_val = val & host_mask;
            /* Merge emulated ones (excluding the non-emulated ones). */
            new_val |= data & host_mask;
            /* Leave intact host and emulated values past the size - even
             * though we do not care as we write per reg->size granularity,
             * but for the logging below let's have the proper value.
             */
            new_val |= ((val | data)) & ~size_mask;
            XEN_PT_LOG(&s->dev, "Offset 0x%04x mismatch! Emulated=0x%04x, "
                       "host=0x%04x, syncing to 0x%04x.\n",
                       offset, data, val, new_val);
            val = new_val;
        } else {
            val = data;
        }

        if (val & ~size_mask) {
            XEN_PT_ERR(&s->dev, "Offset 0x%04x:0x%04x expands past register "
                       "size (%d)!\n", offset, val, reg->size);
            g_free(reg_entry);
            return -ENXIO;
        }
        /* This could be just pci_set_long as we don't modify the bits
         * past reg->size, but in case this routine is run in parallel or the
         * init value is larger, we do not want to overwrite registers.
         */
        switch (reg->size) {
        case 1: pci_set_byte(s->dev.config + offset, (uint8_t)val);
                break;
        case 2: pci_set_word(s->dev.config + offset, (uint16_t)val);
                break;
        case 4: pci_set_long(s->dev.config + offset, val);
                break;
        default: abort(); /* not reachable, see above */
        }
        /* set register value pointer to the data. */
        reg_entry->ptr.byte = s->dev.config + offset;

    }
    /* list add register entry */
    QLIST_INSERT_HEAD(&reg_grp->reg_tbl_list, reg_entry, entries);

    return 0;
}
int xen_pt_config_init(XenPCIPassthroughState *s)
{
    int i, rc;

    QLIST_INIT(&s->reg_grps);

    for (i = 0; xen_pt_emu_reg_grps[i].grp_size != 0; i++) {
        uint32_t reg_grp_offset = 0;
        XenPTRegGroup *reg_grp_entry = NULL;

        if (xen_pt_emu_reg_grps[i].grp_id != 0xFF
            && xen_pt_emu_reg_grps[i].grp_id != XEN_PCI_INTEL_OPREGION) {
            if (xen_pt_hide_dev_cap(&s->real_device,
                                    xen_pt_emu_reg_grps[i].grp_id)) {
                continue;
            }

            reg_grp_offset = find_cap_offset(s, xen_pt_emu_reg_grps[i].grp_id);

            if (!reg_grp_offset) {
                continue;
            }
        }

        /*
         * By default we will trap up to 0x40 in the cfg space.
         * If an Intel IGD device is passed through, we need to trap 0xfc,
         * therefore the size should be 0xff.
         */
        if (xen_pt_emu_reg_grps[i].grp_id == XEN_PCI_INTEL_OPREGION) {
            reg_grp_offset = XEN_PCI_INTEL_OPREGION;
        }

        reg_grp_entry = g_new0(XenPTRegGroup, 1);
        QLIST_INIT(&reg_grp_entry->reg_tbl_list);
        QLIST_INSERT_HEAD(&s->reg_grps, reg_grp_entry, entries);

        reg_grp_entry->base_offset = reg_grp_offset;
        reg_grp_entry->reg_grp = xen_pt_emu_reg_grps + i;
        if (xen_pt_emu_reg_grps[i].size_init) {
            /* get register group size */
            rc = xen_pt_emu_reg_grps[i].size_init(s, reg_grp_entry->reg_grp,
                                                  reg_grp_offset,
                                                  &reg_grp_entry->size);
            if (rc < 0) {
                XEN_PT_LOG(&s->dev, "Failed to initialize %d/%zu, type=0x%x, rc:%d\n",
                           i, ARRAY_SIZE(xen_pt_emu_reg_grps),
                           xen_pt_emu_reg_grps[i].grp_type, rc);
                xen_pt_config_delete(s);
                return rc;
            }
        }

        if (xen_pt_emu_reg_grps[i].grp_type == XEN_PT_GRP_TYPE_EMU) {
            if (xen_pt_emu_reg_grps[i].emu_regs) {
                int j = 0;
                XenPTRegInfo *regs = xen_pt_emu_reg_grps[i].emu_regs;
                /* initialize each capability register in the group */
                for (j = 0; regs->size != 0; j++, regs++) {
                    rc = xen_pt_config_reg_init(s, reg_grp_entry, regs);
                    if (rc < 0) {
                        XEN_PT_LOG(&s->dev, "Failed to initialize %d/%zu reg 0x%x in grp_type=0x%x (%d/%zu), rc=%d\n",
                                   j, ARRAY_SIZE(xen_pt_emu_reg_grps[i].emu_regs),
                                   regs->offset, xen_pt_emu_reg_grps[i].grp_type,
                                   i, ARRAY_SIZE(xen_pt_emu_reg_grps), rc);
                        xen_pt_config_delete(s);
                        return rc;
                    }
                }
            }
        }
    }

    return 0;
}

/* delete all emulated registers */
void xen_pt_config_delete(XenPCIPassthroughState *s)
{
    struct XenPTRegGroup *reg_group, *next_grp;
    struct XenPTReg *reg, *next_reg;

    /* free MSI/MSI-X info table */
    if (s->msix) {
        xen_pt_msix_delete(s);
    }
    g_free(s->msi);

    /* free all register group entries */
    QLIST_FOREACH_SAFE(reg_group, &s->reg_grps, entries, next_grp) {
        /* free all register entries */
        QLIST_FOREACH_SAFE(reg, &reg_group->reg_tbl_list, entries, next_reg) {
            QLIST_REMOVE(reg, entries);
            g_free(reg);
        }

        QLIST_REMOVE(reg_group, entries);
        g_free(reg_group);
    }
}