/*
 * Copyright (c) 2007, Neocleus Corporation.
 * Copyright (c) 2007, Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Alex Novik <alex@neocleus.com>
 * Allen Kay <allen.m.kay@intel.com>
 * Guy Zana <guy@neocleus.com>
 *
 * This file implements direct PCI assignment to an HVM guest
 */

#include "qemu/timer.h"
#include "hw/xen/xen_backend.h"
#include "xen_pt.h"

#define XEN_PT_MERGE_VALUE(value, data, val_mask) \
    (((value) & (val_mask)) | ((data) & ~(val_mask)))

#define XEN_PT_INVALID_REG 0xFFFFFFFF  /* invalid register value */

/* prototype */

static int xen_pt_ptr_reg_init(XenPCIPassthroughState *s, XenPTRegInfo *reg,
                               uint32_t real_offset, uint32_t *data);


/* helper */

/* A return value of 1 means the capability should NOT be exposed to guest. */
static int xen_pt_hide_dev_cap(const XenHostPCIDevice *d, uint8_t grp_id)
{
    switch (grp_id) {
    case PCI_CAP_ID_EXP:
        /* The PCI Express Capability Structure of the VF of the Intel 82599
         * 10GbE Controller looks trivial, e.g., the PCI Express Capabilities
         * Register is 0, so the Capability Version is 0 and
         * xen_pt_pcie_size_init() would fail.  We should not try to expose
         * it to the guest.
         *
         * See 'Table 9.7. VF PCIe Configuration Space' of the datasheet:
         * http://download.intel.com/design/network/datashts/82599_datasheet.pdf
         */
        if (d->vendor_id == PCI_VENDOR_ID_INTEL &&
            d->device_id == PCI_DEVICE_ID_INTEL_82599_SFP_VF) {
            return 1;
        }
        break;
    }
    return 0;
}

/* find emulated register group entry */
XenPTRegGroup *xen_pt_find_reg_grp(XenPCIPassthroughState *s, uint32_t address)
{
    XenPTRegGroup *entry = NULL;

    /* find register group entry */
    QLIST_FOREACH(entry, &s->reg_grps, entries) {
        /* check address */
        if ((entry->base_offset <= address)
            && ((entry->base_offset + entry->size) > address)) {
            return entry;
        }
    }

    /* group entry not found */
    return NULL;
}

/* find emulated register entry */
XenPTReg *xen_pt_find_reg(XenPTRegGroup *reg_grp, uint32_t address)
{
    XenPTReg *reg_entry = NULL;
    XenPTRegInfo *reg = NULL;
    uint32_t real_offset = 0;

    /* find register entry */
    QLIST_FOREACH(reg_entry, &reg_grp->reg_tbl_list, entries) {
        reg = reg_entry->reg;
        real_offset = reg_grp->base_offset + reg->offset;
        /* check address */
        if ((real_offset <= address)
            && ((real_offset + reg->size) > address)) {
            return reg_entry;
        }
    }

    return NULL;
}

static uint32_t get_throughable_mask(const XenPCIPassthroughState *s,
                                     XenPTRegInfo *reg, uint32_t valid_mask)
{
    uint32_t throughable_mask = ~(reg->emu_mask | reg->ro_mask);

    if (!s->permissive) {
        throughable_mask &= ~reg->res_mask;
    }

    return throughable_mask & valid_mask;
}

/****************
 * general register functions
 */

/* register initialization function */

static int xen_pt_common_reg_init(XenPCIPassthroughState *s,
                                  XenPTRegInfo *reg, uint32_t real_offset,
                                  uint32_t *data)
{
    *data = reg->init_val;
    return 0;
}
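
/* Illustration (not used by the code): XEN_PT_MERGE_VALUE keeps the bits
 * selected by val_mask from 'value' and takes the rest from 'data'.  E.g.
 * with value = 0xABCD, data = 0x1234 and val_mask = 0xFF00:
 *
 *   (0xABCD & 0xFF00) | (0x1234 & ~0xFF00) = 0xAB00 | 0x0034 = 0xAB34
 *
 * The read helpers below call it with ~valid_emu_mask, i.e. emulated bits
 * come from the emulated copy and everything else from the host value.
 */
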
/* Read register functions */

static int xen_pt_byte_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                                uint8_t *value, uint8_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint8_t valid_emu_mask = 0;
    uint8_t *data = cfg_entry->ptr.byte;

    /* emulate byte register */
    valid_emu_mask = reg->emu_mask & valid_mask;
    *value = XEN_PT_MERGE_VALUE(*value, *data, ~valid_emu_mask);

    return 0;
}
static int xen_pt_word_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                                uint16_t *value, uint16_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint16_t valid_emu_mask = 0;
    uint16_t *data = cfg_entry->ptr.half_word;

    /* emulate word register */
    valid_emu_mask = reg->emu_mask & valid_mask;
    *value = XEN_PT_MERGE_VALUE(*value, *data, ~valid_emu_mask);

    return 0;
}
static int xen_pt_long_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                                uint32_t *value, uint32_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint32_t valid_emu_mask = 0;
    uint32_t *data = cfg_entry->ptr.word;

    /* emulate long register */
    valid_emu_mask = reg->emu_mask & valid_mask;
    *value = XEN_PT_MERGE_VALUE(*value, *data, ~valid_emu_mask);

    return 0;
}

/* Write register functions */

static int xen_pt_byte_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                                 uint8_t *val, uint8_t dev_value,
                                 uint8_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint8_t writable_mask = 0;
    uint8_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
    uint8_t *data = cfg_entry->ptr.byte;

    /* modify emulated register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value & ~reg->rw1c_mask,
                              throughable_mask);

    return 0;
}
static int xen_pt_word_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                                 uint16_t *val, uint16_t dev_value,
                                 uint16_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint16_t writable_mask = 0;
    uint16_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
    uint16_t *data = cfg_entry->ptr.half_word;

    /* modify emulated register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value & ~reg->rw1c_mask,
                              throughable_mask);

    return 0;
}
static int xen_pt_long_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                                 uint32_t *val, uint32_t dev_value,
                                 uint32_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint32_t writable_mask = 0;
    uint32_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
    uint32_t *data = cfg_entry->ptr.word;

    /* modify emulated register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value & ~reg->rw1c_mask,
                              throughable_mask);

    return 0;
}
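
/* Worked example for the write helpers above (illustrative only): take a
 * 16-bit register with emu_mask = 0x00FF, ro_mask = 0x0003, rw1c_mask = 0
 * and a fully permissive valid_mask of 0xFFFF.  A guest write of 0xFFFF
 * updates the emulated copy only in writable_mask = 0x00FF & ~0x0003 =
 * 0x00FC, while the value forwarded to the device keeps the guest bits in
 * throughable_mask (neither emulated nor read-only) and the current device
 * bits everywhere else.  Bits in rw1c_mask are cleared from dev_value first,
 * so forwarding a read-back status value cannot accidentally write a 1 and
 * clear a "write 1 to clear" bit on the device.
 */
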
/* XenPTRegInfo declaration
 * - only for emulated registers (either in part or in whole).
 * - for pass-through registers that need special behavior (like interacting
 *   with other components), set emu_mask to all 0s and specify the r/w
 *   functions properly.
 * - do NOT use all 1s (0xFFFFFFFF) for init_val: that value is
 *   XEN_PT_INVALID_REG, so the register would not be added to the table.
 */

/********************
 * Header Type0
 */

static int xen_pt_vendor_reg_init(XenPCIPassthroughState *s,
                                  XenPTRegInfo *reg, uint32_t real_offset,
                                  uint32_t *data)
{
    *data = s->real_device.vendor_id;
    return 0;
}
static int xen_pt_device_reg_init(XenPCIPassthroughState *s,
                                  XenPTRegInfo *reg, uint32_t real_offset,
                                  uint32_t *data)
{
    *data = s->real_device.device_id;
    return 0;
}
static int xen_pt_status_reg_init(XenPCIPassthroughState *s,
                                  XenPTRegInfo *reg, uint32_t real_offset,
                                  uint32_t *data)
{
    XenPTRegGroup *reg_grp_entry = NULL;
    XenPTReg *reg_entry = NULL;
    uint32_t reg_field = 0;

    /* find Header register group */
    reg_grp_entry = xen_pt_find_reg_grp(s, PCI_CAPABILITY_LIST);
    if (reg_grp_entry) {
        /* find Capabilities Pointer register */
        reg_entry = xen_pt_find_reg(reg_grp_entry, PCI_CAPABILITY_LIST);
        if (reg_entry) {
            /* check Capabilities Pointer register */
            if (*reg_entry->ptr.half_word) {
                reg_field |= PCI_STATUS_CAP_LIST;
            } else {
                reg_field &= ~PCI_STATUS_CAP_LIST;
            }
        } else {
            xen_shutdown_fatal_error("Internal error: Couldn't find XenPTReg*"
                                     " for Capabilities Pointer register."
                                     " (%s)\n", __func__);
            return -1;
        }
    } else {
        xen_shutdown_fatal_error("Internal error: Couldn't find XenPTRegGroup"
                                 " for Header. (%s)\n", __func__);
        return -1;
    }

    *data = reg_field;
    return 0;
}
static int xen_pt_header_type_reg_init(XenPCIPassthroughState *s,
                                       XenPTRegInfo *reg, uint32_t real_offset,
                                       uint32_t *data)
{
    /* always report a multi-function device (bit 7 of PCI_HEADER_TYPE) */
    *data = reg->init_val | 0x80;
    return 0;
}

/* initialize Interrupt Pin register */
static int xen_pt_irqpin_reg_init(XenPCIPassthroughState *s,
                                  XenPTRegInfo *reg, uint32_t real_offset,
                                  uint32_t *data)
{
    *data = xen_pt_pci_read_intx(s);
    return 0;
}

/* Command register */
static int xen_pt_cmd_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                                uint16_t *val, uint16_t dev_value,
                                uint16_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint16_t writable_mask = 0;
    uint16_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
    uint16_t *data = cfg_entry->ptr.half_word;

    /* modify emulated register */
    writable_mask = ~reg->ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);

    /* create value for writing to I/O device register */
    if (*val & PCI_COMMAND_INTX_DISABLE) {
        throughable_mask |= PCI_COMMAND_INTX_DISABLE;
    } else {
        if (s->machine_irq) {
            throughable_mask |= PCI_COMMAND_INTX_DISABLE;
        }
    }

    *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);

    return 0;
}

/* BAR */
#define XEN_PT_BAR_MEM_RO_MASK    0x0000000F  /* BAR ReadOnly mask(Memory) */
#define XEN_PT_BAR_MEM_EMU_MASK   0xFFFFFFF0  /* BAR emul mask(Memory) */
#define XEN_PT_BAR_IO_RO_MASK     0x00000003  /* BAR ReadOnly mask(I/O) */
#define XEN_PT_BAR_IO_EMU_MASK    0xFFFFFFFC  /* BAR emul mask(I/O) */
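
/* The RO masks keep the BAR type/flag bits (bits 3:0 for memory BARs,
 * bits 1:0 for I/O BARs) read-only, while the EMU masks cover the address
 * bits: the address is fully emulated, so the guest sees the address it
 * programmed rather than the host one.  At write time the read-only range
 * is widened by (size - 1) so that BAR sizing works; see
 * xen_pt_bar_reg_write() below.
 */
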
static bool is_64bit_bar(PCIIORegion *r)
{
    return !!(r->type & PCI_BASE_ADDRESS_MEM_TYPE_64);
}

static uint64_t xen_pt_get_bar_size(PCIIORegion *r)
{
    if (is_64bit_bar(r)) {
        uint64_t size64;
        size64 = (r + 1)->size;
        size64 <<= 32;
        size64 += r->size;
        return size64;
    }
    return r->size;
}

static XenPTBarFlag xen_pt_bar_reg_parse(XenPCIPassthroughState *s,
                                         int index)
{
    PCIDevice *d = &s->dev;
    XenPTRegion *region = NULL;
    PCIIORegion *r;

    /* check 64bit BAR */
    if ((0 < index) && (index < PCI_ROM_SLOT)) {
        int type = s->real_device.io_regions[index - 1].type;

        if ((type & XEN_HOST_PCI_REGION_TYPE_MEM)
            && (type & XEN_HOST_PCI_REGION_TYPE_MEM_64)) {
            region = &s->bases[index - 1];
            if (region->bar_flag != XEN_PT_BAR_FLAG_UPPER) {
                return XEN_PT_BAR_FLAG_UPPER;
            }
        }
    }

    /* check unused BAR */
    r = &d->io_regions[index];
    if (!xen_pt_get_bar_size(r)) {
        return XEN_PT_BAR_FLAG_UNUSED;
    }

    /* for ExpROM BAR */
    if (index == PCI_ROM_SLOT) {
        return XEN_PT_BAR_FLAG_MEM;
    }

    /* check BAR I/O indicator */
    if (s->real_device.io_regions[index].type & XEN_HOST_PCI_REGION_TYPE_IO) {
        return XEN_PT_BAR_FLAG_IO;
    } else {
        return XEN_PT_BAR_FLAG_MEM;
    }
}

static inline uint32_t base_address_with_flags(XenHostPCIIORegion *hr)
{
    if (hr->type & XEN_HOST_PCI_REGION_TYPE_IO) {
        return hr->base_addr | (hr->bus_flags & ~PCI_BASE_ADDRESS_IO_MASK);
    } else {
        return hr->base_addr | (hr->bus_flags & ~PCI_BASE_ADDRESS_MEM_MASK);
    }
}

static int xen_pt_bar_reg_init(XenPCIPassthroughState *s, XenPTRegInfo *reg,
                               uint32_t real_offset, uint32_t *data)
{
    uint32_t reg_field = 0;
    int index;

    index = xen_pt_bar_offset_to_index(reg->offset);
    if (index < 0 || index >= PCI_NUM_REGIONS) {
        XEN_PT_ERR(&s->dev, "Internal error: Invalid BAR index [%d].\n", index);
        return -1;
    }

    /* set BAR flag */
    s->bases[index].bar_flag = xen_pt_bar_reg_parse(s, index);
    if (s->bases[index].bar_flag == XEN_PT_BAR_FLAG_UNUSED) {
        reg_field = XEN_PT_INVALID_REG;
    }

    *data = reg_field;
    return 0;
}
static int xen_pt_bar_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                               uint32_t *value, uint32_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint32_t valid_emu_mask = 0;
    uint32_t bar_emu_mask = 0;
    int index;

    /* get BAR index */
    index = xen_pt_bar_offset_to_index(reg->offset);
    if (index < 0 || index >= PCI_NUM_REGIONS - 1) {
        XEN_PT_ERR(&s->dev, "Internal error: Invalid BAR index [%d].\n", index);
        return -1;
    }

    /* use fixed-up value from kernel sysfs */
    *value = base_address_with_flags(&s->real_device.io_regions[index]);

    /* set emulation mask depending on the BAR flag */
    switch (s->bases[index].bar_flag) {
    case XEN_PT_BAR_FLAG_MEM:
        bar_emu_mask = XEN_PT_BAR_MEM_EMU_MASK;
        break;
    case XEN_PT_BAR_FLAG_IO:
        bar_emu_mask = XEN_PT_BAR_IO_EMU_MASK;
        break;
    case XEN_PT_BAR_FLAG_UPPER:
        bar_emu_mask = XEN_PT_BAR_ALLF;
        break;
    default:
        break;
    }

    /* emulate BAR */
    valid_emu_mask = bar_emu_mask & valid_mask;
    *value = XEN_PT_MERGE_VALUE(*value, *cfg_entry->ptr.word, ~valid_emu_mask);

    return 0;
}
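
/* BAR sizing walk-through (illustrative): for a 1 MiB memory BAR the write
 * handler below computes bar_ro_mask = XEN_PT_BAR_MEM_RO_MASK | (r_size - 1)
 * = 0x000FFFFF and writable_mask = 0xFFFFFFF0 & ~0x000FFFFF = 0xFFF00000.
 * When the guest writes 0xFFFFFFFF to probe the size, only 0xFFF00000
 * sticks in the emulated copy; reading it back and computing
 * ~(value & ~0xF) + 1 yields 0x00100000, i.e. 1 MiB, as expected.
 */
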
static int xen_pt_bar_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                                uint32_t *val, uint32_t dev_value,
                                uint32_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    XenPTRegion *base = NULL;
    PCIDevice *d = &s->dev;
    const PCIIORegion *r;
    uint32_t writable_mask = 0;
    uint32_t bar_emu_mask = 0;
    uint32_t bar_ro_mask = 0;
    uint32_t r_size = 0;
    int index = 0;
    uint32_t *data = cfg_entry->ptr.word;

    index = xen_pt_bar_offset_to_index(reg->offset);
    if (index < 0 || index >= PCI_NUM_REGIONS) {
        XEN_PT_ERR(d, "Internal error: Invalid BAR index [%d].\n", index);
        return -1;
    }

    r = &d->io_regions[index];
    base = &s->bases[index];
    r_size = xen_pt_get_emul_size(base->bar_flag, r->size);

    /* set emulation mask and read-only mask depending on the BAR flag */
    switch (s->bases[index].bar_flag) {
    case XEN_PT_BAR_FLAG_MEM:
        bar_emu_mask = XEN_PT_BAR_MEM_EMU_MASK;
        if (!r_size) {
            /* low 32 bits mask for 64 bit bars */
            bar_ro_mask = XEN_PT_BAR_ALLF;
        } else {
            bar_ro_mask = XEN_PT_BAR_MEM_RO_MASK | (r_size - 1);
        }
        break;
    case XEN_PT_BAR_FLAG_IO:
        bar_emu_mask = XEN_PT_BAR_IO_EMU_MASK;
        bar_ro_mask = XEN_PT_BAR_IO_RO_MASK | (r_size - 1);
        break;
    case XEN_PT_BAR_FLAG_UPPER:
        bar_emu_mask = XEN_PT_BAR_ALLF;
        bar_ro_mask = r_size ? r_size - 1 : 0;
        break;
    default:
        break;
    }

    /* modify emulated register */
    writable_mask = bar_emu_mask & ~bar_ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);

    /* no need to update the virtual region address here: the BAR is fully
     * emulated for every flag type (UPPER, MEM and IO), so only the
     * emulated copy above changes
     */

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, 0);

    return 0;
}

/* write Exp ROM BAR */
static int xen_pt_exp_rom_bar_reg_write(XenPCIPassthroughState *s,
                                        XenPTReg *cfg_entry, uint32_t *val,
                                        uint32_t dev_value, uint32_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    XenPTRegion *base = NULL;
    PCIDevice *d = (PCIDevice *)&s->dev;
    uint32_t writable_mask = 0;
    uint32_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
    pcibus_t r_size = 0;
    uint32_t bar_ro_mask = 0;
    uint32_t *data = cfg_entry->ptr.word;

    r_size = d->io_regions[PCI_ROM_SLOT].size;
    base = &s->bases[PCI_ROM_SLOT];
    /* align memory type resource size */
    r_size = xen_pt_get_emul_size(base->bar_flag, r_size);

    /* set emulation mask and read-only mask */
    bar_ro_mask = (reg->ro_mask | (r_size - 1)) & ~PCI_ROM_ADDRESS_ENABLE;

    /* modify emulated register */
    writable_mask = ~bar_ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);

    return 0;
}

static int xen_pt_intel_opregion_read(XenPCIPassthroughState *s,
                                      XenPTReg *cfg_entry,
                                      uint32_t *value, uint32_t valid_mask)
{
    *value = igd_read_opregion(s);
    return 0;
}

static int xen_pt_intel_opregion_write(XenPCIPassthroughState *s,
                                       XenPTReg *cfg_entry, uint32_t *value,
                                       uint32_t dev_value, uint32_t valid_mask)
{
    igd_write_opregion(s, *value);
    return 0;
}

/* Header Type0 reg static information table */
static XenPTRegInfo xen_pt_emu_reg_header0[] = {
    /* Vendor ID reg */
    {
        .offset     = PCI_VENDOR_ID,
        .size       = 2,
        .init_val   = 0x0000,
        .ro_mask    = 0xFFFF,
        .emu_mask   = 0xFFFF,
        .init       = xen_pt_vendor_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* Device ID reg */
    {
        .offset     = PCI_DEVICE_ID,
        .size       = 2,
        .init_val   = 0x0000,
        .ro_mask    = 0xFFFF,
        .emu_mask   = 0xFFFF,
        .init       = xen_pt_device_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* Command reg */
    {
        .offset     = PCI_COMMAND,
        .size       = 2,
        .init_val   = 0x0000,
        .res_mask   = 0xF880,
        .emu_mask   = 0x0743,
        .init       = xen_pt_common_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_cmd_reg_write,
    },
    /* Capabilities Pointer reg */
    {
        .offset     = PCI_CAPABILITY_LIST,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0xFF,
        .emu_mask   = 0xFF,
        .init       = xen_pt_ptr_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Status reg */
    /* use the emulated Cap Ptr value to initialize,
     * so it must be declared after the Cap Ptr reg
     */
    {
        .offset     = PCI_STATUS,
        .size       = 2,
        .init_val   = 0x0000,
        .res_mask   = 0x0007,
        .ro_mask    = 0x06F8,
        .rw1c_mask  = 0xF900,
        .emu_mask   = 0x0010,
        .init       = xen_pt_status_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* Cache Line Size reg */
    {
        .offset     = PCI_CACHE_LINE_SIZE,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0x00,
        .emu_mask   = 0xFF,
        .init       = xen_pt_common_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Latency Timer reg */
    {
        .offset     = PCI_LATENCY_TIMER,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0x00,
        .emu_mask   = 0xFF,
        .init       = xen_pt_common_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Header Type reg */
    {
        .offset     = PCI_HEADER_TYPE,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0xFF,
        .emu_mask   = 0x00,
        .init       = xen_pt_header_type_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Interrupt Line reg */
    {
        .offset     = PCI_INTERRUPT_LINE,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0x00,
        .emu_mask   = 0xFF,
        .init       = xen_pt_common_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Interrupt Pin reg */
    {
        .offset     = PCI_INTERRUPT_PIN,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0xFF,
        .emu_mask   = 0xFF,
        .init       = xen_pt_irqpin_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* BAR 0 reg */
    /* the BAR masks are decided at runtime, depending on the I/O/memory
     * type, so they are not set here
     */
    {
        .offset     = PCI_BASE_ADDRESS_0,
        .size       = 4,
        .init_val   = 0x00000000,
        .init       = xen_pt_bar_reg_init,
        .u.dw.read  = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* BAR 1 reg */
    {
        .offset     = PCI_BASE_ADDRESS_1,
        .size       = 4,
        .init_val   = 0x00000000,
        .init       = xen_pt_bar_reg_init,
        .u.dw.read  = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* BAR 2 reg */
    {
        .offset     = PCI_BASE_ADDRESS_2,
        .size       = 4,
        .init_val   = 0x00000000,
        .init       = xen_pt_bar_reg_init,
        .u.dw.read  = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* BAR 3 reg */
    {
        .offset     = PCI_BASE_ADDRESS_3,
        .size       = 4,
        .init_val   = 0x00000000,
        .init       = xen_pt_bar_reg_init,
        .u.dw.read  = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* BAR 4 reg */
    {
        .offset     = PCI_BASE_ADDRESS_4,
        .size       = 4,
        .init_val   = 0x00000000,
        .init       = xen_pt_bar_reg_init,
        .u.dw.read  = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* BAR 5 reg */
    {
        .offset     = PCI_BASE_ADDRESS_5,
        .size       = 4,
        .init_val   = 0x00000000,
        .init       = xen_pt_bar_reg_init,
        .u.dw.read  = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* Expansion ROM BAR reg */
    {
        .offset     = PCI_ROM_ADDRESS,
        .size       = 4,
        .init_val   = 0x00000000,
        .ro_mask    = ~PCI_ROM_ADDRESS_MASK & ~PCI_ROM_ADDRESS_ENABLE,
        .emu_mask   = (uint32_t)PCI_ROM_ADDRESS_MASK,
        .init       = xen_pt_bar_reg_init,
        .u.dw.read  = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_exp_rom_bar_reg_write,
    },
    {
        .size = 0,
    },
};


/*********************************
 * Vital Product Data Capability
 */

/* Vital Product Data Capability Structure reg static information table */
static XenPTRegInfo xen_pt_emu_reg_vpd[] = {
    {
        .offset     = PCI_CAP_LIST_NEXT,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0xFF,
        .emu_mask   = 0xFF,
        .init       = xen_pt_ptr_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    {
        .offset     = PCI_VPD_ADDR,
        .size       = 2,
        .ro_mask    = 0x0003,
        .emu_mask   = 0x0003,
        .init       = xen_pt_common_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    {
        .size = 0,
    },
};


/**************************************
 * Vendor Specific Capability
 */

/* Vendor Specific Capability Structure reg static information table */
static XenPTRegInfo xen_pt_emu_reg_vendor[] = {
    {
        .offset     = PCI_CAP_LIST_NEXT,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0xFF,
        .emu_mask   = 0xFF,
        .init       = xen_pt_ptr_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    {
        .size = 0,
    },
};


/*****************************
 * PCI Express Capability
 */

static inline uint8_t get_capability_version(XenPCIPassthroughState *s,
                                             uint32_t offset)
{
    uint8_t flag;
    if (xen_host_pci_get_byte(&s->real_device, offset + PCI_EXP_FLAGS, &flag)) {
        return 0;
    }
    return flag & PCI_EXP_FLAGS_VERS;
}

static inline uint8_t get_device_type(XenPCIPassthroughState *s,
                                      uint32_t offset)
{
    uint8_t flag;
    if (xen_host_pci_get_byte(&s->real_device, offset + PCI_EXP_FLAGS, &flag)) {
        return 0;
    }
    return (flag & PCI_EXP_FLAGS_TYPE) >> 4;
}
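
/* Example (per the PCI Express spec layout): the low byte of the PCI
 * Express Capabilities register at cap + PCI_EXP_FLAGS encodes the
 * capability version in bits 3:0 and the device/port type in bits 7:4.
 * A flags byte of 0x02 therefore means a cap_ver 2 Endpoint, while 0x91
 * would be a cap_ver 1 Root Complex Integrated Endpoint (type 0x9).
 */
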
/* initialize Link Control register */
static int xen_pt_linkctrl_reg_init(XenPCIPassthroughState *s,
                                    XenPTRegInfo *reg, uint32_t real_offset,
                                    uint32_t *data)
{
    uint8_t cap_ver = get_capability_version(s, real_offset - reg->offset);
    uint8_t dev_type = get_device_type(s, real_offset - reg->offset);

    /* no need to initialize in case of a Root Complex Integrated Endpoint
     * with cap_ver 1.x
     */
    if ((dev_type == PCI_EXP_TYPE_RC_END) && (cap_ver == 1)) {
        *data = XEN_PT_INVALID_REG;
    } else {
        *data = reg->init_val;
    }
    return 0;
}
/* initialize Device Control 2 register */
static int xen_pt_devctrl2_reg_init(XenPCIPassthroughState *s,
                                    XenPTRegInfo *reg, uint32_t real_offset,
                                    uint32_t *data)
{
    uint8_t cap_ver = get_capability_version(s, real_offset - reg->offset);

    /* no need to initialize in case of cap_ver 1.x */
    if (cap_ver == 1) {
        *data = XEN_PT_INVALID_REG;
    } else {
        *data = reg->init_val;
    }
    return 0;
}
/* initialize Link Control 2 register */
static int xen_pt_linkctrl2_reg_init(XenPCIPassthroughState *s,
                                     XenPTRegInfo *reg, uint32_t real_offset,
                                     uint32_t *data)
{
    uint8_t cap_ver = get_capability_version(s, real_offset - reg->offset);
    uint32_t reg_field = 0;

    /* no need to initialize in case of cap_ver 1.x */
    if (cap_ver == 1) {
        reg_field = XEN_PT_INVALID_REG;
    } else {
        /* set Supported Link Speed */
        uint8_t lnkcap;
        int rc;
        rc = xen_host_pci_get_byte(&s->real_device,
                                   real_offset - reg->offset + PCI_EXP_LNKCAP,
                                   &lnkcap);
        if (rc) {
            return rc;
        }
        reg_field |= PCI_EXP_LNKCAP_SLS & lnkcap;
    }

    *data = reg_field;
    return 0;
}

/* PCI Express Capability Structure reg static information table */
static XenPTRegInfo xen_pt_emu_reg_pcie[] = {
    /* Next Pointer reg */
    {
        .offset     = PCI_CAP_LIST_NEXT,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0xFF,
        .emu_mask   = 0xFF,
        .init       = xen_pt_ptr_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Device Capabilities reg */
    {
        .offset     = PCI_EXP_DEVCAP,
        .size       = 4,
        .init_val   = 0x00000000,
        .ro_mask    = 0xFFFFFFFF,
        .emu_mask   = 0x10000000,
        .init       = xen_pt_common_reg_init,
        .u.dw.read  = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_long_reg_write,
    },
    /* Device Control reg */
    {
        .offset     = PCI_EXP_DEVCTL,
        .size       = 2,
        .init_val   = 0x2810,
        .ro_mask    = 0x8400,
        .emu_mask   = 0xFFFF,
        .init       = xen_pt_common_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* Device Status reg */
    {
        .offset     = PCI_EXP_DEVSTA,
        .size       = 2,
        .res_mask   = 0xFFC0,
        .ro_mask    = 0x0030,
        .rw1c_mask  = 0x000F,
        .init       = xen_pt_common_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* Link Control reg */
    {
        .offset     = PCI_EXP_LNKCTL,
        .size       = 2,
        .init_val   = 0x0000,
        .ro_mask    = 0xFC34,
        .emu_mask   = 0xFFFF,
        .init       = xen_pt_linkctrl_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* Link Status reg */
    {
        .offset     = PCI_EXP_LNKSTA,
        .size       = 2,
        .ro_mask    = 0x3FFF,
        .rw1c_mask  = 0xC000,
        .init       = xen_pt_common_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* Device Control 2 reg */
    {
        .offset     = 0x28,
        .size       = 2,
        .init_val   = 0x0000,
        .ro_mask    = 0xFFE0,
        .emu_mask   = 0xFFFF,
        .init       = xen_pt_devctrl2_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* Link Control 2 reg */
    {
        .offset     = 0x30,
        .size       = 2,
        .init_val   = 0x0000,
        .ro_mask    = 0xE040,
        .emu_mask   = 0xFFFF,
        .init       = xen_pt_linkctrl2_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    {
        .size = 0,
    },
};

/*********************************
 * Power Management Capability
 */

/* Power Management Capability reg static information table */
static XenPTRegInfo xen_pt_emu_reg_pm[] = {
    /* Next Pointer reg */
    {
        .offset     = PCI_CAP_LIST_NEXT,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0xFF,
        .emu_mask   = 0xFF,
        .init       = xen_pt_ptr_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Power Management Capabilities reg */
    {
        .offset     = PCI_CAP_FLAGS,
        .size       = 2,
        .init_val   = 0x0000,
        .ro_mask    = 0xFFFF,
        .emu_mask   = 0xF9C8,
        .init       = xen_pt_common_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* PCI Power Management Control/Status reg */
    {
        .offset     = PCI_PM_CTRL,
        .size       = 2,
        .init_val   = 0x0008,
        .res_mask   = 0x00F0,
        .ro_mask    = 0x610C,
        .rw1c_mask  = 0x8000,
        .emu_mask   = 0x810B,
        .init       = xen_pt_common_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    {
        .size = 0,
    },
};


/********************************
 * MSI Capability
 */

/* Helper */
#define xen_pt_msi_check_type(offset, flags, what) \
        ((offset) == ((flags) & PCI_MSI_FLAGS_64BIT ? \
                      PCI_MSI_##what##_64 : PCI_MSI_##what##_32))
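
/* Offset check example (illustrative): with PCI_MSI_FLAGS_64BIT set the
 * Message Data register lives at PCI_MSI_DATA_64 (0x0C), otherwise at
 * PCI_MSI_DATA_32 (0x08).  xen_pt_msi_check_type(offset, flags, DATA) is
 * true only when 'offset' matches the layout selected by 'flags', which is
 * how the shared init/write handlers below reject the table entry that
 * does not apply to the device's MSI flavour.
 */
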
/* Message Control register */
static int xen_pt_msgctrl_reg_init(XenPCIPassthroughState *s,
                                   XenPTRegInfo *reg, uint32_t real_offset,
                                   uint32_t *data)
{
    XenPTMSI *msi = s->msi;
    uint16_t reg_field;
    int rc;

    /* use I/O device register's value as initial value */
    rc = xen_host_pci_get_word(&s->real_device, real_offset, &reg_field);
    if (rc) {
        return rc;
    }
    if (reg_field & PCI_MSI_FLAGS_ENABLE) {
        XEN_PT_LOG(&s->dev, "MSI already enabled, disabling it first\n");
        xen_host_pci_set_word(&s->real_device, real_offset,
                              reg_field & ~PCI_MSI_FLAGS_ENABLE);
    }
    msi->flags |= reg_field;
    msi->ctrl_offset = real_offset;
    msi->initialized = false;
    msi->mapped = false;

    *data = reg->init_val;
    return 0;
}
static int xen_pt_msgctrl_reg_write(XenPCIPassthroughState *s,
                                    XenPTReg *cfg_entry, uint16_t *val,
                                    uint16_t dev_value, uint16_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    XenPTMSI *msi = s->msi;
    uint16_t writable_mask = 0;
    uint16_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
    uint16_t *data = cfg_entry->ptr.half_word;

    /* there is currently no support for multi-vector MSI */
    if (*val & PCI_MSI_FLAGS_QSIZE) {
        XEN_PT_WARN(&s->dev, "Guest tries to set more than 1 vector"
                    " (ctrl: %x)\n", *val);
    }

    /* modify emulated register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);
    msi->flags |= *data & ~PCI_MSI_FLAGS_ENABLE;

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);

    /* update MSI */
    if (*val & PCI_MSI_FLAGS_ENABLE) {
        /* setup MSI pirq for the first time */
        if (!msi->initialized) {
            /* Init physical one */
            XEN_PT_LOG(&s->dev, "setup MSI (register: %x).\n", *val);
            if (xen_pt_msi_setup(s)) {
                /* We do not broadcast the error to the framework code, so
                 * that MSI errors are contained in the MSI emulation code
                 * and QEMU can keep running.  The guest's MSI will simply
                 * not work.
                 */
                *val &= ~PCI_MSI_FLAGS_ENABLE;
                XEN_PT_WARN(&s->dev, "Cannot map MSI (register: %x)!\n",
                            *val);
                return 0;
            }
            if (xen_pt_msi_update(s)) {
                *val &= ~PCI_MSI_FLAGS_ENABLE;
                XEN_PT_WARN(&s->dev, "Cannot bind MSI (register: %x)!\n",
                            *val);
                return 0;
            }
            msi->initialized = true;
            msi->mapped = true;
        }
        msi->flags |= PCI_MSI_FLAGS_ENABLE;
    } else if (msi->mapped) {
        xen_pt_msi_disable(s);
    }

    return 0;
}

/* initialize Message Upper Address register */
static int xen_pt_msgaddr64_reg_init(XenPCIPassthroughState *s,
                                     XenPTRegInfo *reg, uint32_t real_offset,
                                     uint32_t *data)
{
    /* no need to initialize in case of 32 bit type */
    if (!(s->msi->flags & PCI_MSI_FLAGS_64BIT)) {
        *data = XEN_PT_INVALID_REG;
    } else {
        *data = reg->init_val;
    }

    return 0;
}
/* this function will be called twice (for the 32 bit and 64 bit offsets) */
/* initialize Message Data register */
static int xen_pt_msgdata_reg_init(XenPCIPassthroughState *s,
                                   XenPTRegInfo *reg, uint32_t real_offset,
                                   uint32_t *data)
{
    uint32_t flags = s->msi->flags;
    uint32_t offset = reg->offset;

    /* check whether the offset matches the 32/64-bit type */
    if (xen_pt_msi_check_type(offset, flags, DATA)) {
        *data = reg->init_val;
    } else {
        *data = XEN_PT_INVALID_REG;
    }
    return 0;
}

/* this function will be called twice (for the 32 bit and 64 bit offsets) */
/* initialize Mask register */
static int xen_pt_mask_reg_init(XenPCIPassthroughState *s,
                                XenPTRegInfo *reg, uint32_t real_offset,
                                uint32_t *data)
{
    uint32_t flags = s->msi->flags;

    /* check whether the offset matches the 32/64-bit type */
    if (!(flags & PCI_MSI_FLAGS_MASKBIT)) {
        *data = XEN_PT_INVALID_REG;
    } else if (xen_pt_msi_check_type(reg->offset, flags, MASK)) {
        *data = reg->init_val;
    } else {
        *data = XEN_PT_INVALID_REG;
    }
    return 0;
}

/* this function will be called twice (for the 32 bit and 64 bit offsets) */
/* initialize Pending register */
static int xen_pt_pending_reg_init(XenPCIPassthroughState *s,
                                   XenPTRegInfo *reg, uint32_t real_offset,
                                   uint32_t *data)
{
    uint32_t flags = s->msi->flags;

    /* check whether the offset matches the 32/64-bit type */
    if (!(flags & PCI_MSI_FLAGS_MASKBIT)) {
        *data = XEN_PT_INVALID_REG;
    } else if (xen_pt_msi_check_type(reg->offset, flags, PENDING)) {
        *data = reg->init_val;
    } else {
        *data = XEN_PT_INVALID_REG;
    }
    return 0;
}

/* write Message Address register */
static int xen_pt_msgaddr32_reg_write(XenPCIPassthroughState *s,
                                      XenPTReg *cfg_entry, uint32_t *val,
                                      uint32_t dev_value, uint32_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint32_t writable_mask = 0;
    uint32_t old_addr = *cfg_entry->ptr.word;
    uint32_t *data = cfg_entry->ptr.word;

    /* modify emulated register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);
    s->msi->addr_lo = *data;

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, 0);

    /* update MSI */
    if (*data != old_addr) {
        if (s->msi->mapped) {
            xen_pt_msi_update(s);
        }
    }

    return 0;
}
/* write Message Upper Address register */
static int xen_pt_msgaddr64_reg_write(XenPCIPassthroughState *s,
                                      XenPTReg *cfg_entry, uint32_t *val,
                                      uint32_t dev_value, uint32_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint32_t writable_mask = 0;
    uint32_t old_addr = *cfg_entry->ptr.word;
    uint32_t *data = cfg_entry->ptr.word;

    /* check whether the type is 64 bit or not */
    if (!(s->msi->flags & PCI_MSI_FLAGS_64BIT)) {
        XEN_PT_ERR(&s->dev,
                   "Can't write to the upper address without 64 bit support\n");
        return -1;
    }

    /* modify emulated register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);
    /* update the msi_info too */
    s->msi->addr_hi = *data;

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, 0);

    /* update MSI */
    if (*data != old_addr) {
        if (s->msi->mapped) {
            xen_pt_msi_update(s);
        }
    }

    return 0;
}


/* this function will be called twice (for the 32 bit and 64 bit offsets) */
/* write Message Data register */
static int xen_pt_msgdata_reg_write(XenPCIPassthroughState *s,
                                    XenPTReg *cfg_entry, uint16_t *val,
                                    uint16_t dev_value, uint16_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    XenPTMSI *msi = s->msi;
    uint16_t writable_mask = 0;
    uint16_t old_data = *cfg_entry->ptr.half_word;
    uint32_t offset = reg->offset;
    uint16_t *data = cfg_entry->ptr.half_word;

    /* check whether the offset matches the 32/64-bit type */
    if (!xen_pt_msi_check_type(offset, msi->flags, DATA)) {
        /* exit I/O emulator */
        XEN_PT_ERR(&s->dev, "the offset does not match the 32/64 bit type!\n");
        return -1;
    }

    /* modify emulated register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);
    /* update the msi_info too */
    msi->data = *data;

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, 0);

    /* update MSI */
    if (*data != old_data) {
        if (msi->mapped) {
            xen_pt_msi_update(s);
        }
    }

    return 0;
}
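
/* Note on the MSI address/data writes above: the value forwarded to the
 * device merges with a throughable mask of 0, i.e. the physical registers
 * are never programmed with guest values directly.  The guest's view is
 * kept in the emulated copy, and the real vector is (re)bound through
 * xen_pt_msi_update() when the address or data changes while mapped.
 */
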
/* MSI Capability Structure reg static information table */
static XenPTRegInfo xen_pt_emu_reg_msi[] = {
    /* Next Pointer reg */
    {
        .offset     = PCI_CAP_LIST_NEXT,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0xFF,
        .emu_mask   = 0xFF,
        .init       = xen_pt_ptr_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Message Control reg */
    {
        .offset     = PCI_MSI_FLAGS,
        .size       = 2,
        .init_val   = 0x0000,
        .res_mask   = 0xFE00,
        .ro_mask    = 0x018E,
        .emu_mask   = 0x017E,
        .init       = xen_pt_msgctrl_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_msgctrl_reg_write,
    },
    /* Message Address reg */
    {
        .offset     = PCI_MSI_ADDRESS_LO,
        .size       = 4,
        .init_val   = 0x00000000,
        .ro_mask    = 0x00000003,
        .emu_mask   = 0xFFFFFFFF,
        .init       = xen_pt_common_reg_init,
        .u.dw.read  = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_msgaddr32_reg_write,
    },
    /* Message Upper Address reg (if PCI_MSI_FLAGS_64BIT set) */
    {
        .offset     = PCI_MSI_ADDRESS_HI,
        .size       = 4,
        .init_val   = 0x00000000,
        .ro_mask    = 0x00000000,
        .emu_mask   = 0xFFFFFFFF,
        .init       = xen_pt_msgaddr64_reg_init,
        .u.dw.read  = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_msgaddr64_reg_write,
    },
    /* Message Data reg (16 bits of data for 32-bit devices) */
    {
        .offset     = PCI_MSI_DATA_32,
        .size       = 2,
        .init_val   = 0x0000,
        .ro_mask    = 0x0000,
        .emu_mask   = 0xFFFF,
        .init       = xen_pt_msgdata_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_msgdata_reg_write,
    },
    /* Message Data reg (16 bits of data for 64-bit devices) */
    {
        .offset     = PCI_MSI_DATA_64,
        .size       = 2,
        .init_val   = 0x0000,
        .ro_mask    = 0x0000,
        .emu_mask   = 0xFFFF,
        .init       = xen_pt_msgdata_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_msgdata_reg_write,
    },
    /* Mask reg (if PCI_MSI_FLAGS_MASKBIT set, for 32-bit devices) */
    {
        .offset     = PCI_MSI_MASK_32,
        .size       = 4,
        .init_val   = 0x00000000,
        .ro_mask    = 0xFFFFFFFF,
        .emu_mask   = 0xFFFFFFFF,
        .init       = xen_pt_mask_reg_init,
        .u.dw.read  = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_long_reg_write,
    },
    /* Mask reg (if PCI_MSI_FLAGS_MASKBIT set, for 64-bit devices) */
    {
        .offset     = PCI_MSI_MASK_64,
        .size       = 4,
        .init_val   = 0x00000000,
        .ro_mask    = 0xFFFFFFFF,
        .emu_mask   = 0xFFFFFFFF,
        .init       = xen_pt_mask_reg_init,
        .u.dw.read  = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_long_reg_write,
    },
    /* Pending reg (if PCI_MSI_FLAGS_MASKBIT set, for 32-bit devices) */
    {
        .offset     = PCI_MSI_MASK_32 + 4,
        .size       = 4,
        .init_val   = 0x00000000,
        .ro_mask    = 0xFFFFFFFF,
        .emu_mask   = 0x00000000,
        .init       = xen_pt_pending_reg_init,
        .u.dw.read  = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_long_reg_write,
    },
    /* Pending reg (if PCI_MSI_FLAGS_MASKBIT set, for 64-bit devices) */
    {
        .offset     = PCI_MSI_MASK_64 + 4,
        .size       = 4,
        .init_val   = 0x00000000,
        .ro_mask    = 0xFFFFFFFF,
        .emu_mask   = 0x00000000,
        .init       = xen_pt_pending_reg_init,
        .u.dw.read  = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_long_reg_write,
    },
    {
        .size = 0,
    },
};
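
/* MSI-X differs from MSI in that the vector table lives in MMIO rather
 * than in config space, so only the Message Control register is emulated
 * here; the table itself is handled separately via xen_pt_msix_init() and
 * xen_pt_msix_update().
 */
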
/**************************************
 * MSI-X Capability
 */

/* Message Control register for MSI-X */
static int xen_pt_msixctrl_reg_init(XenPCIPassthroughState *s,
                                    XenPTRegInfo *reg, uint32_t real_offset,
                                    uint32_t *data)
{
    uint16_t reg_field;
    int rc;

    /* use I/O device register's value as initial value */
    rc = xen_host_pci_get_word(&s->real_device, real_offset, &reg_field);
    if (rc) {
        return rc;
    }
    if (reg_field & PCI_MSIX_FLAGS_ENABLE) {
        XEN_PT_LOG(&s->dev, "MSI-X already enabled, disabling it first\n");
        xen_host_pci_set_word(&s->real_device, real_offset,
                              reg_field & ~PCI_MSIX_FLAGS_ENABLE);
    }

    s->msix->ctrl_offset = real_offset;

    *data = reg->init_val;
    return 0;
}
static int xen_pt_msixctrl_reg_write(XenPCIPassthroughState *s,
                                     XenPTReg *cfg_entry, uint16_t *val,
                                     uint16_t dev_value, uint16_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint16_t writable_mask = 0;
    uint16_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
    int debug_msix_enabled_old;
    uint16_t *data = cfg_entry->ptr.half_word;

    /* modify emulated register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);

    /* update MSI-X */
    if ((*val & PCI_MSIX_FLAGS_ENABLE)
        && !(*val & PCI_MSIX_FLAGS_MASKALL)) {
        xen_pt_msix_update(s);
    } else if (!(*val & PCI_MSIX_FLAGS_ENABLE) && s->msix->enabled) {
        xen_pt_msix_disable(s);
    }

    s->msix->maskall = *val & PCI_MSIX_FLAGS_MASKALL;

    debug_msix_enabled_old = s->msix->enabled;
    s->msix->enabled = !!(*val & PCI_MSIX_FLAGS_ENABLE);
    if (s->msix->enabled != debug_msix_enabled_old) {
        XEN_PT_LOG(&s->dev, "%s MSI-X\n",
                   s->msix->enabled ? "enable" : "disable");
    }

    return 0;
}

/* MSI-X Capability Structure reg static information table */
static XenPTRegInfo xen_pt_emu_reg_msix[] = {
    /* Next Pointer reg */
    {
        .offset     = PCI_CAP_LIST_NEXT,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0xFF,
        .emu_mask   = 0xFF,
        .init       = xen_pt_ptr_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Message Control reg */
    {
        .offset     = PCI_MSI_FLAGS,
        .size       = 2,
        .init_val   = 0x0000,
        .res_mask   = 0x3800,
        .ro_mask    = 0x07FF,
        .emu_mask   = 0x0000,
        .init       = xen_pt_msixctrl_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_msixctrl_reg_write,
    },
    {
        .size = 0,
    },
};

static XenPTRegInfo xen_pt_emu_reg_igd_opregion[] = {
    /* Intel IGFX OpRegion reg */
    {
        .offset     = 0x0,
        .size       = 4,
        .init_val   = 0,
        .u.dw.read  = xen_pt_intel_opregion_read,
        .u.dw.write = xen_pt_intel_opregion_write,
    },
    {
        .size = 0,
    },
};

/****************************
 * Capabilities
 */

/* capability structure register group size functions */

static int xen_pt_reg_grp_size_init(XenPCIPassthroughState *s,
                                    const XenPTRegGroupInfo *grp_reg,
                                    uint32_t base_offset, uint8_t *size)
{
    *size = grp_reg->grp_size;
    return 0;
}
/* get Vendor Specific Capability Structure register group size */
static int xen_pt_vendor_size_init(XenPCIPassthroughState *s,
                                   const XenPTRegGroupInfo *grp_reg,
                                   uint32_t base_offset, uint8_t *size)
{
    return xen_host_pci_get_byte(&s->real_device, base_offset + 0x02, size);
}
/* get PCI Express Capability Structure register group size */
static int xen_pt_pcie_size_init(XenPCIPassthroughState *s,
                                 const XenPTRegGroupInfo *grp_reg,
                                 uint32_t base_offset, uint8_t *size)
{
    PCIDevice *d = &s->dev;
    uint8_t version = get_capability_version(s, base_offset);
    uint8_t type = get_device_type(s, base_offset);
    uint8_t pcie_size = 0;

    /* calculate the size depending on capability version and
     * device/port type
     */
    /* in case of PCI Express Base Specification Rev 1.x */
    if (version == 1) {
        /* The PCI Express Capabilities, Device Capabilities, and Device
         * Status/Control registers are required for all PCI Express devices.
         * The Link Capabilities and Link Status/Control are required for all
         * Endpoints that are not Root Complex Integrated Endpoints.
         * Endpoints are not required to implement registers other than those
         * listed above and terminate the capability structure.
         */
        switch (type) {
        case PCI_EXP_TYPE_ENDPOINT:
        case PCI_EXP_TYPE_LEG_END:
            pcie_size = 0x14;
            break;
        case PCI_EXP_TYPE_RC_END:
            /* has no link */
            pcie_size = 0x0C;
            break;
        /* only Endpoint passthrough is supported */
        case PCI_EXP_TYPE_ROOT_PORT:
        case PCI_EXP_TYPE_UPSTREAM:
        case PCI_EXP_TYPE_DOWNSTREAM:
        case PCI_EXP_TYPE_PCI_BRIDGE:
        case PCI_EXP_TYPE_PCIE_BRIDGE:
        case PCI_EXP_TYPE_RC_EC:
        default:
            XEN_PT_ERR(d, "Unsupported device/port type %#x.\n", type);
            return -1;
        }
    }
    /* in case of PCI Express Base Specification Rev 2.0 */
    else if (version == 2) {
        switch (type) {
        case PCI_EXP_TYPE_ENDPOINT:
        case PCI_EXP_TYPE_LEG_END:
        case PCI_EXP_TYPE_RC_END:
            /* For Functions that do not implement the registers,
             * these spaces must be hardwired to 0b.
             */
            pcie_size = 0x3C;
            break;
        /* only Endpoint passthrough is supported */
        case PCI_EXP_TYPE_ROOT_PORT:
        case PCI_EXP_TYPE_UPSTREAM:
        case PCI_EXP_TYPE_DOWNSTREAM:
        case PCI_EXP_TYPE_PCI_BRIDGE:
        case PCI_EXP_TYPE_PCIE_BRIDGE:
        case PCI_EXP_TYPE_RC_EC:
        default:
            XEN_PT_ERR(d, "Unsupported device/port type %#x.\n", type);
            return -1;
        }
    } else {
        XEN_PT_ERR(d, "Unsupported capability version %#x.\n", version);
        return -1;
    }

    *size = pcie_size;
    return 0;
}
/* get MSI Capability Structure register group size */
static int xen_pt_msi_size_init(XenPCIPassthroughState *s,
                                const XenPTRegGroupInfo *grp_reg,
                                uint32_t base_offset, uint8_t *size)
{
    uint16_t msg_ctrl = 0;
    uint8_t msi_size = 0xa;
    int rc;

    rc = xen_host_pci_get_word(&s->real_device, base_offset + PCI_MSI_FLAGS,
                               &msg_ctrl);
    if (rc) {
        return rc;
    }
    /* 64-bit addressing adds the Message Upper Address register */
    if (msg_ctrl & PCI_MSI_FLAGS_64BIT) {
        msi_size += 4;
    }
    /* per-vector masking adds the Mask and Pending registers */
    if (msg_ctrl & PCI_MSI_FLAGS_MASKBIT) {
        msi_size += 10;
    }

    s->msi = g_new0(XenPTMSI, 1);
    s->msi->pirq = XEN_PT_UNASSIGNED_PIRQ;

    *size = msi_size;
    return 0;
}
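
/* Resulting MSI capability sizes, for reference (from the layout above):
 *   0x0A  32-bit address, no per-vector masking
 *   0x0E  64-bit address, no per-vector masking
 *   0x14  32-bit address with mask/pending registers
 *   0x18  64-bit address with mask/pending registers
 */
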
/* get MSI-X Capability Structure register group size */
static int xen_pt_msix_size_init(XenPCIPassthroughState *s,
                                 const XenPTRegGroupInfo *grp_reg,
                                 uint32_t base_offset, uint8_t *size)
{
    int rc = 0;

    rc = xen_pt_msix_init(s, base_offset);

    if (rc < 0) {
        XEN_PT_ERR(&s->dev, "Internal error: xen_pt_msix_init failed.\n");
        return rc;
    }

    *size = grp_reg->grp_size;
    return 0;
}


static const XenPTRegGroupInfo xen_pt_emu_reg_grps[] = {
    /* Header Type0 reg group */
    {
        .grp_id     = 0xFF,
        .grp_type   = XEN_PT_GRP_TYPE_EMU,
        .grp_size   = 0x40,
        .size_init  = xen_pt_reg_grp_size_init,
        .emu_regs   = xen_pt_emu_reg_header0,
    },
    /* PCI PowerManagement Capability reg group */
    {
        .grp_id     = PCI_CAP_ID_PM,
        .grp_type   = XEN_PT_GRP_TYPE_EMU,
        .grp_size   = PCI_PM_SIZEOF,
        .size_init  = xen_pt_reg_grp_size_init,
        .emu_regs   = xen_pt_emu_reg_pm,
    },
    /* AGP Capability Structure reg group */
    {
        .grp_id     = PCI_CAP_ID_AGP,
        .grp_type   = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size   = 0x30,
        .size_init  = xen_pt_reg_grp_size_init,
    },
    /* Vital Product Data Capability Structure reg group */
    {
        .grp_id     = PCI_CAP_ID_VPD,
        .grp_type   = XEN_PT_GRP_TYPE_EMU,
        .grp_size   = 0x08,
        .size_init  = xen_pt_reg_grp_size_init,
        .emu_regs   = xen_pt_emu_reg_vpd,
    },
    /* Slot Identification reg group */
    {
        .grp_id     = PCI_CAP_ID_SLOTID,
        .grp_type   = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size   = 0x04,
        .size_init  = xen_pt_reg_grp_size_init,
    },
    /* MSI Capability Structure reg group */
    {
        .grp_id     = PCI_CAP_ID_MSI,
        .grp_type   = XEN_PT_GRP_TYPE_EMU,
        .grp_size   = 0xFF,
        .size_init  = xen_pt_msi_size_init,
        .emu_regs   = xen_pt_emu_reg_msi,
    },
    /* PCI-X Capabilities List Item reg group */
    {
        .grp_id     = PCI_CAP_ID_PCIX,
        .grp_type   = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size   = 0x18,
        .size_init  = xen_pt_reg_grp_size_init,
    },
    /* Vendor Specific Capability Structure reg group */
    {
        .grp_id     = PCI_CAP_ID_VNDR,
        .grp_type   = XEN_PT_GRP_TYPE_EMU,
        .grp_size   = 0xFF,
        .size_init  = xen_pt_vendor_size_init,
        .emu_regs   = xen_pt_emu_reg_vendor,
    },
    /* SHPC Capability List Item reg group */
    {
        .grp_id     = PCI_CAP_ID_SHPC,
        .grp_type   = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size   = 0x08,
        .size_init  = xen_pt_reg_grp_size_init,
    },
    /* Subsystem ID and Subsystem Vendor ID Capability List Item reg group */
    {
        .grp_id     = PCI_CAP_ID_SSVID,
        .grp_type   = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size   = 0x08,
        .size_init  = xen_pt_reg_grp_size_init,
    },
    /* AGP 8x Capability Structure reg group */
    {
        .grp_id     = PCI_CAP_ID_AGP3,
        .grp_type   = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size   = 0x30,
        .size_init  = xen_pt_reg_grp_size_init,
    },
    /* PCI Express Capability Structure reg group */
    {
        .grp_id     = PCI_CAP_ID_EXP,
        .grp_type   = XEN_PT_GRP_TYPE_EMU,
        .grp_size   = 0xFF,
        .size_init  = xen_pt_pcie_size_init,
        .emu_regs   = xen_pt_emu_reg_pcie,
    },
    /* MSI-X Capability Structure reg group */
    {
        .grp_id     = PCI_CAP_ID_MSIX,
        .grp_type   = XEN_PT_GRP_TYPE_EMU,
        .grp_size   = 0x0C,
        .size_init  = xen_pt_msix_size_init,
        .emu_regs   = xen_pt_emu_reg_msix,
    },
    /* Intel IGD Opregion group */
    {
        .grp_id     = XEN_PCI_INTEL_OPREGION,
        .grp_type   = XEN_PT_GRP_TYPE_EMU,
        .grp_size   = 0x4,
        .size_init  = xen_pt_reg_grp_size_init,
        .emu_regs   = xen_pt_emu_reg_igd_opregion,
    },
    {
        .grp_size = 0,
    },
};
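
/* Capability chain rewriting: the init function below walks the device's
 * real capability list and returns, for each Capabilities/Next Pointer
 * register, the offset of the next capability that is actually emulated.
 * Hidden capabilities (xen_pt_hide_dev_cap()) and hardwired groups are
 * skipped, so the guest never sees them even though they exist on the
 * host device.
 */
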
/* initialize Capabilities Pointer or Next Pointer register */
static int xen_pt_ptr_reg_init(XenPCIPassthroughState *s,
                               XenPTRegInfo *reg, uint32_t real_offset,
                               uint32_t *data)
{
    int i, rc;
    uint8_t reg_field;
    uint8_t cap_id = 0;

    rc = xen_host_pci_get_byte(&s->real_device, real_offset, &reg_field);
    if (rc) {
        return rc;
    }
    /* find capability offset */
    while (reg_field) {
        for (i = 0; xen_pt_emu_reg_grps[i].grp_size != 0; i++) {
            if (xen_pt_hide_dev_cap(&s->real_device,
                                    xen_pt_emu_reg_grps[i].grp_id)) {
                continue;
            }

            rc = xen_host_pci_get_byte(&s->real_device,
                                       reg_field + PCI_CAP_LIST_ID, &cap_id);
            if (rc) {
                XEN_PT_ERR(&s->dev,
                           "Failed to read capability @0x%x (rc:%d)\n",
                           reg_field + PCI_CAP_LIST_ID, rc);
                return rc;
            }
            if (xen_pt_emu_reg_grps[i].grp_id == cap_id) {
                if (xen_pt_emu_reg_grps[i].grp_type == XEN_PT_GRP_TYPE_EMU) {
                    goto out;
                }
                /* the capability is hardwired to 0 (hidden from the
                 * guest), so follow the chain to the next one
                 */
                break;
            }
        }

        /* next capability */
        rc = xen_host_pci_get_byte(&s->real_device,
                                   reg_field + PCI_CAP_LIST_NEXT, &reg_field);
        if (rc) {
            return rc;
        }
    }

out:
    *data = reg_field;
    return 0;
}


/*************
 * Main
 */

static uint8_t find_cap_offset(XenPCIPassthroughState *s, uint8_t cap)
{
    uint8_t id;
    unsigned max_cap = XEN_PCI_CAP_MAX;
    uint8_t pos = PCI_CAPABILITY_LIST;
    uint8_t status = 0;

    if (xen_host_pci_get_byte(&s->real_device, PCI_STATUS, &status)) {
        return 0;
    }
    if ((status & PCI_STATUS_CAP_LIST) == 0) {
        return 0;
    }

    while (max_cap--) {
        if (xen_host_pci_get_byte(&s->real_device, pos, &pos)) {
            break;
        }
        if (pos < PCI_CONFIG_HEADER_SIZE) {
            break;
        }

        pos &= ~3;
        if (xen_host_pci_get_byte(&s->real_device,
                                  pos + PCI_CAP_LIST_ID, &id)) {
            break;
        }

        if (id == 0xff) {
            break;
        }
        if (id == cap) {
            return pos;
        }

        pos += PCI_CAP_LIST_NEXT;
    }
    return 0;
}
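
/* Example of the sync performed below (illustrative): for the Vendor ID
 * register emu_mask is 0xFFFF, so host_mask is 0 and dev.config simply
 * receives the emulated value.  For a partially emulated register such as
 * the MSI Message Control register (emu_mask 0x017E) the host bits outside
 * the mask are read from the device and merged in, and any mismatch between
 * the two views is logged before dev.config is written.
 */
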
static void xen_pt_config_reg_init(XenPCIPassthroughState *s,
                                   XenPTRegGroup *reg_grp, XenPTRegInfo *reg,
                                   Error **errp)
{
    XenPTReg *reg_entry;
    uint32_t data = 0;
    int rc = 0;

    reg_entry = g_new0(XenPTReg, 1);
    reg_entry->reg = reg;

    if (reg->init) {
        uint32_t host_mask, size_mask;
        unsigned int offset;
        uint32_t val;

        /* initialize emulated register */
        rc = reg->init(s, reg_entry->reg,
                       reg_grp->base_offset + reg->offset, &data);
        if (rc < 0) {
            g_free(reg_entry);
            error_setg(errp, "Failed to initialize emulated register");
            return;
        }
        if (data == XEN_PT_INVALID_REG) {
            /* free unused BAR register entry */
            g_free(reg_entry);
            return;
        }
        /* Sync up the data to dev.config */
        offset = reg_grp->base_offset + reg->offset;
        size_mask = 0xFFFFFFFF >> ((4 - reg->size) << 3);

        switch (reg->size) {
        case 1: rc = xen_host_pci_get_byte(&s->real_device, offset,
                                           (uint8_t *)&val);
                break;
        case 2: rc = xen_host_pci_get_word(&s->real_device, offset,
                                           (uint16_t *)&val);
                break;
        case 4: rc = xen_host_pci_get_long(&s->real_device, offset, &val);
                break;
        default: abort();
        }
        if (rc) {
            /* Serious issues when we cannot read the host values! */
            g_free(reg_entry);
            error_setg(errp, "Cannot read host values");
            return;
        }
        /* Bits set in emu_mask are the ones we emulate.  dev.config shall
         * contain the emulated view of the guest, therefore we flip the mask
         * to mask out the host values (which dev.config initially has). */
        host_mask = size_mask & ~reg->emu_mask;

        if ((data & host_mask) != (val & host_mask)) {
            uint32_t new_val;

            /* Mask out host (including past size). */
            new_val = val & host_mask;
            /* Merge emulated ones (excluding the non-emulated ones). */
            new_val |= data & ~host_mask;
            /* Leave intact host and emulated values past the size - even
             * though we do not care as we write per reg->size granularity,
             * but for the logging below let's have the proper value. */
            new_val |= (val | data) & ~size_mask;
            XEN_PT_LOG(&s->dev, "Offset 0x%04x mismatch! Emulated=0x%04x,"
                       " host=0x%04x, syncing to 0x%04x.\n",
                       offset, data, val, new_val);
            val = new_val;
        } else {
            val = data;
        }

        if (val & ~size_mask) {
            error_setg(errp, "Offset 0x%04x:0x%04x expands past"
                       " register size (%d)", offset, val, reg->size);
            g_free(reg_entry);
            return;
        }
        /* This could be just pci_set_long as we don't modify the bits
         * past reg->size, but in case this routine is run in parallel or the
         * init value is larger, we do not want to overwrite registers. */
        switch (reg->size) {
        case 1: pci_set_byte(s->dev.config + offset, (uint8_t)val);
                break;
        case 2: pci_set_word(s->dev.config + offset, (uint16_t)val);
                break;
        case 4: pci_set_long(s->dev.config + offset, val);
                break;
        default: abort();
        }
        /* set register value pointer to the data. */
        reg_entry->ptr.byte = s->dev.config + offset;

    }
    /* list add register entry */
    QLIST_INSERT_HEAD(&reg_grp->reg_tbl_list, reg_entry, entries);
}
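
/* Initialization flow, for reference: xen_pt_config_init() iterates over
 * xen_pt_emu_reg_grps[], locates each capability on the real device with
 * find_cap_offset(), sizes the group via its size_init hook, and then
 * calls xen_pt_config_reg_init() for every register the group emulates.
 */
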
void xen_pt_config_init(XenPCIPassthroughState *s, Error **errp)
{
    int i, rc;
    Error *err = NULL;

    QLIST_INIT(&s->reg_grps);

    for (i = 0; xen_pt_emu_reg_grps[i].grp_size != 0; i++) {
        uint32_t reg_grp_offset = 0;
        XenPTRegGroup *reg_grp_entry = NULL;

        if (xen_pt_emu_reg_grps[i].grp_id != 0xFF
            && xen_pt_emu_reg_grps[i].grp_id != XEN_PCI_INTEL_OPREGION) {
            if (xen_pt_hide_dev_cap(&s->real_device,
                                    xen_pt_emu_reg_grps[i].grp_id)) {
                continue;
            }

            reg_grp_offset = find_cap_offset(s, xen_pt_emu_reg_grps[i].grp_id);

            if (!reg_grp_offset) {
                continue;
            }
        }

        /*
         * By default we will trap up to 0x40 in the cfg space.
         * If an Intel device is passed through, we need to trap 0xfc,
         * therefore the size should be 0xff.
         */
        if (xen_pt_emu_reg_grps[i].grp_id == XEN_PCI_INTEL_OPREGION) {
            reg_grp_offset = XEN_PCI_INTEL_OPREGION;
        }

        reg_grp_entry = g_new0(XenPTRegGroup, 1);
        QLIST_INIT(&reg_grp_entry->reg_tbl_list);
        QLIST_INSERT_HEAD(&s->reg_grps, reg_grp_entry, entries);

        reg_grp_entry->base_offset = reg_grp_offset;
        reg_grp_entry->reg_grp = xen_pt_emu_reg_grps + i;
        if (xen_pt_emu_reg_grps[i].size_init) {
            /* get register group size */
            rc = xen_pt_emu_reg_grps[i].size_init(s, reg_grp_entry->reg_grp,
                                                  reg_grp_offset,
                                                  &reg_grp_entry->size);
            if (rc < 0) {
                error_setg(&err, "Failed to initialize %d/%zu, type = 0x%x,"
                           " rc: %d", i, ARRAY_SIZE(xen_pt_emu_reg_grps),
                           xen_pt_emu_reg_grps[i].grp_type, rc);
                error_propagate(errp, err);
                xen_pt_config_delete(s);
                return;
            }
        }

        if (xen_pt_emu_reg_grps[i].grp_type == XEN_PT_GRP_TYPE_EMU) {
            if (xen_pt_emu_reg_grps[i].emu_regs) {
                int j = 0;
                XenPTRegInfo *regs = xen_pt_emu_reg_grps[i].emu_regs;

                /* initialize capability register */
                for (j = 0; regs->size != 0; j++, regs++) {
                    xen_pt_config_reg_init(s, reg_grp_entry, regs, &err);
                    if (err) {
                        error_append_hint(&err, "Failed to initialize %d/%zu"
                                          " reg 0x%x in grp_type = 0x%x"
                                          " (%d/%zu)", j,
                                          ARRAY_SIZE(xen_pt_emu_reg_grps[i].emu_regs),
                                          regs->offset,
                                          xen_pt_emu_reg_grps[i].grp_type,
                                          i, ARRAY_SIZE(xen_pt_emu_reg_grps));
                        error_propagate(errp, err);
                        xen_pt_config_delete(s);
                        return;
                    }
                }
            }
        }
    }
}

/* delete all emulated registers */
void xen_pt_config_delete(XenPCIPassthroughState *s)
{
    struct XenPTRegGroup *reg_group, *next_grp;
    struct XenPTReg *reg, *next_reg;

    /* free MSI/MSI-X info table */
    if (s->msix) {
        xen_pt_msix_unmap(s);
    }
    g_free(s->msi);

    /* free all register group entries */
    QLIST_FOREACH_SAFE(reg_group, &s->reg_grps, entries, next_grp) {
        /* free all register entries */
        QLIST_FOREACH_SAFE(reg, &reg_group->reg_tbl_list, entries, next_reg) {
            QLIST_REMOVE(reg, entries);
            g_free(reg);
        }

        QLIST_REMOVE(reg_group, entries);
        g_free(reg_group);
    }
}