1 /* 2 * Copyright (c) 2007, Neocleus Corporation. 3 * Copyright (c) 2007, Intel Corporation. 4 * 5 * This work is licensed under the terms of the GNU GPL, version 2. See 6 * the COPYING file in the top-level directory. 7 * 8 * Alex Novik <alex@neocleus.com> 9 * Allen Kay <allen.m.kay@intel.com> 10 * Guy Zana <guy@neocleus.com> 11 * 12 * This file implements direct PCI assignment to a HVM guest 13 */ 14 15 #include "qemu/timer.h" 16 #include "hw/xen/xen_backend.h" 17 #include "xen_pt.h" 18 19 #define XEN_PT_MERGE_VALUE(value, data, val_mask) \ 20 (((value) & (val_mask)) | ((data) & ~(val_mask))) 21 22 #define XEN_PT_INVALID_REG 0xFFFFFFFF /* invalid register value */ 23 24 /* prototype */ 25 26 static int xen_pt_ptr_reg_init(XenPCIPassthroughState *s, XenPTRegInfo *reg, 27 uint32_t real_offset, uint32_t *data); 28 29 30 /* helper */ 31 32 /* A return value of 1 means the capability should NOT be exposed to guest. */ 33 static int xen_pt_hide_dev_cap(const XenHostPCIDevice *d, uint8_t grp_id) 34 { 35 switch (grp_id) { 36 case PCI_CAP_ID_EXP: 37 /* The PCI Express Capability Structure of the VF of Intel 82599 10GbE 38 * Controller looks trivial, e.g., the PCI Express Capabilities 39 * Register is 0. We should not try to expose it to guest. 40 * 41 * The datasheet is available at 42 * http://download.intel.com/design/network/datashts/82599_datasheet.pdf 43 * 44 * See 'Table 9.7. VF PCIe Configuration Space' of the datasheet, the 45 * PCI Express Capability Structure of the VF of Intel 82599 10GbE 46 * Controller looks trivial, e.g., the PCI Express Capabilities 47 * Register is 0, so the Capability Version is 0 and 48 * xen_pt_pcie_size_init() would fail. 
49 */ 50 if (d->vendor_id == PCI_VENDOR_ID_INTEL && 51 d->device_id == PCI_DEVICE_ID_INTEL_82599_SFP_VF) { 52 return 1; 53 } 54 break; 55 } 56 return 0; 57 } 58 59 /* find emulate register group entry */ 60 XenPTRegGroup *xen_pt_find_reg_grp(XenPCIPassthroughState *s, uint32_t address) 61 { 62 XenPTRegGroup *entry = NULL; 63 64 /* find register group entry */ 65 QLIST_FOREACH(entry, &s->reg_grps, entries) { 66 /* check address */ 67 if ((entry->base_offset <= address) 68 && ((entry->base_offset + entry->size) > address)) { 69 return entry; 70 } 71 } 72 73 /* group entry not found */ 74 return NULL; 75 } 76 77 /* find emulate register entry */ 78 XenPTReg *xen_pt_find_reg(XenPTRegGroup *reg_grp, uint32_t address) 79 { 80 XenPTReg *reg_entry = NULL; 81 XenPTRegInfo *reg = NULL; 82 uint32_t real_offset = 0; 83 84 /* find register entry */ 85 QLIST_FOREACH(reg_entry, ®_grp->reg_tbl_list, entries) { 86 reg = reg_entry->reg; 87 real_offset = reg_grp->base_offset + reg->offset; 88 /* check address */ 89 if ((real_offset <= address) 90 && ((real_offset + reg->size) > address)) { 91 return reg_entry; 92 } 93 } 94 95 return NULL; 96 } 97 98 99 /**************** 100 * general register functions 101 */ 102 103 /* register initialization function */ 104 105 static int xen_pt_common_reg_init(XenPCIPassthroughState *s, 106 XenPTRegInfo *reg, uint32_t real_offset, 107 uint32_t *data) 108 { 109 *data = reg->init_val; 110 return 0; 111 } 112 113 /* Read register functions */ 114 115 static int xen_pt_byte_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry, 116 uint8_t *value, uint8_t valid_mask) 117 { 118 XenPTRegInfo *reg = cfg_entry->reg; 119 uint8_t valid_emu_mask = 0; 120 121 /* emulate byte register */ 122 valid_emu_mask = reg->emu_mask & valid_mask; 123 *value = XEN_PT_MERGE_VALUE(*value, cfg_entry->data, ~valid_emu_mask); 124 125 return 0; 126 } 127 static int xen_pt_word_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry, 128 uint16_t *value, uint16_t valid_mask) 
129 { 130 XenPTRegInfo *reg = cfg_entry->reg; 131 uint16_t valid_emu_mask = 0; 132 133 /* emulate word register */ 134 valid_emu_mask = reg->emu_mask & valid_mask; 135 *value = XEN_PT_MERGE_VALUE(*value, cfg_entry->data, ~valid_emu_mask); 136 137 return 0; 138 } 139 static int xen_pt_long_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry, 140 uint32_t *value, uint32_t valid_mask) 141 { 142 XenPTRegInfo *reg = cfg_entry->reg; 143 uint32_t valid_emu_mask = 0; 144 145 /* emulate long register */ 146 valid_emu_mask = reg->emu_mask & valid_mask; 147 *value = XEN_PT_MERGE_VALUE(*value, cfg_entry->data, ~valid_emu_mask); 148 149 return 0; 150 } 151 152 /* Write register functions */ 153 154 static int xen_pt_byte_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry, 155 uint8_t *val, uint8_t dev_value, 156 uint8_t valid_mask) 157 { 158 XenPTRegInfo *reg = cfg_entry->reg; 159 uint8_t writable_mask = 0; 160 uint8_t throughable_mask = 0; 161 162 /* modify emulate register */ 163 writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask; 164 cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask); 165 166 /* create value for writing to I/O device register */ 167 throughable_mask = ~reg->emu_mask & valid_mask; 168 *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask); 169 170 return 0; 171 } 172 static int xen_pt_word_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry, 173 uint16_t *val, uint16_t dev_value, 174 uint16_t valid_mask) 175 { 176 XenPTRegInfo *reg = cfg_entry->reg; 177 uint16_t writable_mask = 0; 178 uint16_t throughable_mask = 0; 179 180 /* modify emulate register */ 181 writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask; 182 cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask); 183 184 /* create value for writing to I/O device register */ 185 throughable_mask = ~reg->emu_mask & valid_mask; 186 *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask); 187 188 return 0; 189 } 190 static 
int xen_pt_long_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry, 191 uint32_t *val, uint32_t dev_value, 192 uint32_t valid_mask) 193 { 194 XenPTRegInfo *reg = cfg_entry->reg; 195 uint32_t writable_mask = 0; 196 uint32_t throughable_mask = 0; 197 198 /* modify emulate register */ 199 writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask; 200 cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask); 201 202 /* create value for writing to I/O device register */ 203 throughable_mask = ~reg->emu_mask & valid_mask; 204 *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask); 205 206 return 0; 207 } 208 209 210 /* XenPTRegInfo declaration 211 * - only for emulated register (either a part or whole bit). 212 * - for passthrough register that need special behavior (like interacting with 213 * other component), set emu_mask to all 0 and specify r/w func properly. 214 * - do NOT use ALL F for init_val, otherwise the tbl will not be registered. 215 */ 216 217 /******************** 218 * Header Type0 219 */ 220 221 static int xen_pt_vendor_reg_init(XenPCIPassthroughState *s, 222 XenPTRegInfo *reg, uint32_t real_offset, 223 uint32_t *data) 224 { 225 *data = s->real_device.vendor_id; 226 return 0; 227 } 228 static int xen_pt_device_reg_init(XenPCIPassthroughState *s, 229 XenPTRegInfo *reg, uint32_t real_offset, 230 uint32_t *data) 231 { 232 *data = s->real_device.device_id; 233 return 0; 234 } 235 static int xen_pt_status_reg_init(XenPCIPassthroughState *s, 236 XenPTRegInfo *reg, uint32_t real_offset, 237 uint32_t *data) 238 { 239 XenPTRegGroup *reg_grp_entry = NULL; 240 XenPTReg *reg_entry = NULL; 241 uint32_t reg_field = 0; 242 243 /* find Header register group */ 244 reg_grp_entry = xen_pt_find_reg_grp(s, PCI_CAPABILITY_LIST); 245 if (reg_grp_entry) { 246 /* find Capabilities Pointer register */ 247 reg_entry = xen_pt_find_reg(reg_grp_entry, PCI_CAPABILITY_LIST); 248 if (reg_entry) { 249 /* check Capabilities Pointer register */ 250 if 
(reg_entry->data) { 251 reg_field |= PCI_STATUS_CAP_LIST; 252 } else { 253 reg_field &= ~PCI_STATUS_CAP_LIST; 254 } 255 } else { 256 xen_shutdown_fatal_error("Internal error: Couldn't find XenPTReg*" 257 " for Capabilities Pointer register." 258 " (%s)\n", __func__); 259 return -1; 260 } 261 } else { 262 xen_shutdown_fatal_error("Internal error: Couldn't find XenPTRegGroup" 263 " for Header. (%s)\n", __func__); 264 return -1; 265 } 266 267 *data = reg_field; 268 return 0; 269 } 270 static int xen_pt_header_type_reg_init(XenPCIPassthroughState *s, 271 XenPTRegInfo *reg, uint32_t real_offset, 272 uint32_t *data) 273 { 274 /* read PCI_HEADER_TYPE */ 275 *data = reg->init_val | 0x80; 276 return 0; 277 } 278 279 /* initialize Interrupt Pin register */ 280 static int xen_pt_irqpin_reg_init(XenPCIPassthroughState *s, 281 XenPTRegInfo *reg, uint32_t real_offset, 282 uint32_t *data) 283 { 284 *data = xen_pt_pci_read_intx(s); 285 return 0; 286 } 287 288 /* Command register */ 289 static int xen_pt_cmd_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry, 290 uint16_t *val, uint16_t dev_value, 291 uint16_t valid_mask) 292 { 293 XenPTRegInfo *reg = cfg_entry->reg; 294 uint16_t writable_mask = 0; 295 uint16_t throughable_mask = 0; 296 297 /* modify emulate register */ 298 writable_mask = ~reg->ro_mask & valid_mask; 299 cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask); 300 301 /* create value for writing to I/O device register */ 302 throughable_mask = ~reg->emu_mask & valid_mask; 303 304 if (*val & PCI_COMMAND_INTX_DISABLE) { 305 throughable_mask |= PCI_COMMAND_INTX_DISABLE; 306 } else { 307 if (s->machine_irq) { 308 throughable_mask |= PCI_COMMAND_INTX_DISABLE; 309 } 310 } 311 312 *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask); 313 314 return 0; 315 } 316 317 /* BAR */ 318 #define XEN_PT_BAR_MEM_RO_MASK 0x0000000F /* BAR ReadOnly mask(Memory) */ 319 #define XEN_PT_BAR_MEM_EMU_MASK 0xFFFFFFF0 /* BAR emul mask(Memory) */ 320 #define 
XEN_PT_BAR_IO_RO_MASK 0x00000003 /* BAR ReadOnly mask(I/O) */ 321 #define XEN_PT_BAR_IO_EMU_MASK 0xFFFFFFFC /* BAR emul mask(I/O) */ 322 323 static bool is_64bit_bar(PCIIORegion *r) 324 { 325 return !!(r->type & PCI_BASE_ADDRESS_MEM_TYPE_64); 326 } 327 328 static uint64_t xen_pt_get_bar_size(PCIIORegion *r) 329 { 330 if (is_64bit_bar(r)) { 331 uint64_t size64; 332 size64 = (r + 1)->size; 333 size64 <<= 32; 334 size64 += r->size; 335 return size64; 336 } 337 return r->size; 338 } 339 340 static XenPTBarFlag xen_pt_bar_reg_parse(XenPCIPassthroughState *s, 341 int index) 342 { 343 PCIDevice *d = &s->dev; 344 XenPTRegion *region = NULL; 345 PCIIORegion *r; 346 347 /* check 64bit BAR */ 348 if ((0 < index) && (index < PCI_ROM_SLOT)) { 349 int type = s->real_device.io_regions[index - 1].type; 350 351 if ((type & XEN_HOST_PCI_REGION_TYPE_MEM) 352 && (type & XEN_HOST_PCI_REGION_TYPE_MEM_64)) { 353 region = &s->bases[index - 1]; 354 if (region->bar_flag != XEN_PT_BAR_FLAG_UPPER) { 355 return XEN_PT_BAR_FLAG_UPPER; 356 } 357 } 358 } 359 360 /* check unused BAR */ 361 r = &d->io_regions[index]; 362 if (!xen_pt_get_bar_size(r)) { 363 return XEN_PT_BAR_FLAG_UNUSED; 364 } 365 366 /* for ExpROM BAR */ 367 if (index == PCI_ROM_SLOT) { 368 return XEN_PT_BAR_FLAG_MEM; 369 } 370 371 /* check BAR I/O indicator */ 372 if (s->real_device.io_regions[index].type & XEN_HOST_PCI_REGION_TYPE_IO) { 373 return XEN_PT_BAR_FLAG_IO; 374 } else { 375 return XEN_PT_BAR_FLAG_MEM; 376 } 377 } 378 379 static inline uint32_t base_address_with_flags(XenHostPCIIORegion *hr) 380 { 381 if (hr->type & XEN_HOST_PCI_REGION_TYPE_IO) { 382 return hr->base_addr | (hr->bus_flags & ~PCI_BASE_ADDRESS_IO_MASK); 383 } else { 384 return hr->base_addr | (hr->bus_flags & ~PCI_BASE_ADDRESS_MEM_MASK); 385 } 386 } 387 388 static int xen_pt_bar_reg_init(XenPCIPassthroughState *s, XenPTRegInfo *reg, 389 uint32_t real_offset, uint32_t *data) 390 { 391 uint32_t reg_field = 0; 392 int index; 393 394 index = 
xen_pt_bar_offset_to_index(reg->offset); 395 if (index < 0 || index >= PCI_NUM_REGIONS) { 396 XEN_PT_ERR(&s->dev, "Internal error: Invalid BAR index [%d].\n", index); 397 return -1; 398 } 399 400 /* set BAR flag */ 401 s->bases[index].bar_flag = xen_pt_bar_reg_parse(s, index); 402 if (s->bases[index].bar_flag == XEN_PT_BAR_FLAG_UNUSED) { 403 reg_field = XEN_PT_INVALID_REG; 404 } 405 406 *data = reg_field; 407 return 0; 408 } 409 static int xen_pt_bar_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry, 410 uint32_t *value, uint32_t valid_mask) 411 { 412 XenPTRegInfo *reg = cfg_entry->reg; 413 uint32_t valid_emu_mask = 0; 414 uint32_t bar_emu_mask = 0; 415 int index; 416 417 /* get BAR index */ 418 index = xen_pt_bar_offset_to_index(reg->offset); 419 if (index < 0 || index >= PCI_NUM_REGIONS - 1) { 420 XEN_PT_ERR(&s->dev, "Internal error: Invalid BAR index [%d].\n", index); 421 return -1; 422 } 423 424 /* use fixed-up value from kernel sysfs */ 425 *value = base_address_with_flags(&s->real_device.io_regions[index]); 426 427 /* set emulate mask depend on BAR flag */ 428 switch (s->bases[index].bar_flag) { 429 case XEN_PT_BAR_FLAG_MEM: 430 bar_emu_mask = XEN_PT_BAR_MEM_EMU_MASK; 431 break; 432 case XEN_PT_BAR_FLAG_IO: 433 bar_emu_mask = XEN_PT_BAR_IO_EMU_MASK; 434 break; 435 case XEN_PT_BAR_FLAG_UPPER: 436 bar_emu_mask = XEN_PT_BAR_ALLF; 437 break; 438 default: 439 break; 440 } 441 442 /* emulate BAR */ 443 valid_emu_mask = bar_emu_mask & valid_mask; 444 *value = XEN_PT_MERGE_VALUE(*value, cfg_entry->data, ~valid_emu_mask); 445 446 return 0; 447 } 448 static int xen_pt_bar_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry, 449 uint32_t *val, uint32_t dev_value, 450 uint32_t valid_mask) 451 { 452 XenPTRegInfo *reg = cfg_entry->reg; 453 XenPTRegion *base = NULL; 454 PCIDevice *d = &s->dev; 455 const PCIIORegion *r; 456 uint32_t writable_mask = 0; 457 uint32_t throughable_mask = 0; 458 uint32_t bar_emu_mask = 0; 459 uint32_t bar_ro_mask = 0; 460 uint32_t 
r_size = 0; 461 int index = 0; 462 463 index = xen_pt_bar_offset_to_index(reg->offset); 464 if (index < 0 || index >= PCI_NUM_REGIONS) { 465 XEN_PT_ERR(d, "Internal error: Invalid BAR index [%d].\n", index); 466 return -1; 467 } 468 469 r = &d->io_regions[index]; 470 base = &s->bases[index]; 471 r_size = xen_pt_get_emul_size(base->bar_flag, r->size); 472 473 /* set emulate mask and read-only mask values depend on the BAR flag */ 474 switch (s->bases[index].bar_flag) { 475 case XEN_PT_BAR_FLAG_MEM: 476 bar_emu_mask = XEN_PT_BAR_MEM_EMU_MASK; 477 if (!r_size) { 478 /* low 32 bits mask for 64 bit bars */ 479 bar_ro_mask = XEN_PT_BAR_ALLF; 480 } else { 481 bar_ro_mask = XEN_PT_BAR_MEM_RO_MASK | (r_size - 1); 482 } 483 break; 484 case XEN_PT_BAR_FLAG_IO: 485 bar_emu_mask = XEN_PT_BAR_IO_EMU_MASK; 486 bar_ro_mask = XEN_PT_BAR_IO_RO_MASK | (r_size - 1); 487 break; 488 case XEN_PT_BAR_FLAG_UPPER: 489 bar_emu_mask = XEN_PT_BAR_ALLF; 490 bar_ro_mask = r_size ? r_size - 1 : 0; 491 break; 492 default: 493 break; 494 } 495 496 /* modify emulate register */ 497 writable_mask = bar_emu_mask & ~bar_ro_mask & valid_mask; 498 cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask); 499 500 /* check whether we need to update the virtual region address or not */ 501 switch (s->bases[index].bar_flag) { 502 case XEN_PT_BAR_FLAG_UPPER: 503 case XEN_PT_BAR_FLAG_MEM: 504 /* nothing to do */ 505 break; 506 case XEN_PT_BAR_FLAG_IO: 507 /* nothing to do */ 508 break; 509 default: 510 break; 511 } 512 513 /* create value for writing to I/O device register */ 514 throughable_mask = ~bar_emu_mask & valid_mask; 515 *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask); 516 517 return 0; 518 } 519 520 /* write Exp ROM BAR */ 521 static int xen_pt_exp_rom_bar_reg_write(XenPCIPassthroughState *s, 522 XenPTReg *cfg_entry, uint32_t *val, 523 uint32_t dev_value, uint32_t valid_mask) 524 { 525 XenPTRegInfo *reg = cfg_entry->reg; 526 XenPTRegion *base = NULL; 527 PCIDevice *d = 
(PCIDevice *)&s->dev; 528 uint32_t writable_mask = 0; 529 uint32_t throughable_mask = 0; 530 pcibus_t r_size = 0; 531 uint32_t bar_emu_mask = 0; 532 uint32_t bar_ro_mask = 0; 533 534 r_size = d->io_regions[PCI_ROM_SLOT].size; 535 base = &s->bases[PCI_ROM_SLOT]; 536 /* align memory type resource size */ 537 r_size = xen_pt_get_emul_size(base->bar_flag, r_size); 538 539 /* set emulate mask and read-only mask */ 540 bar_emu_mask = reg->emu_mask; 541 bar_ro_mask = (reg->ro_mask | (r_size - 1)) & ~PCI_ROM_ADDRESS_ENABLE; 542 543 /* modify emulate register */ 544 writable_mask = ~bar_ro_mask & valid_mask; 545 cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask); 546 547 /* create value for writing to I/O device register */ 548 throughable_mask = ~bar_emu_mask & valid_mask; 549 *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask); 550 551 return 0; 552 } 553 554 /* Header Type0 reg static information table */ 555 static XenPTRegInfo xen_pt_emu_reg_header0[] = { 556 /* Vendor ID reg */ 557 { 558 .offset = PCI_VENDOR_ID, 559 .size = 2, 560 .init_val = 0x0000, 561 .ro_mask = 0xFFFF, 562 .emu_mask = 0xFFFF, 563 .init = xen_pt_vendor_reg_init, 564 .u.w.read = xen_pt_word_reg_read, 565 .u.w.write = xen_pt_word_reg_write, 566 }, 567 /* Device ID reg */ 568 { 569 .offset = PCI_DEVICE_ID, 570 .size = 2, 571 .init_val = 0x0000, 572 .ro_mask = 0xFFFF, 573 .emu_mask = 0xFFFF, 574 .init = xen_pt_device_reg_init, 575 .u.w.read = xen_pt_word_reg_read, 576 .u.w.write = xen_pt_word_reg_write, 577 }, 578 /* Command reg */ 579 { 580 .offset = PCI_COMMAND, 581 .size = 2, 582 .init_val = 0x0000, 583 .ro_mask = 0xF880, 584 .emu_mask = 0x0743, 585 .init = xen_pt_common_reg_init, 586 .u.w.read = xen_pt_word_reg_read, 587 .u.w.write = xen_pt_cmd_reg_write, 588 }, 589 /* Capabilities Pointer reg */ 590 { 591 .offset = PCI_CAPABILITY_LIST, 592 .size = 1, 593 .init_val = 0x00, 594 .ro_mask = 0xFF, 595 .emu_mask = 0xFF, 596 .init = xen_pt_ptr_reg_init, 597 .u.b.read = 
xen_pt_byte_reg_read, 598 .u.b.write = xen_pt_byte_reg_write, 599 }, 600 /* Status reg */ 601 /* use emulated Cap Ptr value to initialize, 602 * so need to be declared after Cap Ptr reg 603 */ 604 { 605 .offset = PCI_STATUS, 606 .size = 2, 607 .init_val = 0x0000, 608 .ro_mask = 0x06FF, 609 .emu_mask = 0x0010, 610 .init = xen_pt_status_reg_init, 611 .u.w.read = xen_pt_word_reg_read, 612 .u.w.write = xen_pt_word_reg_write, 613 }, 614 /* Cache Line Size reg */ 615 { 616 .offset = PCI_CACHE_LINE_SIZE, 617 .size = 1, 618 .init_val = 0x00, 619 .ro_mask = 0x00, 620 .emu_mask = 0xFF, 621 .init = xen_pt_common_reg_init, 622 .u.b.read = xen_pt_byte_reg_read, 623 .u.b.write = xen_pt_byte_reg_write, 624 }, 625 /* Latency Timer reg */ 626 { 627 .offset = PCI_LATENCY_TIMER, 628 .size = 1, 629 .init_val = 0x00, 630 .ro_mask = 0x00, 631 .emu_mask = 0xFF, 632 .init = xen_pt_common_reg_init, 633 .u.b.read = xen_pt_byte_reg_read, 634 .u.b.write = xen_pt_byte_reg_write, 635 }, 636 /* Header Type reg */ 637 { 638 .offset = PCI_HEADER_TYPE, 639 .size = 1, 640 .init_val = 0x00, 641 .ro_mask = 0xFF, 642 .emu_mask = 0x00, 643 .init = xen_pt_header_type_reg_init, 644 .u.b.read = xen_pt_byte_reg_read, 645 .u.b.write = xen_pt_byte_reg_write, 646 }, 647 /* Interrupt Line reg */ 648 { 649 .offset = PCI_INTERRUPT_LINE, 650 .size = 1, 651 .init_val = 0x00, 652 .ro_mask = 0x00, 653 .emu_mask = 0xFF, 654 .init = xen_pt_common_reg_init, 655 .u.b.read = xen_pt_byte_reg_read, 656 .u.b.write = xen_pt_byte_reg_write, 657 }, 658 /* Interrupt Pin reg */ 659 { 660 .offset = PCI_INTERRUPT_PIN, 661 .size = 1, 662 .init_val = 0x00, 663 .ro_mask = 0xFF, 664 .emu_mask = 0xFF, 665 .init = xen_pt_irqpin_reg_init, 666 .u.b.read = xen_pt_byte_reg_read, 667 .u.b.write = xen_pt_byte_reg_write, 668 }, 669 /* BAR 0 reg */ 670 /* mask of BAR need to be decided later, depends on IO/MEM type */ 671 { 672 .offset = PCI_BASE_ADDRESS_0, 673 .size = 4, 674 .init_val = 0x00000000, 675 .init = xen_pt_bar_reg_init, 676 
.u.dw.read = xen_pt_bar_reg_read, 677 .u.dw.write = xen_pt_bar_reg_write, 678 }, 679 /* BAR 1 reg */ 680 { 681 .offset = PCI_BASE_ADDRESS_1, 682 .size = 4, 683 .init_val = 0x00000000, 684 .init = xen_pt_bar_reg_init, 685 .u.dw.read = xen_pt_bar_reg_read, 686 .u.dw.write = xen_pt_bar_reg_write, 687 }, 688 /* BAR 2 reg */ 689 { 690 .offset = PCI_BASE_ADDRESS_2, 691 .size = 4, 692 .init_val = 0x00000000, 693 .init = xen_pt_bar_reg_init, 694 .u.dw.read = xen_pt_bar_reg_read, 695 .u.dw.write = xen_pt_bar_reg_write, 696 }, 697 /* BAR 3 reg */ 698 { 699 .offset = PCI_BASE_ADDRESS_3, 700 .size = 4, 701 .init_val = 0x00000000, 702 .init = xen_pt_bar_reg_init, 703 .u.dw.read = xen_pt_bar_reg_read, 704 .u.dw.write = xen_pt_bar_reg_write, 705 }, 706 /* BAR 4 reg */ 707 { 708 .offset = PCI_BASE_ADDRESS_4, 709 .size = 4, 710 .init_val = 0x00000000, 711 .init = xen_pt_bar_reg_init, 712 .u.dw.read = xen_pt_bar_reg_read, 713 .u.dw.write = xen_pt_bar_reg_write, 714 }, 715 /* BAR 5 reg */ 716 { 717 .offset = PCI_BASE_ADDRESS_5, 718 .size = 4, 719 .init_val = 0x00000000, 720 .init = xen_pt_bar_reg_init, 721 .u.dw.read = xen_pt_bar_reg_read, 722 .u.dw.write = xen_pt_bar_reg_write, 723 }, 724 /* Expansion ROM BAR reg */ 725 { 726 .offset = PCI_ROM_ADDRESS, 727 .size = 4, 728 .init_val = 0x00000000, 729 .ro_mask = 0x000007FE, 730 .emu_mask = 0xFFFFF800, 731 .init = xen_pt_bar_reg_init, 732 .u.dw.read = xen_pt_long_reg_read, 733 .u.dw.write = xen_pt_exp_rom_bar_reg_write, 734 }, 735 { 736 .size = 0, 737 }, 738 }; 739 740 741 /********************************* 742 * Vital Product Data Capability 743 */ 744 745 /* Vital Product Data Capability Structure reg static information table */ 746 static XenPTRegInfo xen_pt_emu_reg_vpd[] = { 747 { 748 .offset = PCI_CAP_LIST_NEXT, 749 .size = 1, 750 .init_val = 0x00, 751 .ro_mask = 0xFF, 752 .emu_mask = 0xFF, 753 .init = xen_pt_ptr_reg_init, 754 .u.b.read = xen_pt_byte_reg_read, 755 .u.b.write = xen_pt_byte_reg_write, 756 }, 757 { 758 .size = 0, 759 
}, 760 }; 761 762 763 /************************************** 764 * Vendor Specific Capability 765 */ 766 767 /* Vendor Specific Capability Structure reg static information table */ 768 static XenPTRegInfo xen_pt_emu_reg_vendor[] = { 769 { 770 .offset = PCI_CAP_LIST_NEXT, 771 .size = 1, 772 .init_val = 0x00, 773 .ro_mask = 0xFF, 774 .emu_mask = 0xFF, 775 .init = xen_pt_ptr_reg_init, 776 .u.b.read = xen_pt_byte_reg_read, 777 .u.b.write = xen_pt_byte_reg_write, 778 }, 779 { 780 .size = 0, 781 }, 782 }; 783 784 785 /***************************** 786 * PCI Express Capability 787 */ 788 789 static inline uint8_t get_capability_version(XenPCIPassthroughState *s, 790 uint32_t offset) 791 { 792 uint8_t flags = pci_get_byte(s->dev.config + offset + PCI_EXP_FLAGS); 793 return flags & PCI_EXP_FLAGS_VERS; 794 } 795 796 static inline uint8_t get_device_type(XenPCIPassthroughState *s, 797 uint32_t offset) 798 { 799 uint8_t flags = pci_get_byte(s->dev.config + offset + PCI_EXP_FLAGS); 800 return (flags & PCI_EXP_FLAGS_TYPE) >> 4; 801 } 802 803 /* initialize Link Control register */ 804 static int xen_pt_linkctrl_reg_init(XenPCIPassthroughState *s, 805 XenPTRegInfo *reg, uint32_t real_offset, 806 uint32_t *data) 807 { 808 uint8_t cap_ver = get_capability_version(s, real_offset - reg->offset); 809 uint8_t dev_type = get_device_type(s, real_offset - reg->offset); 810 811 /* no need to initialize in case of Root Complex Integrated Endpoint 812 * with cap_ver 1.x 813 */ 814 if ((dev_type == PCI_EXP_TYPE_RC_END) && (cap_ver == 1)) { 815 *data = XEN_PT_INVALID_REG; 816 } 817 818 *data = reg->init_val; 819 return 0; 820 } 821 /* initialize Device Control 2 register */ 822 static int xen_pt_devctrl2_reg_init(XenPCIPassthroughState *s, 823 XenPTRegInfo *reg, uint32_t real_offset, 824 uint32_t *data) 825 { 826 uint8_t cap_ver = get_capability_version(s, real_offset - reg->offset); 827 828 /* no need to initialize in case of cap_ver 1.x */ 829 if (cap_ver == 1) { 830 *data = 
XEN_PT_INVALID_REG; 831 } 832 833 *data = reg->init_val; 834 return 0; 835 } 836 /* initialize Link Control 2 register */ 837 static int xen_pt_linkctrl2_reg_init(XenPCIPassthroughState *s, 838 XenPTRegInfo *reg, uint32_t real_offset, 839 uint32_t *data) 840 { 841 uint8_t cap_ver = get_capability_version(s, real_offset - reg->offset); 842 uint32_t reg_field = 0; 843 844 /* no need to initialize in case of cap_ver 1.x */ 845 if (cap_ver == 1) { 846 reg_field = XEN_PT_INVALID_REG; 847 } else { 848 /* set Supported Link Speed */ 849 uint8_t lnkcap = pci_get_byte(s->dev.config + real_offset - reg->offset 850 + PCI_EXP_LNKCAP); 851 reg_field |= PCI_EXP_LNKCAP_SLS & lnkcap; 852 } 853 854 *data = reg_field; 855 return 0; 856 } 857 858 /* PCI Express Capability Structure reg static information table */ 859 static XenPTRegInfo xen_pt_emu_reg_pcie[] = { 860 /* Next Pointer reg */ 861 { 862 .offset = PCI_CAP_LIST_NEXT, 863 .size = 1, 864 .init_val = 0x00, 865 .ro_mask = 0xFF, 866 .emu_mask = 0xFF, 867 .init = xen_pt_ptr_reg_init, 868 .u.b.read = xen_pt_byte_reg_read, 869 .u.b.write = xen_pt_byte_reg_write, 870 }, 871 /* Device Capabilities reg */ 872 { 873 .offset = PCI_EXP_DEVCAP, 874 .size = 4, 875 .init_val = 0x00000000, 876 .ro_mask = 0x1FFCFFFF, 877 .emu_mask = 0x10000000, 878 .init = xen_pt_common_reg_init, 879 .u.dw.read = xen_pt_long_reg_read, 880 .u.dw.write = xen_pt_long_reg_write, 881 }, 882 /* Device Control reg */ 883 { 884 .offset = PCI_EXP_DEVCTL, 885 .size = 2, 886 .init_val = 0x2810, 887 .ro_mask = 0x8400, 888 .emu_mask = 0xFFFF, 889 .init = xen_pt_common_reg_init, 890 .u.w.read = xen_pt_word_reg_read, 891 .u.w.write = xen_pt_word_reg_write, 892 }, 893 /* Link Control reg */ 894 { 895 .offset = PCI_EXP_LNKCTL, 896 .size = 2, 897 .init_val = 0x0000, 898 .ro_mask = 0xFC34, 899 .emu_mask = 0xFFFF, 900 .init = xen_pt_linkctrl_reg_init, 901 .u.w.read = xen_pt_word_reg_read, 902 .u.w.write = xen_pt_word_reg_write, 903 }, 904 /* Device Control 2 reg */ 905 { 906 
.offset = 0x28, 907 .size = 2, 908 .init_val = 0x0000, 909 .ro_mask = 0xFFE0, 910 .emu_mask = 0xFFFF, 911 .init = xen_pt_devctrl2_reg_init, 912 .u.w.read = xen_pt_word_reg_read, 913 .u.w.write = xen_pt_word_reg_write, 914 }, 915 /* Link Control 2 reg */ 916 { 917 .offset = 0x30, 918 .size = 2, 919 .init_val = 0x0000, 920 .ro_mask = 0xE040, 921 .emu_mask = 0xFFFF, 922 .init = xen_pt_linkctrl2_reg_init, 923 .u.w.read = xen_pt_word_reg_read, 924 .u.w.write = xen_pt_word_reg_write, 925 }, 926 { 927 .size = 0, 928 }, 929 }; 930 931 932 /********************************* 933 * Power Management Capability 934 */ 935 936 /* read Power Management Control/Status register */ 937 static int xen_pt_pmcsr_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry, 938 uint16_t *value, uint16_t valid_mask) 939 { 940 XenPTRegInfo *reg = cfg_entry->reg; 941 uint16_t valid_emu_mask = reg->emu_mask; 942 943 valid_emu_mask |= PCI_PM_CTRL_STATE_MASK | PCI_PM_CTRL_NO_SOFT_RESET; 944 945 valid_emu_mask = valid_emu_mask & valid_mask; 946 *value = XEN_PT_MERGE_VALUE(*value, cfg_entry->data, ~valid_emu_mask); 947 948 return 0; 949 } 950 /* write Power Management Control/Status register */ 951 static int xen_pt_pmcsr_reg_write(XenPCIPassthroughState *s, 952 XenPTReg *cfg_entry, uint16_t *val, 953 uint16_t dev_value, uint16_t valid_mask) 954 { 955 XenPTRegInfo *reg = cfg_entry->reg; 956 uint16_t emu_mask = reg->emu_mask; 957 uint16_t writable_mask = 0; 958 uint16_t throughable_mask = 0; 959 960 emu_mask |= PCI_PM_CTRL_STATE_MASK | PCI_PM_CTRL_NO_SOFT_RESET; 961 962 /* modify emulate register */ 963 writable_mask = emu_mask & ~reg->ro_mask & valid_mask; 964 cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask); 965 966 /* create value for writing to I/O device register */ 967 throughable_mask = ~emu_mask & valid_mask; 968 *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask); 969 970 return 0; 971 } 972 973 /* Power Management Capability reg static information table */ 
static XenPTRegInfo xen_pt_emu_reg_pm[] = {
    /* Next Pointer reg */
    {
        .offset     = PCI_CAP_LIST_NEXT,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0xFF,
        .emu_mask   = 0xFF,
        .init       = xen_pt_ptr_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Power Management Capabilities reg */
    {
        .offset     = PCI_CAP_FLAGS,
        .size       = 2,
        .init_val   = 0x0000,
        .ro_mask    = 0xFFFF,
        .emu_mask   = 0xF9C8,
        .init       = xen_pt_common_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* PCI Power Management Control/Status reg */
    {
        .offset     = PCI_PM_CTRL,
        .size       = 2,
        .init_val   = 0x0008,
        .ro_mask    = 0xE1FC,
        .emu_mask   = 0x8100,
        .init       = xen_pt_common_reg_init,
        .u.w.read   = xen_pt_pmcsr_reg_read,
        .u.w.write  = xen_pt_pmcsr_reg_write,
    },
    {
        .size = 0,
    },
};


/********************************
 * MSI Capability
 */

/* Helper */
/* Return true when 'offset' names the Message Data register that matches the
 * 32/64-bit addressing mode indicated by 'flags' (PCI_MSI_FLAGS_64BIT).
 */
static bool xen_pt_msgdata_check_type(uint32_t offset, uint16_t flags)
{
    /* check the offset whether matches the type or not */
    bool is_32 = (offset == PCI_MSI_DATA_32) && !(flags & PCI_MSI_FLAGS_64BIT);
    bool is_64 = (offset == PCI_MSI_DATA_64) && (flags & PCI_MSI_FLAGS_64BIT);
    return is_32 || is_64;
}

/* Message Control register */
/* initialize Message Control register: snapshot the host value, force MSI
 * off on the real device if it was left enabled, and reset the per-device
 * MSI bookkeeping in s->msi.
 */
static int xen_pt_msgctrl_reg_init(XenPCIPassthroughState *s,
                                   XenPTRegInfo *reg, uint32_t real_offset,
                                   uint32_t *data)
{
    PCIDevice *d = &s->dev;
    XenPTMSI *msi = s->msi;
    uint16_t reg_field = 0;

    /* use I/O device register's value as initial value */
    reg_field = pci_get_word(d->config + real_offset);

    if (reg_field & PCI_MSI_FLAGS_ENABLE) {
        XEN_PT_LOG(&s->dev, "MSI already enabled, disabling it first\n");
        xen_host_pci_set_word(&s->real_device, real_offset,
                              reg_field & ~PCI_MSI_FLAGS_ENABLE);
    }
    msi->flags |= reg_field;
    msi->ctrl_offset = real_offset;
    msi->initialized = false;
    msi->mapped = false;

    *data = reg->init_val;
    return 0;
}
/* write Message Control register: emulate the writable bits and drive the
 * actual MSI setup/teardown (pirq mapping and binding) on enable/disable
 * transitions.
 */
static int xen_pt_msgctrl_reg_write(XenPCIPassthroughState *s,
                                    XenPTReg *cfg_entry, uint16_t *val,
                                    uint16_t dev_value, uint16_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    XenPTMSI *msi = s->msi;
    uint16_t writable_mask = 0;
    uint16_t throughable_mask = 0;
    uint16_t raw_val;

    /* Currently no support for multi-vector */
    if (*val & PCI_MSI_FLAGS_QSIZE) {
        XEN_PT_WARN(&s->dev, "Tries to set more than 1 vector ctrl %x\n", *val);
    }

    /* modify emulate register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
    msi->flags |= cfg_entry->data & ~PCI_MSI_FLAGS_ENABLE;

    /* create value for writing to I/O device register */
    raw_val = *val;
    throughable_mask = ~reg->emu_mask & valid_mask;
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);

    /* update MSI */
    if (raw_val & PCI_MSI_FLAGS_ENABLE) {
        /* setup MSI pirq for the first time */
        if (!msi->initialized) {
            /* Init physical one */
            XEN_PT_LOG(&s->dev, "setup MSI\n");
            if (xen_pt_msi_setup(s)) {
                /* We do not broadcast the error to the framework code, so
                 * that MSI errors are contained in MSI emulation code and
                 * QEMU can go on running.
                 * Guest MSI would be actually not working.
                 */
                *val &= ~PCI_MSI_FLAGS_ENABLE;
                XEN_PT_WARN(&s->dev, "Can not map MSI.\n");
                return 0;
            }
            if (xen_pt_msi_update(s)) {
                *val &= ~PCI_MSI_FLAGS_ENABLE;
                XEN_PT_WARN(&s->dev, "Can not bind MSI\n");
                return 0;
            }
            msi->initialized = true;
            msi->mapped = true;
        }
        msi->flags |= PCI_MSI_FLAGS_ENABLE;
    } else if (msi->mapped) {
        xen_pt_msi_disable(s);
    }

    /* pass through MSI_ENABLE bit */
    *val &= ~PCI_MSI_FLAGS_ENABLE;
    *val |= raw_val & PCI_MSI_FLAGS_ENABLE;

    return 0;
}

/* initialize Message Upper Address register */
static int xen_pt_msgaddr64_reg_init(XenPCIPassthroughState *s,
                                     XenPTRegInfo *reg, uint32_t real_offset,
                                     uint32_t *data)
{
    /* no need to initialize in case of 32 bit type */
    if (!(s->msi->flags & PCI_MSI_FLAGS_64BIT)) {
        *data = XEN_PT_INVALID_REG;
    } else {
        *data = reg->init_val;
    }

    return 0;
}
/* this function will be called twice (for 32 bit and 64 bit type) */
/* initialize Message Data register */
static int xen_pt_msgdata_reg_init(XenPCIPassthroughState *s,
                                   XenPTRegInfo *reg, uint32_t real_offset,
                                   uint32_t *data)
{
    uint32_t flags = s->msi->flags;
    uint32_t offset = reg->offset;

    /* check the offset whether matches the type or not */
    if (xen_pt_msgdata_check_type(offset, flags)) {
        *data = reg->init_val;
    } else {
        /* mark as invalid so only the variant matching the device's
         * 32/64-bit mode is registered */
        *data = XEN_PT_INVALID_REG;
    }
    return 0;
}

/* write Message Address register */
static int xen_pt_msgaddr32_reg_write(XenPCIPassthroughState *s,
                                      XenPTReg *cfg_entry, uint32_t *val,
                                      uint32_t dev_value, uint32_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint32_t writable_mask = 0;
    uint32_t throughable_mask = 0;
    uint32_t old_addr = cfg_entry->data;

    /* modify emulate register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
    /* keep the cached MSI low address in sync with the emulated register */
    s->msi->addr_lo = cfg_entry->data;

    /* create value for writing to I/O device register */
    throughable_mask = ~reg->emu_mask & valid_mask;
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);

    /* update MSI */
    if (cfg_entry->data != old_addr) {
        if (s->msi->mapped) {
            xen_pt_msi_update(s);
        }
    }

    return 0;
}
/* write Message Upper Address register */
static int xen_pt_msgaddr64_reg_write(XenPCIPassthroughState *s,
                                      XenPTReg *cfg_entry, uint32_t *val,
                                      uint32_t dev_value, uint32_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint32_t writable_mask = 0;
    uint32_t throughable_mask = 0;
    uint32_t old_addr = cfg_entry->data;

    /* check whether the type is 64 bit or not */
    if (!(s->msi->flags & PCI_MSI_FLAGS_64BIT)) {
        XEN_PT_ERR(&s->dev,
                   "Can't write to the upper address without 64 bit support\n");
        return -1;
    }

    /* modify emulate register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
    /* update the msi_info too */
    s->msi->addr_hi = cfg_entry->data;

    /* create value for writing to I/O device register */
    throughable_mask = ~reg->emu_mask & valid_mask;
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);

    /* update MSI */
    if (cfg_entry->data != old_addr) {
        if (s->msi->mapped) {
            xen_pt_msi_update(s);
        }
    }

    return 0;
}


/* this function will be called twice (for 32 bit and 64 bit type) */
/* write Message Data register */
static int xen_pt_msgdata_reg_write(XenPCIPassthroughState *s,
                                    XenPTReg *cfg_entry, uint16_t *val,
                                    uint16_t dev_value, uint16_t valid_mask)
{
    XenPTRegInfo *reg =
cfg_entry->reg; 1218 XenPTMSI *msi = s->msi; 1219 uint16_t writable_mask = 0; 1220 uint16_t throughable_mask = 0; 1221 uint16_t old_data = cfg_entry->data; 1222 uint32_t offset = reg->offset; 1223 1224 /* check the offset whether matches the type or not */ 1225 if (!xen_pt_msgdata_check_type(offset, msi->flags)) { 1226 /* exit I/O emulator */ 1227 XEN_PT_ERR(&s->dev, "the offset does not match the 32/64 bit type!\n"); 1228 return -1; 1229 } 1230 1231 /* modify emulate register */ 1232 writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask; 1233 cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask); 1234 /* update the msi_info too */ 1235 msi->data = cfg_entry->data; 1236 1237 /* create value for writing to I/O device register */ 1238 throughable_mask = ~reg->emu_mask & valid_mask; 1239 *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask); 1240 1241 /* update MSI */ 1242 if (cfg_entry->data != old_data) { 1243 if (msi->mapped) { 1244 xen_pt_msi_update(s); 1245 } 1246 } 1247 1248 return 0; 1249 } 1250 1251 /* MSI Capability Structure reg static information table */ 1252 static XenPTRegInfo xen_pt_emu_reg_msi[] = { 1253 /* Next Pointer reg */ 1254 { 1255 .offset = PCI_CAP_LIST_NEXT, 1256 .size = 1, 1257 .init_val = 0x00, 1258 .ro_mask = 0xFF, 1259 .emu_mask = 0xFF, 1260 .init = xen_pt_ptr_reg_init, 1261 .u.b.read = xen_pt_byte_reg_read, 1262 .u.b.write = xen_pt_byte_reg_write, 1263 }, 1264 /* Message Control reg */ 1265 { 1266 .offset = PCI_MSI_FLAGS, 1267 .size = 2, 1268 .init_val = 0x0000, 1269 .ro_mask = 0xFF8E, 1270 .emu_mask = 0x007F, 1271 .init = xen_pt_msgctrl_reg_init, 1272 .u.w.read = xen_pt_word_reg_read, 1273 .u.w.write = xen_pt_msgctrl_reg_write, 1274 }, 1275 /* Message Address reg */ 1276 { 1277 .offset = PCI_MSI_ADDRESS_LO, 1278 .size = 4, 1279 .init_val = 0x00000000, 1280 .ro_mask = 0x00000003, 1281 .emu_mask = 0xFFFFFFFF, 1282 .no_wb = 1, 1283 .init = xen_pt_common_reg_init, 1284 .u.dw.read = xen_pt_long_reg_read, 1285 
        .u.dw.write = xen_pt_msgaddr32_reg_write,
    },
    /* Message Upper Address reg (if PCI_MSI_FLAGS_64BIT set) */
    {
        .offset = PCI_MSI_ADDRESS_HI,
        .size = 4,
        .init_val = 0x00000000,
        .ro_mask = 0x00000000,
        .emu_mask = 0xFFFFFFFF,
        .no_wb = 1,
        .init = xen_pt_msgaddr64_reg_init,
        .u.dw.read = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_msgaddr64_reg_write,
    },
    /* Message Data reg (16 bits of data for 32-bit devices) */
    {
        .offset = PCI_MSI_DATA_32,
        .size = 2,
        .init_val = 0x0000,
        .ro_mask = 0x0000,
        .emu_mask = 0xFFFF,
        .no_wb = 1,
        .init = xen_pt_msgdata_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_msgdata_reg_write,
    },
    /* Message Data reg (16 bits of data for 64-bit devices) */
    {
        .offset = PCI_MSI_DATA_64,
        .size = 2,
        .init_val = 0x0000,
        .ro_mask = 0x0000,
        .emu_mask = 0xFFFF,
        .no_wb = 1,
        .init = xen_pt_msgdata_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_msgdata_reg_write,
    },
    {
        .size = 0,
    },
};


/**************************************
 * MSI-X Capability
 */

/* Message Control register for MSI-X: snapshots the device's value and
 * force-disables MSI-X on the host if it was left enabled */
static int xen_pt_msixctrl_reg_init(XenPCIPassthroughState *s,
                                    XenPTRegInfo *reg, uint32_t real_offset,
                                    uint32_t *data)
{
    PCIDevice *d = &s->dev;
    uint16_t reg_field = 0;

    /* use I/O device register's value as initial value */
    reg_field = pci_get_word(d->config + real_offset);

    if (reg_field & PCI_MSIX_FLAGS_ENABLE) {
        XEN_PT_LOG(d, "MSIX already enabled, disabling it first\n");
        xen_host_pci_set_word(&s->real_device, real_offset,
                              reg_field & ~PCI_MSIX_FLAGS_ENABLE);
    }

    s->msix->ctrl_offset = real_offset;

    *data = reg->init_val;
    return 0;
}
/* write Message Control register for MSI-X: merges the guest write and
 * updates/disables the MSI-X state machine to track ENABLE/MASKALL */
static int xen_pt_msixctrl_reg_write(XenPCIPassthroughState *s,
                                     XenPTReg *cfg_entry, uint16_t *val,
                                     uint16_t dev_value, uint16_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint16_t writable_mask = 0;
    uint16_t throughable_mask = 0;
    int debug_msix_enabled_old;

    /* modify emulate register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);

    /* create value for writing to I/O device register */
    throughable_mask = ~reg->emu_mask & valid_mask;
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);

    /* update MSI-X: only enabled-and-not-masked gets the bindings updated */
    if ((*val & PCI_MSIX_FLAGS_ENABLE)
        && !(*val & PCI_MSIX_FLAGS_MASKALL)) {
        xen_pt_msix_update(s);
    } else if (!(*val & PCI_MSIX_FLAGS_ENABLE) && s->msix->enabled) {
        xen_pt_msix_disable(s);
    }

    /* log only on enable/disable transitions */
    debug_msix_enabled_old = s->msix->enabled;
    s->msix->enabled = !!(*val & PCI_MSIX_FLAGS_ENABLE);
    if (s->msix->enabled != debug_msix_enabled_old) {
        XEN_PT_LOG(&s->dev, "%s MSI-X\n",
                   s->msix->enabled ? "enable" : "disable");
    }

    return 0;
}

/* MSI-X Capability Structure reg static information table */
static XenPTRegInfo xen_pt_emu_reg_msix[] = {
    /* Next Pointer reg */
    {
        .offset = PCI_CAP_LIST_NEXT,
        .size = 1,
        .init_val = 0x00,
        .ro_mask = 0xFF,
        .emu_mask = 0xFF,
        .init = xen_pt_ptr_reg_init,
        .u.b.read = xen_pt_byte_reg_read,
        .u.b.write = xen_pt_byte_reg_write,
    },
    /* Message Control reg */
    {
        .offset = PCI_MSI_FLAGS,
        .size = 2,
        .init_val = 0x0000,
        .ro_mask = 0x3FFF,
        .emu_mask = 0x0000,
        .init = xen_pt_msixctrl_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_msixctrl_reg_write,
    },
    {
        .size = 0,
    },
};


/****************************
 * Capabilities
 */

/* capability structure register group size functions */

/* fixed-size groups: size comes straight from the static table */
static int xen_pt_reg_grp_size_init(XenPCIPassthroughState *s,
                                    const XenPTRegGroupInfo *grp_reg,
                                    uint32_t base_offset, uint8_t *size)
{
    *size = grp_reg->grp_size;
    return 0;
}
/* get Vendor Specific Capability Structure register group size
 * (byte at cap + 0x02 is the vendor-defined length field) */
static int xen_pt_vendor_size_init(XenPCIPassthroughState *s,
                                   const XenPTRegGroupInfo *grp_reg,
                                   uint32_t base_offset, uint8_t *size)
{
    *size = pci_get_byte(s->dev.config + base_offset + 0x02);
    return 0;
}
/* get PCI Express Capability Structure register group size */
static int xen_pt_pcie_size_init(XenPCIPassthroughState *s,
                                 const XenPTRegGroupInfo *grp_reg,
                                 uint32_t base_offset, uint8_t *size)
{
    PCIDevice *d = &s->dev;
    uint8_t version = get_capability_version(s, base_offset);
    uint8_t type = get_device_type(s, base_offset);
    uint8_t pcie_size = 0;


    /* calculate size depend on capability version and device/port type */
    /* in case of PCI Express Base Specification Rev 1.x */
    if
(version == 1) {
        /* The PCI Express Capabilities, Device Capabilities, and Device
         * Status/Control registers are required for all PCI Express devices.
         * The Link Capabilities and Link Status/Control are required for all
         * Endpoints that are not Root Complex Integrated Endpoints. Endpoints
         * are not required to implement registers other than those listed
         * above and terminate the capability structure.
         */
        switch (type) {
        case PCI_EXP_TYPE_ENDPOINT:
        case PCI_EXP_TYPE_LEG_END:
            pcie_size = 0x14;
            break;
        case PCI_EXP_TYPE_RC_END:
            /* has no link */
            pcie_size = 0x0C;
            break;
        /* only EndPoint passthrough is supported */
        case PCI_EXP_TYPE_ROOT_PORT:
        case PCI_EXP_TYPE_UPSTREAM:
        case PCI_EXP_TYPE_DOWNSTREAM:
        case PCI_EXP_TYPE_PCI_BRIDGE:
        case PCI_EXP_TYPE_PCIE_BRIDGE:
        case PCI_EXP_TYPE_RC_EC:
        default:
            XEN_PT_ERR(d, "Unsupported device/port type %#x.\n", type);
            return -1;
        }
    }
    /* in case of PCI Express Base Specification Rev 2.0 */
    else if (version == 2) {
        switch (type) {
        case PCI_EXP_TYPE_ENDPOINT:
        case PCI_EXP_TYPE_LEG_END:
        case PCI_EXP_TYPE_RC_END:
            /* For Functions that do not implement the registers,
             * these spaces must be hardwired to 0b.
             */
            pcie_size = 0x3C;
            break;
        /* only EndPoint passthrough is supported */
        case PCI_EXP_TYPE_ROOT_PORT:
        case PCI_EXP_TYPE_UPSTREAM:
        case PCI_EXP_TYPE_DOWNSTREAM:
        case PCI_EXP_TYPE_PCI_BRIDGE:
        case PCI_EXP_TYPE_PCIE_BRIDGE:
        case PCI_EXP_TYPE_RC_EC:
        default:
            XEN_PT_ERR(d, "Unsupported device/port type %#x.\n", type);
            return -1;
        }
    } else {
        XEN_PT_ERR(d, "Unsupported capability version %#x.\n", version);
        return -1;
    }

    *size = pcie_size;
    return 0;
}
/* get MSI Capability Structure register group size; also allocates the
 * per-device MSI bookkeeping structure as a side effect */
static int xen_pt_msi_size_init(XenPCIPassthroughState *s,
                                const XenPTRegGroupInfo *grp_reg,
                                uint32_t base_offset, uint8_t *size)
{
    PCIDevice *d = &s->dev;
    uint16_t msg_ctrl = 0;
    /* base 0x0a = 32-bit, no per-vector masking */
    uint8_t msi_size = 0xa;

    msg_ctrl = pci_get_word(d->config + (base_offset + PCI_MSI_FLAGS));

    /* check if 64-bit address is capable of per-vector masking */
    if (msg_ctrl & PCI_MSI_FLAGS_64BIT) {
        msi_size += 4;     /* extra upper-address dword */
    }
    if (msg_ctrl & PCI_MSI_FLAGS_MASKBIT) {
        msi_size += 10;    /* mask + pending dwords (and 16-bit data gap) */
    }

    s->msi = g_new0(XenPTMSI, 1);
    s->msi->pirq = XEN_PT_UNASSIGNED_PIRQ;

    *size = msi_size;
    return 0;
}
/* get MSI-X Capability Structure register group size; delegates table and
 * PBA discovery to xen_pt_msix_init() */
static int xen_pt_msix_size_init(XenPCIPassthroughState *s,
                                 const XenPTRegGroupInfo *grp_reg,
                                 uint32_t base_offset, uint8_t *size)
{
    int rc = 0;

    rc = xen_pt_msix_init(s, base_offset);

    if (rc < 0) {
        XEN_PT_ERR(&s->dev, "Internal error: Invalid xen_pt_msix_init.\n");
        return rc;
    }

    *size = grp_reg->grp_size;
    return 0;
}


/* table of all register groups the passthrough code knows how to handle;
 * HARDWIRED groups are hidden from the guest, EMU groups are emulated */
static const XenPTRegGroupInfo xen_pt_emu_reg_grps[] = {
    /* Header Type0 reg group */
    {
        .grp_id = 0xFF,
        .grp_type = XEN_PT_GRP_TYPE_EMU,
        .grp_size = 0x40,
        .size_init = xen_pt_reg_grp_size_init,
        .emu_regs =
xen_pt_emu_reg_header0,
    },
    /* PCI PowerManagement Capability reg group */
    {
        .grp_id = PCI_CAP_ID_PM,
        .grp_type = XEN_PT_GRP_TYPE_EMU,
        .grp_size = PCI_PM_SIZEOF,
        .size_init = xen_pt_reg_grp_size_init,
        .emu_regs = xen_pt_emu_reg_pm,
    },
    /* AGP Capability Structure reg group */
    {
        .grp_id = PCI_CAP_ID_AGP,
        .grp_type = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size = 0x30,
        .size_init = xen_pt_reg_grp_size_init,
    },
    /* Vital Product Data Capability Structure reg group */
    {
        .grp_id = PCI_CAP_ID_VPD,
        .grp_type = XEN_PT_GRP_TYPE_EMU,
        .grp_size = 0x08,
        .size_init = xen_pt_reg_grp_size_init,
        .emu_regs = xen_pt_emu_reg_vpd,
    },
    /* Slot Identification reg group */
    {
        .grp_id = PCI_CAP_ID_SLOTID,
        .grp_type = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size = 0x04,
        .size_init = xen_pt_reg_grp_size_init,
    },
    /* MSI Capability Structure reg group
     * (grp_size 0xFF = variable; real size comes from size_init) */
    {
        .grp_id = PCI_CAP_ID_MSI,
        .grp_type = XEN_PT_GRP_TYPE_EMU,
        .grp_size = 0xFF,
        .size_init = xen_pt_msi_size_init,
        .emu_regs = xen_pt_emu_reg_msi,
    },
    /* PCI-X Capabilities List Item reg group */
    {
        .grp_id = PCI_CAP_ID_PCIX,
        .grp_type = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size = 0x18,
        .size_init = xen_pt_reg_grp_size_init,
    },
    /* Vendor Specific Capability Structure reg group */
    {
        .grp_id = PCI_CAP_ID_VNDR,
        .grp_type = XEN_PT_GRP_TYPE_EMU,
        .grp_size = 0xFF,
        .size_init = xen_pt_vendor_size_init,
        .emu_regs = xen_pt_emu_reg_vendor,
    },
    /* SHPC Capability List Item reg group */
    {
        .grp_id = PCI_CAP_ID_SHPC,
        .grp_type = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size = 0x08,
        .size_init = xen_pt_reg_grp_size_init,
    },
    /* Subsystem ID and Subsystem Vendor ID Capability List Item reg group */
    {
        .grp_id = PCI_CAP_ID_SSVID,
        .grp_type = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size = 0x08,
        .size_init = xen_pt_reg_grp_size_init,
    },
    /* AGP 8x Capability Structure reg group */
    {
        .grp_id = PCI_CAP_ID_AGP3,
        .grp_type = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size = 0x30,
        .size_init = xen_pt_reg_grp_size_init,
    },
    /* PCI Express Capability Structure reg group */
    {
        .grp_id = PCI_CAP_ID_EXP,
        .grp_type = XEN_PT_GRP_TYPE_EMU,
        .grp_size = 0xFF,
        .size_init = xen_pt_pcie_size_init,
        .emu_regs = xen_pt_emu_reg_pcie,
    },
    /* MSI-X Capability Structure reg group */
    {
        .grp_id = PCI_CAP_ID_MSIX,
        .grp_type = XEN_PT_GRP_TYPE_EMU,
        .grp_size = 0x0C,
        .size_init = xen_pt_msix_size_init,
        .emu_regs = xen_pt_emu_reg_msix,
    },
    {
        .grp_size = 0,
    },
};

/* initialize Capabilities Pointer or Next Pointer register: walks the
 * device's capability list and returns the first capability that is
 * emulated (EMU type) and not hidden, skipping hardwired ones */
static int xen_pt_ptr_reg_init(XenPCIPassthroughState *s,
                               XenPTRegInfo *reg, uint32_t real_offset,
                               uint32_t *data)
{
    int i;
    uint8_t *config = s->dev.config;
    uint32_t reg_field = pci_get_byte(config + real_offset);
    uint8_t cap_id = 0;

    /* find capability offset */
    while (reg_field) {
        for (i = 0; xen_pt_emu_reg_grps[i].grp_size != 0; i++) {
            if (xen_pt_hide_dev_cap(&s->real_device,
                                    xen_pt_emu_reg_grps[i].grp_id)) {
                continue;
            }

            cap_id = pci_get_byte(config + reg_field + PCI_CAP_LIST_ID);
            if (xen_pt_emu_reg_grps[i].grp_id == cap_id) {
                if (xen_pt_emu_reg_grps[i].grp_type == XEN_PT_GRP_TYPE_EMU) {
                    goto out;
                }
                /* ignore the 0 hardwired capability, find next one */
                break;
            }
        }

        /* next capability */
        reg_field = pci_get_byte(config + reg_field + PCI_CAP_LIST_NEXT);
    }

out:
    *data = reg_field;
    return 0;
}


/*************
 * Main
 */

/* walk the real device's capability list looking for capability 'cap';
 * returns its config-space offset, or 0 if absent/unreadable */
static uint8_t find_cap_offset(XenPCIPassthroughState *s, uint8_t cap)
{
    uint8_t id;
1706 unsigned max_cap = PCI_CAP_MAX; 1707 uint8_t pos = PCI_CAPABILITY_LIST; 1708 uint8_t status = 0; 1709 1710 if (xen_host_pci_get_byte(&s->real_device, PCI_STATUS, &status)) { 1711 return 0; 1712 } 1713 if ((status & PCI_STATUS_CAP_LIST) == 0) { 1714 return 0; 1715 } 1716 1717 while (max_cap--) { 1718 if (xen_host_pci_get_byte(&s->real_device, pos, &pos)) { 1719 break; 1720 } 1721 if (pos < PCI_CONFIG_HEADER_SIZE) { 1722 break; 1723 } 1724 1725 pos &= ~3; 1726 if (xen_host_pci_get_byte(&s->real_device, 1727 pos + PCI_CAP_LIST_ID, &id)) { 1728 break; 1729 } 1730 1731 if (id == 0xff) { 1732 break; 1733 } 1734 if (id == cap) { 1735 return pos; 1736 } 1737 1738 pos += PCI_CAP_LIST_NEXT; 1739 } 1740 return 0; 1741 } 1742 1743 static int xen_pt_config_reg_init(XenPCIPassthroughState *s, 1744 XenPTRegGroup *reg_grp, XenPTRegInfo *reg) 1745 { 1746 XenPTReg *reg_entry; 1747 uint32_t data = 0; 1748 int rc = 0; 1749 1750 reg_entry = g_new0(XenPTReg, 1); 1751 reg_entry->reg = reg; 1752 1753 if (reg->init) { 1754 /* initialize emulate register */ 1755 rc = reg->init(s, reg_entry->reg, 1756 reg_grp->base_offset + reg->offset, &data); 1757 if (rc < 0) { 1758 g_free(reg_entry); 1759 return rc; 1760 } 1761 if (data == XEN_PT_INVALID_REG) { 1762 /* free unused BAR register entry */ 1763 g_free(reg_entry); 1764 return 0; 1765 } 1766 /* set register value */ 1767 reg_entry->data = data; 1768 } 1769 /* list add register entry */ 1770 QLIST_INSERT_HEAD(®_grp->reg_tbl_list, reg_entry, entries); 1771 1772 return 0; 1773 } 1774 1775 int xen_pt_config_init(XenPCIPassthroughState *s) 1776 { 1777 int i, rc; 1778 1779 QLIST_INIT(&s->reg_grps); 1780 1781 for (i = 0; xen_pt_emu_reg_grps[i].grp_size != 0; i++) { 1782 uint32_t reg_grp_offset = 0; 1783 XenPTRegGroup *reg_grp_entry = NULL; 1784 1785 if (xen_pt_emu_reg_grps[i].grp_id != 0xFF) { 1786 if (xen_pt_hide_dev_cap(&s->real_device, 1787 xen_pt_emu_reg_grps[i].grp_id)) { 1788 continue; 1789 } 1790 1791 reg_grp_offset = find_cap_offset(s, 
xen_pt_emu_reg_grps[i].grp_id); 1792 1793 if (!reg_grp_offset) { 1794 continue; 1795 } 1796 } 1797 1798 reg_grp_entry = g_new0(XenPTRegGroup, 1); 1799 QLIST_INIT(®_grp_entry->reg_tbl_list); 1800 QLIST_INSERT_HEAD(&s->reg_grps, reg_grp_entry, entries); 1801 1802 reg_grp_entry->base_offset = reg_grp_offset; 1803 reg_grp_entry->reg_grp = xen_pt_emu_reg_grps + i; 1804 if (xen_pt_emu_reg_grps[i].size_init) { 1805 /* get register group size */ 1806 rc = xen_pt_emu_reg_grps[i].size_init(s, reg_grp_entry->reg_grp, 1807 reg_grp_offset, 1808 ®_grp_entry->size); 1809 if (rc < 0) { 1810 xen_pt_config_delete(s); 1811 return rc; 1812 } 1813 } 1814 1815 if (xen_pt_emu_reg_grps[i].grp_type == XEN_PT_GRP_TYPE_EMU) { 1816 if (xen_pt_emu_reg_grps[i].emu_regs) { 1817 int j = 0; 1818 XenPTRegInfo *regs = xen_pt_emu_reg_grps[i].emu_regs; 1819 /* initialize capability register */ 1820 for (j = 0; regs->size != 0; j++, regs++) { 1821 /* initialize capability register */ 1822 rc = xen_pt_config_reg_init(s, reg_grp_entry, regs); 1823 if (rc < 0) { 1824 xen_pt_config_delete(s); 1825 return rc; 1826 } 1827 } 1828 } 1829 } 1830 } 1831 1832 return 0; 1833 } 1834 1835 /* delete all emulate register */ 1836 void xen_pt_config_delete(XenPCIPassthroughState *s) 1837 { 1838 struct XenPTRegGroup *reg_group, *next_grp; 1839 struct XenPTReg *reg, *next_reg; 1840 1841 /* free MSI/MSI-X info table */ 1842 if (s->msix) { 1843 xen_pt_msix_delete(s); 1844 } 1845 if (s->msi) { 1846 g_free(s->msi); 1847 } 1848 1849 /* free all register group entry */ 1850 QLIST_FOREACH_SAFE(reg_group, &s->reg_grps, entries, next_grp) { 1851 /* free all register entry */ 1852 QLIST_FOREACH_SAFE(reg, ®_group->reg_tbl_list, entries, next_reg) { 1853 QLIST_REMOVE(reg, entries); 1854 g_free(reg); 1855 } 1856 1857 QLIST_REMOVE(reg_group, entries); 1858 g_free(reg_group); 1859 } 1860 } 1861