1 /* 2 * Copyright (c) 2007, Neocleus Corporation. 3 * Copyright (c) 2007, Intel Corporation. 4 * 5 * This work is licensed under the terms of the GNU GPL, version 2. See 6 * the COPYING file in the top-level directory. 7 * 8 * Alex Novik <alex@neocleus.com> 9 * Allen Kay <allen.m.kay@intel.com> 10 * Guy Zana <guy@neocleus.com> 11 * 12 * This file implements direct PCI assignment to a HVM guest 13 */ 14 15 #include "qemu/timer.h" 16 #include "hw/xen/xen_backend.h" 17 #include "xen_pt.h" 18 19 #define XEN_PT_MERGE_VALUE(value, data, val_mask) \ 20 (((value) & (val_mask)) | ((data) & ~(val_mask))) 21 22 #define XEN_PT_INVALID_REG 0xFFFFFFFF /* invalid register value */ 23 24 /* prototype */ 25 26 static int xen_pt_ptr_reg_init(XenPCIPassthroughState *s, XenPTRegInfo *reg, 27 uint32_t real_offset, uint32_t *data); 28 29 30 /* helper */ 31 32 /* A return value of 1 means the capability should NOT be exposed to guest. */ 33 static int xen_pt_hide_dev_cap(const XenHostPCIDevice *d, uint8_t grp_id) 34 { 35 switch (grp_id) { 36 case PCI_CAP_ID_EXP: 37 /* The PCI Express Capability Structure of the VF of Intel 82599 10GbE 38 * Controller looks trivial, e.g., the PCI Express Capabilities 39 * Register is 0. We should not try to expose it to guest. 40 * 41 * The datasheet is available at 42 * http://download.intel.com/design/network/datashts/82599_datasheet.pdf 43 * 44 * See 'Table 9.7. VF PCIe Configuration Space' of the datasheet, the 45 * PCI Express Capability Structure of the VF of Intel 82599 10GbE 46 * Controller looks trivial, e.g., the PCI Express Capabilities 47 * Register is 0, so the Capability Version is 0 and 48 * xen_pt_pcie_size_init() would fail. 
49 */ 50 if (d->vendor_id == PCI_VENDOR_ID_INTEL && 51 d->device_id == PCI_DEVICE_ID_INTEL_82599_SFP_VF) { 52 return 1; 53 } 54 break; 55 } 56 return 0; 57 } 58 59 /* find emulate register group entry */ 60 XenPTRegGroup *xen_pt_find_reg_grp(XenPCIPassthroughState *s, uint32_t address) 61 { 62 XenPTRegGroup *entry = NULL; 63 64 /* find register group entry */ 65 QLIST_FOREACH(entry, &s->reg_grps, entries) { 66 /* check address */ 67 if ((entry->base_offset <= address) 68 && ((entry->base_offset + entry->size) > address)) { 69 return entry; 70 } 71 } 72 73 /* group entry not found */ 74 return NULL; 75 } 76 77 /* find emulate register entry */ 78 XenPTReg *xen_pt_find_reg(XenPTRegGroup *reg_grp, uint32_t address) 79 { 80 XenPTReg *reg_entry = NULL; 81 XenPTRegInfo *reg = NULL; 82 uint32_t real_offset = 0; 83 84 /* find register entry */ 85 QLIST_FOREACH(reg_entry, ®_grp->reg_tbl_list, entries) { 86 reg = reg_entry->reg; 87 real_offset = reg_grp->base_offset + reg->offset; 88 /* check address */ 89 if ((real_offset <= address) 90 && ((real_offset + reg->size) > address)) { 91 return reg_entry; 92 } 93 } 94 95 return NULL; 96 } 97 98 99 /**************** 100 * general register functions 101 */ 102 103 /* register initialization function */ 104 105 static int xen_pt_common_reg_init(XenPCIPassthroughState *s, 106 XenPTRegInfo *reg, uint32_t real_offset, 107 uint32_t *data) 108 { 109 *data = reg->init_val; 110 return 0; 111 } 112 113 /* Read register functions */ 114 115 static int xen_pt_byte_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry, 116 uint8_t *value, uint8_t valid_mask) 117 { 118 XenPTRegInfo *reg = cfg_entry->reg; 119 uint8_t valid_emu_mask = 0; 120 121 /* emulate byte register */ 122 valid_emu_mask = reg->emu_mask & valid_mask; 123 *value = XEN_PT_MERGE_VALUE(*value, cfg_entry->data, ~valid_emu_mask); 124 125 return 0; 126 } 127 static int xen_pt_word_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry, 128 uint16_t *value, uint16_t valid_mask) 
129 { 130 XenPTRegInfo *reg = cfg_entry->reg; 131 uint16_t valid_emu_mask = 0; 132 133 /* emulate word register */ 134 valid_emu_mask = reg->emu_mask & valid_mask; 135 *value = XEN_PT_MERGE_VALUE(*value, cfg_entry->data, ~valid_emu_mask); 136 137 return 0; 138 } 139 static int xen_pt_long_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry, 140 uint32_t *value, uint32_t valid_mask) 141 { 142 XenPTRegInfo *reg = cfg_entry->reg; 143 uint32_t valid_emu_mask = 0; 144 145 /* emulate long register */ 146 valid_emu_mask = reg->emu_mask & valid_mask; 147 *value = XEN_PT_MERGE_VALUE(*value, cfg_entry->data, ~valid_emu_mask); 148 149 return 0; 150 } 151 152 /* Write register functions */ 153 154 static int xen_pt_byte_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry, 155 uint8_t *val, uint8_t dev_value, 156 uint8_t valid_mask) 157 { 158 XenPTRegInfo *reg = cfg_entry->reg; 159 uint8_t writable_mask = 0; 160 uint8_t throughable_mask = 0; 161 162 /* modify emulate register */ 163 writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask; 164 cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask); 165 166 /* create value for writing to I/O device register */ 167 throughable_mask = ~reg->emu_mask & valid_mask; 168 *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask); 169 170 return 0; 171 } 172 static int xen_pt_word_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry, 173 uint16_t *val, uint16_t dev_value, 174 uint16_t valid_mask) 175 { 176 XenPTRegInfo *reg = cfg_entry->reg; 177 uint16_t writable_mask = 0; 178 uint16_t throughable_mask = 0; 179 180 /* modify emulate register */ 181 writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask; 182 cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask); 183 184 /* create value for writing to I/O device register */ 185 throughable_mask = ~reg->emu_mask & valid_mask; 186 *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask); 187 188 return 0; 189 } 190 static 
int xen_pt_long_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry, 191 uint32_t *val, uint32_t dev_value, 192 uint32_t valid_mask) 193 { 194 XenPTRegInfo *reg = cfg_entry->reg; 195 uint32_t writable_mask = 0; 196 uint32_t throughable_mask = 0; 197 198 /* modify emulate register */ 199 writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask; 200 cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask); 201 202 /* create value for writing to I/O device register */ 203 throughable_mask = ~reg->emu_mask & valid_mask; 204 *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask); 205 206 return 0; 207 } 208 209 210 /* XenPTRegInfo declaration 211 * - only for emulated register (either a part or whole bit). 212 * - for passthrough register that need special behavior (like interacting with 213 * other component), set emu_mask to all 0 and specify r/w func properly. 214 * - do NOT use ALL F for init_val, otherwise the tbl will not be registered. 215 */ 216 217 /******************** 218 * Header Type0 219 */ 220 221 static int xen_pt_vendor_reg_init(XenPCIPassthroughState *s, 222 XenPTRegInfo *reg, uint32_t real_offset, 223 uint32_t *data) 224 { 225 *data = s->real_device.vendor_id; 226 return 0; 227 } 228 static int xen_pt_device_reg_init(XenPCIPassthroughState *s, 229 XenPTRegInfo *reg, uint32_t real_offset, 230 uint32_t *data) 231 { 232 *data = s->real_device.device_id; 233 return 0; 234 } 235 static int xen_pt_status_reg_init(XenPCIPassthroughState *s, 236 XenPTRegInfo *reg, uint32_t real_offset, 237 uint32_t *data) 238 { 239 XenPTRegGroup *reg_grp_entry = NULL; 240 XenPTReg *reg_entry = NULL; 241 uint32_t reg_field = 0; 242 243 /* find Header register group */ 244 reg_grp_entry = xen_pt_find_reg_grp(s, PCI_CAPABILITY_LIST); 245 if (reg_grp_entry) { 246 /* find Capabilities Pointer register */ 247 reg_entry = xen_pt_find_reg(reg_grp_entry, PCI_CAPABILITY_LIST); 248 if (reg_entry) { 249 /* check Capabilities Pointer register */ 250 if 
(reg_entry->data) { 251 reg_field |= PCI_STATUS_CAP_LIST; 252 } else { 253 reg_field &= ~PCI_STATUS_CAP_LIST; 254 } 255 } else { 256 xen_shutdown_fatal_error("Internal error: Couldn't find XenPTReg*" 257 " for Capabilities Pointer register." 258 " (%s)\n", __func__); 259 return -1; 260 } 261 } else { 262 xen_shutdown_fatal_error("Internal error: Couldn't find XenPTRegGroup" 263 " for Header. (%s)\n", __func__); 264 return -1; 265 } 266 267 *data = reg_field; 268 return 0; 269 } 270 static int xen_pt_header_type_reg_init(XenPCIPassthroughState *s, 271 XenPTRegInfo *reg, uint32_t real_offset, 272 uint32_t *data) 273 { 274 /* read PCI_HEADER_TYPE */ 275 *data = reg->init_val | 0x80; 276 return 0; 277 } 278 279 /* initialize Interrupt Pin register */ 280 static int xen_pt_irqpin_reg_init(XenPCIPassthroughState *s, 281 XenPTRegInfo *reg, uint32_t real_offset, 282 uint32_t *data) 283 { 284 *data = xen_pt_pci_read_intx(s); 285 return 0; 286 } 287 288 /* Command register */ 289 static int xen_pt_cmd_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry, 290 uint16_t *value, uint16_t valid_mask) 291 { 292 XenPTRegInfo *reg = cfg_entry->reg; 293 uint16_t valid_emu_mask = 0; 294 uint16_t emu_mask = reg->emu_mask; 295 296 if (s->is_virtfn) { 297 emu_mask |= PCI_COMMAND_MEMORY; 298 } 299 300 /* emulate word register */ 301 valid_emu_mask = emu_mask & valid_mask; 302 *value = XEN_PT_MERGE_VALUE(*value, cfg_entry->data, ~valid_emu_mask); 303 304 return 0; 305 } 306 static int xen_pt_cmd_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry, 307 uint16_t *val, uint16_t dev_value, 308 uint16_t valid_mask) 309 { 310 XenPTRegInfo *reg = cfg_entry->reg; 311 uint16_t writable_mask = 0; 312 uint16_t throughable_mask = 0; 313 uint16_t emu_mask = reg->emu_mask; 314 315 if (s->is_virtfn) { 316 emu_mask |= PCI_COMMAND_MEMORY; 317 } 318 319 /* modify emulate register */ 320 writable_mask = ~reg->ro_mask & valid_mask; 321 cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, 
writable_mask); 322 323 /* create value for writing to I/O device register */ 324 throughable_mask = ~emu_mask & valid_mask; 325 326 if (*val & PCI_COMMAND_INTX_DISABLE) { 327 throughable_mask |= PCI_COMMAND_INTX_DISABLE; 328 } else { 329 if (s->machine_irq) { 330 throughable_mask |= PCI_COMMAND_INTX_DISABLE; 331 } 332 } 333 334 *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask); 335 336 return 0; 337 } 338 339 /* BAR */ 340 #define XEN_PT_BAR_MEM_RO_MASK 0x0000000F /* BAR ReadOnly mask(Memory) */ 341 #define XEN_PT_BAR_MEM_EMU_MASK 0xFFFFFFF0 /* BAR emul mask(Memory) */ 342 #define XEN_PT_BAR_IO_RO_MASK 0x00000003 /* BAR ReadOnly mask(I/O) */ 343 #define XEN_PT_BAR_IO_EMU_MASK 0xFFFFFFFC /* BAR emul mask(I/O) */ 344 345 static bool is_64bit_bar(PCIIORegion *r) 346 { 347 return !!(r->type & PCI_BASE_ADDRESS_MEM_TYPE_64); 348 } 349 350 static uint64_t xen_pt_get_bar_size(PCIIORegion *r) 351 { 352 if (is_64bit_bar(r)) { 353 uint64_t size64; 354 size64 = (r + 1)->size; 355 size64 <<= 32; 356 size64 += r->size; 357 return size64; 358 } 359 return r->size; 360 } 361 362 static XenPTBarFlag xen_pt_bar_reg_parse(XenPCIPassthroughState *s, 363 int index) 364 { 365 PCIDevice *d = &s->dev; 366 XenPTRegion *region = NULL; 367 PCIIORegion *r; 368 369 /* check 64bit BAR */ 370 if ((0 < index) && (index < PCI_ROM_SLOT)) { 371 int type = s->real_device.io_regions[index - 1].type; 372 373 if ((type & XEN_HOST_PCI_REGION_TYPE_MEM) 374 && (type & XEN_HOST_PCI_REGION_TYPE_MEM_64)) { 375 region = &s->bases[index - 1]; 376 if (region->bar_flag != XEN_PT_BAR_FLAG_UPPER) { 377 return XEN_PT_BAR_FLAG_UPPER; 378 } 379 } 380 } 381 382 /* check unused BAR */ 383 r = &d->io_regions[index]; 384 if (!xen_pt_get_bar_size(r)) { 385 return XEN_PT_BAR_FLAG_UNUSED; 386 } 387 388 /* for ExpROM BAR */ 389 if (index == PCI_ROM_SLOT) { 390 return XEN_PT_BAR_FLAG_MEM; 391 } 392 393 /* check BAR I/O indicator */ 394 if (s->real_device.io_regions[index].type & XEN_HOST_PCI_REGION_TYPE_IO) { 395 
return XEN_PT_BAR_FLAG_IO; 396 } else { 397 return XEN_PT_BAR_FLAG_MEM; 398 } 399 } 400 401 static inline uint32_t base_address_with_flags(XenHostPCIIORegion *hr) 402 { 403 if (hr->type & XEN_HOST_PCI_REGION_TYPE_IO) { 404 return hr->base_addr | (hr->bus_flags & ~PCI_BASE_ADDRESS_IO_MASK); 405 } else { 406 return hr->base_addr | (hr->bus_flags & ~PCI_BASE_ADDRESS_MEM_MASK); 407 } 408 } 409 410 static int xen_pt_bar_reg_init(XenPCIPassthroughState *s, XenPTRegInfo *reg, 411 uint32_t real_offset, uint32_t *data) 412 { 413 uint32_t reg_field = 0; 414 int index; 415 416 index = xen_pt_bar_offset_to_index(reg->offset); 417 if (index < 0 || index >= PCI_NUM_REGIONS) { 418 XEN_PT_ERR(&s->dev, "Internal error: Invalid BAR index [%d].\n", index); 419 return -1; 420 } 421 422 /* set BAR flag */ 423 s->bases[index].bar_flag = xen_pt_bar_reg_parse(s, index); 424 if (s->bases[index].bar_flag == XEN_PT_BAR_FLAG_UNUSED) { 425 reg_field = XEN_PT_INVALID_REG; 426 } 427 428 *data = reg_field; 429 return 0; 430 } 431 static int xen_pt_bar_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry, 432 uint32_t *value, uint32_t valid_mask) 433 { 434 XenPTRegInfo *reg = cfg_entry->reg; 435 uint32_t valid_emu_mask = 0; 436 uint32_t bar_emu_mask = 0; 437 int index; 438 439 /* get BAR index */ 440 index = xen_pt_bar_offset_to_index(reg->offset); 441 if (index < 0 || index >= PCI_NUM_REGIONS - 1) { 442 XEN_PT_ERR(&s->dev, "Internal error: Invalid BAR index [%d].\n", index); 443 return -1; 444 } 445 446 /* use fixed-up value from kernel sysfs */ 447 *value = base_address_with_flags(&s->real_device.io_regions[index]); 448 449 /* set emulate mask depend on BAR flag */ 450 switch (s->bases[index].bar_flag) { 451 case XEN_PT_BAR_FLAG_MEM: 452 bar_emu_mask = XEN_PT_BAR_MEM_EMU_MASK; 453 break; 454 case XEN_PT_BAR_FLAG_IO: 455 bar_emu_mask = XEN_PT_BAR_IO_EMU_MASK; 456 break; 457 case XEN_PT_BAR_FLAG_UPPER: 458 bar_emu_mask = XEN_PT_BAR_ALLF; 459 break; 460 default: 461 break; 462 } 463 464 /* 
emulate BAR */ 465 valid_emu_mask = bar_emu_mask & valid_mask; 466 *value = XEN_PT_MERGE_VALUE(*value, cfg_entry->data, ~valid_emu_mask); 467 468 return 0; 469 } 470 static int xen_pt_bar_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry, 471 uint32_t *val, uint32_t dev_value, 472 uint32_t valid_mask) 473 { 474 XenPTRegInfo *reg = cfg_entry->reg; 475 XenPTRegion *base = NULL; 476 PCIDevice *d = &s->dev; 477 const PCIIORegion *r; 478 uint32_t writable_mask = 0; 479 uint32_t throughable_mask = 0; 480 uint32_t bar_emu_mask = 0; 481 uint32_t bar_ro_mask = 0; 482 uint32_t r_size = 0; 483 int index = 0; 484 485 index = xen_pt_bar_offset_to_index(reg->offset); 486 if (index < 0 || index >= PCI_NUM_REGIONS) { 487 XEN_PT_ERR(d, "Internal error: Invalid BAR index [%d].\n", index); 488 return -1; 489 } 490 491 r = &d->io_regions[index]; 492 base = &s->bases[index]; 493 r_size = xen_pt_get_emul_size(base->bar_flag, r->size); 494 495 /* set emulate mask and read-only mask values depend on the BAR flag */ 496 switch (s->bases[index].bar_flag) { 497 case XEN_PT_BAR_FLAG_MEM: 498 bar_emu_mask = XEN_PT_BAR_MEM_EMU_MASK; 499 if (!r_size) { 500 /* low 32 bits mask for 64 bit bars */ 501 bar_ro_mask = XEN_PT_BAR_ALLF; 502 } else { 503 bar_ro_mask = XEN_PT_BAR_MEM_RO_MASK | (r_size - 1); 504 } 505 break; 506 case XEN_PT_BAR_FLAG_IO: 507 bar_emu_mask = XEN_PT_BAR_IO_EMU_MASK; 508 bar_ro_mask = XEN_PT_BAR_IO_RO_MASK | (r_size - 1); 509 break; 510 case XEN_PT_BAR_FLAG_UPPER: 511 bar_emu_mask = XEN_PT_BAR_ALLF; 512 bar_ro_mask = r_size ? 
r_size - 1 : 0; 513 break; 514 default: 515 break; 516 } 517 518 /* modify emulate register */ 519 writable_mask = bar_emu_mask & ~bar_ro_mask & valid_mask; 520 cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask); 521 522 /* check whether we need to update the virtual region address or not */ 523 switch (s->bases[index].bar_flag) { 524 case XEN_PT_BAR_FLAG_UPPER: 525 case XEN_PT_BAR_FLAG_MEM: 526 /* nothing to do */ 527 break; 528 case XEN_PT_BAR_FLAG_IO: 529 /* nothing to do */ 530 break; 531 default: 532 break; 533 } 534 535 /* create value for writing to I/O device register */ 536 throughable_mask = ~bar_emu_mask & valid_mask; 537 *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask); 538 539 return 0; 540 } 541 542 /* write Exp ROM BAR */ 543 static int xen_pt_exp_rom_bar_reg_write(XenPCIPassthroughState *s, 544 XenPTReg *cfg_entry, uint32_t *val, 545 uint32_t dev_value, uint32_t valid_mask) 546 { 547 XenPTRegInfo *reg = cfg_entry->reg; 548 XenPTRegion *base = NULL; 549 PCIDevice *d = (PCIDevice *)&s->dev; 550 uint32_t writable_mask = 0; 551 uint32_t throughable_mask = 0; 552 pcibus_t r_size = 0; 553 uint32_t bar_emu_mask = 0; 554 uint32_t bar_ro_mask = 0; 555 556 r_size = d->io_regions[PCI_ROM_SLOT].size; 557 base = &s->bases[PCI_ROM_SLOT]; 558 /* align memory type resource size */ 559 r_size = xen_pt_get_emul_size(base->bar_flag, r_size); 560 561 /* set emulate mask and read-only mask */ 562 bar_emu_mask = reg->emu_mask; 563 bar_ro_mask = (reg->ro_mask | (r_size - 1)) & ~PCI_ROM_ADDRESS_ENABLE; 564 565 /* modify emulate register */ 566 writable_mask = ~bar_ro_mask & valid_mask; 567 cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask); 568 569 /* create value for writing to I/O device register */ 570 throughable_mask = ~bar_emu_mask & valid_mask; 571 *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask); 572 573 return 0; 574 } 575 576 /* Header Type0 reg static information table */ 577 static 
XenPTRegInfo xen_pt_emu_reg_header0[] = { 578 /* Vendor ID reg */ 579 { 580 .offset = PCI_VENDOR_ID, 581 .size = 2, 582 .init_val = 0x0000, 583 .ro_mask = 0xFFFF, 584 .emu_mask = 0xFFFF, 585 .init = xen_pt_vendor_reg_init, 586 .u.w.read = xen_pt_word_reg_read, 587 .u.w.write = xen_pt_word_reg_write, 588 }, 589 /* Device ID reg */ 590 { 591 .offset = PCI_DEVICE_ID, 592 .size = 2, 593 .init_val = 0x0000, 594 .ro_mask = 0xFFFF, 595 .emu_mask = 0xFFFF, 596 .init = xen_pt_device_reg_init, 597 .u.w.read = xen_pt_word_reg_read, 598 .u.w.write = xen_pt_word_reg_write, 599 }, 600 /* Command reg */ 601 { 602 .offset = PCI_COMMAND, 603 .size = 2, 604 .init_val = 0x0000, 605 .ro_mask = 0xF880, 606 .emu_mask = 0x0740, 607 .init = xen_pt_common_reg_init, 608 .u.w.read = xen_pt_cmd_reg_read, 609 .u.w.write = xen_pt_cmd_reg_write, 610 }, 611 /* Capabilities Pointer reg */ 612 { 613 .offset = PCI_CAPABILITY_LIST, 614 .size = 1, 615 .init_val = 0x00, 616 .ro_mask = 0xFF, 617 .emu_mask = 0xFF, 618 .init = xen_pt_ptr_reg_init, 619 .u.b.read = xen_pt_byte_reg_read, 620 .u.b.write = xen_pt_byte_reg_write, 621 }, 622 /* Status reg */ 623 /* use emulated Cap Ptr value to initialize, 624 * so need to be declared after Cap Ptr reg 625 */ 626 { 627 .offset = PCI_STATUS, 628 .size = 2, 629 .init_val = 0x0000, 630 .ro_mask = 0x06FF, 631 .emu_mask = 0x0010, 632 .init = xen_pt_status_reg_init, 633 .u.w.read = xen_pt_word_reg_read, 634 .u.w.write = xen_pt_word_reg_write, 635 }, 636 /* Cache Line Size reg */ 637 { 638 .offset = PCI_CACHE_LINE_SIZE, 639 .size = 1, 640 .init_val = 0x00, 641 .ro_mask = 0x00, 642 .emu_mask = 0xFF, 643 .init = xen_pt_common_reg_init, 644 .u.b.read = xen_pt_byte_reg_read, 645 .u.b.write = xen_pt_byte_reg_write, 646 }, 647 /* Latency Timer reg */ 648 { 649 .offset = PCI_LATENCY_TIMER, 650 .size = 1, 651 .init_val = 0x00, 652 .ro_mask = 0x00, 653 .emu_mask = 0xFF, 654 .init = xen_pt_common_reg_init, 655 .u.b.read = xen_pt_byte_reg_read, 656 .u.b.write = 
xen_pt_byte_reg_write, 657 }, 658 /* Header Type reg */ 659 { 660 .offset = PCI_HEADER_TYPE, 661 .size = 1, 662 .init_val = 0x00, 663 .ro_mask = 0xFF, 664 .emu_mask = 0x00, 665 .init = xen_pt_header_type_reg_init, 666 .u.b.read = xen_pt_byte_reg_read, 667 .u.b.write = xen_pt_byte_reg_write, 668 }, 669 /* Interrupt Line reg */ 670 { 671 .offset = PCI_INTERRUPT_LINE, 672 .size = 1, 673 .init_val = 0x00, 674 .ro_mask = 0x00, 675 .emu_mask = 0xFF, 676 .init = xen_pt_common_reg_init, 677 .u.b.read = xen_pt_byte_reg_read, 678 .u.b.write = xen_pt_byte_reg_write, 679 }, 680 /* Interrupt Pin reg */ 681 { 682 .offset = PCI_INTERRUPT_PIN, 683 .size = 1, 684 .init_val = 0x00, 685 .ro_mask = 0xFF, 686 .emu_mask = 0xFF, 687 .init = xen_pt_irqpin_reg_init, 688 .u.b.read = xen_pt_byte_reg_read, 689 .u.b.write = xen_pt_byte_reg_write, 690 }, 691 /* BAR 0 reg */ 692 /* mask of BAR need to be decided later, depends on IO/MEM type */ 693 { 694 .offset = PCI_BASE_ADDRESS_0, 695 .size = 4, 696 .init_val = 0x00000000, 697 .init = xen_pt_bar_reg_init, 698 .u.dw.read = xen_pt_bar_reg_read, 699 .u.dw.write = xen_pt_bar_reg_write, 700 }, 701 /* BAR 1 reg */ 702 { 703 .offset = PCI_BASE_ADDRESS_1, 704 .size = 4, 705 .init_val = 0x00000000, 706 .init = xen_pt_bar_reg_init, 707 .u.dw.read = xen_pt_bar_reg_read, 708 .u.dw.write = xen_pt_bar_reg_write, 709 }, 710 /* BAR 2 reg */ 711 { 712 .offset = PCI_BASE_ADDRESS_2, 713 .size = 4, 714 .init_val = 0x00000000, 715 .init = xen_pt_bar_reg_init, 716 .u.dw.read = xen_pt_bar_reg_read, 717 .u.dw.write = xen_pt_bar_reg_write, 718 }, 719 /* BAR 3 reg */ 720 { 721 .offset = PCI_BASE_ADDRESS_3, 722 .size = 4, 723 .init_val = 0x00000000, 724 .init = xen_pt_bar_reg_init, 725 .u.dw.read = xen_pt_bar_reg_read, 726 .u.dw.write = xen_pt_bar_reg_write, 727 }, 728 /* BAR 4 reg */ 729 { 730 .offset = PCI_BASE_ADDRESS_4, 731 .size = 4, 732 .init_val = 0x00000000, 733 .init = xen_pt_bar_reg_init, 734 .u.dw.read = xen_pt_bar_reg_read, 735 .u.dw.write = 
xen_pt_bar_reg_write, 736 }, 737 /* BAR 5 reg */ 738 { 739 .offset = PCI_BASE_ADDRESS_5, 740 .size = 4, 741 .init_val = 0x00000000, 742 .init = xen_pt_bar_reg_init, 743 .u.dw.read = xen_pt_bar_reg_read, 744 .u.dw.write = xen_pt_bar_reg_write, 745 }, 746 /* Expansion ROM BAR reg */ 747 { 748 .offset = PCI_ROM_ADDRESS, 749 .size = 4, 750 .init_val = 0x00000000, 751 .ro_mask = 0x000007FE, 752 .emu_mask = 0xFFFFF800, 753 .init = xen_pt_bar_reg_init, 754 .u.dw.read = xen_pt_long_reg_read, 755 .u.dw.write = xen_pt_exp_rom_bar_reg_write, 756 }, 757 { 758 .size = 0, 759 }, 760 }; 761 762 763 /********************************* 764 * Vital Product Data Capability 765 */ 766 767 /* Vital Product Data Capability Structure reg static information table */ 768 static XenPTRegInfo xen_pt_emu_reg_vpd[] = { 769 { 770 .offset = PCI_CAP_LIST_NEXT, 771 .size = 1, 772 .init_val = 0x00, 773 .ro_mask = 0xFF, 774 .emu_mask = 0xFF, 775 .init = xen_pt_ptr_reg_init, 776 .u.b.read = xen_pt_byte_reg_read, 777 .u.b.write = xen_pt_byte_reg_write, 778 }, 779 { 780 .size = 0, 781 }, 782 }; 783 784 785 /************************************** 786 * Vendor Specific Capability 787 */ 788 789 /* Vendor Specific Capability Structure reg static information table */ 790 static XenPTRegInfo xen_pt_emu_reg_vendor[] = { 791 { 792 .offset = PCI_CAP_LIST_NEXT, 793 .size = 1, 794 .init_val = 0x00, 795 .ro_mask = 0xFF, 796 .emu_mask = 0xFF, 797 .init = xen_pt_ptr_reg_init, 798 .u.b.read = xen_pt_byte_reg_read, 799 .u.b.write = xen_pt_byte_reg_write, 800 }, 801 { 802 .size = 0, 803 }, 804 }; 805 806 807 /***************************** 808 * PCI Express Capability 809 */ 810 811 static inline uint8_t get_capability_version(XenPCIPassthroughState *s, 812 uint32_t offset) 813 { 814 uint8_t flags = pci_get_byte(s->dev.config + offset + PCI_EXP_FLAGS); 815 return flags & PCI_EXP_FLAGS_VERS; 816 } 817 818 static inline uint8_t get_device_type(XenPCIPassthroughState *s, 819 uint32_t offset) 820 { 821 uint8_t flags = 
pci_get_byte(s->dev.config + offset + PCI_EXP_FLAGS); 822 return (flags & PCI_EXP_FLAGS_TYPE) >> 4; 823 } 824 825 /* initialize Link Control register */ 826 static int xen_pt_linkctrl_reg_init(XenPCIPassthroughState *s, 827 XenPTRegInfo *reg, uint32_t real_offset, 828 uint32_t *data) 829 { 830 uint8_t cap_ver = get_capability_version(s, real_offset - reg->offset); 831 uint8_t dev_type = get_device_type(s, real_offset - reg->offset); 832 833 /* no need to initialize in case of Root Complex Integrated Endpoint 834 * with cap_ver 1.x 835 */ 836 if ((dev_type == PCI_EXP_TYPE_RC_END) && (cap_ver == 1)) { 837 *data = XEN_PT_INVALID_REG; 838 } 839 840 *data = reg->init_val; 841 return 0; 842 } 843 /* initialize Device Control 2 register */ 844 static int xen_pt_devctrl2_reg_init(XenPCIPassthroughState *s, 845 XenPTRegInfo *reg, uint32_t real_offset, 846 uint32_t *data) 847 { 848 uint8_t cap_ver = get_capability_version(s, real_offset - reg->offset); 849 850 /* no need to initialize in case of cap_ver 1.x */ 851 if (cap_ver == 1) { 852 *data = XEN_PT_INVALID_REG; 853 } 854 855 *data = reg->init_val; 856 return 0; 857 } 858 /* initialize Link Control 2 register */ 859 static int xen_pt_linkctrl2_reg_init(XenPCIPassthroughState *s, 860 XenPTRegInfo *reg, uint32_t real_offset, 861 uint32_t *data) 862 { 863 uint8_t cap_ver = get_capability_version(s, real_offset - reg->offset); 864 uint32_t reg_field = 0; 865 866 /* no need to initialize in case of cap_ver 1.x */ 867 if (cap_ver == 1) { 868 reg_field = XEN_PT_INVALID_REG; 869 } else { 870 /* set Supported Link Speed */ 871 uint8_t lnkcap = pci_get_byte(s->dev.config + real_offset - reg->offset 872 + PCI_EXP_LNKCAP); 873 reg_field |= PCI_EXP_LNKCAP_SLS & lnkcap; 874 } 875 876 *data = reg_field; 877 return 0; 878 } 879 880 /* PCI Express Capability Structure reg static information table */ 881 static XenPTRegInfo xen_pt_emu_reg_pcie[] = { 882 /* Next Pointer reg */ 883 { 884 .offset = PCI_CAP_LIST_NEXT, 885 .size = 1, 886 
.init_val = 0x00, 887 .ro_mask = 0xFF, 888 .emu_mask = 0xFF, 889 .init = xen_pt_ptr_reg_init, 890 .u.b.read = xen_pt_byte_reg_read, 891 .u.b.write = xen_pt_byte_reg_write, 892 }, 893 /* Device Capabilities reg */ 894 { 895 .offset = PCI_EXP_DEVCAP, 896 .size = 4, 897 .init_val = 0x00000000, 898 .ro_mask = 0x1FFCFFFF, 899 .emu_mask = 0x10000000, 900 .init = xen_pt_common_reg_init, 901 .u.dw.read = xen_pt_long_reg_read, 902 .u.dw.write = xen_pt_long_reg_write, 903 }, 904 /* Device Control reg */ 905 { 906 .offset = PCI_EXP_DEVCTL, 907 .size = 2, 908 .init_val = 0x2810, 909 .ro_mask = 0x8400, 910 .emu_mask = 0xFFFF, 911 .init = xen_pt_common_reg_init, 912 .u.w.read = xen_pt_word_reg_read, 913 .u.w.write = xen_pt_word_reg_write, 914 }, 915 /* Link Control reg */ 916 { 917 .offset = PCI_EXP_LNKCTL, 918 .size = 2, 919 .init_val = 0x0000, 920 .ro_mask = 0xFC34, 921 .emu_mask = 0xFFFF, 922 .init = xen_pt_linkctrl_reg_init, 923 .u.w.read = xen_pt_word_reg_read, 924 .u.w.write = xen_pt_word_reg_write, 925 }, 926 /* Device Control 2 reg */ 927 { 928 .offset = 0x28, 929 .size = 2, 930 .init_val = 0x0000, 931 .ro_mask = 0xFFE0, 932 .emu_mask = 0xFFFF, 933 .init = xen_pt_devctrl2_reg_init, 934 .u.w.read = xen_pt_word_reg_read, 935 .u.w.write = xen_pt_word_reg_write, 936 }, 937 /* Link Control 2 reg */ 938 { 939 .offset = 0x30, 940 .size = 2, 941 .init_val = 0x0000, 942 .ro_mask = 0xE040, 943 .emu_mask = 0xFFFF, 944 .init = xen_pt_linkctrl2_reg_init, 945 .u.w.read = xen_pt_word_reg_read, 946 .u.w.write = xen_pt_word_reg_write, 947 }, 948 { 949 .size = 0, 950 }, 951 }; 952 953 954 /********************************* 955 * Power Management Capability 956 */ 957 958 /* read Power Management Control/Status register */ 959 static int xen_pt_pmcsr_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry, 960 uint16_t *value, uint16_t valid_mask) 961 { 962 XenPTRegInfo *reg = cfg_entry->reg; 963 uint16_t valid_emu_mask = reg->emu_mask; 964 965 valid_emu_mask |= PCI_PM_CTRL_STATE_MASK | 
PCI_PM_CTRL_NO_SOFT_RESET; 966 967 valid_emu_mask = valid_emu_mask & valid_mask; 968 *value = XEN_PT_MERGE_VALUE(*value, cfg_entry->data, ~valid_emu_mask); 969 970 return 0; 971 } 972 /* write Power Management Control/Status register */ 973 static int xen_pt_pmcsr_reg_write(XenPCIPassthroughState *s, 974 XenPTReg *cfg_entry, uint16_t *val, 975 uint16_t dev_value, uint16_t valid_mask) 976 { 977 XenPTRegInfo *reg = cfg_entry->reg; 978 uint16_t emu_mask = reg->emu_mask; 979 uint16_t writable_mask = 0; 980 uint16_t throughable_mask = 0; 981 982 emu_mask |= PCI_PM_CTRL_STATE_MASK | PCI_PM_CTRL_NO_SOFT_RESET; 983 984 /* modify emulate register */ 985 writable_mask = emu_mask & ~reg->ro_mask & valid_mask; 986 cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask); 987 988 /* create value for writing to I/O device register */ 989 throughable_mask = ~emu_mask & valid_mask; 990 *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask); 991 992 return 0; 993 } 994 995 /* Power Management Capability reg static information table */ 996 static XenPTRegInfo xen_pt_emu_reg_pm[] = { 997 /* Next Pointer reg */ 998 { 999 .offset = PCI_CAP_LIST_NEXT, 1000 .size = 1, 1001 .init_val = 0x00, 1002 .ro_mask = 0xFF, 1003 .emu_mask = 0xFF, 1004 .init = xen_pt_ptr_reg_init, 1005 .u.b.read = xen_pt_byte_reg_read, 1006 .u.b.write = xen_pt_byte_reg_write, 1007 }, 1008 /* Power Management Capabilities reg */ 1009 { 1010 .offset = PCI_CAP_FLAGS, 1011 .size = 2, 1012 .init_val = 0x0000, 1013 .ro_mask = 0xFFFF, 1014 .emu_mask = 0xF9C8, 1015 .init = xen_pt_common_reg_init, 1016 .u.w.read = xen_pt_word_reg_read, 1017 .u.w.write = xen_pt_word_reg_write, 1018 }, 1019 /* PCI Power Management Control/Status reg */ 1020 { 1021 .offset = PCI_PM_CTRL, 1022 .size = 2, 1023 .init_val = 0x0008, 1024 .ro_mask = 0xE1FC, 1025 .emu_mask = 0x8100, 1026 .init = xen_pt_common_reg_init, 1027 .u.w.read = xen_pt_pmcsr_reg_read, 1028 .u.w.write = xen_pt_pmcsr_reg_write, 1029 }, 1030 { 1031 .size = 
0, 1032 }, 1033 }; 1034 1035 1036 /******************************** 1037 * MSI Capability 1038 */ 1039 1040 /* Helper */ 1041 static bool xen_pt_msgdata_check_type(uint32_t offset, uint16_t flags) 1042 { 1043 /* check the offset whether matches the type or not */ 1044 bool is_32 = (offset == PCI_MSI_DATA_32) && !(flags & PCI_MSI_FLAGS_64BIT); 1045 bool is_64 = (offset == PCI_MSI_DATA_64) && (flags & PCI_MSI_FLAGS_64BIT); 1046 return is_32 || is_64; 1047 } 1048 1049 /* Message Control register */ 1050 static int xen_pt_msgctrl_reg_init(XenPCIPassthroughState *s, 1051 XenPTRegInfo *reg, uint32_t real_offset, 1052 uint32_t *data) 1053 { 1054 PCIDevice *d = &s->dev; 1055 XenPTMSI *msi = s->msi; 1056 uint16_t reg_field = 0; 1057 1058 /* use I/O device register's value as initial value */ 1059 reg_field = pci_get_word(d->config + real_offset); 1060 1061 if (reg_field & PCI_MSI_FLAGS_ENABLE) { 1062 XEN_PT_LOG(&s->dev, "MSI already enabled, disabling it first\n"); 1063 xen_host_pci_set_word(&s->real_device, real_offset, 1064 reg_field & ~PCI_MSI_FLAGS_ENABLE); 1065 } 1066 msi->flags |= reg_field; 1067 msi->ctrl_offset = real_offset; 1068 msi->initialized = false; 1069 msi->mapped = false; 1070 1071 *data = reg->init_val; 1072 return 0; 1073 } 1074 static int xen_pt_msgctrl_reg_write(XenPCIPassthroughState *s, 1075 XenPTReg *cfg_entry, uint16_t *val, 1076 uint16_t dev_value, uint16_t valid_mask) 1077 { 1078 XenPTRegInfo *reg = cfg_entry->reg; 1079 XenPTMSI *msi = s->msi; 1080 uint16_t writable_mask = 0; 1081 uint16_t throughable_mask = 0; 1082 uint16_t raw_val; 1083 1084 /* Currently no support for multi-vector */ 1085 if (*val & PCI_MSI_FLAGS_QSIZE) { 1086 XEN_PT_WARN(&s->dev, "Tries to set more than 1 vector ctrl %x\n", *val); 1087 } 1088 1089 /* modify emulate register */ 1090 writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask; 1091 cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask); 1092 msi->flags |= cfg_entry->data & 
~PCI_MSI_FLAGS_ENABLE; 1093 1094 /* create value for writing to I/O device register */ 1095 raw_val = *val; 1096 throughable_mask = ~reg->emu_mask & valid_mask; 1097 *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask); 1098 1099 /* update MSI */ 1100 if (raw_val & PCI_MSI_FLAGS_ENABLE) { 1101 /* setup MSI pirq for the first time */ 1102 if (!msi->initialized) { 1103 /* Init physical one */ 1104 XEN_PT_LOG(&s->dev, "setup MSI\n"); 1105 if (xen_pt_msi_setup(s)) { 1106 /* We do not broadcast the error to the framework code, so 1107 * that MSI errors are contained in MSI emulation code and 1108 * QEMU can go on running. 1109 * Guest MSI would be actually not working. 1110 */ 1111 *val &= ~PCI_MSI_FLAGS_ENABLE; 1112 XEN_PT_WARN(&s->dev, "Can not map MSI.\n"); 1113 return 0; 1114 } 1115 if (xen_pt_msi_update(s)) { 1116 *val &= ~PCI_MSI_FLAGS_ENABLE; 1117 XEN_PT_WARN(&s->dev, "Can not bind MSI\n"); 1118 return 0; 1119 } 1120 msi->initialized = true; 1121 msi->mapped = true; 1122 } 1123 msi->flags |= PCI_MSI_FLAGS_ENABLE; 1124 } else if (msi->mapped) { 1125 xen_pt_msi_disable(s); 1126 } 1127 1128 /* pass through MSI_ENABLE bit */ 1129 *val &= ~PCI_MSI_FLAGS_ENABLE; 1130 *val |= raw_val & PCI_MSI_FLAGS_ENABLE; 1131 1132 return 0; 1133 } 1134 1135 /* initialize Message Upper Address register */ 1136 static int xen_pt_msgaddr64_reg_init(XenPCIPassthroughState *s, 1137 XenPTRegInfo *reg, uint32_t real_offset, 1138 uint32_t *data) 1139 { 1140 /* no need to initialize in case of 32 bit type */ 1141 if (!(s->msi->flags & PCI_MSI_FLAGS_64BIT)) { 1142 *data = XEN_PT_INVALID_REG; 1143 } else { 1144 *data = reg->init_val; 1145 } 1146 1147 return 0; 1148 } 1149 /* this function will be called twice (for 32 bit and 64 bit type) */ 1150 /* initialize Message Data register */ 1151 static int xen_pt_msgdata_reg_init(XenPCIPassthroughState *s, 1152 XenPTRegInfo *reg, uint32_t real_offset, 1153 uint32_t *data) 1154 { 1155 uint32_t flags = s->msi->flags; 1156 uint32_t offset = 
        reg->offset;

    /* check the offset whether matches the type or not */
    if (xen_pt_msgdata_check_type(offset, flags)) {
        *data = reg->init_val;
    } else {
        /* mismatched instance: mark invalid so the caller frees the entry */
        *data = XEN_PT_INVALID_REG;
    }
    return 0;
}

/* write Message Address register */

/* Handle a guest write to the MSI lower address register: merge the
 * guest value into the emulated copy, mirror it into s->msi->addr_lo,
 * and rebind the pirq if the address actually changed while mapped. */
static int xen_pt_msgaddr32_reg_write(XenPCIPassthroughState *s,
                                      XenPTReg *cfg_entry, uint32_t *val,
                                      uint32_t dev_value, uint32_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint32_t writable_mask = 0;
    uint32_t throughable_mask = 0;
    uint32_t old_addr = cfg_entry->data;

    /* modify emulate register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
    s->msi->addr_lo = cfg_entry->data;

    /* create value for writing to I/O device register */
    throughable_mask = ~reg->emu_mask & valid_mask;
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);

    /* update MSI */
    if (cfg_entry->data != old_addr) {
        if (s->msi->mapped) {
            xen_pt_msi_update(s);
        }
    }

    return 0;
}
/* write Message Upper Address register */

/* Same as the lower-address handler, but rejects the write outright
 * (returns -1) when the device is not 64-bit MSI capable. */
static int xen_pt_msgaddr64_reg_write(XenPCIPassthroughState *s,
                                      XenPTReg *cfg_entry, uint32_t *val,
                                      uint32_t dev_value, uint32_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint32_t writable_mask = 0;
    uint32_t throughable_mask = 0;
    uint32_t old_addr = cfg_entry->data;

    /* check whether the type is 64 bit or not */
    if (!(s->msi->flags & PCI_MSI_FLAGS_64BIT)) {
        XEN_PT_ERR(&s->dev,
                   "Can't write to the upper address without 64 bit support\n");
        return -1;
    }

    /* modify emulate register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
    /* update the msi_info too */
    s->msi->addr_hi =
        cfg_entry->data;

    /* create value for writing to I/O device register */
    throughable_mask = ~reg->emu_mask & valid_mask;
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);

    /* update MSI */
    if (cfg_entry->data != old_addr) {
        if (s->msi->mapped) {
            xen_pt_msi_update(s);
        }
    }

    return 0;
}


/* this function will be called twice (for 32 bit and 64 bit type) */
/* write Message Data register */

/* Handle a guest write to the MSI data register (shared handler for the
 * 32-bit and 64-bit register instances).  Verifies the instance matches
 * the device's address width, mirrors the value into s->msi->data, and
 * rebinds the pirq when the data changed while mapped. */
static int xen_pt_msgdata_reg_write(XenPCIPassthroughState *s,
                                    XenPTReg *cfg_entry, uint16_t *val,
                                    uint16_t dev_value, uint16_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    XenPTMSI *msi = s->msi;
    uint16_t writable_mask = 0;
    uint16_t throughable_mask = 0;
    uint16_t old_data = cfg_entry->data;
    uint32_t offset = reg->offset;

    /* check the offset whether matches the type or not */
    if (!xen_pt_msgdata_check_type(offset, msi->flags)) {
        /* exit I/O emulator */
        XEN_PT_ERR(&s->dev, "the offset does not match the 32/64 bit type!\n");
        return -1;
    }

    /* modify emulate register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
    /* update the msi_info too */
    msi->data = cfg_entry->data;

    /* create value for writing to I/O device register */
    throughable_mask = ~reg->emu_mask & valid_mask;
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);

    /* update MSI */
    if (cfg_entry->data != old_data) {
        if (msi->mapped) {
            xen_pt_msi_update(s);
        }
    }

    return 0;
}

/* MSI Capability Structure reg static information table */
static XenPTRegInfo xen_pt_emu_reg_msi[] = {
    /* Next Pointer reg */
    {
        .offset = PCI_CAP_LIST_NEXT,
        .size = 1,
        .init_val = 0x00,
        .ro_mask = 0xFF,
        .emu_mask = 0xFF,
        .init = xen_pt_ptr_reg_init,
        .u.b.read = xen_pt_byte_reg_read,
        .u.b.write = xen_pt_byte_reg_write,
    },
    /* Message Control reg */
    {
        .offset = PCI_MSI_FLAGS,
        .size = 2,
        .init_val = 0x0000,
        /* guest-writable bits (~ro_mask): 0x0071 — enable (bit 0) and
         * multiple-message-enable (bits 6:4) */
        .ro_mask = 0xFF8E,
        .emu_mask = 0x007F,
        .init = xen_pt_msgctrl_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_msgctrl_reg_write,
    },
    /* Message Address reg */
    {
        .offset = PCI_MSI_ADDRESS_LO,
        .size = 4,
        .init_val = 0x00000000,
        .ro_mask = 0x00000003,    /* low two bits are reserved */
        .emu_mask = 0xFFFFFFFF,   /* fully emulated */
        .no_wb = 1,
        .init = xen_pt_common_reg_init,
        .u.dw.read = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_msgaddr32_reg_write,
    },
    /* Message Upper Address reg (if PCI_MSI_FLAGS_64BIT set) */
    {
        .offset = PCI_MSI_ADDRESS_HI,
        .size = 4,
        .init_val = 0x00000000,
        .ro_mask = 0x00000000,
        .emu_mask = 0xFFFFFFFF,
        .no_wb = 1,
        /* init drops this register on 32-bit-only devices */
        .init = xen_pt_msgaddr64_reg_init,
        .u.dw.read = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_msgaddr64_reg_write,
    },
    /* Message Data reg (16 bits of data for 32-bit devices) */
    {
        .offset = PCI_MSI_DATA_32,
        .size = 2,
        .init_val = 0x0000,
        .ro_mask = 0x0000,
        .emu_mask = 0xFFFF,
        .no_wb = 1,
        .init = xen_pt_msgdata_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_msgdata_reg_write,
    },
    /* Message Data reg (16 bits of data for 64-bit devices) */
    {
        .offset = PCI_MSI_DATA_64,
        .size = 2,
        .init_val = 0x0000,
        .ro_mask = 0x0000,
        .emu_mask = 0xFFFF,
        .no_wb = 1,
        .init = xen_pt_msgdata_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_msgdata_reg_write,
    },
    /* sentinel: .size == 0 terminates the table */
    {
        .size = 0,
    },
};


/**************************************
 * MSI-X Capability
 */

/* Message Control register for MSI-X */
static int
/* Initialize the emulated MSI-X Message Control register: record the
 * control register's offset and make sure MSI-X is disabled on the
 * physical device before passthrough starts. */
xen_pt_msixctrl_reg_init(XenPCIPassthroughState *s,
                         XenPTRegInfo *reg, uint32_t real_offset,
                         uint32_t *data)
{
    PCIDevice *d = &s->dev;
    uint16_t reg_field = 0;

    /* use I/O device register's value as initial value */
    reg_field = pci_get_word(d->config + real_offset);

    if (reg_field & PCI_MSIX_FLAGS_ENABLE) {
        XEN_PT_LOG(d, "MSIX already enabled, disabling it first\n");
        xen_host_pci_set_word(&s->real_device, real_offset,
                              reg_field & ~PCI_MSIX_FLAGS_ENABLE);
    }

    s->msix->ctrl_offset = real_offset;

    *data = reg->init_val;
    return 0;
}

/* Handle a guest write to the MSI-X Message Control register.
 * Updates bindings when MSI-X becomes enabled and unmasked, disables
 * MSI-X when the guest clears the enable bit, and tracks the enable
 * state in s->msix->enabled. */
static int xen_pt_msixctrl_reg_write(XenPCIPassthroughState *s,
                                     XenPTReg *cfg_entry, uint16_t *val,
                                     uint16_t dev_value, uint16_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint16_t writable_mask = 0;
    uint16_t throughable_mask = 0;
    int debug_msix_enabled_old;

    /* modify emulate register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);

    /* create value for writing to I/O device register */
    throughable_mask = ~reg->emu_mask & valid_mask;
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);

    /* update MSI-X */
    if ((*val & PCI_MSIX_FLAGS_ENABLE)
        && !(*val & PCI_MSIX_FLAGS_MASKALL)) {
        xen_pt_msix_update(s);
    } else if (!(*val & PCI_MSIX_FLAGS_ENABLE) && s->msix->enabled) {
        xen_pt_msix_disable(s);
    }

    /* log only on enable-state transitions */
    debug_msix_enabled_old = s->msix->enabled;
    s->msix->enabled = !!(*val & PCI_MSIX_FLAGS_ENABLE);
    if (s->msix->enabled != debug_msix_enabled_old) {
        XEN_PT_LOG(&s->dev, "%s MSI-X\n",
                   s->msix->enabled ?
"enable" : "disable"); 1407 } 1408 1409 return 0; 1410 } 1411 1412 /* MSI-X Capability Structure reg static information table */ 1413 static XenPTRegInfo xen_pt_emu_reg_msix[] = { 1414 /* Next Pointer reg */ 1415 { 1416 .offset = PCI_CAP_LIST_NEXT, 1417 .size = 1, 1418 .init_val = 0x00, 1419 .ro_mask = 0xFF, 1420 .emu_mask = 0xFF, 1421 .init = xen_pt_ptr_reg_init, 1422 .u.b.read = xen_pt_byte_reg_read, 1423 .u.b.write = xen_pt_byte_reg_write, 1424 }, 1425 /* Message Control reg */ 1426 { 1427 .offset = PCI_MSI_FLAGS, 1428 .size = 2, 1429 .init_val = 0x0000, 1430 .ro_mask = 0x3FFF, 1431 .emu_mask = 0x0000, 1432 .init = xen_pt_msixctrl_reg_init, 1433 .u.w.read = xen_pt_word_reg_read, 1434 .u.w.write = xen_pt_msixctrl_reg_write, 1435 }, 1436 { 1437 .size = 0, 1438 }, 1439 }; 1440 1441 1442 /**************************** 1443 * Capabilities 1444 */ 1445 1446 /* capability structure register group size functions */ 1447 1448 static int xen_pt_reg_grp_size_init(XenPCIPassthroughState *s, 1449 const XenPTRegGroupInfo *grp_reg, 1450 uint32_t base_offset, uint8_t *size) 1451 { 1452 *size = grp_reg->grp_size; 1453 return 0; 1454 } 1455 /* get Vendor Specific Capability Structure register group size */ 1456 static int xen_pt_vendor_size_init(XenPCIPassthroughState *s, 1457 const XenPTRegGroupInfo *grp_reg, 1458 uint32_t base_offset, uint8_t *size) 1459 { 1460 *size = pci_get_byte(s->dev.config + base_offset + 0x02); 1461 return 0; 1462 } 1463 /* get PCI Express Capability Structure register group size */ 1464 static int xen_pt_pcie_size_init(XenPCIPassthroughState *s, 1465 const XenPTRegGroupInfo *grp_reg, 1466 uint32_t base_offset, uint8_t *size) 1467 { 1468 PCIDevice *d = &s->dev; 1469 uint8_t version = get_capability_version(s, base_offset); 1470 uint8_t type = get_device_type(s, base_offset); 1471 uint8_t pcie_size = 0; 1472 1473 1474 /* calculate size depend on capability version and device/port type */ 1475 /* in case of PCI Express Base Specification Rev 1.x */ 1476 if 
    (version == 1) {
        /* The PCI Express Capabilities, Device Capabilities, and Device
         * Status/Control registers are required for all PCI Express devices.
         * The Link Capabilities and Link Status/Control are required for all
         * Endpoints that are not Root Complex Integrated Endpoints. Endpoints
         * are not required to implement registers other than those listed
         * above and terminate the capability structure.
         */
        switch (type) {
        case PCI_EXP_TYPE_ENDPOINT:
        case PCI_EXP_TYPE_LEG_END:
            pcie_size = 0x14;
            break;
        case PCI_EXP_TYPE_RC_END:
            /* has no link */
            pcie_size = 0x0C;
            break;
        /* only EndPoint passthrough is supported */
        case PCI_EXP_TYPE_ROOT_PORT:
        case PCI_EXP_TYPE_UPSTREAM:
        case PCI_EXP_TYPE_DOWNSTREAM:
        case PCI_EXP_TYPE_PCI_BRIDGE:
        case PCI_EXP_TYPE_PCIE_BRIDGE:
        case PCI_EXP_TYPE_RC_EC:
        default:
            XEN_PT_ERR(d, "Unsupported device/port type %#x.\n", type);
            return -1;
        }
    }
    /* in case of PCI Express Base Specification Rev 2.0 */
    else if (version == 2) {
        switch (type) {
        case PCI_EXP_TYPE_ENDPOINT:
        case PCI_EXP_TYPE_LEG_END:
        case PCI_EXP_TYPE_RC_END:
            /* For Functions that do not implement the registers,
             * these spaces must be hardwired to 0b.
             */
            pcie_size = 0x3C;
            break;
        /* only EndPoint passthrough is supported */
        case PCI_EXP_TYPE_ROOT_PORT:
        case PCI_EXP_TYPE_UPSTREAM:
        case PCI_EXP_TYPE_DOWNSTREAM:
        case PCI_EXP_TYPE_PCI_BRIDGE:
        case PCI_EXP_TYPE_PCIE_BRIDGE:
        case PCI_EXP_TYPE_RC_EC:
        default:
            XEN_PT_ERR(d, "Unsupported device/port type %#x.\n", type);
            return -1;
        }
    } else {
        XEN_PT_ERR(d, "Unsupported capability version %#x.\n", version);
        return -1;
    }

    *size = pcie_size;
    return 0;
}
/* get MSI Capability Structure register group size */

/* The MSI capability is 0xa bytes in its base form; 64-bit addressing
 * and per-vector masking each extend it.  Also allocates s->msi. */
static int xen_pt_msi_size_init(XenPCIPassthroughState *s,
                                const XenPTRegGroupInfo *grp_reg,
                                uint32_t base_offset, uint8_t *size)
{
    PCIDevice *d = &s->dev;
    uint16_t msg_ctrl = 0;
    uint8_t msi_size = 0xa;

    msg_ctrl = pci_get_word(d->config + (base_offset + PCI_MSI_FLAGS));

    /* 64-bit capable: add the upper-address register (4 bytes) */
    if (msg_ctrl & PCI_MSI_FLAGS_64BIT) {
        msi_size += 4;
    }
    /* per-vector masking capable: add the mask/pending registers */
    if (msg_ctrl & PCI_MSI_FLAGS_MASKBIT) {
        msi_size += 10;
    }

    s->msi = g_new0(XenPTMSI, 1);
    s->msi->pirq = XEN_PT_UNASSIGNED_PIRQ;

    *size = msi_size;
    return 0;
}
/* get MSI-X Capability Structure register group size */

/* Delegates MSI-X table discovery to xen_pt_msix_init(); the capability
 * structure itself has the fixed size from the group-info entry. */
static int xen_pt_msix_size_init(XenPCIPassthroughState *s,
                                 const XenPTRegGroupInfo *grp_reg,
                                 uint32_t base_offset, uint8_t *size)
{
    int rc = 0;

    rc = xen_pt_msix_init(s, base_offset);

    if (rc < 0) {
        XEN_PT_ERR(&s->dev, "Internal error: Invalid xen_pt_msix_init.\n");
        return rc;
    }

    *size = grp_reg->grp_size;
    return 0;
}


static const XenPTRegGroupInfo xen_pt_emu_reg_grps[] = {
    /* Header Type0 reg group */
    {
        .grp_id = 0xFF,    /* 0xFF: standard header, not a capability */
        .grp_type = XEN_PT_GRP_TYPE_EMU,
        .grp_size = 0x40,
        .size_init = xen_pt_reg_grp_size_init,
        .emu_regs =
                    xen_pt_emu_reg_header0,
    },
    /* PCI PowerManagement Capability reg group */
    {
        .grp_id = PCI_CAP_ID_PM,
        .grp_type = XEN_PT_GRP_TYPE_EMU,
        .grp_size = PCI_PM_SIZEOF,
        .size_init = xen_pt_reg_grp_size_init,
        .emu_regs = xen_pt_emu_reg_pm,
    },
    /* AGP Capability Structure reg group */
    {
        .grp_id = PCI_CAP_ID_AGP,
        .grp_type = XEN_PT_GRP_TYPE_HARDWIRED,    /* hidden from the guest */
        .grp_size = 0x30,
        .size_init = xen_pt_reg_grp_size_init,
    },
    /* Vital Product Data Capability Structure reg group */
    {
        .grp_id = PCI_CAP_ID_VPD,
        .grp_type = XEN_PT_GRP_TYPE_EMU,
        .grp_size = 0x08,
        .size_init = xen_pt_reg_grp_size_init,
        .emu_regs = xen_pt_emu_reg_vpd,
    },
    /* Slot Identification reg group */
    {
        .grp_id = PCI_CAP_ID_SLOTID,
        .grp_type = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size = 0x04,
        .size_init = xen_pt_reg_grp_size_init,
    },
    /* MSI Capability Structure reg group */
    {
        .grp_id = PCI_CAP_ID_MSI,
        .grp_type = XEN_PT_GRP_TYPE_EMU,
        .grp_size = 0xFF,    /* placeholder: real size from .size_init */
        .size_init = xen_pt_msi_size_init,
        .emu_regs = xen_pt_emu_reg_msi,
    },
    /* PCI-X Capabilities List Item reg group */
    {
        .grp_id = PCI_CAP_ID_PCIX,
        .grp_type = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size = 0x18,
        .size_init = xen_pt_reg_grp_size_init,
    },
    /* Vendor Specific Capability Structure reg group */
    {
        .grp_id = PCI_CAP_ID_VNDR,
        .grp_type = XEN_PT_GRP_TYPE_EMU,
        .grp_size = 0xFF,    /* placeholder: real size from .size_init */
        .size_init = xen_pt_vendor_size_init,
        .emu_regs = xen_pt_emu_reg_vendor,
    },
    /* SHPC Capability List Item reg group */
    {
        .grp_id = PCI_CAP_ID_SHPC,
        .grp_type = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size = 0x08,
        .size_init = xen_pt_reg_grp_size_init,
    },
    /* Subsystem ID and Subsystem Vendor ID Capability List Item reg group */
    {
        .grp_id = PCI_CAP_ID_SSVID,
        .grp_type = XEN_PT_GRP_TYPE_HARDWIRED,
.grp_size = 0x08, 1653 .size_init = xen_pt_reg_grp_size_init, 1654 }, 1655 /* AGP 8x Capability Structure reg group */ 1656 { 1657 .grp_id = PCI_CAP_ID_AGP3, 1658 .grp_type = XEN_PT_GRP_TYPE_HARDWIRED, 1659 .grp_size = 0x30, 1660 .size_init = xen_pt_reg_grp_size_init, 1661 }, 1662 /* PCI Express Capability Structure reg group */ 1663 { 1664 .grp_id = PCI_CAP_ID_EXP, 1665 .grp_type = XEN_PT_GRP_TYPE_EMU, 1666 .grp_size = 0xFF, 1667 .size_init = xen_pt_pcie_size_init, 1668 .emu_regs = xen_pt_emu_reg_pcie, 1669 }, 1670 /* MSI-X Capability Structure reg group */ 1671 { 1672 .grp_id = PCI_CAP_ID_MSIX, 1673 .grp_type = XEN_PT_GRP_TYPE_EMU, 1674 .grp_size = 0x0C, 1675 .size_init = xen_pt_msix_size_init, 1676 .emu_regs = xen_pt_emu_reg_msix, 1677 }, 1678 { 1679 .grp_size = 0, 1680 }, 1681 }; 1682 1683 /* initialize Capabilities Pointer or Next Pointer register */ 1684 static int xen_pt_ptr_reg_init(XenPCIPassthroughState *s, 1685 XenPTRegInfo *reg, uint32_t real_offset, 1686 uint32_t *data) 1687 { 1688 int i; 1689 uint8_t *config = s->dev.config; 1690 uint32_t reg_field = pci_get_byte(config + real_offset); 1691 uint8_t cap_id = 0; 1692 1693 /* find capability offset */ 1694 while (reg_field) { 1695 for (i = 0; xen_pt_emu_reg_grps[i].grp_size != 0; i++) { 1696 if (xen_pt_hide_dev_cap(&s->real_device, 1697 xen_pt_emu_reg_grps[i].grp_id)) { 1698 continue; 1699 } 1700 1701 cap_id = pci_get_byte(config + reg_field + PCI_CAP_LIST_ID); 1702 if (xen_pt_emu_reg_grps[i].grp_id == cap_id) { 1703 if (xen_pt_emu_reg_grps[i].grp_type == XEN_PT_GRP_TYPE_EMU) { 1704 goto out; 1705 } 1706 /* ignore the 0 hardwired capability, find next one */ 1707 break; 1708 } 1709 } 1710 1711 /* next capability */ 1712 reg_field = pci_get_byte(config + reg_field + PCI_CAP_LIST_NEXT); 1713 } 1714 1715 out: 1716 *data = reg_field; 1717 return 0; 1718 } 1719 1720 1721 /************* 1722 * Main 1723 */ 1724 1725 static uint8_t find_cap_offset(XenPCIPassthroughState *s, uint8_t cap) 1726 { 1727 uint8_t id; 
    unsigned max_cap = PCI_CAP_MAX;          /* bounds the walk: guards against list loops */
    uint8_t pos = PCI_CAPABILITY_LIST;
    uint8_t status = 0;

    if (xen_host_pci_get_byte(&s->real_device, PCI_STATUS, &status)) {
        return 0;
    }
    if ((status & PCI_STATUS_CAP_LIST) == 0) {
        /* device advertises no capability list */
        return 0;
    }

    while (max_cap--) {
        /* read the next-capability pointer currently addressed by pos */
        if (xen_host_pci_get_byte(&s->real_device, pos, &pos)) {
            break;
        }
        if (pos < PCI_CONFIG_HEADER_SIZE) {
            /* pointers into the standard header terminate the list */
            break;
        }

        pos &= ~3;    /* capability pointers are dword-aligned */
        if (xen_host_pci_get_byte(&s->real_device,
                                  pos + PCI_CAP_LIST_ID, &id)) {
            break;
        }

        if (id == 0xff) {
            break;
        }
        if (id == cap) {
            return pos;
        }

        /* advance to this capability's next-pointer field */
        pos += PCI_CAP_LIST_NEXT;
    }
    /* not found (or read error): 0 is never a valid capability offset */
    return 0;
}

/* Allocate and initialize one emulated register entry for @reg and link
 * it into @reg_grp's register list.  An init callback returning
 * XEN_PT_INVALID_REG means the register does not apply to this device
 * and the entry is silently dropped.  Returns 0 on success, negative on
 * init failure. */
static int xen_pt_config_reg_init(XenPCIPassthroughState *s,
                                  XenPTRegGroup *reg_grp, XenPTRegInfo *reg)
{
    XenPTReg *reg_entry;
    uint32_t data = 0;
    int rc = 0;

    reg_entry = g_new0(XenPTReg, 1);
    reg_entry->reg = reg;

    if (reg->init) {
        /* initialize emulate register */
        rc = reg->init(s, reg_entry->reg,
                       reg_grp->base_offset + reg->offset, &data);
        if (rc < 0) {
            g_free(reg_entry);
            return rc;
        }
        if (data == XEN_PT_INVALID_REG) {
            /* free unused register entry (e.g. 64-bit MSI regs on a
             * 32-bit-only device) */
            g_free(reg_entry);
            return 0;
        }
        /* set register value */
        reg_entry->data = data;
    }
    /* list add register entry */
    QLIST_INSERT_HEAD(&reg_grp->reg_tbl_list, reg_entry, entries);

    return 0;
}

/* Build the emulated config-space model for @s: for every register group
 * whose capability exists on the real device (and is not hidden), create
 * a group entry, compute its size, and initialize its registers. */
int xen_pt_config_init(XenPCIPassthroughState *s)
{
    int i, rc;

    QLIST_INIT(&s->reg_grps);

    for (i = 0; xen_pt_emu_reg_grps[i].grp_size != 0; i++) {
        uint32_t reg_grp_offset = 0;
        XenPTRegGroup *reg_grp_entry = NULL;

        /* grp_id 0xFF is the standard header: always present at offset 0 */
        if (xen_pt_emu_reg_grps[i].grp_id != 0xFF) {
            if (xen_pt_hide_dev_cap(&s->real_device,
                                    xen_pt_emu_reg_grps[i].grp_id)) {
                continue;
            }

            reg_grp_offset = find_cap_offset(s,
xen_pt_emu_reg_grps[i].grp_id); 1814 1815 if (!reg_grp_offset) { 1816 continue; 1817 } 1818 } 1819 1820 reg_grp_entry = g_new0(XenPTRegGroup, 1); 1821 QLIST_INIT(®_grp_entry->reg_tbl_list); 1822 QLIST_INSERT_HEAD(&s->reg_grps, reg_grp_entry, entries); 1823 1824 reg_grp_entry->base_offset = reg_grp_offset; 1825 reg_grp_entry->reg_grp = xen_pt_emu_reg_grps + i; 1826 if (xen_pt_emu_reg_grps[i].size_init) { 1827 /* get register group size */ 1828 rc = xen_pt_emu_reg_grps[i].size_init(s, reg_grp_entry->reg_grp, 1829 reg_grp_offset, 1830 ®_grp_entry->size); 1831 if (rc < 0) { 1832 xen_pt_config_delete(s); 1833 return rc; 1834 } 1835 } 1836 1837 if (xen_pt_emu_reg_grps[i].grp_type == XEN_PT_GRP_TYPE_EMU) { 1838 if (xen_pt_emu_reg_grps[i].emu_regs) { 1839 int j = 0; 1840 XenPTRegInfo *regs = xen_pt_emu_reg_grps[i].emu_regs; 1841 /* initialize capability register */ 1842 for (j = 0; regs->size != 0; j++, regs++) { 1843 /* initialize capability register */ 1844 rc = xen_pt_config_reg_init(s, reg_grp_entry, regs); 1845 if (rc < 0) { 1846 xen_pt_config_delete(s); 1847 return rc; 1848 } 1849 } 1850 } 1851 } 1852 } 1853 1854 return 0; 1855 } 1856 1857 /* delete all emulate register */ 1858 void xen_pt_config_delete(XenPCIPassthroughState *s) 1859 { 1860 struct XenPTRegGroup *reg_group, *next_grp; 1861 struct XenPTReg *reg, *next_reg; 1862 1863 /* free MSI/MSI-X info table */ 1864 if (s->msix) { 1865 xen_pt_msix_delete(s); 1866 } 1867 if (s->msi) { 1868 g_free(s->msi); 1869 } 1870 1871 /* free all register group entry */ 1872 QLIST_FOREACH_SAFE(reg_group, &s->reg_grps, entries, next_grp) { 1873 /* free all register entry */ 1874 QLIST_FOREACH_SAFE(reg, ®_group->reg_tbl_list, entries, next_reg) { 1875 QLIST_REMOVE(reg, entries); 1876 g_free(reg); 1877 } 1878 1879 QLIST_REMOVE(reg_group, entries); 1880 g_free(reg_group); 1881 } 1882 } 1883