/*
 * QEMU MCH/ICH9 PCI Bridge Emulation
 *
 * Copyright (c) 2006 Fabrice Bellard
 * Copyright (c) 2009, 2010, 2011
 *               Isaku Yamahata <yamahata at valinux co jp>
 *               VA Linux Systems Japan K.K.
 * Copyright (C) 2012 Jason Baron <jbaron@redhat.com>
 *
 * This is based on piix.c, but heavily modified.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "hw/pci-host/q35.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/module.h"

/****************************************************************************
 * Q35 host
 */

#define Q35_PCI_HOST_HOLE64_SIZE_DEFAULT (1ULL << 35)

static void q35_host_realize(DeviceState *dev, Error **errp)
{
    PCIHostState *pci = PCI_HOST_BRIDGE(dev);
    Q35PCIHost *s = Q35_HOST_DEVICE(dev);
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);

    sysbus_add_io(sbd, MCH_HOST_BRIDGE_CONFIG_ADDR, &pci->conf_mem);
    sysbus_init_ioports(sbd, MCH_HOST_BRIDGE_CONFIG_ADDR, 4);

    sysbus_add_io(sbd, MCH_HOST_BRIDGE_CONFIG_DATA, &pci->data_mem);
    sysbus_init_ioports(sbd, MCH_HOST_BRIDGE_CONFIG_DATA, 4);

    /* register q35 0xcf8 port as coalesced pio */
    memory_region_set_flush_coalesced(&pci->data_mem);
    memory_region_add_coalescing(&pci->conf_mem, 0, 4);

    pci->bus = pci_root_bus_new(DEVICE(s), "pcie.0",
                                s->mch.pci_address_space,
                                s->mch.address_space_io,
                                0, TYPE_PCIE_BUS);
    PC_MACHINE(qdev_get_machine())->bus = pci->bus;
    qdev_set_parent_bus(DEVICE(&s->mch), BUS(pci->bus));
    qdev_init_nofail(DEVICE(&s->mch));
}

static const char *q35_host_root_bus_path(PCIHostState *host_bridge,
                                          PCIBus *rootbus)
{
    Q35PCIHost *s = Q35_HOST_DEVICE(host_bridge);

    /* For backwards compat with old device paths */
    if (s->mch.short_root_bus) {
        return "0000";
    }
    return "0000:00";
}

static void q35_host_get_pci_hole_start(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    Q35PCIHost *s = Q35_HOST_DEVICE(obj);
    uint64_t val64;
    uint32_t value;

    val64 = range_is_empty(&s->mch.pci_hole)
            ? 0 : range_lob(&s->mch.pci_hole);
    value = val64;
    assert(value == val64);
    visit_type_uint32(v, name, &value, errp);
}

static void q35_host_get_pci_hole_end(Object *obj, Visitor *v,
                                      const char *name, void *opaque,
                                      Error **errp)
{
    Q35PCIHost *s = Q35_HOST_DEVICE(obj);
    uint64_t val64;
    uint32_t value;

    val64 = range_is_empty(&s->mch.pci_hole)
            ? 0 : range_upb(&s->mch.pci_hole) + 1;
    value = val64;
    assert(value == val64);
    visit_type_uint32(v, name, &value, errp);
}

/*
 * The 64bit PCI hole start is set by the Guest firmware
 * as the address of the first 64bit PCI MEM resource.
 * If no PCI device has resources on the 64bit area,
 * the 64bit PCI hole will start after "over 4G RAM" and the
 * reserved space for memory hotplug if any.
 */
static uint64_t q35_host_get_pci_hole64_start_value(Object *obj)
{
    PCIHostState *h = PCI_HOST_BRIDGE(obj);
    Q35PCIHost *s = Q35_HOST_DEVICE(obj);
    Range w64;
    uint64_t value;

    pci_bus_get_w64_range(h->bus, &w64);
    value = range_is_empty(&w64) ? 0 : range_lob(&w64);
    if (!value && s->pci_hole64_fix) {
        value = pc_pci_hole64_start();
    }
    return value;
}

static void q35_host_get_pci_hole64_start(Object *obj, Visitor *v,
                                          const char *name, void *opaque,
                                          Error **errp)
{
    uint64_t hole64_start = q35_host_get_pci_hole64_start_value(obj);

    visit_type_uint64(v, name, &hole64_start, errp);
}

/*
 * The 64bit PCI hole end is set by the Guest firmware
 * as the address of the last 64bit PCI MEM resource.
 * Then it is expanded to the PCI_HOST_PROP_PCI_HOLE64_SIZE
 * that can be configured by the user.
 */
static void q35_host_get_pci_hole64_end(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    PCIHostState *h = PCI_HOST_BRIDGE(obj);
    Q35PCIHost *s = Q35_HOST_DEVICE(obj);
    uint64_t hole64_start = q35_host_get_pci_hole64_start_value(obj);
    Range w64;
    uint64_t value, hole64_end;

    pci_bus_get_w64_range(h->bus, &w64);
    value = range_is_empty(&w64) ? 0 : range_upb(&w64) + 1;
    hole64_end = ROUND_UP(hole64_start + s->mch.pci_hole64_size, 1ULL << 30);
    if (s->pci_hole64_fix && value < hole64_end) {
        value = hole64_end;
    }
    visit_type_uint64(v, name, &value, errp);
}

static void q35_host_get_mmcfg_size(Object *obj, Visitor *v, const char *name,
                                    void *opaque, Error **errp)
{
    PCIExpressHost *e = PCIE_HOST_BRIDGE(obj);

    visit_type_uint64(v, name, &e->size, errp);
}

/*
 * NOTE: setting defaults for the mch.* fields in this table
 * doesn't work, because mch is a separate QOM object that is
 * zeroed by the object_initialize(&s->mch, ...) call inside
 * q35_host_initfn().  The default values for those
 * properties need to be initialized manually by
 * q35_host_initfn() after the object_initialize() call.
 */
static Property q35_host_props[] = {
    DEFINE_PROP_UINT64(PCIE_HOST_MCFG_BASE, Q35PCIHost, parent_obj.base_addr,
                       MCH_HOST_BRIDGE_PCIEXBAR_DEFAULT),
    DEFINE_PROP_SIZE(PCI_HOST_PROP_PCI_HOLE64_SIZE, Q35PCIHost,
                     mch.pci_hole64_size, Q35_PCI_HOST_HOLE64_SIZE_DEFAULT),
    DEFINE_PROP_UINT32("short_root_bus", Q35PCIHost, mch.short_root_bus, 0),
    DEFINE_PROP_SIZE(PCI_HOST_BELOW_4G_MEM_SIZE, Q35PCIHost,
                     mch.below_4g_mem_size, 0),
    DEFINE_PROP_SIZE(PCI_HOST_ABOVE_4G_MEM_SIZE, Q35PCIHost,
                     mch.above_4g_mem_size, 0),
    DEFINE_PROP_BOOL("x-pci-hole64-fix", Q35PCIHost, pci_hole64_fix, true),
    DEFINE_PROP_END_OF_LIST(),
};

static void q35_host_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_CLASS(klass);

    hc->root_bus_path = q35_host_root_bus_path;
    dc->realize = q35_host_realize;
    dc->props = q35_host_props;
    /* Reason: needs to be wired up by pc_q35_init */
    dc->user_creatable = false;
    set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
    dc->fw_name = "pci";
}

static void q35_host_initfn(Object *obj)
{
    Q35PCIHost *s = Q35_HOST_DEVICE(obj);
    PCIHostState *phb = PCI_HOST_BRIDGE(obj);

    memory_region_init_io(&phb->conf_mem, obj, &pci_host_conf_le_ops, phb,
                          "pci-conf-idx", 4);
    memory_region_init_io(&phb->data_mem, obj, &pci_host_data_le_ops, phb,
                          "pci-conf-data", 4);

    object_initialize_child(OBJECT(s), "mch", &s->mch, sizeof(s->mch),
                            TYPE_MCH_PCI_DEVICE, &error_abort, NULL);
    qdev_prop_set_int32(DEVICE(&s->mch), "addr", PCI_DEVFN(0, 0));
    qdev_prop_set_bit(DEVICE(&s->mch), "multifunction", false);
    /* mch's object_initialize resets the default value, set it again */
    qdev_prop_set_uint64(DEVICE(s), PCI_HOST_PROP_PCI_HOLE64_SIZE,
                         Q35_PCI_HOST_HOLE64_SIZE_DEFAULT);
    object_property_add(obj, PCI_HOST_PROP_PCI_HOLE_START, "uint32",
                        q35_host_get_pci_hole_start,
                        NULL, NULL, NULL, NULL);

    object_property_add(obj, PCI_HOST_PROP_PCI_HOLE_END, "uint32",
                        q35_host_get_pci_hole_end,
                        NULL, NULL, NULL, NULL);

    object_property_add(obj, PCI_HOST_PROP_PCI_HOLE64_START, "uint64",
                        q35_host_get_pci_hole64_start,
                        NULL, NULL, NULL, NULL);

    object_property_add(obj, PCI_HOST_PROP_PCI_HOLE64_END, "uint64",
                        q35_host_get_pci_hole64_end,
                        NULL, NULL, NULL, NULL);

    object_property_add(obj, PCIE_HOST_MCFG_SIZE, "uint64",
                        q35_host_get_mmcfg_size,
                        NULL, NULL, NULL, NULL);

    object_property_add_link(obj, MCH_HOST_PROP_RAM_MEM, TYPE_MEMORY_REGION,
                             (Object **) &s->mch.ram_memory,
                             qdev_prop_allow_set_link_before_realize, 0, NULL);

    object_property_add_link(obj, MCH_HOST_PROP_PCI_MEM, TYPE_MEMORY_REGION,
                             (Object **) &s->mch.pci_address_space,
                             qdev_prop_allow_set_link_before_realize, 0, NULL);

    object_property_add_link(obj, MCH_HOST_PROP_SYSTEM_MEM, TYPE_MEMORY_REGION,
                             (Object **) &s->mch.system_memory,
                             qdev_prop_allow_set_link_before_realize, 0, NULL);

    object_property_add_link(obj, MCH_HOST_PROP_IO_MEM, TYPE_MEMORY_REGION,
                             (Object **) &s->mch.address_space_io,
                             qdev_prop_allow_set_link_before_realize, 0, NULL);
}

static const TypeInfo q35_host_info = {
    .name          = TYPE_Q35_HOST_DEVICE,
    .parent        = TYPE_PCIE_HOST_BRIDGE,
    .instance_size = sizeof(Q35PCIHost),
    .instance_init = q35_host_initfn,
    .class_init    = q35_host_class_init,
};

/****************************************************************************
 * MCH D0:F0
 */

static uint64_t tseg_blackhole_read(void *ptr, hwaddr reg, unsigned size)
{
    return 0xffffffff;
}

static void tseg_blackhole_write(void *opaque, hwaddr addr, uint64_t val,
                                 unsigned width)
{
    /* nothing */
}

static const MemoryRegionOps tseg_blackhole_ops = {
    .read = tseg_blackhole_read,
    .write = tseg_blackhole_write,
    .valid.min_access_size = 1,
    .valid.max_access_size = 4,
    .impl.min_access_size = 4,
    .impl.max_access_size = 4,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

/* PCIe MMCFG */
static void mch_update_pciexbar(MCHPCIState *mch)
{
    PCIDevice *pci_dev = PCI_DEVICE(mch);
    BusState *bus = qdev_get_parent_bus(DEVICE(mch));
    PCIExpressHost *pehb = PCIE_HOST_BRIDGE(bus->parent);

    uint64_t pciexbar;
    int enable;
    uint64_t addr;
    uint64_t addr_mask;
    uint32_t length;

    pciexbar = pci_get_quad(pci_dev->config + MCH_HOST_BRIDGE_PCIEXBAR);
    enable = pciexbar & MCH_HOST_BRIDGE_PCIEXBAREN;
    addr_mask = MCH_HOST_BRIDGE_PCIEXBAR_ADMSK;
    switch (pciexbar & MCH_HOST_BRIDGE_PCIEXBAR_LENGTH_MASK) {
    case MCH_HOST_BRIDGE_PCIEXBAR_LENGTH_256M:
        length = 256 * 1024 * 1024;
        break;
    case MCH_HOST_BRIDGE_PCIEXBAR_LENGTH_128M:
        length = 128 * 1024 * 1024;
        addr_mask |= MCH_HOST_BRIDGE_PCIEXBAR_128ADMSK |
            MCH_HOST_BRIDGE_PCIEXBAR_64ADMSK;
        break;
    case MCH_HOST_BRIDGE_PCIEXBAR_LENGTH_64M:
        length = 64 * 1024 * 1024;
        addr_mask |= MCH_HOST_BRIDGE_PCIEXBAR_64ADMSK;
        break;
    case MCH_HOST_BRIDGE_PCIEXBAR_LENGTH_RVD:
    default:
        abort();
    }
    addr = pciexbar & addr_mask;
    pcie_host_mmcfg_update(pehb, enable, addr, length);
}

/* PAM */
static void mch_update_pam(MCHPCIState *mch)
{
    PCIDevice *pd = PCI_DEVICE(mch);
    int i;

    memory_region_transaction_begin();
    for (i = 0; i < 13; i++) {
        pam_update(&mch->pam_regions[i], i,
                   pd->config[MCH_HOST_BRIDGE_PAM0 + DIV_ROUND_UP(i, 2)]);
    }
    memory_region_transaction_commit();
}

/* SMRAM */
static void mch_update_smram(MCHPCIState *mch)
{
    PCIDevice *pd = PCI_DEVICE(mch);
    bool h_smrame = (pd->config[MCH_HOST_BRIDGE_ESMRAMC] &
                     MCH_HOST_BRIDGE_ESMRAMC_H_SMRAME);
    uint32_t tseg_size;

    /* implement SMRAM.D_LCK */
    if (pd->config[MCH_HOST_BRIDGE_SMRAM] & MCH_HOST_BRIDGE_SMRAM_D_LCK) {
        pd->config[MCH_HOST_BRIDGE_SMRAM] &= ~MCH_HOST_BRIDGE_SMRAM_D_OPEN;
        pd->wmask[MCH_HOST_BRIDGE_SMRAM] = MCH_HOST_BRIDGE_SMRAM_WMASK_LCK;
        pd->wmask[MCH_HOST_BRIDGE_ESMRAMC] = MCH_HOST_BRIDGE_ESMRAMC_WMASK_LCK;
    }

    memory_region_transaction_begin();

    if (pd->config[MCH_HOST_BRIDGE_SMRAM] & SMRAM_D_OPEN) {
        /* Hide (!) low SMRAM if H_SMRAME = 1 */
        memory_region_set_enabled(&mch->smram_region, h_smrame);
        /* Show high SMRAM if H_SMRAME = 1 */
        memory_region_set_enabled(&mch->open_high_smram, h_smrame);
    } else {
        /* Hide high SMRAM and low SMRAM */
        memory_region_set_enabled(&mch->smram_region, true);
        memory_region_set_enabled(&mch->open_high_smram, false);
    }

    if (pd->config[MCH_HOST_BRIDGE_SMRAM] & SMRAM_G_SMRAME) {
        memory_region_set_enabled(&mch->low_smram, !h_smrame);
        memory_region_set_enabled(&mch->high_smram, h_smrame);
    } else {
        memory_region_set_enabled(&mch->low_smram, false);
        memory_region_set_enabled(&mch->high_smram, false);
    }

    if (pd->config[MCH_HOST_BRIDGE_ESMRAMC] & MCH_HOST_BRIDGE_ESMRAMC_T_EN) {
        switch (pd->config[MCH_HOST_BRIDGE_ESMRAMC] &
                MCH_HOST_BRIDGE_ESMRAMC_TSEG_SZ_MASK) {
        case MCH_HOST_BRIDGE_ESMRAMC_TSEG_SZ_1MB:
            tseg_size = 1024 * 1024;
            break;
        case MCH_HOST_BRIDGE_ESMRAMC_TSEG_SZ_2MB:
            tseg_size = 1024 * 1024 * 2;
            break;
        case MCH_HOST_BRIDGE_ESMRAMC_TSEG_SZ_8MB:
            tseg_size = 1024 * 1024 * 8;
            break;
        default:
            tseg_size = 1024 * 1024 * (uint32_t)mch->ext_tseg_mbytes;
            break;
        }
    } else {
        tseg_size = 0;
    }
    memory_region_del_subregion(mch->system_memory, &mch->tseg_blackhole);
    memory_region_set_enabled(&mch->tseg_blackhole, tseg_size);
    memory_region_set_size(&mch->tseg_blackhole, tseg_size);
    memory_region_add_subregion_overlap(mch->system_memory,
                                        mch->below_4g_mem_size - tseg_size,
                                        &mch->tseg_blackhole, 1);

    memory_region_set_enabled(&mch->tseg_window, tseg_size);
    memory_region_set_size(&mch->tseg_window, tseg_size);
    memory_region_set_address(&mch->tseg_window,
                              mch->below_4g_mem_size - tseg_size);
    memory_region_set_alias_offset(&mch->tseg_window,
                                   mch->below_4g_mem_size - tseg_size);

    memory_region_transaction_commit();
}

static void mch_update_ext_tseg_mbytes(MCHPCIState *mch)
{
    PCIDevice *pd = PCI_DEVICE(mch);
    uint8_t *reg = pd->config + MCH_HOST_BRIDGE_EXT_TSEG_MBYTES;

    if (mch->ext_tseg_mbytes > 0 &&
        pci_get_word(reg) == MCH_HOST_BRIDGE_EXT_TSEG_MBYTES_QUERY) {
        pci_set_word(reg, mch->ext_tseg_mbytes);
    }
}

static void mch_write_config(PCIDevice *d,
                             uint32_t address, uint32_t val, int len)
{
    MCHPCIState *mch = MCH_PCI_DEVICE(d);

    pci_default_write_config(d, address, val, len);

    if (ranges_overlap(address, len, MCH_HOST_BRIDGE_PAM0,
                       MCH_HOST_BRIDGE_PAM_SIZE)) {
        mch_update_pam(mch);
    }

    if (ranges_overlap(address, len, MCH_HOST_BRIDGE_PCIEXBAR,
                       MCH_HOST_BRIDGE_PCIEXBAR_SIZE)) {
        mch_update_pciexbar(mch);
    }

    if (ranges_overlap(address, len, MCH_HOST_BRIDGE_SMRAM,
                       MCH_HOST_BRIDGE_SMRAM_SIZE)) {
        mch_update_smram(mch);
    }

    if (ranges_overlap(address, len, MCH_HOST_BRIDGE_EXT_TSEG_MBYTES,
                       MCH_HOST_BRIDGE_EXT_TSEG_MBYTES_SIZE)) {
        mch_update_ext_tseg_mbytes(mch);
    }
}

static void mch_update(MCHPCIState *mch)
{
    mch_update_pciexbar(mch);
    mch_update_pam(mch);
    mch_update_smram(mch);
    mch_update_ext_tseg_mbytes(mch);

    /*
     * pci hole goes from end-of-low-ram to io-apic.
     * mmconfig will be excluded by the dsdt builder.
     */
    range_set_bounds(&mch->pci_hole,
                     mch->below_4g_mem_size,
                     IO_APIC_DEFAULT_ADDRESS - 1);
}

static int mch_post_load(void *opaque, int version_id)
{
    MCHPCIState *mch = opaque;
    mch_update(mch);
    return 0;
}

static const VMStateDescription vmstate_mch = {
    .name = "mch",
    .version_id = 1,
    .minimum_version_id = 1,
    .post_load = mch_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_PCI_DEVICE(parent_obj, MCHPCIState),
        /* Used to be smm_enabled, which was basically always zero because
         * SeaBIOS hardly uses SMM.  SMRAM is now handled by CPU code.
         */
        VMSTATE_UNUSED(1),
        VMSTATE_END_OF_LIST()
    }
};

static void mch_reset(DeviceState *qdev)
{
    PCIDevice *d = PCI_DEVICE(qdev);
    MCHPCIState *mch = MCH_PCI_DEVICE(d);

    pci_set_quad(d->config + MCH_HOST_BRIDGE_PCIEXBAR,
                 MCH_HOST_BRIDGE_PCIEXBAR_DEFAULT);

    d->config[MCH_HOST_BRIDGE_SMRAM] = MCH_HOST_BRIDGE_SMRAM_DEFAULT;
    d->config[MCH_HOST_BRIDGE_ESMRAMC] = MCH_HOST_BRIDGE_ESMRAMC_DEFAULT;
    d->wmask[MCH_HOST_BRIDGE_SMRAM] = MCH_HOST_BRIDGE_SMRAM_WMASK;
    d->wmask[MCH_HOST_BRIDGE_ESMRAMC] = MCH_HOST_BRIDGE_ESMRAMC_WMASK;

    if (mch->ext_tseg_mbytes > 0) {
        pci_set_word(d->config + MCH_HOST_BRIDGE_EXT_TSEG_MBYTES,
                     MCH_HOST_BRIDGE_EXT_TSEG_MBYTES_QUERY);
    }

    mch_update(mch);
}

static void mch_realize(PCIDevice *d, Error **errp)
{
    int i;
    MCHPCIState *mch = MCH_PCI_DEVICE(d);

    if (mch->ext_tseg_mbytes > MCH_HOST_BRIDGE_EXT_TSEG_MBYTES_MAX) {
        error_setg(errp, "invalid extended-tseg-mbytes value: %" PRIu16,
                   mch->ext_tseg_mbytes);
        return;
    }

    /* setup pci memory mapping */
    pc_pci_as_mapping_init(OBJECT(mch), mch->system_memory,
                           mch->pci_address_space);

    /* if *disabled* show SMRAM to all CPUs */
    memory_region_init_alias(&mch->smram_region, OBJECT(mch), "smram-region",
                             mch->pci_address_space, MCH_HOST_BRIDGE_SMRAM_C_BASE,
                             MCH_HOST_BRIDGE_SMRAM_C_SIZE);
    memory_region_add_subregion_overlap(mch->system_memory, MCH_HOST_BRIDGE_SMRAM_C_BASE,
                                        &mch->smram_region, 1);
    memory_region_set_enabled(&mch->smram_region, true);

    memory_region_init_alias(&mch->open_high_smram, OBJECT(mch), "smram-open-high",
                             mch->ram_memory, MCH_HOST_BRIDGE_SMRAM_C_BASE,
                             MCH_HOST_BRIDGE_SMRAM_C_SIZE);
    memory_region_add_subregion_overlap(mch->system_memory, 0xfeda0000,
                                        &mch->open_high_smram, 1);
    memory_region_set_enabled(&mch->open_high_smram, false);

    /* smram, as seen by SMM CPUs */
    memory_region_init(&mch->smram, OBJECT(mch), "smram", 1ull << 32);
    memory_region_set_enabled(&mch->smram, true);
    memory_region_init_alias(&mch->low_smram, OBJECT(mch), "smram-low",
                             mch->ram_memory, MCH_HOST_BRIDGE_SMRAM_C_BASE,
                             MCH_HOST_BRIDGE_SMRAM_C_SIZE);
    memory_region_set_enabled(&mch->low_smram, true);
    memory_region_add_subregion(&mch->smram, MCH_HOST_BRIDGE_SMRAM_C_BASE,
                                &mch->low_smram);
    memory_region_init_alias(&mch->high_smram, OBJECT(mch), "smram-high",
                             mch->ram_memory, MCH_HOST_BRIDGE_SMRAM_C_BASE,
                             MCH_HOST_BRIDGE_SMRAM_C_SIZE);
    memory_region_set_enabled(&mch->high_smram, true);
    memory_region_add_subregion(&mch->smram, 0xfeda0000, &mch->high_smram);

    memory_region_init_io(&mch->tseg_blackhole, OBJECT(mch),
                          &tseg_blackhole_ops, NULL,
                          "tseg-blackhole", 0);
    memory_region_set_enabled(&mch->tseg_blackhole, false);
    memory_region_add_subregion_overlap(mch->system_memory,
                                        mch->below_4g_mem_size,
                                        &mch->tseg_blackhole, 1);

    memory_region_init_alias(&mch->tseg_window, OBJECT(mch), "tseg-window",
                             mch->ram_memory, mch->below_4g_mem_size, 0);
    memory_region_set_enabled(&mch->tseg_window, false);
    memory_region_add_subregion(&mch->smram, mch->below_4g_mem_size,
                                &mch->tseg_window);
    object_property_add_const_link(qdev_get_machine(), "smram",
                                   OBJECT(&mch->smram), &error_abort);

    init_pam(DEVICE(mch), mch->ram_memory, mch->system_memory,
             mch->pci_address_space, &mch->pam_regions[0],
             PAM_BIOS_BASE, PAM_BIOS_SIZE);
    for (i = 0; i < 12; ++i) {
        init_pam(DEVICE(mch), mch->ram_memory, mch->system_memory,
                 mch->pci_address_space, &mch->pam_regions[i + 1],
                 PAM_EXPAN_BASE + i * PAM_EXPAN_SIZE, PAM_EXPAN_SIZE);
    }
}

uint64_t mch_mcfg_base(void)
{
    bool ambiguous;
    Object *o = object_resolve_path_type("", TYPE_MCH_PCI_DEVICE, &ambiguous);
    if (!o) {
        return 0;
    }
    return MCH_HOST_BRIDGE_PCIEXBAR_DEFAULT;
}

static Property mch_props[] = {
    DEFINE_PROP_UINT16("extended-tseg-mbytes", MCHPCIState, ext_tseg_mbytes,
                       16),
    DEFINE_PROP_END_OF_LIST(),
};

static void mch_class_init(ObjectClass *klass, void *data)
{
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
    DeviceClass *dc = DEVICE_CLASS(klass);

    k->realize = mch_realize;
    k->config_write = mch_write_config;
    dc->reset = mch_reset;
    dc->props = mch_props;
    set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
    dc->desc = "Host bridge";
    dc->vmsd = &vmstate_mch;
    k->vendor_id = PCI_VENDOR_ID_INTEL;
    /*
     * The 'q35' machine type implements an Intel Series 3 chipset,
     * of which there are several variants. The key difference between
     * the 82P35 MCH ('p35') and 82Q35 GMCH ('q35') variants is that
     * the latter has an integrated graphics adapter. QEMU does not
     * implement integrated graphics, so it uses the PCI ID for the
     * 82P35 chipset.
     */
    k->device_id = PCI_DEVICE_ID_INTEL_P35_MCH;
    k->revision = MCH_HOST_BRIDGE_REVISION_DEFAULT;
    k->class_id = PCI_CLASS_BRIDGE_HOST;
    /*
     * PCI-facing part of the host bridge, not usable without the
     * host-facing part, which can't be device_add'ed, yet.
     */
    dc->user_creatable = false;
}

static const TypeInfo mch_info = {
    .name = TYPE_MCH_PCI_DEVICE,
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(MCHPCIState),
    .class_init = mch_class_init,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
        { },
    },
};

static void q35_register(void)
{
    type_register_static(&mch_info);
    type_register_static(&q35_host_info);
}

type_init(q35_register);