/*
 * Emulated CXL Switch Upstream Port
 *
 * Copyright (c) 2022 Huawei Technologies.
 *
 * Based on xio3130_upstream.c
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "hw/pci/msi.h"
#include "hw/pci/pcie.h"
#include "hw/pci/pcie_port.h"

#define CXL_UPSTREAM_PORT_MSI_NR_VECTOR 1

#define CXL_UPSTREAM_PORT_MSI_OFFSET 0x70
#define CXL_UPSTREAM_PORT_PCIE_CAP_OFFSET 0x90
#define CXL_UPSTREAM_PORT_AER_OFFSET 0x100
#define CXL_UPSTREAM_PORT_DVSEC_OFFSET \
    (CXL_UPSTREAM_PORT_AER_OFFSET + PCI_ERR_SIZEOF)

typedef struct CXLUpstreamPort {
    /*< private >*/
    PCIEPort parent_obj;

    /*< public >*/
    CXLComponentState cxl_cstate;
} CXLUpstreamPort;

CXLComponentState *cxl_usp_to_cstate(CXLUpstreamPort *usp)
{
    return &usp->cxl_cstate;
}

static void cxl_usp_dvsec_write_config(PCIDevice *dev, uint32_t addr,
                                       uint32_t val, int len)
{
    CXLUpstreamPort *usp = CXL_USP(dev);

    if (range_contains(&usp->cxl_cstate.dvsecs[EXTENSIONS_PORT_DVSEC], addr)) {
        uint8_t *reg = &dev->config[addr];
        addr -= usp->cxl_cstate.dvsecs[EXTENSIONS_PORT_DVSEC].lob;
        if (addr == PORT_CONTROL_OFFSET) {
            if (pci_get_word(reg) & PORT_CONTROL_UNMASK_SBR) {
                /* unmask SBR */
                qemu_log_mask(LOG_UNIMP, "SBR mask control is not supported\n");
            }
            if (pci_get_word(reg) & PORT_CONTROL_ALT_MEMID_EN) {
                /* Alt Memory & ID Space Enable */
                qemu_log_mask(LOG_UNIMP,
                              "Alt Memory & ID space is not supported\n");
            }
        }
    }
}

static void cxl_usp_write_config(PCIDevice *d, uint32_t address,
                                 uint32_t val, int len)
{
    pci_bridge_write_config(d, address, val, len);
    pcie_cap_flr_write_config(d, address, val, len);
    pcie_aer_write_config(d, address, val, len);

    cxl_usp_dvsec_write_config(d, address, val, len);
}

static void latch_registers(CXLUpstreamPort *usp)
{
    uint32_t *reg_state = usp->cxl_cstate.crb.cache_mem_registers;
    uint32_t *write_msk = usp->cxl_cstate.crb.cache_mem_regs_write_mask;

    cxl_component_register_init_common(reg_state, write_msk,
                                       CXL2_UPSTREAM_PORT);
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, TARGET_COUNT, 8);
}

static void cxl_usp_reset(DeviceState *qdev)
{
    PCIDevice *d = PCI_DEVICE(qdev);
    CXLUpstreamPort *usp = CXL_USP(qdev);

    pci_bridge_reset(qdev);
    pcie_cap_deverr_reset(d);
    latch_registers(usp);
}

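/*
 * The upstream port carries three CXL DVSECs in PCIe extended config space:
 * the CXL Extensions DVSEC for Ports, the Flex Bus Port DVSEC, and the
 * Register Locator DVSEC that points at the component register BAR set up
 * in cxl_usp_realize().
 */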
static void build_dvsecs(CXLComponentState *cxl)
{
    uint8_t *dvsec;

    dvsec = (uint8_t *)&(CXLDVSECPortExtensions){
        .status = 0x1, /* Port Power Management Init Complete */
    };
    cxl_component_create_dvsec(cxl, CXL2_UPSTREAM_PORT,
                               EXTENSIONS_PORT_DVSEC_LENGTH,
                               EXTENSIONS_PORT_DVSEC,
                               EXTENSIONS_PORT_DVSEC_REVID, dvsec);
    dvsec = (uint8_t *)&(CXLDVSECPortFlexBus){
        .cap = 0x27, /* Cache, IO, Mem, non-MLD */
        .ctrl = 0x27, /* Cache, IO, Mem */
        .status = 0x26, /* same */
        .rcvd_mod_ts_data_phase1 = 0xef, /* Received modified TS data, phase 1 */
    };
    cxl_component_create_dvsec(cxl, CXL2_UPSTREAM_PORT,
                               PCIE_FLEXBUS_PORT_DVSEC_LENGTH_2_0,
                               PCIE_FLEXBUS_PORT_DVSEC,
                               PCIE_FLEXBUS_PORT_DVSEC_REVID_2_0, dvsec);

    dvsec = (uint8_t *)&(CXLDVSECRegisterLocator){
        .rsvd = 0,
        .reg0_base_lo = RBI_COMPONENT_REG | CXL_COMPONENT_REG_BAR_IDX,
        .reg0_base_hi = 0,
    };
    cxl_component_create_dvsec(cxl, CXL2_UPSTREAM_PORT,
                               REG_LOC_DVSEC_LENGTH, REG_LOC_DVSEC,
                               REG_LOC_DVSEC_REVID, dvsec);
}

static void cxl_usp_realize(PCIDevice *d, Error **errp)
{
    PCIEPort *p = PCIE_PORT(d);
    CXLUpstreamPort *usp = CXL_USP(d);
    CXLComponentState *cxl_cstate = &usp->cxl_cstate;
    ComponentRegisters *cregs = &cxl_cstate->crb;
    MemoryRegion *component_bar = &cregs->component_registers;
    int rc;

    pci_bridge_initfn(d, TYPE_PCIE_BUS);
    pcie_port_init_reg(d);

    rc = msi_init(d, CXL_UPSTREAM_PORT_MSI_OFFSET,
                  CXL_UPSTREAM_PORT_MSI_NR_VECTOR, true, true, errp);
    if (rc) {
        assert(rc == -ENOTSUP);
        goto err_bridge;
    }

    rc = pcie_cap_init(d, CXL_UPSTREAM_PORT_PCIE_CAP_OFFSET,
                       PCI_EXP_TYPE_UPSTREAM, p->port, errp);
    if (rc < 0) {
        goto err_msi;
    }

    pcie_cap_flr_init(d);
    pcie_cap_deverr_init(d);
    rc = pcie_aer_init(d, PCI_ERR_VER, CXL_UPSTREAM_PORT_AER_OFFSET,
                       PCI_ERR_SIZEOF, errp);
    if (rc) {
        goto err_cap;
    }

    cxl_cstate->dvsec_offset = CXL_UPSTREAM_PORT_DVSEC_OFFSET;
    cxl_cstate->pdev = d;
    build_dvsecs(cxl_cstate);
    cxl_component_register_block_init(OBJECT(d), cxl_cstate, TYPE_CXL_USP);
    pci_register_bar(d, CXL_COMPONENT_REG_BAR_IDX,
                     PCI_BASE_ADDRESS_SPACE_MEMORY |
                     PCI_BASE_ADDRESS_MEM_TYPE_64,
                     component_bar);

    return;

err_cap:
    pcie_cap_exit(d);
err_msi:
    msi_uninit(d);
err_bridge:
    pci_bridge_exitfn(d);
}

static void cxl_usp_exitfn(PCIDevice *d)
{
    pcie_aer_exit(d);
    pcie_cap_exit(d);
    msi_uninit(d);
    pci_bridge_exitfn(d);
}

static void cxl_upstream_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(oc);

    k->is_bridge = true;
    k->config_write = cxl_usp_write_config;
    k->realize = cxl_usp_realize;
    k->exit = cxl_usp_exitfn;
    k->vendor_id = 0x19e5; /* Huawei */
    k->device_id = 0xa128; /* Emulated CXL Switch Upstream Port */
    k->revision = 0;
    set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
    dc->desc = "CXL Switch Upstream Port";
    dc->reset = cxl_usp_reset;
}

static const TypeInfo cxl_usp_info = {
    .name = TYPE_CXL_USP,
    .parent = TYPE_PCIE_PORT,
    .instance_size = sizeof(CXLUpstreamPort),
    .class_init = cxl_upstream_class_init,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_PCIE_DEVICE },
        { INTERFACE_CXL_DEVICE },
        { }
    },
};

static void cxl_usp_register_type(void)
{
    type_register_static(&cxl_usp_info);
}

type_init(cxl_usp_register_type);
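
/*
 * Example usage (an illustrative sketch, not exercised by this file): the
 * upstream port typically sits below a CXL root port with one or more
 * cxl-downstream ports behind it. The bus and id names ("cxl.1",
 * "root_port0", "us0", "swport0") are placeholders chosen for this example
 * and must match the rest of the command line.
 *
 *   -device cxl-rp,port=0,bus=cxl.1,id=root_port0,chassis=0,slot=0 \
 *   -device cxl-upstream,bus=root_port0,id=us0 \
 *   -device cxl-downstream,port=0,bus=us0,id=swport0,chassis=0,slot=4
 */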