/*
 * CXL 2.0 Root Port Implementation
 *
 * Copyright(C) 2020 Intel Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/range.h"
#include "hw/pci/pci_bridge.h"
#include "hw/pci/pcie_port.h"
#include "hw/pci/msi.h"
#include "hw/qdev-properties.h"
#include "hw/sysbus.h"
#include "qapi/error.h"
#include "hw/cxl/cxl.h"

#define CXL_ROOT_PORT_DID 0x7075

#define CXL_RP_MSI_OFFSET               0x60
#define CXL_RP_MSI_SUPPORTED_FLAGS      PCI_MSI_FLAGS_MASKBIT
#define CXL_RP_MSI_NR_VECTOR            2

/* Copied from the generic root port, from which this device derives */
#define GEN_PCIE_ROOT_PORT_AER_OFFSET 0x100
#define GEN_PCIE_ROOT_PORT_ACS_OFFSET \
    (GEN_PCIE_ROOT_PORT_AER_OFFSET + PCI_ERR_SIZEOF)
#define CXL_ROOT_PORT_DVSEC_OFFSET \
    (GEN_PCIE_ROOT_PORT_ACS_OFFSET + PCI_ACS_SIZEOF)
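/*
 * The CXL DVSECs are laid out linearly in extended config space, starting
 * immediately after the AER and ACS capabilities inherited from the
 * generic root port.
 */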

typedef struct CXLRootPort {
    /*< private >*/
    PCIESlot parent_obj;

    CXLComponentState cxl_cstate;
    PCIResReserve res_reserve;
} CXLRootPort;

#define TYPE_CXL_ROOT_PORT "cxl-rp"
DECLARE_INSTANCE_CHECKER(CXLRootPort, CXL_ROOT_PORT, TYPE_CXL_ROOT_PORT)

/*
 * If two MSI vectors are allocated, the Advanced Error Interrupt Message
 * Number is 1; otherwise it is 0.
 * 17.12.5.10 RPERRSTS, bits 31:27 Advanced Error Interrupt Message Number.
 */
static uint8_t cxl_rp_aer_vector(const PCIDevice *d)
{
    switch (msi_nr_vectors_allocated(d)) {
    case 1:
        return 0;
    case 2:
        return 1;
    case 4:
    case 8:
    case 16:
    case 32:
    default:
        break;
    }
    abort();
    return 0;
}

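/*
 * Set up MSI with CXL_RP_MSI_NR_VECTOR vectors and per-vector masking.
 * The only failure tolerated from msi_init() is -ENOTSUP (MSI disabled);
 * anything else indicates a programming error.
 */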
static int cxl_rp_interrupts_init(PCIDevice *d, Error **errp)
{
    int rc;

    rc = msi_init(d, CXL_RP_MSI_OFFSET, CXL_RP_MSI_NR_VECTOR,
                  CXL_RP_MSI_SUPPORTED_FLAGS & PCI_MSI_FLAGS_64BIT,
                  CXL_RP_MSI_SUPPORTED_FLAGS & PCI_MSI_FLAGS_MASKBIT,
                  errp);
    if (rc < 0) {
        assert(rc == -ENOTSUP);
    }

    return rc;
}

static void cxl_rp_interrupts_uninit(PCIDevice *d)
{
    msi_uninit(d);
}

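/*
 * (Re)initialize the cached CXL component registers and their write masks
 * to the CXL2_ROOT_PORT defaults; called from the reset hold phase.
 */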
static void latch_registers(CXLRootPort *crp)
{
    uint32_t *reg_state = crp->cxl_cstate.crb.cache_mem_registers;
    uint32_t *write_msk = crp->cxl_cstate.crb.cache_mem_regs_write_mask;

    cxl_component_register_init_common(reg_state, write_msk, CXL2_ROOT_PORT);
}

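/*
 * Populate the DVSECs advertised by this root port:
 *  - CXL Extensions DVSEC for Ports
 *  - GPF DVSEC for Ports (1μs phase 1 and phase 2 timeouts)
 *  - PCIe DVSEC for Flex Bus Port
 *  - Register Locator DVSEC, pointing register block 0 at the component
 *    register BAR
 */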
static void build_dvsecs(CXLComponentState *cxl)
{
    uint8_t *dvsec;

    dvsec = (uint8_t *)&(CXLDVSECPortExt){ 0 };
    cxl_component_create_dvsec(cxl, CXL2_ROOT_PORT,
                               EXTENSIONS_PORT_DVSEC_LENGTH,
                               EXTENSIONS_PORT_DVSEC,
                               EXTENSIONS_PORT_DVSEC_REVID, dvsec);

    dvsec = (uint8_t *)&(CXLDVSECPortGPF){
        .rsvd        = 0,
        .phase1_ctrl = 1, /* 1μs timeout */
        .phase2_ctrl = 1, /* 1μs timeout */
    };
    cxl_component_create_dvsec(cxl, CXL2_ROOT_PORT,
                               GPF_PORT_DVSEC_LENGTH, GPF_PORT_DVSEC,
                               GPF_PORT_DVSEC_REVID, dvsec);

    dvsec = (uint8_t *)&(CXLDVSECPortFlexBus){
        .cap                     = 0x26, /* IO, Mem, non-MLD */
        .ctrl                    = 0x2,
        .status                  = 0x26, /* same */
        .rcvd_mod_ts_data_phase1 = 0xef,
    };
    cxl_component_create_dvsec(cxl, CXL2_ROOT_PORT,
                               PCIE_CXL3_FLEXBUS_PORT_DVSEC_LENGTH,
                               PCIE_FLEXBUS_PORT_DVSEC,
                               PCIE_CXL3_FLEXBUS_PORT_DVSEC_REVID, dvsec);

    dvsec = (uint8_t *)&(CXLDVSECRegisterLocator){
        .rsvd         = 0,
        .reg0_base_lo = RBI_COMPONENT_REG | CXL_COMPONENT_REG_BAR_IDX,
        .reg0_base_hi = 0,
    };
    cxl_component_create_dvsec(cxl, CXL2_ROOT_PORT,
                               REG_LOC_DVSEC_LENGTH, REG_LOC_DVSEC,
                               REG_LOC_DVSEC_REVID, dvsec);
}

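/*
 * Realize: run the generic PCIe root port realize first, add the bridge
 * resource-reservation capability, disable the IO window when no IO space
 * is reserved, then build the CXL DVSECs and expose the component registers
 * through a 64-bit memory BAR.
 */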
static void cxl_rp_realize(DeviceState *dev, Error **errp)
{
    PCIDevice *pci_dev = PCI_DEVICE(dev);
    PCIERootPortClass *rpc = PCIE_ROOT_PORT_GET_CLASS(dev);
    CXLRootPort *crp = CXL_ROOT_PORT(dev);
    CXLComponentState *cxl_cstate = &crp->cxl_cstate;
    ComponentRegisters *cregs = &cxl_cstate->crb;
    MemoryRegion *component_bar = &cregs->component_registers;
    Error *local_err = NULL;

    rpc->parent_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    int rc =
        pci_bridge_qemu_reserve_cap_init(pci_dev, 0, crp->res_reserve, errp);
    if (rc < 0) {
        rpc->parent_class.exit(pci_dev);
        return;
    }

    if (!crp->res_reserve.io || crp->res_reserve.io == -1) {
        pci_word_test_and_clear_mask(pci_dev->wmask + PCI_COMMAND,
                                     PCI_COMMAND_IO);
        pci_dev->wmask[PCI_IO_BASE] = 0;
        pci_dev->wmask[PCI_IO_LIMIT] = 0;
    }

    cxl_cstate->dvsec_offset = CXL_ROOT_PORT_DVSEC_OFFSET;
    cxl_cstate->pdev = pci_dev;
    build_dvsecs(cxl_cstate);

    cxl_component_register_block_init(OBJECT(pci_dev), cxl_cstate,
                                      TYPE_CXL_ROOT_PORT);

    pci_register_bar(pci_dev, CXL_COMPONENT_REG_BAR_IDX,
                     PCI_BASE_ADDRESS_SPACE_MEMORY |
                         PCI_BASE_ADDRESS_MEM_TYPE_64,
                     component_bar);
}

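/*
 * Reset hold phase: chain the parent's hold handler, then latch the CXL
 * component registers back to their defaults.
 */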
static void cxl_rp_reset_hold(Object *obj, ResetType type)
{
    PCIERootPortClass *rpc = PCIE_ROOT_PORT_GET_CLASS(obj);
    CXLRootPort *crp = CXL_ROOT_PORT(obj);

    if (rpc->parent_phases.hold) {
        rpc->parent_phases.hold(obj, type);
    }

    latch_registers(crp);
}

static Property gen_rp_props[] = {
    DEFINE_PROP_UINT32("bus-reserve", CXLRootPort, res_reserve.bus, -1),
    DEFINE_PROP_SIZE("io-reserve", CXLRootPort, res_reserve.io, -1),
    DEFINE_PROP_SIZE("mem-reserve", CXLRootPort, res_reserve.mem_non_pref, -1),
    DEFINE_PROP_SIZE("pref32-reserve", CXLRootPort, res_reserve.mem_pref_32,
                     -1),
    DEFINE_PROP_SIZE("pref64-reserve", CXLRootPort, res_reserve.mem_pref_64,
                     -1),
    DEFINE_PROP_END_OF_LIST()
};

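/*
 * Watch config writes that land in the CXL Extensions DVSEC for Ports and
 * warn about Port Control features (SBR mask control, Alt Memory & ID space)
 * that are not implemented here.
 */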
static void cxl_rp_dvsec_write_config(PCIDevice *dev, uint32_t addr,
                                      uint32_t val, int len)
{
    CXLRootPort *crp = CXL_ROOT_PORT(dev);

    if (range_contains(&crp->cxl_cstate.dvsecs[EXTENSIONS_PORT_DVSEC], addr)) {
        uint8_t *reg = &dev->config[addr];
        addr -= crp->cxl_cstate.dvsecs[EXTENSIONS_PORT_DVSEC].lob;
        if (addr == PORT_CONTROL_OFFSET) {
            if (pci_get_word(reg) & PORT_CONTROL_UNMASK_SBR) {
                /* unmask SBR */
                qemu_log_mask(LOG_UNIMP, "SBR mask control is not supported\n");
            }
            if (pci_get_word(reg) & PORT_CONTROL_ALT_MEMID_EN) {
                /* Alt Memory & ID Space Enable */
                qemu_log_mask(LOG_UNIMP,
                              "Alt Memory & ID space is not supported\n");
            }
        }
    }
}

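/*
 * Keep the AER Root Error Status interrupt message number in sync with the
 * currently allocated MSI vectors.
 */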
static void cxl_rp_aer_vector_update(PCIDevice *d)
{
    PCIERootPortClass *rpc = PCIE_ROOT_PORT_GET_CLASS(d);

    if (rpc->aer_vector) {
        pcie_aer_root_set_vector(d, rpc->aer_vector(d));
    }
}

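/*
 * Config write handler: snapshot the slot control/status and AER root
 * command before the bridge write lands, then let the PCIe helpers (FLR,
 * slot, AER) and the CXL DVSEC hook process the write.
 */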
static void cxl_rp_write_config(PCIDevice *d, uint32_t address, uint32_t val,
                                int len)
{
    uint16_t slt_ctl, slt_sta;
    uint32_t root_cmd =
        pci_get_long(d->config + d->exp.aer_cap + PCI_ERR_ROOT_COMMAND);

    pcie_cap_slot_get(d, &slt_ctl, &slt_sta);
    pci_bridge_write_config(d, address, val, len);
    cxl_rp_aer_vector_update(d);
    pcie_cap_flr_write_config(d, address, val, len);
    pcie_cap_slot_write_config(d, slt_ctl, slt_sta, address, val, len);
    pcie_aer_write_config(d, address, val, len);
    pcie_aer_root_write_config(d, address, val, len, root_cmd);

    cxl_rp_dvsec_write_config(d, address, val, len);
}

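/*
 * Class init: identify as an Intel CXL root port, reuse the generic root
 * port AER/ACS layout, and hook up the MSI and AER vector callbacks.
 */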
static void cxl_root_port_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(oc);
    ResettableClass *rc = RESETTABLE_CLASS(oc);
    PCIERootPortClass *rpc = PCIE_ROOT_PORT_CLASS(oc);

    k->vendor_id = PCI_VENDOR_ID_INTEL;
    k->device_id = CXL_ROOT_PORT_DID;
    dc->desc = "CXL Root Port";
    k->revision = 0;
    device_class_set_props(dc, gen_rp_props);
    k->config_write = cxl_rp_write_config;

    device_class_set_parent_realize(dc, cxl_rp_realize, &rpc->parent_realize);
    resettable_class_set_parent_phases(rc, NULL, cxl_rp_reset_hold, NULL,
                                       &rpc->parent_phases);

    rpc->aer_offset = GEN_PCIE_ROOT_PORT_AER_OFFSET;
    rpc->acs_offset = GEN_PCIE_ROOT_PORT_ACS_OFFSET;
    rpc->aer_vector = cxl_rp_aer_vector;
    rpc->interrupts_init = cxl_rp_interrupts_init;
    rpc->interrupts_uninit = cxl_rp_interrupts_uninit;

    dc->hotpluggable = false;
}

static const TypeInfo cxl_root_port_info = {
    .name = TYPE_CXL_ROOT_PORT,
    .parent = TYPE_PCIE_ROOT_PORT,
    .instance_size = sizeof(CXLRootPort),
    .class_init = cxl_root_port_class_init,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_CXL_DEVICE },
        { }
    },
};

static void cxl_register(void)
{
    type_register_static(&cxl_root_port_info);
}

type_init(cxl_register);