xref: /openbmc/qemu/hw/pci-bridge/cxl_upstream.c (revision 7200fb21)
/*
 * Emulated CXL Switch Upstream Port
 *
 * Copyright (c) 2022 Huawei Technologies.
 *
 * Based on xio3130_upstream.c
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "hw/qdev-properties.h"
#include "hw/pci/msi.h"
#include "hw/pci/pcie.h"
#include "hw/pci/pcie_port.h"
#include "hw/pci-bridge/cxl_upstream_port.h"
/*
 * Null value of all Fs suggested by IEEE RA guidelines for use of
 * EUI, OUI and CID
 */
#define UI64_NULL (~0ULL)

#define CXL_UPSTREAM_PORT_MSI_NR_VECTOR 2

#define CXL_UPSTREAM_PORT_MSI_OFFSET 0x70
#define CXL_UPSTREAM_PORT_PCIE_CAP_OFFSET 0x90
#define CXL_UPSTREAM_PORT_AER_OFFSET 0x100
#define CXL_UPSTREAM_PORT_SN_OFFSET \
    (CXL_UPSTREAM_PORT_AER_OFFSET + PCI_ERR_SIZEOF)
#define CXL_UPSTREAM_PORT_DVSEC_OFFSET \
    (CXL_UPSTREAM_PORT_SN_OFFSET + PCI_EXT_CAP_DSN_SIZEOF)

CXLComponentState *cxl_usp_to_cstate(CXLUpstreamPort *usp)
{
    return &usp->cxl_cstate;
}

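/*
 * Handle config writes that land in the CXL Extensions DVSEC. Only the
 * Port Control register is inspected; the Secondary Bus Reset unmask and
 * Alt Memory & ID Space Enable bits are logged as unimplemented.
 */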
static void cxl_usp_dvsec_write_config(PCIDevice *dev, uint32_t addr,
                                       uint32_t val, int len)
{
    CXLUpstreamPort *usp = CXL_USP(dev);

    if (range_contains(&usp->cxl_cstate.dvsecs[EXTENSIONS_PORT_DVSEC], addr)) {
        uint8_t *reg = &dev->config[addr];
        addr -= usp->cxl_cstate.dvsecs[EXTENSIONS_PORT_DVSEC].lob;
        if (addr == PORT_CONTROL_OFFSET) {
            if (pci_get_word(reg) & PORT_CONTROL_UNMASK_SBR) {
                /* unmask SBR */
                qemu_log_mask(LOG_UNIMP, "SBR mask control is not supported\n");
            }
            if (pci_get_word(reg) & PORT_CONTROL_ALT_MEMID_EN) {
                /* Alt Memory & ID Space Enable */
                qemu_log_mask(LOG_UNIMP,
                              "Alt Memory & ID space is not supported\n");
            }
        }
    }
}

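/*
 * Config-space write handler: fans the write out to the DOE mailbox,
 * standard bridge/FLR/AER handling, and finally the CXL DVSEC handler.
 */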
static void cxl_usp_write_config(PCIDevice *d, uint32_t address,
                                 uint32_t val, int len)
{
    CXLUpstreamPort *usp = CXL_USP(d);

    pcie_doe_write_config(&usp->doe_cdat, address, val, len);
    pci_bridge_write_config(d, address, val, len);
    pcie_cap_flr_write_config(d, address, val, len);
    pcie_aer_write_config(d, address, val, len);

    cxl_usp_dvsec_write_config(d, address, val, len);
}

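/* Config-space reads are offered to the DOE capability first. */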
static uint32_t cxl_usp_read_config(PCIDevice *d, uint32_t address, int len)
{
    CXLUpstreamPort *usp = CXL_USP(d);
    uint32_t val;

    if (pcie_doe_read_config(&usp->doe_cdat, address, len, &val)) {
        return val;
    }

    return pci_default_read_config(d, address, len);
}

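/*
 * (Re)initialize the CXL component register block and advertise an
 * 8-entry HDM decoder target count.
 */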
static void latch_registers(CXLUpstreamPort *usp)
{
    uint32_t *reg_state = usp->cxl_cstate.crb.cache_mem_registers;
    uint32_t *write_msk = usp->cxl_cstate.crb.cache_mem_regs_write_mask;

    cxl_component_register_init_common(reg_state, write_msk,
                                       CXL2_UPSTREAM_PORT);
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, TARGET_COUNT, 8);
}

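/*
 * Device reset: standard bridge reset, clear device error status, then
 * re-latch the component registers.
 */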
static void cxl_usp_reset(DeviceState *qdev)
{
    PCIDevice *d = PCI_DEVICE(qdev);
    CXLUpstreamPort *usp = CXL_USP(qdev);

    pci_bridge_reset(qdev);
    pcie_cap_deverr_reset(d);
    latch_registers(usp);
}

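/*
 * Populate the CXL DVSECs carried in PCIe extended config space:
 * Extensions (port control/status), Flex Bus port, and the Register
 * Locator pointing at the component register BAR.
 */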
static void build_dvsecs(CXLComponentState *cxl)
{
    uint8_t *dvsec;

    dvsec = (uint8_t *)&(CXLDVSECPortExt){
        .status = 0x1, /* Port Power Management Init Complete */
    };
    cxl_component_create_dvsec(cxl, CXL2_UPSTREAM_PORT,
                               EXTENSIONS_PORT_DVSEC_LENGTH,
                               EXTENSIONS_PORT_DVSEC,
                               EXTENSIONS_PORT_DVSEC_REVID, dvsec);
    dvsec = (uint8_t *)&(CXLDVSECPortFlexBus){
        .cap                     = 0x27, /* Cache, IO, Mem, non-MLD */
        .ctrl                    = 0x27, /* Cache, IO, Mem */
        .status                  = 0x26, /* same */
        .rcvd_mod_ts_data_phase1 = 0xef, /* Mod TS data; value unexplained */
    };
    cxl_component_create_dvsec(cxl, CXL2_UPSTREAM_PORT,
                               PCIE_FLEXBUS_PORT_DVSEC_LENGTH_2_0,
                               PCIE_FLEXBUS_PORT_DVSEC,
                               PCIE_FLEXBUS_PORT_DVSEC_REVID_2_0, dvsec);

    dvsec = (uint8_t *)&(CXLDVSECRegisterLocator){
        .rsvd         = 0,
        .reg0_base_lo = RBI_COMPONENT_REG | CXL_COMPONENT_REG_BAR_IDX,
        .reg0_base_hi = 0,
    };
    cxl_component_create_dvsec(cxl, CXL2_UPSTREAM_PORT,
                               REG_LOC_DVSEC_LENGTH, REG_LOC_DVSEC,
                               REG_LOC_DVSEC_REVID, dvsec);
}

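/*
 * DOE CDAT table access handler: copies the requested CDAT entry into the
 * read mailbox, chaining entry handles until CXL_DOE_TAB_ENT_MAX marks
 * the final entry.
 */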
static bool cxl_doe_cdat_rsp(DOECap *doe_cap)
{
    CDATObject *cdat = &CXL_USP(doe_cap->pdev)->cxl_cstate.cdat;
    uint16_t ent;
    void *base;
    uint32_t len;
    CDATReq *req = pcie_doe_get_write_mbox_ptr(doe_cap);
    CDATRsp rsp;

    cxl_doe_cdat_update(&CXL_USP(doe_cap->pdev)->cxl_cstate, &error_fatal);
    assert(cdat->entry_len);

    /* Discard if request length mismatched */
    if (pcie_doe_get_obj_len(req) <
        DIV_ROUND_UP(sizeof(CDATReq), sizeof(uint32_t))) {
        return false;
    }

    ent = req->entry_handle;
    base = cdat->entry[ent].base;
    len = cdat->entry[ent].length;

    rsp = (CDATRsp) {
        .header = {
            .vendor_id = CXL_VENDOR_ID,
            .data_obj_type = CXL_DOE_TABLE_ACCESS,
            .reserved = 0x0,
            .length = DIV_ROUND_UP((sizeof(rsp) + len), sizeof(uint32_t)),
        },
        .rsp_code = CXL_DOE_TAB_RSP,
        .table_type = CXL_DOE_TAB_TYPE_CDAT,
        .entry_handle = (ent < cdat->entry_len - 1) ?
                        ent + 1 : CXL_DOE_TAB_ENT_MAX,
    };

    memcpy(doe_cap->read_mbox, &rsp, sizeof(rsp));
    memcpy(doe_cap->read_mbox + DIV_ROUND_UP(sizeof(rsp), sizeof(uint32_t)),
           base, len);

    doe_cap->read_mbox_len += rsp.header.length;

    return true;
}

static DOEProtocol doe_cdat_prot[] = {
    { CXL_VENDOR_ID, CXL_DOE_TABLE_ACCESS, cxl_doe_cdat_rsp },
    { }
};

enum {
    CXL_USP_CDAT_SSLBIS_LAT,
    CXL_USP_CDAT_SSLBIS_BW,
    CXL_USP_CDAT_NUM_ENTRIES
};

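/*
 * Build a minimal CDAT for the switch: one SSLBIS for latency and one for
 * bandwidth, each with an entry per downstream port found on the
 * secondary bus. Returns 0 if no ports are present yet so the caller can
 * retry later.
 */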
static int build_cdat_table(CDATSubHeader ***cdat_table, void *priv)
{
    g_autofree CDATSslbis *sslbis_latency = NULL;
    g_autofree CDATSslbis *sslbis_bandwidth = NULL;
    CXLUpstreamPort *us = CXL_USP(priv);
    PCIBus *bus = &PCI_BRIDGE(us)->sec_bus;
    int devfn, sslbis_size, i;
    int count = 0;
    uint16_t port_ids[256];

    for (devfn = 0; devfn < ARRAY_SIZE(bus->devices); devfn++) {
        PCIDevice *d = bus->devices[devfn];
        PCIEPort *port;

        if (!d || !pci_is_express(d) || !d->exp.exp_cap) {
            continue;
        }

        /*
         * Whilst the PCI express spec doesn't allow anything other than
         * downstream ports on this bus, let us be a little paranoid
         */
        if (!object_dynamic_cast(OBJECT(d), TYPE_PCIE_PORT)) {
            continue;
        }

        port = PCIE_PORT(d);
        port_ids[count] = port->port;
        count++;
    }

    /* May not yet have any ports - try again later */
    if (count == 0) {
        return 0;
    }

    sslbis_size = sizeof(CDATSslbis) + sizeof(*sslbis_latency->sslbe) * count;
    sslbis_latency = g_malloc(sslbis_size);
    if (!sslbis_latency) {
        return -ENOMEM;
    }
    *sslbis_latency = (CDATSslbis) {
        .sslbis_header = {
            .header = {
                .type = CDAT_TYPE_SSLBIS,
                .length = sslbis_size,
            },
            .data_type = HMATLB_DATA_TYPE_ACCESS_LATENCY,
            .entry_base_unit = 10000,
        },
    };

    for (i = 0; i < count; i++) {
        sslbis_latency->sslbe[i] = (CDATSslbe) {
            .port_x_id = CDAT_PORT_ID_USP,
            .port_y_id = port_ids[i],
            .latency_bandwidth = 15, /* 150ns */
        };
    }

    sslbis_bandwidth = g_malloc(sslbis_size);
    if (!sslbis_bandwidth) {
        return -ENOMEM;
    }
    *sslbis_bandwidth = (CDATSslbis) {
        .sslbis_header = {
            .header = {
                .type = CDAT_TYPE_SSLBIS,
                .length = sslbis_size,
            },
            .data_type = HMATLB_DATA_TYPE_ACCESS_BANDWIDTH,
            .entry_base_unit = 1024,
        },
    };

    for (i = 0; i < count; i++) {
        sslbis_bandwidth->sslbe[i] = (CDATSslbe) {
            .port_x_id = CDAT_PORT_ID_USP,
            .port_y_id = port_ids[i],
            .latency_bandwidth = 16, /* 16 GB/s */
        };
    }

    *cdat_table = g_new0(CDATSubHeader *, CXL_USP_CDAT_NUM_ENTRIES);

    /* Header always at start of structure */
    (*cdat_table)[CXL_USP_CDAT_SSLBIS_LAT] = g_steal_pointer(&sslbis_latency);
    (*cdat_table)[CXL_USP_CDAT_SSLBIS_BW] = g_steal_pointer(&sslbis_bandwidth);

    return CXL_USP_CDAT_NUM_ENTRIES;
}

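/* Release a CDAT table previously built by build_cdat_table(). */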
static void free_default_cdat_table(CDATSubHeader **cdat_table, int num,
                                    void *priv)
{
    int i;

    for (i = 0; i < num; i++) {
        g_free(cdat_table[i]);
    }
    g_free(cdat_table);
}

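/*
 * Realize: bring up the bridge, MSI, PCIe/FLR/AER capabilities, optional
 * serial number, CXL DVSECs, the component register BAR and the DOE CDAT
 * mailbox, unwinding via the error labels on failure.
 */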
static void cxl_usp_realize(PCIDevice *d, Error **errp)
{
    PCIEPort *p = PCIE_PORT(d);
    CXLUpstreamPort *usp = CXL_USP(d);
    CXLComponentState *cxl_cstate = &usp->cxl_cstate;
    ComponentRegisters *cregs = &cxl_cstate->crb;
    MemoryRegion *component_bar = &cregs->component_registers;
    int rc;

    pci_bridge_initfn(d, TYPE_PCIE_BUS);
    pcie_port_init_reg(d);

    rc = msi_init(d, CXL_UPSTREAM_PORT_MSI_OFFSET,
                  CXL_UPSTREAM_PORT_MSI_NR_VECTOR, true, true, errp);
    if (rc) {
        assert(rc == -ENOTSUP);
        goto err_bridge;
    }

    rc = pcie_cap_init(d, CXL_UPSTREAM_PORT_PCIE_CAP_OFFSET,
                       PCI_EXP_TYPE_UPSTREAM, p->port, errp);
    if (rc < 0) {
        goto err_msi;
    }

    pcie_cap_flr_init(d);
    pcie_cap_deverr_init(d);
    rc = pcie_aer_init(d, PCI_ERR_VER, CXL_UPSTREAM_PORT_AER_OFFSET,
                       PCI_ERR_SIZEOF, errp);
    if (rc) {
        goto err_cap;
    }
    if (usp->sn != UI64_NULL) {
        pcie_dev_ser_num_init(d, CXL_UPSTREAM_PORT_SN_OFFSET, usp->sn);
    }
    cxl_cstate->dvsec_offset = CXL_UPSTREAM_PORT_DVSEC_OFFSET;
    cxl_cstate->pdev = d;
    build_dvsecs(cxl_cstate);
    cxl_component_register_block_init(OBJECT(d), cxl_cstate, TYPE_CXL_USP);
    pci_register_bar(d, CXL_COMPONENT_REG_BAR_IDX,
                     PCI_BASE_ADDRESS_SPACE_MEMORY |
                     PCI_BASE_ADDRESS_MEM_TYPE_64,
                     component_bar);

    pcie_doe_init(d, &usp->doe_cdat, cxl_cstate->dvsec_offset, doe_cdat_prot,
                  true, 1);

    cxl_cstate->cdat.build_cdat_table = build_cdat_table;
    cxl_cstate->cdat.free_cdat_table = free_default_cdat_table;
    cxl_cstate->cdat.private = d;
    cxl_doe_cdat_init(cxl_cstate, errp);
    if (*errp) {
        goto err_cap;
    }

    return;

err_cap:
    pcie_cap_exit(d);
err_msi:
    msi_uninit(d);
err_bridge:
    pci_bridge_exitfn(d);
}

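/* Tear down in the reverse order of realize. */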
static void cxl_usp_exitfn(PCIDevice *d)
{
    pcie_aer_exit(d);
    pcie_cap_exit(d);
    msi_uninit(d);
    pci_bridge_exitfn(d);
}

static Property cxl_upstream_props[] = {
    DEFINE_PROP_UINT64("sn", CXLUpstreamPort, sn, UI64_NULL),
    DEFINE_PROP_STRING("cdat", CXLUpstreamPort, cxl_cstate.cdat.filename),
    DEFINE_PROP_END_OF_LIST()
};

static void cxl_upstream_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(oc);

    k->config_write = cxl_usp_write_config;
    k->config_read = cxl_usp_read_config;
    k->realize = cxl_usp_realize;
    k->exit = cxl_usp_exitfn;
    k->vendor_id = 0x19e5; /* Huawei */
    k->device_id = 0xa128; /* Emulated CXL Switch Upstream Port */
    k->revision = 0;
    set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
    dc->desc = "CXL Switch Upstream Port";
    dc->reset = cxl_usp_reset;
    device_class_set_props(dc, cxl_upstream_props);
}

static const TypeInfo cxl_usp_info = {
    .name = TYPE_CXL_USP,
    .parent = TYPE_PCIE_PORT,
    .instance_size = sizeof(CXLUpstreamPort),
    .class_init = cxl_upstream_class_init,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_PCIE_DEVICE },
        { INTERFACE_CXL_DEVICE },
        { }
    },
};

static void cxl_usp_register_type(void)
{
    type_register_static(&cxl_usp_info);
}

type_init(cxl_usp_register_type);