/*
 * CXL host parameter parsing routines
 *
 * Copyright (c) 2022 Huawei
 * Modeled loosely on the NUMA options handling in hw/core/numa.c
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/bitmap.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "sysemu/qtest.h"
#include "hw/boards.h"

#include "qapi/qapi-visit-machine.h"
#include "hw/cxl/cxl.h"
#include "hw/pci/pci_bus.h"
#include "hw/pci/pci_bridge.h"
#include "hw/pci/pci_host.h"
#include "hw/pci/pcie_port.h"

void cxl_fixed_memory_window_config(MachineState *ms,
                                    CXLFixedMemoryWindowOptions *object,
                                    Error **errp)
{
    CXLFixedWindow *fw = g_malloc0(sizeof(*fw));
    strList *target;
    int i;

    for (target = object->targets; target; target = target->next) {
        fw->num_targets++;
    }

    fw->enc_int_ways = cxl_interleave_ways_enc(fw->num_targets, errp);
    if (*errp) {
        return;
    }

    fw->targets = g_malloc0_n(fw->num_targets, sizeof(*fw->targets));
    for (i = 0, target = object->targets; target; i++, target = target->next) {
        /* This link cannot be resolved yet, so stash the name for now */
        fw->targets[i] = g_strdup(target->value);
    }

    if (object->size % (256 * MiB)) {
        error_setg(errp,
                   "Size of a CXL fixed memory window must be a multiple of 256MiB");
        return;
    }
    fw->size = object->size;

    if (object->has_interleave_granularity) {
        fw->enc_int_gran =
            cxl_interleave_granularity_enc(object->interleave_granularity,
                                           errp);
        if (*errp) {
            return;
        }
    } else {
        /* Default to 256 byte interleave */
        fw->enc_int_gran = 0;
    }

    ms->cxl_devices_state->fixed_windows =
        g_list_append(ms->cxl_devices_state->fixed_windows, fw);
}

void cxl_fixed_memory_window_link_targets(Error **errp)
{
    MachineState *ms = MACHINE(qdev_get_machine());

    if (ms->cxl_devices_state && ms->cxl_devices_state->fixed_windows) {
        GList *it;

        for (it = ms->cxl_devices_state->fixed_windows; it; it = it->next) {
            CXLFixedWindow *fw = it->data;
            int i;

            for (i = 0; i < fw->num_targets; i++) {
                Object *o;
                bool ambig;

                o = object_resolve_path_type(fw->targets[i],
                                             TYPE_PXB_CXL_DEVICE,
                                             &ambig);
                if (!o) {
                    error_setg(errp, "Could not resolve CXLFM target %s",
                               fw->targets[i]);
                    return;
                }
                fw->target_hbs[i] = PXB_CXL_DEV(o);
            }
        }
    }
}

/* TODO: support multiple HDM decoders */
static bool cxl_hdm_find_target(uint32_t *cache_mem, hwaddr addr,
                                uint8_t *target)
{
    uint32_t ctrl;
    uint32_t ig_enc;
    uint32_t iw_enc;
    uint32_t target_reg;
    uint32_t target_idx;

    ctrl = cache_mem[R_CXL_HDM_DECODER0_CTRL];
    if (!FIELD_EX32(ctrl, CXL_HDM_DECODER0_CTRL, COMMITTED)) {
        return false;
    }

    ig_enc = FIELD_EX32(ctrl, CXL_HDM_DECODER0_CTRL, IG);
    iw_enc = FIELD_EX32(ctrl, CXL_HDM_DECODER0_CTRL, IW);
    target_idx = (addr / cxl_decode_ig(ig_enc)) % (1 << iw_enc);

    if (target_idx < 4) {
        /* Targets 0-3 are packed one byte each in the LO target list register */
        target_reg = cache_mem[R_CXL_HDM_DECODER0_TARGET_LIST_LO];
        target_reg >>= target_idx * 8;
    } else {
        /* Targets 4-7 are packed one byte each in the HI target list register */
        target_reg = cache_mem[R_CXL_HDM_DECODER0_TARGET_LIST_HI];
        target_reg >>= (target_idx - 4) * 8;
    }
    *target = target_reg & 0xff;

    return true;
}

static PCIDevice *cxl_cfmws_find_device(CXLFixedWindow *fw, hwaddr addr)
{
    CXLComponentState *hb_cstate;
    PCIHostState *hb;
    int rb_index;
    uint32_t *cache_mem;
    uint8_t target;
    bool target_found;
    PCIDevice *rp, *d;

    /* Address is relative to memory region. Convert to HPA */
    addr += fw->base;

    rb_index = (addr / cxl_decode_ig(fw->enc_int_gran)) % fw->num_targets;
    hb = PCI_HOST_BRIDGE(fw->target_hbs[rb_index]->cxl.cxl_host_bridge);
    if (!hb || !hb->bus || !pci_bus_is_cxl(hb->bus)) {
        return NULL;
    }

    hb_cstate = cxl_get_hb_cstate(hb);
    if (!hb_cstate) {
        return NULL;
    }

    cache_mem = hb_cstate->crb.cache_mem_registers;

    target_found = cxl_hdm_find_target(cache_mem, addr, &target);
    if (!target_found) {
        return NULL;
    }

    rp = pcie_find_port_by_pn(hb->bus, target);
    if (!rp) {
        return NULL;
    }

    d = pci_bridge_get_sec_bus(PCI_BRIDGE(rp))->devices[0];

    if (!d || !object_dynamic_cast(OBJECT(d), TYPE_CXL_TYPE3)) {
        return NULL;
    }

    return d;
}

static MemTxResult cxl_read_cfmws(void *opaque, hwaddr addr, uint64_t *data,
                                  unsigned size, MemTxAttrs attrs)
{
    CXLFixedWindow *fw = opaque;
    PCIDevice *d;

    d = cxl_cfmws_find_device(fw, addr);
    if (d == NULL) {
        *data = 0;
        /* Reads to invalid address return poison */
        return MEMTX_ERROR;
    }

    return cxl_type3_read(d, addr + fw->base, data, size, attrs);
}

static MemTxResult cxl_write_cfmws(void *opaque, hwaddr addr,
                                   uint64_t data, unsigned size,
                                   MemTxAttrs attrs)
{
    CXLFixedWindow *fw = opaque;
    PCIDevice *d;

    d = cxl_cfmws_find_device(fw, addr);
    if (d == NULL) {
        /* Writes to invalid address are silent */
        return MEMTX_OK;
    }

    return cxl_type3_write(d, addr + fw->base, data, size, attrs);
}

const MemoryRegionOps cfmws_ops = {
    .read_with_attrs = cxl_read_cfmws,
    .write_with_attrs = cxl_write_cfmws,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
};
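
/*
 * Illustrative usage (a sketch based on the QEMU CXL documentation; the
 * device IDs and bus numbers below are examples, not values taken from this
 * file): a fixed memory window interleaving two pxb-cxl host bridges might
 * be configured on the command line as
 *
 *   -machine q35,cxl=on \
 *   -device pxb-cxl,bus_nr=12,bus=pcie.0,id=cxl.1 \
 *   -device pxb-cxl,bus_nr=222,bus=pcie.0,id=cxl.2 \
 *   -M cxl-fmw.0.targets.0=cxl.1,cxl-fmw.0.targets.1=cxl.2,\
 *      cxl-fmw.0.size=4G,cxl-fmw.0.interleave-granularity=8k
 *
 * cxl_fixed_memory_window_config() above parses each cxl-fmw entry, and
 * cxl_fixed_memory_window_link_targets() later resolves the stashed target
 * names to their pxb-cxl host bridge objects.
 */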