/*
 * CXL Utility library for components
 *
 * Copyright(C) 2020 Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qapi/error.h"
#include "hw/pci/pci.h"
#include "hw/cxl/cxl.h"

static uint64_t cxl_cache_mem_read_reg(void *opaque, hwaddr offset,
                                       unsigned size)
{
    CXLComponentState *cxl_cstate = opaque;
    ComponentRegisters *cregs = &cxl_cstate->crb;

    if (size == 8) {
        qemu_log_mask(LOG_UNIMP,
                      "CXL 8 byte cache mem registers not implemented\n");
        return 0;
    }

    if (cregs->special_ops && cregs->special_ops->read) {
        return cregs->special_ops->read(cxl_cstate, offset, size);
    } else {
        return cregs->cache_mem_registers[offset / sizeof(*cregs->cache_mem_registers)];
    }
}

static void cxl_cache_mem_write_reg(void *opaque, hwaddr offset, uint64_t value,
                                    unsigned size)
{
    CXLComponentState *cxl_cstate = opaque;
    ComponentRegisters *cregs = &cxl_cstate->crb;
    uint32_t mask;

    if (size == 8) {
        qemu_log_mask(LOG_UNIMP,
                      "CXL 8 byte cache mem registers not implemented\n");
        return;
    }
    mask = cregs->cache_mem_regs_write_mask[offset / sizeof(*cregs->cache_mem_regs_write_mask)];
    value &= mask;
    /* RO bits should remain constant. Done by reading the existing value. */
    value |= ~mask & cregs->cache_mem_registers[offset / sizeof(*cregs->cache_mem_registers)];
    if (cregs->special_ops && cregs->special_ops->write) {
        cregs->special_ops->write(cxl_cstate, offset, value, size);
    } else {
        cregs->cache_mem_registers[offset / sizeof(*cregs->cache_mem_registers)] = value;
    }
}

/*
 * 8.2.3
 *   The access restrictions specified in Section 8.2.2 also apply to CXL 2.0
 *   Component Registers.
 *
 * 8.2.2
 *   • A 32 bit register shall be accessed as a 4 Bytes quantity. Partial
 *     reads are not permitted.
 *   • A 64 bit register shall be accessed as a 8 Bytes quantity. Partial
 *     reads are not permitted.
 *
 * As of the spec defined today, only 4 byte registers exist.
 */
static const MemoryRegionOps cache_mem_ops = {
    .read = cxl_cache_mem_read_reg,
    .write = cxl_cache_mem_write_reg,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
};

void cxl_component_register_block_init(Object *obj,
                                       CXLComponentState *cxl_cstate,
                                       const char *type)
{
    ComponentRegisters *cregs = &cxl_cstate->crb;

    memory_region_init(&cregs->component_registers, obj, type,
                       CXL2_COMPONENT_BLOCK_SIZE);

    /* The io registers control the link, which QEMU doesn't model */
    memory_region_init_io(&cregs->io, obj, NULL, cregs, ".io",
                          CXL2_COMPONENT_IO_REGION_SIZE);
    memory_region_init_io(&cregs->cache_mem, obj, &cache_mem_ops, cregs,
                          ".cache_mem", CXL2_COMPONENT_CM_REGION_SIZE);

    memory_region_add_subregion(&cregs->component_registers, 0, &cregs->io);
    memory_region_add_subregion(&cregs->component_registers,
                                CXL2_COMPONENT_IO_REGION_SIZE,
                                &cregs->cache_mem);
}
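/*
 * Note on indexing (explanatory, no behavioural change): cache_mem_registers
 * and cache_mem_regs_write_mask are arrays of uint32_t, so the byte offsets
 * passed to the MemoryRegion callbacks above are converted to 4-byte register
 * indices via offset / sizeof(uint32_t). The generated R_CXL_* constants used
 * below are indices in those same 4-byte units.
 */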
static void ras_init_common(uint32_t *reg_state, uint32_t *write_msk)
{
    /*
     * Error status is RW1C, but since no bits are ever set it can be
     * handled as RO.
     */
    reg_state[R_CXL_RAS_UNC_ERR_STATUS] = 0;
    /* Bits 12-13 and 17-31 reserved in CXL 2.0 */
    reg_state[R_CXL_RAS_UNC_ERR_MASK] = 0x1cfff;
    write_msk[R_CXL_RAS_UNC_ERR_MASK] = 0x1cfff;
    reg_state[R_CXL_RAS_UNC_ERR_SEVERITY] = 0x1cfff;
    write_msk[R_CXL_RAS_UNC_ERR_SEVERITY] = 0x1cfff;
    reg_state[R_CXL_RAS_COR_ERR_STATUS] = 0;
    reg_state[R_CXL_RAS_COR_ERR_MASK] = 0x7f;
    write_msk[R_CXL_RAS_COR_ERR_MASK] = 0x7f;
    /* CXL switches and devices must set this register */
    reg_state[R_CXL_RAS_ERR_CAP_CTRL] = 0x00;
}

static void hdm_init_common(uint32_t *reg_state, uint32_t *write_msk)
{
    int decoder_count = 1;
    int i;

    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, DECODER_COUNT,
                     cxl_decoder_count_enc(decoder_count));
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, TARGET_COUNT, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, INTERLEAVE_256B, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, INTERLEAVE_4K, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, POISON_ON_ERR_CAP, 0);
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_GLOBAL_CONTROL,
                     HDM_DECODER_ENABLE, 0);
    write_msk[R_CXL_HDM_DECODER_GLOBAL_CONTROL] = 0x3;
    for (i = 0; i < decoder_count; i++) {
        /*
         * Decoder register sets are 0x20 bytes apart; since the R_CXL_*
         * constants are 4-byte register indices, the per-decoder stride
         * is 0x20 / 4 = 8.
         */
        write_msk[R_CXL_HDM_DECODER0_BASE_LO + i * 8] = 0xf0000000;
        write_msk[R_CXL_HDM_DECODER0_BASE_HI + i * 8] = 0xffffffff;
        write_msk[R_CXL_HDM_DECODER0_SIZE_LO + i * 8] = 0xf0000000;
        write_msk[R_CXL_HDM_DECODER0_SIZE_HI + i * 8] = 0xffffffff;
        write_msk[R_CXL_HDM_DECODER0_CTRL + i * 8] = 0x13ff;
    }
}
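/*
 * Worked example of the write-mask mechanism (illustrative only): with
 * write_msk[R_CXL_HDM_DECODER_GLOBAL_CONTROL] == 0x3, a guest write of
 * 0xffffffff to that register is reduced by cxl_cache_mem_write_reg() to
 *
 *   value = (0xffffffff & 0x3) | (~0x3 & old_value)
 *
 * i.e. only the two low control bits change; all other bits keep their
 * previous (reset) values.
 */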
void cxl_component_register_init_common(uint32_t *reg_state, uint32_t *write_msk,
                                        enum reg_type type)
{
    int caps = 0;

    /*
     * In CXL 2.0 the capabilities required for each CXL component are such
     * that, with the ordering chosen here, a single number can be used to
     * define which capabilities should be provided.
     */
    switch (type) {
    case CXL2_DOWNSTREAM_PORT:
    case CXL2_DEVICE:
        /* RAS, Link */
        caps = 2;
        break;
    case CXL2_UPSTREAM_PORT:
    case CXL2_TYPE3_DEVICE:
    case CXL2_LOGICAL_DEVICE:
        /* + HDM */
        caps = 3;
        break;
    case CXL2_ROOT_PORT:
        /* + Extended Security, + Snoop */
        caps = 5;
        break;
    default:
        abort();
    }

    memset(reg_state, 0, CXL2_COMPONENT_CM_REGION_SIZE);

    /* CXL Capability Header Register */
    ARRAY_FIELD_DP32(reg_state, CXL_CAPABILITY_HEADER, ID, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_CAPABILITY_HEADER, VERSION, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_CAPABILITY_HEADER, CACHE_MEM_VERSION, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_CAPABILITY_HEADER, ARRAY_SIZE, caps);

#define init_cap_reg(reg, id, version)                                        \
    QEMU_BUILD_BUG_ON(CXL_##reg##_REGISTERS_OFFSET == 0);                     \
    do {                                                                      \
        int which = R_CXL_##reg##_CAPABILITY_HEADER;                          \
        reg_state[which] = FIELD_DP32(reg_state[which],                       \
                                      CXL_##reg##_CAPABILITY_HEADER, ID, id); \
        reg_state[which] =                                                    \
            FIELD_DP32(reg_state[which], CXL_##reg##_CAPABILITY_HEADER,      \
                       VERSION, version);                                     \
        reg_state[which] =                                                    \
            FIELD_DP32(reg_state[which], CXL_##reg##_CAPABILITY_HEADER, PTR, \
                       CXL_##reg##_REGISTERS_OFFSET);                         \
    } while (0)

    init_cap_reg(RAS, 2, 2);
    ras_init_common(reg_state, write_msk);

    init_cap_reg(LINK, 4, 2);

    if (caps < 3) {
        return;
    }

    init_cap_reg(HDM, 5, 1);
    hdm_init_common(reg_state, write_msk);

    if (caps < 5) {
        return;
    }

    init_cap_reg(EXTSEC, 6, 1);
    init_cap_reg(SNOOP, 8, 1);

#undef init_cap_reg
}
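/*
 * For example (illustrative only), a CXL2_ROOT_PORT (caps == 5) ends up with
 * the following headers in the cache_mem array after
 * cxl_component_register_init_common():
 *
 *   CXL Capability Header       ARRAY_SIZE = 5
 *   RAS Capability Header       ID = 2, version 2, PTR = CXL_RAS_REGISTERS_OFFSET
 *   Link Capability Header      ID = 4, version 2, PTR = CXL_LINK_REGISTERS_OFFSET
 *   HDM Capability Header       ID = 5, version 1, PTR = CXL_HDM_REGISTERS_OFFSET
 *   Ext Security Cap Header     ID = 6, version 1, PTR = CXL_EXTSEC_REGISTERS_OFFSET
 *   Snoop Capability Header     ID = 8, version 1, PTR = CXL_SNOOP_REGISTERS_OFFSET
 */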
/*
 * Helper to create a DVSEC header for a CXL entity. The caller is responsible
 * for tracking the valid offset.
 *
 * This function will build the DVSEC header on behalf of the caller and then
 * copy in the remaining data for the vendor specific bits.
 * It will also set up appropriate write masks.
 */
void cxl_component_create_dvsec(CXLComponentState *cxl,
                                enum reg_type cxl_dev_type, uint16_t length,
                                uint16_t type, uint8_t rev, uint8_t *body)
{
    PCIDevice *pdev = cxl->pdev;
    uint16_t offset = cxl->dvsec_offset;
    uint8_t *wmask = pdev->wmask;

    assert(offset >= PCI_CFG_SPACE_SIZE &&
           ((offset + length) < PCI_CFG_SPACE_EXP_SIZE));
    assert((length & 0xf000) == 0);
    assert((rev & ~0xf) == 0);

    /* Create the DVSEC in the extended config (MCFG) space */
    pcie_add_capability(pdev, PCI_EXT_CAP_ID_DVSEC, 1, offset, length);
    pci_set_long(pdev->config + offset + PCIE_DVSEC_HEADER1_OFFSET,
                 (length << 20) | (rev << 16) | CXL_VENDOR_ID);
    pci_set_word(pdev->config + offset + PCIE_DVSEC_ID_OFFSET, type);
    memcpy(pdev->config + offset + sizeof(DVSECHeader),
           body + sizeof(DVSECHeader),
           length - sizeof(DVSECHeader));

    /* Configure write masks */
    switch (type) {
    case PCIE_CXL_DEVICE_DVSEC:
        /* Ctrl is RW Lock - so needs explicit blocking when lock is set */
        wmask[offset + offsetof(CXLDVSECDevice, ctrl)] = 0xFD;
        wmask[offset + offsetof(CXLDVSECDevice, ctrl) + 1] = 0x4F;
        /* Status is RW1CS */
        wmask[offset + offsetof(CXLDVSECDevice, ctrl2)] = 0x0F;
        /* Lock is RW Once */
        wmask[offset + offsetof(CXLDVSECDevice, lock)] = 0x01;
        /* range1/2_base_high/low are RW Lock */
        wmask[offset + offsetof(CXLDVSECDevice, range1_base_hi)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range1_base_hi) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range1_base_hi) + 2] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range1_base_hi) + 3] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range1_base_lo) + 3] = 0xF0;
        wmask[offset + offsetof(CXLDVSECDevice, range2_base_hi)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range2_base_hi) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range2_base_hi) + 2] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range2_base_hi) + 3] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range2_base_lo) + 3] = 0xF0;
        break;
    case NON_CXL_FUNCTION_MAP_DVSEC:
        break; /* Not yet implemented */
    case EXTENSIONS_PORT_DVSEC:
        wmask[offset + offsetof(CXLDVSECPortExtensions, control)] = 0x0F;
        wmask[offset + offsetof(CXLDVSECPortExtensions, control) + 1] = 0x40;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_bus_base)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_bus_limit)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_memory_base)] = 0xF0;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_memory_base) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_memory_limit)] = 0xF0;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_memory_limit) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_base)] = 0xF0;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_base) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_limit)] = 0xF0;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_limit) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_base_high)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_base_high) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_base_high) + 2] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_base_high) + 3] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_limit_high)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_limit_high) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_limit_high) + 2] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_limit_high) + 3] = 0xFF;
        break;
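    /*
     * Note: GPF below is Global Persistent Flush. The masks expose only the
     * low nibbles of the phase control/duration bytes, which (assuming the
     * CXLDVSECPortGPF/CXLDVSECDeviceGPF struct layouts in the CXL headers
     * match the CXL 2.0 GPF DVSEC definitions) correspond to the timeout
     * base and scale fields; device phase 2 power is fully writable.
     */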
    case GPF_PORT_DVSEC:
        wmask[offset + offsetof(CXLDVSECPortGPF, phase1_ctrl)] = 0x0F;
        wmask[offset + offsetof(CXLDVSECPortGPF, phase1_ctrl) + 1] = 0x0F;
        wmask[offset + offsetof(CXLDVSECPortGPF, phase2_ctrl)] = 0x0F;
        wmask[offset + offsetof(CXLDVSECPortGPF, phase2_ctrl) + 1] = 0x0F;
        break;
    case GPF_DEVICE_DVSEC:
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_duration)] = 0x0F;
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_duration) + 1] = 0x0F;
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_power)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_power) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_power) + 2] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_power) + 3] = 0xFF;
        break;
    case PCIE_FLEXBUS_PORT_DVSEC:
        switch (cxl_dev_type) {
        case CXL2_ROOT_PORT:
            /* No MLD */
            wmask[offset + offsetof(CXLDVSECPortFlexBus, ctrl)] = 0xbd;
            break;
        case CXL2_DOWNSTREAM_PORT:
            wmask[offset + offsetof(CXLDVSECPortFlexBus, ctrl)] = 0xfd;
            break;
        default: /* Registers are RO for other component types */
            break;
        }
        /* There are RW1CS bits in the status register, but none are set currently */
        break;
    }

    /* Update state for future DVSEC additions */
    range_init_nofail(&cxl->dvsecs[type], cxl->dvsec_offset, length);
    cxl->dvsec_offset += length;
}

uint8_t cxl_interleave_ways_enc(int iw, Error **errp)
{
    switch (iw) {
    case 1: return 0x0;
    case 2: return 0x1;
    case 4: return 0x2;
    case 8: return 0x3;
    case 16: return 0x4;
    case 3: return 0x8;
    case 6: return 0x9;
    case 12: return 0xa;
    default:
        error_setg(errp, "Interleave ways: %d not supported", iw);
        return 0;
    }
}

uint8_t cxl_interleave_granularity_enc(uint64_t gran, Error **errp)
{
    switch (gran) {
    case 256: return 0;
    case 512: return 1;
    case 1024: return 2;
    case 2048: return 3;
    case 4096: return 4;
    case 8192: return 5;
    case 16384: return 6;
    default:
        error_setg(errp, "Interleave granularity: %" PRIu64 " invalid", gran);
        return 0;
    }
}
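/*
 * Observation about the two encoding tables above (not additional spec text):
 * for power-of-2 interleave ways the encoding is log2(iw), and the
 * granularity encoding is log2(gran) - 8, e.g.
 *
 *   cxl_interleave_ways_enc(4, &err)           -> 0x2 (log2(4))
 *   cxl_interleave_granularity_enc(1024, &err) -> 2   (log2(1024) - 8)
 *
 * The 3/6/12-way encodings (0x8/0x9/0xa) do not follow that pattern, hence
 * the explicit switch rather than a computed value.
 */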