/*
 * CXL Utility library for devices
 *
 * Copyright(C) 2020 Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "hw/cxl/cxl.h"

/*
 * Device registers have no restrictions per the spec, and so fall back to the
 * default memory mapped register rules in CXL r3.1 Section 8.2:
 *   Software shall use CXL.io Memory Read and Write to access memory mapped
 *   registers defined in this section. Unless otherwise specified, software
 *   shall restrict the access width based on the following:
 *   • A 32 bit register shall be accessed as a 1 Byte, 2 Bytes or 4 Bytes
 *     quantity.
 *   • A 64 bit register shall be accessed as a 1 Byte, 2 Bytes, 4 Bytes or
 *     8 Bytes quantity.
 *   • The address shall be a multiple of the access width, e.g. when
 *     accessing a register as a 4 Byte quantity, the address shall be a
 *     multiple of 4.
 *   • The accesses shall map to contiguous bytes. If these rules are not
 *     followed, the behavior is undefined.
 */
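/*
 * For illustration (example offsets, not spec text): a 64 bit register at
 * offset 0x18 may be read as one 8 Byte access at 0x18, or as two 4 Byte
 * accesses at 0x18 and 0x1c, whereas a 4 Byte access at 0x1a breaks the
 * alignment rule and so has undefined behavior.
 */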
static uint64_t caps_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    CXLDeviceState *cxl_dstate = opaque;

    switch (size) {
    case 4:
        return cxl_dstate->caps_reg_state32[offset / size];
    case 8:
        return cxl_dstate->caps_reg_state64[offset / size];
    default:
        g_assert_not_reached();
    }
}

static uint64_t dev_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    CXLDeviceState *cxl_dstate = opaque;

    switch (size) {
    case 1:
        return cxl_dstate->dev_reg_state[offset];
    case 2:
        return cxl_dstate->dev_reg_state16[offset / size];
    case 4:
        return cxl_dstate->dev_reg_state32[offset / size];
    case 8:
        return cxl_dstate->dev_reg_state64[offset / size];
    default:
        g_assert_not_reached();
    }
}
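/*
 * The *_reg_state, *_reg_state16/32/64 members are assumed to be views of
 * the same backing store (a union in CXLDeviceState), so offset / size
 * yields the element index for whichever access width the guest used.
 */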
static uint64_t mailbox_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    CXLDeviceState *cxl_dstate;
    CXLCCI *cci = opaque;

    if (object_dynamic_cast(OBJECT(cci->intf), TYPE_CXL_TYPE3)) {
        cxl_dstate = &CXL_TYPE3(cci->intf)->cxl_dstate;
    } else if (object_dynamic_cast(OBJECT(cci->intf),
                                   TYPE_CXL_SWITCH_MAILBOX_CCI)) {
        cxl_dstate = &CXL_SWITCH_MAILBOX_CCI(cci->intf)->cxl_dstate;
    } else {
        return 0;
    }

    switch (size) {
    case 1:
        return cxl_dstate->mbox_reg_state[offset];
    case 2:
        return cxl_dstate->mbox_reg_state16[offset / size];
    case 4:
        return cxl_dstate->mbox_reg_state32[offset / size];
    case 8:
        if (offset == A_CXL_DEV_BG_CMD_STS) {
            uint64_t bg_status_reg;
            bg_status_reg = FIELD_DP64(0, CXL_DEV_BG_CMD_STS, OP,
                                       cci->bg.opcode);
            bg_status_reg = FIELD_DP64(bg_status_reg, CXL_DEV_BG_CMD_STS,
                                       PERCENTAGE_COMP, cci->bg.complete_pct);
            bg_status_reg = FIELD_DP64(bg_status_reg, CXL_DEV_BG_CMD_STS,
                                       RET_CODE, cci->bg.ret_code);
            /* endian? */
            cxl_dstate->mbox_reg_state64[offset / size] = bg_status_reg;
        }
        if (offset == A_CXL_DEV_MAILBOX_STS) {
            uint64_t status_reg = cxl_dstate->mbox_reg_state64[offset / size];
            if (cci->bg.complete_pct) {
                status_reg = FIELD_DP64(status_reg, CXL_DEV_MAILBOX_STS, BG_OP,
                                        0);
                cxl_dstate->mbox_reg_state64[offset / size] = status_reg;
            }
        }
        return cxl_dstate->mbox_reg_state64[offset / size];
    default:
        g_assert_not_reached();
    }
}

static void mailbox_mem_writel(uint32_t *reg_state, hwaddr offset,
                               uint64_t value)
{
    switch (offset) {
    case A_CXL_DEV_MAILBOX_CTRL:
        /* fallthrough */
    case A_CXL_DEV_MAILBOX_CAP:
        /* RO register */
        break;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 32-bit access to 0x%" PRIx64 " (WI)\n",
                      __func__, offset);
        return;
    }

    reg_state[offset / sizeof(*reg_state)] = value;
}

static void mailbox_mem_writeq(uint64_t *reg_state, hwaddr offset,
                               uint64_t value)
{
    switch (offset) {
    case A_CXL_DEV_MAILBOX_CMD:
        break;
    case A_CXL_DEV_BG_CMD_STS:
        break;
    case A_CXL_DEV_MAILBOX_STS:
        /* Read only register, will get updated by the state machine */
        return;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 64-bit access to 0x%" PRIx64 " (WI)\n",
                      __func__, offset);
        return;
    }

    reg_state[offset / sizeof(*reg_state)] = value;
}

static void mailbox_reg_write(void *opaque, hwaddr offset, uint64_t value,
                              unsigned size)
{
    CXLDeviceState *cxl_dstate;
    CXLCCI *cci = opaque;

    if (object_dynamic_cast(OBJECT(cci->intf), TYPE_CXL_TYPE3)) {
        cxl_dstate = &CXL_TYPE3(cci->intf)->cxl_dstate;
    } else if (object_dynamic_cast(OBJECT(cci->intf),
                                   TYPE_CXL_SWITCH_MAILBOX_CCI)) {
        cxl_dstate = &CXL_SWITCH_MAILBOX_CCI(cci->intf)->cxl_dstate;
    } else {
        return;
    }

    if (offset >= A_CXL_DEV_CMD_PAYLOAD) {
        memcpy(cxl_dstate->mbox_reg_state + offset, &value, size);
        return;
    }

    switch (size) {
    case 4:
        mailbox_mem_writel(cxl_dstate->mbox_reg_state32, offset, value);
        break;
    case 8:
        mailbox_mem_writeq(cxl_dstate->mbox_reg_state64, offset, value);
        break;
    default:
        g_assert_not_reached();
    }

    if (ARRAY_FIELD_EX32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CTRL,
                         DOORBELL)) {
        uint64_t command_reg =
            cxl_dstate->mbox_reg_state64[R_CXL_DEV_MAILBOX_CMD];
        uint8_t cmd_set = FIELD_EX64(command_reg, CXL_DEV_MAILBOX_CMD,
                                     COMMAND_SET);
        uint8_t cmd = FIELD_EX64(command_reg, CXL_DEV_MAILBOX_CMD, COMMAND);
        size_t len_in = FIELD_EX64(command_reg, CXL_DEV_MAILBOX_CMD, LENGTH);
        uint8_t *pl = cxl_dstate->mbox_reg_state + A_CXL_DEV_CMD_PAYLOAD;
        /*
         * Copy taken to avoid need for individual command handlers to care
         * about aliasing.
         */
        g_autofree uint8_t *pl_in_copy = NULL;
        size_t len_out = 0;
        uint64_t status_reg;
        bool bg_started = false;
        int rc;

        pl_in_copy = g_memdup2(pl, len_in);
        if (len_in == 0 || pl_in_copy) {
            /* Avoid stale data - including from earlier cmds */
            memset(pl, 0, CXL_MAILBOX_MAX_PAYLOAD_SIZE);
            rc = cxl_process_cci_message(cci, cmd_set, cmd, len_in, pl_in_copy,
                                         &len_out, pl, &bg_started);
        } else {
            rc = CXL_MBOX_INTERNAL_ERROR;
        }

        /* Set bg and the return code */
        status_reg = FIELD_DP64(0, CXL_DEV_MAILBOX_STS, BG_OP,
                                bg_started ? 1 : 0);
        status_reg = FIELD_DP64(status_reg, CXL_DEV_MAILBOX_STS, ERRNO, rc);
        /* Set the return length */
        command_reg = FIELD_DP64(0, CXL_DEV_MAILBOX_CMD, COMMAND_SET, cmd_set);
        command_reg = FIELD_DP64(command_reg, CXL_DEV_MAILBOX_CMD,
                                 COMMAND, cmd);
        command_reg = FIELD_DP64(command_reg, CXL_DEV_MAILBOX_CMD,
                                 LENGTH, len_out);

        cxl_dstate->mbox_reg_state64[R_CXL_DEV_MAILBOX_CMD] = command_reg;
        cxl_dstate->mbox_reg_state64[R_CXL_DEV_MAILBOX_STS] = status_reg;
        /* Tell the host we're done */
        ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CTRL,
                         DOORBELL, 0);
    }
}
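/*
 * Expected driver sequence (a sketch inferred from the handling above, not
 * normative): the host writes the input payload at A_CXL_DEV_CMD_PAYLOAD,
 * writes the opcode and input length to A_CXL_DEV_MAILBOX_CMD, then sets
 * DOORBELL in A_CXL_DEV_MAILBOX_CTRL. The command runs synchronously within
 * the doorbell write here, so by the time the host polls, DOORBELL is clear
 * and the return code and output payload can be read back. For background
 * commands, BG_OP is set in A_CXL_DEV_MAILBOX_STS and progress is reported
 * through A_CXL_DEV_BG_CMD_STS, synthesized on read in mailbox_reg_read().
 */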
static uint64_t mdev_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    CXLDeviceState *cxl_dstate = opaque;

    return cxl_dstate->memdev_status;
}

static void ro_reg_write(void *opaque, hwaddr offset, uint64_t value,
                         unsigned size)
{
    /* Many register sets are read only */
}

static const MemoryRegionOps mdev_ops = {
    .read = mdev_reg_read,
    .write = ro_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

static const MemoryRegionOps mailbox_ops = {
    .read = mailbox_reg_read,
    .write = mailbox_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

static const MemoryRegionOps dev_ops = {
    .read = dev_reg_read,
    .write = ro_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

static const MemoryRegionOps caps_ops = {
    .read = caps_reg_read,
    .write = ro_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
};

void cxl_device_register_block_init(Object *obj, CXLDeviceState *cxl_dstate,
                                    CXLCCI *cci)
{
    /* This will be a BAR, so needs to be rounded up to pow2 for PCI spec */
    memory_region_init(&cxl_dstate->device_registers, obj, "device-registers",
                       pow2ceil(CXL_MMIO_SIZE));

    memory_region_init_io(&cxl_dstate->caps, obj, &caps_ops, cxl_dstate,
                          "cap-array", CXL_CAPS_SIZE);
    memory_region_init_io(&cxl_dstate->device, obj, &dev_ops, cxl_dstate,
                          "device-status", CXL_DEVICE_STATUS_REGISTERS_LENGTH);
    memory_region_init_io(&cxl_dstate->mailbox, obj, &mailbox_ops, cci,
                          "mailbox", CXL_MAILBOX_REGISTERS_LENGTH);
    memory_region_init_io(&cxl_dstate->memory_device, obj, &mdev_ops,
                          cxl_dstate, "memory device caps",
                          CXL_MEMORY_DEVICE_REGISTERS_LENGTH);

    memory_region_add_subregion(&cxl_dstate->device_registers, 0,
                                &cxl_dstate->caps);
    memory_region_add_subregion(&cxl_dstate->device_registers,
                                CXL_DEVICE_STATUS_REGISTERS_OFFSET,
                                &cxl_dstate->device);
    memory_region_add_subregion(&cxl_dstate->device_registers,
                                CXL_MAILBOX_REGISTERS_OFFSET,
                                &cxl_dstate->mailbox);
    memory_region_add_subregion(&cxl_dstate->device_registers,
                                CXL_MEMORY_DEVICE_REGISTERS_OFFSET,
                                &cxl_dstate->memory_device);
}
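/*
 * Resulting BAR layout, summarizing the subregions mapped above (offsets and
 * lengths come from the CXL_*_REGISTERS_* constants in the headers):
 *   0x0                                  capability array (caps_ops)
 *   CXL_DEVICE_STATUS_REGISTERS_OFFSET   device status registers (dev_ops)
 *   CXL_MAILBOX_REGISTERS_OFFSET         mailbox registers (mailbox_ops)
 *   CXL_MEMORY_DEVICE_REGISTERS_OFFSET   memory device status (mdev_ops)
 */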
void cxl_event_set_status(CXLDeviceState *cxl_dstate, CXLEventLogType log_type,
                          bool available)
{
    if (available) {
        cxl_dstate->event_status |= (1 << log_type);
    } else {
        cxl_dstate->event_status &= ~(1 << log_type);
    }

    ARRAY_FIELD_DP64(cxl_dstate->dev_reg_state64, CXL_DEV_EVENT_STATUS,
                     EVENT_STATUS, cxl_dstate->event_status);
}

static void device_reg_init_common(CXLDeviceState *cxl_dstate)
{
    CXLEventLogType log;

    for (log = 0; log < CXL_EVENT_TYPE_MAX; log++) {
        cxl_event_set_status(cxl_dstate, log, false);
    }
}

static void mailbox_reg_init_common(CXLDeviceState *cxl_dstate)
{
    const uint8_t msi_n = 9;

    /* 2048 payload size */
    ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CAP,
                     PAYLOAD_SIZE, CXL_MAILBOX_PAYLOAD_SHIFT);
    cxl_dstate->payload_size = CXL_MAILBOX_MAX_PAYLOAD_SIZE;
    /* irq support */
    ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CAP,
                     BG_INT_CAP, 1);
    ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CAP,
                     MSI_N, msi_n);
    cxl_dstate->mbox_msi_n = msi_n;
    ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CAP,
                     MBOX_READY_TIME, 0); /* Not reported */
    ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CAP,
                     TYPE, 0); /* Inferred from class code */
}

static void memdev_reg_init_common(CXLDeviceState *cxl_dstate)
{
    uint64_t memdev_status_reg;

    memdev_status_reg = FIELD_DP64(0, CXL_MEM_DEV_STS, MEDIA_STATUS, 1);
    memdev_status_reg = FIELD_DP64(memdev_status_reg, CXL_MEM_DEV_STS,
                                   MBOX_READY, 1);
    cxl_dstate->memdev_status = memdev_status_reg;
}

void cxl_device_register_init_t3(CXLType3Dev *ct3d)
{
    CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
    uint64_t *cap_h = cxl_dstate->caps_reg_state64;
    const int cap_count = 3;

    /* CXL Device Capabilities Array Register */
    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_ID, 0);
    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_VERSION, 1);
    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_COUNT, cap_count);

    cxl_device_cap_init(cxl_dstate, DEVICE_STATUS, 1,
                        CXL_DEVICE_STATUS_VERSION);
    device_reg_init_common(cxl_dstate);

    cxl_device_cap_init(cxl_dstate, MAILBOX, 2, CXL_DEV_MAILBOX_VERSION);
    mailbox_reg_init_common(cxl_dstate);

    cxl_device_cap_init(cxl_dstate, MEMORY_DEVICE, 0x4000,
                        CXL_MEM_DEV_STATUS_VERSION);
    memdev_reg_init_common(cxl_dstate);

    cxl_initialize_mailbox_t3(&ct3d->cci, DEVICE(ct3d),
                              CXL_MAILBOX_MAX_PAYLOAD_SIZE);
}

void cxl_device_register_init_swcci(CSWMBCCIDev *sw)
{
    CXLDeviceState *cxl_dstate = &sw->cxl_dstate;
    uint64_t *cap_h = cxl_dstate->caps_reg_state64;
    const int cap_count = 3;

    /* CXL Device Capabilities Array Register */
    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_ID, 0);
    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_VERSION, 1);
    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_COUNT, cap_count);

    cxl_device_cap_init(cxl_dstate, DEVICE_STATUS, 1, 2);
    device_reg_init_common(cxl_dstate);

    cxl_device_cap_init(cxl_dstate, MAILBOX, 2, 1);
    mailbox_reg_init_common(cxl_dstate);

    cxl_device_cap_init(cxl_dstate, MEMORY_DEVICE, 0x4000, 1);
    memdev_reg_init_common(cxl_dstate);
}

uint64_t cxl_device_get_timestamp(CXLDeviceState *cxl_dstate)
{
    uint64_t time, delta;
    uint64_t final_time = 0;

    if (cxl_dstate->timestamp.set) {
        /* Find the delta from the last time the host set the time. */
        time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
        delta = time - cxl_dstate->timestamp.last_set;
        final_time = cxl_dstate->timestamp.host_set + delta;
    }

    return final_time;
}
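/*
 * Worked example for cxl_device_get_timestamp() (illustrative values): if
 * the host set the timestamp to host_set = 1e9 ns when the virtual clock
 * read last_set = 5e9 ns, then a call made when the virtual clock reads
 * 8e9 ns returns 1e9 + (8e9 - 5e9) = 4e9 ns. Before the host has ever set
 * the time, 0 is returned.
 */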