// SPDX-License-Identifier: GPL-2.0-only
// Copyright(c) 2021 Intel Corporation. All rights reserved.

#include <linux/platform_device.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/sizes.h>
#include <linux/bits.h>
#include <asm/unaligned.h>
#include <crypto/sha2.h>
#include <cxlmem.h>

#include "trace.h"

#define LSA_SIZE SZ_128K
#define FW_SIZE SZ_64M
#define FW_SLOTS 3
#define DEV_SIZE SZ_2G
#define EFFECT(x) (1U << x)

#define MOCK_INJECT_DEV_MAX 8
#define MOCK_INJECT_TEST_MAX 128

static unsigned int poison_inject_dev_max = MOCK_INJECT_DEV_MAX;

enum cxl_command_effects {
	CONF_CHANGE_COLD_RESET = 0,
	CONF_CHANGE_IMMEDIATE,
	DATA_CHANGE_IMMEDIATE,
	POLICY_CHANGE_IMMEDIATE,
	LOG_CHANGE_IMMEDIATE,
	SECURITY_CHANGE_IMMEDIATE,
	BACKGROUND_OP,
	SECONDARY_MBOX_SUPPORTED,
};

#define CXL_CMD_EFFECT_NONE cpu_to_le16(0)

static struct cxl_cel_entry mock_cel[] = {
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_SUPPORTED_LOGS),
		.effect = CXL_CMD_EFFECT_NONE,
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_IDENTIFY),
		.effect = CXL_CMD_EFFECT_NONE,
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_LSA),
		.effect = CXL_CMD_EFFECT_NONE,
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_PARTITION_INFO),
		.effect = CXL_CMD_EFFECT_NONE,
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_SET_LSA),
		.effect = cpu_to_le16(EFFECT(CONF_CHANGE_IMMEDIATE) |
				      EFFECT(DATA_CHANGE_IMMEDIATE)),
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_HEALTH_INFO),
		.effect = CXL_CMD_EFFECT_NONE,
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_POISON),
		.effect = CXL_CMD_EFFECT_NONE,
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_INJECT_POISON),
		.effect = cpu_to_le16(EFFECT(DATA_CHANGE_IMMEDIATE)),
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_CLEAR_POISON),
		.effect = cpu_to_le16(EFFECT(DATA_CHANGE_IMMEDIATE)),
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_FW_INFO),
		.effect = CXL_CMD_EFFECT_NONE,
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_TRANSFER_FW),
		.effect = cpu_to_le16(EFFECT(CONF_CHANGE_COLD_RESET) |
				      EFFECT(BACKGROUND_OP)),
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_ACTIVATE_FW),
		.effect = cpu_to_le16(EFFECT(CONF_CHANGE_COLD_RESET) |
				      EFFECT(CONF_CHANGE_IMMEDIATE)),
	},
};

/* See CXL 2.0 Table 181 Get Health Info Output Payload */
struct cxl_mbox_health_info {
	u8 health_status;
	u8 media_status;
	u8 ext_status;
	u8 life_used;
	__le16 temperature;
	__le32 dirty_shutdowns;
	__le32 volatile_errors;
	__le32 pmem_errors;
} __packed;

static struct {
	struct cxl_mbox_get_supported_logs gsl;
	struct cxl_gsl_entry entry;
} mock_gsl_payload = {
	.gsl = {
		.entries = cpu_to_le16(1),
	},
	.entry = {
		.uuid = DEFINE_CXL_CEL_UUID,
		.size = cpu_to_le32(sizeof(mock_cel)),
	},
};

#define PASS_TRY_LIMIT 3

#define CXL_TEST_EVENT_CNT_MAX 15

/* Set a number of events to return at a time for simulation. */
#define CXL_TEST_EVENT_CNT 3

struct mock_event_log {
	u16 clear_idx;
	u16 cur_idx;
	u16 nr_events;
	u16 nr_overflow;
	u16 overflow_reset;
	struct cxl_event_record_raw *events[CXL_TEST_EVENT_CNT_MAX];
};

struct mock_event_store {
	struct cxl_dev_state *cxlds;
	struct mock_event_log mock_logs[CXL_EVENT_TYPE_MAX];
	u32 ev_status;
};

struct cxl_mockmem_data {
	void *lsa;
	void *fw;
	int fw_slot;
	int fw_staged;
	size_t fw_size;
	u32 security_state;
	u8 user_pass[NVDIMM_PASSPHRASE_LEN];
	u8 master_pass[NVDIMM_PASSPHRASE_LEN];
	int user_limit;
	int master_limit;
	struct mock_event_store mes;
	u8 event_buf[SZ_4K];
	u64 timestamp;
};

static struct mock_event_log *event_find_log(struct device *dev, int log_type)
{
	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);

	if (log_type >= CXL_EVENT_TYPE_MAX)
		return NULL;
	return &mdata->mes.mock_logs[log_type];
}

static struct cxl_event_record_raw *event_get_current(struct mock_event_log *log)
{
	return log->events[log->cur_idx];
}

static void event_reset_log(struct mock_event_log *log)
{
	log->cur_idx = 0;
	log->clear_idx = 0;
	log->nr_overflow = log->overflow_reset;
}

/* Handle can never be 0; use 1-based indexing for handles */
static u16 event_get_clear_handle(struct mock_event_log *log)
{
	return log->clear_idx + 1;
}

/* Handle can never be 0; use 1-based indexing for handles */
static __le16 event_get_cur_event_handle(struct mock_event_log *log)
{
	u16 cur_handle = log->cur_idx + 1;

	return cpu_to_le16(cur_handle);
}

static bool event_log_empty(struct mock_event_log *log)
{
	return log->cur_idx == log->nr_events;
}

static void mes_add_event(struct mock_event_store *mes,
			  enum cxl_event_log_type log_type,
			  struct cxl_event_record_raw *event)
{
	struct mock_event_log *log;

	if (WARN_ON(log_type >= CXL_EVENT_TYPE_MAX))
		return;

	log = &mes->mock_logs[log_type];

	if ((log->nr_events + 1) > CXL_TEST_EVENT_CNT_MAX) {
		log->nr_overflow++;
		log->overflow_reset = log->nr_overflow;
		return;
	}

	log->events[log->nr_events] = event;
	log->nr_events++;
}

static int mock_get_event(struct cxl_dev_state *cxlds,
			  struct cxl_mbox_cmd *cmd)
{
	struct cxl_get_event_payload *pl;
	struct mock_event_log *log;
	u8 log_type;
	int i;

	if (cmd->size_in != sizeof(log_type))
		return -EINVAL;

	if (cmd->size_out < struct_size(pl, records, CXL_TEST_EVENT_CNT))
		return -EINVAL;

	log_type = *((u8 *)cmd->payload_in);
	if (log_type >= CXL_EVENT_TYPE_MAX)
		return -EINVAL;

	memset(cmd->payload_out, 0, cmd->size_out);

	log = event_find_log(cxlds->dev, log_type);
	if (!log || event_log_empty(log))
		return 0;

	pl = cmd->payload_out;

	for (i = 0; i < CXL_TEST_EVENT_CNT && !event_log_empty(log); i++) {
		memcpy(&pl->records[i], event_get_current(log),
		       sizeof(pl->records[i]));
		pl->records[i].hdr.handle = event_get_cur_event_handle(log);
		log->cur_idx++;
	}

	pl->record_count = cpu_to_le16(i);
	if (!event_log_empty(log))
		pl->flags |= CXL_GET_EVENT_FLAG_MORE_RECORDS;

	if (log->nr_overflow) {
		u64 ns;

		pl->flags |= CXL_GET_EVENT_FLAG_OVERFLOW;
		pl->overflow_err_count = cpu_to_le16(log->nr_overflow);
		ns = ktime_get_real_ns();
		ns -= 5000000000; /* 5s ago */
		pl->first_overflow_timestamp = cpu_to_le64(ns);
		ns = ktime_get_real_ns();
		ns -= 1000000000; /* 1s ago */
		pl->last_overflow_timestamp = cpu_to_le64(ns);
	}

	return 0;
}

static int mock_clear_event(struct cxl_dev_state *cxlds,
			    struct cxl_mbox_cmd *cmd)
{
	struct cxl_mbox_clear_event_payload *pl = cmd->payload_in;
	struct mock_event_log *log;
	u8 log_type = pl->event_log;
	u16 handle;
	int nr;

	if (log_type >= CXL_EVENT_TYPE_MAX)
		return -EINVAL;

	log = event_find_log(cxlds->dev, log_type);
	if (!log)
		return 0; /* No mock data in this log */

	/*
	 * Clearing more events than were returned is technically not invalid
	 * per the specification AFAICS (the host could 'guess' handles and
	 * clear them in order). However, it is not good host behavior, so
	 * test for it.
	 */
	if (log->clear_idx + pl->nr_recs > log->cur_idx) {
		dev_err(cxlds->dev,
			"Attempting to clear more events than returned!\n");
		return -EINVAL;
	}

	/* Check handle order prior to clearing events */
	for (nr = 0, handle = event_get_clear_handle(log);
	     nr < pl->nr_recs;
	     nr++, handle++) {
		if (handle != le16_to_cpu(pl->handles[nr])) {
			dev_err(cxlds->dev, "Clearing events out of order\n");
			return -EINVAL;
		}
	}

	if (log->nr_overflow)
		log->nr_overflow = 0;

	/* Clear events */
	log->clear_idx += pl->nr_recs;
	return 0;
}

static void cxl_mock_event_trigger(struct device *dev)
{
	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
	struct mock_event_store *mes = &mdata->mes;
	int i;

	for (i = CXL_EVENT_TYPE_INFO; i < CXL_EVENT_TYPE_MAX; i++) {
		struct mock_event_log *log;

		log = event_find_log(dev, i);
		if (log)
			event_reset_log(log);
	}

	cxl_mem_get_event_records(mes->cxlds, mes->ev_status);
}

struct cxl_event_record_raw maint_needed = {
	.hdr = {
		.id = UUID_INIT(0xBA5EBA11, 0xABCD, 0xEFEB,
				0xa5, 0x5a, 0xa5, 0x5a, 0xa5, 0xa5, 0x5a, 0xa5),
		.length = sizeof(struct cxl_event_record_raw),
		.flags[0] = CXL_EVENT_RECORD_FLAG_MAINT_NEEDED,
		/* .handle = Set dynamically */
		.related_handle = cpu_to_le16(0xa5b6),
	},
	.data = { 0xDE, 0xAD, 0xBE, 0xEF },
};

struct cxl_event_record_raw hardware_replace = {
	.hdr = {
		.id = UUID_INIT(0xABCDEFEB, 0xBA11, 0xBA5E,
				0xa5, 0x5a, 0xa5, 0x5a, 0xa5, 0xa5, 0x5a, 0xa5),
		.length = sizeof(struct cxl_event_record_raw),
		.flags[0] = CXL_EVENT_RECORD_FLAG_HW_REPLACE,
		/* .handle = Set dynamically */
		.related_handle = cpu_to_le16(0xb6a5),
	},
	.data = { 0xDE, 0xAD, 0xBE, 0xEF },
};

struct cxl_event_gen_media gen_media = {
	.hdr = {
		.id = UUID_INIT(0xfbcd0a77, 0xc260, 0x417f,
				0x85, 0xa9, 0x08, 0x8b, 0x16, 0x21, 0xeb, 0xa6),
		.length = sizeof(struct cxl_event_gen_media),
		.flags[0] = CXL_EVENT_RECORD_FLAG_PERMANENT,
		/* .handle = Set dynamically */
		.related_handle = cpu_to_le16(0),
	},
	.phys_addr = cpu_to_le64(0x2000),
	.descriptor = CXL_GMER_EVT_DESC_UNCORECTABLE_EVENT,
	.type = CXL_GMER_MEM_EVT_TYPE_DATA_PATH_ERROR,
	.transaction_type = CXL_GMER_TRANS_HOST_WRITE,
	/* .validity_flags = <set below> */
	.channel = 1,
	.rank = 30
};

struct cxl_event_dram dram = {
	.hdr = {
		.id = UUID_INIT(0x601dcbb3, 0x9c06, 0x4eab,
				0xb8, 0xaf, 0x4e, 0x9b, 0xfb, 0x5c, 0x96, 0x24),
		.length = sizeof(struct cxl_event_dram),
		.flags[0] = CXL_EVENT_RECORD_FLAG_PERF_DEGRADED,
		/* .handle = Set dynamically */
		.related_handle = cpu_to_le16(0),
	},
	.phys_addr = cpu_to_le64(0x8000),
	.descriptor = CXL_GMER_EVT_DESC_THRESHOLD_EVENT,
	.type = CXL_GMER_MEM_EVT_TYPE_INV_ADDR,
	.transaction_type = CXL_GMER_TRANS_INTERNAL_MEDIA_SCRUB,
	/* .validity_flags = <set below> */
	.channel = 1,
	.bank_group = 5,
	.bank = 2,
	.column = {0xDE, 0xAD},
};

struct cxl_event_mem_module mem_module = {
	.hdr = {
		.id = UUID_INIT(0xfe927475, 0xdd59, 0x4339,
				0xa5, 0x86, 0x79, 0xba, 0xb1, 0x13, 0xb7, 0x74),
		.length = sizeof(struct cxl_event_mem_module),
		/* .handle = Set dynamically */
		.related_handle = cpu_to_le16(0),
	},
	.event_type = CXL_MMER_TEMP_CHANGE,
	.info = {
		.health_status = CXL_DHI_HS_PERFORMANCE_DEGRADED,
		.media_status = CXL_DHI_MS_ALL_DATA_LOST,
		.add_status = (CXL_DHI_AS_CRITICAL << 2) |
			      (CXL_DHI_AS_WARNING << 4) |
			      (CXL_DHI_AS_WARNING << 5),
		.device_temp = { 0xDE, 0xAD },
		.dirty_shutdown_cnt = { 0xde, 0xad, 0xbe, 0xef },
		.cor_vol_err_cnt = { 0xde, 0xad, 0xbe, 0xef },
		.cor_per_err_cnt = { 0xde, 0xad, 0xbe, 0xef },
	}
};

static int mock_set_timestamp(struct cxl_dev_state *cxlds,
			      struct cxl_mbox_cmd *cmd)
{
	struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
	struct cxl_mbox_set_timestamp_in *ts = cmd->payload_in;

	if (cmd->size_in != sizeof(*ts))
		return -EINVAL;

	if (cmd->size_out != 0)
		return -EINVAL;

	mdata->timestamp = le64_to_cpu(ts->timestamp);
	return 0;
}

static void cxl_mock_add_event_logs(struct mock_event_store *mes)
{
	put_unaligned_le16(CXL_GMER_VALID_CHANNEL | CXL_GMER_VALID_RANK,
			   &gen_media.validity_flags);

	put_unaligned_le16(CXL_DER_VALID_CHANNEL | CXL_DER_VALID_BANK_GROUP |
			   CXL_DER_VALID_BANK | CXL_DER_VALID_COLUMN,
			   &dram.validity_flags);

	mes_add_event(mes, CXL_EVENT_TYPE_INFO, &maint_needed);
	mes_add_event(mes, CXL_EVENT_TYPE_INFO,
		      (struct cxl_event_record_raw *)&gen_media);
	mes_add_event(mes, CXL_EVENT_TYPE_INFO,
		      (struct cxl_event_record_raw *)&mem_module);
	mes->ev_status |= CXLDEV_EVENT_STATUS_INFO;

	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &maint_needed);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
		      (struct cxl_event_record_raw *)&dram);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
		      (struct cxl_event_record_raw *)&gen_media);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
		      (struct cxl_event_record_raw *)&mem_module);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
		      (struct cxl_event_record_raw *)&dram);
	/* Overflow this log */
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes->ev_status |= CXLDEV_EVENT_STATUS_FAIL;
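
	/* Fatal log: a couple of records, no overflow */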
	mes_add_event(mes, CXL_EVENT_TYPE_FATAL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FATAL,
		      (struct cxl_event_record_raw *)&dram);
	mes->ev_status |= CXLDEV_EVENT_STATUS_FATAL;
}

static int mock_gsl(struct cxl_mbox_cmd *cmd)
{
	if (cmd->size_out < sizeof(mock_gsl_payload))
		return -EINVAL;

	memcpy(cmd->payload_out, &mock_gsl_payload, sizeof(mock_gsl_payload));
	cmd->size_out = sizeof(mock_gsl_payload);

	return 0;
}

static int mock_get_log(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
{
	struct cxl_mbox_get_log *gl = cmd->payload_in;
	u32 offset = le32_to_cpu(gl->offset);
	u32 length = le32_to_cpu(gl->length);
	uuid_t uuid = DEFINE_CXL_CEL_UUID;
	void *data = &mock_cel;

	if (cmd->size_in < sizeof(*gl))
		return -EINVAL;
	if (length > cxlds->payload_size)
		return -EINVAL;
	if (offset + length > sizeof(mock_cel))
		return -EINVAL;
	if (!uuid_equal(&gl->uuid, &uuid))
		return -EINVAL;
	if (length > cmd->size_out)
		return -EINVAL;

	memcpy(cmd->payload_out, data + offset, length);

	return 0;
}

static int mock_rcd_id(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
{
	struct cxl_mbox_identify id = {
		.fw_revision = { "mock fw v1 " },
		.total_capacity =
			cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER),
		.volatile_capacity =
			cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER),
	};

	if (cmd->size_out < sizeof(id))
		return -EINVAL;

	memcpy(cmd->payload_out, &id, sizeof(id));

	return 0;
}

static int mock_id(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
{
	struct cxl_mbox_identify id = {
		.fw_revision = { "mock fw v1 " },
		.lsa_size = cpu_to_le32(LSA_SIZE),
		.partition_align =
			cpu_to_le64(SZ_256M / CXL_CAPACITY_MULTIPLIER),
		.total_capacity =
			cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER),
		.inject_poison_limit = cpu_to_le16(MOCK_INJECT_TEST_MAX),
	};

	put_unaligned_le24(CXL_POISON_LIST_MAX, id.poison_list_max_mer);

	if (cmd->size_out < sizeof(id))
		return -EINVAL;

	memcpy(cmd->payload_out, &id, sizeof(id));

	return 0;
}

static int mock_partition_info(struct cxl_dev_state *cxlds,
			       struct cxl_mbox_cmd *cmd)
{
	struct cxl_mbox_get_partition_info pi = {
		.active_volatile_cap =
			cpu_to_le64(DEV_SIZE / 2 / CXL_CAPACITY_MULTIPLIER),
		.active_persistent_cap =
			cpu_to_le64(DEV_SIZE / 2 / CXL_CAPACITY_MULTIPLIER),
	};

	if (cmd->size_out < sizeof(pi))
		return -EINVAL;

	memcpy(cmd->payload_out, &pi, sizeof(pi));

	return 0;
}

static int mock_get_security_state(struct cxl_dev_state *cxlds,
				   struct cxl_mbox_cmd *cmd)
{
	struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);

	if (cmd->size_in)
		return -EINVAL;

	if (cmd->size_out != sizeof(u32))
		return -EINVAL;

	memcpy(cmd->payload_out, &mdata->security_state, sizeof(u32));

	return 0;
}

static void master_plimit_check(struct cxl_mockmem_data *mdata)
{
	if (mdata->master_limit == PASS_TRY_LIMIT)
		return;
	mdata->master_limit++;
	if (mdata->master_limit == PASS_TRY_LIMIT)
		mdata->security_state |= CXL_PMEM_SEC_STATE_MASTER_PLIMIT;
}

static void user_plimit_check(struct cxl_mockmem_data *mdata)
{
	if (mdata->user_limit == PASS_TRY_LIMIT)
		return;
	mdata->user_limit++;
	if (mdata->user_limit == PASS_TRY_LIMIT)
		mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PLIMIT;
}

static int mock_set_passphrase(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
{
	struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
	struct cxl_set_pass *set_pass;

	if (cmd->size_in != sizeof(*set_pass))
		return -EINVAL;

	if (cmd->size_out != 0)
		return -EINVAL;

	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	set_pass = cmd->payload_in;
	switch (set_pass->type) {
	case CXL_PMEM_SEC_PASS_MASTER:
		if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT) {
			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
			return -ENXIO;
		}
		/*
		 * CXL spec rev3.0 8.2.9.8.6.2: the master passphrase shall
		 * only be set in the security disabled state when the user
		 * passphrase is not set.
		 */
		if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
			return -ENXIO;
		}
		if (memcmp(mdata->master_pass, set_pass->old_pass, NVDIMM_PASSPHRASE_LEN)) {
			master_plimit_check(mdata);
			cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
			return -ENXIO;
		}
		memcpy(mdata->master_pass, set_pass->new_pass, NVDIMM_PASSPHRASE_LEN);
		mdata->security_state |= CXL_PMEM_SEC_STATE_MASTER_PASS_SET;
		return 0;

	case CXL_PMEM_SEC_PASS_USER:
		if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT) {
			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
			return -ENXIO;
		}
		if (memcmp(mdata->user_pass, set_pass->old_pass, NVDIMM_PASSPHRASE_LEN)) {
			user_plimit_check(mdata);
			cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
			return -ENXIO;
		}
		memcpy(mdata->user_pass, set_pass->new_pass, NVDIMM_PASSPHRASE_LEN);
		mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PASS_SET;
		return 0;

	default:
		cmd->return_code = CXL_MBOX_CMD_RC_INPUT;
	}
	return -EINVAL;
}

static int mock_disable_passphrase(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
{
	struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
	struct cxl_disable_pass *dis_pass;

	if (cmd->size_in != sizeof(*dis_pass))
		return -EINVAL;

	if (cmd->size_out != 0)
		return -EINVAL;

	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	dis_pass = cmd->payload_in;
	switch (dis_pass->type) {
	case CXL_PMEM_SEC_PASS_MASTER:
		if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT) {
			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
			return -ENXIO;
		}

		if (!(mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PASS_SET)) {
			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
			return -ENXIO;
		}

		if (memcmp(dis_pass->pass, mdata->master_pass, NVDIMM_PASSPHRASE_LEN)) {
			master_plimit_check(mdata);
			cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
			return -ENXIO;
		}

		mdata->master_limit = 0;
		memset(mdata->master_pass, 0, NVDIMM_PASSPHRASE_LEN);
		mdata->security_state &= ~CXL_PMEM_SEC_STATE_MASTER_PASS_SET;
		return 0;

	case CXL_PMEM_SEC_PASS_USER:
		if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT) {
			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
			return -ENXIO;
		}

		if (!(mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET)) {
			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
			return -ENXIO;
		}

		if (memcmp(dis_pass->pass, mdata->user_pass, NVDIMM_PASSPHRASE_LEN)) {
			user_plimit_check(mdata);
			cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
			return -ENXIO;
		}

		mdata->user_limit = 0;
		memset(mdata->user_pass, 0, NVDIMM_PASSPHRASE_LEN);
		mdata->security_state &= ~(CXL_PMEM_SEC_STATE_USER_PASS_SET |
					   CXL_PMEM_SEC_STATE_LOCKED);
		return 0;

	default:
		cmd->return_code = CXL_MBOX_CMD_RC_INPUT;
		return -EINVAL;
	}

	return 0;
}

static int mock_freeze_security(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
{
	struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);

	if (cmd->size_in != 0)
		return -EINVAL;

	if (cmd->size_out != 0)
		return -EINVAL;

	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN)
		return 0;

	mdata->security_state |= CXL_PMEM_SEC_STATE_FROZEN;
	return 0;
}

static int mock_unlock_security(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
{
	struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);

	if (cmd->size_in != NVDIMM_PASSPHRASE_LEN)
		return -EINVAL;

	if (cmd->size_out != 0)
		return -EINVAL;

	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	if (!(mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET)) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	if (!(mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED)) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	if (memcmp(cmd->payload_in, mdata->user_pass, NVDIMM_PASSPHRASE_LEN)) {
		if (++mdata->user_limit == PASS_TRY_LIMIT)
			mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PLIMIT;
		cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
		return -ENXIO;
	}

	mdata->user_limit = 0;
	mdata->security_state &= ~CXL_PMEM_SEC_STATE_LOCKED;
	return 0;
}

static int mock_passphrase_secure_erase(struct cxl_dev_state *cxlds,
					struct cxl_mbox_cmd *cmd)
{
	struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
	struct cxl_pass_erase *erase;

	if (cmd->size_in != sizeof(*erase))
		return -EINVAL;

	if (cmd->size_out != 0)
		return -EINVAL;

	erase = cmd->payload_in;
	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT &&
	    erase->type == CXL_PMEM_SEC_PASS_USER) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT &&
	    erase->type == CXL_PMEM_SEC_PASS_MASTER) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	switch (erase->type) {
	case CXL_PMEM_SEC_PASS_MASTER:
		/*
		 * The spec does not clearly define the behavior when a master
		 * passphrase is passed in while neither the master passphrase
		 * nor the user passphrase is set. The code assumes it behaves
		 * the same as a CXL secure erase command without passphrase
		 * (0x4401).
		 */
		if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PASS_SET) {
			if (memcmp(mdata->master_pass, erase->pass,
				   NVDIMM_PASSPHRASE_LEN)) {
				master_plimit_check(mdata);
				cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
				return -ENXIO;
			}
			mdata->master_limit = 0;
			mdata->user_limit = 0;
			mdata->security_state &= ~CXL_PMEM_SEC_STATE_USER_PASS_SET;
			memset(mdata->user_pass, 0, NVDIMM_PASSPHRASE_LEN);
			mdata->security_state &= ~CXL_PMEM_SEC_STATE_LOCKED;
		} else {
			/*
			 * CXL rev3 8.2.9.8.6.3 Disable Passphrase
			 * When the master passphrase is disabled, the device
			 * shall return Invalid Input for the Passphrase Secure
			 * Erase command with master passphrase.
			 */
			return -EINVAL;
		}
		/* Scramble encryption keys so that data is effectively erased */
		break;
	case CXL_PMEM_SEC_PASS_USER:
		/*
		 * The spec does not clearly define the behavior when a user
		 * passphrase is passed in while the user passphrase is not
		 * set. The code assumes it behaves the same as a CXL secure
		 * erase command without passphrase (0x4401).
		 */
		if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
			if (memcmp(mdata->user_pass, erase->pass,
				   NVDIMM_PASSPHRASE_LEN)) {
				user_plimit_check(mdata);
				cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
				return -ENXIO;
			}
			mdata->user_limit = 0;
			mdata->security_state &= ~CXL_PMEM_SEC_STATE_USER_PASS_SET;
			memset(mdata->user_pass, 0, NVDIMM_PASSPHRASE_LEN);
		}

		/*
		 * CXL rev3 Table 8-118
		 * If the user passphrase is not set or supported by the
		 * device, the current passphrase value is ignored. Assume the
		 * operation proceeds as a secure erase without passphrase,
		 * since the spec is not explicit.
		 */

		/* Scramble encryption keys so that data is effectively erased */
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int mock_get_lsa(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
{
	struct cxl_mbox_get_lsa *get_lsa = cmd->payload_in;
	struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
	void *lsa = mdata->lsa;
	u32 offset, length;

	if (sizeof(*get_lsa) > cmd->size_in)
		return -EINVAL;
	offset = le32_to_cpu(get_lsa->offset);
	length = le32_to_cpu(get_lsa->length);
	if (offset + length > LSA_SIZE)
		return -EINVAL;
	if (length > cmd->size_out)
		return -EINVAL;

	memcpy(cmd->payload_out, lsa + offset, length);
	return 0;
}

static int mock_set_lsa(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
{
	struct cxl_mbox_set_lsa *set_lsa = cmd->payload_in;
	struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
	void *lsa = mdata->lsa;
	u32 offset, length;

	if (sizeof(*set_lsa) > cmd->size_in)
		return -EINVAL;
	offset = le32_to_cpu(set_lsa->offset);
	length = cmd->size_in - sizeof(*set_lsa);
	if (offset + length > LSA_SIZE)
		return -EINVAL;

	memcpy(lsa + offset, &set_lsa->data[0], length);
	return 0;
}

static int mock_health_info(struct cxl_dev_state *cxlds,
			    struct cxl_mbox_cmd *cmd)
{
	struct cxl_mbox_health_info health_info = {
		/* set flags for maint needed, perf degraded, hw replacement */
		.health_status = 0x7,
		/* set media status to "All Data Lost" */
		.media_status = 0x3,
		/*
		 * set ext_status flags for:
		 *  ext_life_used: normal,
		 *  ext_temperature: critical,
		 *  ext_corrected_volatile: warning,
		 *  ext_corrected_persistent: normal,
		 */
		.ext_status = 0x18,
		.life_used = 15,
		.temperature = cpu_to_le16(25),
		.dirty_shutdowns = cpu_to_le32(10),
		.volatile_errors = cpu_to_le32(20),
		.pmem_errors = cpu_to_le32(30),
	};

	if (cmd->size_out < sizeof(health_info))
		return -EINVAL;

	memcpy(cmd->payload_out, &health_info, sizeof(health_info));
	return 0;
}

static struct mock_poison {
	struct cxl_dev_state *cxlds;
	u64 dpa;
} mock_poison_list[MOCK_INJECT_TEST_MAX];

static struct cxl_mbox_poison_out *
cxl_get_injected_po(struct cxl_dev_state *cxlds, u64 offset, u64 length)
{
	struct cxl_mbox_poison_out *po;
	int nr_records = 0;
	u64 dpa;

	po = kzalloc(struct_size(po, record, poison_inject_dev_max), GFP_KERNEL);
	if (!po)
		return NULL;

	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
		if (mock_poison_list[i].cxlds != cxlds)
			continue;
		if (mock_poison_list[i].dpa < offset ||
		    mock_poison_list[i].dpa > offset + length - 1)
			continue;

		dpa = mock_poison_list[i].dpa + CXL_POISON_SOURCE_INJECTED;
		po->record[nr_records].address = cpu_to_le64(dpa);
		po->record[nr_records].length = cpu_to_le32(1);
		nr_records++;
		if (nr_records == poison_inject_dev_max)
			break;
	}

	/* Always return count, even when zero */
	po->count = cpu_to_le16(nr_records);

	return po;
}
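
/*
 * Get Poison List: report only the poison previously injected against this
 * device that falls within the requested DPA range.
 */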
static int mock_get_poison(struct cxl_dev_state *cxlds,
			   struct cxl_mbox_cmd *cmd)
{
	struct cxl_mbox_poison_in *pi = cmd->payload_in;
	struct cxl_mbox_poison_out *po;
	u64 offset = le64_to_cpu(pi->offset);
	u64 length = le64_to_cpu(pi->length);
	int nr_records;

	po = cxl_get_injected_po(cxlds, offset, length);
	if (!po)
		return -ENOMEM;
	nr_records = le16_to_cpu(po->count);
	memcpy(cmd->payload_out, po, struct_size(po, record, nr_records));
	cmd->size_out = struct_size(po, record, nr_records);
	kfree(po);

	return 0;
}

static bool mock_poison_dev_max_injected(struct cxl_dev_state *cxlds)
{
	int count = 0;

	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
		if (mock_poison_list[i].cxlds == cxlds)
			count++;
	}
	return (count >= poison_inject_dev_max);
}

static bool mock_poison_add(struct cxl_dev_state *cxlds, u64 dpa)
{
	if (mock_poison_dev_max_injected(cxlds)) {
		dev_dbg(cxlds->dev,
			"Device poison injection limit has been reached: %u\n",
			poison_inject_dev_max);
		return false;
	}

	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
		if (!mock_poison_list[i].cxlds) {
			mock_poison_list[i].cxlds = cxlds;
			mock_poison_list[i].dpa = dpa;
			return true;
		}
	}
	dev_dbg(cxlds->dev,
		"Mock test poison injection limit has been reached: %d\n",
		MOCK_INJECT_TEST_MAX);

	return false;
}

static bool mock_poison_found(struct cxl_dev_state *cxlds, u64 dpa)
{
	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
		if (mock_poison_list[i].cxlds == cxlds &&
		    mock_poison_list[i].dpa == dpa)
			return true;
	}
	return false;
}

static int mock_inject_poison(struct cxl_dev_state *cxlds,
			      struct cxl_mbox_cmd *cmd)
{
	struct cxl_mbox_inject_poison *pi = cmd->payload_in;
	u64 dpa = le64_to_cpu(pi->address);

	if (mock_poison_found(cxlds, dpa)) {
		/* Not an error to inject poison if already poisoned */
		dev_dbg(cxlds->dev, "DPA: 0x%llx already poisoned\n", dpa);
		return 0;
	}
	if (!mock_poison_add(cxlds, dpa))
		return -ENXIO;

	return 0;
}

static bool mock_poison_del(struct cxl_dev_state *cxlds, u64 dpa)
{
	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
		if (mock_poison_list[i].cxlds == cxlds &&
		    mock_poison_list[i].dpa == dpa) {
			mock_poison_list[i].cxlds = NULL;
			return true;
		}
	}
	return false;
}

static int mock_clear_poison(struct cxl_dev_state *cxlds,
			     struct cxl_mbox_cmd *cmd)
{
	struct cxl_mbox_clear_poison *pi = cmd->payload_in;
	u64 dpa = le64_to_cpu(pi->address);

	/*
	 * A real CXL device will write pi->write_data to the address
	 * being cleared. In this mock, just delete this address from
	 * the mock poison list.
	 */
	if (!mock_poison_del(cxlds, dpa))
		dev_dbg(cxlds->dev, "DPA: 0x%llx not in poison list\n", dpa);

	return 0;
}

static bool mock_poison_list_empty(void)
{
	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
		if (mock_poison_list[i].cxlds)
			return false;
	}
	return true;
}

static ssize_t poison_inject_max_show(struct device_driver *drv, char *buf)
{
	return sysfs_emit(buf, "%u\n", poison_inject_dev_max);
}

static ssize_t poison_inject_max_store(struct device_driver *drv,
				       const char *buf, size_t len)
{
	int val;

	if (kstrtoint(buf, 0, &val) < 0)
		return -EINVAL;

	if (!mock_poison_list_empty())
		return -EBUSY;

	if (val >= 0 && val <= MOCK_INJECT_TEST_MAX)
		poison_inject_dev_max = val;
	else
		return -EINVAL;

	return len;
}

static DRIVER_ATTR_RW(poison_inject_max);

static struct attribute *cxl_mock_mem_core_attrs[] = {
	&driver_attr_poison_inject_max.attr,
	NULL
};
ATTRIBUTE_GROUPS(cxl_mock_mem_core);

static int mock_fw_info(struct cxl_dev_state *cxlds,
			struct cxl_mbox_cmd *cmd)
{
	struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
	struct cxl_mbox_get_fw_info fw_info = {
		.num_slots = FW_SLOTS,
		.slot_info = (mdata->fw_slot & 0x7) |
			     ((mdata->fw_staged & 0x7) << 3),
		.activation_cap = 0,
	};

	strcpy(fw_info.slot_1_revision, "cxl_test_fw_001");
	strcpy(fw_info.slot_2_revision, "cxl_test_fw_002");
	strcpy(fw_info.slot_3_revision, "cxl_test_fw_003");
	strcpy(fw_info.slot_4_revision, "");

	if (cmd->size_out < sizeof(fw_info))
		return -EINVAL;

	memcpy(cmd->payload_out, &fw_info, sizeof(fw_info));
	return 0;
}

static int mock_transfer_fw(struct cxl_dev_state *cxlds,
			    struct cxl_mbox_cmd *cmd)
{
	struct cxl_mbox_transfer_fw *transfer = cmd->payload_in;
	struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
	void *fw = mdata->fw;
	size_t offset, length;

	offset = le32_to_cpu(transfer->offset) * CXL_FW_TRANSFER_ALIGNMENT;
	length = cmd->size_in - sizeof(*transfer);
	if (offset + length > FW_SIZE)
		return -EINVAL;

	switch (transfer->action) {
	case CXL_FW_TRANSFER_ACTION_FULL:
		if (offset != 0)
			return -EINVAL;
		fallthrough;
	case CXL_FW_TRANSFER_ACTION_END:
		if (transfer->slot == 0 || transfer->slot > FW_SLOTS)
			return -EINVAL;
		mdata->fw_size = offset + length;
		break;
	case CXL_FW_TRANSFER_ACTION_INITIATE:
	case CXL_FW_TRANSFER_ACTION_CONTINUE:
		break;
	case CXL_FW_TRANSFER_ACTION_ABORT:
		return 0;
	default:
		return -EINVAL;
	}

	memcpy(fw + offset, transfer->data, length);
	return 0;
}

static int mock_activate_fw(struct cxl_dev_state *cxlds,
			    struct cxl_mbox_cmd *cmd)
{
	struct cxl_mbox_activate_fw *activate = cmd->payload_in;
	struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);

	if (activate->slot == 0 || activate->slot > FW_SLOTS)
		return -EINVAL;

	switch (activate->action) {
	case CXL_FW_ACTIVATE_ONLINE:
		mdata->fw_slot = activate->slot;
		mdata->fw_staged = 0;
		return 0;
	case CXL_FW_ACTIVATE_OFFLINE:
		mdata->fw_staged = activate->slot;
		return 0;
	}

	return -EINVAL;
}
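
/*
 * Route incoming mailbox commands to their mock handlers. Opcodes without a
 * handler fall through and report -EIO.
 */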
static int cxl_mock_mbox_send(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
{
	struct device *dev = cxlds->dev;
	int rc = -EIO;

	switch (cmd->opcode) {
	case CXL_MBOX_OP_SET_TIMESTAMP:
		rc = mock_set_timestamp(cxlds, cmd);
		break;
	case CXL_MBOX_OP_GET_SUPPORTED_LOGS:
		rc = mock_gsl(cmd);
		break;
	case CXL_MBOX_OP_GET_LOG:
		rc = mock_get_log(cxlds, cmd);
		break;
	case CXL_MBOX_OP_IDENTIFY:
		if (cxlds->rcd)
			rc = mock_rcd_id(cxlds, cmd);
		else
			rc = mock_id(cxlds, cmd);
		break;
	case CXL_MBOX_OP_GET_LSA:
		rc = mock_get_lsa(cxlds, cmd);
		break;
	case CXL_MBOX_OP_GET_PARTITION_INFO:
		rc = mock_partition_info(cxlds, cmd);
		break;
	case CXL_MBOX_OP_GET_EVENT_RECORD:
		rc = mock_get_event(cxlds, cmd);
		break;
	case CXL_MBOX_OP_CLEAR_EVENT_RECORD:
		rc = mock_clear_event(cxlds, cmd);
		break;
	case CXL_MBOX_OP_SET_LSA:
		rc = mock_set_lsa(cxlds, cmd);
		break;
	case CXL_MBOX_OP_GET_HEALTH_INFO:
		rc = mock_health_info(cxlds, cmd);
		break;
	case CXL_MBOX_OP_GET_SECURITY_STATE:
		rc = mock_get_security_state(cxlds, cmd);
		break;
	case CXL_MBOX_OP_SET_PASSPHRASE:
		rc = mock_set_passphrase(cxlds, cmd);
		break;
	case CXL_MBOX_OP_DISABLE_PASSPHRASE:
		rc = mock_disable_passphrase(cxlds, cmd);
		break;
	case CXL_MBOX_OP_FREEZE_SECURITY:
		rc = mock_freeze_security(cxlds, cmd);
		break;
	case CXL_MBOX_OP_UNLOCK:
		rc = mock_unlock_security(cxlds, cmd);
		break;
	case CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE:
		rc = mock_passphrase_secure_erase(cxlds, cmd);
		break;
	case CXL_MBOX_OP_GET_POISON:
		rc = mock_get_poison(cxlds, cmd);
		break;
	case CXL_MBOX_OP_INJECT_POISON:
		rc = mock_inject_poison(cxlds, cmd);
		break;
	case CXL_MBOX_OP_CLEAR_POISON:
		rc = mock_clear_poison(cxlds, cmd);
		break;
	case CXL_MBOX_OP_GET_FW_INFO:
		rc = mock_fw_info(cxlds, cmd);
		break;
	case CXL_MBOX_OP_TRANSFER_FW:
		rc = mock_transfer_fw(cxlds, cmd);
		break;
	case CXL_MBOX_OP_ACTIVATE_FW:
		rc = mock_activate_fw(cxlds, cmd);
		break;
	default:
		break;
	}

	dev_dbg(dev, "opcode: %#x sz_in: %zd sz_out: %zd rc: %d\n", cmd->opcode,
		cmd->size_in, cmd->size_out, rc);

	return rc;
}

static void label_area_release(void *lsa)
{
	vfree(lsa);
}

static void fw_buf_release(void *buf)
{
	vfree(buf);
}

static bool is_rcd(struct platform_device *pdev)
{
	const struct platform_device_id *id = platform_get_device_id(pdev);

	return !!id->driver_data;
}

static ssize_t event_trigger_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	cxl_mock_event_trigger(dev);
	return count;
}
static DEVICE_ATTR_WO(event_trigger);

static int cxl_mock_mem_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct cxl_memdev *cxlmd;
	struct cxl_dev_state *cxlds;
	struct cxl_mockmem_data *mdata;
	int rc;

	mdata = devm_kzalloc(dev, sizeof(*mdata), GFP_KERNEL);
	if (!mdata)
		return -ENOMEM;
	dev_set_drvdata(dev, mdata);

	mdata->lsa = vmalloc(LSA_SIZE);
	if (!mdata->lsa)
		return -ENOMEM;
	mdata->fw = vmalloc(FW_SIZE);
	if (!mdata->fw)
		return -ENOMEM;
	mdata->fw_slot = 2;
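
	/* Free the vmalloc()'d label area and firmware buffers on teardown */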
	rc = devm_add_action_or_reset(dev, label_area_release, mdata->lsa);
	if (rc)
		return rc;

	rc = devm_add_action_or_reset(dev, fw_buf_release, mdata->fw);
	if (rc)
		return rc;

	cxlds = cxl_dev_state_create(dev);
	if (IS_ERR(cxlds))
		return PTR_ERR(cxlds);

	cxlds->serial = pdev->id;
	cxlds->mbox_send = cxl_mock_mbox_send;
	cxlds->payload_size = SZ_4K;
	cxlds->event.buf = (struct cxl_get_event_payload *)mdata->event_buf;
	if (is_rcd(pdev)) {
		cxlds->rcd = true;
		cxlds->component_reg_phys = CXL_RESOURCE_NONE;
	}

	rc = cxl_enumerate_cmds(cxlds);
	if (rc)
		return rc;

	rc = cxl_poison_state_init(cxlds);
	if (rc)
		return rc;

	rc = cxl_set_timestamp(cxlds);
	if (rc)
		return rc;

	cxlds->media_ready = true;
	rc = cxl_dev_state_identify(cxlds);
	if (rc)
		return rc;

	rc = cxl_mem_create_range_info(cxlds);
	if (rc)
		return rc;

	mdata->mes.cxlds = cxlds;
	cxl_mock_add_event_logs(&mdata->mes);

	cxlmd = devm_cxl_add_memdev(cxlds);
	if (IS_ERR(cxlmd))
		return PTR_ERR(cxlmd);

	rc = cxl_memdev_setup_fw_upload(cxlds);
	if (rc)
		return rc;

	cxl_mem_get_event_records(cxlds, CXLDEV_EVENT_STATUS_ALL);

	return 0;
}

static ssize_t security_lock_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%u\n",
			  !!(mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED));
}

static ssize_t security_lock_store(struct device *dev, struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
	u32 mask = CXL_PMEM_SEC_STATE_FROZEN | CXL_PMEM_SEC_STATE_USER_PLIMIT |
		   CXL_PMEM_SEC_STATE_MASTER_PLIMIT;
	int val;

	if (kstrtoint(buf, 0, &val) < 0)
		return -EINVAL;

	if (val == 1) {
		if (!(mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET))
			return -ENXIO;
		mdata->security_state |= CXL_PMEM_SEC_STATE_LOCKED;
		mdata->security_state &= ~mask;
	} else {
		return -EINVAL;
	}
	return count;
}

static DEVICE_ATTR_RW(security_lock);

static ssize_t fw_buf_checksum_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
	u8 hash[SHA256_DIGEST_SIZE];
	unsigned char *hstr, *hptr;
	struct sha256_state sctx;
	ssize_t written = 0;
	int i;

	sha256_init(&sctx);
	sha256_update(&sctx, mdata->fw, mdata->fw_size);
	sha256_final(&sctx, hash);

	hstr = kzalloc((SHA256_DIGEST_SIZE * 2) + 1, GFP_KERNEL);
	if (!hstr)
		return -ENOMEM;

	hptr = hstr;
	for (i = 0; i < SHA256_DIGEST_SIZE; i++)
		hptr += sprintf(hptr, "%02x", hash[i]);

	written = sysfs_emit(buf, "%s\n", hstr);

	kfree(hstr);
	return written;
}

static DEVICE_ATTR_RO(fw_buf_checksum);

static struct attribute *cxl_mock_mem_attrs[] = {
	&dev_attr_security_lock.attr,
	&dev_attr_event_trigger.attr,
	&dev_attr_fw_buf_checksum.attr,
	NULL
};
ATTRIBUTE_GROUPS(cxl_mock_mem);

static const struct platform_device_id cxl_mock_mem_ids[] = {
	{ .name = "cxl_mem", 0 },
	{ .name = "cxl_rcd", 1 },
	{ },
};
MODULE_DEVICE_TABLE(platform, cxl_mock_mem_ids);

static struct platform_driver cxl_mock_mem_driver = {
	.probe = cxl_mock_mem_probe,
	.id_table = cxl_mock_mem_ids,
	.driver = {
		.name = KBUILD_MODNAME,
		.dev_groups = cxl_mock_mem_groups,
		.groups = cxl_mock_mem_core_groups,
	},
};

module_platform_driver(cxl_mock_mem_driver);
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(CXL);