// SPDX-License-Identifier: GPL-2.0-only
// Copyright(c) 2021 Intel Corporation. All rights reserved.

#include <linux/platform_device.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/sizes.h>
#include <linux/bits.h>
#include <asm/unaligned.h>
#include <cxlmem.h>

#include "trace.h"

/* Size of the emulated label storage area (LSA) */
#define LSA_SIZE SZ_128K
/* Total capacity advertised by the mock device */
#define DEV_SIZE SZ_2G
/* Build a command-effects bit for a CEL entry */
#define EFFECT(x) (1U << x)

/* Default per-device cap on injected poison records */
#define MOCK_INJECT_DEV_MAX 8
/* Global cap on poison records across all mock devices */
#define MOCK_INJECT_TEST_MAX 128

static unsigned int poison_inject_dev_max = MOCK_INJECT_DEV_MAX;

/*
 * Command Effects Log (CEL): the set of mailbox opcodes this mock
 * device reports as supported, with their effect flags.
 */
static struct cxl_cel_entry mock_cel[] = {
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_SUPPORTED_LOGS),
		.effect = cpu_to_le16(0),
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_IDENTIFY),
		.effect = cpu_to_le16(0),
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_LSA),
		.effect = cpu_to_le16(0),
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_PARTITION_INFO),
		.effect = cpu_to_le16(0),
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_SET_LSA),
		/* Set LSA mutates device state: immediate config + data change */
		.effect = cpu_to_le16(EFFECT(1) | EFFECT(2)),
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_HEALTH_INFO),
		.effect = cpu_to_le16(0),
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_POISON),
		.effect = cpu_to_le16(0),
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_INJECT_POISON),
		.effect = cpu_to_le16(0),
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_CLEAR_POISON),
		.effect = cpu_to_le16(0),
	},
};

/* See CXL 2.0 Table 181 Get Health Info Output Payload */
struct cxl_mbox_health_info {
	u8 health_status;
	u8 media_status;
	u8 ext_status;
	u8 life_used;
	__le16 temperature;
	__le32 dirty_shutdowns;
	__le32 volatile_errors;
	__le32 pmem_errors;
} __packed;

/*
 * Canned Get Supported Logs response: exactly one log entry,
 * describing the CEL above.
 */
static struct {
	struct cxl_mbox_get_supported_logs gsl;
	struct cxl_gsl_entry entry;
} mock_gsl_payload = {
	.gsl = {
		.entries = cpu_to_le16(1),
	},
	.entry = {
		.uuid = DEFINE_CXL_CEL_UUID,
		.size = cpu_to_le32(sizeof(mock_cel)),
	},
};

/* Passphrase attempts allowed before the limit-reached state is set */
#define PASS_TRY_LIMIT 3

/* Capacity of one mock event log */
#define CXL_TEST_EVENT_CNT_MAX 15

/* Set a number of events to return at a time for simulation. */
#define CXL_TEST_EVENT_CNT 3

/* One simulated event log (one per CXL event log type) */
struct mock_event_log {
	u16 clear_idx;      /* index of next record expected by Clear Events */
	u16 cur_idx;        /* index of next record handed out by Get Events */
	u16 nr_events;      /* number of valid entries in events[] */
	u16 nr_overflow;    /* events dropped because the log was full */
	u16 overflow_reset; /* overflow count restored by event_reset_log() */
	struct cxl_event_record_raw *events[CXL_TEST_EVENT_CNT_MAX];
};

struct mock_event_store {
	struct cxl_dev_state *cxlds;
	struct mock_event_log mock_logs[CXL_EVENT_TYPE_MAX];
	u32 ev_status; /* CXLDEV_EVENT_STATUS_* bits for populated logs */
};

/* Per mock-device driver data, stored in drvdata */
struct cxl_mockmem_data {
	void *lsa; /* backing buffer for the label storage area */
	u32 security_state; /* CXL_PMEM_SEC_STATE_* flags */
	u8 user_pass[NVDIMM_PASSPHRASE_LEN];
	u8 master_pass[NVDIMM_PASSPHRASE_LEN];
	int user_limit;   /* failed user passphrase attempts */
	int master_limit; /* failed master passphrase attempts */
	struct mock_event_store mes;
	u8 event_buf[SZ_4K];
	u64 timestamp; /* last value written via Set Timestamp */
};

/* Look up the mock log for @log_type, or NULL if the type is out of range */
static struct mock_event_log *event_find_log(struct device *dev, int log_type)
{
	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);

	if (log_type >= CXL_EVENT_TYPE_MAX)
		return NULL;
	return &mdata->mes.mock_logs[log_type];
}

/* Next record to be returned by Get Events */
static struct cxl_event_record_raw *event_get_current(struct mock_event_log *log)
{
	return log->events[log->cur_idx];
}

/* Rewind a log so its events (and recorded overflow) can be replayed */
static void event_reset_log(struct mock_event_log *log)
{
	log->cur_idx = 0;
	log->clear_idx = 0;
	log->nr_overflow = log->overflow_reset;
}

/* Handle can never be 0 use 1 based indexing for handle */
static u16 event_get_clear_handle(struct mock_event_log *log)
{
	return log->clear_idx + 1;
}

/* Handle can never be 0 use 1 based indexing for handle */
static __le16 event_get_cur_event_handle(struct mock_event_log *log)
{
	u16 cur_handle = log->cur_idx + 1;

	return cpu_to_le16(cur_handle);
}

/* True when every stored event has been handed out */
static bool event_log_empty(struct mock_event_log *log)
{
	return log->cur_idx == log->nr_events;
}

static void
mes_add_event(struct mock_event_store *mes, 163 enum cxl_event_log_type log_type, 164 struct cxl_event_record_raw *event) 165 { 166 struct mock_event_log *log; 167 168 if (WARN_ON(log_type >= CXL_EVENT_TYPE_MAX)) 169 return; 170 171 log = &mes->mock_logs[log_type]; 172 173 if ((log->nr_events + 1) > CXL_TEST_EVENT_CNT_MAX) { 174 log->nr_overflow++; 175 log->overflow_reset = log->nr_overflow; 176 return; 177 } 178 179 log->events[log->nr_events] = event; 180 log->nr_events++; 181 } 182 183 static int mock_get_event(struct cxl_dev_state *cxlds, 184 struct cxl_mbox_cmd *cmd) 185 { 186 struct cxl_get_event_payload *pl; 187 struct mock_event_log *log; 188 u16 nr_overflow; 189 u8 log_type; 190 int i; 191 192 if (cmd->size_in != sizeof(log_type)) 193 return -EINVAL; 194 195 if (cmd->size_out < struct_size(pl, records, CXL_TEST_EVENT_CNT)) 196 return -EINVAL; 197 198 log_type = *((u8 *)cmd->payload_in); 199 if (log_type >= CXL_EVENT_TYPE_MAX) 200 return -EINVAL; 201 202 memset(cmd->payload_out, 0, cmd->size_out); 203 204 log = event_find_log(cxlds->dev, log_type); 205 if (!log || event_log_empty(log)) 206 return 0; 207 208 pl = cmd->payload_out; 209 210 for (i = 0; i < CXL_TEST_EVENT_CNT && !event_log_empty(log); i++) { 211 memcpy(&pl->records[i], event_get_current(log), 212 sizeof(pl->records[i])); 213 pl->records[i].hdr.handle = event_get_cur_event_handle(log); 214 log->cur_idx++; 215 } 216 217 pl->record_count = cpu_to_le16(i); 218 if (!event_log_empty(log)) 219 pl->flags |= CXL_GET_EVENT_FLAG_MORE_RECORDS; 220 221 if (log->nr_overflow) { 222 u64 ns; 223 224 pl->flags |= CXL_GET_EVENT_FLAG_OVERFLOW; 225 pl->overflow_err_count = cpu_to_le16(nr_overflow); 226 ns = ktime_get_real_ns(); 227 ns -= 5000000000; /* 5s ago */ 228 pl->first_overflow_timestamp = cpu_to_le64(ns); 229 ns = ktime_get_real_ns(); 230 ns -= 1000000000; /* 1s ago */ 231 pl->last_overflow_timestamp = cpu_to_le64(ns); 232 } 233 234 return 0; 235 } 236 237 static int mock_clear_event(struct cxl_dev_state 
*cxlds, 238 struct cxl_mbox_cmd *cmd) 239 { 240 struct cxl_mbox_clear_event_payload *pl = cmd->payload_in; 241 struct mock_event_log *log; 242 u8 log_type = pl->event_log; 243 u16 handle; 244 int nr; 245 246 if (log_type >= CXL_EVENT_TYPE_MAX) 247 return -EINVAL; 248 249 log = event_find_log(cxlds->dev, log_type); 250 if (!log) 251 return 0; /* No mock data in this log */ 252 253 /* 254 * This check is technically not invalid per the specification AFAICS. 255 * (The host could 'guess' handles and clear them in order). 256 * However, this is not good behavior for the host so test it. 257 */ 258 if (log->clear_idx + pl->nr_recs > log->cur_idx) { 259 dev_err(cxlds->dev, 260 "Attempting to clear more events than returned!\n"); 261 return -EINVAL; 262 } 263 264 /* Check handle order prior to clearing events */ 265 for (nr = 0, handle = event_get_clear_handle(log); 266 nr < pl->nr_recs; 267 nr++, handle++) { 268 if (handle != le16_to_cpu(pl->handles[nr])) { 269 dev_err(cxlds->dev, "Clearing events out of order\n"); 270 return -EINVAL; 271 } 272 } 273 274 if (log->nr_overflow) 275 log->nr_overflow = 0; 276 277 /* Clear events */ 278 log->clear_idx += pl->nr_recs; 279 return 0; 280 } 281 282 static void cxl_mock_event_trigger(struct device *dev) 283 { 284 struct cxl_mockmem_data *mdata = dev_get_drvdata(dev); 285 struct mock_event_store *mes = &mdata->mes; 286 int i; 287 288 for (i = CXL_EVENT_TYPE_INFO; i < CXL_EVENT_TYPE_MAX; i++) { 289 struct mock_event_log *log; 290 291 log = event_find_log(dev, i); 292 if (log) 293 event_reset_log(log); 294 } 295 296 cxl_mem_get_event_records(mes->cxlds, mes->ev_status); 297 } 298 299 struct cxl_event_record_raw maint_needed = { 300 .hdr = { 301 .id = UUID_INIT(0xBA5EBA11, 0xABCD, 0xEFEB, 302 0xa5, 0x5a, 0xa5, 0x5a, 0xa5, 0xa5, 0x5a, 0xa5), 303 .length = sizeof(struct cxl_event_record_raw), 304 .flags[0] = CXL_EVENT_RECORD_FLAG_MAINT_NEEDED, 305 /* .handle = Set dynamically */ 306 .related_handle = cpu_to_le16(0xa5b6), 307 }, 308 
.data = { 0xDE, 0xAD, 0xBE, 0xEF }, 309 }; 310 311 struct cxl_event_record_raw hardware_replace = { 312 .hdr = { 313 .id = UUID_INIT(0xABCDEFEB, 0xBA11, 0xBA5E, 314 0xa5, 0x5a, 0xa5, 0x5a, 0xa5, 0xa5, 0x5a, 0xa5), 315 .length = sizeof(struct cxl_event_record_raw), 316 .flags[0] = CXL_EVENT_RECORD_FLAG_HW_REPLACE, 317 /* .handle = Set dynamically */ 318 .related_handle = cpu_to_le16(0xb6a5), 319 }, 320 .data = { 0xDE, 0xAD, 0xBE, 0xEF }, 321 }; 322 323 struct cxl_event_gen_media gen_media = { 324 .hdr = { 325 .id = UUID_INIT(0xfbcd0a77, 0xc260, 0x417f, 326 0x85, 0xa9, 0x08, 0x8b, 0x16, 0x21, 0xeb, 0xa6), 327 .length = sizeof(struct cxl_event_gen_media), 328 .flags[0] = CXL_EVENT_RECORD_FLAG_PERMANENT, 329 /* .handle = Set dynamically */ 330 .related_handle = cpu_to_le16(0), 331 }, 332 .phys_addr = cpu_to_le64(0x2000), 333 .descriptor = CXL_GMER_EVT_DESC_UNCORECTABLE_EVENT, 334 .type = CXL_GMER_MEM_EVT_TYPE_DATA_PATH_ERROR, 335 .transaction_type = CXL_GMER_TRANS_HOST_WRITE, 336 /* .validity_flags = <set below> */ 337 .channel = 1, 338 .rank = 30 339 }; 340 341 struct cxl_event_dram dram = { 342 .hdr = { 343 .id = UUID_INIT(0x601dcbb3, 0x9c06, 0x4eab, 344 0xb8, 0xaf, 0x4e, 0x9b, 0xfb, 0x5c, 0x96, 0x24), 345 .length = sizeof(struct cxl_event_dram), 346 .flags[0] = CXL_EVENT_RECORD_FLAG_PERF_DEGRADED, 347 /* .handle = Set dynamically */ 348 .related_handle = cpu_to_le16(0), 349 }, 350 .phys_addr = cpu_to_le64(0x8000), 351 .descriptor = CXL_GMER_EVT_DESC_THRESHOLD_EVENT, 352 .type = CXL_GMER_MEM_EVT_TYPE_INV_ADDR, 353 .transaction_type = CXL_GMER_TRANS_INTERNAL_MEDIA_SCRUB, 354 /* .validity_flags = <set below> */ 355 .channel = 1, 356 .bank_group = 5, 357 .bank = 2, 358 .column = {0xDE, 0xAD}, 359 }; 360 361 struct cxl_event_mem_module mem_module = { 362 .hdr = { 363 .id = UUID_INIT(0xfe927475, 0xdd59, 0x4339, 364 0xa5, 0x86, 0x79, 0xba, 0xb1, 0x13, 0xb7, 0x74), 365 .length = sizeof(struct cxl_event_mem_module), 366 /* .handle = Set dynamically */ 367 .related_handle = 
cpu_to_le16(0), 368 }, 369 .event_type = CXL_MMER_TEMP_CHANGE, 370 .info = { 371 .health_status = CXL_DHI_HS_PERFORMANCE_DEGRADED, 372 .media_status = CXL_DHI_MS_ALL_DATA_LOST, 373 .add_status = (CXL_DHI_AS_CRITICAL << 2) | 374 (CXL_DHI_AS_WARNING << 4) | 375 (CXL_DHI_AS_WARNING << 5), 376 .device_temp = { 0xDE, 0xAD}, 377 .dirty_shutdown_cnt = { 0xde, 0xad, 0xbe, 0xef }, 378 .cor_vol_err_cnt = { 0xde, 0xad, 0xbe, 0xef }, 379 .cor_per_err_cnt = { 0xde, 0xad, 0xbe, 0xef }, 380 } 381 }; 382 383 static int mock_set_timestamp(struct cxl_dev_state *cxlds, 384 struct cxl_mbox_cmd *cmd) 385 { 386 struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev); 387 struct cxl_mbox_set_timestamp_in *ts = cmd->payload_in; 388 389 if (cmd->size_in != sizeof(*ts)) 390 return -EINVAL; 391 392 if (cmd->size_out != 0) 393 return -EINVAL; 394 395 mdata->timestamp = le64_to_cpu(ts->timestamp); 396 return 0; 397 } 398 399 static void cxl_mock_add_event_logs(struct mock_event_store *mes) 400 { 401 put_unaligned_le16(CXL_GMER_VALID_CHANNEL | CXL_GMER_VALID_RANK, 402 &gen_media.validity_flags); 403 404 put_unaligned_le16(CXL_DER_VALID_CHANNEL | CXL_DER_VALID_BANK_GROUP | 405 CXL_DER_VALID_BANK | CXL_DER_VALID_COLUMN, 406 &dram.validity_flags); 407 408 mes_add_event(mes, CXL_EVENT_TYPE_INFO, &maint_needed); 409 mes_add_event(mes, CXL_EVENT_TYPE_INFO, 410 (struct cxl_event_record_raw *)&gen_media); 411 mes_add_event(mes, CXL_EVENT_TYPE_INFO, 412 (struct cxl_event_record_raw *)&mem_module); 413 mes->ev_status |= CXLDEV_EVENT_STATUS_INFO; 414 415 mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &maint_needed); 416 mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace); 417 mes_add_event(mes, CXL_EVENT_TYPE_FAIL, 418 (struct cxl_event_record_raw *)&dram); 419 mes_add_event(mes, CXL_EVENT_TYPE_FAIL, 420 (struct cxl_event_record_raw *)&gen_media); 421 mes_add_event(mes, CXL_EVENT_TYPE_FAIL, 422 (struct cxl_event_record_raw *)&mem_module); 423 mes_add_event(mes, CXL_EVENT_TYPE_FAIL, 
&hardware_replace); 424 mes_add_event(mes, CXL_EVENT_TYPE_FAIL, 425 (struct cxl_event_record_raw *)&dram); 426 /* Overflow this log */ 427 mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace); 428 mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace); 429 mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace); 430 mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace); 431 mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace); 432 mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace); 433 mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace); 434 mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace); 435 mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace); 436 mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace); 437 mes->ev_status |= CXLDEV_EVENT_STATUS_FAIL; 438 439 mes_add_event(mes, CXL_EVENT_TYPE_FATAL, &hardware_replace); 440 mes_add_event(mes, CXL_EVENT_TYPE_FATAL, 441 (struct cxl_event_record_raw *)&dram); 442 mes->ev_status |= CXLDEV_EVENT_STATUS_FATAL; 443 } 444 445 static int mock_gsl(struct cxl_mbox_cmd *cmd) 446 { 447 if (cmd->size_out < sizeof(mock_gsl_payload)) 448 return -EINVAL; 449 450 memcpy(cmd->payload_out, &mock_gsl_payload, sizeof(mock_gsl_payload)); 451 cmd->size_out = sizeof(mock_gsl_payload); 452 453 return 0; 454 } 455 456 static int mock_get_log(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd) 457 { 458 struct cxl_mbox_get_log *gl = cmd->payload_in; 459 u32 offset = le32_to_cpu(gl->offset); 460 u32 length = le32_to_cpu(gl->length); 461 uuid_t uuid = DEFINE_CXL_CEL_UUID; 462 void *data = &mock_cel; 463 464 if (cmd->size_in < sizeof(*gl)) 465 return -EINVAL; 466 if (length > cxlds->payload_size) 467 return -EINVAL; 468 if (offset + length > sizeof(mock_cel)) 469 return -EINVAL; 470 if (!uuid_equal(&gl->uuid, &uuid)) 471 return -EINVAL; 472 if (length > cmd->size_out) 473 return -EINVAL; 474 475 memcpy(cmd->payload_out, data + offset, length); 476 477 return 0; 478 } 479 480 static 
int mock_rcd_id(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
{
	/*
	 * IDENTIFY payload for the RCD flavor of the mock device:
	 * volatile-only capacity, no LSA size, no partition alignment.
	 */
	struct cxl_mbox_identify id = {
		.fw_revision = { "mock fw v1 " },
		.total_capacity =
			cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER),
		.volatile_capacity =
			cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER),
	};

	if (cmd->size_out < sizeof(id))
		return -EINVAL;

	memcpy(cmd->payload_out, &id, sizeof(id));

	return 0;
}

/* Mock IDENTIFY: advertises LSA, partitioning, and poison limits */
static int mock_id(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
{
	struct cxl_mbox_identify id = {
		.fw_revision = { "mock fw v1 " },
		.lsa_size = cpu_to_le32(LSA_SIZE),
		.partition_align =
			cpu_to_le64(SZ_256M / CXL_CAPACITY_MULTIPLIER),
		.total_capacity =
			cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER),
		.inject_poison_limit = cpu_to_le16(MOCK_INJECT_TEST_MAX),
	};

	/* poison_list_max_mer is a 24-bit field per the CXL spec */
	put_unaligned_le24(CXL_POISON_LIST_MAX, id.poison_list_max_mer);

	if (cmd->size_out < sizeof(id))
		return -EINVAL;

	memcpy(cmd->payload_out, &id, sizeof(id));

	return 0;
}

/* Mock Get Partition Info: capacity split 50/50 volatile/persistent */
static int mock_partition_info(struct cxl_dev_state *cxlds,
			       struct cxl_mbox_cmd *cmd)
{
	struct cxl_mbox_get_partition_info pi = {
		.active_volatile_cap =
			cpu_to_le64(DEV_SIZE / 2 / CXL_CAPACITY_MULTIPLIER),
		.active_persistent_cap =
			cpu_to_le64(DEV_SIZE / 2 / CXL_CAPACITY_MULTIPLIER),
	};

	if (cmd->size_out < sizeof(pi))
		return -EINVAL;

	memcpy(cmd->payload_out, &pi, sizeof(pi));

	return 0;
}

/* Mock Sanitize: refuse while a user passphrase is set or device is locked */
static int mock_sanitize(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
{
	struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);

	if (cmd->size_in != 0)
		return -EINVAL;

	if (cmd->size_out != 0)
		return -EINVAL;

	if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}
	if (mdata->security_state &
CXL_PMEM_SEC_STATE_LOCKED) { 553 cmd->return_code = CXL_MBOX_CMD_RC_SECURITY; 554 return -ENXIO; 555 } 556 557 return 0; /* assume less than 2 secs, no bg */ 558 } 559 560 static int mock_secure_erase(struct cxl_dev_state *cxlds, 561 struct cxl_mbox_cmd *cmd) 562 { 563 struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev); 564 565 if (cmd->size_in != 0) 566 return -EINVAL; 567 568 if (cmd->size_out != 0) 569 return -EINVAL; 570 571 if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) { 572 cmd->return_code = CXL_MBOX_CMD_RC_SECURITY; 573 return -ENXIO; 574 } 575 576 if (mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED) { 577 cmd->return_code = CXL_MBOX_CMD_RC_SECURITY; 578 return -ENXIO; 579 } 580 581 return 0; 582 } 583 584 static int mock_get_security_state(struct cxl_dev_state *cxlds, 585 struct cxl_mbox_cmd *cmd) 586 { 587 struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev); 588 589 if (cmd->size_in) 590 return -EINVAL; 591 592 if (cmd->size_out != sizeof(u32)) 593 return -EINVAL; 594 595 memcpy(cmd->payload_out, &mdata->security_state, sizeof(u32)); 596 597 return 0; 598 } 599 600 static void master_plimit_check(struct cxl_mockmem_data *mdata) 601 { 602 if (mdata->master_limit == PASS_TRY_LIMIT) 603 return; 604 mdata->master_limit++; 605 if (mdata->master_limit == PASS_TRY_LIMIT) 606 mdata->security_state |= CXL_PMEM_SEC_STATE_MASTER_PLIMIT; 607 } 608 609 static void user_plimit_check(struct cxl_mockmem_data *mdata) 610 { 611 if (mdata->user_limit == PASS_TRY_LIMIT) 612 return; 613 mdata->user_limit++; 614 if (mdata->user_limit == PASS_TRY_LIMIT) 615 mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PLIMIT; 616 } 617 618 static int mock_set_passphrase(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd) 619 { 620 struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev); 621 struct cxl_set_pass *set_pass; 622 623 if (cmd->size_in != sizeof(*set_pass)) 624 return -EINVAL; 625 626 if (cmd->size_out != 0) 627 return -EINVAL; 628 629 
if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) { 630 cmd->return_code = CXL_MBOX_CMD_RC_SECURITY; 631 return -ENXIO; 632 } 633 634 set_pass = cmd->payload_in; 635 switch (set_pass->type) { 636 case CXL_PMEM_SEC_PASS_MASTER: 637 if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT) { 638 cmd->return_code = CXL_MBOX_CMD_RC_SECURITY; 639 return -ENXIO; 640 } 641 /* 642 * CXL spec rev3.0 8.2.9.8.6.2, The master pasphrase shall only be set in 643 * the security disabled state when the user passphrase is not set. 644 */ 645 if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) { 646 cmd->return_code = CXL_MBOX_CMD_RC_SECURITY; 647 return -ENXIO; 648 } 649 if (memcmp(mdata->master_pass, set_pass->old_pass, NVDIMM_PASSPHRASE_LEN)) { 650 master_plimit_check(mdata); 651 cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE; 652 return -ENXIO; 653 } 654 memcpy(mdata->master_pass, set_pass->new_pass, NVDIMM_PASSPHRASE_LEN); 655 mdata->security_state |= CXL_PMEM_SEC_STATE_MASTER_PASS_SET; 656 return 0; 657 658 case CXL_PMEM_SEC_PASS_USER: 659 if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT) { 660 cmd->return_code = CXL_MBOX_CMD_RC_SECURITY; 661 return -ENXIO; 662 } 663 if (memcmp(mdata->user_pass, set_pass->old_pass, NVDIMM_PASSPHRASE_LEN)) { 664 user_plimit_check(mdata); 665 cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE; 666 return -ENXIO; 667 } 668 memcpy(mdata->user_pass, set_pass->new_pass, NVDIMM_PASSPHRASE_LEN); 669 mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PASS_SET; 670 return 0; 671 672 default: 673 cmd->return_code = CXL_MBOX_CMD_RC_INPUT; 674 } 675 return -EINVAL; 676 } 677 678 static int mock_disable_passphrase(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd) 679 { 680 struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev); 681 struct cxl_disable_pass *dis_pass; 682 683 if (cmd->size_in != sizeof(*dis_pass)) 684 return -EINVAL; 685 686 if (cmd->size_out != 0) 687 return -EINVAL; 688 689 if (mdata->security_state & 
CXL_PMEM_SEC_STATE_FROZEN) { 690 cmd->return_code = CXL_MBOX_CMD_RC_SECURITY; 691 return -ENXIO; 692 } 693 694 dis_pass = cmd->payload_in; 695 switch (dis_pass->type) { 696 case CXL_PMEM_SEC_PASS_MASTER: 697 if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT) { 698 cmd->return_code = CXL_MBOX_CMD_RC_SECURITY; 699 return -ENXIO; 700 } 701 702 if (!(mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PASS_SET)) { 703 cmd->return_code = CXL_MBOX_CMD_RC_SECURITY; 704 return -ENXIO; 705 } 706 707 if (memcmp(dis_pass->pass, mdata->master_pass, NVDIMM_PASSPHRASE_LEN)) { 708 master_plimit_check(mdata); 709 cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE; 710 return -ENXIO; 711 } 712 713 mdata->master_limit = 0; 714 memset(mdata->master_pass, 0, NVDIMM_PASSPHRASE_LEN); 715 mdata->security_state &= ~CXL_PMEM_SEC_STATE_MASTER_PASS_SET; 716 return 0; 717 718 case CXL_PMEM_SEC_PASS_USER: 719 if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT) { 720 cmd->return_code = CXL_MBOX_CMD_RC_SECURITY; 721 return -ENXIO; 722 } 723 724 if (!(mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET)) { 725 cmd->return_code = CXL_MBOX_CMD_RC_SECURITY; 726 return -ENXIO; 727 } 728 729 if (memcmp(dis_pass->pass, mdata->user_pass, NVDIMM_PASSPHRASE_LEN)) { 730 user_plimit_check(mdata); 731 cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE; 732 return -ENXIO; 733 } 734 735 mdata->user_limit = 0; 736 memset(mdata->user_pass, 0, NVDIMM_PASSPHRASE_LEN); 737 mdata->security_state &= ~(CXL_PMEM_SEC_STATE_USER_PASS_SET | 738 CXL_PMEM_SEC_STATE_LOCKED); 739 return 0; 740 741 default: 742 cmd->return_code = CXL_MBOX_CMD_RC_INPUT; 743 return -EINVAL; 744 } 745 746 return 0; 747 } 748 749 static int mock_freeze_security(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd) 750 { 751 struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev); 752 753 if (cmd->size_in != 0) 754 return -EINVAL; 755 756 if (cmd->size_out != 0) 757 return -EINVAL; 758 759 if (mdata->security_state & 
CXL_PMEM_SEC_STATE_FROZEN) 760 return 0; 761 762 mdata->security_state |= CXL_PMEM_SEC_STATE_FROZEN; 763 return 0; 764 } 765 766 static int mock_unlock_security(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd) 767 { 768 struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev); 769 770 if (cmd->size_in != NVDIMM_PASSPHRASE_LEN) 771 return -EINVAL; 772 773 if (cmd->size_out != 0) 774 return -EINVAL; 775 776 if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) { 777 cmd->return_code = CXL_MBOX_CMD_RC_SECURITY; 778 return -ENXIO; 779 } 780 781 if (!(mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET)) { 782 cmd->return_code = CXL_MBOX_CMD_RC_SECURITY; 783 return -ENXIO; 784 } 785 786 if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT) { 787 cmd->return_code = CXL_MBOX_CMD_RC_SECURITY; 788 return -ENXIO; 789 } 790 791 if (!(mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED)) { 792 cmd->return_code = CXL_MBOX_CMD_RC_SECURITY; 793 return -ENXIO; 794 } 795 796 if (memcmp(cmd->payload_in, mdata->user_pass, NVDIMM_PASSPHRASE_LEN)) { 797 if (++mdata->user_limit == PASS_TRY_LIMIT) 798 mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PLIMIT; 799 cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE; 800 return -ENXIO; 801 } 802 803 mdata->user_limit = 0; 804 mdata->security_state &= ~CXL_PMEM_SEC_STATE_LOCKED; 805 return 0; 806 } 807 808 static int mock_passphrase_secure_erase(struct cxl_dev_state *cxlds, 809 struct cxl_mbox_cmd *cmd) 810 { 811 struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev); 812 struct cxl_pass_erase *erase; 813 814 if (cmd->size_in != sizeof(*erase)) 815 return -EINVAL; 816 817 if (cmd->size_out != 0) 818 return -EINVAL; 819 820 erase = cmd->payload_in; 821 if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) { 822 cmd->return_code = CXL_MBOX_CMD_RC_SECURITY; 823 return -ENXIO; 824 } 825 826 if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT && 827 erase->type == CXL_PMEM_SEC_PASS_USER) { 828 cmd->return_code = 
CXL_MBOX_CMD_RC_SECURITY; 829 return -ENXIO; 830 } 831 832 if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT && 833 erase->type == CXL_PMEM_SEC_PASS_MASTER) { 834 cmd->return_code = CXL_MBOX_CMD_RC_SECURITY; 835 return -ENXIO; 836 } 837 838 switch (erase->type) { 839 case CXL_PMEM_SEC_PASS_MASTER: 840 /* 841 * The spec does not clearly define the behavior of the scenario 842 * where a master passphrase is passed in while the master 843 * passphrase is not set and user passphrase is not set. The 844 * code will take the assumption that it will behave the same 845 * as a CXL secure erase command without passphrase (0x4401). 846 */ 847 if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PASS_SET) { 848 if (memcmp(mdata->master_pass, erase->pass, 849 NVDIMM_PASSPHRASE_LEN)) { 850 master_plimit_check(mdata); 851 cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE; 852 return -ENXIO; 853 } 854 mdata->master_limit = 0; 855 mdata->user_limit = 0; 856 mdata->security_state &= ~CXL_PMEM_SEC_STATE_USER_PASS_SET; 857 memset(mdata->user_pass, 0, NVDIMM_PASSPHRASE_LEN); 858 mdata->security_state &= ~CXL_PMEM_SEC_STATE_LOCKED; 859 } else { 860 /* 861 * CXL rev3 8.2.9.8.6.3 Disable Passphrase 862 * When master passphrase is disabled, the device shall 863 * return Invalid Input for the Passphrase Secure Erase 864 * command with master passphrase. 865 */ 866 return -EINVAL; 867 } 868 /* Scramble encryption keys so that data is effectively erased */ 869 break; 870 case CXL_PMEM_SEC_PASS_USER: 871 /* 872 * The spec does not clearly define the behavior of the scenario 873 * where a user passphrase is passed in while the user 874 * passphrase is not set. The code will take the assumption that 875 * it will behave the same as a CXL secure erase command without 876 * passphrase (0x4401). 
877 */ 878 if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) { 879 if (memcmp(mdata->user_pass, erase->pass, 880 NVDIMM_PASSPHRASE_LEN)) { 881 user_plimit_check(mdata); 882 cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE; 883 return -ENXIO; 884 } 885 mdata->user_limit = 0; 886 mdata->security_state &= ~CXL_PMEM_SEC_STATE_USER_PASS_SET; 887 memset(mdata->user_pass, 0, NVDIMM_PASSPHRASE_LEN); 888 } 889 890 /* 891 * CXL rev3 Table 8-118 892 * If user passphrase is not set or supported by device, current 893 * passphrase value is ignored. Will make the assumption that 894 * the operation will proceed as secure erase w/o passphrase 895 * since spec is not explicit. 896 */ 897 898 /* Scramble encryption keys so that data is effectively erased */ 899 break; 900 default: 901 return -EINVAL; 902 } 903 904 return 0; 905 } 906 907 static int mock_get_lsa(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd) 908 { 909 struct cxl_mbox_get_lsa *get_lsa = cmd->payload_in; 910 struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev); 911 void *lsa = mdata->lsa; 912 u32 offset, length; 913 914 if (sizeof(*get_lsa) > cmd->size_in) 915 return -EINVAL; 916 offset = le32_to_cpu(get_lsa->offset); 917 length = le32_to_cpu(get_lsa->length); 918 if (offset + length > LSA_SIZE) 919 return -EINVAL; 920 if (length > cmd->size_out) 921 return -EINVAL; 922 923 memcpy(cmd->payload_out, lsa + offset, length); 924 return 0; 925 } 926 927 static int mock_set_lsa(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd) 928 { 929 struct cxl_mbox_set_lsa *set_lsa = cmd->payload_in; 930 struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev); 931 void *lsa = mdata->lsa; 932 u32 offset, length; 933 934 if (sizeof(*set_lsa) > cmd->size_in) 935 return -EINVAL; 936 offset = le32_to_cpu(set_lsa->offset); 937 length = cmd->size_in - sizeof(*set_lsa); 938 if (offset + length > LSA_SIZE) 939 return -EINVAL; 940 941 memcpy(lsa + offset, &set_lsa->data[0], length); 942 return 0; 943 } 944 945 
static int mock_health_info(struct cxl_dev_state *cxlds, 946 struct cxl_mbox_cmd *cmd) 947 { 948 struct cxl_mbox_health_info health_info = { 949 /* set flags for maint needed, perf degraded, hw replacement */ 950 .health_status = 0x7, 951 /* set media status to "All Data Lost" */ 952 .media_status = 0x3, 953 /* 954 * set ext_status flags for: 955 * ext_life_used: normal, 956 * ext_temperature: critical, 957 * ext_corrected_volatile: warning, 958 * ext_corrected_persistent: normal, 959 */ 960 .ext_status = 0x18, 961 .life_used = 15, 962 .temperature = cpu_to_le16(25), 963 .dirty_shutdowns = cpu_to_le32(10), 964 .volatile_errors = cpu_to_le32(20), 965 .pmem_errors = cpu_to_le32(30), 966 }; 967 968 if (cmd->size_out < sizeof(health_info)) 969 return -EINVAL; 970 971 memcpy(cmd->payload_out, &health_info, sizeof(health_info)); 972 return 0; 973 } 974 975 static struct mock_poison { 976 struct cxl_dev_state *cxlds; 977 u64 dpa; 978 } mock_poison_list[MOCK_INJECT_TEST_MAX]; 979 980 static struct cxl_mbox_poison_out * 981 cxl_get_injected_po(struct cxl_dev_state *cxlds, u64 offset, u64 length) 982 { 983 struct cxl_mbox_poison_out *po; 984 int nr_records = 0; 985 u64 dpa; 986 987 po = kzalloc(struct_size(po, record, poison_inject_dev_max), GFP_KERNEL); 988 if (!po) 989 return NULL; 990 991 for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) { 992 if (mock_poison_list[i].cxlds != cxlds) 993 continue; 994 if (mock_poison_list[i].dpa < offset || 995 mock_poison_list[i].dpa > offset + length - 1) 996 continue; 997 998 dpa = mock_poison_list[i].dpa + CXL_POISON_SOURCE_INJECTED; 999 po->record[nr_records].address = cpu_to_le64(dpa); 1000 po->record[nr_records].length = cpu_to_le32(1); 1001 nr_records++; 1002 if (nr_records == poison_inject_dev_max) 1003 break; 1004 } 1005 1006 /* Always return count, even when zero */ 1007 po->count = cpu_to_le16(nr_records); 1008 1009 return po; 1010 } 1011 1012 static int mock_get_poison(struct cxl_dev_state *cxlds, 1013 struct cxl_mbox_cmd *cmd) 
1014 { 1015 struct cxl_mbox_poison_in *pi = cmd->payload_in; 1016 struct cxl_mbox_poison_out *po; 1017 u64 offset = le64_to_cpu(pi->offset); 1018 u64 length = le64_to_cpu(pi->length); 1019 int nr_records; 1020 1021 po = cxl_get_injected_po(cxlds, offset, length); 1022 if (!po) 1023 return -ENOMEM; 1024 nr_records = le16_to_cpu(po->count); 1025 memcpy(cmd->payload_out, po, struct_size(po, record, nr_records)); 1026 cmd->size_out = struct_size(po, record, nr_records); 1027 kfree(po); 1028 1029 return 0; 1030 } 1031 1032 static bool mock_poison_dev_max_injected(struct cxl_dev_state *cxlds) 1033 { 1034 int count = 0; 1035 1036 for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) { 1037 if (mock_poison_list[i].cxlds == cxlds) 1038 count++; 1039 } 1040 return (count >= poison_inject_dev_max); 1041 } 1042 1043 static bool mock_poison_add(struct cxl_dev_state *cxlds, u64 dpa) 1044 { 1045 if (mock_poison_dev_max_injected(cxlds)) { 1046 dev_dbg(cxlds->dev, 1047 "Device poison injection limit has been reached: %d\n", 1048 MOCK_INJECT_DEV_MAX); 1049 return false; 1050 } 1051 1052 for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) { 1053 if (!mock_poison_list[i].cxlds) { 1054 mock_poison_list[i].cxlds = cxlds; 1055 mock_poison_list[i].dpa = dpa; 1056 return true; 1057 } 1058 } 1059 dev_dbg(cxlds->dev, 1060 "Mock test poison injection limit has been reached: %d\n", 1061 MOCK_INJECT_TEST_MAX); 1062 1063 return false; 1064 } 1065 1066 static bool mock_poison_found(struct cxl_dev_state *cxlds, u64 dpa) 1067 { 1068 for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) { 1069 if (mock_poison_list[i].cxlds == cxlds && 1070 mock_poison_list[i].dpa == dpa) 1071 return true; 1072 } 1073 return false; 1074 } 1075 1076 static int mock_inject_poison(struct cxl_dev_state *cxlds, 1077 struct cxl_mbox_cmd *cmd) 1078 { 1079 struct cxl_mbox_inject_poison *pi = cmd->payload_in; 1080 u64 dpa = le64_to_cpu(pi->address); 1081 1082 if (mock_poison_found(cxlds, dpa)) { 1083 /* Not an error to inject poison if already 
poisoned */ 1084 dev_dbg(cxlds->dev, "DPA: 0x%llx already poisoned\n", dpa); 1085 return 0; 1086 } 1087 if (!mock_poison_add(cxlds, dpa)) 1088 return -ENXIO; 1089 1090 return 0; 1091 } 1092 1093 static bool mock_poison_del(struct cxl_dev_state *cxlds, u64 dpa) 1094 { 1095 for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) { 1096 if (mock_poison_list[i].cxlds == cxlds && 1097 mock_poison_list[i].dpa == dpa) { 1098 mock_poison_list[i].cxlds = NULL; 1099 return true; 1100 } 1101 } 1102 return false; 1103 } 1104 1105 static int mock_clear_poison(struct cxl_dev_state *cxlds, 1106 struct cxl_mbox_cmd *cmd) 1107 { 1108 struct cxl_mbox_clear_poison *pi = cmd->payload_in; 1109 u64 dpa = le64_to_cpu(pi->address); 1110 1111 /* 1112 * A real CXL device will write pi->write_data to the address 1113 * being cleared. In this mock, just delete this address from 1114 * the mock poison list. 1115 */ 1116 if (!mock_poison_del(cxlds, dpa)) 1117 dev_dbg(cxlds->dev, "DPA: 0x%llx not in poison list\n", dpa); 1118 1119 return 0; 1120 } 1121 1122 static bool mock_poison_list_empty(void) 1123 { 1124 for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) { 1125 if (mock_poison_list[i].cxlds) 1126 return false; 1127 } 1128 return true; 1129 } 1130 1131 static ssize_t poison_inject_max_show(struct device_driver *drv, char *buf) 1132 { 1133 return sysfs_emit(buf, "%u\n", poison_inject_dev_max); 1134 } 1135 1136 static ssize_t poison_inject_max_store(struct device_driver *drv, 1137 const char *buf, size_t len) 1138 { 1139 int val; 1140 1141 if (kstrtoint(buf, 0, &val) < 0) 1142 return -EINVAL; 1143 1144 if (!mock_poison_list_empty()) 1145 return -EBUSY; 1146 1147 if (val <= MOCK_INJECT_TEST_MAX) 1148 poison_inject_dev_max = val; 1149 else 1150 return -EINVAL; 1151 1152 return len; 1153 } 1154 1155 static DRIVER_ATTR_RW(poison_inject_max); 1156 1157 static struct attribute *cxl_mock_mem_core_attrs[] = { 1158 &driver_attr_poison_inject_max.attr, 1159 NULL 1160 }; 1161 ATTRIBUTE_GROUPS(cxl_mock_mem_core); 1162 
1163 static int cxl_mock_mbox_send(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd) 1164 { 1165 struct device *dev = cxlds->dev; 1166 int rc = -EIO; 1167 1168 switch (cmd->opcode) { 1169 case CXL_MBOX_OP_SET_TIMESTAMP: 1170 rc = mock_set_timestamp(cxlds, cmd); 1171 break; 1172 case CXL_MBOX_OP_GET_SUPPORTED_LOGS: 1173 rc = mock_gsl(cmd); 1174 break; 1175 case CXL_MBOX_OP_GET_LOG: 1176 rc = mock_get_log(cxlds, cmd); 1177 break; 1178 case CXL_MBOX_OP_IDENTIFY: 1179 if (cxlds->rcd) 1180 rc = mock_rcd_id(cxlds, cmd); 1181 else 1182 rc = mock_id(cxlds, cmd); 1183 break; 1184 case CXL_MBOX_OP_GET_LSA: 1185 rc = mock_get_lsa(cxlds, cmd); 1186 break; 1187 case CXL_MBOX_OP_GET_PARTITION_INFO: 1188 rc = mock_partition_info(cxlds, cmd); 1189 break; 1190 case CXL_MBOX_OP_GET_EVENT_RECORD: 1191 rc = mock_get_event(cxlds, cmd); 1192 break; 1193 case CXL_MBOX_OP_CLEAR_EVENT_RECORD: 1194 rc = mock_clear_event(cxlds, cmd); 1195 break; 1196 case CXL_MBOX_OP_SET_LSA: 1197 rc = mock_set_lsa(cxlds, cmd); 1198 break; 1199 case CXL_MBOX_OP_GET_HEALTH_INFO: 1200 rc = mock_health_info(cxlds, cmd); 1201 break; 1202 case CXL_MBOX_OP_SANITIZE: 1203 rc = mock_sanitize(cxlds, cmd); 1204 break; 1205 case CXL_MBOX_OP_SECURE_ERASE: 1206 rc = mock_secure_erase(cxlds, cmd); 1207 break; 1208 case CXL_MBOX_OP_GET_SECURITY_STATE: 1209 rc = mock_get_security_state(cxlds, cmd); 1210 break; 1211 case CXL_MBOX_OP_SET_PASSPHRASE: 1212 rc = mock_set_passphrase(cxlds, cmd); 1213 break; 1214 case CXL_MBOX_OP_DISABLE_PASSPHRASE: 1215 rc = mock_disable_passphrase(cxlds, cmd); 1216 break; 1217 case CXL_MBOX_OP_FREEZE_SECURITY: 1218 rc = mock_freeze_security(cxlds, cmd); 1219 break; 1220 case CXL_MBOX_OP_UNLOCK: 1221 rc = mock_unlock_security(cxlds, cmd); 1222 break; 1223 case CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE: 1224 rc = mock_passphrase_secure_erase(cxlds, cmd); 1225 break; 1226 case CXL_MBOX_OP_GET_POISON: 1227 rc = mock_get_poison(cxlds, cmd); 1228 break; 1229 case CXL_MBOX_OP_INJECT_POISON: 1230 rc = 
mock_inject_poison(cxlds, cmd); 1231 break; 1232 case CXL_MBOX_OP_CLEAR_POISON: 1233 rc = mock_clear_poison(cxlds, cmd); 1234 break; 1235 default: 1236 break; 1237 } 1238 1239 dev_dbg(dev, "opcode: %#x sz_in: %zd sz_out: %zd rc: %d\n", cmd->opcode, 1240 cmd->size_in, cmd->size_out, rc); 1241 1242 return rc; 1243 } 1244 1245 static void label_area_release(void *lsa) 1246 { 1247 vfree(lsa); 1248 } 1249 1250 static bool is_rcd(struct platform_device *pdev) 1251 { 1252 const struct platform_device_id *id = platform_get_device_id(pdev); 1253 1254 return !!id->driver_data; 1255 } 1256 1257 static ssize_t event_trigger_store(struct device *dev, 1258 struct device_attribute *attr, 1259 const char *buf, size_t count) 1260 { 1261 cxl_mock_event_trigger(dev); 1262 return count; 1263 } 1264 static DEVICE_ATTR_WO(event_trigger); 1265 1266 static int cxl_mock_mem_probe(struct platform_device *pdev) 1267 { 1268 struct device *dev = &pdev->dev; 1269 struct cxl_memdev *cxlmd; 1270 struct cxl_dev_state *cxlds; 1271 struct cxl_mockmem_data *mdata; 1272 int rc; 1273 1274 mdata = devm_kzalloc(dev, sizeof(*mdata), GFP_KERNEL); 1275 if (!mdata) 1276 return -ENOMEM; 1277 dev_set_drvdata(dev, mdata); 1278 1279 mdata->lsa = vmalloc(LSA_SIZE); 1280 if (!mdata->lsa) 1281 return -ENOMEM; 1282 rc = devm_add_action_or_reset(dev, label_area_release, mdata->lsa); 1283 if (rc) 1284 return rc; 1285 1286 cxlds = cxl_dev_state_create(dev); 1287 if (IS_ERR(cxlds)) 1288 return PTR_ERR(cxlds); 1289 1290 cxlds->serial = pdev->id; 1291 cxlds->mbox_send = cxl_mock_mbox_send; 1292 cxlds->payload_size = SZ_4K; 1293 cxlds->event.buf = (struct cxl_get_event_payload *) mdata->event_buf; 1294 if (is_rcd(pdev)) { 1295 cxlds->rcd = true; 1296 cxlds->component_reg_phys = CXL_RESOURCE_NONE; 1297 } 1298 1299 rc = cxl_enumerate_cmds(cxlds); 1300 if (rc) 1301 return rc; 1302 1303 rc = cxl_poison_state_init(cxlds); 1304 if (rc) 1305 return rc; 1306 1307 rc = cxl_set_timestamp(cxlds); 1308 if (rc) 1309 return rc; 1310 
1311 cxlds->media_ready = true; 1312 rc = cxl_dev_state_identify(cxlds); 1313 if (rc) 1314 return rc; 1315 1316 rc = cxl_mem_create_range_info(cxlds); 1317 if (rc) 1318 return rc; 1319 1320 mdata->mes.cxlds = cxlds; 1321 cxl_mock_add_event_logs(&mdata->mes); 1322 1323 cxlmd = devm_cxl_add_memdev(cxlds); 1324 if (IS_ERR(cxlmd)) 1325 return PTR_ERR(cxlmd); 1326 1327 cxl_mem_get_event_records(cxlds, CXLDEV_EVENT_STATUS_ALL); 1328 1329 return 0; 1330 } 1331 1332 static ssize_t security_lock_show(struct device *dev, 1333 struct device_attribute *attr, char *buf) 1334 { 1335 struct cxl_mockmem_data *mdata = dev_get_drvdata(dev); 1336 1337 return sysfs_emit(buf, "%u\n", 1338 !!(mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED)); 1339 } 1340 1341 static ssize_t security_lock_store(struct device *dev, struct device_attribute *attr, 1342 const char *buf, size_t count) 1343 { 1344 struct cxl_mockmem_data *mdata = dev_get_drvdata(dev); 1345 u32 mask = CXL_PMEM_SEC_STATE_FROZEN | CXL_PMEM_SEC_STATE_USER_PLIMIT | 1346 CXL_PMEM_SEC_STATE_MASTER_PLIMIT; 1347 int val; 1348 1349 if (kstrtoint(buf, 0, &val) < 0) 1350 return -EINVAL; 1351 1352 if (val == 1) { 1353 if (!(mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET)) 1354 return -ENXIO; 1355 mdata->security_state |= CXL_PMEM_SEC_STATE_LOCKED; 1356 mdata->security_state &= ~mask; 1357 } else { 1358 return -EINVAL; 1359 } 1360 return count; 1361 } 1362 1363 static DEVICE_ATTR_RW(security_lock); 1364 1365 static struct attribute *cxl_mock_mem_attrs[] = { 1366 &dev_attr_security_lock.attr, 1367 &dev_attr_event_trigger.attr, 1368 NULL 1369 }; 1370 ATTRIBUTE_GROUPS(cxl_mock_mem); 1371 1372 static const struct platform_device_id cxl_mock_mem_ids[] = { 1373 { .name = "cxl_mem", 0 }, 1374 { .name = "cxl_rcd", 1 }, 1375 { }, 1376 }; 1377 MODULE_DEVICE_TABLE(platform, cxl_mock_mem_ids); 1378 1379 static struct platform_driver cxl_mock_mem_driver = { 1380 .probe = cxl_mock_mem_probe, 1381 .id_table = cxl_mock_mem_ids, 1382 .driver = { 
1383 .name = KBUILD_MODNAME, 1384 .dev_groups = cxl_mock_mem_groups, 1385 .groups = cxl_mock_mem_core_groups, 1386 }, 1387 }; 1388 1389 module_platform_driver(cxl_mock_mem_driver); 1390 MODULE_LICENSE("GPL v2"); 1391 MODULE_IMPORT_NS(CXL); 1392