1 // SPDX-License-Identifier: GPL-2.0-only 2 // Copyright(c) 2021 Intel Corporation. All rights reserved. 3 4 #include <linux/platform_device.h> 5 #include <linux/mod_devicetable.h> 6 #include <linux/module.h> 7 #include <linux/delay.h> 8 #include <linux/sizes.h> 9 #include <linux/bits.h> 10 #include <asm/unaligned.h> 11 #include <cxlmem.h> 12 13 #include "trace.h" 14 15 #define LSA_SIZE SZ_128K 16 #define DEV_SIZE SZ_2G 17 #define EFFECT(x) (1U << x) 18 19 #define MOCK_INJECT_DEV_MAX 8 20 #define MOCK_INJECT_TEST_MAX 128 21 22 static unsigned int poison_inject_dev_max = MOCK_INJECT_DEV_MAX; 23 24 enum cxl_command_effects { 25 CONF_CHANGE_COLD_RESET = 0, 26 CONF_CHANGE_IMMEDIATE, 27 DATA_CHANGE_IMMEDIATE, 28 POLICY_CHANGE_IMMEDIATE, 29 LOG_CHANGE_IMMEDIATE, 30 SECURITY_CHANGE_IMMEDIATE, 31 BACKGROUND_OP, 32 SECONDARY_MBOX_SUPPORTED, 33 }; 34 35 #define CXL_CMD_EFFECT_NONE cpu_to_le16(0) 36 37 static struct cxl_cel_entry mock_cel[] = { 38 { 39 .opcode = cpu_to_le16(CXL_MBOX_OP_GET_SUPPORTED_LOGS), 40 .effect = CXL_CMD_EFFECT_NONE, 41 }, 42 { 43 .opcode = cpu_to_le16(CXL_MBOX_OP_IDENTIFY), 44 .effect = CXL_CMD_EFFECT_NONE, 45 }, 46 { 47 .opcode = cpu_to_le16(CXL_MBOX_OP_GET_LSA), 48 .effect = CXL_CMD_EFFECT_NONE, 49 }, 50 { 51 .opcode = cpu_to_le16(CXL_MBOX_OP_GET_PARTITION_INFO), 52 .effect = CXL_CMD_EFFECT_NONE, 53 }, 54 { 55 .opcode = cpu_to_le16(CXL_MBOX_OP_SET_LSA), 56 .effect = cpu_to_le16(EFFECT(CONF_CHANGE_IMMEDIATE) | 57 EFFECT(DATA_CHANGE_IMMEDIATE)), 58 }, 59 { 60 .opcode = cpu_to_le16(CXL_MBOX_OP_GET_HEALTH_INFO), 61 .effect = CXL_CMD_EFFECT_NONE, 62 }, 63 { 64 .opcode = cpu_to_le16(CXL_MBOX_OP_GET_POISON), 65 .effect = CXL_CMD_EFFECT_NONE, 66 }, 67 { 68 .opcode = cpu_to_le16(CXL_MBOX_OP_INJECT_POISON), 69 .effect = cpu_to_le16(EFFECT(DATA_CHANGE_IMMEDIATE)), 70 }, 71 { 72 .opcode = cpu_to_le16(CXL_MBOX_OP_CLEAR_POISON), 73 .effect = cpu_to_le16(EFFECT(DATA_CHANGE_IMMEDIATE)), 74 }, 75 }; 76 77 /* See CXL 2.0 Table 181 Get Health Info Output Payload */ 78 
struct cxl_mbox_health_info {
	u8 health_status;
	u8 media_status;
	u8 ext_status;
	u8 life_used;
	__le16 temperature;
	__le32 dirty_shutdowns;
	__le32 volatile_errors;
	__le32 pmem_errors;
} __packed;

/* Get Supported Logs response: a single entry describing the mock CEL */
static struct {
	struct cxl_mbox_get_supported_logs gsl;
	struct cxl_gsl_entry entry;
} mock_gsl_payload = {
	.gsl = {
		.entries = cpu_to_le16(1),
	},
	.entry = {
		.uuid = DEFINE_CXL_CEL_UUID,
		.size = cpu_to_le32(sizeof(mock_cel)),
	},
};

/* Passphrase attempts allowed before the *_PLIMIT security state is set */
#define PASS_TRY_LIMIT 3

/* Capacity of each mock event log */
#define CXL_TEST_EVENT_CNT_MAX 15

/* Set a number of events to return at a time for simulation. */
#define CXL_TEST_EVENT_CNT 3

/*
 * One mock event log.  cur_idx tracks the next record to hand to the host,
 * clear_idx the next record expected to be cleared; nr_overflow counts
 * drops once nr_events hits CXL_TEST_EVENT_CNT_MAX.
 */
struct mock_event_log {
	u16 clear_idx;
	u16 cur_idx;
	u16 nr_events;
	u16 nr_overflow;
	u16 overflow_reset;
	struct cxl_event_record_raw *events[CXL_TEST_EVENT_CNT_MAX];
};

/* All mock logs for one device plus the aggregate event-status bits */
struct mock_event_store {
	struct cxl_dev_state *cxlds;
	struct mock_event_log mock_logs[CXL_EVENT_TYPE_MAX];
	u32 ev_status;
};

/* Per-mock-device driver data (stored via dev_set_drvdata) */
struct cxl_mockmem_data {
	void *lsa;		/* backing store for the label storage area */
	u32 security_state;	/* CXL_PMEM_SEC_STATE_* bit set */
	u8 user_pass[NVDIMM_PASSPHRASE_LEN];
	u8 master_pass[NVDIMM_PASSPHRASE_LEN];
	int user_limit;		/* failed user passphrase attempts */
	int master_limit;	/* failed master passphrase attempts */
	struct mock_event_store mes;
	u8 event_buf[SZ_4K];
	u64 timestamp;
};

/* Look up the mock log for @log_type, or NULL if the type is out of range */
static struct mock_event_log *event_find_log(struct device *dev, int log_type)
{
	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);

	if (log_type >= CXL_EVENT_TYPE_MAX)
		return NULL;
	return &mdata->mes.mock_logs[log_type];
}

/* Next record to be returned to the host */
static struct cxl_event_record_raw *event_get_current(struct mock_event_log *log)
{
	return log->events[log->cur_idx];
}

/* Rewind a log so its events (and overflow count) can be replayed */
static void event_reset_log(struct mock_event_log *log)
{
	log->cur_idx = 0;
	log->clear_idx = 0;
	log->nr_overflow = log->overflow_reset;
}

/* Handle can never be 0 use 1 based indexing for handle */
static u16 event_get_clear_handle(struct
mock_event_log *log) 159 { 160 return log->clear_idx + 1; 161 } 162 163 /* Handle can never be 0 use 1 based indexing for handle */ 164 static __le16 event_get_cur_event_handle(struct mock_event_log *log) 165 { 166 u16 cur_handle = log->cur_idx + 1; 167 168 return cpu_to_le16(cur_handle); 169 } 170 171 static bool event_log_empty(struct mock_event_log *log) 172 { 173 return log->cur_idx == log->nr_events; 174 } 175 176 static void mes_add_event(struct mock_event_store *mes, 177 enum cxl_event_log_type log_type, 178 struct cxl_event_record_raw *event) 179 { 180 struct mock_event_log *log; 181 182 if (WARN_ON(log_type >= CXL_EVENT_TYPE_MAX)) 183 return; 184 185 log = &mes->mock_logs[log_type]; 186 187 if ((log->nr_events + 1) > CXL_TEST_EVENT_CNT_MAX) { 188 log->nr_overflow++; 189 log->overflow_reset = log->nr_overflow; 190 return; 191 } 192 193 log->events[log->nr_events] = event; 194 log->nr_events++; 195 } 196 197 static int mock_get_event(struct cxl_dev_state *cxlds, 198 struct cxl_mbox_cmd *cmd) 199 { 200 struct cxl_get_event_payload *pl; 201 struct mock_event_log *log; 202 u16 nr_overflow; 203 u8 log_type; 204 int i; 205 206 if (cmd->size_in != sizeof(log_type)) 207 return -EINVAL; 208 209 if (cmd->size_out < struct_size(pl, records, CXL_TEST_EVENT_CNT)) 210 return -EINVAL; 211 212 log_type = *((u8 *)cmd->payload_in); 213 if (log_type >= CXL_EVENT_TYPE_MAX) 214 return -EINVAL; 215 216 memset(cmd->payload_out, 0, cmd->size_out); 217 218 log = event_find_log(cxlds->dev, log_type); 219 if (!log || event_log_empty(log)) 220 return 0; 221 222 pl = cmd->payload_out; 223 224 for (i = 0; i < CXL_TEST_EVENT_CNT && !event_log_empty(log); i++) { 225 memcpy(&pl->records[i], event_get_current(log), 226 sizeof(pl->records[i])); 227 pl->records[i].hdr.handle = event_get_cur_event_handle(log); 228 log->cur_idx++; 229 } 230 231 pl->record_count = cpu_to_le16(i); 232 if (!event_log_empty(log)) 233 pl->flags |= CXL_GET_EVENT_FLAG_MORE_RECORDS; 234 235 if (log->nr_overflow) { 236 
u64 ns; 237 238 pl->flags |= CXL_GET_EVENT_FLAG_OVERFLOW; 239 pl->overflow_err_count = cpu_to_le16(nr_overflow); 240 ns = ktime_get_real_ns(); 241 ns -= 5000000000; /* 5s ago */ 242 pl->first_overflow_timestamp = cpu_to_le64(ns); 243 ns = ktime_get_real_ns(); 244 ns -= 1000000000; /* 1s ago */ 245 pl->last_overflow_timestamp = cpu_to_le64(ns); 246 } 247 248 return 0; 249 } 250 251 static int mock_clear_event(struct cxl_dev_state *cxlds, 252 struct cxl_mbox_cmd *cmd) 253 { 254 struct cxl_mbox_clear_event_payload *pl = cmd->payload_in; 255 struct mock_event_log *log; 256 u8 log_type = pl->event_log; 257 u16 handle; 258 int nr; 259 260 if (log_type >= CXL_EVENT_TYPE_MAX) 261 return -EINVAL; 262 263 log = event_find_log(cxlds->dev, log_type); 264 if (!log) 265 return 0; /* No mock data in this log */ 266 267 /* 268 * This check is technically not invalid per the specification AFAICS. 269 * (The host could 'guess' handles and clear them in order). 270 * However, this is not good behavior for the host so test it. 
	 */
	if (log->clear_idx + pl->nr_recs > log->cur_idx) {
		dev_err(cxlds->dev,
			"Attempting to clear more events than returned!\n");
		return -EINVAL;
	}

	/* Check handle order prior to clearing events */
	for (nr = 0, handle = event_get_clear_handle(log);
	     nr < pl->nr_recs;
	     nr++, handle++) {
		if (handle != le16_to_cpu(pl->handles[nr])) {
			dev_err(cxlds->dev, "Clearing events out of order\n");
			return -EINVAL;
		}
	}

	if (log->nr_overflow)
		log->nr_overflow = 0;

	/* Clear events */
	log->clear_idx += pl->nr_recs;
	return 0;
}

/* Replay all mock events: rewind every log and re-signal the event status */
static void cxl_mock_event_trigger(struct device *dev)
{
	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
	struct mock_event_store *mes = &mdata->mes;
	int i;

	for (i = CXL_EVENT_TYPE_INFO; i < CXL_EVENT_TYPE_MAX; i++) {
		struct mock_event_log *log;

		log = event_find_log(dev, i);
		if (log)
			event_reset_log(log);
	}

	cxl_mem_get_event_records(mes->cxlds, mes->ev_status);
}

/* Generic (vendor-specific UUID) record flagged as "maintenance needed" */
struct cxl_event_record_raw maint_needed = {
	.hdr = {
		.id = UUID_INIT(0xBA5EBA11, 0xABCD, 0xEFEB,
				0xa5, 0x5a, 0xa5, 0x5a, 0xa5, 0xa5, 0x5a, 0xa5),
		.length = sizeof(struct cxl_event_record_raw),
		.flags[0] = CXL_EVENT_RECORD_FLAG_MAINT_NEEDED,
		/* .handle = Set dynamically */
		.related_handle = cpu_to_le16(0xa5b6),
	},
	.data = { 0xDE, 0xAD, 0xBE, 0xEF },
};

/* Generic record flagged as "hardware replacement needed" */
struct cxl_event_record_raw hardware_replace = {
	.hdr = {
		.id = UUID_INIT(0xABCDEFEB, 0xBA11, 0xBA5E,
				0xa5, 0x5a, 0xa5, 0x5a, 0xa5, 0xa5, 0x5a, 0xa5),
		.length = sizeof(struct cxl_event_record_raw),
		.flags[0] = CXL_EVENT_RECORD_FLAG_HW_REPLACE,
		/* .handle = Set dynamically */
		.related_handle = cpu_to_le16(0xb6a5),
	},
	.data = { 0xDE, 0xAD, 0xBE, 0xEF },
};

/* General Media event record fixture */
struct cxl_event_gen_media gen_media = {
	.hdr = {
		.id = UUID_INIT(0xfbcd0a77, 0xc260, 0x417f,
				0x85, 0xa9, 0x08, 0x8b, 0x16, 0x21, 0xeb, 0xa6),
		.length = sizeof(struct cxl_event_gen_media),
		.flags[0] = CXL_EVENT_RECORD_FLAG_PERMANENT,
		/* .handle = Set dynamically */
		.related_handle = cpu_to_le16(0),
	},
	.phys_addr = cpu_to_le64(0x2000),
	.descriptor = CXL_GMER_EVT_DESC_UNCORECTABLE_EVENT,
	.type = CXL_GMER_MEM_EVT_TYPE_DATA_PATH_ERROR,
	.transaction_type = CXL_GMER_TRANS_HOST_WRITE,
	/* .validity_flags = <set below> */
	.channel = 1,
	.rank = 30
};

/* DRAM event record fixture */
struct cxl_event_dram dram = {
	.hdr = {
		.id = UUID_INIT(0x601dcbb3, 0x9c06, 0x4eab,
				0xb8, 0xaf, 0x4e, 0x9b, 0xfb, 0x5c, 0x96, 0x24),
		.length = sizeof(struct cxl_event_dram),
		.flags[0] = CXL_EVENT_RECORD_FLAG_PERF_DEGRADED,
		/* .handle = Set dynamically */
		.related_handle = cpu_to_le16(0),
	},
	.phys_addr = cpu_to_le64(0x8000),
	.descriptor = CXL_GMER_EVT_DESC_THRESHOLD_EVENT,
	.type = CXL_GMER_MEM_EVT_TYPE_INV_ADDR,
	.transaction_type = CXL_GMER_TRANS_INTERNAL_MEDIA_SCRUB,
	/* .validity_flags = <set below> */
	.channel = 1,
	.bank_group = 5,
	.bank = 2,
	.column = {0xDE, 0xAD},
};

/* Memory Module event record fixture */
struct cxl_event_mem_module mem_module = {
	.hdr = {
		.id = UUID_INIT(0xfe927475, 0xdd59, 0x4339,
				0xa5, 0x86, 0x79, 0xba, 0xb1, 0x13, 0xb7, 0x74),
		.length = sizeof(struct cxl_event_mem_module),
		/* .handle = Set dynamically */
		.related_handle = cpu_to_le16(0),
	},
	.event_type = CXL_MMER_TEMP_CHANGE,
	.info = {
		.health_status = CXL_DHI_HS_PERFORMANCE_DEGRADED,
		.media_status = CXL_DHI_MS_ALL_DATA_LOST,
		.add_status = (CXL_DHI_AS_CRITICAL << 2) |
			      (CXL_DHI_AS_WARNING << 4) |
			      (CXL_DHI_AS_WARNING << 5),
		.device_temp = { 0xDE, 0xAD},
		.dirty_shutdown_cnt = { 0xde, 0xad, 0xbe, 0xef },
		.cor_vol_err_cnt = { 0xde, 0xad, 0xbe, 0xef },
		.cor_per_err_cnt = { 0xde, 0xad, 0xbe, 0xef },
	}
};

/* Mock Set Timestamp handler: record the host-provided timestamp */
static int mock_set_timestamp(struct cxl_dev_state *cxlds,
			      struct cxl_mbox_cmd *cmd)
{
	struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
	struct cxl_mbox_set_timestamp_in *ts = cmd->payload_in;

	if (cmd->size_in != sizeof(*ts))
		return -EINVAL;

	if (cmd->size_out != 0)
		return -EINVAL;

	mdata->timestamp = le64_to_cpu(ts->timestamp);
	return 0;
}

/*
 * Populate the mock logs: a few INFO events, enough FAIL events to
 * overflow that log (capacity CXL_TEST_EVENT_CNT_MAX), and two FATAL
 * events; set the matching event-status bits.
 */
static void cxl_mock_add_event_logs(struct mock_event_store *mes)
{
	put_unaligned_le16(CXL_GMER_VALID_CHANNEL | CXL_GMER_VALID_RANK,
			   &gen_media.validity_flags);

	put_unaligned_le16(CXL_DER_VALID_CHANNEL | CXL_DER_VALID_BANK_GROUP |
			   CXL_DER_VALID_BANK | CXL_DER_VALID_COLUMN,
			   &dram.validity_flags);

	mes_add_event(mes, CXL_EVENT_TYPE_INFO, &maint_needed);
	mes_add_event(mes, CXL_EVENT_TYPE_INFO,
		      (struct cxl_event_record_raw *)&gen_media);
	mes_add_event(mes, CXL_EVENT_TYPE_INFO,
		      (struct cxl_event_record_raw *)&mem_module);
	mes->ev_status |= CXLDEV_EVENT_STATUS_INFO;

	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &maint_needed);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
		      (struct cxl_event_record_raw *)&dram);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
		      (struct cxl_event_record_raw *)&gen_media);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
		      (struct cxl_event_record_raw *)&mem_module);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
		      (struct cxl_event_record_raw *)&dram);
	/* Overflow this log */
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes,
CXL_EVENT_TYPE_FAIL, &hardware_replace); 449 mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace); 450 mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace); 451 mes->ev_status |= CXLDEV_EVENT_STATUS_FAIL; 452 453 mes_add_event(mes, CXL_EVENT_TYPE_FATAL, &hardware_replace); 454 mes_add_event(mes, CXL_EVENT_TYPE_FATAL, 455 (struct cxl_event_record_raw *)&dram); 456 mes->ev_status |= CXLDEV_EVENT_STATUS_FATAL; 457 } 458 459 static int mock_gsl(struct cxl_mbox_cmd *cmd) 460 { 461 if (cmd->size_out < sizeof(mock_gsl_payload)) 462 return -EINVAL; 463 464 memcpy(cmd->payload_out, &mock_gsl_payload, sizeof(mock_gsl_payload)); 465 cmd->size_out = sizeof(mock_gsl_payload); 466 467 return 0; 468 } 469 470 static int mock_get_log(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd) 471 { 472 struct cxl_mbox_get_log *gl = cmd->payload_in; 473 u32 offset = le32_to_cpu(gl->offset); 474 u32 length = le32_to_cpu(gl->length); 475 uuid_t uuid = DEFINE_CXL_CEL_UUID; 476 void *data = &mock_cel; 477 478 if (cmd->size_in < sizeof(*gl)) 479 return -EINVAL; 480 if (length > cxlds->payload_size) 481 return -EINVAL; 482 if (offset + length > sizeof(mock_cel)) 483 return -EINVAL; 484 if (!uuid_equal(&gl->uuid, &uuid)) 485 return -EINVAL; 486 if (length > cmd->size_out) 487 return -EINVAL; 488 489 memcpy(cmd->payload_out, data + offset, length); 490 491 return 0; 492 } 493 494 static int mock_rcd_id(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd) 495 { 496 struct cxl_mbox_identify id = { 497 .fw_revision = { "mock fw v1 " }, 498 .total_capacity = 499 cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER), 500 .volatile_capacity = 501 cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER), 502 }; 503 504 if (cmd->size_out < sizeof(id)) 505 return -EINVAL; 506 507 memcpy(cmd->payload_out, &id, sizeof(id)); 508 509 return 0; 510 } 511 512 static int mock_id(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd) 513 { 514 struct cxl_mbox_identify id = { 515 .fw_revision = { "mock fw v1 " }, 
		.lsa_size = cpu_to_le32(LSA_SIZE),
		.partition_align =
			cpu_to_le64(SZ_256M / CXL_CAPACITY_MULTIPLIER),
		.total_capacity =
			cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER),
		.inject_poison_limit = cpu_to_le16(MOCK_INJECT_TEST_MAX),
	};

	put_unaligned_le24(CXL_POISON_LIST_MAX, id.poison_list_max_mer);

	if (cmd->size_out < sizeof(id))
		return -EINVAL;

	memcpy(cmd->payload_out, &id, sizeof(id));

	return 0;
}

/* Mock Get Partition Info: device split 50/50 volatile/persistent */
static int mock_partition_info(struct cxl_dev_state *cxlds,
			       struct cxl_mbox_cmd *cmd)
{
	struct cxl_mbox_get_partition_info pi = {
		.active_volatile_cap =
			cpu_to_le64(DEV_SIZE / 2 / CXL_CAPACITY_MULTIPLIER),
		.active_persistent_cap =
			cpu_to_le64(DEV_SIZE / 2 / CXL_CAPACITY_MULTIPLIER),
	};

	if (cmd->size_out < sizeof(pi))
		return -EINVAL;

	memcpy(cmd->payload_out, &pi, sizeof(pi));

	return 0;
}

/* Mock Get Security State: return the raw security_state bit set */
static int mock_get_security_state(struct cxl_dev_state *cxlds,
				   struct cxl_mbox_cmd *cmd)
{
	struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);

	if (cmd->size_in)
		return -EINVAL;

	if (cmd->size_out != sizeof(u32))
		return -EINVAL;

	memcpy(cmd->payload_out, &mdata->security_state, sizeof(u32));

	return 0;
}

/*
 * Count a failed master-passphrase attempt; latch MASTER_PLIMIT once
 * PASS_TRY_LIMIT attempts have failed.
 */
static void master_plimit_check(struct cxl_mockmem_data *mdata)
{
	if (mdata->master_limit == PASS_TRY_LIMIT)
		return;
	mdata->master_limit++;
	if (mdata->master_limit == PASS_TRY_LIMIT)
		mdata->security_state |= CXL_PMEM_SEC_STATE_MASTER_PLIMIT;
}

/*
 * Count a failed user-passphrase attempt; latch USER_PLIMIT once
 * PASS_TRY_LIMIT attempts have failed.
 */
static void user_plimit_check(struct cxl_mockmem_data *mdata)
{
	if (mdata->user_limit == PASS_TRY_LIMIT)
		return;
	mdata->user_limit++;
	if (mdata->user_limit == PASS_TRY_LIMIT)
		mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PLIMIT;
}

/*
 * Mock Set Passphrase handler.  Rejects the operation when frozen or when
 * the relevant attempt limit has latched; otherwise verifies the old
 * passphrase before installing the new one.
 */
static int mock_set_passphrase(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
{
	struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
	struct cxl_set_pass *set_pass;

	if (cmd->size_in != sizeof(*set_pass))
		return -EINVAL;

	if (cmd->size_out != 0)
		return -EINVAL;

	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	set_pass = cmd->payload_in;
	switch (set_pass->type) {
	case CXL_PMEM_SEC_PASS_MASTER:
		if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT) {
			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
			return -ENXIO;
		}
		/*
		 * CXL spec rev3.0 8.2.9.8.6.2, The master pasphrase shall only be set in
		 * the security disabled state when the user passphrase is not set.
		 */
		if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
			return -ENXIO;
		}
		if (memcmp(mdata->master_pass, set_pass->old_pass, NVDIMM_PASSPHRASE_LEN)) {
			master_plimit_check(mdata);
			cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
			return -ENXIO;
		}
		memcpy(mdata->master_pass, set_pass->new_pass, NVDIMM_PASSPHRASE_LEN);
		mdata->security_state |= CXL_PMEM_SEC_STATE_MASTER_PASS_SET;
		return 0;

	case CXL_PMEM_SEC_PASS_USER:
		if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT) {
			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
			return -ENXIO;
		}
		if (memcmp(mdata->user_pass, set_pass->old_pass, NVDIMM_PASSPHRASE_LEN)) {
			user_plimit_check(mdata);
			cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
			return -ENXIO;
		}
		memcpy(mdata->user_pass, set_pass->new_pass, NVDIMM_PASSPHRASE_LEN);
		mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PASS_SET;
		return 0;

	default:
		cmd->return_code = CXL_MBOX_CMD_RC_INPUT;
	}
	return -EINVAL;
}

/*
 * Mock Disable Passphrase handler: verify the supplied passphrase, then
 * clear the stored passphrase, its attempt counter, and the related
 * security-state bits (user disable also unlocks).
 */
static int mock_disable_passphrase(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
{
	struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
	struct cxl_disable_pass *dis_pass;

	if (cmd->size_in != sizeof(*dis_pass))
		return -EINVAL;

	if (cmd->size_out != 0)
		return -EINVAL;

	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	dis_pass = cmd->payload_in;
	switch (dis_pass->type) {
	case CXL_PMEM_SEC_PASS_MASTER:
		if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT) {
			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
			return -ENXIO;
		}

		if (!(mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PASS_SET)) {
			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
			return -ENXIO;
		}

		if (memcmp(dis_pass->pass, mdata->master_pass, NVDIMM_PASSPHRASE_LEN)) {
			master_plimit_check(mdata);
			cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
			return -ENXIO;
		}

		mdata->master_limit = 0;
		memset(mdata->master_pass, 0, NVDIMM_PASSPHRASE_LEN);
		mdata->security_state &= ~CXL_PMEM_SEC_STATE_MASTER_PASS_SET;
		return 0;

	case CXL_PMEM_SEC_PASS_USER:
		if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT) {
			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
			return -ENXIO;
		}

		if (!(mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET)) {
			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
			return -ENXIO;
		}

		if (memcmp(dis_pass->pass, mdata->user_pass, NVDIMM_PASSPHRASE_LEN)) {
			user_plimit_check(mdata);
			cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
			return -ENXIO;
		}

		mdata->user_limit = 0;
		memset(mdata->user_pass, 0, NVDIMM_PASSPHRASE_LEN);
		mdata->security_state &= ~(CXL_PMEM_SEC_STATE_USER_PASS_SET |
					   CXL_PMEM_SEC_STATE_LOCKED);
		return 0;

	default:
		cmd->return_code = CXL_MBOX_CMD_RC_INPUT;
		return -EINVAL;
	}

	/* NOTE(review): unreachable — every switch case returns above */
	return 0;
}

/* Mock Freeze Security State: latch FROZEN (idempotent) */
static int mock_freeze_security(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
{
	struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);

	if (cmd->size_in != 0)
		return -EINVAL;

	if (cmd->size_out != 0)
		return -EINVAL;

	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN)
		return 0;

	mdata->security_state |= CXL_PMEM_SEC_STATE_FROZEN;
	return 0;
}

/*
 * Mock Unlock handler: only valid when a user passphrase is set, the
 * attempt limit has not latched, the device is locked, and not frozen.
 * A wrong passphrase counts toward the attempt limit.
 */
static int mock_unlock_security(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
{
	struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);

	if (cmd->size_in != NVDIMM_PASSPHRASE_LEN)
		return -EINVAL;

	if (cmd->size_out != 0)
		return -EINVAL;

	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	if (!(mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET)) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	if (!(mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED)) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	if (memcmp(cmd->payload_in, mdata->user_pass, NVDIMM_PASSPHRASE_LEN)) {
		if (++mdata->user_limit == PASS_TRY_LIMIT)
			mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PLIMIT;
		cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
		return -ENXIO;
	}

	mdata->user_limit = 0;
	mdata->security_state &= ~CXL_PMEM_SEC_STATE_LOCKED;
	return 0;
}

/*
 * Mock Passphrase Secure Erase handler: verify the passphrase of the
 * requested type (subject to FROZEN and attempt-limit gates) and reset
 * the related security state, emulating an erase.
 */
static int mock_passphrase_secure_erase(struct cxl_dev_state *cxlds,
					struct cxl_mbox_cmd *cmd)
{
	struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
	struct cxl_pass_erase *erase;

	if (cmd->size_in != sizeof(*erase))
		return -EINVAL;

	if (cmd->size_out != 0)
		return -EINVAL;

	erase = cmd->payload_in;
	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT &&
	    erase->type == CXL_PMEM_SEC_PASS_USER) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT &&
	    erase->type == CXL_PMEM_SEC_PASS_MASTER) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	switch (erase->type) {
	case CXL_PMEM_SEC_PASS_MASTER:
		/*
		 * The spec does not clearly define the behavior of the scenario
		 * where a master passphrase is passed in while the master
		 * passphrase is not set and user passphrase is not set. The
		 * code will take the assumption that it will behave the same
		 * as a CXL secure erase command without passphrase (0x4401).
		 */
		if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PASS_SET) {
			if (memcmp(mdata->master_pass, erase->pass,
				   NVDIMM_PASSPHRASE_LEN)) {
				master_plimit_check(mdata);
				cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
				return -ENXIO;
			}
			mdata->master_limit = 0;
			mdata->user_limit = 0;
			mdata->security_state &= ~CXL_PMEM_SEC_STATE_USER_PASS_SET;
			memset(mdata->user_pass, 0, NVDIMM_PASSPHRASE_LEN);
			mdata->security_state &= ~CXL_PMEM_SEC_STATE_LOCKED;
		} else {
			/*
			 * CXL rev3 8.2.9.8.6.3 Disable Passphrase
			 * When master passphrase is disabled, the device shall
			 * return Invalid Input for the Passphrase Secure Erase
			 * command with master passphrase.
			 */
			return -EINVAL;
		}
		/* Scramble encryption keys so that data is effectively erased */
		break;
	case CXL_PMEM_SEC_PASS_USER:
		/*
		 * The spec does not clearly define the behavior of the scenario
		 * where a user passphrase is passed in while the user
		 * passphrase is not set. The code will take the assumption that
		 * it will behave the same as a CXL secure erase command without
		 * passphrase (0x4401).
845 */ 846 if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) { 847 if (memcmp(mdata->user_pass, erase->pass, 848 NVDIMM_PASSPHRASE_LEN)) { 849 user_plimit_check(mdata); 850 cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE; 851 return -ENXIO; 852 } 853 mdata->user_limit = 0; 854 mdata->security_state &= ~CXL_PMEM_SEC_STATE_USER_PASS_SET; 855 memset(mdata->user_pass, 0, NVDIMM_PASSPHRASE_LEN); 856 } 857 858 /* 859 * CXL rev3 Table 8-118 860 * If user passphrase is not set or supported by device, current 861 * passphrase value is ignored. Will make the assumption that 862 * the operation will proceed as secure erase w/o passphrase 863 * since spec is not explicit. 864 */ 865 866 /* Scramble encryption keys so that data is effectively erased */ 867 break; 868 default: 869 return -EINVAL; 870 } 871 872 return 0; 873 } 874 875 static int mock_get_lsa(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd) 876 { 877 struct cxl_mbox_get_lsa *get_lsa = cmd->payload_in; 878 struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev); 879 void *lsa = mdata->lsa; 880 u32 offset, length; 881 882 if (sizeof(*get_lsa) > cmd->size_in) 883 return -EINVAL; 884 offset = le32_to_cpu(get_lsa->offset); 885 length = le32_to_cpu(get_lsa->length); 886 if (offset + length > LSA_SIZE) 887 return -EINVAL; 888 if (length > cmd->size_out) 889 return -EINVAL; 890 891 memcpy(cmd->payload_out, lsa + offset, length); 892 return 0; 893 } 894 895 static int mock_set_lsa(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd) 896 { 897 struct cxl_mbox_set_lsa *set_lsa = cmd->payload_in; 898 struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev); 899 void *lsa = mdata->lsa; 900 u32 offset, length; 901 902 if (sizeof(*set_lsa) > cmd->size_in) 903 return -EINVAL; 904 offset = le32_to_cpu(set_lsa->offset); 905 length = cmd->size_in - sizeof(*set_lsa); 906 if (offset + length > LSA_SIZE) 907 return -EINVAL; 908 909 memcpy(lsa + offset, &set_lsa->data[0], length); 910 return 0; 911 } 912 913 
static int mock_health_info(struct cxl_dev_state *cxlds, 914 struct cxl_mbox_cmd *cmd) 915 { 916 struct cxl_mbox_health_info health_info = { 917 /* set flags for maint needed, perf degraded, hw replacement */ 918 .health_status = 0x7, 919 /* set media status to "All Data Lost" */ 920 .media_status = 0x3, 921 /* 922 * set ext_status flags for: 923 * ext_life_used: normal, 924 * ext_temperature: critical, 925 * ext_corrected_volatile: warning, 926 * ext_corrected_persistent: normal, 927 */ 928 .ext_status = 0x18, 929 .life_used = 15, 930 .temperature = cpu_to_le16(25), 931 .dirty_shutdowns = cpu_to_le32(10), 932 .volatile_errors = cpu_to_le32(20), 933 .pmem_errors = cpu_to_le32(30), 934 }; 935 936 if (cmd->size_out < sizeof(health_info)) 937 return -EINVAL; 938 939 memcpy(cmd->payload_out, &health_info, sizeof(health_info)); 940 return 0; 941 } 942 943 static struct mock_poison { 944 struct cxl_dev_state *cxlds; 945 u64 dpa; 946 } mock_poison_list[MOCK_INJECT_TEST_MAX]; 947 948 static struct cxl_mbox_poison_out * 949 cxl_get_injected_po(struct cxl_dev_state *cxlds, u64 offset, u64 length) 950 { 951 struct cxl_mbox_poison_out *po; 952 int nr_records = 0; 953 u64 dpa; 954 955 po = kzalloc(struct_size(po, record, poison_inject_dev_max), GFP_KERNEL); 956 if (!po) 957 return NULL; 958 959 for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) { 960 if (mock_poison_list[i].cxlds != cxlds) 961 continue; 962 if (mock_poison_list[i].dpa < offset || 963 mock_poison_list[i].dpa > offset + length - 1) 964 continue; 965 966 dpa = mock_poison_list[i].dpa + CXL_POISON_SOURCE_INJECTED; 967 po->record[nr_records].address = cpu_to_le64(dpa); 968 po->record[nr_records].length = cpu_to_le32(1); 969 nr_records++; 970 if (nr_records == poison_inject_dev_max) 971 break; 972 } 973 974 /* Always return count, even when zero */ 975 po->count = cpu_to_le16(nr_records); 976 977 return po; 978 } 979 980 static int mock_get_poison(struct cxl_dev_state *cxlds, 981 struct cxl_mbox_cmd *cmd) 982 { 983 
struct cxl_mbox_poison_in *pi = cmd->payload_in; 984 struct cxl_mbox_poison_out *po; 985 u64 offset = le64_to_cpu(pi->offset); 986 u64 length = le64_to_cpu(pi->length); 987 int nr_records; 988 989 po = cxl_get_injected_po(cxlds, offset, length); 990 if (!po) 991 return -ENOMEM; 992 nr_records = le16_to_cpu(po->count); 993 memcpy(cmd->payload_out, po, struct_size(po, record, nr_records)); 994 cmd->size_out = struct_size(po, record, nr_records); 995 kfree(po); 996 997 return 0; 998 } 999 1000 static bool mock_poison_dev_max_injected(struct cxl_dev_state *cxlds) 1001 { 1002 int count = 0; 1003 1004 for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) { 1005 if (mock_poison_list[i].cxlds == cxlds) 1006 count++; 1007 } 1008 return (count >= poison_inject_dev_max); 1009 } 1010 1011 static bool mock_poison_add(struct cxl_dev_state *cxlds, u64 dpa) 1012 { 1013 if (mock_poison_dev_max_injected(cxlds)) { 1014 dev_dbg(cxlds->dev, 1015 "Device poison injection limit has been reached: %d\n", 1016 MOCK_INJECT_DEV_MAX); 1017 return false; 1018 } 1019 1020 for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) { 1021 if (!mock_poison_list[i].cxlds) { 1022 mock_poison_list[i].cxlds = cxlds; 1023 mock_poison_list[i].dpa = dpa; 1024 return true; 1025 } 1026 } 1027 dev_dbg(cxlds->dev, 1028 "Mock test poison injection limit has been reached: %d\n", 1029 MOCK_INJECT_TEST_MAX); 1030 1031 return false; 1032 } 1033 1034 static bool mock_poison_found(struct cxl_dev_state *cxlds, u64 dpa) 1035 { 1036 for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) { 1037 if (mock_poison_list[i].cxlds == cxlds && 1038 mock_poison_list[i].dpa == dpa) 1039 return true; 1040 } 1041 return false; 1042 } 1043 1044 static int mock_inject_poison(struct cxl_dev_state *cxlds, 1045 struct cxl_mbox_cmd *cmd) 1046 { 1047 struct cxl_mbox_inject_poison *pi = cmd->payload_in; 1048 u64 dpa = le64_to_cpu(pi->address); 1049 1050 if (mock_poison_found(cxlds, dpa)) { 1051 /* Not an error to inject poison if already poisoned */ 1052 
dev_dbg(cxlds->dev, "DPA: 0x%llx already poisoned\n", dpa); 1053 return 0; 1054 } 1055 if (!mock_poison_add(cxlds, dpa)) 1056 return -ENXIO; 1057 1058 return 0; 1059 } 1060 1061 static bool mock_poison_del(struct cxl_dev_state *cxlds, u64 dpa) 1062 { 1063 for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) { 1064 if (mock_poison_list[i].cxlds == cxlds && 1065 mock_poison_list[i].dpa == dpa) { 1066 mock_poison_list[i].cxlds = NULL; 1067 return true; 1068 } 1069 } 1070 return false; 1071 } 1072 1073 static int mock_clear_poison(struct cxl_dev_state *cxlds, 1074 struct cxl_mbox_cmd *cmd) 1075 { 1076 struct cxl_mbox_clear_poison *pi = cmd->payload_in; 1077 u64 dpa = le64_to_cpu(pi->address); 1078 1079 /* 1080 * A real CXL device will write pi->write_data to the address 1081 * being cleared. In this mock, just delete this address from 1082 * the mock poison list. 1083 */ 1084 if (!mock_poison_del(cxlds, dpa)) 1085 dev_dbg(cxlds->dev, "DPA: 0x%llx not in poison list\n", dpa); 1086 1087 return 0; 1088 } 1089 1090 static bool mock_poison_list_empty(void) 1091 { 1092 for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) { 1093 if (mock_poison_list[i].cxlds) 1094 return false; 1095 } 1096 return true; 1097 } 1098 1099 static ssize_t poison_inject_max_show(struct device_driver *drv, char *buf) 1100 { 1101 return sysfs_emit(buf, "%u\n", poison_inject_dev_max); 1102 } 1103 1104 static ssize_t poison_inject_max_store(struct device_driver *drv, 1105 const char *buf, size_t len) 1106 { 1107 int val; 1108 1109 if (kstrtoint(buf, 0, &val) < 0) 1110 return -EINVAL; 1111 1112 if (!mock_poison_list_empty()) 1113 return -EBUSY; 1114 1115 if (val <= MOCK_INJECT_TEST_MAX) 1116 poison_inject_dev_max = val; 1117 else 1118 return -EINVAL; 1119 1120 return len; 1121 } 1122 1123 static DRIVER_ATTR_RW(poison_inject_max); 1124 1125 static struct attribute *cxl_mock_mem_core_attrs[] = { 1126 &driver_attr_poison_inject_max.attr, 1127 NULL 1128 }; 1129 ATTRIBUTE_GROUPS(cxl_mock_mem_core); 1130 1131 static int 
cxl_mock_mbox_send(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd) 1132 { 1133 struct device *dev = cxlds->dev; 1134 int rc = -EIO; 1135 1136 switch (cmd->opcode) { 1137 case CXL_MBOX_OP_SET_TIMESTAMP: 1138 rc = mock_set_timestamp(cxlds, cmd); 1139 break; 1140 case CXL_MBOX_OP_GET_SUPPORTED_LOGS: 1141 rc = mock_gsl(cmd); 1142 break; 1143 case CXL_MBOX_OP_GET_LOG: 1144 rc = mock_get_log(cxlds, cmd); 1145 break; 1146 case CXL_MBOX_OP_IDENTIFY: 1147 if (cxlds->rcd) 1148 rc = mock_rcd_id(cxlds, cmd); 1149 else 1150 rc = mock_id(cxlds, cmd); 1151 break; 1152 case CXL_MBOX_OP_GET_LSA: 1153 rc = mock_get_lsa(cxlds, cmd); 1154 break; 1155 case CXL_MBOX_OP_GET_PARTITION_INFO: 1156 rc = mock_partition_info(cxlds, cmd); 1157 break; 1158 case CXL_MBOX_OP_GET_EVENT_RECORD: 1159 rc = mock_get_event(cxlds, cmd); 1160 break; 1161 case CXL_MBOX_OP_CLEAR_EVENT_RECORD: 1162 rc = mock_clear_event(cxlds, cmd); 1163 break; 1164 case CXL_MBOX_OP_SET_LSA: 1165 rc = mock_set_lsa(cxlds, cmd); 1166 break; 1167 case CXL_MBOX_OP_GET_HEALTH_INFO: 1168 rc = mock_health_info(cxlds, cmd); 1169 break; 1170 case CXL_MBOX_OP_GET_SECURITY_STATE: 1171 rc = mock_get_security_state(cxlds, cmd); 1172 break; 1173 case CXL_MBOX_OP_SET_PASSPHRASE: 1174 rc = mock_set_passphrase(cxlds, cmd); 1175 break; 1176 case CXL_MBOX_OP_DISABLE_PASSPHRASE: 1177 rc = mock_disable_passphrase(cxlds, cmd); 1178 break; 1179 case CXL_MBOX_OP_FREEZE_SECURITY: 1180 rc = mock_freeze_security(cxlds, cmd); 1181 break; 1182 case CXL_MBOX_OP_UNLOCK: 1183 rc = mock_unlock_security(cxlds, cmd); 1184 break; 1185 case CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE: 1186 rc = mock_passphrase_secure_erase(cxlds, cmd); 1187 break; 1188 case CXL_MBOX_OP_GET_POISON: 1189 rc = mock_get_poison(cxlds, cmd); 1190 break; 1191 case CXL_MBOX_OP_INJECT_POISON: 1192 rc = mock_inject_poison(cxlds, cmd); 1193 break; 1194 case CXL_MBOX_OP_CLEAR_POISON: 1195 rc = mock_clear_poison(cxlds, cmd); 1196 break; 1197 default: 1198 break; 1199 } 1200 1201 dev_dbg(dev, 
"opcode: %#x sz_in: %zd sz_out: %zd rc: %d\n", cmd->opcode, 1202 cmd->size_in, cmd->size_out, rc); 1203 1204 return rc; 1205 } 1206 1207 static void label_area_release(void *lsa) 1208 { 1209 vfree(lsa); 1210 } 1211 1212 static bool is_rcd(struct platform_device *pdev) 1213 { 1214 const struct platform_device_id *id = platform_get_device_id(pdev); 1215 1216 return !!id->driver_data; 1217 } 1218 1219 static ssize_t event_trigger_store(struct device *dev, 1220 struct device_attribute *attr, 1221 const char *buf, size_t count) 1222 { 1223 cxl_mock_event_trigger(dev); 1224 return count; 1225 } 1226 static DEVICE_ATTR_WO(event_trigger); 1227 1228 static int cxl_mock_mem_probe(struct platform_device *pdev) 1229 { 1230 struct device *dev = &pdev->dev; 1231 struct cxl_memdev *cxlmd; 1232 struct cxl_dev_state *cxlds; 1233 struct cxl_mockmem_data *mdata; 1234 int rc; 1235 1236 mdata = devm_kzalloc(dev, sizeof(*mdata), GFP_KERNEL); 1237 if (!mdata) 1238 return -ENOMEM; 1239 dev_set_drvdata(dev, mdata); 1240 1241 mdata->lsa = vmalloc(LSA_SIZE); 1242 if (!mdata->lsa) 1243 return -ENOMEM; 1244 rc = devm_add_action_or_reset(dev, label_area_release, mdata->lsa); 1245 if (rc) 1246 return rc; 1247 1248 cxlds = cxl_dev_state_create(dev); 1249 if (IS_ERR(cxlds)) 1250 return PTR_ERR(cxlds); 1251 1252 cxlds->serial = pdev->id; 1253 cxlds->mbox_send = cxl_mock_mbox_send; 1254 cxlds->payload_size = SZ_4K; 1255 cxlds->event.buf = (struct cxl_get_event_payload *) mdata->event_buf; 1256 if (is_rcd(pdev)) { 1257 cxlds->rcd = true; 1258 cxlds->component_reg_phys = CXL_RESOURCE_NONE; 1259 } 1260 1261 rc = cxl_enumerate_cmds(cxlds); 1262 if (rc) 1263 return rc; 1264 1265 rc = cxl_poison_state_init(cxlds); 1266 if (rc) 1267 return rc; 1268 1269 rc = cxl_set_timestamp(cxlds); 1270 if (rc) 1271 return rc; 1272 1273 cxlds->media_ready = true; 1274 rc = cxl_dev_state_identify(cxlds); 1275 if (rc) 1276 return rc; 1277 1278 rc = cxl_mem_create_range_info(cxlds); 1279 if (rc) 1280 return rc; 1281 1282 
mdata->mes.cxlds = cxlds; 1283 cxl_mock_add_event_logs(&mdata->mes); 1284 1285 cxlmd = devm_cxl_add_memdev(cxlds); 1286 if (IS_ERR(cxlmd)) 1287 return PTR_ERR(cxlmd); 1288 1289 cxl_mem_get_event_records(cxlds, CXLDEV_EVENT_STATUS_ALL); 1290 1291 return 0; 1292 } 1293 1294 static ssize_t security_lock_show(struct device *dev, 1295 struct device_attribute *attr, char *buf) 1296 { 1297 struct cxl_mockmem_data *mdata = dev_get_drvdata(dev); 1298 1299 return sysfs_emit(buf, "%u\n", 1300 !!(mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED)); 1301 } 1302 1303 static ssize_t security_lock_store(struct device *dev, struct device_attribute *attr, 1304 const char *buf, size_t count) 1305 { 1306 struct cxl_mockmem_data *mdata = dev_get_drvdata(dev); 1307 u32 mask = CXL_PMEM_SEC_STATE_FROZEN | CXL_PMEM_SEC_STATE_USER_PLIMIT | 1308 CXL_PMEM_SEC_STATE_MASTER_PLIMIT; 1309 int val; 1310 1311 if (kstrtoint(buf, 0, &val) < 0) 1312 return -EINVAL; 1313 1314 if (val == 1) { 1315 if (!(mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET)) 1316 return -ENXIO; 1317 mdata->security_state |= CXL_PMEM_SEC_STATE_LOCKED; 1318 mdata->security_state &= ~mask; 1319 } else { 1320 return -EINVAL; 1321 } 1322 return count; 1323 } 1324 1325 static DEVICE_ATTR_RW(security_lock); 1326 1327 static struct attribute *cxl_mock_mem_attrs[] = { 1328 &dev_attr_security_lock.attr, 1329 &dev_attr_event_trigger.attr, 1330 NULL 1331 }; 1332 ATTRIBUTE_GROUPS(cxl_mock_mem); 1333 1334 static const struct platform_device_id cxl_mock_mem_ids[] = { 1335 { .name = "cxl_mem", 0 }, 1336 { .name = "cxl_rcd", 1 }, 1337 { }, 1338 }; 1339 MODULE_DEVICE_TABLE(platform, cxl_mock_mem_ids); 1340 1341 static struct platform_driver cxl_mock_mem_driver = { 1342 .probe = cxl_mock_mem_probe, 1343 .id_table = cxl_mock_mem_ids, 1344 .driver = { 1345 .name = KBUILD_MODNAME, 1346 .dev_groups = cxl_mock_mem_groups, 1347 .groups = cxl_mock_mem_core_groups, 1348 }, 1349 }; 1350 1351 module_platform_driver(cxl_mock_mem_driver); 1352 
/* "GPL v2" matches the SPDX GPL-2.0-only identifier at the top of the file */
MODULE_LICENSE("GPL v2");
/* Import the CXL symbol namespace for the cxl core exports used above */
MODULE_IMPORT_NS(CXL);