xref: /openbmc/linux/tools/testing/cxl/test/mem.c (revision 06ba8020)
1 // SPDX-License-Identifier: GPL-2.0-only
2 // Copyright(c) 2021 Intel Corporation. All rights reserved.
3 
4 #include <linux/platform_device.h>
5 #include <linux/mod_devicetable.h>
6 #include <linux/module.h>
7 #include <linux/delay.h>
8 #include <linux/sizes.h>
9 #include <linux/bits.h>
10 #include <asm/unaligned.h>
11 #include <cxlmem.h>
12 
13 #include "trace.h"
14 
15 #define LSA_SIZE SZ_128K
16 #define DEV_SIZE SZ_2G
17 #define EFFECT(x) (1U << (x))
18 
19 #define MOCK_INJECT_DEV_MAX 8
20 #define MOCK_INJECT_TEST_MAX 128
21 
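/*
 * Per-device cap on injected poison records. Starts at MOCK_INJECT_DEV_MAX and
 * may be adjusted (only while no poison is injected) via the poison_inject_max
 * driver attribute defined below, up to MOCK_INJECT_TEST_MAX.
 */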
22 static unsigned int poison_inject_dev_max = MOCK_INJECT_DEV_MAX;
23 
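/*
 * Mock Command Effects Log (CEL): the command set this emulated memdev
 * advertises, with Set LSA flagged as having side effects (effect bits 1 and 2).
 */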
24 static struct cxl_cel_entry mock_cel[] = {
25 	{
26 		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_SUPPORTED_LOGS),
27 		.effect = cpu_to_le16(0),
28 	},
29 	{
30 		.opcode = cpu_to_le16(CXL_MBOX_OP_IDENTIFY),
31 		.effect = cpu_to_le16(0),
32 	},
33 	{
34 		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_LSA),
35 		.effect = cpu_to_le16(0),
36 	},
37 	{
38 		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_PARTITION_INFO),
39 		.effect = cpu_to_le16(0),
40 	},
41 	{
42 		.opcode = cpu_to_le16(CXL_MBOX_OP_SET_LSA),
43 		.effect = cpu_to_le16(EFFECT(1) | EFFECT(2)),
44 	},
45 	{
46 		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_HEALTH_INFO),
47 		.effect = cpu_to_le16(0),
48 	},
49 	{
50 		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_POISON),
51 		.effect = cpu_to_le16(0),
52 	},
53 	{
54 		.opcode = cpu_to_le16(CXL_MBOX_OP_INJECT_POISON),
55 		.effect = cpu_to_le16(0),
56 	},
57 	{
58 		.opcode = cpu_to_le16(CXL_MBOX_OP_CLEAR_POISON),
59 		.effect = cpu_to_le16(0),
60 	},
61 };
62 
63 /* See CXL 2.0 Table 181 Get Health Info Output Payload */
64 struct cxl_mbox_health_info {
65 	u8 health_status;
66 	u8 media_status;
67 	u8 ext_status;
68 	u8 life_used;
69 	__le16 temperature;
70 	__le32 dirty_shutdowns;
71 	__le32 volatile_errors;
72 	__le32 pmem_errors;
73 } __packed;
74 
75 static struct {
76 	struct cxl_mbox_get_supported_logs gsl;
77 	struct cxl_gsl_entry entry;
78 } mock_gsl_payload = {
79 	.gsl = {
80 		.entries = cpu_to_le16(1),
81 	},
82 	.entry = {
83 		.uuid = DEFINE_CXL_CEL_UUID,
84 		.size = cpu_to_le32(sizeof(mock_cel)),
85 	},
86 };
87 
88 #define PASS_TRY_LIMIT 3
89 
90 #define CXL_TEST_EVENT_CNT_MAX 15
91 
92 /* Number of events to return per Get Event Records call in this simulation. */
93 #define CXL_TEST_EVENT_CNT 3
94 
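/*
 * A mock event log: a fixed array of raw records with independent read
 * (cur_idx) and clear (clear_idx) cursors. nr_overflow counts records that did
 * not fit; overflow_reset preserves that count across event_reset_log().
 */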
95 struct mock_event_log {
96 	u16 clear_idx;
97 	u16 cur_idx;
98 	u16 nr_events;
99 	u16 nr_overflow;
100 	u16 overflow_reset;
101 	struct cxl_event_record_raw *events[CXL_TEST_EVENT_CNT_MAX];
102 };
103 
104 struct mock_event_store {
105 	struct cxl_dev_state *cxlds;
106 	struct mock_event_log mock_logs[CXL_EVENT_TYPE_MAX];
107 	u32 ev_status;
108 };
109 
110 struct cxl_mockmem_data {
111 	void *lsa;
112 	u32 security_state;
113 	u8 user_pass[NVDIMM_PASSPHRASE_LEN];
114 	u8 master_pass[NVDIMM_PASSPHRASE_LEN];
115 	int user_limit;
116 	int master_limit;
117 	struct mock_event_store mes;
118 	u8 event_buf[SZ_4K];
119 	u64 timestamp;
120 };
121 
122 static struct mock_event_log *event_find_log(struct device *dev, int log_type)
123 {
124 	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
125 
126 	if (log_type >= CXL_EVENT_TYPE_MAX)
127 		return NULL;
128 	return &mdata->mes.mock_logs[log_type];
129 }
130 
131 static struct cxl_event_record_raw *event_get_current(struct mock_event_log *log)
132 {
133 	return log->events[log->cur_idx];
134 }
135 
136 static void event_reset_log(struct mock_event_log *log)
137 {
138 	log->cur_idx = 0;
139 	log->clear_idx = 0;
140 	log->nr_overflow = log->overflow_reset;
141 }
142 
143 /* Handles can never be 0; use 1-based indexing for handles */
144 static u16 event_get_clear_handle(struct mock_event_log *log)
145 {
146 	return log->clear_idx + 1;
147 }
148 
149 /* Handles can never be 0; e.g. the record at cur_idx 0 is returned with handle 1 */
150 static __le16 event_get_cur_event_handle(struct mock_event_log *log)
151 {
152 	u16 cur_handle = log->cur_idx + 1;
153 
154 	return cpu_to_le16(cur_handle);
155 }
156 
157 static bool event_log_empty(struct mock_event_log *log)
158 {
159 	return log->cur_idx == log->nr_events;
160 }
161 
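/*
 * Queue an event on one of the mock logs. Once a log holds
 * CXL_TEST_EVENT_CNT_MAX records, further events are dropped and only counted
 * as overflow.
 */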
162 static void mes_add_event(struct mock_event_store *mes,
163 			  enum cxl_event_log_type log_type,
164 			  struct cxl_event_record_raw *event)
165 {
166 	struct mock_event_log *log;
167 
168 	if (WARN_ON(log_type >= CXL_EVENT_TYPE_MAX))
169 		return;
170 
171 	log = &mes->mock_logs[log_type];
172 
173 	if ((log->nr_events + 1) > CXL_TEST_EVENT_CNT_MAX) {
174 		log->nr_overflow++;
175 		log->overflow_reset = log->nr_overflow;
176 		return;
177 	}
178 
179 	log->events[log->nr_events] = event;
180 	log->nr_events++;
181 }
182 
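/*
 * Mock Get Event Records handler: return up to CXL_TEST_EVENT_CNT records per
 * call, set the MORE_RECORDS flag while unread entries remain, and report
 * overflow with synthesized "5s ago"/"1s ago" timestamps.
 */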
183 static int mock_get_event(struct cxl_dev_state *cxlds,
184 			  struct cxl_mbox_cmd *cmd)
185 {
186 	struct cxl_get_event_payload *pl;
187 	struct mock_event_log *log;
189 	u8 log_type;
190 	int i;
191 
192 	if (cmd->size_in != sizeof(log_type))
193 		return -EINVAL;
194 
195 	if (cmd->size_out < struct_size(pl, records, CXL_TEST_EVENT_CNT))
196 		return -EINVAL;
197 
198 	log_type = *((u8 *)cmd->payload_in);
199 	if (log_type >= CXL_EVENT_TYPE_MAX)
200 		return -EINVAL;
201 
202 	memset(cmd->payload_out, 0, cmd->size_out);
203 
204 	log = event_find_log(cxlds->dev, log_type);
205 	if (!log || event_log_empty(log))
206 		return 0;
207 
208 	pl = cmd->payload_out;
209 
210 	for (i = 0; i < CXL_TEST_EVENT_CNT && !event_log_empty(log); i++) {
211 		memcpy(&pl->records[i], event_get_current(log),
212 		       sizeof(pl->records[i]));
213 		pl->records[i].hdr.handle = event_get_cur_event_handle(log);
214 		log->cur_idx++;
215 	}
216 
217 	pl->record_count = cpu_to_le16(i);
218 	if (!event_log_empty(log))
219 		pl->flags |= CXL_GET_EVENT_FLAG_MORE_RECORDS;
220 
221 	if (log->nr_overflow) {
222 		u64 ns;
223 
224 		pl->flags |= CXL_GET_EVENT_FLAG_OVERFLOW;
225 		pl->overflow_err_count = cpu_to_le16(log->nr_overflow);
226 		ns = ktime_get_real_ns();
227 		ns -= 5000000000; /* 5s ago */
228 		pl->first_overflow_timestamp = cpu_to_le64(ns);
229 		ns = ktime_get_real_ns();
230 		ns -= 1000000000; /* 1s ago */
231 		pl->last_overflow_timestamp = cpu_to_le64(ns);
232 	}
233 
234 	return 0;
235 }
236 
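/*
 * Mock Clear Event Records handler: only records that have already been
 * returned may be cleared, and their handles must be presented in order.
 */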
237 static int mock_clear_event(struct cxl_dev_state *cxlds,
238 			    struct cxl_mbox_cmd *cmd)
239 {
240 	struct cxl_mbox_clear_event_payload *pl = cmd->payload_in;
241 	struct mock_event_log *log;
242 	u8 log_type = pl->event_log;
243 	u16 handle;
244 	int nr;
245 
246 	if (log_type >= CXL_EVENT_TYPE_MAX)
247 		return -EINVAL;
248 
249 	log = event_find_log(cxlds->dev, log_type);
250 	if (!log)
251 		return 0; /* No mock data in this log */
252 
253 	/*
254 	 * Strictly speaking, this is not invalid per the specification (the
255 	 * host could 'guess' handles and clear them in order). However, it is
256 	 * not good host behavior, so the mock treats it as an error.
257 	 */
258 	if (log->clear_idx + pl->nr_recs > log->cur_idx) {
259 		dev_err(cxlds->dev,
260 			"Attempting to clear more events than returned!\n");
261 		return -EINVAL;
262 	}
263 
264 	/* Check handle order prior to clearing events */
265 	for (nr = 0, handle = event_get_clear_handle(log);
266 	     nr < pl->nr_recs;
267 	     nr++, handle++) {
268 		if (handle != le16_to_cpu(pl->handles[nr])) {
269 			dev_err(cxlds->dev, "Clearing events out of order\n");
270 			return -EINVAL;
271 		}
272 	}
273 
274 	if (log->nr_overflow)
275 		log->nr_overflow = 0;
276 
277 	/* Clear events */
278 	log->clear_idx += pl->nr_recs;
279 	return 0;
280 }
281 
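/*
 * Rewind every mock log so its events can be replayed, then kick the CXL core
 * to re-read them; wired up to the event_trigger sysfs attribute below.
 */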
282 static void cxl_mock_event_trigger(struct device *dev)
283 {
284 	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
285 	struct mock_event_store *mes = &mdata->mes;
286 	int i;
287 
288 	for (i = CXL_EVENT_TYPE_INFO; i < CXL_EVENT_TYPE_MAX; i++) {
289 		struct mock_event_log *log;
290 
291 		log = event_find_log(dev, i);
292 		if (log)
293 			event_reset_log(log);
294 	}
295 
296 	cxl_mem_get_event_records(mes->cxlds, mes->ev_status);
297 }
298 
299 static struct cxl_event_record_raw maint_needed = {
300 	.hdr = {
301 		.id = UUID_INIT(0xBA5EBA11, 0xABCD, 0xEFEB,
302 				0xa5, 0x5a, 0xa5, 0x5a, 0xa5, 0xa5, 0x5a, 0xa5),
303 		.length = sizeof(struct cxl_event_record_raw),
304 		.flags[0] = CXL_EVENT_RECORD_FLAG_MAINT_NEEDED,
305 		/* .handle = Set dynamically */
306 		.related_handle = cpu_to_le16(0xa5b6),
307 	},
308 	.data = { 0xDE, 0xAD, 0xBE, 0xEF },
309 };
310 
311 static struct cxl_event_record_raw hardware_replace = {
312 	.hdr = {
313 		.id = UUID_INIT(0xABCDEFEB, 0xBA11, 0xBA5E,
314 				0xa5, 0x5a, 0xa5, 0x5a, 0xa5, 0xa5, 0x5a, 0xa5),
315 		.length = sizeof(struct cxl_event_record_raw),
316 		.flags[0] = CXL_EVENT_RECORD_FLAG_HW_REPLACE,
317 		/* .handle = Set dynamically */
318 		.related_handle = cpu_to_le16(0xb6a5),
319 	},
320 	.data = { 0xDE, 0xAD, 0xBE, 0xEF },
321 };
322 
323 static struct cxl_event_gen_media gen_media = {
324 	.hdr = {
325 		.id = UUID_INIT(0xfbcd0a77, 0xc260, 0x417f,
326 				0x85, 0xa9, 0x08, 0x8b, 0x16, 0x21, 0xeb, 0xa6),
327 		.length = sizeof(struct cxl_event_gen_media),
328 		.flags[0] = CXL_EVENT_RECORD_FLAG_PERMANENT,
329 		/* .handle = Set dynamically */
330 		.related_handle = cpu_to_le16(0),
331 	},
332 	.phys_addr = cpu_to_le64(0x2000),
333 	.descriptor = CXL_GMER_EVT_DESC_UNCORECTABLE_EVENT,
334 	.type = CXL_GMER_MEM_EVT_TYPE_DATA_PATH_ERROR,
335 	.transaction_type = CXL_GMER_TRANS_HOST_WRITE,
336 	/* .validity_flags = <set below> */
337 	.channel = 1,
338 	.rank = 30
339 };
340 
341 static struct cxl_event_dram dram = {
342 	.hdr = {
343 		.id = UUID_INIT(0x601dcbb3, 0x9c06, 0x4eab,
344 				0xb8, 0xaf, 0x4e, 0x9b, 0xfb, 0x5c, 0x96, 0x24),
345 		.length = sizeof(struct cxl_event_dram),
346 		.flags[0] = CXL_EVENT_RECORD_FLAG_PERF_DEGRADED,
347 		/* .handle = Set dynamically */
348 		.related_handle = cpu_to_le16(0),
349 	},
350 	.phys_addr = cpu_to_le64(0x8000),
351 	.descriptor = CXL_GMER_EVT_DESC_THRESHOLD_EVENT,
352 	.type = CXL_GMER_MEM_EVT_TYPE_INV_ADDR,
353 	.transaction_type = CXL_GMER_TRANS_INTERNAL_MEDIA_SCRUB,
354 	/* .validity_flags = <set below> */
355 	.channel = 1,
356 	.bank_group = 5,
357 	.bank = 2,
358 	.column = {0xDE, 0xAD},
359 };
360 
361 static struct cxl_event_mem_module mem_module = {
362 	.hdr = {
363 		.id = UUID_INIT(0xfe927475, 0xdd59, 0x4339,
364 				0xa5, 0x86, 0x79, 0xba, 0xb1, 0x13, 0xb7, 0x74),
365 		.length = sizeof(struct cxl_event_mem_module),
366 		/* .handle = Set dynamically */
367 		.related_handle = cpu_to_le16(0),
368 	},
369 	.event_type = CXL_MMER_TEMP_CHANGE,
370 	.info = {
371 		.health_status = CXL_DHI_HS_PERFORMANCE_DEGRADED,
372 		.media_status = CXL_DHI_MS_ALL_DATA_LOST,
373 		.add_status = (CXL_DHI_AS_CRITICAL << 2) |
374 			      (CXL_DHI_AS_WARNING << 4) |
375 			      (CXL_DHI_AS_WARNING << 5),
376 		.device_temp = { 0xDE, 0xAD},
377 		.dirty_shutdown_cnt = { 0xde, 0xad, 0xbe, 0xef },
378 		.cor_vol_err_cnt = { 0xde, 0xad, 0xbe, 0xef },
379 		.cor_per_err_cnt = { 0xde, 0xad, 0xbe, 0xef },
380 	}
381 };
382 
383 static int mock_set_timestamp(struct cxl_dev_state *cxlds,
384 			      struct cxl_mbox_cmd *cmd)
385 {
386 	struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
387 	struct cxl_mbox_set_timestamp_in *ts = cmd->payload_in;
388 
389 	if (cmd->size_in != sizeof(*ts))
390 		return -EINVAL;
391 
392 	if (cmd->size_out != 0)
393 		return -EINVAL;
394 
395 	mdata->timestamp = le64_to_cpu(ts->timestamp);
396 	return 0;
397 }
398 
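/*
 * Populate the mock logs: three informational events, a failure log that is
 * deliberately overfilled to exercise overflow handling, and two fatal events.
 */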
399 static void cxl_mock_add_event_logs(struct mock_event_store *mes)
400 {
401 	put_unaligned_le16(CXL_GMER_VALID_CHANNEL | CXL_GMER_VALID_RANK,
402 			   &gen_media.validity_flags);
403 
404 	put_unaligned_le16(CXL_DER_VALID_CHANNEL | CXL_DER_VALID_BANK_GROUP |
405 			   CXL_DER_VALID_BANK | CXL_DER_VALID_COLUMN,
406 			   &dram.validity_flags);
407 
408 	mes_add_event(mes, CXL_EVENT_TYPE_INFO, &maint_needed);
409 	mes_add_event(mes, CXL_EVENT_TYPE_INFO,
410 		      (struct cxl_event_record_raw *)&gen_media);
411 	mes_add_event(mes, CXL_EVENT_TYPE_INFO,
412 		      (struct cxl_event_record_raw *)&mem_module);
413 	mes->ev_status |= CXLDEV_EVENT_STATUS_INFO;
414 
415 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &maint_needed);
416 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
417 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
418 		      (struct cxl_event_record_raw *)&dram);
419 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
420 		      (struct cxl_event_record_raw *)&gen_media);
421 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
422 		      (struct cxl_event_record_raw *)&mem_module);
423 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
424 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
425 		      (struct cxl_event_record_raw *)&dram);
426 	/* Overflow this log */
427 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
428 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
429 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
430 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
431 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
432 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
433 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
434 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
435 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
436 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
437 	mes->ev_status |= CXLDEV_EVENT_STATUS_FAIL;
438 
439 	mes_add_event(mes, CXL_EVENT_TYPE_FATAL, &hardware_replace);
440 	mes_add_event(mes, CXL_EVENT_TYPE_FATAL,
441 		      (struct cxl_event_record_raw *)&dram);
442 	mes->ev_status |= CXLDEV_EVENT_STATUS_FATAL;
443 }
444 
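/* Mock Get Supported Logs: advertise a single log, the CEL defined above. */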
445 static int mock_gsl(struct cxl_mbox_cmd *cmd)
446 {
447 	if (cmd->size_out < sizeof(mock_gsl_payload))
448 		return -EINVAL;
449 
450 	memcpy(cmd->payload_out, &mock_gsl_payload, sizeof(mock_gsl_payload));
451 	cmd->size_out = sizeof(mock_gsl_payload);
452 
453 	return 0;
454 }
455 
456 static int mock_get_log(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
457 {
458 	struct cxl_mbox_get_log *gl = cmd->payload_in;
459 	u32 offset = le32_to_cpu(gl->offset);
460 	u32 length = le32_to_cpu(gl->length);
461 	uuid_t uuid = DEFINE_CXL_CEL_UUID;
462 	void *data = &mock_cel;
463 
464 	if (cmd->size_in < sizeof(*gl))
465 		return -EINVAL;
466 	if (length > cxlds->payload_size)
467 		return -EINVAL;
468 	if (offset + length > sizeof(mock_cel))
469 		return -EINVAL;
470 	if (!uuid_equal(&gl->uuid, &uuid))
471 		return -EINVAL;
472 	if (length > cmd->size_out)
473 		return -EINVAL;
474 
475 	memcpy(cmd->payload_out, data + offset, length);
476 
477 	return 0;
478 }
479 
480 static int mock_rcd_id(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
481 {
482 	struct cxl_mbox_identify id = {
483 		.fw_revision = { "mock fw v1 " },
484 		.total_capacity =
485 			cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER),
486 		.volatile_capacity =
487 			cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER),
488 	};
489 
490 	if (cmd->size_out < sizeof(id))
491 		return -EINVAL;
492 
493 	memcpy(cmd->payload_out, &id, sizeof(id));
494 
495 	return 0;
496 }
497 
498 static int mock_id(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
499 {
500 	struct cxl_mbox_identify id = {
501 		.fw_revision = { "mock fw v1 " },
502 		.lsa_size = cpu_to_le32(LSA_SIZE),
503 		.partition_align =
504 			cpu_to_le64(SZ_256M / CXL_CAPACITY_MULTIPLIER),
505 		.total_capacity =
506 			cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER),
507 		.inject_poison_limit = cpu_to_le16(MOCK_INJECT_TEST_MAX),
508 	};
509 
510 	put_unaligned_le24(CXL_POISON_LIST_MAX, id.poison_list_max_mer);
511 
512 	if (cmd->size_out < sizeof(id))
513 		return -EINVAL;
514 
515 	memcpy(cmd->payload_out, &id, sizeof(id));
516 
517 	return 0;
518 }
519 
520 static int mock_partition_info(struct cxl_dev_state *cxlds,
521 			       struct cxl_mbox_cmd *cmd)
522 {
523 	struct cxl_mbox_get_partition_info pi = {
524 		.active_volatile_cap =
525 			cpu_to_le64(DEV_SIZE / 2 / CXL_CAPACITY_MULTIPLIER),
526 		.active_persistent_cap =
527 			cpu_to_le64(DEV_SIZE / 2 / CXL_CAPACITY_MULTIPLIER),
528 	};
529 
530 	if (cmd->size_out < sizeof(pi))
531 		return -EINVAL;
532 
533 	memcpy(cmd->payload_out, &pi, sizeof(pi));
534 
535 	return 0;
536 }
537 
538 static int mock_get_security_state(struct cxl_dev_state *cxlds,
539 				   struct cxl_mbox_cmd *cmd)
540 {
541 	struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
542 
543 	if (cmd->size_in)
544 		return -EINVAL;
545 
546 	if (cmd->size_out != sizeof(u32))
547 		return -EINVAL;
548 
549 	memcpy(cmd->payload_out, &mdata->security_state, sizeof(u32));
550 
551 	return 0;
552 }
553 
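/*
 * Track failed passphrase attempts: once PASS_TRY_LIMIT failures accumulate,
 * latch the corresponding "passphrase attempt limit reached" state bit.
 */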
554 static void master_plimit_check(struct cxl_mockmem_data *mdata)
555 {
556 	if (mdata->master_limit == PASS_TRY_LIMIT)
557 		return;
558 	mdata->master_limit++;
559 	if (mdata->master_limit == PASS_TRY_LIMIT)
560 		mdata->security_state |= CXL_PMEM_SEC_STATE_MASTER_PLIMIT;
561 }
562 
563 static void user_plimit_check(struct cxl_mockmem_data *mdata)
564 {
565 	if (mdata->user_limit == PASS_TRY_LIMIT)
566 		return;
567 	mdata->user_limit++;
568 	if (mdata->user_limit == PASS_TRY_LIMIT)
569 		mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PLIMIT;
570 }
571 
572 static int mock_set_passphrase(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
573 {
574 	struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
575 	struct cxl_set_pass *set_pass;
576 
577 	if (cmd->size_in != sizeof(*set_pass))
578 		return -EINVAL;
579 
580 	if (cmd->size_out != 0)
581 		return -EINVAL;
582 
583 	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
584 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
585 		return -ENXIO;
586 	}
587 
588 	set_pass = cmd->payload_in;
589 	switch (set_pass->type) {
590 	case CXL_PMEM_SEC_PASS_MASTER:
591 		if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT) {
592 			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
593 			return -ENXIO;
594 		}
595 		/*
596 		 * CXL spec rev 3.0, 8.2.9.8.6.2: the master passphrase shall only be
597 		 * set in the security-disabled state, when the user passphrase is not set.
598 		 */
599 		if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
600 			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
601 			return -ENXIO;
602 		}
603 		if (memcmp(mdata->master_pass, set_pass->old_pass, NVDIMM_PASSPHRASE_LEN)) {
604 			master_plimit_check(mdata);
605 			cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
606 			return -ENXIO;
607 		}
608 		memcpy(mdata->master_pass, set_pass->new_pass, NVDIMM_PASSPHRASE_LEN);
609 		mdata->security_state |= CXL_PMEM_SEC_STATE_MASTER_PASS_SET;
610 		return 0;
611 
612 	case CXL_PMEM_SEC_PASS_USER:
613 		if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT) {
614 			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
615 			return -ENXIO;
616 		}
617 		if (memcmp(mdata->user_pass, set_pass->old_pass, NVDIMM_PASSPHRASE_LEN)) {
618 			user_plimit_check(mdata);
619 			cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
620 			return -ENXIO;
621 		}
622 		memcpy(mdata->user_pass, set_pass->new_pass, NVDIMM_PASSPHRASE_LEN);
623 		mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PASS_SET;
624 		return 0;
625 
626 	default:
627 		cmd->return_code = CXL_MBOX_CMD_RC_INPUT;
628 	}
629 	return -EINVAL;
630 }
631 
632 static int mock_disable_passphrase(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
633 {
634 	struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
635 	struct cxl_disable_pass *dis_pass;
636 
637 	if (cmd->size_in != sizeof(*dis_pass))
638 		return -EINVAL;
639 
640 	if (cmd->size_out != 0)
641 		return -EINVAL;
642 
643 	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
644 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
645 		return -ENXIO;
646 	}
647 
648 	dis_pass = cmd->payload_in;
649 	switch (dis_pass->type) {
650 	case CXL_PMEM_SEC_PASS_MASTER:
651 		if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT) {
652 			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
653 			return -ENXIO;
654 		}
655 
656 		if (!(mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PASS_SET)) {
657 			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
658 			return -ENXIO;
659 		}
660 
661 		if (memcmp(dis_pass->pass, mdata->master_pass, NVDIMM_PASSPHRASE_LEN)) {
662 			master_plimit_check(mdata);
663 			cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
664 			return -ENXIO;
665 		}
666 
667 		mdata->master_limit = 0;
668 		memset(mdata->master_pass, 0, NVDIMM_PASSPHRASE_LEN);
669 		mdata->security_state &= ~CXL_PMEM_SEC_STATE_MASTER_PASS_SET;
670 		return 0;
671 
672 	case CXL_PMEM_SEC_PASS_USER:
673 		if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT) {
674 			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
675 			return -ENXIO;
676 		}
677 
678 		if (!(mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET)) {
679 			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
680 			return -ENXIO;
681 		}
682 
683 		if (memcmp(dis_pass->pass, mdata->user_pass, NVDIMM_PASSPHRASE_LEN)) {
684 			user_plimit_check(mdata);
685 			cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
686 			return -ENXIO;
687 		}
688 
689 		mdata->user_limit = 0;
690 		memset(mdata->user_pass, 0, NVDIMM_PASSPHRASE_LEN);
691 		mdata->security_state &= ~(CXL_PMEM_SEC_STATE_USER_PASS_SET |
692 					   CXL_PMEM_SEC_STATE_LOCKED);
693 		return 0;
694 
695 	default:
696 		cmd->return_code = CXL_MBOX_CMD_RC_INPUT;
697 		return -EINVAL;
698 	}
699 
700 	return 0;
701 }
702 
703 static int mock_freeze_security(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
704 {
705 	struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
706 
707 	if (cmd->size_in != 0)
708 		return -EINVAL;
709 
710 	if (cmd->size_out != 0)
711 		return -EINVAL;
712 
713 	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN)
714 		return 0;
715 
716 	mdata->security_state |= CXL_PMEM_SEC_STATE_FROZEN;
717 	return 0;
718 }
719 
720 static int mock_unlock_security(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
721 {
722 	struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
723 
724 	if (cmd->size_in != NVDIMM_PASSPHRASE_LEN)
725 		return -EINVAL;
726 
727 	if (cmd->size_out != 0)
728 		return -EINVAL;
729 
730 	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
731 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
732 		return -ENXIO;
733 	}
734 
735 	if (!(mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET)) {
736 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
737 		return -ENXIO;
738 	}
739 
740 	if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT) {
741 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
742 		return -ENXIO;
743 	}
744 
745 	if (!(mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED)) {
746 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
747 		return -ENXIO;
748 	}
749 
750 	if (memcmp(cmd->payload_in, mdata->user_pass, NVDIMM_PASSPHRASE_LEN)) {
751 		if (++mdata->user_limit == PASS_TRY_LIMIT)
752 			mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PLIMIT;
753 		cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
754 		return -ENXIO;
755 	}
756 
757 	mdata->user_limit = 0;
758 	mdata->security_state &= ~CXL_PMEM_SEC_STATE_LOCKED;
759 	return 0;
760 }
761 
762 static int mock_passphrase_secure_erase(struct cxl_dev_state *cxlds,
763 					struct cxl_mbox_cmd *cmd)
764 {
765 	struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
766 	struct cxl_pass_erase *erase;
767 
768 	if (cmd->size_in != sizeof(*erase))
769 		return -EINVAL;
770 
771 	if (cmd->size_out != 0)
772 		return -EINVAL;
773 
774 	erase = cmd->payload_in;
775 	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
776 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
777 		return -ENXIO;
778 	}
779 
780 	if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT &&
781 	    erase->type == CXL_PMEM_SEC_PASS_USER) {
782 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
783 		return -ENXIO;
784 	}
785 
786 	if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT &&
787 	    erase->type == CXL_PMEM_SEC_PASS_MASTER) {
788 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
789 		return -ENXIO;
790 	}
791 
792 	switch (erase->type) {
793 	case CXL_PMEM_SEC_PASS_MASTER:
794 		/*
795 		 * The spec does not clearly define the behavior when a master
796 		 * passphrase is passed in while neither the master passphrase
797 		 * nor the user passphrase is set. This mock assumes the command
798 		 * behaves the same as a CXL Secure Erase command without
799 		 * passphrase (0x4401).
800 		 */
801 		if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PASS_SET) {
802 			if (memcmp(mdata->master_pass, erase->pass,
803 				   NVDIMM_PASSPHRASE_LEN)) {
804 				master_plimit_check(mdata);
805 				cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
806 				return -ENXIO;
807 			}
808 			mdata->master_limit = 0;
809 			mdata->user_limit = 0;
810 			mdata->security_state &= ~CXL_PMEM_SEC_STATE_USER_PASS_SET;
811 			memset(mdata->user_pass, 0, NVDIMM_PASSPHRASE_LEN);
812 			mdata->security_state &= ~CXL_PMEM_SEC_STATE_LOCKED;
813 		} else {
814 			/*
815 			 * CXL rev 3.0, 8.2.9.8.6.3 Disable Passphrase:
816 			 * when the master passphrase is disabled, the device shall
817 			 * return Invalid Input for a Passphrase Secure Erase
818 			 * command that uses the master passphrase.
819 			 */
820 			return -EINVAL;
821 		}
822 		/* Scramble encryption keys so that data is effectively erased */
823 		break;
824 	case CXL_PMEM_SEC_PASS_USER:
825 		/*
826 		 * The spec does not clearly define the behavior when a user
827 		 * passphrase is passed in while the user passphrase is not set.
828 		 * This mock assumes the command behaves the same as a CXL
829 		 * Secure Erase command without passphrase (0x4401) and
830 		 * proceeds with the erase.
831 		 */
832 		if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
833 			if (memcmp(mdata->user_pass, erase->pass,
834 				   NVDIMM_PASSPHRASE_LEN)) {
835 				user_plimit_check(mdata);
836 				cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
837 				return -ENXIO;
838 			}
839 			mdata->user_limit = 0;
840 			mdata->security_state &= ~CXL_PMEM_SEC_STATE_USER_PASS_SET;
841 			memset(mdata->user_pass, 0, NVDIMM_PASSPHRASE_LEN);
842 		}
843 
844 		/*
845 		 * CXL rev 3.0, Table 8-118: if the user passphrase is not set or
846 		 * not supported by the device, the current passphrase value is
847 		 * ignored. Since the spec is not explicit, assume the operation
848 		 * proceeds as a secure erase without passphrase, as with
849 		 * opcode 0x4401.
850 		 */
851 
852 		/* Scramble encryption keys so that data is effectively erased */
853 		break;
854 	default:
855 		return -EINVAL;
856 	}
857 
858 	return 0;
859 }
860 
861 static int mock_get_lsa(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
862 {
863 	struct cxl_mbox_get_lsa *get_lsa = cmd->payload_in;
864 	struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
865 	void *lsa = mdata->lsa;
866 	u32 offset, length;
867 
868 	if (sizeof(*get_lsa) > cmd->size_in)
869 		return -EINVAL;
870 	offset = le32_to_cpu(get_lsa->offset);
871 	length = le32_to_cpu(get_lsa->length);
872 	if (offset + length > LSA_SIZE)
873 		return -EINVAL;
874 	if (length > cmd->size_out)
875 		return -EINVAL;
876 
877 	memcpy(cmd->payload_out, lsa + offset, length);
878 	return 0;
879 }
880 
881 static int mock_set_lsa(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
882 {
883 	struct cxl_mbox_set_lsa *set_lsa = cmd->payload_in;
884 	struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
885 	void *lsa = mdata->lsa;
886 	u32 offset, length;
887 
888 	if (sizeof(*set_lsa) > cmd->size_in)
889 		return -EINVAL;
890 	offset = le32_to_cpu(set_lsa->offset);
891 	length = cmd->size_in - sizeof(*set_lsa);
892 	if (offset + length > LSA_SIZE)
893 		return -EINVAL;
894 
895 	memcpy(lsa + offset, &set_lsa->data[0], length);
896 	return 0;
897 }
898 
899 static int mock_health_info(struct cxl_dev_state *cxlds,
900 			    struct cxl_mbox_cmd *cmd)
901 {
902 	struct cxl_mbox_health_info health_info = {
903 		/* set flags for maint needed, perf degraded, hw replacement */
904 		.health_status = 0x7,
905 		/* set media status to "All Data Lost" */
906 		.media_status = 0x3,
907 		/*
908 		 * set ext_status flags for:
909 		 *  ext_life_used: normal,
910 		 *  ext_temperature: critical,
911 		 *  ext_corrected_volatile: warning,
912 		 *  ext_corrected_persistent: normal,
913 		 */
914 		.ext_status = 0x18,
915 		.life_used = 15,
916 		.temperature = cpu_to_le16(25),
917 		.dirty_shutdowns = cpu_to_le32(10),
918 		.volatile_errors = cpu_to_le32(20),
919 		.pmem_errors = cpu_to_le32(30),
920 	};
921 
922 	if (cmd->size_out < sizeof(health_info))
923 		return -EINVAL;
924 
925 	memcpy(cmd->payload_out, &health_info, sizeof(health_info));
926 	return 0;
927 }
928 
929 static struct mock_poison {
930 	struct cxl_dev_state *cxlds;
931 	u64 dpa;
932 } mock_poison_list[MOCK_INJECT_TEST_MAX];
933 
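/*
 * Build a Get Poison List payload from the mock table: this device's records
 * that fall within [offset, offset + length), capped at poison_inject_dev_max
 * entries, each reported with the "injected" source.
 */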
934 static struct cxl_mbox_poison_out *
935 cxl_get_injected_po(struct cxl_dev_state *cxlds, u64 offset, u64 length)
936 {
937 	struct cxl_mbox_poison_out *po;
938 	int nr_records = 0;
939 	u64 dpa;
940 
941 	po = kzalloc(struct_size(po, record, poison_inject_dev_max), GFP_KERNEL);
942 	if (!po)
943 		return NULL;
944 
945 	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
946 		if (mock_poison_list[i].cxlds != cxlds)
947 			continue;
948 		if (mock_poison_list[i].dpa < offset ||
949 		    mock_poison_list[i].dpa > offset + length - 1)
950 			continue;
951 
952 		dpa = mock_poison_list[i].dpa + CXL_POISON_SOURCE_INJECTED;
953 		po->record[nr_records].address = cpu_to_le64(dpa);
954 		po->record[nr_records].length = cpu_to_le32(1);
955 		nr_records++;
956 		if (nr_records == poison_inject_dev_max)
957 			break;
958 	}
959 
960 	/* Always return count, even when zero */
961 	po->count = cpu_to_le16(nr_records);
962 
963 	return po;
964 }
965 
966 static int mock_get_poison(struct cxl_dev_state *cxlds,
967 			   struct cxl_mbox_cmd *cmd)
968 {
969 	struct cxl_mbox_poison_in *pi = cmd->payload_in;
970 	struct cxl_mbox_poison_out *po;
971 	u64 offset = le64_to_cpu(pi->offset);
972 	u64 length = le64_to_cpu(pi->length);
973 	int nr_records;
974 
975 	po = cxl_get_injected_po(cxlds, offset, length);
976 	if (!po)
977 		return -ENOMEM;
978 	nr_records = le16_to_cpu(po->count);
979 	memcpy(cmd->payload_out, po, struct_size(po, record, nr_records));
980 	cmd->size_out = struct_size(po, record, nr_records);
981 	kfree(po);
982 
983 	return 0;
984 }
985 
986 static bool mock_poison_dev_max_injected(struct cxl_dev_state *cxlds)
987 {
988 	int count = 0;
989 
990 	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
991 		if (mock_poison_list[i].cxlds == cxlds)
992 			count++;
993 	}
994 	return (count >= poison_inject_dev_max);
995 }
996 
997 static bool mock_poison_add(struct cxl_dev_state *cxlds, u64 dpa)
998 {
999 	if (mock_poison_dev_max_injected(cxlds)) {
1000 		dev_dbg(cxlds->dev,
1001 			"Device poison injection limit has been reached: %d\n",
1002 			MOCK_INJECT_DEV_MAX);
1003 		return false;
1004 	}
1005 
1006 	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1007 		if (!mock_poison_list[i].cxlds) {
1008 			mock_poison_list[i].cxlds = cxlds;
1009 			mock_poison_list[i].dpa = dpa;
1010 			return true;
1011 		}
1012 	}
1013 	dev_dbg(cxlds->dev,
1014 		"Mock test poison injection limit has been reached: %d\n",
1015 		MOCK_INJECT_TEST_MAX);
1016 
1017 	return false;
1018 }
1019 
1020 static bool mock_poison_found(struct cxl_dev_state *cxlds, u64 dpa)
1021 {
1022 	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1023 		if (mock_poison_list[i].cxlds == cxlds &&
1024 		    mock_poison_list[i].dpa == dpa)
1025 			return true;
1026 	}
1027 	return false;
1028 }
1029 
1030 static int mock_inject_poison(struct cxl_dev_state *cxlds,
1031 			      struct cxl_mbox_cmd *cmd)
1032 {
1033 	struct cxl_mbox_inject_poison *pi = cmd->payload_in;
1034 	u64 dpa = le64_to_cpu(pi->address);
1035 
1036 	if (mock_poison_found(cxlds, dpa)) {
1037 		/* Not an error to inject poison if already poisoned */
1038 		dev_dbg(cxlds->dev, "DPA: 0x%llx already poisoned\n", dpa);
1039 		return 0;
1040 	}
1041 	if (!mock_poison_add(cxlds, dpa))
1042 		return -ENXIO;
1043 
1044 	return 0;
1045 }
1046 
1047 static bool mock_poison_del(struct cxl_dev_state *cxlds, u64 dpa)
1048 {
1049 	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1050 		if (mock_poison_list[i].cxlds == cxlds &&
1051 		    mock_poison_list[i].dpa == dpa) {
1052 			mock_poison_list[i].cxlds = NULL;
1053 			return true;
1054 		}
1055 	}
1056 	return false;
1057 }
1058 
1059 static int mock_clear_poison(struct cxl_dev_state *cxlds,
1060 			     struct cxl_mbox_cmd *cmd)
1061 {
1062 	struct cxl_mbox_clear_poison *pi = cmd->payload_in;
1063 	u64 dpa = le64_to_cpu(pi->address);
1064 
1065 	/*
1066 	 * A real CXL device will write pi->write_data to the address
1067 	 * being cleared. In this mock, just delete this address from
1068 	 * the mock poison list.
1069 	 */
1070 	if (!mock_poison_del(cxlds, dpa))
1071 		dev_dbg(cxlds->dev, "DPA: 0x%llx not in poison list\n", dpa);
1072 
1073 	return 0;
1074 }
1075 
1076 static bool mock_poison_list_empty(void)
1077 {
1078 	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1079 		if (mock_poison_list[i].cxlds)
1080 			return false;
1081 	}
1082 	return true;
1083 }
1084 
1085 static ssize_t poison_inject_max_show(struct device_driver *drv, char *buf)
1086 {
1087 	return sysfs_emit(buf, "%u\n", poison_inject_dev_max);
1088 }
1089 
1090 static ssize_t poison_inject_max_store(struct device_driver *drv,
1091 				       const char *buf, size_t len)
1092 {
1093 	int val;
1094 
1095 	if (kstrtoint(buf, 0, &val) < 0)
1096 		return -EINVAL;
1097 
1098 	if (!mock_poison_list_empty())
1099 		return -EBUSY;
1100 
1101 	if (val < 0 || val > MOCK_INJECT_TEST_MAX)
1102 		return -EINVAL;
1103 
1104 	poison_inject_dev_max = val;
1105 
1106 	return len;
1107 }
1108 
1109 static DRIVER_ATTR_RW(poison_inject_max);
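/*
 * Example (hypothetical path, assuming this module is loaded as cxl_mock_mem):
 *   echo 50 > /sys/bus/platform/drivers/cxl_mock_mem/poison_inject_max
 */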
1110 
1111 static struct attribute *cxl_mock_mem_core_attrs[] = {
1112 	&driver_attr_poison_inject_max.attr,
1113 	NULL
1114 };
1115 ATTRIBUTE_GROUPS(cxl_mock_mem_core);
1116 
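/*
 * Mailbox dispatch for the mock memdev: route each supported opcode to its
 * mock handler; unhandled opcodes fall through and return -EIO.
 */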
1117 static int cxl_mock_mbox_send(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
1118 {
1119 	struct device *dev = cxlds->dev;
1120 	int rc = -EIO;
1121 
1122 	switch (cmd->opcode) {
1123 	case CXL_MBOX_OP_SET_TIMESTAMP:
1124 		rc = mock_set_timestamp(cxlds, cmd);
1125 		break;
1126 	case CXL_MBOX_OP_GET_SUPPORTED_LOGS:
1127 		rc = mock_gsl(cmd);
1128 		break;
1129 	case CXL_MBOX_OP_GET_LOG:
1130 		rc = mock_get_log(cxlds, cmd);
1131 		break;
1132 	case CXL_MBOX_OP_IDENTIFY:
1133 		if (cxlds->rcd)
1134 			rc = mock_rcd_id(cxlds, cmd);
1135 		else
1136 			rc = mock_id(cxlds, cmd);
1137 		break;
1138 	case CXL_MBOX_OP_GET_LSA:
1139 		rc = mock_get_lsa(cxlds, cmd);
1140 		break;
1141 	case CXL_MBOX_OP_GET_PARTITION_INFO:
1142 		rc = mock_partition_info(cxlds, cmd);
1143 		break;
1144 	case CXL_MBOX_OP_GET_EVENT_RECORD:
1145 		rc = mock_get_event(cxlds, cmd);
1146 		break;
1147 	case CXL_MBOX_OP_CLEAR_EVENT_RECORD:
1148 		rc = mock_clear_event(cxlds, cmd);
1149 		break;
1150 	case CXL_MBOX_OP_SET_LSA:
1151 		rc = mock_set_lsa(cxlds, cmd);
1152 		break;
1153 	case CXL_MBOX_OP_GET_HEALTH_INFO:
1154 		rc = mock_health_info(cxlds, cmd);
1155 		break;
1156 	case CXL_MBOX_OP_GET_SECURITY_STATE:
1157 		rc = mock_get_security_state(cxlds, cmd);
1158 		break;
1159 	case CXL_MBOX_OP_SET_PASSPHRASE:
1160 		rc = mock_set_passphrase(cxlds, cmd);
1161 		break;
1162 	case CXL_MBOX_OP_DISABLE_PASSPHRASE:
1163 		rc = mock_disable_passphrase(cxlds, cmd);
1164 		break;
1165 	case CXL_MBOX_OP_FREEZE_SECURITY:
1166 		rc = mock_freeze_security(cxlds, cmd);
1167 		break;
1168 	case CXL_MBOX_OP_UNLOCK:
1169 		rc = mock_unlock_security(cxlds, cmd);
1170 		break;
1171 	case CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE:
1172 		rc = mock_passphrase_secure_erase(cxlds, cmd);
1173 		break;
1174 	case CXL_MBOX_OP_GET_POISON:
1175 		rc = mock_get_poison(cxlds, cmd);
1176 		break;
1177 	case CXL_MBOX_OP_INJECT_POISON:
1178 		rc = mock_inject_poison(cxlds, cmd);
1179 		break;
1180 	case CXL_MBOX_OP_CLEAR_POISON:
1181 		rc = mock_clear_poison(cxlds, cmd);
1182 		break;
1183 	default:
1184 		break;
1185 	}
1186 
1187 	dev_dbg(dev, "opcode: %#x sz_in: %zu sz_out: %zu rc: %d\n", cmd->opcode,
1188 		cmd->size_in, cmd->size_out, rc);
1189 
1190 	return rc;
1191 }
1192 
1193 static void label_area_release(void *lsa)
1194 {
1195 	vfree(lsa);
1196 }
1197 
1198 static bool is_rcd(struct platform_device *pdev)
1199 {
1200 	const struct platform_device_id *id = platform_get_device_id(pdev);
1201 
1202 	return !!id->driver_data;
1203 }
1204 
1205 static ssize_t event_trigger_store(struct device *dev,
1206 				   struct device_attribute *attr,
1207 				   const char *buf, size_t count)
1208 {
1209 	cxl_mock_event_trigger(dev);
1210 	return count;
1211 }
1212 static DEVICE_ATTR_WO(event_trigger);
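/*
 * Example (hypothetical path, assuming cxl_test registered this memdev as
 * platform device cxl_mem.0):
 *   echo 1 > /sys/bus/platform/devices/cxl_mem.0/event_trigger
 */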
1213 
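/*
 * Probe: allocate mock state and a label area, create the cxl_dev_state, run
 * the regular cxl_mem enumeration/identify path against the mock mailbox, add
 * the memdev, and prime the pre-populated event logs.
 */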
1214 static int cxl_mock_mem_probe(struct platform_device *pdev)
1215 {
1216 	struct device *dev = &pdev->dev;
1217 	struct cxl_memdev *cxlmd;
1218 	struct cxl_dev_state *cxlds;
1219 	struct cxl_mockmem_data *mdata;
1220 	int rc;
1221 
1222 	mdata = devm_kzalloc(dev, sizeof(*mdata), GFP_KERNEL);
1223 	if (!mdata)
1224 		return -ENOMEM;
1225 	dev_set_drvdata(dev, mdata);
1226 
1227 	mdata->lsa = vmalloc(LSA_SIZE);
1228 	if (!mdata->lsa)
1229 		return -ENOMEM;
1230 	rc = devm_add_action_or_reset(dev, label_area_release, mdata->lsa);
1231 	if (rc)
1232 		return rc;
1233 
1234 	cxlds = cxl_dev_state_create(dev);
1235 	if (IS_ERR(cxlds))
1236 		return PTR_ERR(cxlds);
1237 
1238 	cxlds->serial = pdev->id;
1239 	cxlds->mbox_send = cxl_mock_mbox_send;
1240 	cxlds->payload_size = SZ_4K;
1241 	cxlds->event.buf = (struct cxl_get_event_payload *)mdata->event_buf;
1242 	if (is_rcd(pdev)) {
1243 		cxlds->rcd = true;
1244 		cxlds->component_reg_phys = CXL_RESOURCE_NONE;
1245 	}
1246 
1247 	rc = cxl_enumerate_cmds(cxlds);
1248 	if (rc)
1249 		return rc;
1250 
1251 	rc = cxl_poison_state_init(cxlds);
1252 	if (rc)
1253 		return rc;
1254 
1255 	rc = cxl_set_timestamp(cxlds);
1256 	if (rc)
1257 		return rc;
1258 
1259 	rc = cxl_dev_state_identify(cxlds);
1260 	if (rc)
1261 		return rc;
1262 
1263 	rc = cxl_mem_create_range_info(cxlds);
1264 	if (rc)
1265 		return rc;
1266 
1267 	mdata->mes.cxlds = cxlds;
1268 	cxl_mock_add_event_logs(&mdata->mes);
1269 
1270 	cxlmd = devm_cxl_add_memdev(cxlds);
1271 	if (IS_ERR(cxlmd))
1272 		return PTR_ERR(cxlmd);
1273 
1274 	cxl_mem_get_event_records(cxlds, CXLDEV_EVENT_STATUS_ALL);
1275 
1276 	return 0;
1277 }
1278 
1279 static ssize_t security_lock_show(struct device *dev,
1280 				  struct device_attribute *attr, char *buf)
1281 {
1282 	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
1283 
1284 	return sysfs_emit(buf, "%u\n",
1285 			  !!(mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED));
1286 }
1287 
1288 static ssize_t security_lock_store(struct device *dev, struct device_attribute *attr,
1289 				   const char *buf, size_t count)
1290 {
1291 	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
1292 	u32 mask = CXL_PMEM_SEC_STATE_FROZEN | CXL_PMEM_SEC_STATE_USER_PLIMIT |
1293 		   CXL_PMEM_SEC_STATE_MASTER_PLIMIT;
1294 	int val;
1295 
1296 	if (kstrtoint(buf, 0, &val) < 0)
1297 		return -EINVAL;
1298 
1299 	if (val == 1) {
1300 		if (!(mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET))
1301 			return -ENXIO;
1302 		mdata->security_state |= CXL_PMEM_SEC_STATE_LOCKED;
1303 		mdata->security_state &= ~mask;
1304 	} else {
1305 		return -EINVAL;
1306 	}
1307 	return count;
1308 }
1309 
1310 static DEVICE_ATTR_RW(security_lock);
1311 
1312 static struct attribute *cxl_mock_mem_attrs[] = {
1313 	&dev_attr_security_lock.attr,
1314 	&dev_attr_event_trigger.attr,
1315 	NULL
1316 };
1317 ATTRIBUTE_GROUPS(cxl_mock_mem);
1318 
1319 static const struct platform_device_id cxl_mock_mem_ids[] = {
1320 	{ .name = "cxl_mem", .driver_data = 0 },
1321 	{ .name = "cxl_rcd", .driver_data = 1 },
1322 	{ },
1323 };
1324 MODULE_DEVICE_TABLE(platform, cxl_mock_mem_ids);
1325 
1326 static struct platform_driver cxl_mock_mem_driver = {
1327 	.probe = cxl_mock_mem_probe,
1328 	.id_table = cxl_mock_mem_ids,
1329 	.driver = {
1330 		.name = KBUILD_MODNAME,
1331 		.dev_groups = cxl_mock_mem_groups,
1332 		.groups = cxl_mock_mem_core_groups,
1333 	},
1334 };
1335 
1336 module_platform_driver(cxl_mock_mem_driver);
1337 MODULE_LICENSE("GPL v2");
1338 MODULE_IMPORT_NS(CXL);
1339