1 // SPDX-License-Identifier: GPL-2.0-only
2 // Copyright(c) 2021 Intel Corporation. All rights reserved.
3 
4 #include <linux/platform_device.h>
5 #include <linux/mod_devicetable.h>
6 #include <linux/module.h>
7 #include <linux/delay.h>
8 #include <linux/sizes.h>
9 #include <linux/bits.h>
10 #include <asm/unaligned.h>
11 #include <crypto/sha2.h>
12 #include <cxlmem.h>
13 
14 #include "trace.h"
15 
16 #define LSA_SIZE SZ_128K
17 #define FW_SIZE SZ_64M
18 #define FW_SLOTS 3
19 #define DEV_SIZE SZ_2G
20 #define EFFECT(x) (1U << (x))
21 
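/*
 * Poison injection limits: MOCK_INJECT_DEV_MAX is the default per-device
 * injection limit enforced by mock_poison_add(), while MOCK_INJECT_TEST_MAX
 * bounds the shared mock_poison_list across all mock devices and is the
 * limit reported by IDENTIFY. poison_inject_dev_max below can be tuned at
 * runtime through the poison_inject_max driver attribute.
 */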
22 #define MOCK_INJECT_DEV_MAX 8
23 #define MOCK_INJECT_TEST_MAX 128
24 
25 static unsigned int poison_inject_dev_max = MOCK_INJECT_DEV_MAX;
26 
27 enum cxl_command_effects {
28 	CONF_CHANGE_COLD_RESET = 0,
29 	CONF_CHANGE_IMMEDIATE,
30 	DATA_CHANGE_IMMEDIATE,
31 	POLICY_CHANGE_IMMEDIATE,
32 	LOG_CHANGE_IMMEDIATE,
33 	SECURITY_CHANGE_IMMEDIATE,
34 	BACKGROUND_OP,
35 	SECONDARY_MBOX_SUPPORTED,
36 };
37 
38 #define CXL_CMD_EFFECT_NONE cpu_to_le16(0)
39 
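/*
 * Mock Command Effects Log (CEL): the set of mailbox opcodes this emulated
 * device reports as supported via Get Supported Logs / Get Log, along with
 * the command effects each one declares (e.g. immediate data/config changes,
 * background operation, cold reset required).
 */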
40 static struct cxl_cel_entry mock_cel[] = {
41 	{
42 		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_SUPPORTED_LOGS),
43 		.effect = CXL_CMD_EFFECT_NONE,
44 	},
45 	{
46 		.opcode = cpu_to_le16(CXL_MBOX_OP_IDENTIFY),
47 		.effect = CXL_CMD_EFFECT_NONE,
48 	},
49 	{
50 		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_LSA),
51 		.effect = CXL_CMD_EFFECT_NONE,
52 	},
53 	{
54 		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_PARTITION_INFO),
55 		.effect = CXL_CMD_EFFECT_NONE,
56 	},
57 	{
58 		.opcode = cpu_to_le16(CXL_MBOX_OP_SET_LSA),
59 		.effect = cpu_to_le16(EFFECT(CONF_CHANGE_IMMEDIATE) |
60 				      EFFECT(DATA_CHANGE_IMMEDIATE)),
61 	},
62 	{
63 		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_HEALTH_INFO),
64 		.effect = CXL_CMD_EFFECT_NONE,
65 	},
66 	{
67 		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_POISON),
68 		.effect = CXL_CMD_EFFECT_NONE,
69 	},
70 	{
71 		.opcode = cpu_to_le16(CXL_MBOX_OP_INJECT_POISON),
72 		.effect = cpu_to_le16(EFFECT(DATA_CHANGE_IMMEDIATE)),
73 	},
74 	{
75 		.opcode = cpu_to_le16(CXL_MBOX_OP_CLEAR_POISON),
76 		.effect = cpu_to_le16(EFFECT(DATA_CHANGE_IMMEDIATE)),
77 	},
78 	{
79 		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_FW_INFO),
80 		.effect = CXL_CMD_EFFECT_NONE,
81 	},
82 	{
83 		.opcode = cpu_to_le16(CXL_MBOX_OP_TRANSFER_FW),
84 		.effect = cpu_to_le16(EFFECT(CONF_CHANGE_COLD_RESET) |
85 				      EFFECT(BACKGROUND_OP)),
86 	},
87 	{
88 		.opcode = cpu_to_le16(CXL_MBOX_OP_ACTIVATE_FW),
89 		.effect = cpu_to_le16(EFFECT(CONF_CHANGE_COLD_RESET) |
90 				      EFFECT(CONF_CHANGE_IMMEDIATE)),
91 	},
92 };
93 
94 /* See CXL 2.0 Table 181 Get Health Info Output Payload */
95 struct cxl_mbox_health_info {
96 	u8 health_status;
97 	u8 media_status;
98 	u8 ext_status;
99 	u8 life_used;
100 	__le16 temperature;
101 	__le32 dirty_shutdowns;
102 	__le32 volatile_errors;
103 	__le32 pmem_errors;
104 } __packed;
105 
106 static struct {
107 	struct cxl_mbox_get_supported_logs gsl;
108 	struct cxl_gsl_entry entry;
109 } mock_gsl_payload = {
110 	.gsl = {
111 		.entries = cpu_to_le16(1),
112 	},
113 	.entry = {
114 		.uuid = DEFINE_CXL_CEL_UUID,
115 		.size = cpu_to_le32(sizeof(mock_cel)),
116 	},
117 };
118 
119 #define PASS_TRY_LIMIT 3
120 
121 #define CXL_TEST_EVENT_CNT_MAX 15
122 
123 /* Number of event records the mock returns per Get Event Records request */
124 #define CXL_TEST_EVENT_CNT 3
125 
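/*
 * Mock event log bookkeeping: events[] holds pre-canned records, cur_idx is
 * the next record to hand out via Get Event Records, and clear_idx tracks how
 * far the host has cleared. Record handles are 1-based (handle 0 is invalid),
 * so handles are derived from these indices plus one.
 */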
126 struct mock_event_log {
127 	u16 clear_idx;
128 	u16 cur_idx;
129 	u16 nr_events;
130 	u16 nr_overflow;
131 	u16 overflow_reset;
132 	struct cxl_event_record_raw *events[CXL_TEST_EVENT_CNT_MAX];
133 };
134 
135 struct mock_event_store {
136 	struct cxl_dev_state *cxlds;
137 	struct mock_event_log mock_logs[CXL_EVENT_TYPE_MAX];
138 	u32 ev_status;
139 };
140 
141 struct cxl_mockmem_data {
142 	void *lsa;
143 	void *fw;
144 	int fw_slot;
145 	int fw_staged;
146 	size_t fw_size;
147 	u32 security_state;
148 	u8 user_pass[NVDIMM_PASSPHRASE_LEN];
149 	u8 master_pass[NVDIMM_PASSPHRASE_LEN];
150 	int user_limit;
151 	int master_limit;
152 	struct mock_event_store mes;
153 	u8 event_buf[SZ_4K];
154 	u64 timestamp;
155 };
156 
157 static struct mock_event_log *event_find_log(struct device *dev, int log_type)
158 {
159 	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
160 
161 	if (log_type >= CXL_EVENT_TYPE_MAX)
162 		return NULL;
163 	return &mdata->mes.mock_logs[log_type];
164 }
165 
166 static struct cxl_event_record_raw *event_get_current(struct mock_event_log *log)
167 {
168 	return log->events[log->cur_idx];
169 }
170 
171 static void event_reset_log(struct mock_event_log *log)
172 {
173 	log->cur_idx = 0;
174 	log->clear_idx = 0;
175 	log->nr_overflow = log->overflow_reset;
176 }
177 
178 /* Handles can never be 0; use 1-based indexing for handles */
179 static u16 event_get_clear_handle(struct mock_event_log *log)
180 {
181 	return log->clear_idx + 1;
182 }
183 
184 /* Handles can never be 0; use 1-based indexing for handles */
185 static __le16 event_get_cur_event_handle(struct mock_event_log *log)
186 {
187 	u16 cur_handle = log->cur_idx + 1;
188 
189 	return cpu_to_le16(cur_handle);
190 }
191 
192 static bool event_log_empty(struct mock_event_log *log)
193 {
194 	return log->cur_idx == log->nr_events;
195 }
196 
197 static void mes_add_event(struct mock_event_store *mes,
198 			  enum cxl_event_log_type log_type,
199 			  struct cxl_event_record_raw *event)
200 {
201 	struct mock_event_log *log;
202 
203 	if (WARN_ON(log_type >= CXL_EVENT_TYPE_MAX))
204 		return;
205 
206 	log = &mes->mock_logs[log_type];
207 
208 	if ((log->nr_events + 1) > CXL_TEST_EVENT_CNT_MAX) {
209 		log->nr_overflow++;
210 		log->overflow_reset = log->nr_overflow;
211 		return;
212 	}
213 
214 	log->events[log->nr_events] = event;
215 	log->nr_events++;
216 }
217 
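/*
 * Emulate the Get Event Records command: copy out up to CXL_TEST_EVENT_CNT
 * records from the requested log, set the MORE_RECORDS flag if the log is not
 * yet drained, and report any overflow with synthetic "5s ago"/"1s ago"
 * overflow timestamps.
 */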
218 static int mock_get_event(struct cxl_dev_state *cxlds,
219 			  struct cxl_mbox_cmd *cmd)
220 {
221 	struct cxl_get_event_payload *pl;
222 	struct mock_event_log *log;
224 	u8 log_type;
225 	int i;
226 
227 	if (cmd->size_in != sizeof(log_type))
228 		return -EINVAL;
229 
230 	if (cmd->size_out < struct_size(pl, records, CXL_TEST_EVENT_CNT))
231 		return -EINVAL;
232 
233 	log_type = *((u8 *)cmd->payload_in);
234 	if (log_type >= CXL_EVENT_TYPE_MAX)
235 		return -EINVAL;
236 
237 	memset(cmd->payload_out, 0, cmd->size_out);
238 
239 	log = event_find_log(cxlds->dev, log_type);
240 	if (!log || event_log_empty(log))
241 		return 0;
242 
243 	pl = cmd->payload_out;
244 
245 	for (i = 0; i < CXL_TEST_EVENT_CNT && !event_log_empty(log); i++) {
246 		memcpy(&pl->records[i], event_get_current(log),
247 		       sizeof(pl->records[i]));
248 		pl->records[i].hdr.handle = event_get_cur_event_handle(log);
249 		log->cur_idx++;
250 	}
251 
252 	pl->record_count = cpu_to_le16(i);
253 	if (!event_log_empty(log))
254 		pl->flags |= CXL_GET_EVENT_FLAG_MORE_RECORDS;
255 
256 	if (log->nr_overflow) {
257 		u64 ns;
258 
259 		pl->flags |= CXL_GET_EVENT_FLAG_OVERFLOW;
260 		pl->overflow_err_count = cpu_to_le16(log->nr_overflow);
261 		ns = ktime_get_real_ns();
262 		ns -= 5000000000; /* 5s ago */
263 		pl->first_overflow_timestamp = cpu_to_le64(ns);
264 		ns = ktime_get_real_ns();
265 		ns -= 1000000000; /* 1s ago */
266 		pl->last_overflow_timestamp = cpu_to_le64(ns);
267 	}
268 
269 	return 0;
270 }
271 
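/*
 * Emulate the Clear Event Records command: require the host to clear only
 * handles that were previously returned, and to clear them in order, before
 * resetting the overflow count and advancing clear_idx.
 */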
272 static int mock_clear_event(struct cxl_dev_state *cxlds,
273 			    struct cxl_mbox_cmd *cmd)
274 {
275 	struct cxl_mbox_clear_event_payload *pl = cmd->payload_in;
276 	struct mock_event_log *log;
277 	u8 log_type = pl->event_log;
278 	u16 handle;
279 	int nr;
280 
281 	if (log_type >= CXL_EVENT_TYPE_MAX)
282 		return -EINVAL;
283 
284 	log = event_find_log(cxlds->dev, log_type);
285 	if (!log)
286 		return 0; /* No mock data in this log */
287 
288 	/*
289 	 * Clearing more events than were returned is not strictly invalid per
290 	 * the specification (the host could 'guess' handles and clear them in
291 	 * order), but it is not good host behavior, so flag it as an error.
292 	 */
293 	if (log->clear_idx + pl->nr_recs > log->cur_idx) {
294 		dev_err(cxlds->dev,
295 			"Attempting to clear more events than returned!\n");
296 		return -EINVAL;
297 	}
298 
299 	/* Check handle order prior to clearing events */
300 	for (nr = 0, handle = event_get_clear_handle(log);
301 	     nr < pl->nr_recs;
302 	     nr++, handle++) {
303 		if (handle != le16_to_cpu(pl->handles[nr])) {
304 			dev_err(cxlds->dev, "Clearing events out of order\n");
305 			return -EINVAL;
306 		}
307 	}
308 
309 	if (log->nr_overflow)
310 		log->nr_overflow = 0;
311 
312 	/* Clear events */
313 	log->clear_idx += pl->nr_recs;
314 	return 0;
315 }
316 
317 static void cxl_mock_event_trigger(struct device *dev)
318 {
319 	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
320 	struct mock_event_store *mes = &mdata->mes;
321 	int i;
322 
323 	for (i = CXL_EVENT_TYPE_INFO; i < CXL_EVENT_TYPE_MAX; i++) {
324 		struct mock_event_log *log;
325 
326 		log = event_find_log(dev, i);
327 		if (log)
328 			event_reset_log(log);
329 	}
330 
331 	cxl_mem_get_event_records(mes->cxlds, mes->ev_status);
332 }
333 
334 struct cxl_event_record_raw maint_needed = {
335 	.hdr = {
336 		.id = UUID_INIT(0xBA5EBA11, 0xABCD, 0xEFEB,
337 				0xa5, 0x5a, 0xa5, 0x5a, 0xa5, 0xa5, 0x5a, 0xa5),
338 		.length = sizeof(struct cxl_event_record_raw),
339 		.flags[0] = CXL_EVENT_RECORD_FLAG_MAINT_NEEDED,
340 		/* .handle = Set dynamically */
341 		.related_handle = cpu_to_le16(0xa5b6),
342 	},
343 	.data = { 0xDE, 0xAD, 0xBE, 0xEF },
344 };
345 
346 struct cxl_event_record_raw hardware_replace = {
347 	.hdr = {
348 		.id = UUID_INIT(0xABCDEFEB, 0xBA11, 0xBA5E,
349 				0xa5, 0x5a, 0xa5, 0x5a, 0xa5, 0xa5, 0x5a, 0xa5),
350 		.length = sizeof(struct cxl_event_record_raw),
351 		.flags[0] = CXL_EVENT_RECORD_FLAG_HW_REPLACE,
352 		/* .handle = Set dynamically */
353 		.related_handle = cpu_to_le16(0xb6a5),
354 	},
355 	.data = { 0xDE, 0xAD, 0xBE, 0xEF },
356 };
357 
358 struct cxl_event_gen_media gen_media = {
359 	.hdr = {
360 		.id = UUID_INIT(0xfbcd0a77, 0xc260, 0x417f,
361 				0x85, 0xa9, 0x08, 0x8b, 0x16, 0x21, 0xeb, 0xa6),
362 		.length = sizeof(struct cxl_event_gen_media),
363 		.flags[0] = CXL_EVENT_RECORD_FLAG_PERMANENT,
364 		/* .handle = Set dynamically */
365 		.related_handle = cpu_to_le16(0),
366 	},
367 	.phys_addr = cpu_to_le64(0x2000),
368 	.descriptor = CXL_GMER_EVT_DESC_UNCORECTABLE_EVENT,
369 	.type = CXL_GMER_MEM_EVT_TYPE_DATA_PATH_ERROR,
370 	.transaction_type = CXL_GMER_TRANS_HOST_WRITE,
371 	/* .validity_flags = <set below> */
372 	.channel = 1,
373 	.rank = 30
374 };
375 
376 struct cxl_event_dram dram = {
377 	.hdr = {
378 		.id = UUID_INIT(0x601dcbb3, 0x9c06, 0x4eab,
379 				0xb8, 0xaf, 0x4e, 0x9b, 0xfb, 0x5c, 0x96, 0x24),
380 		.length = sizeof(struct cxl_event_dram),
381 		.flags[0] = CXL_EVENT_RECORD_FLAG_PERF_DEGRADED,
382 		/* .handle = Set dynamically */
383 		.related_handle = cpu_to_le16(0),
384 	},
385 	.phys_addr = cpu_to_le64(0x8000),
386 	.descriptor = CXL_GMER_EVT_DESC_THRESHOLD_EVENT,
387 	.type = CXL_GMER_MEM_EVT_TYPE_INV_ADDR,
388 	.transaction_type = CXL_GMER_TRANS_INTERNAL_MEDIA_SCRUB,
389 	/* .validity_flags = <set below> */
390 	.channel = 1,
391 	.bank_group = 5,
392 	.bank = 2,
393 	.column = {0xDE, 0xAD},
394 };
395 
396 struct cxl_event_mem_module mem_module = {
397 	.hdr = {
398 		.id = UUID_INIT(0xfe927475, 0xdd59, 0x4339,
399 				0xa5, 0x86, 0x79, 0xba, 0xb1, 0x13, 0xb7, 0x74),
400 		.length = sizeof(struct cxl_event_mem_module),
401 		/* .handle = Set dynamically */
402 		.related_handle = cpu_to_le16(0),
403 	},
404 	.event_type = CXL_MMER_TEMP_CHANGE,
405 	.info = {
406 		.health_status = CXL_DHI_HS_PERFORMANCE_DEGRADED,
407 		.media_status = CXL_DHI_MS_ALL_DATA_LOST,
408 		.add_status = (CXL_DHI_AS_CRITICAL << 2) |
409 			      (CXL_DHI_AS_WARNING << 4) |
410 			      (CXL_DHI_AS_WARNING << 5),
411 		.device_temp = { 0xDE, 0xAD},
412 		.dirty_shutdown_cnt = { 0xde, 0xad, 0xbe, 0xef },
413 		.cor_vol_err_cnt = { 0xde, 0xad, 0xbe, 0xef },
414 		.cor_per_err_cnt = { 0xde, 0xad, 0xbe, 0xef },
415 	}
416 };
417 
418 static int mock_set_timestamp(struct cxl_dev_state *cxlds,
419 			      struct cxl_mbox_cmd *cmd)
420 {
421 	struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
422 	struct cxl_mbox_set_timestamp_in *ts = cmd->payload_in;
423 
424 	if (cmd->size_in != sizeof(*ts))
425 		return -EINVAL;
426 
427 	if (cmd->size_out != 0)
428 		return -EINVAL;
429 
430 	mdata->timestamp = le64_to_cpu(ts->timestamp);
431 	return 0;
432 }
433 
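/*
 * Pre-populate the mock event logs. The informational log gets a small mix of
 * records, the failure log is deliberately over-filled with hardware-replace
 * records so the overflow path is exercised, and the fatal log gets a couple
 * of records; the corresponding event status bits are set for each log.
 */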
434 static void cxl_mock_add_event_logs(struct mock_event_store *mes)
435 {
436 	put_unaligned_le16(CXL_GMER_VALID_CHANNEL | CXL_GMER_VALID_RANK,
437 			   &gen_media.validity_flags);
438 
439 	put_unaligned_le16(CXL_DER_VALID_CHANNEL | CXL_DER_VALID_BANK_GROUP |
440 			   CXL_DER_VALID_BANK | CXL_DER_VALID_COLUMN,
441 			   &dram.validity_flags);
442 
443 	mes_add_event(mes, CXL_EVENT_TYPE_INFO, &maint_needed);
444 	mes_add_event(mes, CXL_EVENT_TYPE_INFO,
445 		      (struct cxl_event_record_raw *)&gen_media);
446 	mes_add_event(mes, CXL_EVENT_TYPE_INFO,
447 		      (struct cxl_event_record_raw *)&mem_module);
448 	mes->ev_status |= CXLDEV_EVENT_STATUS_INFO;
449 
450 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &maint_needed);
451 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
452 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
453 		      (struct cxl_event_record_raw *)&dram);
454 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
455 		      (struct cxl_event_record_raw *)&gen_media);
456 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
457 		      (struct cxl_event_record_raw *)&mem_module);
458 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
459 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
460 		      (struct cxl_event_record_raw *)&dram);
461 	/* Overflow this log */
462 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
463 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
464 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
465 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
466 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
467 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
468 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
469 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
470 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
471 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
472 	mes->ev_status |= CXLDEV_EVENT_STATUS_FAIL;
473 
474 	mes_add_event(mes, CXL_EVENT_TYPE_FATAL, &hardware_replace);
475 	mes_add_event(mes, CXL_EVENT_TYPE_FATAL,
476 		      (struct cxl_event_record_raw *)&dram);
477 	mes->ev_status |= CXLDEV_EVENT_STATUS_FATAL;
478 }
479 
480 static int mock_gsl(struct cxl_mbox_cmd *cmd)
481 {
482 	if (cmd->size_out < sizeof(mock_gsl_payload))
483 		return -EINVAL;
484 
485 	memcpy(cmd->payload_out, &mock_gsl_payload, sizeof(mock_gsl_payload));
486 	cmd->size_out = sizeof(mock_gsl_payload);
487 
488 	return 0;
489 }
490 
491 static int mock_get_log(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
492 {
493 	struct cxl_mbox_get_log *gl = cmd->payload_in;
494 	u32 offset = le32_to_cpu(gl->offset);
495 	u32 length = le32_to_cpu(gl->length);
496 	uuid_t uuid = DEFINE_CXL_CEL_UUID;
497 	void *data = &mock_cel;
498 
499 	if (cmd->size_in < sizeof(*gl))
500 		return -EINVAL;
501 	if (length > cxlds->payload_size)
502 		return -EINVAL;
503 	if (offset + length > sizeof(mock_cel))
504 		return -EINVAL;
505 	if (!uuid_equal(&gl->uuid, &uuid))
506 		return -EINVAL;
507 	if (length > cmd->size_out)
508 		return -EINVAL;
509 
510 	memcpy(cmd->payload_out, data + offset, length);
511 
512 	return 0;
513 }
514 
515 static int mock_rcd_id(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
516 {
517 	struct cxl_mbox_identify id = {
518 		.fw_revision = { "mock fw v1 " },
519 		.total_capacity =
520 			cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER),
521 		.volatile_capacity =
522 			cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER),
523 	};
524 
525 	if (cmd->size_out < sizeof(id))
526 		return -EINVAL;
527 
528 	memcpy(cmd->payload_out, &id, sizeof(id));
529 
530 	return 0;
531 }
532 
533 static int mock_id(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
534 {
535 	struct cxl_mbox_identify id = {
536 		.fw_revision = { "mock fw v1 " },
537 		.lsa_size = cpu_to_le32(LSA_SIZE),
538 		.partition_align =
539 			cpu_to_le64(SZ_256M / CXL_CAPACITY_MULTIPLIER),
540 		.total_capacity =
541 			cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER),
542 		.inject_poison_limit = cpu_to_le16(MOCK_INJECT_TEST_MAX),
543 	};
544 
545 	put_unaligned_le24(CXL_POISON_LIST_MAX, id.poison_list_max_mer);
546 
547 	if (cmd->size_out < sizeof(id))
548 		return -EINVAL;
549 
550 	memcpy(cmd->payload_out, &id, sizeof(id));
551 
552 	return 0;
553 }
554 
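/* Report a fixed 50/50 split of DEV_SIZE between volatile and persistent capacity */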
555 static int mock_partition_info(struct cxl_dev_state *cxlds,
556 			       struct cxl_mbox_cmd *cmd)
557 {
558 	struct cxl_mbox_get_partition_info pi = {
559 		.active_volatile_cap =
560 			cpu_to_le64(DEV_SIZE / 2 / CXL_CAPACITY_MULTIPLIER),
561 		.active_persistent_cap =
562 			cpu_to_le64(DEV_SIZE / 2 / CXL_CAPACITY_MULTIPLIER),
563 	};
564 
565 	if (cmd->size_out < sizeof(pi))
566 		return -EINVAL;
567 
568 	memcpy(cmd->payload_out, &pi, sizeof(pi));
569 
570 	return 0;
571 }
572 
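/*
 * Sanitize / secure-erase emulation: both commands are refused with a
 * security return code while a user passphrase is set or the device is
 * locked, and otherwise complete immediately (no background operation).
 */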
573 static int mock_sanitize(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
574 {
575 	struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
576 
577 	if (cmd->size_in != 0)
578 		return -EINVAL;
579 
580 	if (cmd->size_out != 0)
581 		return -EINVAL;
582 
583 	if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
584 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
585 		return -ENXIO;
586 	}
587 	if (mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED) {
588 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
589 		return -ENXIO;
590 	}
591 
592 	return 0; /* assume completion in < 2 secs, no background operation */
593 }
594 
595 static int mock_secure_erase(struct cxl_dev_state *cxlds,
596 			     struct cxl_mbox_cmd *cmd)
597 {
598 	struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
599 
600 	if (cmd->size_in != 0)
601 		return -EINVAL;
602 
603 	if (cmd->size_out != 0)
604 		return -EINVAL;
605 
606 	if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
607 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
608 		return -ENXIO;
609 	}
610 
611 	if (mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED) {
612 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
613 		return -ENXIO;
614 	}
615 
616 	return 0;
617 }
618 
619 static int mock_get_security_state(struct cxl_dev_state *cxlds,
620 				   struct cxl_mbox_cmd *cmd)
621 {
622 	struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
623 
624 	if (cmd->size_in)
625 		return -EINVAL;
626 
627 	if (cmd->size_out != sizeof(u32))
628 		return -EINVAL;
629 
630 	memcpy(cmd->payload_out, &mdata->security_state, sizeof(u32));
631 
632 	return 0;
633 }
634 
635 static void master_plimit_check(struct cxl_mockmem_data *mdata)
636 {
637 	if (mdata->master_limit == PASS_TRY_LIMIT)
638 		return;
639 	mdata->master_limit++;
640 	if (mdata->master_limit == PASS_TRY_LIMIT)
641 		mdata->security_state |= CXL_PMEM_SEC_STATE_MASTER_PLIMIT;
642 }
643 
644 static void user_plimit_check(struct cxl_mockmem_data *mdata)
645 {
646 	if (mdata->user_limit == PASS_TRY_LIMIT)
647 		return;
648 	mdata->user_limit++;
649 	if (mdata->user_limit == PASS_TRY_LIMIT)
650 		mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PLIMIT;
651 }
652 
653 static int mock_set_passphrase(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
654 {
655 	struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
656 	struct cxl_set_pass *set_pass;
657 
658 	if (cmd->size_in != sizeof(*set_pass))
659 		return -EINVAL;
660 
661 	if (cmd->size_out != 0)
662 		return -EINVAL;
663 
664 	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
665 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
666 		return -ENXIO;
667 	}
668 
669 	set_pass = cmd->payload_in;
670 	switch (set_pass->type) {
671 	case CXL_PMEM_SEC_PASS_MASTER:
672 		if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT) {
673 			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
674 			return -ENXIO;
675 		}
676 		/*
677 		 * CXL spec rev3.0 8.2.9.8.6.2: the master passphrase shall only be set in
678 		 * the security disabled state when the user passphrase is not set.
679 		 */
680 		if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
681 			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
682 			return -ENXIO;
683 		}
684 		if (memcmp(mdata->master_pass, set_pass->old_pass, NVDIMM_PASSPHRASE_LEN)) {
685 			master_plimit_check(mdata);
686 			cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
687 			return -ENXIO;
688 		}
689 		memcpy(mdata->master_pass, set_pass->new_pass, NVDIMM_PASSPHRASE_LEN);
690 		mdata->security_state |= CXL_PMEM_SEC_STATE_MASTER_PASS_SET;
691 		return 0;
692 
693 	case CXL_PMEM_SEC_PASS_USER:
694 		if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT) {
695 			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
696 			return -ENXIO;
697 		}
698 		if (memcmp(mdata->user_pass, set_pass->old_pass, NVDIMM_PASSPHRASE_LEN)) {
699 			user_plimit_check(mdata);
700 			cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
701 			return -ENXIO;
702 		}
703 		memcpy(mdata->user_pass, set_pass->new_pass, NVDIMM_PASSPHRASE_LEN);
704 		mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PASS_SET;
705 		return 0;
706 
707 	default:
708 		cmd->return_code = CXL_MBOX_CMD_RC_INPUT;
709 	}
710 	return -EINVAL;
711 }
712 
713 static int mock_disable_passphrase(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
714 {
715 	struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
716 	struct cxl_disable_pass *dis_pass;
717 
718 	if (cmd->size_in != sizeof(*dis_pass))
719 		return -EINVAL;
720 
721 	if (cmd->size_out != 0)
722 		return -EINVAL;
723 
724 	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
725 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
726 		return -ENXIO;
727 	}
728 
729 	dis_pass = cmd->payload_in;
730 	switch (dis_pass->type) {
731 	case CXL_PMEM_SEC_PASS_MASTER:
732 		if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT) {
733 			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
734 			return -ENXIO;
735 		}
736 
737 		if (!(mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PASS_SET)) {
738 			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
739 			return -ENXIO;
740 		}
741 
742 		if (memcmp(dis_pass->pass, mdata->master_pass, NVDIMM_PASSPHRASE_LEN)) {
743 			master_plimit_check(mdata);
744 			cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
745 			return -ENXIO;
746 		}
747 
748 		mdata->master_limit = 0;
749 		memset(mdata->master_pass, 0, NVDIMM_PASSPHRASE_LEN);
750 		mdata->security_state &= ~CXL_PMEM_SEC_STATE_MASTER_PASS_SET;
751 		return 0;
752 
753 	case CXL_PMEM_SEC_PASS_USER:
754 		if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT) {
755 			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
756 			return -ENXIO;
757 		}
758 
759 		if (!(mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET)) {
760 			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
761 			return -ENXIO;
762 		}
763 
764 		if (memcmp(dis_pass->pass, mdata->user_pass, NVDIMM_PASSPHRASE_LEN)) {
765 			user_plimit_check(mdata);
766 			cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
767 			return -ENXIO;
768 		}
769 
770 		mdata->user_limit = 0;
771 		memset(mdata->user_pass, 0, NVDIMM_PASSPHRASE_LEN);
772 		mdata->security_state &= ~(CXL_PMEM_SEC_STATE_USER_PASS_SET |
773 					   CXL_PMEM_SEC_STATE_LOCKED);
774 		return 0;
775 
776 	default:
777 		cmd->return_code = CXL_MBOX_CMD_RC_INPUT;
778 		return -EINVAL;
779 	}
780 
781 	return 0;
782 }
783 
784 static int mock_freeze_security(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
785 {
786 	struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
787 
788 	if (cmd->size_in != 0)
789 		return -EINVAL;
790 
791 	if (cmd->size_out != 0)
792 		return -EINVAL;
793 
794 	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN)
795 		return 0;
796 
797 	mdata->security_state |= CXL_PMEM_SEC_STATE_FROZEN;
798 	return 0;
799 }
800 
801 static int mock_unlock_security(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
802 {
803 	struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
804 
805 	if (cmd->size_in != NVDIMM_PASSPHRASE_LEN)
806 		return -EINVAL;
807 
808 	if (cmd->size_out != 0)
809 		return -EINVAL;
810 
811 	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
812 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
813 		return -ENXIO;
814 	}
815 
816 	if (!(mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET)) {
817 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
818 		return -ENXIO;
819 	}
820 
821 	if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT) {
822 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
823 		return -ENXIO;
824 	}
825 
826 	if (!(mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED)) {
827 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
828 		return -ENXIO;
829 	}
830 
831 	if (memcmp(cmd->payload_in, mdata->user_pass, NVDIMM_PASSPHRASE_LEN)) {
832 		if (++mdata->user_limit == PASS_TRY_LIMIT)
833 			mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PLIMIT;
834 		cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
835 		return -ENXIO;
836 	}
837 
838 	mdata->user_limit = 0;
839 	mdata->security_state &= ~CXL_PMEM_SEC_STATE_LOCKED;
840 	return 0;
841 }
842 
843 static int mock_passphrase_secure_erase(struct cxl_dev_state *cxlds,
844 					struct cxl_mbox_cmd *cmd)
845 {
846 	struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
847 	struct cxl_pass_erase *erase;
848 
849 	if (cmd->size_in != sizeof(*erase))
850 		return -EINVAL;
851 
852 	if (cmd->size_out != 0)
853 		return -EINVAL;
854 
855 	erase = cmd->payload_in;
856 	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
857 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
858 		return -ENXIO;
859 	}
860 
861 	if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT &&
862 	    erase->type == CXL_PMEM_SEC_PASS_USER) {
863 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
864 		return -ENXIO;
865 	}
866 
867 	if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT &&
868 	    erase->type == CXL_PMEM_SEC_PASS_MASTER) {
869 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
870 		return -ENXIO;
871 	}
872 
873 	switch (erase->type) {
874 	case CXL_PMEM_SEC_PASS_MASTER:
875 		/*
876 		 * The spec does not clearly define the behavior of the scenario
877 		 * where a master passphrase is passed in while the master
878 		 * passphrase is not set and the user passphrase is not set. This
879 		 * code assumes it will behave the same as a CXL secure erase
880 		 * command without passphrase (0x4401).
881 		 */
882 		if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PASS_SET) {
883 			if (memcmp(mdata->master_pass, erase->pass,
884 				   NVDIMM_PASSPHRASE_LEN)) {
885 				master_plimit_check(mdata);
886 				cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
887 				return -ENXIO;
888 			}
889 			mdata->master_limit = 0;
890 			mdata->user_limit = 0;
891 			mdata->security_state &= ~CXL_PMEM_SEC_STATE_USER_PASS_SET;
892 			memset(mdata->user_pass, 0, NVDIMM_PASSPHRASE_LEN);
893 			mdata->security_state &= ~CXL_PMEM_SEC_STATE_LOCKED;
894 		} else {
895 			/*
896 			 * CXL rev3 8.2.9.8.6.3 Disable Passphrase
897 			 * When master passphrase is disabled, the device shall
898 			 * return Invalid Input for the Passphrase Secure Erase
899 			 * command with master passphrase.
900 			 */
901 			return -EINVAL;
902 		}
903 		/* Scramble encryption keys so that data is effectively erased */
904 		break;
905 	case CXL_PMEM_SEC_PASS_USER:
906 		/*
907 		 * The spec does not clearly define the behavior of the scenario
908 		 * where a user passphrase is passed in while the user
909 		 * passphrase is not set. This code assumes it will behave the
910 		 * same as a CXL secure erase command without passphrase
911 		 * (0x4401).
912 		 */
913 		if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
914 			if (memcmp(mdata->user_pass, erase->pass,
915 				   NVDIMM_PASSPHRASE_LEN)) {
916 				user_plimit_check(mdata);
917 				cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
918 				return -ENXIO;
919 			}
920 			mdata->user_limit = 0;
921 			mdata->security_state &= ~CXL_PMEM_SEC_STATE_USER_PASS_SET;
922 			memset(mdata->user_pass, 0, NVDIMM_PASSPHRASE_LEN);
923 		}
924 
925 		/*
926 		 * CXL rev3 Table 8-118
927 		 * If the user passphrase is not set or not supported by the device,
928 		 * the current passphrase value is ignored. Assume the operation
929 		 * proceeds as a secure erase without passphrase, since the spec
930 		 * is not explicit.
931 		 */
932 
933 		/* Scramble encryption keys so that data is effectively erased */
934 		break;
935 	default:
936 		return -EINVAL;
937 	}
938 
939 	return 0;
940 }
941 
942 static int mock_get_lsa(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
943 {
944 	struct cxl_mbox_get_lsa *get_lsa = cmd->payload_in;
945 	struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
946 	void *lsa = mdata->lsa;
947 	u32 offset, length;
948 
949 	if (sizeof(*get_lsa) > cmd->size_in)
950 		return -EINVAL;
951 	offset = le32_to_cpu(get_lsa->offset);
952 	length = le32_to_cpu(get_lsa->length);
953 	if (offset + length > LSA_SIZE)
954 		return -EINVAL;
955 	if (length > cmd->size_out)
956 		return -EINVAL;
957 
958 	memcpy(cmd->payload_out, lsa + offset, length);
959 	return 0;
960 }
961 
962 static int mock_set_lsa(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
963 {
964 	struct cxl_mbox_set_lsa *set_lsa = cmd->payload_in;
965 	struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
966 	void *lsa = mdata->lsa;
967 	u32 offset, length;
968 
969 	if (sizeof(*set_lsa) > cmd->size_in)
970 		return -EINVAL;
971 	offset = le32_to_cpu(set_lsa->offset);
972 	length = cmd->size_in - sizeof(*set_lsa);
973 	if (offset + length > LSA_SIZE)
974 		return -EINVAL;
975 
976 	memcpy(lsa + offset, &set_lsa->data[0], length);
977 	return 0;
978 }
979 
980 static int mock_health_info(struct cxl_dev_state *cxlds,
981 			    struct cxl_mbox_cmd *cmd)
982 {
983 	struct cxl_mbox_health_info health_info = {
984 		/* set flags for maint needed, perf degraded, hw replacement */
985 		.health_status = 0x7,
986 		/* set media status to "All Data Lost" */
987 		.media_status = 0x3,
988 		/*
989 		 * set ext_status flags for:
990 		 *  ext_life_used: normal,
991 		 *  ext_temperature: critical,
992 		 *  ext_corrected_volatile: warning,
993 		 *  ext_corrected_persistent: normal,
994 		 */
995 		.ext_status = 0x18,
996 		.life_used = 15,
997 		.temperature = cpu_to_le16(25),
998 		.dirty_shutdowns = cpu_to_le32(10),
999 		.volatile_errors = cpu_to_le32(20),
1000 		.pmem_errors = cpu_to_le32(30),
1001 	};
1002 
1003 	if (cmd->size_out < sizeof(health_info))
1004 		return -EINVAL;
1005 
1006 	memcpy(cmd->payload_out, &health_info, sizeof(health_info));
1007 	return 0;
1008 }
1009 
1010 static struct mock_poison {
1011 	struct cxl_dev_state *cxlds;
1012 	u64 dpa;
1013 } mock_poison_list[MOCK_INJECT_TEST_MAX];
1014 
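/*
 * Build a Get Poison List payload from the mock_poison_list entries that
 * belong to this device and fall within [offset, offset + length). Each
 * returned record is tagged as an injected poison source and the payload is
 * capped at poison_inject_dev_max records. The caller frees the allocation.
 */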
1015 static struct cxl_mbox_poison_out *
1016 cxl_get_injected_po(struct cxl_dev_state *cxlds, u64 offset, u64 length)
1017 {
1018 	struct cxl_mbox_poison_out *po;
1019 	int nr_records = 0;
1020 	u64 dpa;
1021 
1022 	po = kzalloc(struct_size(po, record, poison_inject_dev_max), GFP_KERNEL);
1023 	if (!po)
1024 		return NULL;
1025 
1026 	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1027 		if (mock_poison_list[i].cxlds != cxlds)
1028 			continue;
1029 		if (mock_poison_list[i].dpa < offset ||
1030 		    mock_poison_list[i].dpa > offset + length - 1)
1031 			continue;
1032 
1033 		dpa = mock_poison_list[i].dpa + CXL_POISON_SOURCE_INJECTED;
1034 		po->record[nr_records].address = cpu_to_le64(dpa);
1035 		po->record[nr_records].length = cpu_to_le32(1);
1036 		nr_records++;
1037 		if (nr_records == poison_inject_dev_max)
1038 			break;
1039 	}
1040 
1041 	/* Always return count, even when zero */
1042 	po->count = cpu_to_le16(nr_records);
1043 
1044 	return po;
1045 }
1046 
1047 static int mock_get_poison(struct cxl_dev_state *cxlds,
1048 			   struct cxl_mbox_cmd *cmd)
1049 {
1050 	struct cxl_mbox_poison_in *pi = cmd->payload_in;
1051 	struct cxl_mbox_poison_out *po;
1052 	u64 offset = le64_to_cpu(pi->offset);
1053 	u64 length = le64_to_cpu(pi->length);
1054 	int nr_records;
1055 
1056 	po = cxl_get_injected_po(cxlds, offset, length);
1057 	if (!po)
1058 		return -ENOMEM;
1059 	nr_records = le16_to_cpu(po->count);
1060 	memcpy(cmd->payload_out, po, struct_size(po, record, nr_records));
1061 	cmd->size_out = struct_size(po, record, nr_records);
1062 	kfree(po);
1063 
1064 	return 0;
1065 }
1066 
1067 static bool mock_poison_dev_max_injected(struct cxl_dev_state *cxlds)
1068 {
1069 	int count = 0;
1070 
1071 	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1072 		if (mock_poison_list[i].cxlds == cxlds)
1073 			count++;
1074 	}
1075 	return (count >= poison_inject_dev_max);
1076 }
1077 
1078 static bool mock_poison_add(struct cxl_dev_state *cxlds, u64 dpa)
1079 {
1080 	if (mock_poison_dev_max_injected(cxlds)) {
1081 		dev_dbg(cxlds->dev,
1082 			"Device poison injection limit has been reached: %u\n",
1083 			poison_inject_dev_max);
1084 		return false;
1085 	}
1086 
1087 	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1088 		if (!mock_poison_list[i].cxlds) {
1089 			mock_poison_list[i].cxlds = cxlds;
1090 			mock_poison_list[i].dpa = dpa;
1091 			return true;
1092 		}
1093 	}
1094 	dev_dbg(cxlds->dev,
1095 		"Mock test poison injection limit has been reached: %d\n",
1096 		MOCK_INJECT_TEST_MAX);
1097 
1098 	return false;
1099 }
1100 
1101 static bool mock_poison_found(struct cxl_dev_state *cxlds, u64 dpa)
1102 {
1103 	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1104 		if (mock_poison_list[i].cxlds == cxlds &&
1105 		    mock_poison_list[i].dpa == dpa)
1106 			return true;
1107 	}
1108 	return false;
1109 }
1110 
1111 static int mock_inject_poison(struct cxl_dev_state *cxlds,
1112 			      struct cxl_mbox_cmd *cmd)
1113 {
1114 	struct cxl_mbox_inject_poison *pi = cmd->payload_in;
1115 	u64 dpa = le64_to_cpu(pi->address);
1116 
1117 	if (mock_poison_found(cxlds, dpa)) {
1118 		/* Not an error to inject poison if already poisoned */
1119 		dev_dbg(cxlds->dev, "DPA: 0x%llx already poisoned\n", dpa);
1120 		return 0;
1121 	}
1122 	if (!mock_poison_add(cxlds, dpa))
1123 		return -ENXIO;
1124 
1125 	return 0;
1126 }
1127 
1128 static bool mock_poison_del(struct cxl_dev_state *cxlds, u64 dpa)
1129 {
1130 	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1131 		if (mock_poison_list[i].cxlds == cxlds &&
1132 		    mock_poison_list[i].dpa == dpa) {
1133 			mock_poison_list[i].cxlds = NULL;
1134 			return true;
1135 		}
1136 	}
1137 	return false;
1138 }
1139 
1140 static int mock_clear_poison(struct cxl_dev_state *cxlds,
1141 			     struct cxl_mbox_cmd *cmd)
1142 {
1143 	struct cxl_mbox_clear_poison *pi = cmd->payload_in;
1144 	u64 dpa = le64_to_cpu(pi->address);
1145 
1146 	/*
1147 	 * A real CXL device will write pi->write_data to the address
1148 	 * being cleared. In this mock, just delete this address from
1149 	 * the mock poison list.
1150 	 */
1151 	if (!mock_poison_del(cxlds, dpa))
1152 		dev_dbg(cxlds->dev, "DPA: 0x%llx not in poison list\n", dpa);
1153 
1154 	return 0;
1155 }
1156 
1157 static bool mock_poison_list_empty(void)
1158 {
1159 	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1160 		if (mock_poison_list[i].cxlds)
1161 			return false;
1162 	}
1163 	return true;
1164 }
1165 
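/*
 * Driver-level poison_inject_max attribute: reads back poison_inject_dev_max
 * and allows it to be changed (up to MOCK_INJECT_TEST_MAX) while no poison is
 * currently injected. With a typical sysfs layout this would appear under the
 * platform driver's directory, e.g.
 * /sys/bus/platform/drivers/<this driver>/poison_inject_max (path assumed,
 * not spelled out in this file).
 */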
1166 static ssize_t poison_inject_max_show(struct device_driver *drv, char *buf)
1167 {
1168 	return sysfs_emit(buf, "%u\n", poison_inject_dev_max);
1169 }
1170 
1171 static ssize_t poison_inject_max_store(struct device_driver *drv,
1172 				       const char *buf, size_t len)
1173 {
1174 	unsigned int val;
1175 
1176 	if (kstrtouint(buf, 0, &val) < 0)
1177 		return -EINVAL;
1178 
1179 	if (!mock_poison_list_empty())
1180 		return -EBUSY;
1181 
1182 	if (val <= MOCK_INJECT_TEST_MAX)
1183 		poison_inject_dev_max = val;
1184 	else
1185 		return -EINVAL;
1186 
1187 	return len;
1188 }
1189 
1190 static DRIVER_ATTR_RW(poison_inject_max);
1191 
1192 static struct attribute *cxl_mock_mem_core_attrs[] = {
1193 	&driver_attr_poison_inject_max.attr,
1194 	NULL
1195 };
1196 ATTRIBUTE_GROUPS(cxl_mock_mem_core);
1197 
1198 static int mock_fw_info(struct cxl_dev_state *cxlds,
1199 			    struct cxl_mbox_cmd *cmd)
1200 {
1201 	struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
1202 	struct cxl_mbox_get_fw_info fw_info = {
1203 		.num_slots = FW_SLOTS,
1204 		.slot_info = (mdata->fw_slot & 0x7) |
1205 			     ((mdata->fw_staged & 0x7) << 3),
1206 		.activation_cap = 0,
1207 	};
1208 
1209 	strcpy(fw_info.slot_1_revision, "cxl_test_fw_001");
1210 	strcpy(fw_info.slot_2_revision, "cxl_test_fw_002");
1211 	strcpy(fw_info.slot_3_revision, "cxl_test_fw_003");
1212 	strcpy(fw_info.slot_4_revision, "");
1213 
1214 	if (cmd->size_out < sizeof(fw_info))
1215 		return -EINVAL;
1216 
1217 	memcpy(cmd->payload_out, &fw_info, sizeof(fw_info));
1218 	return 0;
1219 }
1220 
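/*
 * Emulate Transfer FW: 'offset' is given in units of CXL_FW_TRANSFER_ALIGNMENT
 * and the data is staged into the FW_SIZE mock firmware buffer. FULL transfers
 * must start at offset 0, FULL/END must name a valid slot and record the final
 * image size, and ABORT discards the request without copying.
 */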
1221 static int mock_transfer_fw(struct cxl_dev_state *cxlds,
1222 			    struct cxl_mbox_cmd *cmd)
1223 {
1224 	struct cxl_mbox_transfer_fw *transfer = cmd->payload_in;
1225 	struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
1226 	void *fw = mdata->fw;
1227 	size_t offset, length;
1228 
1229 	offset = le32_to_cpu(transfer->offset) * CXL_FW_TRANSFER_ALIGNMENT;
1230 	length = cmd->size_in - sizeof(*transfer);
1231 	if (offset + length > FW_SIZE)
1232 		return -EINVAL;
1233 
1234 	switch (transfer->action) {
1235 	case CXL_FW_TRANSFER_ACTION_FULL:
1236 		if (offset != 0)
1237 			return -EINVAL;
1238 		fallthrough;
1239 	case CXL_FW_TRANSFER_ACTION_END:
1240 		if (transfer->slot == 0 || transfer->slot > FW_SLOTS)
1241 			return -EINVAL;
1242 		mdata->fw_size = offset + length;
1243 		break;
1244 	case CXL_FW_TRANSFER_ACTION_INITIATE:
1245 	case CXL_FW_TRANSFER_ACTION_CONTINUE:
1246 		break;
1247 	case CXL_FW_TRANSFER_ACTION_ABORT:
1248 		return 0;
1249 	default:
1250 		return -EINVAL;
1251 	}
1252 
1253 	memcpy(fw + offset, transfer->data, length);
1254 	return 0;
1255 }
1256 
1257 static int mock_activate_fw(struct cxl_dev_state *cxlds,
1258 			    struct cxl_mbox_cmd *cmd)
1259 {
1260 	struct cxl_mbox_activate_fw *activate = cmd->payload_in;
1261 	struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
1262 
1263 	if (activate->slot == 0 || activate->slot > FW_SLOTS)
1264 		return -EINVAL;
1265 
1266 	switch (activate->action) {
1267 	case CXL_FW_ACTIVATE_ONLINE:
1268 		mdata->fw_slot = activate->slot;
1269 		mdata->fw_staged = 0;
1270 		return 0;
1271 	case CXL_FW_ACTIVATE_OFFLINE:
1272 		mdata->fw_staged = activate->slot;
1273 		return 0;
1274 	}
1275 
1276 	return -EINVAL;
1277 }
1278 
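/*
 * Mock mailbox dispatcher: routes each supported opcode to its handler above.
 * Opcodes without a mock implementation fall through and return -EIO.
 */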
1279 static int cxl_mock_mbox_send(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
1280 {
1281 	struct device *dev = cxlds->dev;
1282 	int rc = -EIO;
1283 
1284 	switch (cmd->opcode) {
1285 	case CXL_MBOX_OP_SET_TIMESTAMP:
1286 		rc = mock_set_timestamp(cxlds, cmd);
1287 		break;
1288 	case CXL_MBOX_OP_GET_SUPPORTED_LOGS:
1289 		rc = mock_gsl(cmd);
1290 		break;
1291 	case CXL_MBOX_OP_GET_LOG:
1292 		rc = mock_get_log(cxlds, cmd);
1293 		break;
1294 	case CXL_MBOX_OP_IDENTIFY:
1295 		if (cxlds->rcd)
1296 			rc = mock_rcd_id(cxlds, cmd);
1297 		else
1298 			rc = mock_id(cxlds, cmd);
1299 		break;
1300 	case CXL_MBOX_OP_GET_LSA:
1301 		rc = mock_get_lsa(cxlds, cmd);
1302 		break;
1303 	case CXL_MBOX_OP_GET_PARTITION_INFO:
1304 		rc = mock_partition_info(cxlds, cmd);
1305 		break;
1306 	case CXL_MBOX_OP_GET_EVENT_RECORD:
1307 		rc = mock_get_event(cxlds, cmd);
1308 		break;
1309 	case CXL_MBOX_OP_CLEAR_EVENT_RECORD:
1310 		rc = mock_clear_event(cxlds, cmd);
1311 		break;
1312 	case CXL_MBOX_OP_SET_LSA:
1313 		rc = mock_set_lsa(cxlds, cmd);
1314 		break;
1315 	case CXL_MBOX_OP_GET_HEALTH_INFO:
1316 		rc = mock_health_info(cxlds, cmd);
1317 		break;
1318 	case CXL_MBOX_OP_SANITIZE:
1319 		rc = mock_sanitize(cxlds, cmd);
1320 		break;
1321 	case CXL_MBOX_OP_SECURE_ERASE:
1322 		rc = mock_secure_erase(cxlds, cmd);
1323 		break;
1324 	case CXL_MBOX_OP_GET_SECURITY_STATE:
1325 		rc = mock_get_security_state(cxlds, cmd);
1326 		break;
1327 	case CXL_MBOX_OP_SET_PASSPHRASE:
1328 		rc = mock_set_passphrase(cxlds, cmd);
1329 		break;
1330 	case CXL_MBOX_OP_DISABLE_PASSPHRASE:
1331 		rc = mock_disable_passphrase(cxlds, cmd);
1332 		break;
1333 	case CXL_MBOX_OP_FREEZE_SECURITY:
1334 		rc = mock_freeze_security(cxlds, cmd);
1335 		break;
1336 	case CXL_MBOX_OP_UNLOCK:
1337 		rc = mock_unlock_security(cxlds, cmd);
1338 		break;
1339 	case CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE:
1340 		rc = mock_passphrase_secure_erase(cxlds, cmd);
1341 		break;
1342 	case CXL_MBOX_OP_GET_POISON:
1343 		rc = mock_get_poison(cxlds, cmd);
1344 		break;
1345 	case CXL_MBOX_OP_INJECT_POISON:
1346 		rc = mock_inject_poison(cxlds, cmd);
1347 		break;
1348 	case CXL_MBOX_OP_CLEAR_POISON:
1349 		rc = mock_clear_poison(cxlds, cmd);
1350 		break;
1351 	case CXL_MBOX_OP_GET_FW_INFO:
1352 		rc = mock_fw_info(cxlds, cmd);
1353 		break;
1354 	case CXL_MBOX_OP_TRANSFER_FW:
1355 		rc = mock_transfer_fw(cxlds, cmd);
1356 		break;
1357 	case CXL_MBOX_OP_ACTIVATE_FW:
1358 		rc = mock_activate_fw(cxlds, cmd);
1359 		break;
1360 	default:
1361 		break;
1362 	}
1363 
1364 	dev_dbg(dev, "opcode: %#x sz_in: %zd sz_out: %zd rc: %d\n", cmd->opcode,
1365 		cmd->size_in, cmd->size_out, rc);
1366 
1367 	return rc;
1368 }
1369 
1370 static void label_area_release(void *lsa)
1371 {
1372 	vfree(lsa);
1373 }
1374 
1375 static void fw_buf_release(void *buf)
1376 {
1377 	vfree(buf);
1378 }
1379 
1380 static bool is_rcd(struct platform_device *pdev)
1381 {
1382 	const struct platform_device_id *id = platform_get_device_id(pdev);
1383 
1384 	return !!id->driver_data;
1385 }
1386 
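/*
 * Writing anything to the memdev's event_trigger attribute rewinds all mock
 * event logs and re-runs event record retrieval, so tests can replay the
 * canned events on demand.
 */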
1387 static ssize_t event_trigger_store(struct device *dev,
1388 				   struct device_attribute *attr,
1389 				   const char *buf, size_t count)
1390 {
1391 	cxl_mock_event_trigger(dev);
1392 	return count;
1393 }
1394 static DEVICE_ATTR_WO(event_trigger);
1395 
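/*
 * Probe path for the mock memdev: allocate the label area and firmware
 * buffers, create the cxl_dev_state with the mock mailbox handler, run the
 * normal enumeration/identify/partition setup against the mocked commands,
 * register the memdev, and kick off initial event record retrieval.
 */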
1396 static int cxl_mock_mem_probe(struct platform_device *pdev)
1397 {
1398 	struct device *dev = &pdev->dev;
1399 	struct cxl_memdev *cxlmd;
1400 	struct cxl_dev_state *cxlds;
1401 	struct cxl_mockmem_data *mdata;
1402 	int rc;
1403 
1404 	mdata = devm_kzalloc(dev, sizeof(*mdata), GFP_KERNEL);
1405 	if (!mdata)
1406 		return -ENOMEM;
1407 	dev_set_drvdata(dev, mdata);
1408 
1409 	mdata->lsa = vmalloc(LSA_SIZE);
1410 	if (!mdata->lsa)
1411 		return -ENOMEM;
1412 	mdata->fw = vmalloc(FW_SIZE);
1413 	if (!mdata->fw)
1414 		return -ENOMEM;
1415 	mdata->fw_slot = 2;
1416 
1417 	rc = devm_add_action_or_reset(dev, label_area_release, mdata->lsa);
1418 	if (rc)
1419 		return rc;
1420 
1421 	rc = devm_add_action_or_reset(dev, fw_buf_release, mdata->fw);
1422 	if (rc)
1423 		return rc;
1424 
1425 	cxlds = cxl_dev_state_create(dev);
1426 	if (IS_ERR(cxlds))
1427 		return PTR_ERR(cxlds);
1428 
1429 	cxlds->serial = pdev->id;
1430 	cxlds->mbox_send = cxl_mock_mbox_send;
1431 	cxlds->payload_size = SZ_4K;
1432 	cxlds->event.buf = (struct cxl_get_event_payload *) mdata->event_buf;
1433 	if (is_rcd(pdev)) {
1434 		cxlds->rcd = true;
1435 		cxlds->component_reg_phys = CXL_RESOURCE_NONE;
1436 	}
1437 
1438 	rc = cxl_enumerate_cmds(cxlds);
1439 	if (rc)
1440 		return rc;
1441 
1442 	rc = cxl_poison_state_init(cxlds);
1443 	if (rc)
1444 		return rc;
1445 
1446 	rc = cxl_set_timestamp(cxlds);
1447 	if (rc)
1448 		return rc;
1449 
1450 	cxlds->media_ready = true;
1451 	rc = cxl_dev_state_identify(cxlds);
1452 	if (rc)
1453 		return rc;
1454 
1455 	rc = cxl_mem_create_range_info(cxlds);
1456 	if (rc)
1457 		return rc;
1458 
1459 	mdata->mes.cxlds = cxlds;
1460 	cxl_mock_add_event_logs(&mdata->mes);
1461 
1462 	cxlmd = devm_cxl_add_memdev(cxlds);
1463 	if (IS_ERR(cxlmd))
1464 		return PTR_ERR(cxlmd);
1465 
1466 	rc = cxl_memdev_setup_fw_upload(cxlds);
1467 	if (rc)
1468 		return rc;
1469 
1470 	cxl_mem_get_event_records(cxlds, CXLDEV_EVENT_STATUS_ALL);
1471 
1472 	return 0;
1473 }
1474 
1475 static ssize_t security_lock_show(struct device *dev,
1476 				  struct device_attribute *attr, char *buf)
1477 {
1478 	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
1479 
1480 	return sysfs_emit(buf, "%u\n",
1481 			  !!(mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED));
1482 }
1483 
1484 static ssize_t security_lock_store(struct device *dev, struct device_attribute *attr,
1485 				   const char *buf, size_t count)
1486 {
1487 	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
1488 	u32 mask = CXL_PMEM_SEC_STATE_FROZEN | CXL_PMEM_SEC_STATE_USER_PLIMIT |
1489 		   CXL_PMEM_SEC_STATE_MASTER_PLIMIT;
1490 	int val;
1491 
1492 	if (kstrtoint(buf, 0, &val) < 0)
1493 		return -EINVAL;
1494 
1495 	if (val == 1) {
1496 		if (!(mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET))
1497 			return -ENXIO;
1498 		mdata->security_state |= CXL_PMEM_SEC_STATE_LOCKED;
1499 		mdata->security_state &= ~mask;
1500 	} else {
1501 		return -EINVAL;
1502 	}
1503 	return count;
1504 }
1505 
1506 static DEVICE_ATTR_RW(security_lock);
1507 
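/*
 * Expose a SHA-256 digest of the currently staged firmware buffer so tests
 * can verify that a firmware transfer arrived intact.
 */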
1508 static ssize_t fw_buf_checksum_show(struct device *dev,
1509 				    struct device_attribute *attr, char *buf)
1510 {
1511 	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
1512 	u8 hash[SHA256_DIGEST_SIZE];
1513 	unsigned char *hstr, *hptr;
1514 	struct sha256_state sctx;
1515 	ssize_t written = 0;
1516 	int i;
1517 
1518 	sha256_init(&sctx);
1519 	sha256_update(&sctx, mdata->fw, mdata->fw_size);
1520 	sha256_final(&sctx, hash);
1521 
1522 	hstr = kzalloc((SHA256_DIGEST_SIZE * 2) + 1, GFP_KERNEL);
1523 	if (!hstr)
1524 		return -ENOMEM;
1525 
1526 	hptr = hstr;
1527 	for (i = 0; i < SHA256_DIGEST_SIZE; i++)
1528 		hptr += sprintf(hptr, "%02x", hash[i]);
1529 
1530 	written = sysfs_emit(buf, "%s\n", hstr);
1531 
1532 	kfree(hstr);
1533 	return written;
1534 }
1535 
1536 static DEVICE_ATTR_RO(fw_buf_checksum);
1537 
1538 static struct attribute *cxl_mock_mem_attrs[] = {
1539 	&dev_attr_security_lock.attr,
1540 	&dev_attr_event_trigger.attr,
1541 	&dev_attr_fw_buf_checksum.attr,
1542 	NULL
1543 };
1544 ATTRIBUTE_GROUPS(cxl_mock_mem);
1545 
1546 static const struct platform_device_id cxl_mock_mem_ids[] = {
1547 	{ .name = "cxl_mem", 0 },
1548 	{ .name = "cxl_rcd", 1 },
1549 	{ },
1550 };
1551 MODULE_DEVICE_TABLE(platform, cxl_mock_mem_ids);
1552 
1553 static struct platform_driver cxl_mock_mem_driver = {
1554 	.probe = cxl_mock_mem_probe,
1555 	.id_table = cxl_mock_mem_ids,
1556 	.driver = {
1557 		.name = KBUILD_MODNAME,
1558 		.dev_groups = cxl_mock_mem_groups,
1559 		.groups = cxl_mock_mem_core_groups,
1560 	},
1561 };
1562 
1563 module_platform_driver(cxl_mock_mem_driver);
1564 MODULE_LICENSE("GPL v2");
1565 MODULE_IMPORT_NS(CXL);
1566