xref: /openbmc/linux/tools/testing/cxl/test/mem.c (revision 09b4aa28)
1 // SPDX-License-Identifier: GPL-2.0-only
2 // Copyright(c) 2021 Intel Corporation. All rights reserved.
3 
4 #include <linux/platform_device.h>
5 #include <linux/mod_devicetable.h>
6 #include <linux/vmalloc.h>
7 #include <linux/module.h>
8 #include <linux/delay.h>
9 #include <linux/sizes.h>
10 #include <linux/bits.h>
11 #include <asm/unaligned.h>
12 #include <crypto/sha2.h>
13 #include <cxlmem.h>
14 
15 #include "trace.h"
16 
17 #define LSA_SIZE SZ_128K
18 #define FW_SIZE SZ_64M
19 #define FW_SLOTS 3
20 #define DEV_SIZE SZ_2G
21 #define EFFECT(x) (1U << x)
22 
23 #define MOCK_INJECT_DEV_MAX 8
24 #define MOCK_INJECT_TEST_MAX 128
25 
26 static unsigned int poison_inject_dev_max = MOCK_INJECT_DEV_MAX;
27 
28 enum cxl_command_effects {
29 	CONF_CHANGE_COLD_RESET = 0,
30 	CONF_CHANGE_IMMEDIATE,
31 	DATA_CHANGE_IMMEDIATE,
32 	POLICY_CHANGE_IMMEDIATE,
33 	LOG_CHANGE_IMMEDIATE,
34 	SECURITY_CHANGE_IMMEDIATE,
35 	BACKGROUND_OP,
36 	SECONDARY_MBOX_SUPPORTED,
37 };
38 
39 #define CXL_CMD_EFFECT_NONE cpu_to_le16(0)
40 
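/*
 * Mock Command Effects Log (CEL): the set of mailbox opcodes this emulated
 * device reports as supported, along with the command effects each one
 * advertises.
 */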
41 static struct cxl_cel_entry mock_cel[] = {
42 	{
43 		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_SUPPORTED_LOGS),
44 		.effect = CXL_CMD_EFFECT_NONE,
45 	},
46 	{
47 		.opcode = cpu_to_le16(CXL_MBOX_OP_IDENTIFY),
48 		.effect = CXL_CMD_EFFECT_NONE,
49 	},
50 	{
51 		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_LSA),
52 		.effect = CXL_CMD_EFFECT_NONE,
53 	},
54 	{
55 		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_PARTITION_INFO),
56 		.effect = CXL_CMD_EFFECT_NONE,
57 	},
58 	{
59 		.opcode = cpu_to_le16(CXL_MBOX_OP_SET_LSA),
60 		.effect = cpu_to_le16(EFFECT(CONF_CHANGE_IMMEDIATE) |
61 				      EFFECT(DATA_CHANGE_IMMEDIATE)),
62 	},
63 	{
64 		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_HEALTH_INFO),
65 		.effect = CXL_CMD_EFFECT_NONE,
66 	},
67 	{
68 		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_POISON),
69 		.effect = CXL_CMD_EFFECT_NONE,
70 	},
71 	{
72 		.opcode = cpu_to_le16(CXL_MBOX_OP_INJECT_POISON),
73 		.effect = cpu_to_le16(EFFECT(DATA_CHANGE_IMMEDIATE)),
74 	},
75 	{
76 		.opcode = cpu_to_le16(CXL_MBOX_OP_CLEAR_POISON),
77 		.effect = cpu_to_le16(EFFECT(DATA_CHANGE_IMMEDIATE)),
78 	},
79 	{
80 		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_FW_INFO),
81 		.effect = CXL_CMD_EFFECT_NONE,
82 	},
83 	{
84 		.opcode = cpu_to_le16(CXL_MBOX_OP_TRANSFER_FW),
85 		.effect = cpu_to_le16(EFFECT(CONF_CHANGE_COLD_RESET) |
86 				      EFFECT(BACKGROUND_OP)),
87 	},
88 	{
89 		.opcode = cpu_to_le16(CXL_MBOX_OP_ACTIVATE_FW),
90 		.effect = cpu_to_le16(EFFECT(CONF_CHANGE_COLD_RESET) |
91 				      EFFECT(CONF_CHANGE_IMMEDIATE)),
92 	},
93 };
94 
95 /* See CXL 2.0 Table 181 Get Health Info Output Payload */
96 struct cxl_mbox_health_info {
97 	u8 health_status;
98 	u8 media_status;
99 	u8 ext_status;
100 	u8 life_used;
101 	__le16 temperature;
102 	__le32 dirty_shutdowns;
103 	__le32 volatile_errors;
104 	__le32 pmem_errors;
105 } __packed;
106 
107 static struct {
108 	struct cxl_mbox_get_supported_logs gsl;
109 	struct cxl_gsl_entry entry;
110 } mock_gsl_payload = {
111 	.gsl = {
112 		.entries = cpu_to_le16(1),
113 	},
114 	.entry = {
115 		.uuid = DEFINE_CXL_CEL_UUID,
116 		.size = cpu_to_le32(sizeof(mock_cel)),
117 	},
118 };
119 
120 #define PASS_TRY_LIMIT 3
121 
122 #define CXL_TEST_EVENT_CNT_MAX 15
123 
124 /* Number of events to return at a time for simulation. */
125 #define CXL_TEST_EVENT_CNT 3
126 
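/*
 * Per-log mock event state: a fixed array of canned records plus read and
 * clear cursors that model the Get/Clear Event Records handshake.
 */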
127 struct mock_event_log {
128 	u16 clear_idx;
129 	u16 cur_idx;
130 	u16 nr_events;
131 	u16 nr_overflow;
132 	u16 overflow_reset;
133 	struct cxl_event_record_raw *events[CXL_TEST_EVENT_CNT_MAX];
134 };
135 
136 struct mock_event_store {
137 	struct cxl_memdev_state *mds;
138 	struct mock_event_log mock_logs[CXL_EVENT_TYPE_MAX];
139 	u32 ev_status;
140 };
141 
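/* Per-device mock state, stashed in drvdata by cxl_mock_mem_probe(). */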
142 struct cxl_mockmem_data {
143 	void *lsa;
144 	void *fw;
145 	int fw_slot;
146 	int fw_staged;
147 	size_t fw_size;
148 	u32 security_state;
149 	u8 user_pass[NVDIMM_PASSPHRASE_LEN];
150 	u8 master_pass[NVDIMM_PASSPHRASE_LEN];
151 	int user_limit;
152 	int master_limit;
153 	struct mock_event_store mes;
154 	u8 event_buf[SZ_4K];
155 	u64 timestamp;
156 };
157 
158 static struct mock_event_log *event_find_log(struct device *dev, int log_type)
159 {
160 	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
161 
162 	if (log_type >= CXL_EVENT_TYPE_MAX)
163 		return NULL;
164 	return &mdata->mes.mock_logs[log_type];
165 }
166 
167 static struct cxl_event_record_raw *event_get_current(struct mock_event_log *log)
168 {
169 	return log->events[log->cur_idx];
170 }
171 
172 static void event_reset_log(struct mock_event_log *log)
173 {
174 	log->cur_idx = 0;
175 	log->clear_idx = 0;
176 	log->nr_overflow = log->overflow_reset;
177 }
178 
179 /* Handles can never be 0; use 1-based indexing for handles */
180 static u16 event_get_clear_handle(struct mock_event_log *log)
181 {
182 	return log->clear_idx + 1;
183 }
184 
185 /* Handles can never be 0; use 1-based indexing for handles */
186 static __le16 event_get_cur_event_handle(struct mock_event_log *log)
187 {
188 	u16 cur_handle = log->cur_idx + 1;
189 
190 	return cpu_to_le16(cur_handle);
191 }
192 
193 static bool event_log_empty(struct mock_event_log *log)
194 {
195 	return log->cur_idx == log->nr_events;
196 }
197 
198 static void mes_add_event(struct mock_event_store *mes,
199 			  enum cxl_event_log_type log_type,
200 			  struct cxl_event_record_raw *event)
201 {
202 	struct mock_event_log *log;
203 
204 	if (WARN_ON(log_type >= CXL_EVENT_TYPE_MAX))
205 		return;
206 
207 	log = &mes->mock_logs[log_type];
208 
209 	if ((log->nr_events + 1) > CXL_TEST_EVENT_CNT_MAX) {
210 		log->nr_overflow++;
211 		log->overflow_reset = log->nr_overflow;
212 		return;
213 	}
214 
215 	log->events[log->nr_events] = event;
216 	log->nr_events++;
217 }
218 
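/*
 * Mock Get Event Records handler: returns up to CXL_TEST_EVENT_CNT records
 * from the requested log and sets the MORE_RECORDS/OVERFLOW flags the way a
 * real device would.
 */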
219 static int mock_get_event(struct device *dev, struct cxl_mbox_cmd *cmd)
220 {
221 	struct cxl_get_event_payload *pl;
222 	struct mock_event_log *log;
224 	u8 log_type;
225 	int i;
226 
227 	if (cmd->size_in != sizeof(log_type))
228 		return -EINVAL;
229 
230 	if (cmd->size_out < struct_size(pl, records, CXL_TEST_EVENT_CNT))
231 		return -EINVAL;
232 
233 	log_type = *((u8 *)cmd->payload_in);
234 	if (log_type >= CXL_EVENT_TYPE_MAX)
235 		return -EINVAL;
236 
237 	memset(cmd->payload_out, 0, cmd->size_out);
238 
239 	log = event_find_log(dev, log_type);
240 	if (!log || event_log_empty(log))
241 		return 0;
242 
243 	pl = cmd->payload_out;
244 
245 	for (i = 0; i < CXL_TEST_EVENT_CNT && !event_log_empty(log); i++) {
246 		memcpy(&pl->records[i], event_get_current(log),
247 		       sizeof(pl->records[i]));
248 		pl->records[i].hdr.handle = event_get_cur_event_handle(log);
249 		log->cur_idx++;
250 	}
251 
252 	pl->record_count = cpu_to_le16(i);
253 	if (!event_log_empty(log))
254 		pl->flags |= CXL_GET_EVENT_FLAG_MORE_RECORDS;
255 
256 	if (log->nr_overflow) {
257 		u64 ns;
258 
259 		pl->flags |= CXL_GET_EVENT_FLAG_OVERFLOW;
260 		pl->overflow_err_count = cpu_to_le16(log->nr_overflow);
261 		ns = ktime_get_real_ns();
262 		ns -= 5000000000; /* 5s ago */
263 		pl->first_overflow_timestamp = cpu_to_le64(ns);
264 		ns = ktime_get_real_ns();
265 		ns -= 1000000000; /* 1s ago */
266 		pl->last_overflow_timestamp = cpu_to_le64(ns);
267 	}
268 
269 	return 0;
270 }
271 
272 static int mock_clear_event(struct device *dev, struct cxl_mbox_cmd *cmd)
273 {
274 	struct cxl_mbox_clear_event_payload *pl = cmd->payload_in;
275 	struct mock_event_log *log;
276 	u8 log_type = pl->event_log;
277 	u16 handle;
278 	int nr;
279 
280 	if (log_type >= CXL_EVENT_TYPE_MAX)
281 		return -EINVAL;
282 
283 	log = event_find_log(dev, log_type);
284 	if (!log)
285 		return 0; /* No mock data in this log */
286 
287 	/*
288 	 * Clearing more events than were returned is not technically invalid
289 	 * per the specification (the host could 'guess' handles and clear them
290 	 * in order), but it is poor host behavior, so the mock rejects it.
291 	 */
292 	if (log->clear_idx + pl->nr_recs > log->cur_idx) {
293 		dev_err(dev,
294 			"Attempting to clear more events than returned!\n");
295 		return -EINVAL;
296 	}
297 
298 	/* Check handle order prior to clearing events */
299 	for (nr = 0, handle = event_get_clear_handle(log);
300 	     nr < pl->nr_recs;
301 	     nr++, handle++) {
302 		if (handle != le16_to_cpu(pl->handles[nr])) {
303 			dev_err(dev, "Clearing events out of order\n");
304 			return -EINVAL;
305 		}
306 	}
307 
308 	if (log->nr_overflow)
309 		log->nr_overflow = 0;
310 
311 	/* Clear events */
312 	log->clear_idx += pl->nr_recs;
313 	return 0;
314 }
315 
316 static void cxl_mock_event_trigger(struct device *dev)
317 {
318 	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
319 	struct mock_event_store *mes = &mdata->mes;
320 	int i;
321 
322 	for (i = CXL_EVENT_TYPE_INFO; i < CXL_EVENT_TYPE_MAX; i++) {
323 		struct mock_event_log *log;
324 
325 		log = event_find_log(dev, i);
326 		if (log)
327 			event_reset_log(log);
328 	}
329 
330 	cxl_mem_get_event_records(mes->mds, mes->ev_status);
331 }
332 
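/*
 * Canned event records. maint_needed and hardware_replace use arbitrary
 * UUIDs and exercise the raw/vendor record path; gen_media, dram and
 * mem_module carry the spec-defined General Media, DRAM and Memory Module
 * event UUIDs so the common event parsing and trace paths are exercised.
 */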
333 struct cxl_event_record_raw maint_needed = {
334 	.hdr = {
335 		.id = UUID_INIT(0xBA5EBA11, 0xABCD, 0xEFEB,
336 				0xa5, 0x5a, 0xa5, 0x5a, 0xa5, 0xa5, 0x5a, 0xa5),
337 		.length = sizeof(struct cxl_event_record_raw),
338 		.flags[0] = CXL_EVENT_RECORD_FLAG_MAINT_NEEDED,
339 		/* .handle = Set dynamically */
340 		.related_handle = cpu_to_le16(0xa5b6),
341 	},
342 	.data = { 0xDE, 0xAD, 0xBE, 0xEF },
343 };
344 
345 struct cxl_event_record_raw hardware_replace = {
346 	.hdr = {
347 		.id = UUID_INIT(0xABCDEFEB, 0xBA11, 0xBA5E,
348 				0xa5, 0x5a, 0xa5, 0x5a, 0xa5, 0xa5, 0x5a, 0xa5),
349 		.length = sizeof(struct cxl_event_record_raw),
350 		.flags[0] = CXL_EVENT_RECORD_FLAG_HW_REPLACE,
351 		/* .handle = Set dynamically */
352 		.related_handle = cpu_to_le16(0xb6a5),
353 	},
354 	.data = { 0xDE, 0xAD, 0xBE, 0xEF },
355 };
356 
357 struct cxl_event_gen_media gen_media = {
358 	.hdr = {
359 		.id = UUID_INIT(0xfbcd0a77, 0xc260, 0x417f,
360 				0x85, 0xa9, 0x08, 0x8b, 0x16, 0x21, 0xeb, 0xa6),
361 		.length = sizeof(struct cxl_event_gen_media),
362 		.flags[0] = CXL_EVENT_RECORD_FLAG_PERMANENT,
363 		/* .handle = Set dynamically */
364 		.related_handle = cpu_to_le16(0),
365 	},
366 	.phys_addr = cpu_to_le64(0x2000),
367 	.descriptor = CXL_GMER_EVT_DESC_UNCORECTABLE_EVENT,
368 	.type = CXL_GMER_MEM_EVT_TYPE_DATA_PATH_ERROR,
369 	.transaction_type = CXL_GMER_TRANS_HOST_WRITE,
370 	/* .validity_flags = <set below> */
371 	.channel = 1,
372 	.rank = 30
373 };
374 
375 struct cxl_event_dram dram = {
376 	.hdr = {
377 		.id = UUID_INIT(0x601dcbb3, 0x9c06, 0x4eab,
378 				0xb8, 0xaf, 0x4e, 0x9b, 0xfb, 0x5c, 0x96, 0x24),
379 		.length = sizeof(struct cxl_event_dram),
380 		.flags[0] = CXL_EVENT_RECORD_FLAG_PERF_DEGRADED,
381 		/* .handle = Set dynamically */
382 		.related_handle = cpu_to_le16(0),
383 	},
384 	.phys_addr = cpu_to_le64(0x8000),
385 	.descriptor = CXL_GMER_EVT_DESC_THRESHOLD_EVENT,
386 	.type = CXL_GMER_MEM_EVT_TYPE_INV_ADDR,
387 	.transaction_type = CXL_GMER_TRANS_INTERNAL_MEDIA_SCRUB,
388 	/* .validity_flags = <set below> */
389 	.channel = 1,
390 	.bank_group = 5,
391 	.bank = 2,
392 	.column = {0xDE, 0xAD},
393 };
394 
395 struct cxl_event_mem_module mem_module = {
396 	.hdr = {
397 		.id = UUID_INIT(0xfe927475, 0xdd59, 0x4339,
398 				0xa5, 0x86, 0x79, 0xba, 0xb1, 0x13, 0xb7, 0x74),
399 		.length = sizeof(struct cxl_event_mem_module),
400 		/* .handle = Set dynamically */
401 		.related_handle = cpu_to_le16(0),
402 	},
403 	.event_type = CXL_MMER_TEMP_CHANGE,
404 	.info = {
405 		.health_status = CXL_DHI_HS_PERFORMANCE_DEGRADED,
406 		.media_status = CXL_DHI_MS_ALL_DATA_LOST,
407 		.add_status = (CXL_DHI_AS_CRITICAL << 2) |
408 			      (CXL_DHI_AS_WARNING << 4) |
409 			      (CXL_DHI_AS_WARNING << 5),
410 		.device_temp = { 0xDE, 0xAD},
411 		.dirty_shutdown_cnt = { 0xde, 0xad, 0xbe, 0xef },
412 		.cor_vol_err_cnt = { 0xde, 0xad, 0xbe, 0xef },
413 		.cor_per_err_cnt = { 0xde, 0xad, 0xbe, 0xef },
414 	}
415 };
416 
417 static int mock_set_timestamp(struct cxl_dev_state *cxlds,
418 			      struct cxl_mbox_cmd *cmd)
419 {
420 	struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
421 	struct cxl_mbox_set_timestamp_in *ts = cmd->payload_in;
422 
423 	if (cmd->size_in != sizeof(*ts))
424 		return -EINVAL;
425 
426 	if (cmd->size_out != 0)
427 		return -EINVAL;
428 
429 	mdata->timestamp = le64_to_cpu(ts->timestamp);
430 	return 0;
431 }
432 
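/*
 * Populate the mock logs with canned records. The FAIL log is intentionally
 * filled past CXL_TEST_EVENT_CNT_MAX so that the overflow reporting path is
 * exercised.
 */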
433 static void cxl_mock_add_event_logs(struct mock_event_store *mes)
434 {
435 	put_unaligned_le16(CXL_GMER_VALID_CHANNEL | CXL_GMER_VALID_RANK,
436 			   &gen_media.validity_flags);
437 
438 	put_unaligned_le16(CXL_DER_VALID_CHANNEL | CXL_DER_VALID_BANK_GROUP |
439 			   CXL_DER_VALID_BANK | CXL_DER_VALID_COLUMN,
440 			   &dram.validity_flags);
441 
442 	mes_add_event(mes, CXL_EVENT_TYPE_INFO, &maint_needed);
443 	mes_add_event(mes, CXL_EVENT_TYPE_INFO,
444 		      (struct cxl_event_record_raw *)&gen_media);
445 	mes_add_event(mes, CXL_EVENT_TYPE_INFO,
446 		      (struct cxl_event_record_raw *)&mem_module);
447 	mes->ev_status |= CXLDEV_EVENT_STATUS_INFO;
448 
449 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &maint_needed);
450 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
451 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
452 		      (struct cxl_event_record_raw *)&dram);
453 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
454 		      (struct cxl_event_record_raw *)&gen_media);
455 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
456 		      (struct cxl_event_record_raw *)&mem_module);
457 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
458 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
459 		      (struct cxl_event_record_raw *)&dram);
460 	/* Overflow this log */
461 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
462 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
463 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
464 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
465 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
466 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
467 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
468 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
469 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
470 	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
471 	mes->ev_status |= CXLDEV_EVENT_STATUS_FAIL;
472 
473 	mes_add_event(mes, CXL_EVENT_TYPE_FATAL, &hardware_replace);
474 	mes_add_event(mes, CXL_EVENT_TYPE_FATAL,
475 		      (struct cxl_event_record_raw *)&dram);
476 	mes->ev_status |= CXLDEV_EVENT_STATUS_FATAL;
477 }
478 
479 static int mock_gsl(struct cxl_mbox_cmd *cmd)
480 {
481 	if (cmd->size_out < sizeof(mock_gsl_payload))
482 		return -EINVAL;
483 
484 	memcpy(cmd->payload_out, &mock_gsl_payload, sizeof(mock_gsl_payload));
485 	cmd->size_out = sizeof(mock_gsl_payload);
486 
487 	return 0;
488 }
489 
490 static int mock_get_log(struct cxl_memdev_state *mds, struct cxl_mbox_cmd *cmd)
491 {
492 	struct cxl_mbox_get_log *gl = cmd->payload_in;
493 	u32 offset = le32_to_cpu(gl->offset);
494 	u32 length = le32_to_cpu(gl->length);
495 	uuid_t uuid = DEFINE_CXL_CEL_UUID;
496 	void *data = &mock_cel;
497 
498 	if (cmd->size_in < sizeof(*gl))
499 		return -EINVAL;
500 	if (length > mds->payload_size)
501 		return -EINVAL;
502 	if (offset + length > sizeof(mock_cel))
503 		return -EINVAL;
504 	if (!uuid_equal(&gl->uuid, &uuid))
505 		return -EINVAL;
506 	if (length > cmd->size_out)
507 		return -EINVAL;
508 
509 	memcpy(cmd->payload_out, data + offset, length);
510 
511 	return 0;
512 }
513 
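/*
 * Identify payloads: the RCD (Restricted CXL Device) flavor reports
 * volatile-only capacity, while the default mock reports a partitionable
 * device with an LSA and a poison injection limit.
 */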
514 static int mock_rcd_id(struct cxl_mbox_cmd *cmd)
515 {
516 	struct cxl_mbox_identify id = {
517 		.fw_revision = { "mock fw v1 " },
518 		.total_capacity =
519 			cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER),
520 		.volatile_capacity =
521 			cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER),
522 	};
523 
524 	if (cmd->size_out < sizeof(id))
525 		return -EINVAL;
526 
527 	memcpy(cmd->payload_out, &id, sizeof(id));
528 
529 	return 0;
530 }
531 
532 static int mock_id(struct cxl_mbox_cmd *cmd)
533 {
534 	struct cxl_mbox_identify id = {
535 		.fw_revision = { "mock fw v1 " },
536 		.lsa_size = cpu_to_le32(LSA_SIZE),
537 		.partition_align =
538 			cpu_to_le64(SZ_256M / CXL_CAPACITY_MULTIPLIER),
539 		.total_capacity =
540 			cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER),
541 		.inject_poison_limit = cpu_to_le16(MOCK_INJECT_TEST_MAX),
542 	};
543 
544 	put_unaligned_le24(CXL_POISON_LIST_MAX, id.poison_list_max_mer);
545 
546 	if (cmd->size_out < sizeof(id))
547 		return -EINVAL;
548 
549 	memcpy(cmd->payload_out, &id, sizeof(id));
550 
551 	return 0;
552 }
553 
554 static int mock_partition_info(struct cxl_mbox_cmd *cmd)
555 {
556 	struct cxl_mbox_get_partition_info pi = {
557 		.active_volatile_cap =
558 			cpu_to_le64(DEV_SIZE / 2 / CXL_CAPACITY_MULTIPLIER),
559 		.active_persistent_cap =
560 			cpu_to_le64(DEV_SIZE / 2 / CXL_CAPACITY_MULTIPLIER),
561 	};
562 
563 	if (cmd->size_out < sizeof(pi))
564 		return -EINVAL;
565 
566 	memcpy(cmd->payload_out, &pi, sizeof(pi));
567 
568 	return 0;
569 }
570 
571 static int mock_sanitize(struct cxl_mockmem_data *mdata,
572 			 struct cxl_mbox_cmd *cmd)
573 {
574 	if (cmd->size_in != 0)
575 		return -EINVAL;
576 
577 	if (cmd->size_out != 0)
578 		return -EINVAL;
579 
580 	if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
581 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
582 		return -ENXIO;
583 	}
584 	if (mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED) {
585 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
586 		return -ENXIO;
587 	}
588 
589 	return 0; /* assume less than 2 secs, no bg */
590 }
591 
592 static int mock_secure_erase(struct cxl_mockmem_data *mdata,
593 			     struct cxl_mbox_cmd *cmd)
594 {
595 	if (cmd->size_in != 0)
596 		return -EINVAL;
597 
598 	if (cmd->size_out != 0)
599 		return -EINVAL;
600 
601 	if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
602 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
603 		return -ENXIO;
604 	}
605 
606 	if (mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED) {
607 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
608 		return -ENXIO;
609 	}
610 
611 	return 0;
612 }
613 
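/*
 * The security handlers below emulate the CXL PMEM security state machine
 * (passphrase set/disable, freeze, unlock, passphrase secure erase) purely
 * in mdata->security_state, enforcing PASS_TRY_LIMIT attempts per
 * passphrase type.
 */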
614 static int mock_get_security_state(struct cxl_mockmem_data *mdata,
615 				   struct cxl_mbox_cmd *cmd)
616 {
617 	if (cmd->size_in)
618 		return -EINVAL;
619 
620 	if (cmd->size_out != sizeof(u32))
621 		return -EINVAL;
622 
623 	memcpy(cmd->payload_out, &mdata->security_state, sizeof(u32));
624 
625 	return 0;
626 }
627 
628 static void master_plimit_check(struct cxl_mockmem_data *mdata)
629 {
630 	if (mdata->master_limit == PASS_TRY_LIMIT)
631 		return;
632 	mdata->master_limit++;
633 	if (mdata->master_limit == PASS_TRY_LIMIT)
634 		mdata->security_state |= CXL_PMEM_SEC_STATE_MASTER_PLIMIT;
635 }
636 
637 static void user_plimit_check(struct cxl_mockmem_data *mdata)
638 {
639 	if (mdata->user_limit == PASS_TRY_LIMIT)
640 		return;
641 	mdata->user_limit++;
642 	if (mdata->user_limit == PASS_TRY_LIMIT)
643 		mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PLIMIT;
644 }
645 
646 static int mock_set_passphrase(struct cxl_mockmem_data *mdata,
647 			       struct cxl_mbox_cmd *cmd)
648 {
649 	struct cxl_set_pass *set_pass;
650 
651 	if (cmd->size_in != sizeof(*set_pass))
652 		return -EINVAL;
653 
654 	if (cmd->size_out != 0)
655 		return -EINVAL;
656 
657 	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
658 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
659 		return -ENXIO;
660 	}
661 
662 	set_pass = cmd->payload_in;
663 	switch (set_pass->type) {
664 	case CXL_PMEM_SEC_PASS_MASTER:
665 		if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT) {
666 			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
667 			return -ENXIO;
668 		}
669 		/*
670 		 * CXL spec rev3.0 8.2.9.8.6.2: the master passphrase shall only be set
671 		 * in the security-disabled state, when the user passphrase is not set.
672 		 */
673 		if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
674 			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
675 			return -ENXIO;
676 		}
677 		if (memcmp(mdata->master_pass, set_pass->old_pass, NVDIMM_PASSPHRASE_LEN)) {
678 			master_plimit_check(mdata);
679 			cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
680 			return -ENXIO;
681 		}
682 		memcpy(mdata->master_pass, set_pass->new_pass, NVDIMM_PASSPHRASE_LEN);
683 		mdata->security_state |= CXL_PMEM_SEC_STATE_MASTER_PASS_SET;
684 		return 0;
685 
686 	case CXL_PMEM_SEC_PASS_USER:
687 		if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT) {
688 			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
689 			return -ENXIO;
690 		}
691 		if (memcmp(mdata->user_pass, set_pass->old_pass, NVDIMM_PASSPHRASE_LEN)) {
692 			user_plimit_check(mdata);
693 			cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
694 			return -ENXIO;
695 		}
696 		memcpy(mdata->user_pass, set_pass->new_pass, NVDIMM_PASSPHRASE_LEN);
697 		mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PASS_SET;
698 		return 0;
699 
700 	default:
701 		cmd->return_code = CXL_MBOX_CMD_RC_INPUT;
702 	}
703 	return -EINVAL;
704 }
705 
706 static int mock_disable_passphrase(struct cxl_mockmem_data *mdata,
707 				   struct cxl_mbox_cmd *cmd)
708 {
709 	struct cxl_disable_pass *dis_pass;
710 
711 	if (cmd->size_in != sizeof(*dis_pass))
712 		return -EINVAL;
713 
714 	if (cmd->size_out != 0)
715 		return -EINVAL;
716 
717 	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
718 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
719 		return -ENXIO;
720 	}
721 
722 	dis_pass = cmd->payload_in;
723 	switch (dis_pass->type) {
724 	case CXL_PMEM_SEC_PASS_MASTER:
725 		if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT) {
726 			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
727 			return -ENXIO;
728 		}
729 
730 		if (!(mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PASS_SET)) {
731 			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
732 			return -ENXIO;
733 		}
734 
735 		if (memcmp(dis_pass->pass, mdata->master_pass, NVDIMM_PASSPHRASE_LEN)) {
736 			master_plimit_check(mdata);
737 			cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
738 			return -ENXIO;
739 		}
740 
741 		mdata->master_limit = 0;
742 		memset(mdata->master_pass, 0, NVDIMM_PASSPHRASE_LEN);
743 		mdata->security_state &= ~CXL_PMEM_SEC_STATE_MASTER_PASS_SET;
744 		return 0;
745 
746 	case CXL_PMEM_SEC_PASS_USER:
747 		if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT) {
748 			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
749 			return -ENXIO;
750 		}
751 
752 		if (!(mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET)) {
753 			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
754 			return -ENXIO;
755 		}
756 
757 		if (memcmp(dis_pass->pass, mdata->user_pass, NVDIMM_PASSPHRASE_LEN)) {
758 			user_plimit_check(mdata);
759 			cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
760 			return -ENXIO;
761 		}
762 
763 		mdata->user_limit = 0;
764 		memset(mdata->user_pass, 0, NVDIMM_PASSPHRASE_LEN);
765 		mdata->security_state &= ~(CXL_PMEM_SEC_STATE_USER_PASS_SET |
766 					   CXL_PMEM_SEC_STATE_LOCKED);
767 		return 0;
768 
769 	default:
770 		cmd->return_code = CXL_MBOX_CMD_RC_INPUT;
771 		return -EINVAL;
772 	}
773 
774 	return 0;
775 }
776 
777 static int mock_freeze_security(struct cxl_mockmem_data *mdata,
778 				struct cxl_mbox_cmd *cmd)
779 {
780 	if (cmd->size_in != 0)
781 		return -EINVAL;
782 
783 	if (cmd->size_out != 0)
784 		return -EINVAL;
785 
786 	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN)
787 		return 0;
788 
789 	mdata->security_state |= CXL_PMEM_SEC_STATE_FROZEN;
790 	return 0;
791 }
792 
793 static int mock_unlock_security(struct cxl_mockmem_data *mdata,
794 				struct cxl_mbox_cmd *cmd)
795 {
796 	if (cmd->size_in != NVDIMM_PASSPHRASE_LEN)
797 		return -EINVAL;
798 
799 	if (cmd->size_out != 0)
800 		return -EINVAL;
801 
802 	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
803 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
804 		return -ENXIO;
805 	}
806 
807 	if (!(mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET)) {
808 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
809 		return -ENXIO;
810 	}
811 
812 	if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT) {
813 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
814 		return -ENXIO;
815 	}
816 
817 	if (!(mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED)) {
818 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
819 		return -ENXIO;
820 	}
821 
822 	if (memcmp(cmd->payload_in, mdata->user_pass, NVDIMM_PASSPHRASE_LEN)) {
823 		if (++mdata->user_limit == PASS_TRY_LIMIT)
824 			mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PLIMIT;
825 		cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
826 		return -ENXIO;
827 	}
828 
829 	mdata->user_limit = 0;
830 	mdata->security_state &= ~CXL_PMEM_SEC_STATE_LOCKED;
831 	return 0;
832 }
833 
834 static int mock_passphrase_secure_erase(struct cxl_mockmem_data *mdata,
835 					struct cxl_mbox_cmd *cmd)
836 {
837 	struct cxl_pass_erase *erase;
838 
839 	if (cmd->size_in != sizeof(*erase))
840 		return -EINVAL;
841 
842 	if (cmd->size_out != 0)
843 		return -EINVAL;
844 
845 	erase = cmd->payload_in;
846 	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
847 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
848 		return -ENXIO;
849 	}
850 
851 	if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT &&
852 	    erase->type == CXL_PMEM_SEC_PASS_USER) {
853 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
854 		return -ENXIO;
855 	}
856 
857 	if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT &&
858 	    erase->type == CXL_PMEM_SEC_PASS_MASTER) {
859 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
860 		return -ENXIO;
861 	}
862 
863 	switch (erase->type) {
864 	case CXL_PMEM_SEC_PASS_MASTER:
865 		/*
866 		 * The spec does not clearly define the behavior when a master
867 		 * passphrase is passed in while neither the master passphrase
868 		 * nor the user passphrase is set. The code assumes the device
869 		 * behaves the same as for a CXL secure erase command without a
870 		 * passphrase (0x4401).
871 		 */
872 		if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PASS_SET) {
873 			if (memcmp(mdata->master_pass, erase->pass,
874 				   NVDIMM_PASSPHRASE_LEN)) {
875 				master_plimit_check(mdata);
876 				cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
877 				return -ENXIO;
878 			}
879 			mdata->master_limit = 0;
880 			mdata->user_limit = 0;
881 			mdata->security_state &= ~CXL_PMEM_SEC_STATE_USER_PASS_SET;
882 			memset(mdata->user_pass, 0, NVDIMM_PASSPHRASE_LEN);
883 			mdata->security_state &= ~CXL_PMEM_SEC_STATE_LOCKED;
884 		} else {
885 			/*
886 			 * CXL rev3 8.2.9.8.6.3 Disable Passphrase
887 			 * When master passphrase is disabled, the device shall
888 			 * return Invalid Input for the Passphrase Secure Erase
889 			 * command with master passphrase.
890 			 */
891 			return -EINVAL;
892 		}
893 		/* Scramble encryption keys so that data is effectively erased */
894 		break;
895 	case CXL_PMEM_SEC_PASS_USER:
896 		/*
897 		 * The spec does not clearly define the behavior when a user
898 		 * passphrase is passed in while the user passphrase is not set.
899 		 * The code assumes the device behaves the same as for a CXL
900 		 * secure erase command without a passphrase (0x4401).
902 		 */
903 		if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
904 			if (memcmp(mdata->user_pass, erase->pass,
905 				   NVDIMM_PASSPHRASE_LEN)) {
906 				user_plimit_check(mdata);
907 				cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
908 				return -ENXIO;
909 			}
910 			mdata->user_limit = 0;
911 			mdata->security_state &= ~CXL_PMEM_SEC_STATE_USER_PASS_SET;
912 			memset(mdata->user_pass, 0, NVDIMM_PASSPHRASE_LEN);
913 		}
914 
915 		/*
916 		 * CXL rev3 Table 8-118: if the user passphrase is not set or not
917 		 * supported by the device, the current passphrase value is ignored.
918 		 * Assume the operation proceeds as a secure erase without a
919 		 * passphrase, since the spec is not explicit.
921 		 */
922 
923 		/* Scramble encryption keys so that data is effectively erased */
924 		break;
925 	default:
926 		return -EINVAL;
927 	}
928 
929 	return 0;
930 }
931 
932 static int mock_get_lsa(struct cxl_mockmem_data *mdata,
933 			struct cxl_mbox_cmd *cmd)
934 {
935 	struct cxl_mbox_get_lsa *get_lsa = cmd->payload_in;
936 	void *lsa = mdata->lsa;
937 	u32 offset, length;
938 
939 	if (sizeof(*get_lsa) > cmd->size_in)
940 		return -EINVAL;
941 	offset = le32_to_cpu(get_lsa->offset);
942 	length = le32_to_cpu(get_lsa->length);
943 	if (offset + length > LSA_SIZE)
944 		return -EINVAL;
945 	if (length > cmd->size_out)
946 		return -EINVAL;
947 
948 	memcpy(cmd->payload_out, lsa + offset, length);
949 	return 0;
950 }
951 
952 static int mock_set_lsa(struct cxl_mockmem_data *mdata,
953 			struct cxl_mbox_cmd *cmd)
954 {
955 	struct cxl_mbox_set_lsa *set_lsa = cmd->payload_in;
956 	void *lsa = mdata->lsa;
957 	u32 offset, length;
958 
959 	if (sizeof(*set_lsa) > cmd->size_in)
960 		return -EINVAL;
961 	offset = le32_to_cpu(set_lsa->offset);
962 	length = cmd->size_in - sizeof(*set_lsa);
963 	if (offset + length > LSA_SIZE)
964 		return -EINVAL;
965 
966 	memcpy(lsa + offset, &set_lsa->data[0], length);
967 	return 0;
968 }
969 
970 static int mock_health_info(struct cxl_mbox_cmd *cmd)
971 {
972 	struct cxl_mbox_health_info health_info = {
973 		/* set flags for maint needed, perf degraded, hw replacement */
974 		.health_status = 0x7,
975 		/* set media status to "All Data Lost" */
976 		.media_status = 0x3,
977 		/*
978 		 * set ext_status flags for:
979 		 *  ext_life_used: normal,
980 		 *  ext_temperature: critical,
981 		 *  ext_corrected_volatile: warning,
982 		 *  ext_corrected_persistent: normal,
983 		 */
984 		.ext_status = 0x18,
985 		.life_used = 15,
986 		.temperature = cpu_to_le16(25),
987 		.dirty_shutdowns = cpu_to_le32(10),
988 		.volatile_errors = cpu_to_le32(20),
989 		.pmem_errors = cpu_to_le32(30),
990 	};
991 
992 	if (cmd->size_out < sizeof(health_info))
993 		return -EINVAL;
994 
995 	memcpy(cmd->payload_out, &health_info, sizeof(health_info));
996 	return 0;
997 }
998 
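/*
 * Mock poison tracking: one global list shared by all mock devices, keyed
 * by (cxlds, dpa). Injection is capped per device by poison_inject_dev_max
 * and globally by MOCK_INJECT_TEST_MAX.
 */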
999 static struct mock_poison {
1000 	struct cxl_dev_state *cxlds;
1001 	u64 dpa;
1002 } mock_poison_list[MOCK_INJECT_TEST_MAX];
1003 
1004 static struct cxl_mbox_poison_out *
1005 cxl_get_injected_po(struct cxl_dev_state *cxlds, u64 offset, u64 length)
1006 {
1007 	struct cxl_mbox_poison_out *po;
1008 	int nr_records = 0;
1009 	u64 dpa;
1010 
1011 	po = kzalloc(struct_size(po, record, poison_inject_dev_max), GFP_KERNEL);
1012 	if (!po)
1013 		return NULL;
1014 
1015 	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1016 		if (mock_poison_list[i].cxlds != cxlds)
1017 			continue;
1018 		if (mock_poison_list[i].dpa < offset ||
1019 		    mock_poison_list[i].dpa > offset + length - 1)
1020 			continue;
1021 
1022 		dpa = mock_poison_list[i].dpa + CXL_POISON_SOURCE_INJECTED;
1023 		po->record[nr_records].address = cpu_to_le64(dpa);
1024 		po->record[nr_records].length = cpu_to_le32(1);
1025 		nr_records++;
1026 		if (nr_records == poison_inject_dev_max)
1027 			break;
1028 	}
1029 
1030 	/* Always return count, even when zero */
1031 	po->count = cpu_to_le16(nr_records);
1032 
1033 	return po;
1034 }
1035 
1036 static int mock_get_poison(struct cxl_dev_state *cxlds,
1037 			   struct cxl_mbox_cmd *cmd)
1038 {
1039 	struct cxl_mbox_poison_in *pi = cmd->payload_in;
1040 	struct cxl_mbox_poison_out *po;
1041 	u64 offset = le64_to_cpu(pi->offset);
1042 	u64 length = le64_to_cpu(pi->length);
1043 	int nr_records;
1044 
1045 	po = cxl_get_injected_po(cxlds, offset, length);
1046 	if (!po)
1047 		return -ENOMEM;
1048 	nr_records = le16_to_cpu(po->count);
1049 	memcpy(cmd->payload_out, po, struct_size(po, record, nr_records));
1050 	cmd->size_out = struct_size(po, record, nr_records);
1051 	kfree(po);
1052 
1053 	return 0;
1054 }
1055 
1056 static bool mock_poison_dev_max_injected(struct cxl_dev_state *cxlds)
1057 {
1058 	int count = 0;
1059 
1060 	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1061 		if (mock_poison_list[i].cxlds == cxlds)
1062 			count++;
1063 	}
1064 	return (count >= poison_inject_dev_max);
1065 }
1066 
1067 static bool mock_poison_add(struct cxl_dev_state *cxlds, u64 dpa)
1068 {
1069 	if (mock_poison_dev_max_injected(cxlds)) {
1070 		dev_dbg(cxlds->dev,
1071 			"Device poison injection limit has been reached: %u\n",
1072 			poison_inject_dev_max);
1073 		return false;
1074 	}
1075 
1076 	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1077 		if (!mock_poison_list[i].cxlds) {
1078 			mock_poison_list[i].cxlds = cxlds;
1079 			mock_poison_list[i].dpa = dpa;
1080 			return true;
1081 		}
1082 	}
1083 	dev_dbg(cxlds->dev,
1084 		"Mock test poison injection limit has been reached: %d\n",
1085 		MOCK_INJECT_TEST_MAX);
1086 
1087 	return false;
1088 }
1089 
1090 static bool mock_poison_found(struct cxl_dev_state *cxlds, u64 dpa)
1091 {
1092 	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1093 		if (mock_poison_list[i].cxlds == cxlds &&
1094 		    mock_poison_list[i].dpa == dpa)
1095 			return true;
1096 	}
1097 	return false;
1098 }
1099 
1100 static int mock_inject_poison(struct cxl_dev_state *cxlds,
1101 			      struct cxl_mbox_cmd *cmd)
1102 {
1103 	struct cxl_mbox_inject_poison *pi = cmd->payload_in;
1104 	u64 dpa = le64_to_cpu(pi->address);
1105 
1106 	if (mock_poison_found(cxlds, dpa)) {
1107 		/* Not an error to inject poison if already poisoned */
1108 		dev_dbg(cxlds->dev, "DPA: 0x%llx already poisoned\n", dpa);
1109 		return 0;
1110 	}
1111 	if (!mock_poison_add(cxlds, dpa))
1112 		return -ENXIO;
1113 
1114 	return 0;
1115 }
1116 
1117 static bool mock_poison_del(struct cxl_dev_state *cxlds, u64 dpa)
1118 {
1119 	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1120 		if (mock_poison_list[i].cxlds == cxlds &&
1121 		    mock_poison_list[i].dpa == dpa) {
1122 			mock_poison_list[i].cxlds = NULL;
1123 			return true;
1124 		}
1125 	}
1126 	return false;
1127 }
1128 
1129 static int mock_clear_poison(struct cxl_dev_state *cxlds,
1130 			     struct cxl_mbox_cmd *cmd)
1131 {
1132 	struct cxl_mbox_clear_poison *pi = cmd->payload_in;
1133 	u64 dpa = le64_to_cpu(pi->address);
1134 
1135 	/*
1136 	 * A real CXL device will write pi->write_data to the address
1137 	 * being cleared. In this mock, just delete this address from
1138 	 * the mock poison list.
1139 	 */
1140 	if (!mock_poison_del(cxlds, dpa))
1141 		dev_dbg(cxlds->dev, "DPA: 0x%llx not in poison list\n", dpa);
1142 
1143 	return 0;
1144 }
1145 
1146 static bool mock_poison_list_empty(void)
1147 {
1148 	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1149 		if (mock_poison_list[i].cxlds)
1150 			return false;
1151 	}
1152 	return true;
1153 }
1154 
1155 static ssize_t poison_inject_max_show(struct device_driver *drv, char *buf)
1156 {
1157 	return sysfs_emit(buf, "%u\n", poison_inject_dev_max);
1158 }
1159 
1160 static ssize_t poison_inject_max_store(struct device_driver *drv,
1161 				       const char *buf, size_t len)
1162 {
1163 	int val;
1164 
1165 	if (kstrtoint(buf, 0, &val) < 0)
1166 		return -EINVAL;
1167 
1168 	if (!mock_poison_list_empty())
1169 		return -EBUSY;
1170 
1171 	if (val < 0 || val > MOCK_INJECT_TEST_MAX)
1172 		return -EINVAL;
1173 
1174 	poison_inject_dev_max = val;
1175 
1176 	return len;
1177 }
1178 
1179 static DRIVER_ATTR_RW(poison_inject_max);
1180 
1181 static struct attribute *cxl_mock_mem_core_attrs[] = {
1182 	&driver_attr_poison_inject_max.attr,
1183 	NULL
1184 };
1185 ATTRIBUTE_GROUPS(cxl_mock_mem_core);
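/*
 * poison_inject_max is a driver attribute, so from userspace it would
 * typically be reachable via a path along the lines of
 * /sys/bus/platform/drivers/cxl_mock_mem/poison_inject_max (exact path
 * depends on the sysfs layout). Writes fail with -EBUSY while any mock
 * poison is still injected.
 */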
1186 
1187 static int mock_fw_info(struct cxl_mockmem_data *mdata,
1188 			struct cxl_mbox_cmd *cmd)
1189 {
1190 	struct cxl_mbox_get_fw_info fw_info = {
1191 		.num_slots = FW_SLOTS,
1192 		.slot_info = (mdata->fw_slot & 0x7) |
1193 			     ((mdata->fw_staged & 0x7) << 3),
1194 		.activation_cap = 0,
1195 	};
1196 
1197 	strcpy(fw_info.slot_1_revision, "cxl_test_fw_001");
1198 	strcpy(fw_info.slot_2_revision, "cxl_test_fw_002");
1199 	strcpy(fw_info.slot_3_revision, "cxl_test_fw_003");
1200 	strcpy(fw_info.slot_4_revision, "");
1201 
1202 	if (cmd->size_out < sizeof(fw_info))
1203 		return -EINVAL;
1204 
1205 	memcpy(cmd->payload_out, &fw_info, sizeof(fw_info));
1206 	return 0;
1207 }
1208 
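/*
 * Mock Transfer FW handler: copies the payload into the vmalloc()'d
 * mdata->fw buffer at the offset given in CXL_FW_TRANSFER_ALIGNMENT units,
 * and records the staged size on FULL/END actions.
 */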
1209 static int mock_transfer_fw(struct cxl_mockmem_data *mdata,
1210 			    struct cxl_mbox_cmd *cmd)
1211 {
1212 	struct cxl_mbox_transfer_fw *transfer = cmd->payload_in;
1213 	void *fw = mdata->fw;
1214 	size_t offset, length;
1215 
1216 	offset = le32_to_cpu(transfer->offset) * CXL_FW_TRANSFER_ALIGNMENT;
1217 	length = cmd->size_in - sizeof(*transfer);
1218 	if (offset + length > FW_SIZE)
1219 		return -EINVAL;
1220 
1221 	switch (transfer->action) {
1222 	case CXL_FW_TRANSFER_ACTION_FULL:
1223 		if (offset != 0)
1224 			return -EINVAL;
1225 		fallthrough;
1226 	case CXL_FW_TRANSFER_ACTION_END:
1227 		if (transfer->slot == 0 || transfer->slot > FW_SLOTS)
1228 			return -EINVAL;
1229 		mdata->fw_size = offset + length;
1230 		break;
1231 	case CXL_FW_TRANSFER_ACTION_INITIATE:
1232 	case CXL_FW_TRANSFER_ACTION_CONTINUE:
1233 		break;
1234 	case CXL_FW_TRANSFER_ACTION_ABORT:
1235 		return 0;
1236 	default:
1237 		return -EINVAL;
1238 	}
1239 
1240 	memcpy(fw + offset, transfer->data, length);
1241 	return 0;
1242 }
1243 
1244 static int mock_activate_fw(struct cxl_mockmem_data *mdata,
1245 			    struct cxl_mbox_cmd *cmd)
1246 {
1247 	struct cxl_mbox_activate_fw *activate = cmd->payload_in;
1248 
1249 	if (activate->slot == 0 || activate->slot > FW_SLOTS)
1250 		return -EINVAL;
1251 
1252 	switch (activate->action) {
1253 	case CXL_FW_ACTIVATE_ONLINE:
1254 		mdata->fw_slot = activate->slot;
1255 		mdata->fw_staged = 0;
1256 		return 0;
1257 	case CXL_FW_ACTIVATE_OFFLINE:
1258 		mdata->fw_staged = activate->slot;
1259 		return 0;
1260 	}
1261 
1262 	return -EINVAL;
1263 }
1264 
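/*
 * Mock mailbox dispatcher: stands in for the hardware mailbox by routing
 * each opcode to the matching mock_* handler above. Unhandled opcodes fall
 * through and return -EIO.
 */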
1265 static int cxl_mock_mbox_send(struct cxl_memdev_state *mds,
1266 			      struct cxl_mbox_cmd *cmd)
1267 {
1268 	struct cxl_dev_state *cxlds = &mds->cxlds;
1269 	struct device *dev = cxlds->dev;
1270 	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
1271 	int rc = -EIO;
1272 
1273 	switch (cmd->opcode) {
1274 	case CXL_MBOX_OP_SET_TIMESTAMP:
1275 		rc = mock_set_timestamp(cxlds, cmd);
1276 		break;
1277 	case CXL_MBOX_OP_GET_SUPPORTED_LOGS:
1278 		rc = mock_gsl(cmd);
1279 		break;
1280 	case CXL_MBOX_OP_GET_LOG:
1281 		rc = mock_get_log(mds, cmd);
1282 		break;
1283 	case CXL_MBOX_OP_IDENTIFY:
1284 		if (cxlds->rcd)
1285 			rc = mock_rcd_id(cmd);
1286 		else
1287 			rc = mock_id(cmd);
1288 		break;
1289 	case CXL_MBOX_OP_GET_LSA:
1290 		rc = mock_get_lsa(mdata, cmd);
1291 		break;
1292 	case CXL_MBOX_OP_GET_PARTITION_INFO:
1293 		rc = mock_partition_info(cmd);
1294 		break;
1295 	case CXL_MBOX_OP_GET_EVENT_RECORD:
1296 		rc = mock_get_event(dev, cmd);
1297 		break;
1298 	case CXL_MBOX_OP_CLEAR_EVENT_RECORD:
1299 		rc = mock_clear_event(dev, cmd);
1300 		break;
1301 	case CXL_MBOX_OP_SET_LSA:
1302 		rc = mock_set_lsa(mdata, cmd);
1303 		break;
1304 	case CXL_MBOX_OP_GET_HEALTH_INFO:
1305 		rc = mock_health_info(cmd);
1306 		break;
1307 	case CXL_MBOX_OP_SANITIZE:
1308 		rc = mock_sanitize(mdata, cmd);
1309 		break;
1310 	case CXL_MBOX_OP_SECURE_ERASE:
1311 		rc = mock_secure_erase(mdata, cmd);
1312 		break;
1313 	case CXL_MBOX_OP_GET_SECURITY_STATE:
1314 		rc = mock_get_security_state(mdata, cmd);
1315 		break;
1316 	case CXL_MBOX_OP_SET_PASSPHRASE:
1317 		rc = mock_set_passphrase(mdata, cmd);
1318 		break;
1319 	case CXL_MBOX_OP_DISABLE_PASSPHRASE:
1320 		rc = mock_disable_passphrase(mdata, cmd);
1321 		break;
1322 	case CXL_MBOX_OP_FREEZE_SECURITY:
1323 		rc = mock_freeze_security(mdata, cmd);
1324 		break;
1325 	case CXL_MBOX_OP_UNLOCK:
1326 		rc = mock_unlock_security(mdata, cmd);
1327 		break;
1328 	case CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE:
1329 		rc = mock_passphrase_secure_erase(mdata, cmd);
1330 		break;
1331 	case CXL_MBOX_OP_GET_POISON:
1332 		rc = mock_get_poison(cxlds, cmd);
1333 		break;
1334 	case CXL_MBOX_OP_INJECT_POISON:
1335 		rc = mock_inject_poison(cxlds, cmd);
1336 		break;
1337 	case CXL_MBOX_OP_CLEAR_POISON:
1338 		rc = mock_clear_poison(cxlds, cmd);
1339 		break;
1340 	case CXL_MBOX_OP_GET_FW_INFO:
1341 		rc = mock_fw_info(mdata, cmd);
1342 		break;
1343 	case CXL_MBOX_OP_TRANSFER_FW:
1344 		rc = mock_transfer_fw(mdata, cmd);
1345 		break;
1346 	case CXL_MBOX_OP_ACTIVATE_FW:
1347 		rc = mock_activate_fw(mdata, cmd);
1348 		break;
1349 	default:
1350 		break;
1351 	}
1352 
1353 	dev_dbg(dev, "opcode: %#x sz_in: %zd sz_out: %zd rc: %d\n", cmd->opcode,
1354 		cmd->size_in, cmd->size_out, rc);
1355 
1356 	return rc;
1357 }
1358 
1359 static void label_area_release(void *lsa)
1360 {
1361 	vfree(lsa);
1362 }
1363 
1364 static void fw_buf_release(void *buf)
1365 {
1366 	vfree(buf);
1367 }
1368 
1369 static bool is_rcd(struct platform_device *pdev)
1370 {
1371 	const struct platform_device_id *id = platform_get_device_id(pdev);
1372 
1373 	return !!id->driver_data;
1374 }
1375 
1376 static ssize_t event_trigger_store(struct device *dev,
1377 				   struct device_attribute *attr,
1378 				   const char *buf, size_t count)
1379 {
1380 	cxl_mock_event_trigger(dev);
1381 	return count;
1382 }
1383 static DEVICE_ATTR_WO(event_trigger);
1384 
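/*
 * Probe: allocate the mock state (LSA and firmware buffers), hook up the
 * mock mailbox, run the usual cxl_memdev_state initialization sequence, and
 * register the resulting memdev along with its firmware upload interface.
 */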
1385 static int cxl_mock_mem_probe(struct platform_device *pdev)
1386 {
1387 	struct device *dev = &pdev->dev;
1388 	struct cxl_memdev *cxlmd;
1389 	struct cxl_memdev_state *mds;
1390 	struct cxl_dev_state *cxlds;
1391 	struct cxl_mockmem_data *mdata;
1392 	int rc;
1393 
1394 	mdata = devm_kzalloc(dev, sizeof(*mdata), GFP_KERNEL);
1395 	if (!mdata)
1396 		return -ENOMEM;
1397 	dev_set_drvdata(dev, mdata);
1398 
1399 	mdata->lsa = vmalloc(LSA_SIZE);
1400 	if (!mdata->lsa)
1401 		return -ENOMEM;
1402 	mdata->fw = vmalloc(FW_SIZE);
1403 	if (!mdata->fw)
1404 		return -ENOMEM;
1405 	mdata->fw_slot = 2;
1406 
1407 	rc = devm_add_action_or_reset(dev, label_area_release, mdata->lsa);
1408 	if (rc)
1409 		return rc;
1410 
1411 	rc = devm_add_action_or_reset(dev, fw_buf_release, mdata->fw);
1412 	if (rc)
1413 		return rc;
1414 
1415 	mds = cxl_memdev_state_create(dev);
1416 	if (IS_ERR(mds))
1417 		return PTR_ERR(mds);
1418 
1419 	mds->mbox_send = cxl_mock_mbox_send;
1420 	mds->payload_size = SZ_4K;
1421 	mds->event.buf = (struct cxl_get_event_payload *) mdata->event_buf;
1422 
1423 	cxlds = &mds->cxlds;
1424 	cxlds->serial = pdev->id;
1425 	if (is_rcd(pdev)) {
1426 		cxlds->rcd = true;
1427 		cxlds->component_reg_phys = CXL_RESOURCE_NONE;
1428 	}
1429 
1430 	rc = cxl_enumerate_cmds(mds);
1431 	if (rc)
1432 		return rc;
1433 
1434 	rc = cxl_poison_state_init(mds);
1435 	if (rc)
1436 		return rc;
1437 
1438 	rc = cxl_set_timestamp(mds);
1439 	if (rc)
1440 		return rc;
1441 
1442 	cxlds->media_ready = true;
1443 	rc = cxl_dev_state_identify(mds);
1444 	if (rc)
1445 		return rc;
1446 
1447 	rc = cxl_mem_create_range_info(mds);
1448 	if (rc)
1449 		return rc;
1450 
1451 	mdata->mes.mds = mds;
1452 	cxl_mock_add_event_logs(&mdata->mes);
1453 
1454 	cxlmd = devm_cxl_add_memdev(&pdev->dev, cxlds);
1455 	if (IS_ERR(cxlmd))
1456 		return PTR_ERR(cxlmd);
1457 
1458 	rc = devm_cxl_setup_fw_upload(&pdev->dev, mds);
1459 	if (rc)
1460 		return rc;
1461 
1462 	cxl_mem_get_event_records(mds, CXLDEV_EVENT_STATUS_ALL);
1463 
1464 	return 0;
1465 }
1466 
1467 static ssize_t security_lock_show(struct device *dev,
1468 				  struct device_attribute *attr, char *buf)
1469 {
1470 	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
1471 
1472 	return sysfs_emit(buf, "%u\n",
1473 			  !!(mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED));
1474 }
1475 
1476 static ssize_t security_lock_store(struct device *dev, struct device_attribute *attr,
1477 				   const char *buf, size_t count)
1478 {
1479 	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
1480 	u32 mask = CXL_PMEM_SEC_STATE_FROZEN | CXL_PMEM_SEC_STATE_USER_PLIMIT |
1481 		   CXL_PMEM_SEC_STATE_MASTER_PLIMIT;
1482 	int val;
1483 
1484 	if (kstrtoint(buf, 0, &val) < 0)
1485 		return -EINVAL;
1486 
1487 	if (val == 1) {
1488 		if (!(mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET))
1489 			return -ENXIO;
1490 		mdata->security_state |= CXL_PMEM_SEC_STATE_LOCKED;
1491 		mdata->security_state &= ~mask;
1492 	} else {
1493 		return -EINVAL;
1494 	}
1495 	return count;
1496 }
1497 
1498 static DEVICE_ATTR_RW(security_lock);
1499 
1500 static ssize_t fw_buf_checksum_show(struct device *dev,
1501 				    struct device_attribute *attr, char *buf)
1502 {
1503 	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
1504 	u8 hash[SHA256_DIGEST_SIZE];
1505 	unsigned char *hstr, *hptr;
1506 	struct sha256_state sctx;
1507 	ssize_t written = 0;
1508 	int i;
1509 
1510 	sha256_init(&sctx);
1511 	sha256_update(&sctx, mdata->fw, mdata->fw_size);
1512 	sha256_final(&sctx, hash);
1513 
1514 	hstr = kzalloc((SHA256_DIGEST_SIZE * 2) + 1, GFP_KERNEL);
1515 	if (!hstr)
1516 		return -ENOMEM;
1517 
1518 	hptr = hstr;
1519 	for (i = 0; i < SHA256_DIGEST_SIZE; i++)
1520 		hptr += sprintf(hptr, "%02x", hash[i]);
1521 
1522 	written = sysfs_emit(buf, "%s\n", hstr);
1523 
1524 	kfree(hstr);
1525 	return written;
1526 }
1527 
1528 static DEVICE_ATTR_RO(fw_buf_checksum);
1529 
1530 static struct attribute *cxl_mock_mem_attrs[] = {
1531 	&dev_attr_security_lock.attr,
1532 	&dev_attr_event_trigger.attr,
1533 	&dev_attr_fw_buf_checksum.attr,
1534 	NULL
1535 };
1536 ATTRIBUTE_GROUPS(cxl_mock_mem);
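/*
 * The device attributes above (security_lock, event_trigger,
 * fw_buf_checksum) are exported on each mock platform device, e.g. under
 * /sys/bus/platform/devices/cxl_mem.N/ (path assumed), and are intended to
 * let test scripts drive scenarios that real hardware cannot easily be
 * forced into.
 */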
1537 
1538 static const struct platform_device_id cxl_mock_mem_ids[] = {
1539 	{ .name = "cxl_mem", 0 },
1540 	{ .name = "cxl_rcd", 1 },
1541 	{ },
1542 };
1543 MODULE_DEVICE_TABLE(platform, cxl_mock_mem_ids);
1544 
1545 static struct platform_driver cxl_mock_mem_driver = {
1546 	.probe = cxl_mock_mem_probe,
1547 	.id_table = cxl_mock_mem_ids,
1548 	.driver = {
1549 		.name = KBUILD_MODNAME,
1550 		.dev_groups = cxl_mock_mem_groups,
1551 		.groups = cxl_mock_mem_core_groups,
1552 	},
1553 };
1554 
1555 module_platform_driver(cxl_mock_mem_driver);
1556 MODULE_LICENSE("GPL v2");
1557 MODULE_IMPORT_NS(CXL);
1558