1 // SPDX-License-Identifier: GPL-2.0
2 //
3 // regmap KUnit tests
4 //
5 // Copyright 2023 Arm Ltd
6 
7 #include <kunit/test.h>
8 #include "internal.h"
9 
10 #define BLOCK_TEST_SIZE 12
11 
/* Shared base config: registers 0..BLOCK_TEST_SIZE, native-word-sized values */
static const struct regmap_config test_regmap_config = {
	.max_register = BLOCK_TEST_SIZE,
	.reg_stride = 1,
	.val_bits = sizeof(unsigned int) * 8,
};
17 
/* Pairs a cache implementation with a human readable test case label */
struct regcache_types {
	enum regcache_type type;	/* cache implementation under test */
	const char *name;		/* used as the KUnit parameter description */
};
22 
/*
 * Generate the KUnit description for a parameterised case.
 * NOTE(review): strcpy() is unbounded; assumes every name in the type
 * tables fits the KUnit description buffer — confirm if names grow.
 */
static void case_to_desc(const struct regcache_types *t, char *desc)
{
	strcpy(desc, t->name);
}
27 
/* All cache types, including running with no cache at all */
static const struct regcache_types regcache_types_list[] = {
	{ REGCACHE_NONE, "none" },
	{ REGCACHE_FLAT, "flat" },
	{ REGCACHE_RBTREE, "rbtree" },
	{ REGCACHE_MAPLE, "maple" },
};

KUNIT_ARRAY_PARAM(regcache_types, regcache_types_list, case_to_desc);
36 
/* Only the cases where a cache is actually present */
static const struct regcache_types real_cache_types_list[] = {
	{ REGCACHE_FLAT, "flat" },
	{ REGCACHE_RBTREE, "rbtree" },
	{ REGCACHE_MAPLE, "maple" },
};

KUNIT_ARRAY_PARAM(real_cache_types, real_cache_types_list, case_to_desc);
44 
/* Caches that can represent sparse register layouts */
static const struct regcache_types sparse_cache_types_list[] = {
	{ REGCACHE_RBTREE, "rbtree" },
	{ REGCACHE_MAPLE, "maple" },
};

KUNIT_ARRAY_PARAM(sparse_cache_types, sparse_cache_types_list, case_to_desc);
51 
52 static struct regmap *gen_regmap(struct regmap_config *config,
53 				 struct regmap_ram_data **data)
54 {
55 	unsigned int *buf;
56 	struct regmap *ret;
57 	size_t size = (config->max_register + 1) * sizeof(unsigned int);
58 	int i;
59 	struct reg_default *defaults;
60 
61 	buf = kmalloc(size, GFP_KERNEL);
62 	if (!buf)
63 		return ERR_PTR(-ENOMEM);
64 
65 	get_random_bytes(buf, size);
66 
67 	*data = kzalloc(sizeof(**data), GFP_KERNEL);
68 	if (!(*data))
69 		return ERR_PTR(-ENOMEM);
70 	(*data)->vals = buf;
71 
72 	if (config->num_reg_defaults) {
73 		defaults = kcalloc(config->num_reg_defaults,
74 				   sizeof(struct reg_default),
75 				   GFP_KERNEL);
76 		if (!defaults)
77 			return ERR_PTR(-ENOMEM);
78 		config->reg_defaults = defaults;
79 
80 		for (i = 0; i < config->num_reg_defaults; i++) {
81 			defaults[i].reg = i * config->reg_stride;
82 			defaults[i].def = buf[i * config->reg_stride];
83 		}
84 	}
85 
86 	ret = regmap_init_ram(config, *data);
87 	if (IS_ERR(ret)) {
88 		kfree(buf);
89 		kfree(*data);
90 	}
91 
92 	return ret;
93 }
94 
/*
 * Register access predicate used as writeable_reg/readable_reg:
 * every register is accessible except register 5.
 */
static bool reg_5_false(struct device *context, unsigned int reg)
{
	if (reg == 5)
		return false;

	return true;
}
99 
100 static void basic_read_write(struct kunit *test)
101 {
102 	struct regcache_types *t = (struct regcache_types *)test->param_value;
103 	struct regmap *map;
104 	struct regmap_config config;
105 	struct regmap_ram_data *data;
106 	unsigned int val, rval;
107 
108 	config = test_regmap_config;
109 	config.cache_type = t->type;
110 
111 	map = gen_regmap(&config, &data);
112 	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
113 	if (IS_ERR(map))
114 		return;
115 
116 	get_random_bytes(&val, sizeof(val));
117 
118 	/* If we write a value to a register we can read it back */
119 	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
120 	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
121 	KUNIT_EXPECT_EQ(test, val, rval);
122 
123 	/* If using a cache the cache satisfied the read */
124 	KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[0]);
125 
126 	regmap_exit(map);
127 }
128 
129 static void bulk_write(struct kunit *test)
130 {
131 	struct regcache_types *t = (struct regcache_types *)test->param_value;
132 	struct regmap *map;
133 	struct regmap_config config;
134 	struct regmap_ram_data *data;
135 	unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE];
136 	int i;
137 
138 	config = test_regmap_config;
139 	config.cache_type = t->type;
140 
141 	map = gen_regmap(&config, &data);
142 	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
143 	if (IS_ERR(map))
144 		return;
145 
146 	get_random_bytes(&val, sizeof(val));
147 
148 	/*
149 	 * Data written via the bulk API can be read back with single
150 	 * reads.
151 	 */
152 	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, 0, val,
153 						   BLOCK_TEST_SIZE));
154 	for (i = 0; i < BLOCK_TEST_SIZE; i++)
155 		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval[i]));
156 
157 	KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));
158 
159 	/* If using a cache the cache satisfied the read */
160 	for (i = 0; i < BLOCK_TEST_SIZE; i++)
161 		KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
162 
163 	regmap_exit(map);
164 }
165 
/* Values written with single writes read back through the bulk API */
static void bulk_read(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Data written as single writes can be read via the bulk API */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, val[i]));
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));
	KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));

	/* If using a cache the cache satisfied the read */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);

	regmap_exit(map);
}
198 
/* Writes to a read-only register fail and never reach the device */
static void write_readonly(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;
	config.num_reg_defaults = BLOCK_TEST_SIZE;
	/* Register 5 is not writeable */
	config.writeable_reg = reg_5_false;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Clear the physical-write tracking before the test proper */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[i] = false;

	/* Change the value of all registers, readonly should fail */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, regmap_write(map, i, val) == 0);

	/* Did that match what we see on the device? */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, data->written[i]);

	regmap_exit(map);
}
233 
/*
 * Reads of a write-only register must fail and must not touch the
 * hardware; the flat cache is the exception since it caches every
 * register regardless of readability.
 */
static void read_writeonly(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;
	/* Register 5 is not readable */
	config.readable_reg = reg_5_false;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Clear the physical-read tracking before the test proper */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[i] = false;

	/*
	 * Try to read all the registers, the writeonly one should
	 * fail if we aren't using the flat cache.
	 */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		if (t->type != REGCACHE_FLAT) {
			KUNIT_EXPECT_EQ(test, i != 5,
					regmap_read(map, i, &val) == 0);
		} else {
			KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));
		}
	}

	/* Did we trigger a hardware access? */
	KUNIT_EXPECT_FALSE(test, data->read[5]);

	regmap_exit(map);
}
273 
274 static void reg_defaults(struct kunit *test)
275 {
276 	struct regcache_types *t = (struct regcache_types *)test->param_value;
277 	struct regmap *map;
278 	struct regmap_config config;
279 	struct regmap_ram_data *data;
280 	unsigned int rval[BLOCK_TEST_SIZE];
281 	int i;
282 
283 	config = test_regmap_config;
284 	config.cache_type = t->type;
285 	config.num_reg_defaults = BLOCK_TEST_SIZE;
286 
287 	map = gen_regmap(&config, &data);
288 	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
289 	if (IS_ERR(map))
290 		return;
291 
292 	/* Read back the expected default data */
293 	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
294 						  BLOCK_TEST_SIZE));
295 	KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));
296 
297 	/* The data should have been read from cache if there was one */
298 	for (i = 0; i < BLOCK_TEST_SIZE; i++)
299 		KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
300 }
301 
302 static void reg_defaults_read_dev(struct kunit *test)
303 {
304 	struct regcache_types *t = (struct regcache_types *)test->param_value;
305 	struct regmap *map;
306 	struct regmap_config config;
307 	struct regmap_ram_data *data;
308 	unsigned int rval[BLOCK_TEST_SIZE];
309 	int i;
310 
311 	config = test_regmap_config;
312 	config.cache_type = t->type;
313 	config.num_reg_defaults_raw = BLOCK_TEST_SIZE;
314 
315 	map = gen_regmap(&config, &data);
316 	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
317 	if (IS_ERR(map))
318 		return;
319 
320 	/* We should have read the cache defaults back from the map */
321 	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
322 		KUNIT_EXPECT_EQ(test, t->type != REGCACHE_NONE, data->read[i]);
323 		data->read[i] = false;
324 	}
325 
326 	/* Read back the expected default data */
327 	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
328 						  BLOCK_TEST_SIZE));
329 	KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));
330 
331 	/* The data should have been read from cache if there was one */
332 	for (i = 0; i < BLOCK_TEST_SIZE; i++)
333 		KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
334 }
335 
/* regmap_register_patch() writes exactly the patched registers */
static void register_patch(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	struct reg_sequence patch[2];
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	/* We need defaults so readback works */
	config = test_regmap_config;
	config.cache_type = t->type;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Stash the original values */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));

	/* Patch a couple of values: registers 2 and 5 get default + 1 */
	patch[0].reg = 2;
	patch[0].def = rval[2] + 1;
	patch[0].delay_us = 0;
	patch[1].reg = 5;
	patch[1].def = rval[5] + 1;
	patch[1].delay_us = 0;
	KUNIT_EXPECT_EQ(test, 0, regmap_register_patch(map, patch,
						       ARRAY_SIZE(patch)));

	/* Only the patched registers are written */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		switch (i) {
		case 2:
		case 5:
			KUNIT_EXPECT_TRUE(test, data->written[i]);
			KUNIT_EXPECT_EQ(test, data->vals[i], rval[i] + 1);
			break;
		default:
			KUNIT_EXPECT_FALSE(test, data->written[i]);
			KUNIT_EXPECT_EQ(test, data->vals[i], rval[i]);
			break;
		}
	}

	regmap_exit(map);
}
387 
/* With reg_stride 2 only even-numbered registers are accessible */
static void stride(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval;
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;
	config.reg_stride = 2;
	/* One default per strided (even) register */
	config.num_reg_defaults = BLOCK_TEST_SIZE / 2;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Only even registers can be accessed, try both read and write */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		data->read[i] = false;
		data->written[i] = false;

		if (i % 2) {
			/* Odd registers: both accesses fail, no HW access */
			KUNIT_EXPECT_NE(test, 0, regmap_read(map, i, &rval));
			KUNIT_EXPECT_NE(test, 0, regmap_write(map, i, rval));
			KUNIT_EXPECT_FALSE(test, data->read[i]);
			KUNIT_EXPECT_FALSE(test, data->written[i]);
		} else {
			KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
			KUNIT_EXPECT_EQ(test, data->vals[i], rval);
			KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE,
					data->read[i]);

			KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, rval));
			KUNIT_EXPECT_TRUE(test, data->written[i]);
		}
	}

	regmap_exit(map);
}
430 
/*
 * Indirect access range: virtual registers range_min..range_max are
 * reached through the physical window starting at window_start, with
 * the page selected by writing selector_reg.
 */
static struct regmap_range_cfg test_range = {
	.selector_reg = 1,
	.selector_mask = 0xff,

	.window_start = 4,
	.window_len = 10,

	.range_min = 20,
	.range_max = 40,
};
441 
442 static bool test_range_volatile(struct device *dev, unsigned int reg)
443 {
444 	if (reg >= test_range.window_start &&
445 	    reg <= test_range.selector_reg + test_range.window_len)
446 		return true;
447 
448 	if (reg >= test_range.range_min && reg <= test_range.range_max)
449 		return true;
450 
451 	return false;
452 }
453 
/*
 * Accesses to the virtual range are translated into a selector write
 * plus a window access; the virtual registers themselves must never be
 * touched physically.
 */
static void basic_ranges(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;
	config.volatile_reg = test_range_volatile;
	config.ranges = &test_range;
	config.num_ranges = 1;
	config.max_register = test_range.range_max;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Clear access tracking for the virtual range */
	for (i = test_range.range_min; i < test_range.range_max; i++) {
		data->read[i] = false;
		data->written[i] = false;
	}

	/* Reset the page to a non-zero value to trigger a change */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.selector_reg,
					      test_range.range_max));

	/* Check we set the page and use the window for writes */
	data->written[test_range.selector_reg] = false;
	data->written[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);

	/* Same but for a register on the second page of the range */
	data->written[test_range.selector_reg] = false;
	data->written[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map,
					      test_range.range_min +
					      test_range.window_len,
					      0));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);

	/* Same for reads */
	data->written[test_range.selector_reg] = false;
	data->read[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, test_range.range_min, &val));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);

	data->written[test_range.selector_reg] = false;
	data->read[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map,
					     test_range.range_min +
					     test_range.window_len,
					     &val));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);

	/* No physical access triggered in the virtual range */
	for (i = test_range.range_min; i < test_range.range_max; i++) {
		KUNIT_EXPECT_FALSE(test, data->read[i]);
		KUNIT_EXPECT_FALSE(test, data->written[i]);
	}

	regmap_exit(map);
}
524 
525 /* Try to stress dynamic creation of cache data structures */
526 static void stress_insert(struct kunit *test)
527 {
528 	struct regcache_types *t = (struct regcache_types *)test->param_value;
529 	struct regmap *map;
530 	struct regmap_config config;
531 	struct regmap_ram_data *data;
532 	unsigned int rval, *vals;
533 	size_t buf_sz;
534 	int i;
535 
536 	config = test_regmap_config;
537 	config.cache_type = t->type;
538 	config.max_register = 300;
539 
540 	map = gen_regmap(&config, &data);
541 	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
542 	if (IS_ERR(map))
543 		return;
544 
545 	vals = kunit_kcalloc(test, sizeof(unsigned long), config.max_register,
546 			     GFP_KERNEL);
547 	KUNIT_ASSERT_FALSE(test, vals == NULL);
548 	buf_sz = sizeof(unsigned long) * config.max_register;
549 
550 	get_random_bytes(vals, buf_sz);
551 
552 	/* Write data into the map/cache in ever decreasing strides */
553 	for (i = 0; i < config.max_register; i += 100)
554 		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
555 	for (i = 0; i < config.max_register; i += 50)
556 		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
557 	for (i = 0; i < config.max_register; i += 25)
558 		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
559 	for (i = 0; i < config.max_register; i += 10)
560 		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
561 	for (i = 0; i < config.max_register; i += 5)
562 		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
563 	for (i = 0; i < config.max_register; i += 3)
564 		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
565 	for (i = 0; i < config.max_register; i += 2)
566 		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
567 	for (i = 0; i < config.max_register; i++)
568 		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
569 
570 	/* Do reads from the cache (if there is one) match? */
571 	for (i = 0; i < config.max_register; i ++) {
572 		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
573 		KUNIT_EXPECT_EQ(test, rval, vals[i]);
574 		KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
575 	}
576 
577 	regmap_exit(map);
578 }
579 
580 static void cache_bypass(struct kunit *test)
581 {
582 	struct regcache_types *t = (struct regcache_types *)test->param_value;
583 	struct regmap *map;
584 	struct regmap_config config;
585 	struct regmap_ram_data *data;
586 	unsigned int val, rval;
587 
588 	config = test_regmap_config;
589 	config.cache_type = t->type;
590 
591 	map = gen_regmap(&config, &data);
592 	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
593 	if (IS_ERR(map))
594 		return;
595 
596 	get_random_bytes(&val, sizeof(val));
597 
598 	/* Ensure the cache has a value in it */
599 	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
600 
601 	/* Bypass then write a different value */
602 	regcache_cache_bypass(map, true);
603 	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val + 1));
604 
605 	/* Read the bypassed value */
606 	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
607 	KUNIT_EXPECT_EQ(test, val + 1, rval);
608 	KUNIT_EXPECT_EQ(test, data->vals[0], rval);
609 
610 	/* Disable bypass, the cache should still return the original value */
611 	regcache_cache_bypass(map, false);
612 	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
613 	KUNIT_EXPECT_EQ(test, val, rval);
614 
615 	regmap_exit(map);
616 }
617 
/* regcache_sync() rewrites dirty cache contents back to the device */
static void cache_sync(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Put some data into the cache */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, 0, val,
						   BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[i] = false;

	/* Trash the data on the device itself then resync */
	regcache_mark_dirty(map);
	memset(data->vals, 0, sizeof(val));
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Did we just write the correct data out? */
	KUNIT_EXPECT_MEMEQ(test, data->vals, val, sizeof(val));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, true, data->written[i]);

	regmap_exit(map);
}
655 
/* Sync only writes registers that differ from their defaults */
static void cache_sync_defaults(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Change the value of one register */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 2, val));

	/* Resync */
	regcache_mark_dirty(map);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[i] = false;
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Did we just sync the one register we touched? */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i == 2, data->written[i]);

	regmap_exit(map);
}
691 
/* Sync must skip the read-only register even if its cache entry changed */
static void cache_sync_readonly(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;
	/* Register 5 is not writeable */
	config.writeable_reg = reg_5_false;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Read all registers to fill the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));

	/* Change the value of all registers, readonly should fail */
	get_random_bytes(&val, sizeof(val));
	regcache_cache_only(map, true);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, regmap_write(map, i, val) == 0);
	regcache_cache_only(map, false);

	/* Resync */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[i] = false;
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Did that match what we see on the device? */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, data->written[i]);

	regmap_exit(map);
}
732 
/* A registered patch is reapplied by sync but never enters the cache */
static void cache_sync_patch(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	struct reg_sequence patch[2];
	unsigned int rval[BLOCK_TEST_SIZE], val;
	int i;

	/* We need defaults so readback works */
	config = test_regmap_config;
	config.cache_type = t->type;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Stash the original values */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));

	/* Patch a couple of values: registers 2 and 5 get default + 1 */
	patch[0].reg = 2;
	patch[0].def = rval[2] + 1;
	patch[0].delay_us = 0;
	patch[1].reg = 5;
	patch[1].def = rval[5] + 1;
	patch[1].delay_us = 0;
	KUNIT_EXPECT_EQ(test, 0, regmap_register_patch(map, patch,
						       ARRAY_SIZE(patch)));

	/* Sync the cache */
	regcache_mark_dirty(map);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[i] = false;
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* The patch should be on the device but not in the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));
		KUNIT_EXPECT_EQ(test, val, rval[i]);

		switch (i) {
		case 2:
		case 5:
			KUNIT_EXPECT_EQ(test, true, data->written[i]);
			KUNIT_EXPECT_EQ(test, data->vals[i], rval[i] + 1);
			break;
		default:
			KUNIT_EXPECT_EQ(test, false, data->written[i]);
			KUNIT_EXPECT_EQ(test, data->vals[i], rval[i]);
			break;
		}
	}

	regmap_exit(map);
}
793 
/* regcache_drop_region() forces device reads for the dropped registers */
static void cache_drop(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Ensure the data is read from the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[i] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		KUNIT_EXPECT_FALSE(test, data->read[i]);
		data->read[i] = false;
	}
	KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));

	/* Drop some registers (inclusive range 3..5) */
	KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 3, 5));

	/* Reread and check only the dropped registers hit the device. */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, data->read[i], i >= 3 && i <= 5);
	KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));

	regmap_exit(map);
}
835 
/* Parameter tuple for the raw (byte-level) tests */
struct raw_test_types {
	const char *name;		/* KUnit parameter description */

	enum regcache_type cache_type;	/* cache implementation under test */
	enum regmap_endian val_endian;	/* value endianness on the wire */
};
842 
/*
 * Generate the KUnit description for a raw test case.
 * NOTE(review): strcpy() is unbounded; assumes every name in the raw
 * type tables fits the KUnit description buffer.
 */
static void raw_to_desc(const struct raw_test_types *t, char *desc)
{
	strcpy(desc, t->name);
}
847 
/* Every cache type crossed with both value endiannesses */
static const struct raw_test_types raw_types_list[] = {
	{ "none-little",   REGCACHE_NONE,   REGMAP_ENDIAN_LITTLE },
	{ "none-big",      REGCACHE_NONE,   REGMAP_ENDIAN_BIG },
	{ "flat-little",   REGCACHE_FLAT,   REGMAP_ENDIAN_LITTLE },
	{ "flat-big",      REGCACHE_FLAT,   REGMAP_ENDIAN_BIG },
	{ "rbtree-little", REGCACHE_RBTREE, REGMAP_ENDIAN_LITTLE },
	{ "rbtree-big",    REGCACHE_RBTREE, REGMAP_ENDIAN_BIG },
	{ "maple-little",  REGCACHE_MAPLE,  REGMAP_ENDIAN_LITTLE },
	{ "maple-big",     REGCACHE_MAPLE,  REGMAP_ENDIAN_BIG },
};

KUNIT_ARRAY_PARAM(raw_test_types, raw_types_list, raw_to_desc);
860 
/* As above but restricted to configurations with a real cache */
static const struct raw_test_types raw_cache_types_list[] = {
	{ "flat-little",   REGCACHE_FLAT,   REGMAP_ENDIAN_LITTLE },
	{ "flat-big",      REGCACHE_FLAT,   REGMAP_ENDIAN_BIG },
	{ "rbtree-little", REGCACHE_RBTREE, REGMAP_ENDIAN_LITTLE },
	{ "rbtree-big",    REGCACHE_RBTREE, REGMAP_ENDIAN_BIG },
	{ "maple-little",  REGCACHE_MAPLE,  REGMAP_ENDIAN_LITTLE },
	{ "maple-big",     REGCACHE_MAPLE,  REGMAP_ENDIAN_BIG },
};

KUNIT_ARRAY_PARAM(raw_test_cache_types, raw_cache_types_list, raw_to_desc);
871 
/* Base config for the raw tests: 16-bit registers and 16-bit values */
static const struct regmap_config raw_regmap_config = {
	.max_register = BLOCK_TEST_SIZE,

	.reg_format_endian = REGMAP_ENDIAN_LITTLE,
	.reg_bits = 16,
	.val_bits = 16,
};
879 
880 static struct regmap *gen_raw_regmap(struct regmap_config *config,
881 				     struct raw_test_types *test_type,
882 				     struct regmap_ram_data **data)
883 {
884 	u16 *buf;
885 	struct regmap *ret;
886 	size_t size = (config->max_register + 1) * config->reg_bits / 8;
887 	int i;
888 	struct reg_default *defaults;
889 
890 	config->cache_type = test_type->cache_type;
891 	config->val_format_endian = test_type->val_endian;
892 
893 	buf = kmalloc(size, GFP_KERNEL);
894 	if (!buf)
895 		return ERR_PTR(-ENOMEM);
896 
897 	get_random_bytes(buf, size);
898 
899 	*data = kzalloc(sizeof(**data), GFP_KERNEL);
900 	if (!(*data))
901 		return ERR_PTR(-ENOMEM);
902 	(*data)->vals = (void *)buf;
903 
904 	config->num_reg_defaults = config->max_register + 1;
905 	defaults = kcalloc(config->num_reg_defaults,
906 			   sizeof(struct reg_default),
907 			   GFP_KERNEL);
908 	if (!defaults)
909 		return ERR_PTR(-ENOMEM);
910 	config->reg_defaults = defaults;
911 
912 	for (i = 0; i < config->num_reg_defaults; i++) {
913 		defaults[i].reg = i;
914 		switch (test_type->val_endian) {
915 		case REGMAP_ENDIAN_LITTLE:
916 			defaults[i].def = le16_to_cpu(buf[i]);
917 			break;
918 		case REGMAP_ENDIAN_BIG:
919 			defaults[i].def = be16_to_cpu(buf[i]);
920 			break;
921 		default:
922 			return ERR_PTR(-EINVAL);
923 		}
924 	}
925 
926 	/*
927 	 * We use the defaults in the tests but they don't make sense
928 	 * to the core if there's no cache.
929 	 */
930 	if (config->cache_type == REGCACHE_NONE)
931 		config->num_reg_defaults = 0;
932 
933 	ret = regmap_init_raw_ram(config, *data);
934 	if (IS_ERR(ret)) {
935 		kfree(buf);
936 		kfree(*data);
937 	}
938 
939 	return ret;
940 }
941 
/* Generated raw defaults are visible via single register reads */
static void raw_read_defaults_single(struct kunit *test)
{
	struct raw_test_types *t = (struct raw_test_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval;
	int i;

	config = raw_regmap_config;

	map = gen_raw_regmap(&config, t, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Check that we can read the defaults via the API */
	for (i = 0; i < config.max_register + 1; i++) {
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
		KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
	}

	regmap_exit(map);
}
966 
/* Raw bulk reads return the defaults in the configured wire endianness */
static void raw_read_defaults(struct kunit *test)
{
	struct raw_test_types *t = (struct raw_test_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	u16 *rval;
	u16 def;
	size_t val_len;
	int i;

	config = raw_regmap_config;

	map = gen_raw_regmap(&config, t, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	val_len = sizeof(*rval) * (config.max_register + 1);
	rval = kmalloc(val_len, GFP_KERNEL);
	KUNIT_ASSERT_TRUE(test, rval != NULL);
	if (!rval)
		return;

	/* Check that we can read the defaults via the API */
	KUNIT_EXPECT_EQ(test, 0, regmap_raw_read(map, 0, rval, val_len));
	for (i = 0; i < config.max_register + 1; i++) {
		def = config.reg_defaults[i].def;
		/* Raw data is in wire endianness, decode before comparing */
		if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
			KUNIT_EXPECT_EQ(test, def, be16_to_cpu(rval[i]));
		} else {
			KUNIT_EXPECT_EQ(test, def, le16_to_cpu(rval[i]));
		}
	}

	kfree(rval);
	regmap_exit(map);
}
1005 
/* On a raw map a single written value reads back unchanged */
static void raw_write_read_single(struct kunit *test)
{
	struct raw_test_types *t = (struct raw_test_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	u16 val;
	unsigned int rval;

	config = raw_regmap_config;

	map = gen_raw_regmap(&config, t, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* If we write a value to a register we can read it back */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
	KUNIT_EXPECT_EQ(test, val, rval);

	regmap_exit(map);
}
1031 
1032 static void raw_write(struct kunit *test)
1033 {
1034 	struct raw_test_types *t = (struct raw_test_types *)test->param_value;
1035 	struct regmap *map;
1036 	struct regmap_config config;
1037 	struct regmap_ram_data *data;
1038 	u16 *hw_buf;
1039 	u16 val[2];
1040 	unsigned int rval;
1041 	int i;
1042 
1043 	config = raw_regmap_config;
1044 
1045 	map = gen_raw_regmap(&config, t, &data);
1046 	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
1047 	if (IS_ERR(map))
1048 		return;
1049 
1050 	hw_buf = (u16 *)data->vals;
1051 
1052 	get_random_bytes(&val, sizeof(val));
1053 
1054 	/* Do a raw write */
1055 	KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val, sizeof(val)));
1056 
1057 	/* We should read back the new values, and defaults for the rest */
1058 	for (i = 0; i < config.max_register + 1; i++) {
1059 		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
1060 
1061 		switch (i) {
1062 		case 2:
1063 		case 3:
1064 			if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
1065 				KUNIT_EXPECT_EQ(test, rval,
1066 						be16_to_cpu(val[i % 2]));
1067 			} else {
1068 				KUNIT_EXPECT_EQ(test, rval,
1069 						le16_to_cpu(val[i % 2]));
1070 			}
1071 			break;
1072 		default:
1073 			KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
1074 			break;
1075 		}
1076 	}
1077 
1078 	/* The values should appear in the "hardware" */
1079 	KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], val, sizeof(val));
1080 
1081 	regmap_exit(map);
1082 }
1083 
/*
 * Writes made while the cache is in cache-only mode must be held in
 * the cache, not reach the "hardware", and only be written out once
 * regcache_sync() runs.
 */
static void raw_sync(struct kunit *test)
{
	struct raw_test_types *t = (struct raw_test_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	u16 val[2];
	u16 *hw_buf;
	unsigned int rval;
	int i;

	config = raw_regmap_config;

	map = gen_raw_regmap(&config, t, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	hw_buf = (u16 *)data->vals;

	get_random_bytes(&val, sizeof(val));

	/* Do a regular write and a raw write in cache only mode */
	regcache_cache_only(map, true);
	KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val, sizeof(val)));
	/*
	 * regmap_write() takes a CPU-order value, so convert the raw
	 * bytes of val[0] to match what the raw write above stored.
	 */
	if (config.val_format_endian == REGMAP_ENDIAN_BIG)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 6,
						      be16_to_cpu(val[0])));
	else
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 6,
						      le16_to_cpu(val[0])));

	/* We should read back the new values, and defaults for the rest */
	for (i = 0; i < config.max_register + 1; i++) {
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));

		switch (i) {
		case 2:
		case 3:
		case 6:
			/* Registers 2 and 6 hold val[0], register 3 val[1] */
			if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
				KUNIT_EXPECT_EQ(test, rval,
						be16_to_cpu(val[i % 2]));
			} else {
				KUNIT_EXPECT_EQ(test, rval,
						le16_to_cpu(val[i % 2]));
			}
			break;
		default:
			KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
			break;
		}
	}

	/* The values should not appear in the "hardware" */
	KUNIT_EXPECT_MEMNEQ(test, &hw_buf[2], val, sizeof(val));
	KUNIT_EXPECT_MEMNEQ(test, &hw_buf[6], val, sizeof(u16));

	/* Reset the per-register written flags before the sync */
	for (i = 0; i < config.max_register + 1; i++)
		data->written[i] = false;

	/* Do the sync */
	regcache_cache_only(map, false);
	regcache_mark_dirty(map);
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* The values should now appear in the "hardware" */
	KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], val, sizeof(val));
	KUNIT_EXPECT_MEMEQ(test, &hw_buf[6], val, sizeof(u16));

	regmap_exit(map);
}
1156 
static struct kunit_case regmap_test_cases[] = {
	/* Run against every cache type, including REGCACHE_NONE */
	KUNIT_CASE_PARAM(basic_read_write, regcache_types_gen_params),
	KUNIT_CASE_PARAM(bulk_write, regcache_types_gen_params),
	KUNIT_CASE_PARAM(bulk_read, regcache_types_gen_params),
	KUNIT_CASE_PARAM(write_readonly, regcache_types_gen_params),
	KUNIT_CASE_PARAM(read_writeonly, regcache_types_gen_params),
	KUNIT_CASE_PARAM(reg_defaults, regcache_types_gen_params),
	KUNIT_CASE_PARAM(reg_defaults_read_dev, regcache_types_gen_params),
	KUNIT_CASE_PARAM(register_patch, regcache_types_gen_params),
	KUNIT_CASE_PARAM(stride, regcache_types_gen_params),
	KUNIT_CASE_PARAM(basic_ranges, regcache_types_gen_params),
	KUNIT_CASE_PARAM(stress_insert, regcache_types_gen_params),
	/* Run only against real caches (flat, rbtree, maple) */
	KUNIT_CASE_PARAM(cache_bypass, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_defaults, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_readonly, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_patch, real_cache_types_gen_params),
	/* Run only against sparse caches (rbtree, maple) */
	KUNIT_CASE_PARAM(cache_drop, sparse_cache_types_gen_params),

	/* Raw I/O tests, run against the raw regmap configurations */
	KUNIT_CASE_PARAM(raw_read_defaults_single, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_read_defaults, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_write_read_single, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_write, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_sync, raw_test_cache_types_gen_params),
	{}
};
1183 
/* Register all the cases above as the "regmap" KUnit suite */
static struct kunit_suite regmap_test_suite = {
	.name = "regmap",
	.test_cases = regmap_test_cases,
};
kunit_test_suite(regmap_test_suite);
1189 
1190 MODULE_LICENSE("GPL v2");
1191