// SPDX-License-Identifier: GPL-2.0
//
// regmap KUnit tests
//
// Copyright 2023 Arm Ltd

#include <kunit/test.h>
#include "internal.h"

#define BLOCK_TEST_SIZE 12

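/*
 * Base configuration shared by the non-raw tests: a small linear block
 * of registers whose values are full-width unsigned ints, so test data
 * round-trips through the map unmodified.
 */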
static const struct regmap_config test_regmap_config = {
	.max_register = BLOCK_TEST_SIZE,
	.reg_stride = 1,
	.val_bits = sizeof(unsigned int) * 8,
};

struct regcache_types {
	enum regcache_type type;
	const char *name;
};

static void case_to_desc(const struct regcache_types *t, char *desc)
{
	strcpy(desc, t->name);
}

static const struct regcache_types regcache_types_list[] = {
	{ REGCACHE_NONE, "none" },
	{ REGCACHE_FLAT, "flat" },
	{ REGCACHE_RBTREE, "rbtree" },
	{ REGCACHE_MAPLE, "maple" },
};

KUNIT_ARRAY_PARAM(regcache_types, regcache_types_list, case_to_desc);

static const struct regcache_types real_cache_types_list[] = {
	{ REGCACHE_FLAT, "flat" },
	{ REGCACHE_RBTREE, "rbtree" },
	{ REGCACHE_MAPLE, "maple" },
};

KUNIT_ARRAY_PARAM(real_cache_types, real_cache_types_list, case_to_desc);

static const struct regcache_types sparse_cache_types_list[] = {
	{ REGCACHE_RBTREE, "rbtree" },
	{ REGCACHE_MAPLE, "maple" },
};

KUNIT_ARRAY_PARAM(sparse_cache_types, sparse_cache_types_list, case_to_desc);

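/*
 * Create a RAM-backed regmap for the requested configuration. The
 * backing store is seeded with random data, and if the caller asked for
 * register defaults they are generated to match that data.
 */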
static struct regmap *gen_regmap(struct regmap_config *config,
				 struct regmap_ram_data **data)
{
	unsigned int *buf;
	struct regmap *ret;
	size_t size = (config->max_register + 1) * sizeof(unsigned int);
	int i;
	struct reg_default *defaults;

	config->disable_locking = config->cache_type == REGCACHE_RBTREE ||
					config->cache_type == REGCACHE_MAPLE;

	buf = kmalloc(size, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	get_random_bytes(buf, size);

	*data = kzalloc(sizeof(**data), GFP_KERNEL);
	if (!(*data))
		return ERR_PTR(-ENOMEM);
	(*data)->vals = buf;

	if (config->num_reg_defaults) {
		defaults = kcalloc(config->num_reg_defaults,
				   sizeof(struct reg_default),
				   GFP_KERNEL);
		if (!defaults)
			return ERR_PTR(-ENOMEM);
		config->reg_defaults = defaults;

		for (i = 0; i < config->num_reg_defaults; i++) {
			defaults[i].reg = i * config->reg_stride;
			defaults[i].def = buf[i * config->reg_stride];
		}
	}

	ret = regmap_init_ram(config, *data);
	if (IS_ERR(ret)) {
		kfree(buf);
		kfree(*data);
	}

	return ret;
}

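/*
 * Access predicate that rejects register 5; used as the writeable_reg
 * or readable_reg callback in the tests below.
 */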
static bool reg_5_false(struct device *context, unsigned int reg)
{
	return reg != 5;
}

static void basic_read_write(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val, rval;

	config = test_regmap_config;
	config.cache_type = t->type;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* If we write a value to a register we can read it back */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
	KUNIT_EXPECT_EQ(test, val, rval);

	/* If using a cache the cache satisfied the read */
	KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[0]);

	regmap_exit(map);
}

static void bulk_write(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/*
	 * Data written via the bulk API can be read back with single
	 * reads.
	 */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, 0, val,
						   BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval[i]));

	KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));

	/* If using a cache the cache satisfied the read */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);

	regmap_exit(map);
}

static void bulk_read(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Data written as single writes can be read via the bulk API */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, val[i]));
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));
	KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));

	/* If using a cache the cache satisfied the read */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);

	regmap_exit(map);
}

static void write_readonly(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;
	config.num_reg_defaults = BLOCK_TEST_SIZE;
	config.writeable_reg = reg_5_false;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[i] = false;

	/* Change the value of all registers, readonly should fail */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, regmap_write(map, i, val) == 0);

	/* Did that match what we see on the device? */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, data->written[i]);

	regmap_exit(map);
}

static void read_writeonly(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;
	config.readable_reg = reg_5_false;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[i] = false;

	/*
	 * Try to read all the registers, the writeonly one should
	 * fail if we aren't using the flat cache.
	 */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		if (t->type != REGCACHE_FLAT) {
			KUNIT_EXPECT_EQ(test, i != 5,
					regmap_read(map, i, &val) == 0);
		} else {
			KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));
		}
	}

	/* Did we trigger a hardware access? */
	KUNIT_EXPECT_FALSE(test, data->read[5]);

	regmap_exit(map);
}

static void reg_defaults(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Read back the expected default data */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));
	KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));

	/* The data should have been read from cache if there was one */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);

	regmap_exit(map);
}

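/*
 * Setting num_reg_defaults_raw without supplying a default table means
 * the core has to populate any cache by reading the initial register
 * values back from the device, which is what the read flags are checked
 * for below.
 */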
static void reg_defaults_read_dev(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;
	config.num_reg_defaults_raw = BLOCK_TEST_SIZE;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* We should have read the cache defaults back from the map */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		KUNIT_EXPECT_EQ(test, t->type != REGCACHE_NONE, data->read[i]);
		data->read[i] = false;
	}

	/* Read back the expected default data */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));
	KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));

	/* The data should have been read from cache if there was one */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);

	regmap_exit(map);
}

static void register_patch(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	struct reg_sequence patch[2];
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	/* We need defaults so readback works */
	config = test_regmap_config;
	config.cache_type = t->type;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Stash the original values */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));

	/* Patch a couple of values */
	patch[0].reg = 2;
	patch[0].def = rval[2] + 1;
	patch[0].delay_us = 0;
	patch[1].reg = 5;
	patch[1].def = rval[5] + 1;
	patch[1].delay_us = 0;
	KUNIT_EXPECT_EQ(test, 0, regmap_register_patch(map, patch,
						       ARRAY_SIZE(patch)));

	/* Only the patched registers are written */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		switch (i) {
		case 2:
		case 5:
			KUNIT_EXPECT_TRUE(test, data->written[i]);
			KUNIT_EXPECT_EQ(test, data->vals[i], rval[i] + 1);
			break;
		default:
			KUNIT_EXPECT_FALSE(test, data->written[i]);
			KUNIT_EXPECT_EQ(test, data->vals[i], rval[i]);
			break;
		}
	}

	regmap_exit(map);
}

static void stride(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval;
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;
	config.reg_stride = 2;
	config.num_reg_defaults = BLOCK_TEST_SIZE / 2;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Only even registers can be accessed, try both read and write */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		data->read[i] = false;
		data->written[i] = false;

		if (i % 2) {
			KUNIT_EXPECT_NE(test, 0, regmap_read(map, i, &rval));
			KUNIT_EXPECT_NE(test, 0, regmap_write(map, i, rval));
			KUNIT_EXPECT_FALSE(test, data->read[i]);
			KUNIT_EXPECT_FALSE(test, data->written[i]);
		} else {
			KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
			KUNIT_EXPECT_EQ(test, data->vals[i], rval);
			KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE,
					data->read[i]);

			KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, rval));
			KUNIT_EXPECT_TRUE(test, data->written[i]);
		}
	}

	regmap_exit(map);
}

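/*
 * Indirectly accessed range: registers range_min..range_max are reached
 * through a window of window_len registers starting at window_start,
 * with the page selected via selector_reg.
 */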
static struct regmap_range_cfg test_range = {
	.selector_reg = 1,
	.selector_mask = 0xff,

	.window_start = 4,
	.window_len = 10,

	.range_min = 20,
	.range_max = 40,
};

static bool test_range_volatile(struct device *dev, unsigned int reg)
{
	if (reg >= test_range.window_start &&
	    reg <= test_range.window_start + test_range.window_len)
		return true;

	if (reg >= test_range.range_min && reg <= test_range.range_max)
		return true;

	return false;
}

static void basic_ranges(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;
	config.volatile_reg = test_range_volatile;
	config.ranges = &test_range;
	config.num_ranges = 1;
	config.max_register = test_range.range_max;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	for (i = test_range.range_min; i < test_range.range_max; i++) {
		data->read[i] = false;
		data->written[i] = false;
	}

	/* Reset the page to a non-zero value to trigger a change */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.selector_reg,
					      test_range.range_max));

	/* Check we set the page and use the window for writes */
	data->written[test_range.selector_reg] = false;
	data->written[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);

	data->written[test_range.selector_reg] = false;
	data->written[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map,
					      test_range.range_min +
					      test_range.window_len,
					      0));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);

	/* Same for reads */
	data->written[test_range.selector_reg] = false;
	data->read[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, test_range.range_min, &val));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);

	data->written[test_range.selector_reg] = false;
	data->read[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map,
					     test_range.range_min +
					     test_range.window_len,
					     &val));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);

	/* No physical access triggered in the virtual range */
	for (i = test_range.range_min; i < test_range.range_max; i++) {
		KUNIT_EXPECT_FALSE(test, data->read[i]);
		KUNIT_EXPECT_FALSE(test, data->written[i]);
	}

	regmap_exit(map);
}

/* Try to stress dynamic creation of cache data structures */
static void stress_insert(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval, *vals;
	size_t buf_sz;
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;
	config.max_register = 300;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	vals = kunit_kcalloc(test, sizeof(unsigned long), config.max_register,
			     GFP_KERNEL);
	KUNIT_ASSERT_FALSE(test, vals == NULL);
	buf_sz = sizeof(unsigned long) * config.max_register;

	get_random_bytes(vals, buf_sz);

	/* Write data into the map/cache in ever decreasing strides */
	for (i = 0; i < config.max_register; i += 100)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
	for (i = 0; i < config.max_register; i += 50)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
	for (i = 0; i < config.max_register; i += 25)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
	for (i = 0; i < config.max_register; i += 10)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
	for (i = 0; i < config.max_register; i += 5)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
	for (i = 0; i < config.max_register; i += 3)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
	for (i = 0; i < config.max_register; i += 2)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
	for (i = 0; i < config.max_register; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));

	/* Do reads from the cache (if there is one) match? */
	for (i = 0; i < config.max_register; i++) {
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
		KUNIT_EXPECT_EQ(test, rval, vals[i]);
		KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
	}

	regmap_exit(map);
}

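/*
 * With cache bypass enabled, accesses go straight to the device and the
 * cache contents are left untouched; the test below relies on that to
 * check the cached value survives a bypassed write.
 */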
static void cache_bypass(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val, rval;

	config = test_regmap_config;
	config.cache_type = t->type;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Ensure the cache has a value in it */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));

	/* Bypass then write a different value */
	regcache_cache_bypass(map, true);
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val + 1));

	/* Read the bypassed value */
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
	KUNIT_EXPECT_EQ(test, val + 1, rval);
	KUNIT_EXPECT_EQ(test, data->vals[0], rval);

	/* Disable bypass, the cache should still return the original value */
	regcache_cache_bypass(map, false);
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
	KUNIT_EXPECT_EQ(test, val, rval);

	regmap_exit(map);
}

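/*
 * regcache_mark_dirty() tells the core the device no longer matches the
 * cache; the following regcache_sync() then writes the cached values
 * (skipping registers still at their default) back to the hardware.
 */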
static void cache_sync(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Put some data into the cache */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, 0, val,
						   BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[i] = false;

	/* Trash the data on the device itself then resync */
	regcache_mark_dirty(map);
	memset(data->vals, 0, sizeof(val));
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Did we just write the correct data out? */
	KUNIT_EXPECT_MEMEQ(test, data->vals, val, sizeof(val));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, true, data->written[i]);

	regmap_exit(map);
}

static void cache_sync_defaults(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Change the value of one register */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 2, val));

	/* Resync */
	regcache_mark_dirty(map);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[i] = false;
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Did we just sync the one register we touched? */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i == 2, data->written[i]);

	regmap_exit(map);
}

static void cache_sync_readonly(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;
	config.writeable_reg = reg_5_false;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Read all registers to fill the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));

	/* Change the value of all registers, readonly should fail */
	get_random_bytes(&val, sizeof(val));
	regcache_cache_only(map, true);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, regmap_write(map, i, val) == 0);
	regcache_cache_only(map, false);

	/* Resync */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[i] = false;
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Did that match what we see on the device? */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, data->written[i]);

	regmap_exit(map);
}

static void cache_sync_patch(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	struct reg_sequence patch[2];
	unsigned int rval[BLOCK_TEST_SIZE], val;
	int i;

	/* We need defaults so readback works */
	config = test_regmap_config;
	config.cache_type = t->type;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Stash the original values */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));

	/* Patch a couple of values */
	patch[0].reg = 2;
	patch[0].def = rval[2] + 1;
	patch[0].delay_us = 0;
	patch[1].reg = 5;
	patch[1].def = rval[5] + 1;
	patch[1].delay_us = 0;
	KUNIT_EXPECT_EQ(test, 0, regmap_register_patch(map, patch,
						       ARRAY_SIZE(patch)));

	/* Sync the cache */
	regcache_mark_dirty(map);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[i] = false;
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* The patch should be on the device but not in the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));
		KUNIT_EXPECT_EQ(test, val, rval[i]);

		switch (i) {
		case 2:
		case 5:
			KUNIT_EXPECT_EQ(test, true, data->written[i]);
			KUNIT_EXPECT_EQ(test, data->vals[i], rval[i] + 1);
			break;
		default:
			KUNIT_EXPECT_EQ(test, false, data->written[i]);
			KUNIT_EXPECT_EQ(test, data->vals[i], rval[i]);
			break;
		}
	}

	regmap_exit(map);
}

static void cache_drop(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Ensure the data is read from the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[i] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		KUNIT_EXPECT_FALSE(test, data->read[i]);
		data->read[i] = false;
	}
	KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));

	/* Drop some registers */
	KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 3, 5));

	/* Reread and check only the dropped registers hit the device. */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, data->read[i], i >= 3 && i <= 5);
	KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));

	regmap_exit(map);
}

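/*
 * The raw I/O tests are parameterised over both the cache type and the
 * endianness used for register values on the bus.
 */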
struct raw_test_types {
	const char *name;

	enum regcache_type cache_type;
	enum regmap_endian val_endian;
};

static void raw_to_desc(const struct raw_test_types *t, char *desc)
{
	strcpy(desc, t->name);
}

static const struct raw_test_types raw_types_list[] = {
	{ "none-little",   REGCACHE_NONE,   REGMAP_ENDIAN_LITTLE },
	{ "none-big",      REGCACHE_NONE,   REGMAP_ENDIAN_BIG },
	{ "flat-little",   REGCACHE_FLAT,   REGMAP_ENDIAN_LITTLE },
	{ "flat-big",      REGCACHE_FLAT,   REGMAP_ENDIAN_BIG },
	{ "rbtree-little", REGCACHE_RBTREE, REGMAP_ENDIAN_LITTLE },
	{ "rbtree-big",    REGCACHE_RBTREE, REGMAP_ENDIAN_BIG },
	{ "maple-little",  REGCACHE_MAPLE,  REGMAP_ENDIAN_LITTLE },
	{ "maple-big",     REGCACHE_MAPLE,  REGMAP_ENDIAN_BIG },
};

KUNIT_ARRAY_PARAM(raw_test_types, raw_types_list, raw_to_desc);

static const struct raw_test_types raw_cache_types_list[] = {
	{ "flat-little",   REGCACHE_FLAT,   REGMAP_ENDIAN_LITTLE },
	{ "flat-big",      REGCACHE_FLAT,   REGMAP_ENDIAN_BIG },
	{ "rbtree-little", REGCACHE_RBTREE, REGMAP_ENDIAN_LITTLE },
	{ "rbtree-big",    REGCACHE_RBTREE, REGMAP_ENDIAN_BIG },
	{ "maple-little",  REGCACHE_MAPLE,  REGMAP_ENDIAN_LITTLE },
	{ "maple-big",     REGCACHE_MAPLE,  REGMAP_ENDIAN_BIG },
};

KUNIT_ARRAY_PARAM(raw_test_cache_types, raw_cache_types_list, raw_to_desc);

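/*
 * Base configuration for the raw tests: 16-bit registers and values
 * with little endian register formatting; the value endianness is set
 * per test case by gen_raw_regmap().
 */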
static const struct regmap_config raw_regmap_config = {
	.max_register = BLOCK_TEST_SIZE,

	.reg_format_endian = REGMAP_ENDIAN_LITTLE,
	.reg_bits = 16,
	.val_bits = 16,
};

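/*
 * Create a raw RAM-backed regmap. The random backing store is also used
 * to generate a full set of register defaults in the requested
 * endianness, except for uncached maps where defaults make no sense.
 */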
static struct regmap *gen_raw_regmap(struct regmap_config *config,
				     struct raw_test_types *test_type,
				     struct regmap_ram_data **data)
{
	u16 *buf;
	struct regmap *ret;
	size_t size = (config->max_register + 1) * config->reg_bits / 8;
	int i;
	struct reg_default *defaults;

	config->cache_type = test_type->cache_type;
	config->val_format_endian = test_type->val_endian;
	config->disable_locking = config->cache_type == REGCACHE_RBTREE ||
					config->cache_type == REGCACHE_MAPLE;

	buf = kmalloc(size, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	get_random_bytes(buf, size);

	*data = kzalloc(sizeof(**data), GFP_KERNEL);
	if (!(*data))
		return ERR_PTR(-ENOMEM);
	(*data)->vals = (void *)buf;

	config->num_reg_defaults = config->max_register + 1;
	defaults = kcalloc(config->num_reg_defaults,
			   sizeof(struct reg_default),
			   GFP_KERNEL);
	if (!defaults)
		return ERR_PTR(-ENOMEM);
	config->reg_defaults = defaults;

	for (i = 0; i < config->num_reg_defaults; i++) {
		defaults[i].reg = i;
		switch (test_type->val_endian) {
		case REGMAP_ENDIAN_LITTLE:
			defaults[i].def = le16_to_cpu(buf[i]);
			break;
		case REGMAP_ENDIAN_BIG:
			defaults[i].def = be16_to_cpu(buf[i]);
			break;
		default:
			return ERR_PTR(-EINVAL);
		}
	}

	/*
	 * We use the defaults in the tests but they don't make sense
	 * to the core if there's no cache.
	 */
	if (config->cache_type == REGCACHE_NONE)
		config->num_reg_defaults = 0;

	ret = regmap_init_raw_ram(config, *data);
	if (IS_ERR(ret)) {
		kfree(buf);
		kfree(*data);
	}

	return ret;
}

static void raw_read_defaults_single(struct kunit *test)
{
	struct raw_test_types *t = (struct raw_test_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval;
	int i;

	config = raw_regmap_config;

	map = gen_raw_regmap(&config, t, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Check that we can read the defaults via the API */
	for (i = 0; i < config.max_register + 1; i++) {
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
		KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
	}

	regmap_exit(map);
}

static void raw_read_defaults(struct kunit *test)
{
	struct raw_test_types *t = (struct raw_test_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	u16 *rval;
	u16 def;
	size_t val_len;
	int i;

	config = raw_regmap_config;

	map = gen_raw_regmap(&config, t, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	val_len = sizeof(*rval) * (config.max_register + 1);
	rval = kmalloc(val_len, GFP_KERNEL);
	KUNIT_ASSERT_TRUE(test, rval != NULL);
	if (!rval)
		return;

	/* Check that we can read the defaults via the API */
	KUNIT_EXPECT_EQ(test, 0, regmap_raw_read(map, 0, rval, val_len));
	for (i = 0; i < config.max_register + 1; i++) {
		def = config.reg_defaults[i].def;
		if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
			KUNIT_EXPECT_EQ(test, def, be16_to_cpu(rval[i]));
		} else {
			KUNIT_EXPECT_EQ(test, def, le16_to_cpu(rval[i]));
		}
	}

	kfree(rval);
	regmap_exit(map);
}

static void raw_write_read_single(struct kunit *test)
{
	struct raw_test_types *t = (struct raw_test_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	u16 val;
	unsigned int rval;

	config = raw_regmap_config;

	map = gen_raw_regmap(&config, t, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* If we write a value to a register we can read it back */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
	KUNIT_EXPECT_EQ(test, val, rval);

	regmap_exit(map);
}

static void raw_write(struct kunit *test)
{
	struct raw_test_types *t = (struct raw_test_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	u16 *hw_buf;
	u16 val[2];
	unsigned int rval;
	int i;

	config = raw_regmap_config;

	map = gen_raw_regmap(&config, t, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	hw_buf = (u16 *)data->vals;

	get_random_bytes(&val, sizeof(val));

	/* Do a raw write */
	KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val, sizeof(val)));

	/* We should read back the new values, and defaults for the rest */
	for (i = 0; i < config.max_register + 1; i++) {
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));

		switch (i) {
		case 2:
		case 3:
			if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
				KUNIT_EXPECT_EQ(test, rval,
						be16_to_cpu(val[i % 2]));
			} else {
				KUNIT_EXPECT_EQ(test, rval,
						le16_to_cpu(val[i % 2]));
			}
			break;
		default:
			KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
			break;
		}
	}

	/* The values should appear in the "hardware" */
	KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], val, sizeof(val));

	regmap_exit(map);
}

static void raw_sync(struct kunit *test)
{
	struct raw_test_types *t = (struct raw_test_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	u16 val[2];
	u16 *hw_buf;
	unsigned int rval;
	int i;

	config = raw_regmap_config;

	map = gen_raw_regmap(&config, t, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	hw_buf = (u16 *)data->vals;

	get_random_bytes(&val, sizeof(val));

	/* Do a raw write and a regular write in cache only mode */
	regcache_cache_only(map, true);
	KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val, sizeof(val)));
	if (config.val_format_endian == REGMAP_ENDIAN_BIG)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 6,
						      be16_to_cpu(val[0])));
	else
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 6,
						      le16_to_cpu(val[0])));

	/* We should read back the new values, and defaults for the rest */
	for (i = 0; i < config.max_register + 1; i++) {
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));

		switch (i) {
		case 2:
		case 3:
		case 6:
			if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
				KUNIT_EXPECT_EQ(test, rval,
						be16_to_cpu(val[i % 2]));
			} else {
				KUNIT_EXPECT_EQ(test, rval,
						le16_to_cpu(val[i % 2]));
			}
			break;
		default:
			KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
			break;
		}
	}

	/* The values should not appear in the "hardware" */
	KUNIT_EXPECT_MEMNEQ(test, &hw_buf[2], val, sizeof(val));
	KUNIT_EXPECT_MEMNEQ(test, &hw_buf[6], val, sizeof(u16));

	for (i = 0; i < config.max_register + 1; i++)
		data->written[i] = false;

	/* Do the sync */
	regcache_cache_only(map, false);
	regcache_mark_dirty(map);
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* The values should now appear in the "hardware" */
	KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], val, sizeof(val));
	KUNIT_EXPECT_MEMEQ(test, &hw_buf[6], val, sizeof(u16));

	regmap_exit(map);
}

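/*
 * Tests that need a real cache run against the flat, rbtree and maple
 * parameter set; cache_drop additionally needs a sparse cache so it
 * only runs against rbtree and maple.
 */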
static struct kunit_case regmap_test_cases[] = {
	KUNIT_CASE_PARAM(basic_read_write, regcache_types_gen_params),
	KUNIT_CASE_PARAM(bulk_write, regcache_types_gen_params),
	KUNIT_CASE_PARAM(bulk_read, regcache_types_gen_params),
	KUNIT_CASE_PARAM(write_readonly, regcache_types_gen_params),
	KUNIT_CASE_PARAM(read_writeonly, regcache_types_gen_params),
	KUNIT_CASE_PARAM(reg_defaults, regcache_types_gen_params),
	KUNIT_CASE_PARAM(reg_defaults_read_dev, regcache_types_gen_params),
	KUNIT_CASE_PARAM(register_patch, regcache_types_gen_params),
	KUNIT_CASE_PARAM(stride, regcache_types_gen_params),
	KUNIT_CASE_PARAM(basic_ranges, regcache_types_gen_params),
	KUNIT_CASE_PARAM(stress_insert, regcache_types_gen_params),
	KUNIT_CASE_PARAM(cache_bypass, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_defaults, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_readonly, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_patch, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_drop, sparse_cache_types_gen_params),

	KUNIT_CASE_PARAM(raw_read_defaults_single, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_read_defaults, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_write_read_single, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_write, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_sync, raw_test_cache_types_gen_params),
	{}
};

static struct kunit_suite regmap_test_suite = {
	.name = "regmap",
	.test_cases = regmap_test_cases,
};
kunit_test_suite(regmap_test_suite);

MODULE_LICENSE("GPL v2");