1 // SPDX-License-Identifier: GPL-2.0
2 //
3 // regmap KUnit tests
4 //
5 // Copyright 2023 Arm Ltd
6 
7 #include <kunit/test.h>
8 #include "internal.h"
9 
10 #define BLOCK_TEST_SIZE 12
11 
/*
 * Baseline config shared by all test cases: 13 registers (0..12),
 * unit stride, native-word values.  Cases copy this and then tweak
 * the fields they care about (cache_type, stride, defaults, ...).
 */
static const struct regmap_config test_regmap_config = {
	.max_register = BLOCK_TEST_SIZE,
	.reg_stride = 1,
	.val_bits = sizeof(unsigned int) * 8,
};
17 
/* Pairs a cache implementation with a human readable test-case name */
struct regcache_types {
	enum regcache_type type;	/* cache implementation under test */
	const char *name;		/* label used in KUnit output */
};
22 
23 static void case_to_desc(const struct regcache_types *t, char *desc)
24 {
25 	strcpy(desc, t->name);
26 }
27 
/* Every cache type, including running with no cache at all */
static const struct regcache_types regcache_types_list[] = {
	{ REGCACHE_NONE, "none" },
	{ REGCACHE_FLAT, "flat" },
	{ REGCACHE_RBTREE, "rbtree" },
	{ REGCACHE_MAPLE, "maple" },
};

KUNIT_ARRAY_PARAM(regcache_types, regcache_types_list, case_to_desc);
36 
/* Only types that actually cache data (REGCACHE_NONE excluded) */
static const struct regcache_types real_cache_types_list[] = {
	{ REGCACHE_FLAT, "flat" },
	{ REGCACHE_RBTREE, "rbtree" },
	{ REGCACHE_MAPLE, "maple" },
};

KUNIT_ARRAY_PARAM(real_cache_types, real_cache_types_list, case_to_desc);
44 
/* Sparse caches, which support dropping individual register regions */
static const struct regcache_types sparse_cache_types_list[] = {
	{ REGCACHE_RBTREE, "rbtree" },
	{ REGCACHE_MAPLE, "maple" },
};

KUNIT_ARRAY_PARAM(sparse_cache_types, sparse_cache_types_list, case_to_desc);
51 
52 static struct regmap *gen_regmap(struct regmap_config *config,
53 				 struct regmap_ram_data **data)
54 {
55 	unsigned int *buf;
56 	struct regmap *ret;
57 	size_t size = (config->max_register + 1) * sizeof(unsigned int);
58 	int i;
59 	struct reg_default *defaults;
60 
61 	buf = kmalloc(size, GFP_KERNEL);
62 	if (!buf)
63 		return ERR_PTR(-ENOMEM);
64 
65 	get_random_bytes(buf, size);
66 
67 	*data = kzalloc(sizeof(**data), GFP_KERNEL);
68 	if (!(*data))
69 		return ERR_PTR(-ENOMEM);
70 	(*data)->vals = buf;
71 
72 	if (config->num_reg_defaults) {
73 		defaults = kcalloc(config->num_reg_defaults,
74 				   sizeof(struct reg_default),
75 				   GFP_KERNEL);
76 		if (!defaults)
77 			return ERR_PTR(-ENOMEM);
78 		config->reg_defaults = defaults;
79 
80 		for (i = 0; i < config->num_reg_defaults; i++) {
81 			defaults[i].reg = i * config->reg_stride;
82 			defaults[i].def = buf[i * config->reg_stride];
83 		}
84 	}
85 
86 	ret = regmap_init_ram(config, *data);
87 	if (IS_ERR(ret)) {
88 		kfree(buf);
89 		kfree(*data);
90 	}
91 
92 	return ret;
93 }
94 
95 static void basic_read_write(struct kunit *test)
96 {
97 	struct regcache_types *t = (struct regcache_types *)test->param_value;
98 	struct regmap *map;
99 	struct regmap_config config;
100 	struct regmap_ram_data *data;
101 	unsigned int val, rval;
102 
103 	config = test_regmap_config;
104 	config.cache_type = t->type;
105 
106 	map = gen_regmap(&config, &data);
107 	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
108 	if (IS_ERR(map))
109 		return;
110 
111 	get_random_bytes(&val, sizeof(val));
112 
113 	/* If we write a value to a register we can read it back */
114 	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
115 	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
116 	KUNIT_EXPECT_EQ(test, val, rval);
117 
118 	/* If using a cache the cache satisfied the read */
119 	KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[0]);
120 
121 	regmap_exit(map);
122 }
123 
124 static void bulk_write(struct kunit *test)
125 {
126 	struct regcache_types *t = (struct regcache_types *)test->param_value;
127 	struct regmap *map;
128 	struct regmap_config config;
129 	struct regmap_ram_data *data;
130 	unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE];
131 	int i;
132 
133 	config = test_regmap_config;
134 	config.cache_type = t->type;
135 
136 	map = gen_regmap(&config, &data);
137 	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
138 	if (IS_ERR(map))
139 		return;
140 
141 	get_random_bytes(&val, sizeof(val));
142 
143 	/*
144 	 * Data written via the bulk API can be read back with single
145 	 * reads.
146 	 */
147 	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, 0, val,
148 						   BLOCK_TEST_SIZE));
149 	for (i = 0; i < BLOCK_TEST_SIZE; i++)
150 		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval[i]));
151 
152 	KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));
153 
154 	/* If using a cache the cache satisfied the read */
155 	for (i = 0; i < BLOCK_TEST_SIZE; i++)
156 		KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
157 
158 	regmap_exit(map);
159 }
160 
161 static void bulk_read(struct kunit *test)
162 {
163 	struct regcache_types *t = (struct regcache_types *)test->param_value;
164 	struct regmap *map;
165 	struct regmap_config config;
166 	struct regmap_ram_data *data;
167 	unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE];
168 	int i;
169 
170 	config = test_regmap_config;
171 	config.cache_type = t->type;
172 
173 	map = gen_regmap(&config, &data);
174 	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
175 	if (IS_ERR(map))
176 		return;
177 
178 	get_random_bytes(&val, sizeof(val));
179 
180 	/* Data written as single writes can be read via the bulk API */
181 	for (i = 0; i < BLOCK_TEST_SIZE; i++)
182 		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, val[i]));
183 	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
184 						  BLOCK_TEST_SIZE));
185 	KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));
186 
187 	/* If using a cache the cache satisfied the read */
188 	for (i = 0; i < BLOCK_TEST_SIZE; i++)
189 		KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
190 
191 	regmap_exit(map);
192 }
193 
194 static void reg_defaults(struct kunit *test)
195 {
196 	struct regcache_types *t = (struct regcache_types *)test->param_value;
197 	struct regmap *map;
198 	struct regmap_config config;
199 	struct regmap_ram_data *data;
200 	unsigned int rval[BLOCK_TEST_SIZE];
201 	int i;
202 
203 	config = test_regmap_config;
204 	config.cache_type = t->type;
205 	config.num_reg_defaults = BLOCK_TEST_SIZE;
206 
207 	map = gen_regmap(&config, &data);
208 	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
209 	if (IS_ERR(map))
210 		return;
211 
212 	/* Read back the expected default data */
213 	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
214 						  BLOCK_TEST_SIZE));
215 	KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));
216 
217 	/* The data should have been read from cache if there was one */
218 	for (i = 0; i < BLOCK_TEST_SIZE; i++)
219 		KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
220 }
221 
222 static void reg_defaults_read_dev(struct kunit *test)
223 {
224 	struct regcache_types *t = (struct regcache_types *)test->param_value;
225 	struct regmap *map;
226 	struct regmap_config config;
227 	struct regmap_ram_data *data;
228 	unsigned int rval[BLOCK_TEST_SIZE];
229 	int i;
230 
231 	config = test_regmap_config;
232 	config.cache_type = t->type;
233 	config.num_reg_defaults_raw = BLOCK_TEST_SIZE;
234 
235 	map = gen_regmap(&config, &data);
236 	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
237 	if (IS_ERR(map))
238 		return;
239 
240 	/* We should have read the cache defaults back from the map */
241 	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
242 		KUNIT_EXPECT_EQ(test, t->type != REGCACHE_NONE, data->read[i]);
243 		data->read[i] = false;
244 	}
245 
246 	/* Read back the expected default data */
247 	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
248 						  BLOCK_TEST_SIZE));
249 	KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));
250 
251 	/* The data should have been read from cache if there was one */
252 	for (i = 0; i < BLOCK_TEST_SIZE; i++)
253 		KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
254 }
255 
256 static void register_patch(struct kunit *test)
257 {
258 	struct regcache_types *t = (struct regcache_types *)test->param_value;
259 	struct regmap *map;
260 	struct regmap_config config;
261 	struct regmap_ram_data *data;
262 	struct reg_sequence patch[2];
263 	unsigned int rval[BLOCK_TEST_SIZE];
264 	int i;
265 
266 	/* We need defaults so readback works */
267 	config = test_regmap_config;
268 	config.cache_type = t->type;
269 	config.num_reg_defaults = BLOCK_TEST_SIZE;
270 
271 	map = gen_regmap(&config, &data);
272 	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
273 	if (IS_ERR(map))
274 		return;
275 
276 	/* Stash the original values */
277 	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
278 						  BLOCK_TEST_SIZE));
279 
280 	/* Patch a couple of values */
281 	patch[0].reg = 2;
282 	patch[0].def = rval[2] + 1;
283 	patch[0].delay_us = 0;
284 	patch[1].reg = 5;
285 	patch[1].def = rval[5] + 1;
286 	patch[1].delay_us = 0;
287 	KUNIT_EXPECT_EQ(test, 0, regmap_register_patch(map, patch,
288 						       ARRAY_SIZE(patch)));
289 
290 	/* Only the patched registers are written */
291 	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
292 		switch (i) {
293 		case 2:
294 		case 5:
295 			KUNIT_EXPECT_TRUE(test, data->written[i]);
296 			KUNIT_EXPECT_EQ(test, data->vals[i], rval[i] + 1);
297 			break;
298 		default:
299 			KUNIT_EXPECT_FALSE(test, data->written[i]);
300 			KUNIT_EXPECT_EQ(test, data->vals[i], rval[i]);
301 			break;
302 		}
303 	}
304 
305 	regmap_exit(map);
306 }
307 
308 static void stride(struct kunit *test)
309 {
310 	struct regcache_types *t = (struct regcache_types *)test->param_value;
311 	struct regmap *map;
312 	struct regmap_config config;
313 	struct regmap_ram_data *data;
314 	unsigned int rval;
315 	int i;
316 
317 	config = test_regmap_config;
318 	config.cache_type = t->type;
319 	config.reg_stride = 2;
320 	config.num_reg_defaults = BLOCK_TEST_SIZE / 2;
321 
322 	map = gen_regmap(&config, &data);
323 	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
324 	if (IS_ERR(map))
325 		return;
326 
327 	/* Only even registers can be accessed, try both read and write */
328 	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
329 		data->read[i] = false;
330 		data->written[i] = false;
331 
332 		if (i % 2) {
333 			KUNIT_EXPECT_NE(test, 0, regmap_read(map, i, &rval));
334 			KUNIT_EXPECT_NE(test, 0, regmap_write(map, i, rval));
335 			KUNIT_EXPECT_FALSE(test, data->read[i]);
336 			KUNIT_EXPECT_FALSE(test, data->written[i]);
337 		} else {
338 			KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
339 			KUNIT_EXPECT_EQ(test, data->vals[i], rval);
340 			KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE,
341 					data->read[i]);
342 
343 			KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, rval));
344 			KUNIT_EXPECT_TRUE(test, data->written[i]);
345 		}
346 	}
347 
348 	regmap_exit(map);
349 }
350 
/*
 * Indirect range used by the range tests: virtual registers 20..40 are
 * accessed through a physical window at registers 4..13, with register
 * 1 acting as the page selector.
 */
static struct regmap_range_cfg test_range = {
	.selector_reg = 1,
	.selector_mask = 0xff,

	.window_start = 4,
	.window_len = 10,

	.range_min = 20,
	.range_max = 40,
};
361 
362 static bool test_range_volatile(struct device *dev, unsigned int reg)
363 {
364 	if (reg >= test_range.window_start &&
365 	    reg <= test_range.selector_reg + test_range.window_len)
366 		return true;
367 
368 	if (reg >= test_range.range_min && reg <= test_range.range_max)
369 		return true;
370 
371 	return false;
372 }
373 
/*
 * Accesses to the virtual range must be translated into a selector
 * write plus an access inside the physical window, with no physical
 * access to the virtual register numbers themselves.
 */
static void basic_ranges(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;
	config.volatile_reg = test_range_volatile;
	config.ranges = &test_range;
	config.num_ranges = 1;
	config.max_register = test_range.range_max;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/*
	 * Clear the access tracking for the virtual registers.
	 * NOTE(review): the loop bound is `< range_max`, so register
	 * range_max itself is never reset or checked — confirm intended.
	 */
	for (i = test_range.range_min; i < test_range.range_max; i++) {
		data->read[i] = false;
		data->written[i] = false;
	}

	/* Reset the page to a non-zero value to trigger a change */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.selector_reg,
					      test_range.range_max));

	/* Check we set the page and use the window for writes */
	data->written[test_range.selector_reg] = false;
	data->written[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);

	/* Same again one window further into the range (forces a page flip) */
	data->written[test_range.selector_reg] = false;
	data->written[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map,
					      test_range.range_min +
					      test_range.window_len,
					      0));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);

	/* Same for reads */
	data->written[test_range.selector_reg] = false;
	data->read[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, test_range.range_min, &val));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);

	/* Read one window further in, again forcing a selector update */
	data->written[test_range.selector_reg] = false;
	data->read[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map,
					     test_range.range_min +
					     test_range.window_len,
					     &val));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);

	/* No physical access triggered in the virtual range */
	for (i = test_range.range_min; i < test_range.range_max; i++) {
		KUNIT_EXPECT_FALSE(test, data->read[i]);
		KUNIT_EXPECT_FALSE(test, data->written[i]);
	}

	regmap_exit(map);
}
444 
445 /* Try to stress dynamic creation of cache data structures */
446 static void stress_insert(struct kunit *test)
447 {
448 	struct regcache_types *t = (struct regcache_types *)test->param_value;
449 	struct regmap *map;
450 	struct regmap_config config;
451 	struct regmap_ram_data *data;
452 	unsigned int rval, *vals;
453 	size_t buf_sz;
454 	int i;
455 
456 	config = test_regmap_config;
457 	config.cache_type = t->type;
458 	config.max_register = 300;
459 
460 	map = gen_regmap(&config, &data);
461 	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
462 	if (IS_ERR(map))
463 		return;
464 
465 	vals = kunit_kcalloc(test, sizeof(unsigned long), config.max_register,
466 			     GFP_KERNEL);
467 	KUNIT_ASSERT_FALSE(test, vals == NULL);
468 	buf_sz = sizeof(unsigned long) * config.max_register;
469 
470 	get_random_bytes(vals, buf_sz);
471 
472 	/* Write data into the map/cache in ever decreasing strides */
473 	for (i = 0; i < config.max_register; i += 100)
474 		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
475 	for (i = 0; i < config.max_register; i += 50)
476 		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
477 	for (i = 0; i < config.max_register; i += 25)
478 		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
479 	for (i = 0; i < config.max_register; i += 10)
480 		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
481 	for (i = 0; i < config.max_register; i += 5)
482 		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
483 	for (i = 0; i < config.max_register; i += 3)
484 		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
485 	for (i = 0; i < config.max_register; i += 2)
486 		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
487 	for (i = 0; i < config.max_register; i++)
488 		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
489 
490 	/* Do reads from the cache (if there is one) match? */
491 	for (i = 0; i < config.max_register; i ++) {
492 		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
493 		KUNIT_EXPECT_EQ(test, rval, vals[i]);
494 		KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
495 	}
496 
497 	regmap_exit(map);
498 }
499 
500 static void cache_bypass(struct kunit *test)
501 {
502 	struct regcache_types *t = (struct regcache_types *)test->param_value;
503 	struct regmap *map;
504 	struct regmap_config config;
505 	struct regmap_ram_data *data;
506 	unsigned int val, rval;
507 
508 	config = test_regmap_config;
509 	config.cache_type = t->type;
510 
511 	map = gen_regmap(&config, &data);
512 	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
513 	if (IS_ERR(map))
514 		return;
515 
516 	get_random_bytes(&val, sizeof(val));
517 
518 	/* Ensure the cache has a value in it */
519 	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
520 
521 	/* Bypass then write a different value */
522 	regcache_cache_bypass(map, true);
523 	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val + 1));
524 
525 	/* Read the bypassed value */
526 	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
527 	KUNIT_EXPECT_EQ(test, val + 1, rval);
528 	KUNIT_EXPECT_EQ(test, data->vals[0], rval);
529 
530 	/* Disable bypass, the cache should still return the original value */
531 	regcache_cache_bypass(map, false);
532 	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
533 	KUNIT_EXPECT_EQ(test, val, rval);
534 
535 	regmap_exit(map);
536 }
537 
538 static void cache_sync(struct kunit *test)
539 {
540 	struct regcache_types *t = (struct regcache_types *)test->param_value;
541 	struct regmap *map;
542 	struct regmap_config config;
543 	struct regmap_ram_data *data;
544 	unsigned int val[BLOCK_TEST_SIZE];
545 	int i;
546 
547 	config = test_regmap_config;
548 	config.cache_type = t->type;
549 
550 	map = gen_regmap(&config, &data);
551 	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
552 	if (IS_ERR(map))
553 		return;
554 
555 	get_random_bytes(&val, sizeof(val));
556 
557 	/* Put some data into the cache */
558 	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, 0, val,
559 						   BLOCK_TEST_SIZE));
560 	for (i = 0; i < BLOCK_TEST_SIZE; i++)
561 		data->written[i] = false;
562 
563 	/* Trash the data on the device itself then resync */
564 	regcache_mark_dirty(map);
565 	memset(data->vals, 0, sizeof(val));
566 	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
567 
568 	/* Did we just write the correct data out? */
569 	KUNIT_EXPECT_MEMEQ(test, data->vals, val, sizeof(val));
570 	for (i = 0; i < BLOCK_TEST_SIZE; i++)
571 		KUNIT_EXPECT_EQ(test, true, data->written[i]);
572 
573 	regmap_exit(map);
574 }
575 
576 static void cache_sync_defaults(struct kunit *test)
577 {
578 	struct regcache_types *t = (struct regcache_types *)test->param_value;
579 	struct regmap *map;
580 	struct regmap_config config;
581 	struct regmap_ram_data *data;
582 	unsigned int val;
583 	int i;
584 
585 	config = test_regmap_config;
586 	config.cache_type = t->type;
587 	config.num_reg_defaults = BLOCK_TEST_SIZE;
588 
589 	map = gen_regmap(&config, &data);
590 	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
591 	if (IS_ERR(map))
592 		return;
593 
594 	get_random_bytes(&val, sizeof(val));
595 
596 	/* Change the value of one register */
597 	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 2, val));
598 
599 	/* Resync */
600 	regcache_mark_dirty(map);
601 	for (i = 0; i < BLOCK_TEST_SIZE; i++)
602 		data->written[i] = false;
603 	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
604 
605 	/* Did we just sync the one register we touched? */
606 	for (i = 0; i < BLOCK_TEST_SIZE; i++)
607 		KUNIT_EXPECT_EQ(test, i == 2, data->written[i]);
608 
609 	regmap_exit(map);
610 }
611 
/*
 * A registered patch must be applied to the device on regcache_sync()
 * while the cache itself keeps reporting the pre-patch values.
 */
static void cache_sync_patch(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	struct reg_sequence patch[2];
	unsigned int rval[BLOCK_TEST_SIZE], val;
	int i;

	/* We need defaults so readback works */
	config = test_regmap_config;
	config.cache_type = t->type;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Stash the original values */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));

	/* Patch a couple of values */
	patch[0].reg = 2;
	patch[0].def = rval[2] + 1;
	patch[0].delay_us = 0;
	patch[1].reg = 5;
	patch[1].def = rval[5] + 1;
	patch[1].delay_us = 0;
	KUNIT_EXPECT_EQ(test, 0, regmap_register_patch(map, patch,
						       ARRAY_SIZE(patch)));

	/* Sync the cache */
	regcache_mark_dirty(map);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[i] = false;
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* The patch should be on the device but not in the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		/* Cached reads still return the pre-patch values */
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));
		KUNIT_EXPECT_EQ(test, val, rval[i]);

		switch (i) {
		case 2:
		case 5:
			/* Patched registers: written out with the bumped value */
			KUNIT_EXPECT_EQ(test, true, data->written[i]);
			KUNIT_EXPECT_EQ(test, data->vals[i], rval[i] + 1);
			break;
		default:
			/* Everything else untouched on the device */
			KUNIT_EXPECT_EQ(test, false, data->written[i]);
			KUNIT_EXPECT_EQ(test, data->vals[i], rval[i]);
			break;
		}
	}

	regmap_exit(map);
}
672 
673 static void cache_drop(struct kunit *test)
674 {
675 	struct regcache_types *t = (struct regcache_types *)test->param_value;
676 	struct regmap *map;
677 	struct regmap_config config;
678 	struct regmap_ram_data *data;
679 	unsigned int rval[BLOCK_TEST_SIZE];
680 	int i;
681 
682 	config = test_regmap_config;
683 	config.cache_type = t->type;
684 	config.num_reg_defaults = BLOCK_TEST_SIZE;
685 
686 	map = gen_regmap(&config, &data);
687 	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
688 	if (IS_ERR(map))
689 		return;
690 
691 	/* Ensure the data is read from the cache */
692 	for (i = 0; i < BLOCK_TEST_SIZE; i++)
693 		data->read[i] = false;
694 	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
695 						  BLOCK_TEST_SIZE));
696 	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
697 		KUNIT_EXPECT_FALSE(test, data->read[i]);
698 		data->read[i] = false;
699 	}
700 	KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));
701 
702 	/* Drop some registers */
703 	KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 3, 5));
704 
705 	/* Reread and check only the dropped registers hit the device. */
706 	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
707 						  BLOCK_TEST_SIZE));
708 	for (i = 0; i < BLOCK_TEST_SIZE; i++)
709 		KUNIT_EXPECT_EQ(test, data->read[i], i >= 3 && i <= 5);
710 	KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));
711 
712 	regmap_exit(map);
713 }
714 
/*
 * Test case table: the first group runs against every cache type
 * (including none), the cache_* cases only against types that really
 * cache, and cache_drop only against sparse caches which support
 * dropping regions.
 */
static struct kunit_case regmap_test_cases[] = {
	KUNIT_CASE_PARAM(basic_read_write, regcache_types_gen_params),
	KUNIT_CASE_PARAM(bulk_write, regcache_types_gen_params),
	KUNIT_CASE_PARAM(bulk_read, regcache_types_gen_params),
	KUNIT_CASE_PARAM(reg_defaults, regcache_types_gen_params),
	KUNIT_CASE_PARAM(reg_defaults_read_dev, regcache_types_gen_params),
	KUNIT_CASE_PARAM(register_patch, regcache_types_gen_params),
	KUNIT_CASE_PARAM(stride, regcache_types_gen_params),
	KUNIT_CASE_PARAM(basic_ranges, regcache_types_gen_params),
	KUNIT_CASE_PARAM(stress_insert, regcache_types_gen_params),
	KUNIT_CASE_PARAM(cache_bypass, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_defaults, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_patch, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_drop, sparse_cache_types_gen_params),
	{}
};
732 
/* Top-level KUnit suite registration for the regmap tests */
static struct kunit_suite regmap_test_suite = {
	.name = "regmap",
	.test_cases = regmap_test_cases,
};
kunit_test_suite(regmap_test_suite);

MODULE_LICENSE("GPL v2");
740