xref: /openbmc/linux/drivers/clk/clk_test.c (revision a93fbb00)
// SPDX-License-Identifier: GPL-2.0
/*
 * KUnit test for clk rate management
 */
#include <linux/clk.h>
#include <linux/clk-provider.h>

/* Needed for clk_hw_get_clk() */
#include "clk.h"

#include <kunit/test.h>

#define DUMMY_CLOCK_INIT_RATE	(42 * 1000 * 1000)
#define DUMMY_CLOCK_RATE_1	(142 * 1000 * 1000)
#define DUMMY_CLOCK_RATE_2	(242 * 1000 * 1000)

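/*
 * A dummy clock backed purely by a software rate, used as the device
 * under test for the rate-management cases below.
 */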
struct clk_dummy_context {
	struct clk_hw hw;
	unsigned long rate;
};

static unsigned long clk_dummy_recalc_rate(struct clk_hw *hw,
					   unsigned long parent_rate)
{
	struct clk_dummy_context *ctx =
		container_of(hw, struct clk_dummy_context, hw);

	return ctx->rate;
}

static int clk_dummy_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	/* Just return the same rate without modifying it */
	return 0;
}

static int clk_dummy_maximize_rate(struct clk_hw *hw,
				   struct clk_rate_request *req)
{
	/*
	 * If there's a maximum set, always run the clock at the maximum
	 * allowed.
	 */
	if (req->max_rate < ULONG_MAX)
		req->rate = req->max_rate;

	return 0;
}

static int clk_dummy_minimize_rate(struct clk_hw *hw,
				   struct clk_rate_request *req)
{
	/*
	 * If there's a minimum set, always run the clock at the minimum
	 * allowed.
	 */
	if (req->min_rate > 0)
		req->rate = req->min_rate;

	return 0;
}

static int clk_dummy_set_rate(struct clk_hw *hw,
			      unsigned long rate,
			      unsigned long parent_rate)
{
	struct clk_dummy_context *ctx =
		container_of(hw, struct clk_dummy_context, hw);

	ctx->rate = rate;
	return 0;
}

static int clk_dummy_single_set_parent(struct clk_hw *hw, u8 index)
{
	if (index >= clk_hw_get_num_parents(hw))
		return -EINVAL;

	return 0;
}

static u8 clk_dummy_single_get_parent(struct clk_hw *hw)
{
	return 0;
}

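/*
 * Three flavours of rate ops sharing the same recalc/set implementation:
 * a plain pass-through, one that always runs at the maximum allowed rate,
 * and one that always runs at the minimum allowed rate.
 */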
static const struct clk_ops clk_dummy_rate_ops = {
	.recalc_rate = clk_dummy_recalc_rate,
	.determine_rate = clk_dummy_determine_rate,
	.set_rate = clk_dummy_set_rate,
};

static const struct clk_ops clk_dummy_maximize_rate_ops = {
	.recalc_rate = clk_dummy_recalc_rate,
	.determine_rate = clk_dummy_maximize_rate,
	.set_rate = clk_dummy_set_rate,
};

static const struct clk_ops clk_dummy_minimize_rate_ops = {
	.recalc_rate = clk_dummy_recalc_rate,
	.determine_rate = clk_dummy_minimize_rate,
	.set_rate = clk_dummy_set_rate,
};

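/* Mux-only ops: parent selection only, with no rate operations of its own. */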
static const struct clk_ops clk_dummy_single_parent_ops = {
	.set_parent = clk_dummy_single_set_parent,
	.get_parent = clk_dummy_single_get_parent,
};

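/*
 * Register a dummy clock using the given ops and stash its context in
 * test->priv so each test case can reach both the clk_hw and the backing
 * software rate.
 */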
static int clk_test_init_with_ops(struct kunit *test, const struct clk_ops *ops)
{
	struct clk_dummy_context *ctx;
	struct clk_init_data init = { };
	int ret;

	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	ctx->rate = DUMMY_CLOCK_INIT_RATE;
	test->priv = ctx;

	init.name = "test_dummy_rate";
	init.ops = ops;
	ctx->hw.init = &init;

	ret = clk_hw_register(NULL, &ctx->hw);
	if (ret)
		return ret;

	return 0;
}

static int clk_test_init(struct kunit *test)
{
	return clk_test_init_with_ops(test, &clk_dummy_rate_ops);
}

static int clk_maximize_test_init(struct kunit *test)
{
	return clk_test_init_with_ops(test, &clk_dummy_maximize_rate_ops);
}

static int clk_minimize_test_init(struct kunit *test)
{
	return clk_test_init_with_ops(test, &clk_dummy_minimize_rate_ops);
}

static void clk_test_exit(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;

	clk_hw_unregister(&ctx->hw);
}

/*
 * Test that the actual rate matches what is returned by clk_get_rate()
 */
static void clk_test_get_rate(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = hw->clk;
	unsigned long rate;

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, ctx->rate);
}

/*
 * Test that, after a call to clk_set_rate(), the rate returned by
 * clk_get_rate() matches.
 *
 * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
 * modify the requested rate, which is the case for clk_dummy_rate_ops.
 */
static void clk_test_set_get_rate(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = hw->clk;
	unsigned long rate;

	KUNIT_ASSERT_EQ(test,
			clk_set_rate(clk, DUMMY_CLOCK_RATE_1),
			0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
}

/*
 * Test that, after several calls to clk_set_rate(), the rate returned
 * by clk_get_rate() matches the last one.
 *
 * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
 * modify the requested rate, which is the case for clk_dummy_rate_ops.
 */
static void clk_test_set_set_get_rate(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = hw->clk;
	unsigned long rate;

	KUNIT_ASSERT_EQ(test,
			clk_set_rate(clk, DUMMY_CLOCK_RATE_1),
			0);

	KUNIT_ASSERT_EQ(test,
			clk_set_rate(clk, DUMMY_CLOCK_RATE_2),
			0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
}

/*
 * Test that clk_round_rate() and clk_set_rate() are consistent and will
 * return the same frequency.
 */
static void clk_test_round_set_get_rate(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = hw->clk;
	unsigned long rounded_rate, set_rate;

	rounded_rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1);
	KUNIT_ASSERT_GT(test, rounded_rate, 0);
	KUNIT_EXPECT_EQ(test, rounded_rate, DUMMY_CLOCK_RATE_1);

	KUNIT_ASSERT_EQ(test,
			clk_set_rate(clk, DUMMY_CLOCK_RATE_1),
			0);

	set_rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, set_rate, 0);
	KUNIT_EXPECT_EQ(test, rounded_rate, set_rate);
}

static struct kunit_case clk_test_cases[] = {
	KUNIT_CASE(clk_test_get_rate),
	KUNIT_CASE(clk_test_set_get_rate),
	KUNIT_CASE(clk_test_set_set_get_rate),
	KUNIT_CASE(clk_test_round_set_get_rate),
	{}
};

static struct kunit_suite clk_test_suite = {
	.name = "clk-test",
	.init = clk_test_init,
	.exit = clk_test_exit,
	.test_cases = clk_test_cases,
};

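/*
 * A single-parent mux clock together with the context of the parent it
 * references, so a test can register the child before the parent exists.
 */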
struct clk_single_parent_ctx {
	struct clk_dummy_context parent_ctx;
	struct clk_hw hw;
};

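/*
 * Register the child mux first, while its "orphan_parent" parent doesn't
 * exist yet, so the mux starts out as an orphan; the parent is only
 * registered afterwards.
 */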
static int clk_orphan_transparent_single_parent_mux_test_init(struct kunit *test)
{
	struct clk_single_parent_ctx *ctx;
	struct clk_init_data init = { };
	const char * const parents[] = { "orphan_parent" };
	int ret;

	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	test->priv = ctx;

	init.name = "test_orphan_dummy_parent";
	init.ops = &clk_dummy_single_parent_ops;
	init.parent_names = parents;
	init.num_parents = ARRAY_SIZE(parents);
	init.flags = CLK_SET_RATE_PARENT;
	ctx->hw.init = &init;

	ret = clk_hw_register(NULL, &ctx->hw);
	if (ret)
		return ret;

	memset(&init, 0, sizeof(init));
	init.name = "orphan_parent";
	init.ops = &clk_dummy_rate_ops;
	ctx->parent_ctx.hw.init = &init;
	ctx->parent_ctx.rate = DUMMY_CLOCK_INIT_RATE;

	ret = clk_hw_register(NULL, &ctx->parent_ctx.hw);
	if (ret)
		return ret;

	return 0;
}

static void clk_orphan_transparent_single_parent_mux_test_exit(struct kunit *test)
{
	struct clk_single_parent_ctx *ctx = test->priv;

	clk_hw_unregister(&ctx->hw);
	clk_hw_unregister(&ctx->parent_ctx.hw);
}

/*
 * Test that a mux-only clock, with an initial rate within a range,
 * will still have the same rate after the range has been enforced.
 */
static void clk_test_orphan_transparent_parent_mux_set_range(struct kunit *test)
{
	struct clk_single_parent_ctx *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = hw->clk;
	unsigned long rate, new_rate;

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);

	KUNIT_ASSERT_EQ(test,
			clk_set_rate_range(clk,
					   ctx->parent_ctx.rate - 1000,
					   ctx->parent_ctx.rate + 1000),
			0);

	new_rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, new_rate, 0);
	KUNIT_EXPECT_EQ(test, rate, new_rate);
}

static struct kunit_case clk_orphan_transparent_single_parent_mux_test_cases[] = {
	KUNIT_CASE(clk_test_orphan_transparent_parent_mux_set_range),
	{}
};

static struct kunit_suite clk_orphan_transparent_single_parent_test_suite = {
	.name = "clk-orphan-transparent-single-parent-test",
	.init = clk_orphan_transparent_single_parent_mux_test_init,
	.exit = clk_orphan_transparent_single_parent_mux_test_exit,
	.test_cases = clk_orphan_transparent_single_parent_mux_test_cases,
};

/*
 * Test that clk_set_rate_range() won't return an error for a valid
 * range and that it ensures the clock's rate ends up within the
 * boundaries.
 */
static void clk_range_test_set_range(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = hw->clk;
	unsigned long rate;

	KUNIT_ASSERT_EQ(test,
			clk_set_rate_range(clk,
					   DUMMY_CLOCK_RATE_1,
					   DUMMY_CLOCK_RATE_2),
			0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
}

/*
 * Test that calling clk_set_rate_range() with a minimum rate higher
 * than the maximum rate returns an error.
 */
static void clk_range_test_set_range_invalid(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = hw->clk;

	KUNIT_EXPECT_LT(test,
			clk_set_rate_range(clk,
					   DUMMY_CLOCK_RATE_1 + 1000,
					   DUMMY_CLOCK_RATE_1),
			0);
}

/*
 * Test that users can't set multiple, disjoint ranges that would be
 * impossible to meet.
 */
static void clk_range_test_multiple_disjoints_range(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *user1, *user2;

	user1 = clk_hw_get_clk(hw, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);

	user2 = clk_hw_get_clk(hw, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);

	KUNIT_ASSERT_EQ(test,
			clk_set_rate_range(user1, 1000, 2000),
			0);

	KUNIT_EXPECT_LT(test,
			clk_set_rate_range(user2, 3000, 4000),
			0);

	clk_put(user2);
	clk_put(user1);
}

/*
 * Test that if our clock has some boundaries and we try to round a rate
 * lower than the minimum, the returned rate will be within range.
 */
static void clk_range_test_set_range_round_rate_lower(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = hw->clk;
	long rate;

	KUNIT_ASSERT_EQ(test,
			clk_set_rate_range(clk,
					   DUMMY_CLOCK_RATE_1,
					   DUMMY_CLOCK_RATE_2),
			0);

	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
}

/*
 * Test that if our clock has some boundaries and we try to set a rate
 * lower than the minimum, the new rate will be within range.
 */
static void clk_range_test_set_range_set_rate_lower(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = hw->clk;
	unsigned long rate;

	KUNIT_ASSERT_EQ(test,
			clk_set_rate_range(clk,
					   DUMMY_CLOCK_RATE_1,
					   DUMMY_CLOCK_RATE_2),
			0);

	KUNIT_ASSERT_EQ(test,
			clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
			0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
}

/*
 * Test that if our clock has some boundaries and we try to round and
 * set a rate lower than the minimum, the rate returned by
 * clk_round_rate() will be consistent with the new rate set by
 * clk_set_rate().
 */
static void clk_range_test_set_range_set_round_rate_consistent_lower(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = hw->clk;
	long rounded;

	KUNIT_ASSERT_EQ(test,
			clk_set_rate_range(clk,
					   DUMMY_CLOCK_RATE_1,
					   DUMMY_CLOCK_RATE_2),
			0);

	rounded = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
	KUNIT_ASSERT_GT(test, rounded, 0);

	KUNIT_ASSERT_EQ(test,
			clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
			0);

	KUNIT_EXPECT_EQ(test, rounded, clk_get_rate(clk));
}

/*
 * Test that if our clock has some boundaries and we try to round a rate
 * higher than the maximum, the returned rate will be within range.
 */
static void clk_range_test_set_range_round_rate_higher(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = hw->clk;
	long rate;

	KUNIT_ASSERT_EQ(test,
			clk_set_rate_range(clk,
					   DUMMY_CLOCK_RATE_1,
					   DUMMY_CLOCK_RATE_2),
			0);

	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
}

/*
 * Test that if our clock has some boundaries and we try to set a rate
 * higher than the maximum, the new rate will be within range.
 */
static void clk_range_test_set_range_set_rate_higher(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = hw->clk;
	unsigned long rate;

	KUNIT_ASSERT_EQ(test,
			clk_set_rate_range(clk,
					   DUMMY_CLOCK_RATE_1,
					   DUMMY_CLOCK_RATE_2),
			0);

	KUNIT_ASSERT_EQ(test,
			clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
			0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
}

/*
 * Test that if our clock has some boundaries and we try to round and
 * set a rate higher than the maximum, the rate returned by
 * clk_round_rate() will be consistent with the new rate set by
 * clk_set_rate().
 */
static void clk_range_test_set_range_set_round_rate_consistent_higher(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = hw->clk;
	long rounded;

	KUNIT_ASSERT_EQ(test,
			clk_set_rate_range(clk,
					   DUMMY_CLOCK_RATE_1,
					   DUMMY_CLOCK_RATE_2),
			0);

	rounded = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
	KUNIT_ASSERT_GT(test, rounded, 0);

	KUNIT_ASSERT_EQ(test,
			clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
			0);

	KUNIT_EXPECT_EQ(test, rounded, clk_get_rate(clk));
}

/*
 * Test that if our clock has a rate lower than the minimum set by a
 * call to clk_set_rate_range(), the rate will be raised to match the
 * new minimum.
 *
 * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
 * modify the requested rate, which is the case for clk_dummy_rate_ops.
 */
static void clk_range_test_set_range_get_rate_raised(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = hw->clk;
	unsigned long rate;

	KUNIT_ASSERT_EQ(test,
			clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
			0);

	KUNIT_ASSERT_EQ(test,
			clk_set_rate_range(clk,
					   DUMMY_CLOCK_RATE_1,
					   DUMMY_CLOCK_RATE_2),
			0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
}

/*
 * Test that if our clock has a rate higher than the maximum set by a
 * call to clk_set_rate_range(), the rate will be lowered to match the
 * new maximum.
 *
 * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
 * modify the requested rate, which is the case for clk_dummy_rate_ops.
 */
static void clk_range_test_set_range_get_rate_lowered(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = hw->clk;
	unsigned long rate;

	KUNIT_ASSERT_EQ(test,
			clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
			0);

	KUNIT_ASSERT_EQ(test,
			clk_set_rate_range(clk,
					   DUMMY_CLOCK_RATE_1,
					   DUMMY_CLOCK_RATE_2),
			0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
}

static struct kunit_case clk_range_test_cases[] = {
	KUNIT_CASE(clk_range_test_set_range),
	KUNIT_CASE(clk_range_test_set_range_invalid),
	KUNIT_CASE(clk_range_test_multiple_disjoints_range),
	KUNIT_CASE(clk_range_test_set_range_round_rate_lower),
	KUNIT_CASE(clk_range_test_set_range_set_rate_lower),
	KUNIT_CASE(clk_range_test_set_range_set_round_rate_consistent_lower),
	KUNIT_CASE(clk_range_test_set_range_round_rate_higher),
	KUNIT_CASE(clk_range_test_set_range_set_rate_higher),
	KUNIT_CASE(clk_range_test_set_range_set_round_rate_consistent_higher),
	KUNIT_CASE(clk_range_test_set_range_get_rate_raised),
	KUNIT_CASE(clk_range_test_set_range_get_rate_lowered),
	{}
};

static struct kunit_suite clk_range_test_suite = {
	.name = "clk-range-test",
	.init = clk_test_init,
	.exit = clk_test_exit,
	.test_cases = clk_range_test_cases,
};

/*
 * Test that if we have several subsequent calls to
 * clk_set_rate_range(), the core will reevaluate whether a new rate is
 * needed each and every time.
 *
 * With clk_dummy_maximize_rate_ops, this means that the rate will
 * trail along the maximum as it evolves.
 */
static void clk_range_test_set_range_rate_maximized(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = hw->clk;
	unsigned long rate;

	KUNIT_ASSERT_EQ(test,
			clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
			0);

	KUNIT_ASSERT_EQ(test,
			clk_set_rate_range(clk,
					   DUMMY_CLOCK_RATE_1,
					   DUMMY_CLOCK_RATE_2),
			0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);

	KUNIT_ASSERT_EQ(test,
			clk_set_rate_range(clk,
					   DUMMY_CLOCK_RATE_1,
					   DUMMY_CLOCK_RATE_2 - 1000),
			0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2 - 1000);

	KUNIT_ASSERT_EQ(test,
			clk_set_rate_range(clk,
					   DUMMY_CLOCK_RATE_1,
					   DUMMY_CLOCK_RATE_2),
			0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
}

/*
 * Test that if we have several subsequent calls to
 * clk_set_rate_range(), across multiple users, the core will reevaluate
 * whether a new rate is needed each and every time.
 *
 * With clk_dummy_maximize_rate_ops, this means that the rate will
 * trail along the maximum as it evolves.
 */
static void clk_range_test_multiple_set_range_rate_maximized(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = hw->clk;
	struct clk *user1, *user2;
	unsigned long rate;

	user1 = clk_hw_get_clk(hw, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);

	user2 = clk_hw_get_clk(hw, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);

	KUNIT_ASSERT_EQ(test,
			clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
			0);

	KUNIT_ASSERT_EQ(test,
			clk_set_rate_range(user1,
					   0,
					   DUMMY_CLOCK_RATE_2),
			0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);

	KUNIT_ASSERT_EQ(test,
			clk_set_rate_range(user2,
					   0,
					   DUMMY_CLOCK_RATE_1),
			0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);

	KUNIT_ASSERT_EQ(test,
			clk_drop_range(user2),
			0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);

	clk_put(user2);
	clk_put(user1);
}

static struct kunit_case clk_range_maximize_test_cases[] = {
	KUNIT_CASE(clk_range_test_set_range_rate_maximized),
	KUNIT_CASE(clk_range_test_multiple_set_range_rate_maximized),
	{}
};

static struct kunit_suite clk_range_maximize_test_suite = {
	.name = "clk-range-maximize-test",
	.init = clk_maximize_test_init,
	.exit = clk_test_exit,
	.test_cases = clk_range_maximize_test_cases,
};

775 
776 /*
777  * Test that if we have several subsequent calls to
778  * clk_set_rate_range(), the core will reevaluate whether a new rate is
779  * needed each and every time.
780  *
781  * With clk_dummy_minimize_rate_ops, this means that the rate will
782  * trail along the minimum as it evolves.
783  */
784 static void clk_range_test_set_range_rate_minimized(struct kunit *test)
785 {
786 	struct clk_dummy_context *ctx = test->priv;
787 	struct clk_hw *hw = &ctx->hw;
788 	struct clk *clk = hw->clk;
789 	unsigned long rate;
790 
791 	KUNIT_ASSERT_EQ(test,
792 			clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
793 			0);
794 
795 	KUNIT_ASSERT_EQ(test,
796 			clk_set_rate_range(clk,
797 					   DUMMY_CLOCK_RATE_1,
798 					   DUMMY_CLOCK_RATE_2),
799 			0);
800 
801 	rate = clk_get_rate(clk);
802 	KUNIT_ASSERT_GT(test, rate, 0);
803 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
804 
805 	KUNIT_ASSERT_EQ(test,
806 			clk_set_rate_range(clk,
807 					   DUMMY_CLOCK_RATE_1 + 1000,
808 					   DUMMY_CLOCK_RATE_2),
809 			0);
810 
811 	rate = clk_get_rate(clk);
812 	KUNIT_ASSERT_GT(test, rate, 0);
813 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
814 
815 	KUNIT_ASSERT_EQ(test,
816 			clk_set_rate_range(clk,
817 					   DUMMY_CLOCK_RATE_1,
818 					   DUMMY_CLOCK_RATE_2),
819 			0);
820 
821 	rate = clk_get_rate(clk);
822 	KUNIT_ASSERT_GT(test, rate, 0);
823 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
824 }
825 
826 /*
827  * Test that if we have several subsequent calls to
828  * clk_set_rate_range(), across multiple users, the core will reevaluate
829  * whether a new rate is needed each and every time.
830  *
831  * With clk_dummy_minimize_rate_ops, this means that the rate will
832  * trail along the minimum as it evolves.
833  */
834 static void clk_range_test_multiple_set_range_rate_minimized(struct kunit *test)
835 {
836 	struct clk_dummy_context *ctx = test->priv;
837 	struct clk_hw *hw = &ctx->hw;
838 	struct clk *clk = hw->clk;
839 	struct clk *user1, *user2;
840 	unsigned long rate;
841 
842 	user1 = clk_hw_get_clk(hw, NULL);
843 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
844 
845 	user2 = clk_hw_get_clk(hw, NULL);
846 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
847 
848 	KUNIT_ASSERT_EQ(test,
849 			clk_set_rate_range(user1,
850 					   DUMMY_CLOCK_RATE_1,
851 					   ULONG_MAX),
852 			0);
853 
854 	rate = clk_get_rate(clk);
855 	KUNIT_ASSERT_GT(test, rate, 0);
856 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
857 
858 	KUNIT_ASSERT_EQ(test,
859 			clk_set_rate_range(user2,
860 					   DUMMY_CLOCK_RATE_2,
861 					   ULONG_MAX),
862 			0);
863 
864 	rate = clk_get_rate(clk);
865 	KUNIT_ASSERT_GT(test, rate, 0);
866 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
867 
868 	KUNIT_ASSERT_EQ(test,
869 			clk_drop_range(user2),
870 			0);
871 
872 	rate = clk_get_rate(clk);
873 	KUNIT_ASSERT_GT(test, rate, 0);
874 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
875 
876 	clk_put(user2);
877 	clk_put(user1);
878 }
879 
880 static struct kunit_case clk_range_minimize_test_cases[] = {
881 	KUNIT_CASE(clk_range_test_set_range_rate_minimized),
882 	KUNIT_CASE(clk_range_test_multiple_set_range_rate_minimized),
883 	{}
884 };
885 
886 static struct kunit_suite clk_range_minimize_test_suite = {
887 	.name = "clk-range-minimize-test",
888 	.init = clk_minimize_test_init,
889 	.exit = clk_test_exit,
890 	.test_cases = clk_range_minimize_test_cases,
891 };
892 
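/*
 * Register every suite above with the KUnit framework; they run at module
 * load time, or during boot when built in. These tests are typically
 * exercised through the KUnit wrapper (tools/testing/kunit/kunit.py) with
 * the relevant Kconfig option enabled (assumed here to be
 * CONFIG_CLK_KUNIT_TEST, plus its dependencies).
 */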
kunit_test_suites(
	&clk_test_suite,
	&clk_orphan_transparent_single_parent_test_suite,
	&clk_range_test_suite,
	&clk_range_maximize_test_suite,
	&clk_range_minimize_test_suite
);
MODULE_LICENSE("GPL v2");