xref: /openbmc/linux/drivers/clk/clk_test.c (revision 2f3f53d6)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Kunit test for clk rate management
4  */
5 #include <linux/clk.h>
6 #include <linux/clk-provider.h>
7 
8 /* Needed for clk_hw_get_clk() */
9 #include "clk.h"
10 
11 #include <kunit/test.h>
12 
13 #define DUMMY_CLOCK_INIT_RATE	(42 * 1000 * 1000)
14 #define DUMMY_CLOCK_RATE_1	(142 * 1000 * 1000)
15 #define DUMMY_CLOCK_RATE_2	(242 * 1000 * 1000)
16 
/* Context for a leaf dummy clock whose rate is backed by a plain variable. */
struct clk_dummy_context {
	struct clk_hw hw;	/* must be embedded so container_of() works in the ops */
	unsigned long rate;	/* the "hardware" rate, read/written by the dummy ops */
};
21 
22 static unsigned long clk_dummy_recalc_rate(struct clk_hw *hw,
23 					   unsigned long parent_rate)
24 {
25 	struct clk_dummy_context *ctx =
26 		container_of(hw, struct clk_dummy_context, hw);
27 
28 	return ctx->rate;
29 }
30 
/*
 * Accept whatever rate was requested: leave the clk_rate_request
 * untouched and report success.
 */
static int clk_dummy_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	return 0;
}
37 
38 static int clk_dummy_maximize_rate(struct clk_hw *hw,
39 				   struct clk_rate_request *req)
40 {
41 	/*
42 	 * If there's a maximum set, always run the clock at the maximum
43 	 * allowed.
44 	 */
45 	if (req->max_rate < ULONG_MAX)
46 		req->rate = req->max_rate;
47 
48 	return 0;
49 }
50 
51 static int clk_dummy_minimize_rate(struct clk_hw *hw,
52 				   struct clk_rate_request *req)
53 {
54 	/*
55 	 * If there's a minimum set, always run the clock at the minimum
56 	 * allowed.
57 	 */
58 	if (req->min_rate > 0)
59 		req->rate = req->min_rate;
60 
61 	return 0;
62 }
63 
64 static int clk_dummy_set_rate(struct clk_hw *hw,
65 			      unsigned long rate,
66 			      unsigned long parent_rate)
67 {
68 	struct clk_dummy_context *ctx =
69 		container_of(hw, struct clk_dummy_context, hw);
70 
71 	ctx->rate = rate;
72 	return 0;
73 }
74 
75 static int clk_dummy_single_set_parent(struct clk_hw *hw, u8 index)
76 {
77 	if (index >= clk_hw_get_num_parents(hw))
78 		return -EINVAL;
79 
80 	return 0;
81 }
82 
83 static u8 clk_dummy_single_get_parent(struct clk_hw *hw)
84 {
85 	return 0;
86 }
87 
/* Ops for a rate clock that accepts any requested rate unmodified. */
static const struct clk_ops clk_dummy_rate_ops = {
	.recalc_rate = clk_dummy_recalc_rate,
	.determine_rate = clk_dummy_determine_rate,
	.set_rate = clk_dummy_set_rate,
};
93 
/* Ops for a rate clock that always rounds up to the maximum allowed rate. */
static const struct clk_ops clk_dummy_maximize_rate_ops = {
	.recalc_rate = clk_dummy_recalc_rate,
	.determine_rate = clk_dummy_maximize_rate,
	.set_rate = clk_dummy_set_rate,
};
99 
/* Ops for a rate clock that always rounds down to the minimum allowed rate. */
static const struct clk_ops clk_dummy_minimize_rate_ops = {
	.recalc_rate = clk_dummy_recalc_rate,
	.determine_rate = clk_dummy_minimize_rate,
	.set_rate = clk_dummy_set_rate,
};
105 
/* Ops for a mux-like clock with exactly one possible parent and no rate ops. */
static const struct clk_ops clk_dummy_single_parent_ops = {
	.set_parent = clk_dummy_single_set_parent,
	.get_parent = clk_dummy_single_get_parent,
};
110 
/* Context for a mux clock with two dummy parent clocks. */
struct clk_multiple_parent_ctx {
	struct clk_dummy_context parents_ctx[2];	/* backing state for both parents */
	struct clk_hw hw;				/* the mux itself */
	u8 current_parent;				/* index into parents_ctx, emulating the mux register */
};
116 
117 static int clk_multiple_parents_mux_set_parent(struct clk_hw *hw, u8 index)
118 {
119 	struct clk_multiple_parent_ctx *ctx =
120 		container_of(hw, struct clk_multiple_parent_ctx, hw);
121 
122 	if (index >= clk_hw_get_num_parents(hw))
123 		return -EINVAL;
124 
125 	ctx->current_parent = index;
126 
127 	return 0;
128 }
129 
130 static u8 clk_multiple_parents_mux_get_parent(struct clk_hw *hw)
131 {
132 	struct clk_multiple_parent_ctx *ctx =
133 		container_of(hw, struct clk_multiple_parent_ctx, hw);
134 
135 	return ctx->current_parent;
136 }
137 
/*
 * Ops for a transparent mux: rate requests are resolved by picking the
 * parent whose rate is closest, via the framework's generic helper.
 */
static const struct clk_ops clk_multiple_parents_mux_ops = {
	.get_parent = clk_multiple_parents_mux_get_parent,
	.set_parent = clk_multiple_parents_mux_set_parent,
	.determine_rate = __clk_mux_determine_rate_closest,
};
143 
144 static int clk_test_init_with_ops(struct kunit *test, const struct clk_ops *ops)
145 {
146 	struct clk_dummy_context *ctx;
147 	struct clk_init_data init = { };
148 	int ret;
149 
150 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
151 	if (!ctx)
152 		return -ENOMEM;
153 	ctx->rate = DUMMY_CLOCK_INIT_RATE;
154 	test->priv = ctx;
155 
156 	init.name = "test_dummy_rate";
157 	init.ops = ops;
158 	ctx->hw.init = &init;
159 
160 	ret = clk_hw_register(NULL, &ctx->hw);
161 	if (ret)
162 		return ret;
163 
164 	return 0;
165 }
166 
167 static int clk_test_init(struct kunit *test)
168 {
169 	return clk_test_init_with_ops(test, &clk_dummy_rate_ops);
170 }
171 
172 static int clk_maximize_test_init(struct kunit *test)
173 {
174 	return clk_test_init_with_ops(test, &clk_dummy_maximize_rate_ops);
175 }
176 
177 static int clk_minimize_test_init(struct kunit *test)
178 {
179 	return clk_test_init_with_ops(test, &clk_dummy_minimize_rate_ops);
180 }
181 
182 static void clk_test_exit(struct kunit *test)
183 {
184 	struct clk_dummy_context *ctx = test->priv;
185 
186 	clk_hw_unregister(&ctx->hw);
187 }
188 
189 /*
190  * Test that the actual rate matches what is returned by clk_get_rate()
191  */
192 static void clk_test_get_rate(struct kunit *test)
193 {
194 	struct clk_dummy_context *ctx = test->priv;
195 	struct clk_hw *hw = &ctx->hw;
196 	struct clk *clk = clk_hw_get_clk(hw, NULL);
197 	unsigned long rate;
198 
199 	rate = clk_get_rate(clk);
200 	KUNIT_ASSERT_GT(test, rate, 0);
201 	KUNIT_EXPECT_EQ(test, rate, ctx->rate);
202 
203 	clk_put(clk);
204 }
205 
206 /*
207  * Test that, after a call to clk_set_rate(), the rate returned by
208  * clk_get_rate() matches.
209  *
210  * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
211  * modify the requested rate, which is our case in clk_dummy_rate_ops.
212  */
213 static void clk_test_set_get_rate(struct kunit *test)
214 {
215 	struct clk_dummy_context *ctx = test->priv;
216 	struct clk_hw *hw = &ctx->hw;
217 	struct clk *clk = clk_hw_get_clk(hw, NULL);
218 	unsigned long rate;
219 
220 	KUNIT_ASSERT_EQ(test,
221 			clk_set_rate(clk, DUMMY_CLOCK_RATE_1),
222 			0);
223 
224 	rate = clk_get_rate(clk);
225 	KUNIT_ASSERT_GT(test, rate, 0);
226 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
227 
228 	clk_put(clk);
229 }
230 
231 /*
232  * Test that, after several calls to clk_set_rate(), the rate returned
233  * by clk_get_rate() matches the last one.
234  *
235  * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
236  * modify the requested rate, which is our case in clk_dummy_rate_ops.
237  */
238 static void clk_test_set_set_get_rate(struct kunit *test)
239 {
240 	struct clk_dummy_context *ctx = test->priv;
241 	struct clk_hw *hw = &ctx->hw;
242 	struct clk *clk = clk_hw_get_clk(hw, NULL);
243 	unsigned long rate;
244 
245 	KUNIT_ASSERT_EQ(test,
246 			clk_set_rate(clk, DUMMY_CLOCK_RATE_1),
247 			0);
248 
249 	KUNIT_ASSERT_EQ(test,
250 			clk_set_rate(clk, DUMMY_CLOCK_RATE_2),
251 			0);
252 
253 	rate = clk_get_rate(clk);
254 	KUNIT_ASSERT_GT(test, rate, 0);
255 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
256 
257 	clk_put(clk);
258 }
259 
260 /*
261  * Test that clk_round_rate and clk_set_rate are consitent and will
262  * return the same frequency.
263  */
264 static void clk_test_round_set_get_rate(struct kunit *test)
265 {
266 	struct clk_dummy_context *ctx = test->priv;
267 	struct clk_hw *hw = &ctx->hw;
268 	struct clk *clk = clk_hw_get_clk(hw, NULL);
269 	unsigned long rounded_rate, set_rate;
270 
271 	rounded_rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1);
272 	KUNIT_ASSERT_GT(test, rounded_rate, 0);
273 	KUNIT_EXPECT_EQ(test, rounded_rate, DUMMY_CLOCK_RATE_1);
274 
275 	KUNIT_ASSERT_EQ(test,
276 			clk_set_rate(clk, DUMMY_CLOCK_RATE_1),
277 			0);
278 
279 	set_rate = clk_get_rate(clk);
280 	KUNIT_ASSERT_GT(test, set_rate, 0);
281 	KUNIT_EXPECT_EQ(test, rounded_rate, set_rate);
282 
283 	clk_put(clk);
284 }
285 
/* Cases exercising the consumer rate API on a plain rate clock. */
static struct kunit_case clk_test_cases[] = {
	KUNIT_CASE(clk_test_get_rate),
	KUNIT_CASE(clk_test_set_get_rate),
	KUNIT_CASE(clk_test_set_set_get_rate),
	KUNIT_CASE(clk_test_round_set_get_rate),
	{}	/* sentinel */
};
293 
/*
 * Test suite for a basic rate clock, without any parent.
 *
 * These tests exercise the rate API with simple scenarios
 */
static struct kunit_suite clk_test_suite = {
	.name = "clk-test",
	.init = clk_test_init,
	.exit = clk_test_exit,
	.test_cases = clk_test_cases,
};
305 
306 static int clk_uncached_test_init(struct kunit *test)
307 {
308 	struct clk_dummy_context *ctx;
309 	int ret;
310 
311 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
312 	if (!ctx)
313 		return -ENOMEM;
314 	test->priv = ctx;
315 
316 	ctx->rate = DUMMY_CLOCK_INIT_RATE;
317 	ctx->hw.init = CLK_HW_INIT_NO_PARENT("test-clk",
318 					     &clk_dummy_rate_ops,
319 					     CLK_GET_RATE_NOCACHE);
320 
321 	ret = clk_hw_register(NULL, &ctx->hw);
322 	if (ret)
323 		return ret;
324 
325 	return 0;
326 }
327 
328 /*
329  * Test that for an uncached clock, the clock framework doesn't cache
330  * the rate and clk_get_rate() will return the underlying clock rate
331  * even if it changed.
332  */
333 static void clk_test_uncached_get_rate(struct kunit *test)
334 {
335 	struct clk_dummy_context *ctx = test->priv;
336 	struct clk_hw *hw = &ctx->hw;
337 	struct clk *clk = clk_hw_get_clk(hw, NULL);
338 	unsigned long rate;
339 
340 	rate = clk_get_rate(clk);
341 	KUNIT_ASSERT_GT(test, rate, 0);
342 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_INIT_RATE);
343 
344 	/* We change the rate behind the clock framework's back */
345 	ctx->rate = DUMMY_CLOCK_RATE_1;
346 	rate = clk_get_rate(clk);
347 	KUNIT_ASSERT_GT(test, rate, 0);
348 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
349 
350 	clk_put(clk);
351 }
352 
353 /*
354  * Test that for an uncached clock, clk_set_rate_range() will work
355  * properly if the rate hasn't changed.
356  */
357 static void clk_test_uncached_set_range(struct kunit *test)
358 {
359 	struct clk_dummy_context *ctx = test->priv;
360 	struct clk_hw *hw = &ctx->hw;
361 	struct clk *clk = clk_hw_get_clk(hw, NULL);
362 	unsigned long rate;
363 
364 	KUNIT_ASSERT_EQ(test,
365 			clk_set_rate_range(clk,
366 					   DUMMY_CLOCK_RATE_1,
367 					   DUMMY_CLOCK_RATE_2),
368 			0);
369 
370 	rate = clk_get_rate(clk);
371 	KUNIT_ASSERT_GT(test, rate, 0);
372 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
373 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
374 
375 	clk_put(clk);
376 }
377 
378 /*
379  * Test that for an uncached clock, clk_set_rate_range() will work
380  * properly if the rate has changed in hardware.
381  *
382  * In this case, it means that if the rate wasn't initially in the range
383  * we're trying to set, but got changed at some point into the range
384  * without the kernel knowing about it, its rate shouldn't be affected.
385  */
386 static void clk_test_uncached_updated_rate_set_range(struct kunit *test)
387 {
388 	struct clk_dummy_context *ctx = test->priv;
389 	struct clk_hw *hw = &ctx->hw;
390 	struct clk *clk = clk_hw_get_clk(hw, NULL);
391 	unsigned long rate;
392 
393 	/* We change the rate behind the clock framework's back */
394 	ctx->rate = DUMMY_CLOCK_RATE_1 + 1000;
395 	KUNIT_ASSERT_EQ(test,
396 			clk_set_rate_range(clk,
397 					   DUMMY_CLOCK_RATE_1,
398 					   DUMMY_CLOCK_RATE_2),
399 			0);
400 
401 	rate = clk_get_rate(clk);
402 	KUNIT_ASSERT_GT(test, rate, 0);
403 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
404 
405 	clk_put(clk);
406 }
407 
/* Cases exercising a CLK_GET_RATE_NOCACHE clock. */
static struct kunit_case clk_uncached_test_cases[] = {
	KUNIT_CASE(clk_test_uncached_get_rate),
	KUNIT_CASE(clk_test_uncached_set_range),
	KUNIT_CASE(clk_test_uncached_updated_rate_set_range),
	{}	/* sentinel */
};
414 
/*
 * Test suite for a basic, uncached, rate clock, without any parent.
 *
 * These tests exercise the rate API with simple scenarios
 */
static struct kunit_suite clk_uncached_test_suite = {
	.name = "clk-uncached-test",
	.init = clk_uncached_test_init,
	.exit = clk_test_exit,	/* shared with the cached-clock suite */
	.test_cases = clk_uncached_test_cases,
};
426 
427 static int
428 clk_multiple_parents_mux_test_init(struct kunit *test)
429 {
430 	struct clk_multiple_parent_ctx *ctx;
431 	const char *parents[2] = { "parent-0", "parent-1"};
432 	int ret;
433 
434 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
435 	if (!ctx)
436 		return -ENOMEM;
437 	test->priv = ctx;
438 
439 	ctx->parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
440 							    &clk_dummy_rate_ops,
441 							    0);
442 	ctx->parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
443 	ret = clk_hw_register(NULL, &ctx->parents_ctx[0].hw);
444 	if (ret)
445 		return ret;
446 
447 	ctx->parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
448 							    &clk_dummy_rate_ops,
449 							    0);
450 	ctx->parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
451 	ret = clk_hw_register(NULL, &ctx->parents_ctx[1].hw);
452 	if (ret)
453 		return ret;
454 
455 	ctx->current_parent = 0;
456 	ctx->hw.init = CLK_HW_INIT_PARENTS("test-mux", parents,
457 					   &clk_multiple_parents_mux_ops,
458 					   CLK_SET_RATE_PARENT);
459 	ret = clk_hw_register(NULL, &ctx->hw);
460 	if (ret)
461 		return ret;
462 
463 	return 0;
464 }
465 
466 static void
467 clk_multiple_parents_mux_test_exit(struct kunit *test)
468 {
469 	struct clk_multiple_parent_ctx *ctx = test->priv;
470 
471 	clk_hw_unregister(&ctx->hw);
472 	clk_hw_unregister(&ctx->parents_ctx[0].hw);
473 	clk_hw_unregister(&ctx->parents_ctx[1].hw);
474 }
475 
476 /*
477  * Test that for a clock with multiple parents, clk_get_parent()
478  * actually returns the current one.
479  */
480 static void
481 clk_test_multiple_parents_mux_get_parent(struct kunit *test)
482 {
483 	struct clk_multiple_parent_ctx *ctx = test->priv;
484 	struct clk_hw *hw = &ctx->hw;
485 	struct clk *clk = clk_hw_get_clk(hw, NULL);
486 	struct clk *parent = clk_hw_get_clk(&ctx->parents_ctx[0].hw, NULL);
487 
488 	KUNIT_EXPECT_TRUE(test, clk_is_match(clk_get_parent(clk), parent));
489 
490 	clk_put(parent);
491 	clk_put(clk);
492 }
493 
494 /*
495  * Test that for a clock with a multiple parents, clk_has_parent()
496  * actually reports all of them as parents.
497  */
498 static void
499 clk_test_multiple_parents_mux_has_parent(struct kunit *test)
500 {
501 	struct clk_multiple_parent_ctx *ctx = test->priv;
502 	struct clk_hw *hw = &ctx->hw;
503 	struct clk *clk = clk_hw_get_clk(hw, NULL);
504 	struct clk *parent;
505 
506 	parent = clk_hw_get_clk(&ctx->parents_ctx[0].hw, NULL);
507 	KUNIT_EXPECT_TRUE(test, clk_has_parent(clk, parent));
508 	clk_put(parent);
509 
510 	parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
511 	KUNIT_EXPECT_TRUE(test, clk_has_parent(clk, parent));
512 	clk_put(parent);
513 
514 	clk_put(clk);
515 }
516 
517 /*
518  * Test that for a clock with a multiple parents, if we set a range on
519  * that clock and the parent is changed, its rate after the reparenting
520  * is still within the range we asked for.
521  *
522  * FIXME: clk_set_parent() only does the reparenting but doesn't
523  * reevaluate whether the new clock rate is within its boundaries or
524  * not.
525  */
526 static void
527 clk_test_multiple_parents_mux_set_range_set_parent_get_rate(struct kunit *test)
528 {
529 	struct clk_multiple_parent_ctx *ctx = test->priv;
530 	struct clk_hw *hw = &ctx->hw;
531 	struct clk *clk = clk_hw_get_clk(hw, NULL);
532 	struct clk *parent1, *parent2;
533 	unsigned long rate;
534 	int ret;
535 
536 	kunit_skip(test, "This needs to be fixed in the core.");
537 
538 	parent1 = clk_hw_get_clk(&ctx->parents_ctx[0].hw, NULL);
539 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent1);
540 	KUNIT_ASSERT_TRUE(test, clk_is_match(clk_get_parent(clk), parent1));
541 
542 	parent2 = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
543 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent2);
544 
545 	ret = clk_set_rate(parent1, DUMMY_CLOCK_RATE_1);
546 	KUNIT_ASSERT_EQ(test, ret, 0);
547 
548 	ret = clk_set_rate(parent2, DUMMY_CLOCK_RATE_2);
549 	KUNIT_ASSERT_EQ(test, ret, 0);
550 
551 	ret = clk_set_rate_range(clk,
552 				 DUMMY_CLOCK_RATE_1 - 1000,
553 				 DUMMY_CLOCK_RATE_1 + 1000);
554 	KUNIT_ASSERT_EQ(test, ret, 0);
555 
556 	ret = clk_set_parent(clk, parent2);
557 	KUNIT_ASSERT_EQ(test, ret, 0);
558 
559 	rate = clk_get_rate(clk);
560 	KUNIT_ASSERT_GT(test, rate, 0);
561 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 - 1000);
562 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
563 
564 	clk_put(parent2);
565 	clk_put(parent1);
566 	clk_put(clk);
567 }
568 
/* Cases exercising parent handling on a two-parent mux. */
static struct kunit_case clk_multiple_parents_mux_test_cases[] = {
	KUNIT_CASE(clk_test_multiple_parents_mux_get_parent),
	KUNIT_CASE(clk_test_multiple_parents_mux_has_parent),
	KUNIT_CASE(clk_test_multiple_parents_mux_set_range_set_parent_get_rate),
	{}	/* sentinel */
};
575 
/*
 * Test suite for a basic mux clock with two parents, with
 * CLK_SET_RATE_PARENT on the child.
 *
 * These tests exercise the consumer API and check that the state of the
 * child and parents are sane and consistent.
 */
static struct kunit_suite
clk_multiple_parents_mux_test_suite = {
	.name = "clk-multiple-parents-mux-test",
	.init = clk_multiple_parents_mux_test_init,
	.exit = clk_multiple_parents_mux_test_exit,
	.test_cases = clk_multiple_parents_mux_test_cases,
};
590 
591 static int
592 clk_orphan_transparent_multiple_parent_mux_test_init(struct kunit *test)
593 {
594 	struct clk_multiple_parent_ctx *ctx;
595 	const char *parents[2] = { "missing-parent", "proper-parent"};
596 	int ret;
597 
598 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
599 	if (!ctx)
600 		return -ENOMEM;
601 	test->priv = ctx;
602 
603 	ctx->parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("proper-parent",
604 							    &clk_dummy_rate_ops,
605 							    0);
606 	ctx->parents_ctx[1].rate = DUMMY_CLOCK_INIT_RATE;
607 	ret = clk_hw_register(NULL, &ctx->parents_ctx[1].hw);
608 	if (ret)
609 		return ret;
610 
611 	ctx->hw.init = CLK_HW_INIT_PARENTS("test-orphan-mux", parents,
612 					   &clk_multiple_parents_mux_ops,
613 					   CLK_SET_RATE_PARENT);
614 	ret = clk_hw_register(NULL, &ctx->hw);
615 	if (ret)
616 		return ret;
617 
618 	return 0;
619 }
620 
621 static void
622 clk_orphan_transparent_multiple_parent_mux_test_exit(struct kunit *test)
623 {
624 	struct clk_multiple_parent_ctx *ctx = test->priv;
625 
626 	clk_hw_unregister(&ctx->hw);
627 	clk_hw_unregister(&ctx->parents_ctx[1].hw);
628 }
629 
630 /*
631  * Test that, for a mux whose current parent hasn't been registered yet and is
632  * thus orphan, clk_get_parent() will return NULL.
633  */
634 static void
635 clk_test_orphan_transparent_multiple_parent_mux_get_parent(struct kunit *test)
636 {
637 	struct clk_multiple_parent_ctx *ctx = test->priv;
638 	struct clk_hw *hw = &ctx->hw;
639 	struct clk *clk = clk_hw_get_clk(hw, NULL);
640 
641 	KUNIT_EXPECT_PTR_EQ(test, clk_get_parent(clk), NULL);
642 
643 	clk_put(clk);
644 }
645 
646 /*
647  * Test that, for a mux whose current parent hasn't been registered yet,
648  * calling clk_set_parent() to a valid parent will properly update the
649  * mux parent and its orphan status.
650  */
651 static void
652 clk_test_orphan_transparent_multiple_parent_mux_set_parent(struct kunit *test)
653 {
654 	struct clk_multiple_parent_ctx *ctx = test->priv;
655 	struct clk_hw *hw = &ctx->hw;
656 	struct clk *clk = clk_hw_get_clk(hw, NULL);
657 	struct clk *parent, *new_parent;
658 	int ret;
659 
660 	parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
661 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
662 
663 	ret = clk_set_parent(clk, parent);
664 	KUNIT_ASSERT_EQ(test, ret, 0);
665 
666 	new_parent = clk_get_parent(clk);
667 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
668 	KUNIT_EXPECT_TRUE(test, clk_is_match(parent, new_parent));
669 
670 	clk_put(parent);
671 	clk_put(clk);
672 }
673 
674 /*
675  * Test that, for a mux that started orphan but got switched to a valid
676  * parent, calling clk_drop_range() on the mux won't affect the parent
677  * rate.
678  */
679 static void
680 clk_test_orphan_transparent_multiple_parent_mux_set_parent_drop_range(struct kunit *test)
681 {
682 	struct clk_multiple_parent_ctx *ctx = test->priv;
683 	struct clk_hw *hw = &ctx->hw;
684 	struct clk *clk = clk_hw_get_clk(hw, NULL);
685 	struct clk *parent;
686 	unsigned long parent_rate, new_parent_rate;
687 	int ret;
688 
689 	parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
690 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
691 
692 	parent_rate = clk_get_rate(parent);
693 	KUNIT_ASSERT_GT(test, parent_rate, 0);
694 
695 	ret = clk_set_parent(clk, parent);
696 	KUNIT_ASSERT_EQ(test, ret, 0);
697 
698 	ret = clk_drop_range(clk);
699 	KUNIT_ASSERT_EQ(test, ret, 0);
700 
701 	new_parent_rate = clk_get_rate(clk);
702 	KUNIT_ASSERT_GT(test, new_parent_rate, 0);
703 	KUNIT_EXPECT_EQ(test, parent_rate, new_parent_rate);
704 
705 	clk_put(parent);
706 	clk_put(clk);
707 }
708 
709 /*
710  * Test that, for a mux that started orphan but got switched to a valid
711  * parent, the rate of the mux and its new parent are consistent.
712  */
713 static void
714 clk_test_orphan_transparent_multiple_parent_mux_set_parent_get_rate(struct kunit *test)
715 {
716 	struct clk_multiple_parent_ctx *ctx = test->priv;
717 	struct clk_hw *hw = &ctx->hw;
718 	struct clk *clk = clk_hw_get_clk(hw, NULL);
719 	struct clk *parent;
720 	unsigned long parent_rate, rate;
721 	int ret;
722 
723 	parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
724 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
725 
726 	parent_rate = clk_get_rate(parent);
727 	KUNIT_ASSERT_GT(test, parent_rate, 0);
728 
729 	ret = clk_set_parent(clk, parent);
730 	KUNIT_ASSERT_EQ(test, ret, 0);
731 
732 	rate = clk_get_rate(clk);
733 	KUNIT_ASSERT_GT(test, rate, 0);
734 	KUNIT_EXPECT_EQ(test, parent_rate, rate);
735 
736 	clk_put(parent);
737 	clk_put(clk);
738 }
739 
740 /*
741  * Test that, for a mux that started orphan but got switched to a valid
742  * parent, calling clk_put() on the mux won't affect the parent rate.
743  */
744 static void
745 clk_test_orphan_transparent_multiple_parent_mux_set_parent_put(struct kunit *test)
746 {
747 	struct clk_multiple_parent_ctx *ctx = test->priv;
748 	struct clk *clk, *parent;
749 	unsigned long parent_rate, new_parent_rate;
750 	int ret;
751 
752 	parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
753 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
754 
755 	clk = clk_hw_get_clk(&ctx->hw, NULL);
756 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, clk);
757 
758 	parent_rate = clk_get_rate(parent);
759 	KUNIT_ASSERT_GT(test, parent_rate, 0);
760 
761 	ret = clk_set_parent(clk, parent);
762 	KUNIT_ASSERT_EQ(test, ret, 0);
763 
764 	clk_put(clk);
765 
766 	new_parent_rate = clk_get_rate(parent);
767 	KUNIT_ASSERT_GT(test, new_parent_rate, 0);
768 	KUNIT_EXPECT_EQ(test, parent_rate, new_parent_rate);
769 
770 	clk_put(parent);
771 }
772 
773 /*
774  * Test that, for a mux that started orphan but got switched to a valid
775  * parent, calling clk_set_rate_range() will affect the parent state if
776  * its rate is out of range.
777  */
778 static void
779 clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_modified(struct kunit *test)
780 {
781 	struct clk_multiple_parent_ctx *ctx = test->priv;
782 	struct clk_hw *hw = &ctx->hw;
783 	struct clk *clk = clk_hw_get_clk(hw, NULL);
784 	struct clk *parent;
785 	unsigned long rate;
786 	int ret;
787 
788 	parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
789 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
790 
791 	ret = clk_set_parent(clk, parent);
792 	KUNIT_ASSERT_EQ(test, ret, 0);
793 
794 	ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
795 	KUNIT_ASSERT_EQ(test, ret, 0);
796 
797 	rate = clk_get_rate(clk);
798 	KUNIT_ASSERT_GT(test, rate, 0);
799 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
800 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
801 
802 	clk_put(parent);
803 	clk_put(clk);
804 }
805 
806 /*
807  * Test that, for a mux that started orphan but got switched to a valid
808  * parent, calling clk_set_rate_range() won't affect the parent state if
809  * its rate is within range.
810  */
811 static void
812 clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_untouched(struct kunit *test)
813 {
814 	struct clk_multiple_parent_ctx *ctx = test->priv;
815 	struct clk_hw *hw = &ctx->hw;
816 	struct clk *clk = clk_hw_get_clk(hw, NULL);
817 	struct clk *parent;
818 	unsigned long parent_rate, new_parent_rate;
819 	int ret;
820 
821 	parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
822 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
823 
824 	parent_rate = clk_get_rate(parent);
825 	KUNIT_ASSERT_GT(test, parent_rate, 0);
826 
827 	ret = clk_set_parent(clk, parent);
828 	KUNIT_ASSERT_EQ(test, ret, 0);
829 
830 	ret = clk_set_rate_range(clk,
831 				 DUMMY_CLOCK_INIT_RATE - 1000,
832 				 DUMMY_CLOCK_INIT_RATE + 1000);
833 	KUNIT_ASSERT_EQ(test, ret, 0);
834 
835 	new_parent_rate = clk_get_rate(parent);
836 	KUNIT_ASSERT_GT(test, new_parent_rate, 0);
837 	KUNIT_EXPECT_EQ(test, parent_rate, new_parent_rate);
838 
839 	clk_put(parent);
840 	clk_put(clk);
841 }
842 
843 /*
844  * Test that, for a mux whose current parent hasn't been registered yet,
845  * calling clk_set_rate_range() will succeed, and will be taken into
846  * account when rounding a rate.
847  */
848 static void
849 clk_test_orphan_transparent_multiple_parent_mux_set_range_round_rate(struct kunit *test)
850 {
851 	struct clk_multiple_parent_ctx *ctx = test->priv;
852 	struct clk_hw *hw = &ctx->hw;
853 	struct clk *clk = clk_hw_get_clk(hw, NULL);
854 	unsigned long rate;
855 	int ret;
856 
857 	ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
858 	KUNIT_ASSERT_EQ(test, ret, 0);
859 
860 	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
861 	KUNIT_ASSERT_GT(test, rate, 0);
862 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
863 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
864 
865 	clk_put(clk);
866 }
867 
868 /*
869  * Test that, for a mux that started orphan, was assigned and rate and
870  * then got switched to a valid parent, its rate is eventually within
871  * range.
872  *
873  * FIXME: Even though we update the rate as part of clk_set_parent(), we
874  * don't evaluate whether that new rate is within range and needs to be
875  * adjusted.
876  */
877 static void
878 clk_test_orphan_transparent_multiple_parent_mux_set_range_set_parent_get_rate(struct kunit *test)
879 {
880 	struct clk_multiple_parent_ctx *ctx = test->priv;
881 	struct clk_hw *hw = &ctx->hw;
882 	struct clk *clk = clk_hw_get_clk(hw, NULL);
883 	struct clk *parent;
884 	unsigned long rate;
885 	int ret;
886 
887 	kunit_skip(test, "This needs to be fixed in the core.");
888 
889 	clk_hw_set_rate_range(hw, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
890 
891 	parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
892 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
893 
894 	ret = clk_set_parent(clk, parent);
895 	KUNIT_ASSERT_EQ(test, ret, 0);
896 
897 	rate = clk_get_rate(clk);
898 	KUNIT_ASSERT_GT(test, rate, 0);
899 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
900 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
901 
902 	clk_put(parent);
903 	clk_put(clk);
904 }
905 
/* Cases exercising a mux whose default parent is missing at registration. */
static struct kunit_case clk_orphan_transparent_multiple_parent_mux_test_cases[] = {
	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_get_parent),
	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent),
	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_drop_range),
	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_get_rate),
	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_put),
	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_modified),
	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_untouched),
	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_range_round_rate),
	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_range_set_parent_get_rate),
	{}	/* sentinel */
};
918 
/*
 * Test suite for a basic mux clock with two parents. The default parent
 * isn't registered, only the second parent is. By default, the clock
 * will thus be orphan.
 *
 * These tests exercise the behaviour of the consumer API when dealing
 * with an orphan clock, and how we deal with the transition to a valid
 * parent.
 */
static struct kunit_suite clk_orphan_transparent_multiple_parent_mux_test_suite = {
	.name = "clk-orphan-transparent-multiple-parent-mux-test",
	.init = clk_orphan_transparent_multiple_parent_mux_test_init,
	.exit = clk_orphan_transparent_multiple_parent_mux_test_exit,
	.test_cases = clk_orphan_transparent_multiple_parent_mux_test_cases,
};
934 
/* Context for a child clock with exactly one dummy parent. */
struct clk_single_parent_ctx {
	struct clk_dummy_context parent_ctx;	/* backing state for the parent clock */
	struct clk_hw hw;			/* the child clock */
};
939 
940 static int clk_single_parent_mux_test_init(struct kunit *test)
941 {
942 	struct clk_single_parent_ctx *ctx;
943 	int ret;
944 
945 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
946 	if (!ctx)
947 		return -ENOMEM;
948 	test->priv = ctx;
949 
950 	ctx->parent_ctx.rate = DUMMY_CLOCK_INIT_RATE;
951 	ctx->parent_ctx.hw.init =
952 		CLK_HW_INIT_NO_PARENT("parent-clk",
953 				      &clk_dummy_rate_ops,
954 				      0);
955 
956 	ret = clk_hw_register(NULL, &ctx->parent_ctx.hw);
957 	if (ret)
958 		return ret;
959 
960 	ctx->hw.init = CLK_HW_INIT("test-clk", "parent-clk",
961 				   &clk_dummy_single_parent_ops,
962 				   CLK_SET_RATE_PARENT);
963 
964 	ret = clk_hw_register(NULL, &ctx->hw);
965 	if (ret)
966 		return ret;
967 
968 	return 0;
969 }
970 
971 static void
972 clk_single_parent_mux_test_exit(struct kunit *test)
973 {
974 	struct clk_single_parent_ctx *ctx = test->priv;
975 
976 	clk_hw_unregister(&ctx->hw);
977 	clk_hw_unregister(&ctx->parent_ctx.hw);
978 }
979 
980 /*
981  * Test that for a clock with a single parent, clk_get_parent() actually
982  * returns the parent.
983  */
984 static void
985 clk_test_single_parent_mux_get_parent(struct kunit *test)
986 {
987 	struct clk_single_parent_ctx *ctx = test->priv;
988 	struct clk_hw *hw = &ctx->hw;
989 	struct clk *clk = clk_hw_get_clk(hw, NULL);
990 	struct clk *parent = clk_hw_get_clk(&ctx->parent_ctx.hw, NULL);
991 
992 	KUNIT_EXPECT_TRUE(test, clk_is_match(clk_get_parent(clk), parent));
993 
994 	clk_put(parent);
995 	clk_put(clk);
996 }
997 
998 /*
999  * Test that for a clock with a single parent, clk_has_parent() actually
1000  * reports it as a parent.
1001  */
1002 static void
1003 clk_test_single_parent_mux_has_parent(struct kunit *test)
1004 {
1005 	struct clk_single_parent_ctx *ctx = test->priv;
1006 	struct clk_hw *hw = &ctx->hw;
1007 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1008 	struct clk *parent = clk_hw_get_clk(&ctx->parent_ctx.hw, NULL);
1009 
1010 	KUNIT_EXPECT_TRUE(test, clk_has_parent(clk, parent));
1011 
1012 	clk_put(parent);
1013 	clk_put(clk);
1014 }
1015 
1016 /*
1017  * Test that for a clock that can't modify its rate and with a single
1018  * parent, if we set disjoints range on the parent and then the child,
1019  * the second will return an error.
1020  *
1021  * FIXME: clk_set_rate_range() only considers the current clock when
1022  * evaluating whether ranges are disjoints and not the upstream clocks
1023  * ranges.
1024  */
1025 static void
1026 clk_test_single_parent_mux_set_range_disjoint_child_last(struct kunit *test)
1027 {
1028 	struct clk_single_parent_ctx *ctx = test->priv;
1029 	struct clk_hw *hw = &ctx->hw;
1030 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1031 	struct clk *parent;
1032 	int ret;
1033 
1034 	kunit_skip(test, "This needs to be fixed in the core.");
1035 
1036 	parent = clk_get_parent(clk);
1037 	KUNIT_ASSERT_PTR_NE(test, parent, NULL);
1038 
1039 	ret = clk_set_rate_range(parent, 1000, 2000);
1040 	KUNIT_ASSERT_EQ(test, ret, 0);
1041 
1042 	ret = clk_set_rate_range(clk, 3000, 4000);
1043 	KUNIT_EXPECT_LT(test, ret, 0);
1044 
1045 	clk_put(clk);
1046 }
1047 
1048 /*
1049  * Test that for a clock that can't modify its rate and with a single
1050  * parent, if we set disjoints range on the child and then the parent,
1051  * the second will return an error.
1052  *
1053  * FIXME: clk_set_rate_range() only considers the current clock when
1054  * evaluating whether ranges are disjoints and not the downstream clocks
1055  * ranges.
1056  */
1057 static void
1058 clk_test_single_parent_mux_set_range_disjoint_parent_last(struct kunit *test)
1059 {
1060 	struct clk_single_parent_ctx *ctx = test->priv;
1061 	struct clk_hw *hw = &ctx->hw;
1062 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1063 	struct clk *parent;
1064 	int ret;
1065 
1066 	kunit_skip(test, "This needs to be fixed in the core.");
1067 
1068 	parent = clk_get_parent(clk);
1069 	KUNIT_ASSERT_PTR_NE(test, parent, NULL);
1070 
1071 	ret = clk_set_rate_range(clk, 1000, 2000);
1072 	KUNIT_ASSERT_EQ(test, ret, 0);
1073 
1074 	ret = clk_set_rate_range(parent, 3000, 4000);
1075 	KUNIT_EXPECT_LT(test, ret, 0);
1076 
1077 	clk_put(clk);
1078 }
1079 
1080 /*
1081  * Test that for a clock that can't modify its rate and with a single
1082  * parent, if we set a range on the parent and then call
1083  * clk_round_rate(), the boundaries of the parent are taken into
1084  * account.
1085  */
1086 static void
1087 clk_test_single_parent_mux_set_range_round_rate_parent_only(struct kunit *test)
1088 {
1089 	struct clk_single_parent_ctx *ctx = test->priv;
1090 	struct clk_hw *hw = &ctx->hw;
1091 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1092 	struct clk *parent;
1093 	unsigned long rate;
1094 	int ret;
1095 
1096 	parent = clk_get_parent(clk);
1097 	KUNIT_ASSERT_PTR_NE(test, parent, NULL);
1098 
1099 	ret = clk_set_rate_range(parent, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
1100 	KUNIT_ASSERT_EQ(test, ret, 0);
1101 
1102 	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
1103 	KUNIT_ASSERT_GT(test, rate, 0);
1104 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1105 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1106 
1107 	clk_put(clk);
1108 }
1109 
1110 /*
1111  * Test that for a clock that can't modify its rate and with a single
1112  * parent, if we set a range on the parent and a more restrictive one on
1113  * the child, and then call clk_round_rate(), the boundaries of the
1114  * two clocks are taken into account.
1115  */
1116 static void
1117 clk_test_single_parent_mux_set_range_round_rate_child_smaller(struct kunit *test)
1118 {
1119 	struct clk_single_parent_ctx *ctx = test->priv;
1120 	struct clk_hw *hw = &ctx->hw;
1121 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1122 	struct clk *parent;
1123 	unsigned long rate;
1124 	int ret;
1125 
1126 	parent = clk_get_parent(clk);
1127 	KUNIT_ASSERT_PTR_NE(test, parent, NULL);
1128 
1129 	ret = clk_set_rate_range(parent, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
1130 	KUNIT_ASSERT_EQ(test, ret, 0);
1131 
1132 	ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1 + 1000, DUMMY_CLOCK_RATE_2 - 1000);
1133 	KUNIT_ASSERT_EQ(test, ret, 0);
1134 
1135 	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
1136 	KUNIT_ASSERT_GT(test, rate, 0);
1137 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
1138 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
1139 
1140 	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
1141 	KUNIT_ASSERT_GT(test, rate, 0);
1142 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
1143 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
1144 
1145 	clk_put(clk);
1146 }
1147 
1148 /*
1149  * Test that for a clock that can't modify its rate and with a single
1150  * parent, if we set a range on the child and a more restrictive one on
1151  * the parent, and then call clk_round_rate(), the boundaries of the
1152  * two clocks are taken into account.
1153  */
1154 static void
1155 clk_test_single_parent_mux_set_range_round_rate_parent_smaller(struct kunit *test)
1156 {
1157 	struct clk_single_parent_ctx *ctx = test->priv;
1158 	struct clk_hw *hw = &ctx->hw;
1159 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1160 	struct clk *parent;
1161 	unsigned long rate;
1162 	int ret;
1163 
1164 	parent = clk_get_parent(clk);
1165 	KUNIT_ASSERT_PTR_NE(test, parent, NULL);
1166 
1167 	ret = clk_set_rate_range(parent, DUMMY_CLOCK_RATE_1 + 1000, DUMMY_CLOCK_RATE_2 - 1000);
1168 	KUNIT_ASSERT_EQ(test, ret, 0);
1169 
1170 	ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
1171 	KUNIT_ASSERT_EQ(test, ret, 0);
1172 
1173 	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
1174 	KUNIT_ASSERT_GT(test, rate, 0);
1175 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
1176 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
1177 
1178 	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
1179 	KUNIT_ASSERT_GT(test, rate, 0);
1180 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
1181 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
1182 
1183 	clk_put(clk);
1184 }
1185 
/* Test cases run by clk_single_parent_mux_test_suite. */
static struct kunit_case clk_single_parent_mux_test_cases[] = {
	KUNIT_CASE(clk_test_single_parent_mux_get_parent),
	KUNIT_CASE(clk_test_single_parent_mux_has_parent),
	KUNIT_CASE(clk_test_single_parent_mux_set_range_disjoint_child_last),
	KUNIT_CASE(clk_test_single_parent_mux_set_range_disjoint_parent_last),
	KUNIT_CASE(clk_test_single_parent_mux_set_range_round_rate_child_smaller),
	KUNIT_CASE(clk_test_single_parent_mux_set_range_round_rate_parent_only),
	KUNIT_CASE(clk_test_single_parent_mux_set_range_round_rate_parent_smaller),
	{}	/* sentinel */
};
1196 
1197 /*
1198  * Test suite for a basic mux clock with one parent, with
1199  * CLK_SET_RATE_PARENT on the child.
1200  *
1201  * These tests exercise the consumer API and check that the state of the
1202  * child and parent are sane and consistent.
1203  */
1204 static struct kunit_suite
1205 clk_single_parent_mux_test_suite = {
1206 	.name = "clk-single-parent-mux-test",
1207 	.init = clk_single_parent_mux_test_init,
1208 	.exit = clk_single_parent_mux_test_exit,
1209 	.test_cases = clk_single_parent_mux_test_cases,
1210 };
1211 
1212 static int clk_orphan_transparent_single_parent_mux_test_init(struct kunit *test)
1213 {
1214 	struct clk_single_parent_ctx *ctx;
1215 	struct clk_init_data init = { };
1216 	const char * const parents[] = { "orphan_parent" };
1217 	int ret;
1218 
1219 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
1220 	if (!ctx)
1221 		return -ENOMEM;
1222 	test->priv = ctx;
1223 
1224 	init.name = "test_orphan_dummy_parent";
1225 	init.ops = &clk_dummy_single_parent_ops;
1226 	init.parent_names = parents;
1227 	init.num_parents = ARRAY_SIZE(parents);
1228 	init.flags = CLK_SET_RATE_PARENT;
1229 	ctx->hw.init = &init;
1230 
1231 	ret = clk_hw_register(NULL, &ctx->hw);
1232 	if (ret)
1233 		return ret;
1234 
1235 	memset(&init, 0, sizeof(init));
1236 	init.name = "orphan_parent";
1237 	init.ops = &clk_dummy_rate_ops;
1238 	ctx->parent_ctx.hw.init = &init;
1239 	ctx->parent_ctx.rate = DUMMY_CLOCK_INIT_RATE;
1240 
1241 	ret = clk_hw_register(NULL, &ctx->parent_ctx.hw);
1242 	if (ret)
1243 		return ret;
1244 
1245 	return 0;
1246 }
1247 
1248 /*
1249  * Test that a mux-only clock, with an initial rate within a range,
1250  * will still have the same rate after the range has been enforced.
1251  *
1252  * See:
1253  * https://lore.kernel.org/linux-clk/7720158d-10a7-a17b-73a4-a8615c9c6d5c@collabora.com/
1254  */
1255 static void clk_test_orphan_transparent_parent_mux_set_range(struct kunit *test)
1256 {
1257 	struct clk_single_parent_ctx *ctx = test->priv;
1258 	struct clk_hw *hw = &ctx->hw;
1259 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1260 	unsigned long rate, new_rate;
1261 
1262 	rate = clk_get_rate(clk);
1263 	KUNIT_ASSERT_GT(test, rate, 0);
1264 
1265 	KUNIT_ASSERT_EQ(test,
1266 			clk_set_rate_range(clk,
1267 					   ctx->parent_ctx.rate - 1000,
1268 					   ctx->parent_ctx.rate + 1000),
1269 			0);
1270 
1271 	new_rate = clk_get_rate(clk);
1272 	KUNIT_ASSERT_GT(test, new_rate, 0);
1273 	KUNIT_EXPECT_EQ(test, rate, new_rate);
1274 
1275 	clk_put(clk);
1276 }
1277 
/* Test cases run by clk_orphan_transparent_single_parent_test_suite. */
static struct kunit_case clk_orphan_transparent_single_parent_mux_test_cases[] = {
	KUNIT_CASE(clk_test_orphan_transparent_parent_mux_set_range),
	{}	/* sentinel */
};
1282 
1283 /*
1284  * Test suite for a basic mux clock with one parent. The parent is
1285  * registered after its child. The clock will thus be an orphan when
1286  * registered, but will no longer be when the tests run.
1287  *
1288  * These tests make sure a clock that used to be orphan has a sane,
1289  * consistent, behaviour.
1290  */
1291 static struct kunit_suite clk_orphan_transparent_single_parent_test_suite = {
1292 	.name = "clk-orphan-transparent-single-parent-test",
1293 	.init = clk_orphan_transparent_single_parent_mux_test_init,
1294 	.exit = clk_single_parent_mux_test_exit,
1295 	.test_cases = clk_orphan_transparent_single_parent_mux_test_cases,
1296 };
1297 
/*
 * Context for a two-level parent chain: hw's parent is parent_ctx,
 * whose parent is parent_parent_ctx (the root). Names below match
 * those used in clk_orphan_two_level_root_last_test_init().
 */
struct clk_single_parent_two_lvl_ctx {
	struct clk_dummy_context parent_parent_ctx;	/* "root-parent" */
	struct clk_dummy_context parent_ctx;		/* "intermediate-parent" */
	struct clk_hw hw;				/* "test-clk", the leaf */
};
1303 
1304 static int
1305 clk_orphan_two_level_root_last_test_init(struct kunit *test)
1306 {
1307 	struct clk_single_parent_two_lvl_ctx *ctx;
1308 	int ret;
1309 
1310 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
1311 	if (!ctx)
1312 		return -ENOMEM;
1313 	test->priv = ctx;
1314 
1315 	ctx->parent_ctx.hw.init =
1316 		CLK_HW_INIT("intermediate-parent",
1317 			    "root-parent",
1318 			    &clk_dummy_single_parent_ops,
1319 			    CLK_SET_RATE_PARENT);
1320 	ret = clk_hw_register(NULL, &ctx->parent_ctx.hw);
1321 	if (ret)
1322 		return ret;
1323 
1324 	ctx->hw.init =
1325 		CLK_HW_INIT("test-clk", "intermediate-parent",
1326 			    &clk_dummy_single_parent_ops,
1327 			    CLK_SET_RATE_PARENT);
1328 	ret = clk_hw_register(NULL, &ctx->hw);
1329 	if (ret)
1330 		return ret;
1331 
1332 	ctx->parent_parent_ctx.rate = DUMMY_CLOCK_INIT_RATE;
1333 	ctx->parent_parent_ctx.hw.init =
1334 		CLK_HW_INIT_NO_PARENT("root-parent",
1335 				      &clk_dummy_rate_ops,
1336 				      0);
1337 	ret = clk_hw_register(NULL, &ctx->parent_parent_ctx.hw);
1338 	if (ret)
1339 		return ret;
1340 
1341 	return 0;
1342 }
1343 
/* Tear down the chain built by clk_orphan_two_level_root_last_test_init(). */
static void
clk_orphan_two_level_root_last_test_exit(struct kunit *test)
{
	struct clk_single_parent_two_lvl_ctx *ctx = test->priv;

	/* Unregister the leaf first, then walk up the chain to the root. */
	clk_hw_unregister(&ctx->hw);
	clk_hw_unregister(&ctx->parent_ctx.hw);
	clk_hw_unregister(&ctx->parent_parent_ctx.hw);
}
1353 
1354 /*
1355  * Test that, for a clock whose parent used to be orphan, clk_get_rate()
1356  * will return the proper rate.
1357  */
1358 static void
1359 clk_orphan_two_level_root_last_test_get_rate(struct kunit *test)
1360 {
1361 	struct clk_single_parent_two_lvl_ctx *ctx = test->priv;
1362 	struct clk_hw *hw = &ctx->hw;
1363 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1364 	unsigned long rate;
1365 
1366 	rate = clk_get_rate(clk);
1367 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_INIT_RATE);
1368 
1369 	clk_put(clk);
1370 }
1371 
1372 /*
1373  * Test that, for a clock whose parent used to be orphan,
1374  * clk_set_rate_range() won't affect its rate if it is already within
1375  * range.
1376  *
1377  * See (for Exynos 4210):
1378  * https://lore.kernel.org/linux-clk/366a0232-bb4a-c357-6aa8-636e398e05eb@samsung.com/
1379  */
1380 static void
1381 clk_orphan_two_level_root_last_test_set_range(struct kunit *test)
1382 {
1383 	struct clk_single_parent_two_lvl_ctx *ctx = test->priv;
1384 	struct clk_hw *hw = &ctx->hw;
1385 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1386 	unsigned long rate;
1387 	int ret;
1388 
1389 	ret = clk_set_rate_range(clk,
1390 				 DUMMY_CLOCK_INIT_RATE - 1000,
1391 				 DUMMY_CLOCK_INIT_RATE + 1000);
1392 	KUNIT_ASSERT_EQ(test, ret, 0);
1393 
1394 	rate = clk_get_rate(clk);
1395 	KUNIT_ASSERT_GT(test, rate, 0);
1396 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_INIT_RATE);
1397 
1398 	clk_put(clk);
1399 }
1400 
/* Test cases run by clk_orphan_two_level_root_last_test_suite. */
static struct kunit_case
clk_orphan_two_level_root_last_test_cases[] = {
	KUNIT_CASE(clk_orphan_two_level_root_last_test_get_rate),
	KUNIT_CASE(clk_orphan_two_level_root_last_test_set_range),
	{}	/* sentinel */
};
1407 
1408 /*
1409  * Test suite for a basic, transparent, clock with a parent that is also
1410  * such a clock. The parent's parent is registered last, while the
1411  * parent and its child are registered in that order. The intermediate
1412  * and leaf clocks will thus be orphan when registered, but the leaf
1413  * clock itself will always have its parent and will never be
1414  * reparented. Indeed, it's only orphan because its parent is.
1415  *
1416  * These tests exercise the behaviour of the consumer API when dealing
1417  * with an orphan clock, and how we deal with the transition to a valid
1418  * parent.
1419  */
1420 static struct kunit_suite
1421 clk_orphan_two_level_root_last_test_suite = {
1422 	.name = "clk-orphan-two-level-root-last-test",
1423 	.init = clk_orphan_two_level_root_last_test_init,
1424 	.exit = clk_orphan_two_level_root_last_test_exit,
1425 	.test_cases = clk_orphan_two_level_root_last_test_cases,
1426 };
1427 
1428 /*
1429  * Test that clk_set_rate_range won't return an error for a valid range
1430  * and that it will make sure the rate of the clock is within the
1431  * boundaries.
1432  */
1433 static void clk_range_test_set_range(struct kunit *test)
1434 {
1435 	struct clk_dummy_context *ctx = test->priv;
1436 	struct clk_hw *hw = &ctx->hw;
1437 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1438 	unsigned long rate;
1439 
1440 	KUNIT_ASSERT_EQ(test,
1441 			clk_set_rate_range(clk,
1442 					   DUMMY_CLOCK_RATE_1,
1443 					   DUMMY_CLOCK_RATE_2),
1444 			0);
1445 
1446 	rate = clk_get_rate(clk);
1447 	KUNIT_ASSERT_GT(test, rate, 0);
1448 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1449 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1450 
1451 	clk_put(clk);
1452 }
1453 
1454 /*
1455  * Test that calling clk_set_rate_range with a minimum rate higher than
1456  * the maximum rate returns an error.
1457  */
1458 static void clk_range_test_set_range_invalid(struct kunit *test)
1459 {
1460 	struct clk_dummy_context *ctx = test->priv;
1461 	struct clk_hw *hw = &ctx->hw;
1462 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1463 
1464 	KUNIT_EXPECT_LT(test,
1465 			clk_set_rate_range(clk,
1466 					   DUMMY_CLOCK_RATE_1 + 1000,
1467 					   DUMMY_CLOCK_RATE_1),
1468 			0);
1469 
1470 	clk_put(clk);
1471 }
1472 
1473 /*
1474  * Test that users can't set multiple, disjoints, range that would be
1475  * impossible to meet.
1476  */
1477 static void clk_range_test_multiple_disjoints_range(struct kunit *test)
1478 {
1479 	struct clk_dummy_context *ctx = test->priv;
1480 	struct clk_hw *hw = &ctx->hw;
1481 	struct clk *user1, *user2;
1482 
1483 	user1 = clk_hw_get_clk(hw, NULL);
1484 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
1485 
1486 	user2 = clk_hw_get_clk(hw, NULL);
1487 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
1488 
1489 	KUNIT_ASSERT_EQ(test,
1490 			clk_set_rate_range(user1, 1000, 2000),
1491 			0);
1492 
1493 	KUNIT_EXPECT_LT(test,
1494 			clk_set_rate_range(user2, 3000, 4000),
1495 			0);
1496 
1497 	clk_put(user2);
1498 	clk_put(user1);
1499 }
1500 
1501 /*
1502  * Test that if our clock has some boundaries and we try to round a rate
1503  * lower than the minimum, the returned rate will be within range.
1504  */
1505 static void clk_range_test_set_range_round_rate_lower(struct kunit *test)
1506 {
1507 	struct clk_dummy_context *ctx = test->priv;
1508 	struct clk_hw *hw = &ctx->hw;
1509 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1510 	long rate;
1511 
1512 	KUNIT_ASSERT_EQ(test,
1513 			clk_set_rate_range(clk,
1514 					   DUMMY_CLOCK_RATE_1,
1515 					   DUMMY_CLOCK_RATE_2),
1516 			0);
1517 
1518 	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
1519 	KUNIT_ASSERT_GT(test, rate, 0);
1520 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1521 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1522 
1523 	clk_put(clk);
1524 }
1525 
1526 /*
1527  * Test that if our clock has some boundaries and we try to set a rate
1528  * higher than the maximum, the new rate will be within range.
1529  */
1530 static void clk_range_test_set_range_set_rate_lower(struct kunit *test)
1531 {
1532 	struct clk_dummy_context *ctx = test->priv;
1533 	struct clk_hw *hw = &ctx->hw;
1534 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1535 	unsigned long rate;
1536 
1537 	KUNIT_ASSERT_EQ(test,
1538 			clk_set_rate_range(clk,
1539 					   DUMMY_CLOCK_RATE_1,
1540 					   DUMMY_CLOCK_RATE_2),
1541 			0);
1542 
1543 	KUNIT_ASSERT_EQ(test,
1544 			clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
1545 			0);
1546 
1547 	rate = clk_get_rate(clk);
1548 	KUNIT_ASSERT_GT(test, rate, 0);
1549 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1550 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1551 
1552 	clk_put(clk);
1553 }
1554 
1555 /*
1556  * Test that if our clock has some boundaries and we try to round and
1557  * set a rate lower than the minimum, the rate returned by
1558  * clk_round_rate() will be consistent with the new rate set by
1559  * clk_set_rate().
1560  */
1561 static void clk_range_test_set_range_set_round_rate_consistent_lower(struct kunit *test)
1562 {
1563 	struct clk_dummy_context *ctx = test->priv;
1564 	struct clk_hw *hw = &ctx->hw;
1565 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1566 	long rounded;
1567 
1568 	KUNIT_ASSERT_EQ(test,
1569 			clk_set_rate_range(clk,
1570 					   DUMMY_CLOCK_RATE_1,
1571 					   DUMMY_CLOCK_RATE_2),
1572 			0);
1573 
1574 	rounded = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
1575 	KUNIT_ASSERT_GT(test, rounded, 0);
1576 
1577 	KUNIT_ASSERT_EQ(test,
1578 			clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
1579 			0);
1580 
1581 	KUNIT_EXPECT_EQ(test, rounded, clk_get_rate(clk));
1582 
1583 	clk_put(clk);
1584 }
1585 
1586 /*
1587  * Test that if our clock has some boundaries and we try to round a rate
1588  * higher than the maximum, the returned rate will be within range.
1589  */
1590 static void clk_range_test_set_range_round_rate_higher(struct kunit *test)
1591 {
1592 	struct clk_dummy_context *ctx = test->priv;
1593 	struct clk_hw *hw = &ctx->hw;
1594 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1595 	long rate;
1596 
1597 	KUNIT_ASSERT_EQ(test,
1598 			clk_set_rate_range(clk,
1599 					   DUMMY_CLOCK_RATE_1,
1600 					   DUMMY_CLOCK_RATE_2),
1601 			0);
1602 
1603 	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
1604 	KUNIT_ASSERT_GT(test, rate, 0);
1605 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1606 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1607 
1608 	clk_put(clk);
1609 }
1610 
1611 /*
1612  * Test that if our clock has some boundaries and we try to set a rate
1613  * higher than the maximum, the new rate will be within range.
1614  */
1615 static void clk_range_test_set_range_set_rate_higher(struct kunit *test)
1616 {
1617 	struct clk_dummy_context *ctx = test->priv;
1618 	struct clk_hw *hw = &ctx->hw;
1619 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1620 	unsigned long rate;
1621 
1622 	KUNIT_ASSERT_EQ(test,
1623 			clk_set_rate_range(clk,
1624 					   DUMMY_CLOCK_RATE_1,
1625 					   DUMMY_CLOCK_RATE_2),
1626 			0);
1627 
1628 	KUNIT_ASSERT_EQ(test,
1629 			clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1630 			0);
1631 
1632 	rate = clk_get_rate(clk);
1633 	KUNIT_ASSERT_GT(test, rate, 0);
1634 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1635 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1636 
1637 	clk_put(clk);
1638 }
1639 
1640 /*
1641  * Test that if our clock has some boundaries and we try to round and
1642  * set a rate higher than the maximum, the rate returned by
1643  * clk_round_rate() will be consistent with the new rate set by
1644  * clk_set_rate().
1645  */
1646 static void clk_range_test_set_range_set_round_rate_consistent_higher(struct kunit *test)
1647 {
1648 	struct clk_dummy_context *ctx = test->priv;
1649 	struct clk_hw *hw = &ctx->hw;
1650 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1651 	long rounded;
1652 
1653 	KUNIT_ASSERT_EQ(test,
1654 			clk_set_rate_range(clk,
1655 					   DUMMY_CLOCK_RATE_1,
1656 					   DUMMY_CLOCK_RATE_2),
1657 			0);
1658 
1659 	rounded = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
1660 	KUNIT_ASSERT_GT(test, rounded, 0);
1661 
1662 	KUNIT_ASSERT_EQ(test,
1663 			clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1664 			0);
1665 
1666 	KUNIT_EXPECT_EQ(test, rounded, clk_get_rate(clk));
1667 
1668 	clk_put(clk);
1669 }
1670 
1671 /*
1672  * Test that if our clock has a rate lower than the minimum set by a
1673  * call to clk_set_rate_range(), the rate will be raised to match the
1674  * new minimum.
1675  *
1676  * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
1677  * modify the requested rate, which is our case in clk_dummy_rate_ops.
1678  */
1679 static void clk_range_test_set_range_get_rate_raised(struct kunit *test)
1680 {
1681 	struct clk_dummy_context *ctx = test->priv;
1682 	struct clk_hw *hw = &ctx->hw;
1683 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1684 	unsigned long rate;
1685 
1686 	KUNIT_ASSERT_EQ(test,
1687 			clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
1688 			0);
1689 
1690 	KUNIT_ASSERT_EQ(test,
1691 			clk_set_rate_range(clk,
1692 					   DUMMY_CLOCK_RATE_1,
1693 					   DUMMY_CLOCK_RATE_2),
1694 			0);
1695 
1696 	rate = clk_get_rate(clk);
1697 	KUNIT_ASSERT_GT(test, rate, 0);
1698 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
1699 
1700 	clk_put(clk);
1701 }
1702 
1703 /*
1704  * Test that if our clock has a rate higher than the maximum set by a
1705  * call to clk_set_rate_range(), the rate will be lowered to match the
1706  * new maximum.
1707  *
1708  * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
1709  * modify the requested rate, which is our case in clk_dummy_rate_ops.
1710  */
1711 static void clk_range_test_set_range_get_rate_lowered(struct kunit *test)
1712 {
1713 	struct clk_dummy_context *ctx = test->priv;
1714 	struct clk_hw *hw = &ctx->hw;
1715 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1716 	unsigned long rate;
1717 
1718 	KUNIT_ASSERT_EQ(test,
1719 			clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1720 			0);
1721 
1722 	KUNIT_ASSERT_EQ(test,
1723 			clk_set_rate_range(clk,
1724 					   DUMMY_CLOCK_RATE_1,
1725 					   DUMMY_CLOCK_RATE_2),
1726 			0);
1727 
1728 	rate = clk_get_rate(clk);
1729 	KUNIT_ASSERT_GT(test, rate, 0);
1730 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1731 
1732 	clk_put(clk);
1733 }
1734 
/* Test cases run by clk_range_test_suite. */
static struct kunit_case clk_range_test_cases[] = {
	KUNIT_CASE(clk_range_test_set_range),
	KUNIT_CASE(clk_range_test_set_range_invalid),
	KUNIT_CASE(clk_range_test_multiple_disjoints_range),
	KUNIT_CASE(clk_range_test_set_range_round_rate_lower),
	KUNIT_CASE(clk_range_test_set_range_set_rate_lower),
	KUNIT_CASE(clk_range_test_set_range_set_round_rate_consistent_lower),
	KUNIT_CASE(clk_range_test_set_range_round_rate_higher),
	KUNIT_CASE(clk_range_test_set_range_set_rate_higher),
	KUNIT_CASE(clk_range_test_set_range_set_round_rate_consistent_higher),
	KUNIT_CASE(clk_range_test_set_range_get_rate_raised),
	KUNIT_CASE(clk_range_test_set_range_get_rate_lowered),
	{}	/* sentinel */
};
1749 
1750 /*
1751  * Test suite for a basic rate clock, without any parent.
1752  *
1753  * These tests exercise the rate range API: clk_set_rate_range(),
1754  * clk_set_min_rate(), clk_set_max_rate(), clk_drop_range().
1755  */
1756 static struct kunit_suite clk_range_test_suite = {
1757 	.name = "clk-range-test",
1758 	.init = clk_test_init,
1759 	.exit = clk_test_exit,
1760 	.test_cases = clk_range_test_cases,
1761 };
1762 
1763 /*
1764  * Test that if we have several subsequent calls to
1765  * clk_set_rate_range(), the core will reevaluate whether a new rate is
1766  * needed each and every time.
1767  *
1768  * With clk_dummy_maximize_rate_ops, this means that the rate will
1769  * trail along the maximum as it evolves.
1770  */
1771 static void clk_range_test_set_range_rate_maximized(struct kunit *test)
1772 {
1773 	struct clk_dummy_context *ctx = test->priv;
1774 	struct clk_hw *hw = &ctx->hw;
1775 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1776 	unsigned long rate;
1777 
1778 	KUNIT_ASSERT_EQ(test,
1779 			clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1780 			0);
1781 
1782 	KUNIT_ASSERT_EQ(test,
1783 			clk_set_rate_range(clk,
1784 					   DUMMY_CLOCK_RATE_1,
1785 					   DUMMY_CLOCK_RATE_2),
1786 			0);
1787 
1788 	rate = clk_get_rate(clk);
1789 	KUNIT_ASSERT_GT(test, rate, 0);
1790 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1791 
1792 	KUNIT_ASSERT_EQ(test,
1793 			clk_set_rate_range(clk,
1794 					   DUMMY_CLOCK_RATE_1,
1795 					   DUMMY_CLOCK_RATE_2 - 1000),
1796 			0);
1797 
1798 	rate = clk_get_rate(clk);
1799 	KUNIT_ASSERT_GT(test, rate, 0);
1800 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
1801 
1802 	KUNIT_ASSERT_EQ(test,
1803 			clk_set_rate_range(clk,
1804 					   DUMMY_CLOCK_RATE_1,
1805 					   DUMMY_CLOCK_RATE_2),
1806 			0);
1807 
1808 	rate = clk_get_rate(clk);
1809 	KUNIT_ASSERT_GT(test, rate, 0);
1810 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1811 
1812 	clk_put(clk);
1813 }
1814 
1815 /*
1816  * Test that if we have several subsequent calls to
1817  * clk_set_rate_range(), across multiple users, the core will reevaluate
1818  * whether a new rate is needed each and every time.
1819  *
1820  * With clk_dummy_maximize_rate_ops, this means that the rate will
1821  * trail along the maximum as it evolves.
1822  */
1823 static void clk_range_test_multiple_set_range_rate_maximized(struct kunit *test)
1824 {
1825 	struct clk_dummy_context *ctx = test->priv;
1826 	struct clk_hw *hw = &ctx->hw;
1827 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1828 	struct clk *user1, *user2;
1829 	unsigned long rate;
1830 
1831 	user1 = clk_hw_get_clk(hw, NULL);
1832 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
1833 
1834 	user2 = clk_hw_get_clk(hw, NULL);
1835 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
1836 
1837 	KUNIT_ASSERT_EQ(test,
1838 			clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1839 			0);
1840 
1841 	KUNIT_ASSERT_EQ(test,
1842 			clk_set_rate_range(user1,
1843 					   0,
1844 					   DUMMY_CLOCK_RATE_2),
1845 			0);
1846 
1847 	rate = clk_get_rate(clk);
1848 	KUNIT_ASSERT_GT(test, rate, 0);
1849 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1850 
1851 	KUNIT_ASSERT_EQ(test,
1852 			clk_set_rate_range(user2,
1853 					   0,
1854 					   DUMMY_CLOCK_RATE_1),
1855 			0);
1856 
1857 	rate = clk_get_rate(clk);
1858 	KUNIT_ASSERT_GT(test, rate, 0);
1859 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
1860 
1861 	KUNIT_ASSERT_EQ(test,
1862 			clk_drop_range(user2),
1863 			0);
1864 
1865 	rate = clk_get_rate(clk);
1866 	KUNIT_ASSERT_GT(test, rate, 0);
1867 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1868 
1869 	clk_put(user2);
1870 	clk_put(user1);
1871 	clk_put(clk);
1872 }
1873 
1874 /*
1875  * Test that if we have several subsequent calls to
1876  * clk_set_rate_range(), across multiple users, the core will reevaluate
1877  * whether a new rate is needed, including when a user drop its clock.
1878  *
1879  * With clk_dummy_maximize_rate_ops, this means that the rate will
1880  * trail along the maximum as it evolves.
1881  */
1882 static void clk_range_test_multiple_set_range_rate_put_maximized(struct kunit *test)
1883 {
1884 	struct clk_dummy_context *ctx = test->priv;
1885 	struct clk_hw *hw = &ctx->hw;
1886 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1887 	struct clk *user1, *user2;
1888 	unsigned long rate;
1889 
1890 	user1 = clk_hw_get_clk(hw, NULL);
1891 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
1892 
1893 	user2 = clk_hw_get_clk(hw, NULL);
1894 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
1895 
1896 	KUNIT_ASSERT_EQ(test,
1897 			clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1898 			0);
1899 
1900 	KUNIT_ASSERT_EQ(test,
1901 			clk_set_rate_range(user1,
1902 					   0,
1903 					   DUMMY_CLOCK_RATE_2),
1904 			0);
1905 
1906 	rate = clk_get_rate(clk);
1907 	KUNIT_ASSERT_GT(test, rate, 0);
1908 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1909 
1910 	KUNIT_ASSERT_EQ(test,
1911 			clk_set_rate_range(user2,
1912 					   0,
1913 					   DUMMY_CLOCK_RATE_1),
1914 			0);
1915 
1916 	rate = clk_get_rate(clk);
1917 	KUNIT_ASSERT_GT(test, rate, 0);
1918 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
1919 
1920 	clk_put(user2);
1921 
1922 	rate = clk_get_rate(clk);
1923 	KUNIT_ASSERT_GT(test, rate, 0);
1924 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1925 
1926 	clk_put(user1);
1927 	clk_put(clk);
1928 }
1929 
/* Test cases run by clk_range_maximize_test_suite. */
static struct kunit_case clk_range_maximize_test_cases[] = {
	KUNIT_CASE(clk_range_test_set_range_rate_maximized),
	KUNIT_CASE(clk_range_test_multiple_set_range_rate_maximized),
	KUNIT_CASE(clk_range_test_multiple_set_range_rate_put_maximized),
	{}	/* sentinel */
};
1936 
1937 /*
1938  * Test suite for a basic rate clock, without any parent.
1939  *
1940  * These tests exercise the rate range API: clk_set_rate_range(),
1941  * clk_set_min_rate(), clk_set_max_rate(), clk_drop_range(), with a
1942  * driver that will always try to run at the highest possible rate.
1943  */
1944 static struct kunit_suite clk_range_maximize_test_suite = {
1945 	.name = "clk-range-maximize-test",
1946 	.init = clk_maximize_test_init,
1947 	.exit = clk_test_exit,
1948 	.test_cases = clk_range_maximize_test_cases,
1949 };
1950 
1951 /*
1952  * Test that if we have several subsequent calls to
1953  * clk_set_rate_range(), the core will reevaluate whether a new rate is
1954  * needed each and every time.
1955  *
1956  * With clk_dummy_minimize_rate_ops, this means that the rate will
1957  * trail along the minimum as it evolves.
1958  */
1959 static void clk_range_test_set_range_rate_minimized(struct kunit *test)
1960 {
1961 	struct clk_dummy_context *ctx = test->priv;
1962 	struct clk_hw *hw = &ctx->hw;
1963 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1964 	unsigned long rate;
1965 
1966 	KUNIT_ASSERT_EQ(test,
1967 			clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
1968 			0);
1969 
1970 	KUNIT_ASSERT_EQ(test,
1971 			clk_set_rate_range(clk,
1972 					   DUMMY_CLOCK_RATE_1,
1973 					   DUMMY_CLOCK_RATE_2),
1974 			0);
1975 
1976 	rate = clk_get_rate(clk);
1977 	KUNIT_ASSERT_GT(test, rate, 0);
1978 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
1979 
1980 	KUNIT_ASSERT_EQ(test,
1981 			clk_set_rate_range(clk,
1982 					   DUMMY_CLOCK_RATE_1 + 1000,
1983 					   DUMMY_CLOCK_RATE_2),
1984 			0);
1985 
1986 	rate = clk_get_rate(clk);
1987 	KUNIT_ASSERT_GT(test, rate, 0);
1988 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
1989 
1990 	KUNIT_ASSERT_EQ(test,
1991 			clk_set_rate_range(clk,
1992 					   DUMMY_CLOCK_RATE_1,
1993 					   DUMMY_CLOCK_RATE_2),
1994 			0);
1995 
1996 	rate = clk_get_rate(clk);
1997 	KUNIT_ASSERT_GT(test, rate, 0);
1998 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
1999 
2000 	clk_put(clk);
2001 }
2002 
2003 /*
2004  * Test that if we have several subsequent calls to
2005  * clk_set_rate_range(), across multiple users, the core will reevaluate
2006  * whether a new rate is needed each and every time.
2007  *
2008  * With clk_dummy_minimize_rate_ops, this means that the rate will
2009  * trail along the minimum as it evolves.
2010  */
2011 static void clk_range_test_multiple_set_range_rate_minimized(struct kunit *test)
2012 {
2013 	struct clk_dummy_context *ctx = test->priv;
2014 	struct clk_hw *hw = &ctx->hw;
2015 	struct clk *clk = clk_hw_get_clk(hw, NULL);
2016 	struct clk *user1, *user2;
2017 	unsigned long rate;
2018 
2019 	user1 = clk_hw_get_clk(hw, NULL);
2020 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
2021 
2022 	user2 = clk_hw_get_clk(hw, NULL);
2023 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
2024 
2025 	KUNIT_ASSERT_EQ(test,
2026 			clk_set_rate_range(user1,
2027 					   DUMMY_CLOCK_RATE_1,
2028 					   ULONG_MAX),
2029 			0);
2030 
2031 	rate = clk_get_rate(clk);
2032 	KUNIT_ASSERT_GT(test, rate, 0);
2033 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
2034 
2035 	KUNIT_ASSERT_EQ(test,
2036 			clk_set_rate_range(user2,
2037 					   DUMMY_CLOCK_RATE_2,
2038 					   ULONG_MAX),
2039 			0);
2040 
2041 	rate = clk_get_rate(clk);
2042 	KUNIT_ASSERT_GT(test, rate, 0);
2043 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
2044 
2045 	KUNIT_ASSERT_EQ(test,
2046 			clk_drop_range(user2),
2047 			0);
2048 
2049 	rate = clk_get_rate(clk);
2050 	KUNIT_ASSERT_GT(test, rate, 0);
2051 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
2052 
2053 	clk_put(user2);
2054 	clk_put(user1);
2055 	clk_put(clk);
2056 }
2057 
2058 /*
2059  * Test that if we have several subsequent calls to
2060  * clk_set_rate_range(), across multiple users, the core will reevaluate
2061  * whether a new rate is needed, including when a user drops its clock.
2062  *
2063  * With clk_dummy_minimize_rate_ops, this means that the rate will
2064  * trail along the minimum as it evolves.
2065  */
static void clk_range_test_multiple_set_range_rate_put_minimized(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	struct clk *user1, *user2;
	unsigned long rate;

	/* Two independent users of the same underlying clock. */
	user1 = clk_hw_get_clk(hw, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);

	user2 = clk_hw_get_clk(hw, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);

	/* user1 requires at least DUMMY_CLOCK_RATE_1 ... */
	KUNIT_ASSERT_EQ(test,
			clk_set_rate_range(user1,
					   DUMMY_CLOCK_RATE_1,
					   ULONG_MAX),
			0);

	/* ... and the minimizing driver settles on exactly that minimum. */
	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);

	/* user2 raises the aggregate minimum to DUMMY_CLOCK_RATE_2. */
	KUNIT_ASSERT_EQ(test,
			clk_set_rate_range(user2,
					   DUMMY_CLOCK_RATE_2,
					   ULONG_MAX),
			0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);

	/*
	 * Releasing user2's reference (rather than an explicit
	 * clk_drop_range()) must also drop its range, so the rate falls
	 * back to user1's minimum.
	 */
	clk_put(user2);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);

	clk_put(user1);
	clk_put(clk);
}
2109 
/* Test cases exercising rate ranges against a rate-minimizing clock. */
static struct kunit_case clk_range_minimize_test_cases[] = {
	KUNIT_CASE(clk_range_test_set_range_rate_minimized),
	KUNIT_CASE(clk_range_test_multiple_set_range_rate_minimized),
	KUNIT_CASE(clk_range_test_multiple_set_range_rate_put_minimized),
	{}
};
2116 
2117 /*
2118  * Test suite for a basic rate clock, without any parent.
2119  *
2120  * These tests exercise the rate range API: clk_set_rate_range(),
2121  * clk_set_min_rate(), clk_set_max_rate(), clk_drop_range(), with a
2122  * driver that will always try to run at the lowest possible rate.
2123  */
/* Suite wiring: shared init/exit helpers defined earlier in this file. */
static struct kunit_suite clk_range_minimize_test_suite = {
	.name = "clk-range-minimize-test",
	.init = clk_minimize_test_init,
	.exit = clk_test_exit,
	.test_cases = clk_range_minimize_test_cases,
};
2130 
/*
 * Context for the leaf-mux tests: a mux with two parents (mux_ctx),
 * plus a single leaf clock (hw) whose parent is that mux.
 */
struct clk_leaf_mux_ctx {
	struct clk_multiple_parent_ctx mux_ctx;
	struct clk_hw hw;
};
2135 
2136 static int
2137 clk_leaf_mux_set_rate_parent_test_init(struct kunit *test)
2138 {
2139 	struct clk_leaf_mux_ctx *ctx;
2140 	const char *top_parents[2] = { "parent-0", "parent-1" };
2141 	int ret;
2142 
2143 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
2144 	if (!ctx)
2145 		return -ENOMEM;
2146 	test->priv = ctx;
2147 
2148 	ctx->mux_ctx.parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
2149 								    &clk_dummy_rate_ops,
2150 								    0);
2151 	ctx->mux_ctx.parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
2152 	ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[0].hw);
2153 	if (ret)
2154 		return ret;
2155 
2156 	ctx->mux_ctx.parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
2157 								    &clk_dummy_rate_ops,
2158 								    0);
2159 	ctx->mux_ctx.parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
2160 	ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[1].hw);
2161 	if (ret)
2162 		return ret;
2163 
2164 	ctx->mux_ctx.current_parent = 0;
2165 	ctx->mux_ctx.hw.init = CLK_HW_INIT_PARENTS("test-mux", top_parents,
2166 						   &clk_multiple_parents_mux_ops,
2167 						   0);
2168 	ret = clk_hw_register(NULL, &ctx->mux_ctx.hw);
2169 	if (ret)
2170 		return ret;
2171 
2172 	ctx->hw.init = CLK_HW_INIT_HW("test-clock", &ctx->mux_ctx.hw,
2173 				      &clk_dummy_single_parent_ops,
2174 				      CLK_SET_RATE_PARENT);
2175 	ret = clk_hw_register(NULL, &ctx->hw);
2176 	if (ret)
2177 		return ret;
2178 
2179 	return 0;
2180 }
2181 
/* Tear down the leaf-mux clock tree: children before their parents. */
static void clk_leaf_mux_set_rate_parent_test_exit(struct kunit *test)
{
	struct clk_leaf_mux_ctx *ctx = test->priv;

	clk_hw_unregister(&ctx->hw);
	clk_hw_unregister(&ctx->mux_ctx.hw);
	clk_hw_unregister(&ctx->mux_ctx.parents_ctx[0].hw);
	clk_hw_unregister(&ctx->mux_ctx.parents_ctx[1].hw);
}
2191 
2192 /*
2193  * Test that, for a clock that will forward any rate request to its
2194  * parent, the rate request structure returned by __clk_determine_rate
2195  * is sane and will be what we expect.
2196  */
static void clk_leaf_mux_set_rate_parent_determine_rate(struct kunit *test)
{
	struct clk_leaf_mux_ctx *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	struct clk_rate_request req;
	unsigned long rate;
	int ret;

	/* The mux starts on parent-0, so the leaf runs at its rate. */
	rate = clk_get_rate(clk);
	KUNIT_ASSERT_EQ(test, rate, DUMMY_CLOCK_RATE_1);

	/* Build a request for parent-1's rate and run it through the leaf. */
	clk_hw_init_rate_request(hw, &req, DUMMY_CLOCK_RATE_2);

	ret = __clk_determine_rate(hw, &req);
	KUNIT_ASSERT_EQ(test, ret, 0);

	/*
	 * The leaf has CLK_SET_RATE_PARENT, so the request must come
	 * back with the rate untouched, the parent expected to provide
	 * that same rate, and the mux selected as the best parent.
	 */
	KUNIT_EXPECT_EQ(test, req.rate, DUMMY_CLOCK_RATE_2);
	KUNIT_EXPECT_EQ(test, req.best_parent_rate, DUMMY_CLOCK_RATE_2);
	KUNIT_EXPECT_PTR_EQ(test, req.best_parent_hw, &ctx->mux_ctx.hw);

	clk_put(clk);
}
2220 
/* Test cases for rate requests forwarded through a mux by a leaf clock. */
static struct kunit_case clk_leaf_mux_set_rate_parent_test_cases[] = {
	KUNIT_CASE(clk_leaf_mux_set_rate_parent_determine_rate),
	{}
};
2225 
2226 /*
2227  * Test suite for a clock whose parent is a mux with multiple parents.
2228  * The leaf clock has CLK_SET_RATE_PARENT, and will forward rate
2229  * requests to the mux, which will then select which parent is the best
2230  * fit for a given rate.
2231  *
2232  * These tests exercise the behaviour of muxes, and the proper selection
2233  * of parents.
2234  */
/* Suite wiring for the leaf-mux tests declared above. */
static struct kunit_suite clk_leaf_mux_set_rate_parent_test_suite = {
	.name = "clk-leaf-mux-set-rate-parent",
	.init = clk_leaf_mux_set_rate_parent_test_init,
	.exit = clk_leaf_mux_set_rate_parent_test_exit,
	.test_cases = clk_leaf_mux_set_rate_parent_test_cases,
};
2241 
/*
 * One rate-change event as recorded by the notifier callback. The test
 * sleeps on wq until done is set by the callback, then checks the
 * old/new rates that were reported.
 */
struct clk_mux_notifier_rate_change {
	bool done;
	unsigned long old_rate;
	unsigned long new_rate;
	wait_queue_head_t wq;
};
2248 
/* Context for the mux notifier tests. */
struct clk_mux_notifier_ctx {
	struct clk_multiple_parent_ctx mux_ctx;
	struct clk *clk;	/* reference to the mux clock, taken in init */
	struct notifier_block clk_nb;	/* notifier registered on clk */
	struct clk_mux_notifier_rate_change pre_rate_change;
	struct clk_mux_notifier_rate_change post_rate_change;
};
2256 
/* Maximum time to wait for a notifier callback to fire, in milliseconds. */
#define NOTIFIER_TIMEOUT_MS 100
2258 
2259 static int clk_mux_notifier_callback(struct notifier_block *nb,
2260 				     unsigned long action, void *data)
2261 {
2262 	struct clk_notifier_data *clk_data = data;
2263 	struct clk_mux_notifier_ctx *ctx = container_of(nb,
2264 							struct clk_mux_notifier_ctx,
2265 							clk_nb);
2266 
2267 	if (action & PRE_RATE_CHANGE) {
2268 		ctx->pre_rate_change.old_rate = clk_data->old_rate;
2269 		ctx->pre_rate_change.new_rate = clk_data->new_rate;
2270 		ctx->pre_rate_change.done = true;
2271 		wake_up_interruptible(&ctx->pre_rate_change.wq);
2272 	}
2273 
2274 	if (action & POST_RATE_CHANGE) {
2275 		ctx->post_rate_change.old_rate = clk_data->old_rate;
2276 		ctx->post_rate_change.new_rate = clk_data->new_rate;
2277 		ctx->post_rate_change.done = true;
2278 		wake_up_interruptible(&ctx->post_rate_change.wq);
2279 	}
2280 
2281 	return 0;
2282 }
2283 
2284 static int clk_mux_notifier_test_init(struct kunit *test)
2285 {
2286 	struct clk_mux_notifier_ctx *ctx;
2287 	const char *top_parents[2] = { "parent-0", "parent-1" };
2288 	int ret;
2289 
2290 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
2291 	if (!ctx)
2292 		return -ENOMEM;
2293 	test->priv = ctx;
2294 	ctx->clk_nb.notifier_call = clk_mux_notifier_callback;
2295 	init_waitqueue_head(&ctx->pre_rate_change.wq);
2296 	init_waitqueue_head(&ctx->post_rate_change.wq);
2297 
2298 	ctx->mux_ctx.parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
2299 								    &clk_dummy_rate_ops,
2300 								    0);
2301 	ctx->mux_ctx.parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
2302 	ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[0].hw);
2303 	if (ret)
2304 		return ret;
2305 
2306 	ctx->mux_ctx.parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
2307 								    &clk_dummy_rate_ops,
2308 								    0);
2309 	ctx->mux_ctx.parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
2310 	ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[1].hw);
2311 	if (ret)
2312 		return ret;
2313 
2314 	ctx->mux_ctx.current_parent = 0;
2315 	ctx->mux_ctx.hw.init = CLK_HW_INIT_PARENTS("test-mux", top_parents,
2316 						   &clk_multiple_parents_mux_ops,
2317 						   0);
2318 	ret = clk_hw_register(NULL, &ctx->mux_ctx.hw);
2319 	if (ret)
2320 		return ret;
2321 
2322 	ctx->clk = clk_hw_get_clk(&ctx->mux_ctx.hw, NULL);
2323 	ret = clk_notifier_register(ctx->clk, &ctx->clk_nb);
2324 	if (ret)
2325 		return ret;
2326 
2327 	return 0;
2328 }
2329 
/*
 * Tear down the notifier test: unregister the notifier and release the
 * clk reference before unregistering the hws, children before parents.
 */
static void clk_mux_notifier_test_exit(struct kunit *test)
{
	struct clk_mux_notifier_ctx *ctx = test->priv;
	struct clk *clk = ctx->clk;

	clk_notifier_unregister(clk, &ctx->clk_nb);
	clk_put(clk);

	clk_hw_unregister(&ctx->mux_ctx.hw);
	clk_hw_unregister(&ctx->mux_ctx.parents_ctx[0].hw);
	clk_hw_unregister(&ctx->mux_ctx.parents_ctx[1].hw);
}
2342 
2343 /*
2344  * Test that if we have a notifier registered on a mux, the core
2345  * will notify us when we switch to another parent, and with the proper
2346  * old and new rates.
2347  */
static void clk_mux_notifier_set_parent_test(struct kunit *test)
{
	struct clk_mux_notifier_ctx *ctx = test->priv;
	struct clk_hw *hw = &ctx->mux_ctx.hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	struct clk *new_parent = clk_hw_get_clk(&ctx->mux_ctx.parents_ctx[1].hw, NULL);
	int ret;

	/* Reparent the mux from parent-0 to parent-1. */
	ret = clk_set_parent(clk, new_parent);
	KUNIT_ASSERT_EQ(test, ret, 0);

	/*
	 * A positive return value means the pre-change callback fired
	 * before the timeout (0 would mean the wait timed out).
	 */
	ret = wait_event_interruptible_timeout(ctx->pre_rate_change.wq,
					       ctx->pre_rate_change.done,
					       msecs_to_jiffies(NOTIFIER_TIMEOUT_MS));
	KUNIT_ASSERT_GT(test, ret, 0);

	/* The event must carry parent-0's rate as old, parent-1's as new. */
	KUNIT_EXPECT_EQ(test, ctx->pre_rate_change.old_rate, DUMMY_CLOCK_RATE_1);
	KUNIT_EXPECT_EQ(test, ctx->pre_rate_change.new_rate, DUMMY_CLOCK_RATE_2);

	/* Same check for the post-change notification. */
	ret = wait_event_interruptible_timeout(ctx->post_rate_change.wq,
					       ctx->post_rate_change.done,
					       msecs_to_jiffies(NOTIFIER_TIMEOUT_MS));
	KUNIT_ASSERT_GT(test, ret, 0);

	KUNIT_EXPECT_EQ(test, ctx->post_rate_change.old_rate, DUMMY_CLOCK_RATE_1);
	KUNIT_EXPECT_EQ(test, ctx->post_rate_change.new_rate, DUMMY_CLOCK_RATE_2);

	clk_put(new_parent);
	clk_put(clk);
}
2378 
/* Test cases for clock notifiers on a mux. */
static struct kunit_case clk_mux_notifier_test_cases[] = {
	KUNIT_CASE(clk_mux_notifier_set_parent_test),
	{}
};
2383 
2384 /*
2385  * Test suite for a mux with multiple parents, and a notifier registered
2386  * on the mux.
2387  *
2388  * These tests exercise the behaviour of notifiers.
2389  */
/* Suite wiring for the mux notifier tests declared above. */
static struct kunit_suite clk_mux_notifier_test_suite = {
	.name = "clk-mux-notifier",
	.init = clk_mux_notifier_test_init,
	.exit = clk_mux_notifier_test_exit,
	.test_cases = clk_mux_notifier_test_cases,
};
2396 
/*
 * Register every suite in this file with KUnit; several of the suites
 * listed here are defined in earlier parts of the file.
 */
kunit_test_suites(
	&clk_leaf_mux_set_rate_parent_test_suite,
	&clk_test_suite,
	&clk_multiple_parents_mux_test_suite,
	&clk_mux_notifier_test_suite,
	&clk_orphan_transparent_multiple_parent_mux_test_suite,
	&clk_orphan_transparent_single_parent_test_suite,
	&clk_orphan_two_level_root_last_test_suite,
	&clk_range_test_suite,
	&clk_range_maximize_test_suite,
	&clk_range_minimize_test_suite,
	&clk_single_parent_mux_test_suite,
	&clk_uncached_test_suite
);
MODULE_LICENSE("GPL v2");
2412