xref: /openbmc/linux/drivers/clk/clk_test.c (revision aebddfe2)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Kunit test for clk rate management
4  */
5 #include <linux/clk.h>
6 #include <linux/clk-provider.h>
7 
8 /* Needed for clk_hw_get_clk() */
9 #include "clk.h"
10 
11 #include <kunit/test.h>
12 
13 #define DUMMY_CLOCK_INIT_RATE	(42 * 1000 * 1000)
14 #define DUMMY_CLOCK_RATE_1	(142 * 1000 * 1000)
15 #define DUMMY_CLOCK_RATE_2	(242 * 1000 * 1000)
16 
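/*
 * The dummy clock used throughout this file is a pure software model: its
 * rate lives in this context structure and the clk_ops below simply read
 * or update ctx->rate, so the tests can both drive and observe the
 * pretend hardware state directly.
 */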
17 struct clk_dummy_context {
18 	struct clk_hw hw;
19 	unsigned long rate;
20 };
21 
22 static unsigned long clk_dummy_recalc_rate(struct clk_hw *hw,
23 					   unsigned long parent_rate)
24 {
25 	struct clk_dummy_context *ctx =
26 		container_of(hw, struct clk_dummy_context, hw);
27 
28 	return ctx->rate;
29 }
30 
31 static int clk_dummy_determine_rate(struct clk_hw *hw,
32 				    struct clk_rate_request *req)
33 {
34 	/* Just return the same rate without modifying it */
35 	return 0;
36 }
37 
38 static int clk_dummy_maximize_rate(struct clk_hw *hw,
39 				   struct clk_rate_request *req)
40 {
41 	/*
42 	 * If there's a maximum set, always run the clock at the maximum
43 	 * allowed.
44 	 */
45 	if (req->max_rate < ULONG_MAX)
46 		req->rate = req->max_rate;
47 
48 	return 0;
49 }
50 
51 static int clk_dummy_minimize_rate(struct clk_hw *hw,
52 				   struct clk_rate_request *req)
53 {
54 	/*
55 	 * If there's a minimum set, always run the clock at the minimum
56 	 * allowed.
57 	 */
58 	if (req->min_rate > 0)
59 		req->rate = req->min_rate;
60 
61 	return 0;
62 }
63 
64 static int clk_dummy_set_rate(struct clk_hw *hw,
65 			      unsigned long rate,
66 			      unsigned long parent_rate)
67 {
68 	struct clk_dummy_context *ctx =
69 		container_of(hw, struct clk_dummy_context, hw);
70 
71 	ctx->rate = rate;
72 	return 0;
73 }
74 
75 static int clk_dummy_single_set_parent(struct clk_hw *hw, u8 index)
76 {
77 	if (index >= clk_hw_get_num_parents(hw))
78 		return -EINVAL;
79 
80 	return 0;
81 }
82 
83 static u8 clk_dummy_single_get_parent(struct clk_hw *hw)
84 {
85 	return 0;
86 }
87 
88 static const struct clk_ops clk_dummy_rate_ops = {
89 	.recalc_rate = clk_dummy_recalc_rate,
90 	.determine_rate = clk_dummy_determine_rate,
91 	.set_rate = clk_dummy_set_rate,
92 };
93 
94 static const struct clk_ops clk_dummy_maximize_rate_ops = {
95 	.recalc_rate = clk_dummy_recalc_rate,
96 	.determine_rate = clk_dummy_maximize_rate,
97 	.set_rate = clk_dummy_set_rate,
98 };
99 
100 static const struct clk_ops clk_dummy_minimize_rate_ops = {
101 	.recalc_rate = clk_dummy_recalc_rate,
102 	.determine_rate = clk_dummy_minimize_rate,
103 	.set_rate = clk_dummy_set_rate,
104 };
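/*
 * The maximize/minimize variants above model hardware that always runs as
 * fast (or as slow) as its consumers allow: whenever a boundary is set,
 * their determine_rate hook forces the request to the current maximum (or
 * minimum), which is what the clk_maximize_test_init() and
 * clk_minimize_test_init() fixtures below rely on.
 */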
105 
106 static const struct clk_ops clk_dummy_single_parent_ops = {
107 	/*
108 	 * FIXME: Even though we should probably be able to use
109 	 * __clk_mux_determine_rate() here, if we use it and call
110 	 * clk_round_rate() or clk_set_rate() with a rate lower than
111 	 * what all the parents can provide, it will return -EINVAL.
112 	 *
113 	 * This is due to the fact that it has the undocumented
114 	 * behaviour to always pick up the closest rate higher than the
115 	 * requested rate. If we get something lower, it thus considers
116 	 * that it's not acceptable and will return an error.
117 	 *
118 	 * It's somewhat inconsistent and creates a weird threshold
119 	 * between rates above the parent rate which would be rounded to
120 	 * what the parent can provide, but rates below will simply
121 	 * return an error.
122 	 */
123 	.determine_rate = __clk_mux_determine_rate_closest,
124 	.set_parent = clk_dummy_single_set_parent,
125 	.get_parent = clk_dummy_single_get_parent,
126 };
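/*
 * Concretely, per the FIXME above: with, say, a single parent fixed at
 * 142 MHz, __clk_mux_determine_rate() would round a 150 MHz request down
 * to 142 MHz but fail a 100 MHz request with -EINVAL, while the _closest
 * variant used here rounds both requests to 142 MHz.
 */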
127 
128 struct clk_multiple_parent_ctx {
129 	struct clk_dummy_context parents_ctx[2];
130 	struct clk_hw hw;
131 	u8 current_parent;
132 };
133 
134 static int clk_multiple_parents_mux_set_parent(struct clk_hw *hw, u8 index)
135 {
136 	struct clk_multiple_parent_ctx *ctx =
137 		container_of(hw, struct clk_multiple_parent_ctx, hw);
138 
139 	if (index >= clk_hw_get_num_parents(hw))
140 		return -EINVAL;
141 
142 	ctx->current_parent = index;
143 
144 	return 0;
145 }
146 
147 static u8 clk_multiple_parents_mux_get_parent(struct clk_hw *hw)
148 {
149 	struct clk_multiple_parent_ctx *ctx =
150 		container_of(hw, struct clk_multiple_parent_ctx, hw);
151 
152 	return ctx->current_parent;
153 }
154 
155 static const struct clk_ops clk_multiple_parents_mux_ops = {
156 	.get_parent = clk_multiple_parents_mux_get_parent,
157 	.set_parent = clk_multiple_parents_mux_set_parent,
158 	.determine_rate = __clk_mux_determine_rate_closest,
159 };
160 
161 static const struct clk_ops clk_multiple_parents_no_reparent_mux_ops = {
162 	.determine_rate = clk_hw_determine_rate_no_reparent,
163 	.get_parent = clk_multiple_parents_mux_get_parent,
164 	.set_parent = clk_multiple_parents_mux_set_parent,
165 };
166 
167 static int clk_test_init_with_ops(struct kunit *test, const struct clk_ops *ops)
168 {
169 	struct clk_dummy_context *ctx;
170 	struct clk_init_data init = { };
171 	int ret;
172 
173 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
174 	if (!ctx)
175 		return -ENOMEM;
176 	ctx->rate = DUMMY_CLOCK_INIT_RATE;
177 	test->priv = ctx;
178 
179 	init.name = "test_dummy_rate";
180 	init.ops = ops;
181 	ctx->hw.init = &init;
182 
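	/*
	 * Note: init lives on the stack; clk_hw_register() is expected
	 * to copy whatever it needs out of the clk_init_data, so the
	 * pointer does not have to stay valid after registration
	 * returns.
	 */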
183 	ret = clk_hw_register(NULL, &ctx->hw);
184 	if (ret)
185 		return ret;
186 
187 	return 0;
188 }
189 
190 static int clk_test_init(struct kunit *test)
191 {
192 	return clk_test_init_with_ops(test, &clk_dummy_rate_ops);
193 }
194 
195 static int clk_maximize_test_init(struct kunit *test)
196 {
197 	return clk_test_init_with_ops(test, &clk_dummy_maximize_rate_ops);
198 }
199 
200 static int clk_minimize_test_init(struct kunit *test)
201 {
202 	return clk_test_init_with_ops(test, &clk_dummy_minimize_rate_ops);
203 }
204 
205 static void clk_test_exit(struct kunit *test)
206 {
207 	struct clk_dummy_context *ctx = test->priv;
208 
209 	clk_hw_unregister(&ctx->hw);
210 }
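/*
 * clk_test_exit() only needs to unregister the clock: the context itself
 * was allocated with kunit_kzalloc(), so KUnit frees it automatically once
 * the test finishes. The same exit helper is reused by the uncached suite
 * further down.
 */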
211 
212 /*
213  * Test that the actual rate matches what is returned by clk_get_rate()
214  */
215 static void clk_test_get_rate(struct kunit *test)
216 {
217 	struct clk_dummy_context *ctx = test->priv;
218 	struct clk_hw *hw = &ctx->hw;
219 	struct clk *clk = clk_hw_get_clk(hw, NULL);
220 	unsigned long rate;
221 
222 	rate = clk_get_rate(clk);
223 	KUNIT_ASSERT_GT(test, rate, 0);
224 	KUNIT_EXPECT_EQ(test, rate, ctx->rate);
225 
226 	clk_put(clk);
227 }
228 
229 /*
230  * Test that, after a call to clk_set_rate(), the rate returned by
231  * clk_get_rate() matches.
232  *
233  * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
234  * modify the requested rate, which is the case for clk_dummy_rate_ops.
235  */
236 static void clk_test_set_get_rate(struct kunit *test)
237 {
238 	struct clk_dummy_context *ctx = test->priv;
239 	struct clk_hw *hw = &ctx->hw;
240 	struct clk *clk = clk_hw_get_clk(hw, NULL);
241 	unsigned long rate;
242 
243 	KUNIT_ASSERT_EQ(test,
244 			clk_set_rate(clk, DUMMY_CLOCK_RATE_1),
245 			0);
246 
247 	rate = clk_get_rate(clk);
248 	KUNIT_ASSERT_GT(test, rate, 0);
249 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
250 
251 	clk_put(clk);
252 }
253 
254 /*
255  * Test that, after several calls to clk_set_rate(), the rate returned
256  * by clk_get_rate() matches the last one.
257  *
258  * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
259  * modify the requested rate, which is the case for clk_dummy_rate_ops.
260  */
261 static void clk_test_set_set_get_rate(struct kunit *test)
262 {
263 	struct clk_dummy_context *ctx = test->priv;
264 	struct clk_hw *hw = &ctx->hw;
265 	struct clk *clk = clk_hw_get_clk(hw, NULL);
266 	unsigned long rate;
267 
268 	KUNIT_ASSERT_EQ(test,
269 			clk_set_rate(clk, DUMMY_CLOCK_RATE_1),
270 			0);
271 
272 	KUNIT_ASSERT_EQ(test,
273 			clk_set_rate(clk, DUMMY_CLOCK_RATE_2),
274 			0);
275 
276 	rate = clk_get_rate(clk);
277 	KUNIT_ASSERT_GT(test, rate, 0);
278 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
279 
280 	clk_put(clk);
281 }
282 
283 /*
284  * Test that clk_round_rate() and clk_set_rate() are consistent and will
285  * return the same frequency.
286  */
287 static void clk_test_round_set_get_rate(struct kunit *test)
288 {
289 	struct clk_dummy_context *ctx = test->priv;
290 	struct clk_hw *hw = &ctx->hw;
291 	struct clk *clk = clk_hw_get_clk(hw, NULL);
292 	unsigned long set_rate;
293 	long rounded_rate;
294 
295 	rounded_rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1);
296 	KUNIT_ASSERT_GT(test, rounded_rate, 0);
297 	KUNIT_EXPECT_EQ(test, rounded_rate, DUMMY_CLOCK_RATE_1);
298 
299 	KUNIT_ASSERT_EQ(test,
300 			clk_set_rate(clk, DUMMY_CLOCK_RATE_1),
301 			0);
302 
303 	set_rate = clk_get_rate(clk);
304 	KUNIT_ASSERT_GT(test, set_rate, 0);
305 	KUNIT_EXPECT_EQ(test, rounded_rate, set_rate);
306 
307 	clk_put(clk);
308 }
309 
310 static struct kunit_case clk_test_cases[] = {
311 	KUNIT_CASE(clk_test_get_rate),
312 	KUNIT_CASE(clk_test_set_get_rate),
313 	KUNIT_CASE(clk_test_set_set_get_rate),
314 	KUNIT_CASE(clk_test_round_set_get_rate),
315 	{}
316 };
317 
318 /*
319  * Test suite for a basic rate clock, without any parent.
320  *
321  * These tests exercise the rate API with simple scenarios
322  */
323 static struct kunit_suite clk_test_suite = {
324 	.name = "clk-test",
325 	.init = clk_test_init,
326 	.exit = clk_test_exit,
327 	.test_cases = clk_test_cases,
328 };
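/*
 * For reference, a suite like this is handed to the KUnit runner with a
 * kunit_test_suites() call; the file presumably does that once for all of
 * its suites past the end of this excerpt. A minimal sketch:
 *
 *	kunit_test_suites(&clk_test_suite);
 */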
329 
330 static int clk_uncached_test_init(struct kunit *test)
331 {
332 	struct clk_dummy_context *ctx;
333 	int ret;
334 
335 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
336 	if (!ctx)
337 		return -ENOMEM;
338 	test->priv = ctx;
339 
340 	ctx->rate = DUMMY_CLOCK_INIT_RATE;
341 	ctx->hw.init = CLK_HW_INIT_NO_PARENT("test-clk",
342 					     &clk_dummy_rate_ops,
343 					     CLK_GET_RATE_NOCACHE);
344 
345 	ret = clk_hw_register(NULL, &ctx->hw);
346 	if (ret)
347 		return ret;
348 
349 	return 0;
350 }
351 
352 /*
353  * Test that for an uncached clock, the clock framework doesn't cache
354  * the rate and clk_get_rate() will return the underlying clock rate
355  * even if it changed.
356  */
357 static void clk_test_uncached_get_rate(struct kunit *test)
358 {
359 	struct clk_dummy_context *ctx = test->priv;
360 	struct clk_hw *hw = &ctx->hw;
361 	struct clk *clk = clk_hw_get_clk(hw, NULL);
362 	unsigned long rate;
363 
364 	rate = clk_get_rate(clk);
365 	KUNIT_ASSERT_GT(test, rate, 0);
366 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_INIT_RATE);
367 
368 	/* We change the rate behind the clock framework's back */
369 	ctx->rate = DUMMY_CLOCK_RATE_1;
370 	rate = clk_get_rate(clk);
371 	KUNIT_ASSERT_GT(test, rate, 0);
372 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
373 
374 	clk_put(clk);
375 }
376 
377 /*
378  * Test that for an uncached clock, clk_set_rate_range() will work
379  * properly if the rate hasn't changed.
380  */
381 static void clk_test_uncached_set_range(struct kunit *test)
382 {
383 	struct clk_dummy_context *ctx = test->priv;
384 	struct clk_hw *hw = &ctx->hw;
385 	struct clk *clk = clk_hw_get_clk(hw, NULL);
386 	unsigned long rate;
387 
388 	KUNIT_ASSERT_EQ(test,
389 			clk_set_rate_range(clk,
390 					   DUMMY_CLOCK_RATE_1,
391 					   DUMMY_CLOCK_RATE_2),
392 			0);
393 
394 	rate = clk_get_rate(clk);
395 	KUNIT_ASSERT_GT(test, rate, 0);
396 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
397 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
398 
399 	clk_put(clk);
400 }
401 
402 /*
403  * Test that for an uncached clock, clk_set_rate_range() will work
404  * properly if the rate has changed in hardware.
405  *
406  * In this case, it means that if the rate wasn't initially in the range
407  * we're trying to set, but got changed at some point into the range
408  * without the kernel knowing about it, its rate shouldn't be affected.
409  */
410 static void clk_test_uncached_updated_rate_set_range(struct kunit *test)
411 {
412 	struct clk_dummy_context *ctx = test->priv;
413 	struct clk_hw *hw = &ctx->hw;
414 	struct clk *clk = clk_hw_get_clk(hw, NULL);
415 	unsigned long rate;
416 
417 	/* We change the rate behind the clock framework's back */
418 	ctx->rate = DUMMY_CLOCK_RATE_1 + 1000;
419 	KUNIT_ASSERT_EQ(test,
420 			clk_set_rate_range(clk,
421 					   DUMMY_CLOCK_RATE_1,
422 					   DUMMY_CLOCK_RATE_2),
423 			0);
424 
425 	rate = clk_get_rate(clk);
426 	KUNIT_ASSERT_GT(test, rate, 0);
427 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
428 
429 	clk_put(clk);
430 }
431 
432 static struct kunit_case clk_uncached_test_cases[] = {
433 	KUNIT_CASE(clk_test_uncached_get_rate),
434 	KUNIT_CASE(clk_test_uncached_set_range),
435 	KUNIT_CASE(clk_test_uncached_updated_rate_set_range),
436 	{}
437 };
438 
439 /*
440  * Test suite for a basic, uncached, rate clock, without any parent.
441  *
442  * These tests exercise the rate API with simple scenarios
443  */
444 static struct kunit_suite clk_uncached_test_suite = {
445 	.name = "clk-uncached-test",
446 	.init = clk_uncached_test_init,
447 	.exit = clk_test_exit,
448 	.test_cases = clk_uncached_test_cases,
449 };
450 
451 static int
452 clk_multiple_parents_mux_test_init(struct kunit *test)
453 {
454 	struct clk_multiple_parent_ctx *ctx;
455 	const char *parents[2] = { "parent-0", "parent-1"};
456 	int ret;
457 
458 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
459 	if (!ctx)
460 		return -ENOMEM;
461 	test->priv = ctx;
462 
463 	ctx->parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
464 							    &clk_dummy_rate_ops,
465 							    0);
466 	ctx->parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
467 	ret = clk_hw_register(NULL, &ctx->parents_ctx[0].hw);
468 	if (ret)
469 		return ret;
470 
471 	ctx->parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
472 							    &clk_dummy_rate_ops,
473 							    0);
474 	ctx->parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
475 	ret = clk_hw_register(NULL, &ctx->parents_ctx[1].hw);
476 	if (ret)
477 		return ret;
478 
479 	ctx->current_parent = 0;
480 	ctx->hw.init = CLK_HW_INIT_PARENTS("test-mux", parents,
481 					   &clk_multiple_parents_mux_ops,
482 					   CLK_SET_RATE_PARENT);
483 	ret = clk_hw_register(NULL, &ctx->hw);
484 	if (ret)
485 		return ret;
486 
487 	return 0;
488 }
489 
490 static void
491 clk_multiple_parents_mux_test_exit(struct kunit *test)
492 {
493 	struct clk_multiple_parent_ctx *ctx = test->priv;
494 
495 	clk_hw_unregister(&ctx->hw);
496 	clk_hw_unregister(&ctx->parents_ctx[0].hw);
497 	clk_hw_unregister(&ctx->parents_ctx[1].hw);
498 }
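/*
 * Teardown unregisters the child mux before its parents, so no registered
 * clock is ever left pointing at a parent that has already been removed.
 */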
499 
500 /*
501  * Test that for a clock with multiple parents, clk_get_parent()
502  * actually returns the current one.
503  */
504 static void
505 clk_test_multiple_parents_mux_get_parent(struct kunit *test)
506 {
507 	struct clk_multiple_parent_ctx *ctx = test->priv;
508 	struct clk_hw *hw = &ctx->hw;
509 	struct clk *clk = clk_hw_get_clk(hw, NULL);
510 	struct clk *parent = clk_hw_get_clk(&ctx->parents_ctx[0].hw, NULL);
511 
512 	KUNIT_EXPECT_TRUE(test, clk_is_match(clk_get_parent(clk), parent));
513 
514 	clk_put(parent);
515 	clk_put(clk);
516 }
517 
518 /*
519  * Test that for a clock with a multiple parents, clk_has_parent()
520  * actually reports all of them as parents.
521  */
522 static void
523 clk_test_multiple_parents_mux_has_parent(struct kunit *test)
524 {
525 	struct clk_multiple_parent_ctx *ctx = test->priv;
526 	struct clk_hw *hw = &ctx->hw;
527 	struct clk *clk = clk_hw_get_clk(hw, NULL);
528 	struct clk *parent;
529 
530 	parent = clk_hw_get_clk(&ctx->parents_ctx[0].hw, NULL);
531 	KUNIT_EXPECT_TRUE(test, clk_has_parent(clk, parent));
532 	clk_put(parent);
533 
534 	parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
535 	KUNIT_EXPECT_TRUE(test, clk_has_parent(clk, parent));
536 	clk_put(parent);
537 
538 	clk_put(clk);
539 }
540 
541 /*
542  * Test that for a clock with multiple parents, if we set a range on
543  * that clock and the parent is changed, its rate after the reparenting
544  * is still within the range we asked for.
545  *
546  * FIXME: clk_set_parent() only does the reparenting but doesn't
547  * reevaluate whether the new clock rate is within its boundaries or
548  * not.
549  */
550 static void
551 clk_test_multiple_parents_mux_set_range_set_parent_get_rate(struct kunit *test)
552 {
553 	struct clk_multiple_parent_ctx *ctx = test->priv;
554 	struct clk_hw *hw = &ctx->hw;
555 	struct clk *clk = clk_hw_get_clk(hw, NULL);
556 	struct clk *parent1, *parent2;
557 	unsigned long rate;
558 	int ret;
559 
560 	kunit_skip(test, "This needs to be fixed in the core.");
561 
562 	parent1 = clk_hw_get_clk(&ctx->parents_ctx[0].hw, NULL);
563 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent1);
564 	KUNIT_ASSERT_TRUE(test, clk_is_match(clk_get_parent(clk), parent1));
565 
566 	parent2 = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
567 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent2);
568 
569 	ret = clk_set_rate(parent1, DUMMY_CLOCK_RATE_1);
570 	KUNIT_ASSERT_EQ(test, ret, 0);
571 
572 	ret = clk_set_rate(parent2, DUMMY_CLOCK_RATE_2);
573 	KUNIT_ASSERT_EQ(test, ret, 0);
574 
575 	ret = clk_set_rate_range(clk,
576 				 DUMMY_CLOCK_RATE_1 - 1000,
577 				 DUMMY_CLOCK_RATE_1 + 1000);
578 	KUNIT_ASSERT_EQ(test, ret, 0);
579 
580 	ret = clk_set_parent(clk, parent2);
581 	KUNIT_ASSERT_EQ(test, ret, 0);
582 
583 	rate = clk_get_rate(clk);
584 	KUNIT_ASSERT_GT(test, rate, 0);
585 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 - 1000);
586 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
587 
588 	clk_put(parent2);
589 	clk_put(parent1);
590 	clk_put(clk);
591 }
592 
593 static struct kunit_case clk_multiple_parents_mux_test_cases[] = {
594 	KUNIT_CASE(clk_test_multiple_parents_mux_get_parent),
595 	KUNIT_CASE(clk_test_multiple_parents_mux_has_parent),
596 	KUNIT_CASE(clk_test_multiple_parents_mux_set_range_set_parent_get_rate),
597 	{}
598 };
599 
600 /*
601  * Test suite for a basic mux clock with two parents, with
602  * CLK_SET_RATE_PARENT on the child.
603  *
604  * These tests exercise the consumer API and check that the state of the
605  * child and parents are sane and consistent.
606  */
607 static struct kunit_suite
608 clk_multiple_parents_mux_test_suite = {
609 	.name = "clk-multiple-parents-mux-test",
610 	.init = clk_multiple_parents_mux_test_init,
611 	.exit = clk_multiple_parents_mux_test_exit,
612 	.test_cases = clk_multiple_parents_mux_test_cases,
613 };
614 
615 static int
616 clk_orphan_transparent_multiple_parent_mux_test_init(struct kunit *test)
617 {
618 	struct clk_multiple_parent_ctx *ctx;
619 	const char *parents[2] = { "missing-parent", "proper-parent"};
620 	int ret;
621 
622 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
623 	if (!ctx)
624 		return -ENOMEM;
625 	test->priv = ctx;
626 
627 	ctx->parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("proper-parent",
628 							    &clk_dummy_rate_ops,
629 							    0);
630 	ctx->parents_ctx[1].rate = DUMMY_CLOCK_INIT_RATE;
631 	ret = clk_hw_register(NULL, &ctx->parents_ctx[1].hw);
632 	if (ret)
633 		return ret;
634 
635 	ctx->hw.init = CLK_HW_INIT_PARENTS("test-orphan-mux", parents,
636 					   &clk_multiple_parents_mux_ops,
637 					   CLK_SET_RATE_PARENT);
638 	ret = clk_hw_register(NULL, &ctx->hw);
639 	if (ret)
640 		return ret;
641 
642 	return 0;
643 }
644 
645 static void
646 clk_orphan_transparent_multiple_parent_mux_test_exit(struct kunit *test)
647 {
648 	struct clk_multiple_parent_ctx *ctx = test->priv;
649 
650 	clk_hw_unregister(&ctx->hw);
651 	clk_hw_unregister(&ctx->parents_ctx[1].hw);
652 }
653 
654 /*
655  * Test that, for a mux whose current parent hasn't been registered yet and is
656  * thus orphan, clk_get_parent() will return NULL.
657  */
658 static void
659 clk_test_orphan_transparent_multiple_parent_mux_get_parent(struct kunit *test)
660 {
661 	struct clk_multiple_parent_ctx *ctx = test->priv;
662 	struct clk_hw *hw = &ctx->hw;
663 	struct clk *clk = clk_hw_get_clk(hw, NULL);
664 
665 	KUNIT_EXPECT_PTR_EQ(test, clk_get_parent(clk), NULL);
666 
667 	clk_put(clk);
668 }
669 
670 /*
671  * Test that, for a mux whose current parent hasn't been registered yet,
672  * calling clk_set_parent() to a valid parent will properly update the
673  * mux parent and its orphan status.
674  */
675 static void
676 clk_test_orphan_transparent_multiple_parent_mux_set_parent(struct kunit *test)
677 {
678 	struct clk_multiple_parent_ctx *ctx = test->priv;
679 	struct clk_hw *hw = &ctx->hw;
680 	struct clk *clk = clk_hw_get_clk(hw, NULL);
681 	struct clk *parent, *new_parent;
682 	int ret;
683 
684 	parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
685 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
686 
687 	ret = clk_set_parent(clk, parent);
688 	KUNIT_ASSERT_EQ(test, ret, 0);
689 
690 	new_parent = clk_get_parent(clk);
691 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
692 	KUNIT_EXPECT_TRUE(test, clk_is_match(parent, new_parent));
693 
694 	clk_put(parent);
695 	clk_put(clk);
696 }
697 
698 /*
699  * Test that, for a mux that started orphan but got switched to a valid
700  * parent, calling clk_drop_range() on the mux won't affect the parent
701  * rate.
702  */
703 static void
704 clk_test_orphan_transparent_multiple_parent_mux_set_parent_drop_range(struct kunit *test)
705 {
706 	struct clk_multiple_parent_ctx *ctx = test->priv;
707 	struct clk_hw *hw = &ctx->hw;
708 	struct clk *clk = clk_hw_get_clk(hw, NULL);
709 	struct clk *parent;
710 	unsigned long parent_rate, new_parent_rate;
711 	int ret;
712 
713 	parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
714 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
715 
716 	parent_rate = clk_get_rate(parent);
717 	KUNIT_ASSERT_GT(test, parent_rate, 0);
718 
719 	ret = clk_set_parent(clk, parent);
720 	KUNIT_ASSERT_EQ(test, ret, 0);
721 
722 	ret = clk_drop_range(clk);
723 	KUNIT_ASSERT_EQ(test, ret, 0);
724 
725 	new_parent_rate = clk_get_rate(clk);
726 	KUNIT_ASSERT_GT(test, new_parent_rate, 0);
727 	KUNIT_EXPECT_EQ(test, parent_rate, new_parent_rate);
728 
729 	clk_put(parent);
730 	clk_put(clk);
731 }
732 
733 /*
734  * Test that, for a mux that started orphan but got switched to a valid
735  * parent, the rate of the mux and its new parent are consistent.
736  */
737 static void
738 clk_test_orphan_transparent_multiple_parent_mux_set_parent_get_rate(struct kunit *test)
739 {
740 	struct clk_multiple_parent_ctx *ctx = test->priv;
741 	struct clk_hw *hw = &ctx->hw;
742 	struct clk *clk = clk_hw_get_clk(hw, NULL);
743 	struct clk *parent;
744 	unsigned long parent_rate, rate;
745 	int ret;
746 
747 	parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
748 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
749 
750 	parent_rate = clk_get_rate(parent);
751 	KUNIT_ASSERT_GT(test, parent_rate, 0);
752 
753 	ret = clk_set_parent(clk, parent);
754 	KUNIT_ASSERT_EQ(test, ret, 0);
755 
756 	rate = clk_get_rate(clk);
757 	KUNIT_ASSERT_GT(test, rate, 0);
758 	KUNIT_EXPECT_EQ(test, parent_rate, rate);
759 
760 	clk_put(parent);
761 	clk_put(clk);
762 }
763 
764 /*
765  * Test that, for a mux that started orphan but got switched to a valid
766  * parent, calling clk_put() on the mux won't affect the parent rate.
767  */
768 static void
769 clk_test_orphan_transparent_multiple_parent_mux_set_parent_put(struct kunit *test)
770 {
771 	struct clk_multiple_parent_ctx *ctx = test->priv;
772 	struct clk *clk, *parent;
773 	unsigned long parent_rate, new_parent_rate;
774 	int ret;
775 
776 	parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
777 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
778 
779 	clk = clk_hw_get_clk(&ctx->hw, NULL);
780 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, clk);
781 
782 	parent_rate = clk_get_rate(parent);
783 	KUNIT_ASSERT_GT(test, parent_rate, 0);
784 
785 	ret = clk_set_parent(clk, parent);
786 	KUNIT_ASSERT_EQ(test, ret, 0);
787 
788 	clk_put(clk);
789 
790 	new_parent_rate = clk_get_rate(parent);
791 	KUNIT_ASSERT_GT(test, new_parent_rate, 0);
792 	KUNIT_EXPECT_EQ(test, parent_rate, new_parent_rate);
793 
794 	clk_put(parent);
795 }
796 
797 /*
798  * Test that, for a mux that started orphan but got switched to a valid
799  * parent, calling clk_set_rate_range() will affect the parent state if
800  * its rate is out of range.
801  */
802 static void
803 clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_modified(struct kunit *test)
804 {
805 	struct clk_multiple_parent_ctx *ctx = test->priv;
806 	struct clk_hw *hw = &ctx->hw;
807 	struct clk *clk = clk_hw_get_clk(hw, NULL);
808 	struct clk *parent;
809 	unsigned long rate;
810 	int ret;
811 
812 	parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
813 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
814 
815 	ret = clk_set_parent(clk, parent);
816 	KUNIT_ASSERT_EQ(test, ret, 0);
817 
818 	ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
819 	KUNIT_ASSERT_EQ(test, ret, 0);
820 
821 	rate = clk_get_rate(clk);
822 	KUNIT_ASSERT_GT(test, rate, 0);
823 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
824 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
825 
826 	clk_put(parent);
827 	clk_put(clk);
828 }
829 
830 /*
831  * Test that, for a mux that started orphan but got switched to a valid
832  * parent, calling clk_set_rate_range() won't affect the parent state if
833  * its rate is within range.
834  */
835 static void
836 clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_untouched(struct kunit *test)
837 {
838 	struct clk_multiple_parent_ctx *ctx = test->priv;
839 	struct clk_hw *hw = &ctx->hw;
840 	struct clk *clk = clk_hw_get_clk(hw, NULL);
841 	struct clk *parent;
842 	unsigned long parent_rate, new_parent_rate;
843 	int ret;
844 
845 	parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
846 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
847 
848 	parent_rate = clk_get_rate(parent);
849 	KUNIT_ASSERT_GT(test, parent_rate, 0);
850 
851 	ret = clk_set_parent(clk, parent);
852 	KUNIT_ASSERT_EQ(test, ret, 0);
853 
854 	ret = clk_set_rate_range(clk,
855 				 DUMMY_CLOCK_INIT_RATE - 1000,
856 				 DUMMY_CLOCK_INIT_RATE + 1000);
857 	KUNIT_ASSERT_EQ(test, ret, 0);
858 
859 	new_parent_rate = clk_get_rate(parent);
860 	KUNIT_ASSERT_GT(test, new_parent_rate, 0);
861 	KUNIT_EXPECT_EQ(test, parent_rate, new_parent_rate);
862 
863 	clk_put(parent);
864 	clk_put(clk);
865 }
866 
867 /*
868  * Test that, for a mux whose current parent hasn't been registered yet,
869  * calling clk_set_rate_range() will succeed, and will be taken into
870  * account when rounding a rate.
871  */
872 static void
873 clk_test_orphan_transparent_multiple_parent_mux_set_range_round_rate(struct kunit *test)
874 {
875 	struct clk_multiple_parent_ctx *ctx = test->priv;
876 	struct clk_hw *hw = &ctx->hw;
877 	struct clk *clk = clk_hw_get_clk(hw, NULL);
878 	long rate;
879 	int ret;
880 
881 	ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
882 	KUNIT_ASSERT_EQ(test, ret, 0);
883 
884 	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
885 	KUNIT_ASSERT_GT(test, rate, 0);
886 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
887 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
888 
889 	clk_put(clk);
890 }
891 
892 /*
893  * Test that, for a mux that started orphan, was assigned a rate and
894  * then got switched to a valid parent, its rate is eventually within
895  * range.
896  *
897  * FIXME: Even though we update the rate as part of clk_set_parent(), we
898  * don't evaluate whether that new rate is within range and needs to be
899  * adjusted.
900  */
901 static void
902 clk_test_orphan_transparent_multiple_parent_mux_set_range_set_parent_get_rate(struct kunit *test)
903 {
904 	struct clk_multiple_parent_ctx *ctx = test->priv;
905 	struct clk_hw *hw = &ctx->hw;
906 	struct clk *clk = clk_hw_get_clk(hw, NULL);
907 	struct clk *parent;
908 	unsigned long rate;
909 	int ret;
910 
911 	kunit_skip(test, "This needs to be fixed in the core.");
912 
913 	clk_hw_set_rate_range(hw, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
914 
915 	parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
916 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
917 
918 	ret = clk_set_parent(clk, parent);
919 	KUNIT_ASSERT_EQ(test, ret, 0);
920 
921 	rate = clk_get_rate(clk);
922 	KUNIT_ASSERT_GT(test, rate, 0);
923 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
924 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
925 
926 	clk_put(parent);
927 	clk_put(clk);
928 }
929 
930 static struct kunit_case clk_orphan_transparent_multiple_parent_mux_test_cases[] = {
931 	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_get_parent),
932 	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent),
933 	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_drop_range),
934 	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_get_rate),
935 	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_put),
936 	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_modified),
937 	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_untouched),
938 	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_range_round_rate),
939 	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_range_set_parent_get_rate),
940 	{}
941 };
942 
943 /*
944  * Test suite for a basic mux clock with two parents. The default parent
945  * isn't registered, only the second parent is. By default, the clock
946  * will thus be orphan.
947  *
948  * These tests exercise the behaviour of the consumer API when dealing
949  * with an orphan clock, and how we deal with the transition to a valid
950  * parent.
951  */
952 static struct kunit_suite clk_orphan_transparent_multiple_parent_mux_test_suite = {
953 	.name = "clk-orphan-transparent-multiple-parent-mux-test",
954 	.init = clk_orphan_transparent_multiple_parent_mux_test_init,
955 	.exit = clk_orphan_transparent_multiple_parent_mux_test_exit,
956 	.test_cases = clk_orphan_transparent_multiple_parent_mux_test_cases,
957 };
958 
959 struct clk_single_parent_ctx {
960 	struct clk_dummy_context parent_ctx;
961 	struct clk_hw hw;
962 };
963 
964 static int clk_single_parent_mux_test_init(struct kunit *test)
965 {
966 	struct clk_single_parent_ctx *ctx;
967 	int ret;
968 
969 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
970 	if (!ctx)
971 		return -ENOMEM;
972 	test->priv = ctx;
973 
974 	ctx->parent_ctx.rate = DUMMY_CLOCK_INIT_RATE;
975 	ctx->parent_ctx.hw.init =
976 		CLK_HW_INIT_NO_PARENT("parent-clk",
977 				      &clk_dummy_rate_ops,
978 				      0);
979 
980 	ret = clk_hw_register(NULL, &ctx->parent_ctx.hw);
981 	if (ret)
982 		return ret;
983 
984 	ctx->hw.init = CLK_HW_INIT("test-clk", "parent-clk",
985 				   &clk_dummy_single_parent_ops,
986 				   CLK_SET_RATE_PARENT);
987 
988 	ret = clk_hw_register(NULL, &ctx->hw);
989 	if (ret)
990 		return ret;
991 
992 	return 0;
993 }
994 
995 static void
996 clk_single_parent_mux_test_exit(struct kunit *test)
997 {
998 	struct clk_single_parent_ctx *ctx = test->priv;
999 
1000 	clk_hw_unregister(&ctx->hw);
1001 	clk_hw_unregister(&ctx->parent_ctx.hw);
1002 }
1003 
1004 /*
1005  * Test that for a clock with a single parent, clk_get_parent() actually
1006  * returns the parent.
1007  */
1008 static void
1009 clk_test_single_parent_mux_get_parent(struct kunit *test)
1010 {
1011 	struct clk_single_parent_ctx *ctx = test->priv;
1012 	struct clk_hw *hw = &ctx->hw;
1013 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1014 	struct clk *parent = clk_hw_get_clk(&ctx->parent_ctx.hw, NULL);
1015 
1016 	KUNIT_EXPECT_TRUE(test, clk_is_match(clk_get_parent(clk), parent));
1017 
1018 	clk_put(parent);
1019 	clk_put(clk);
1020 }
1021 
1022 /*
1023  * Test that for a clock with a single parent, clk_has_parent() actually
1024  * reports it as a parent.
1025  */
1026 static void
1027 clk_test_single_parent_mux_has_parent(struct kunit *test)
1028 {
1029 	struct clk_single_parent_ctx *ctx = test->priv;
1030 	struct clk_hw *hw = &ctx->hw;
1031 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1032 	struct clk *parent = clk_hw_get_clk(&ctx->parent_ctx.hw, NULL);
1033 
1034 	KUNIT_EXPECT_TRUE(test, clk_has_parent(clk, parent));
1035 
1036 	clk_put(parent);
1037 	clk_put(clk);
1038 }
1039 
1040 /*
1041  * Test that for a clock that can't modify its rate and with a single
1042  * parent, if we set disjoint ranges on the parent and then the child,
1043  * the second call will return an error.
1044  *
1045  * FIXME: clk_set_rate_range() only considers the current clock when
1046  * evaluating whether ranges are disjoint and not the upstream clocks'
1047  * ranges.
1048  */
1049 static void
1050 clk_test_single_parent_mux_set_range_disjoint_child_last(struct kunit *test)
1051 {
1052 	struct clk_single_parent_ctx *ctx = test->priv;
1053 	struct clk_hw *hw = &ctx->hw;
1054 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1055 	struct clk *parent;
1056 	int ret;
1057 
1058 	kunit_skip(test, "This needs to be fixed in the core.");
1059 
1060 	parent = clk_get_parent(clk);
1061 	KUNIT_ASSERT_PTR_NE(test, parent, NULL);
1062 
1063 	ret = clk_set_rate_range(parent, 1000, 2000);
1064 	KUNIT_ASSERT_EQ(test, ret, 0);
1065 
1066 	ret = clk_set_rate_range(clk, 3000, 4000);
1067 	KUNIT_EXPECT_LT(test, ret, 0);
1068 
1069 	clk_put(clk);
1070 }
1071 
1072 /*
1073  * Test that for a clock that can't modify its rate and with a single
1074  * parent, if we set disjoint ranges on the child and then the parent,
1075  * the second call will return an error.
1076  *
1077  * FIXME: clk_set_rate_range() only considers the current clock when
1078  * evaluating whether ranges are disjoint and not the downstream clocks'
1079  * ranges.
1080  */
1081 static void
1082 clk_test_single_parent_mux_set_range_disjoint_parent_last(struct kunit *test)
1083 {
1084 	struct clk_single_parent_ctx *ctx = test->priv;
1085 	struct clk_hw *hw = &ctx->hw;
1086 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1087 	struct clk *parent;
1088 	int ret;
1089 
1090 	kunit_skip(test, "This needs to be fixed in the core.");
1091 
1092 	parent = clk_get_parent(clk);
1093 	KUNIT_ASSERT_PTR_NE(test, parent, NULL);
1094 
1095 	ret = clk_set_rate_range(clk, 1000, 2000);
1096 	KUNIT_ASSERT_EQ(test, ret, 0);
1097 
1098 	ret = clk_set_rate_range(parent, 3000, 4000);
1099 	KUNIT_EXPECT_LT(test, ret, 0);
1100 
1101 	clk_put(clk);
1102 }
1103 
1104 /*
1105  * Test that for a clock that can't modify its rate and with a single
1106  * parent, if we set a range on the parent and then call
1107  * clk_round_rate(), the boundaries of the parent are taken into
1108  * account.
1109  */
1110 static void
1111 clk_test_single_parent_mux_set_range_round_rate_parent_only(struct kunit *test)
1112 {
1113 	struct clk_single_parent_ctx *ctx = test->priv;
1114 	struct clk_hw *hw = &ctx->hw;
1115 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1116 	struct clk *parent;
1117 	long rate;
1118 	int ret;
1119 
1120 	parent = clk_get_parent(clk);
1121 	KUNIT_ASSERT_PTR_NE(test, parent, NULL);
1122 
1123 	ret = clk_set_rate_range(parent, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
1124 	KUNIT_ASSERT_EQ(test, ret, 0);
1125 
1126 	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
1127 	KUNIT_ASSERT_GT(test, rate, 0);
1128 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1129 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1130 
1131 	clk_put(clk);
1132 }
1133 
1134 /*
1135  * Test that for a clock that can't modify its rate and with a single
1136  * parent, if we set a range on the parent and a more restrictive one on
1137  * the child, and then call clk_round_rate(), the boundaries of the
1138  * two clocks are taken into account.
1139  */
1140 static void
1141 clk_test_single_parent_mux_set_range_round_rate_child_smaller(struct kunit *test)
1142 {
1143 	struct clk_single_parent_ctx *ctx = test->priv;
1144 	struct clk_hw *hw = &ctx->hw;
1145 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1146 	struct clk *parent;
1147 	long rate;
1148 	int ret;
1149 
1150 	parent = clk_get_parent(clk);
1151 	KUNIT_ASSERT_PTR_NE(test, parent, NULL);
1152 
1153 	ret = clk_set_rate_range(parent, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
1154 	KUNIT_ASSERT_EQ(test, ret, 0);
1155 
1156 	ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1 + 1000, DUMMY_CLOCK_RATE_2 - 1000);
1157 	KUNIT_ASSERT_EQ(test, ret, 0);
1158 
1159 	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
1160 	KUNIT_ASSERT_GT(test, rate, 0);
1161 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
1162 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
1163 
1164 	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
1165 	KUNIT_ASSERT_GT(test, rate, 0);
1166 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
1167 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
1168 
1169 	clk_put(clk);
1170 }
1171 
1172 /*
1173  * Test that for a clock that can't modify its rate and with a single
1174  * parent, if we set a range on the child and a more restrictive one on
1175  * the parent, and then call clk_round_rate(), the boundaries of the
1176  * two clocks are taken into account.
1177  */
1178 static void
1179 clk_test_single_parent_mux_set_range_round_rate_parent_smaller(struct kunit *test)
1180 {
1181 	struct clk_single_parent_ctx *ctx = test->priv;
1182 	struct clk_hw *hw = &ctx->hw;
1183 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1184 	struct clk *parent;
1185 	long rate;
1186 	int ret;
1187 
1188 	parent = clk_get_parent(clk);
1189 	KUNIT_ASSERT_PTR_NE(test, parent, NULL);
1190 
1191 	ret = clk_set_rate_range(parent, DUMMY_CLOCK_RATE_1 + 1000, DUMMY_CLOCK_RATE_2 - 1000);
1192 	KUNIT_ASSERT_EQ(test, ret, 0);
1193 
1194 	ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
1195 	KUNIT_ASSERT_EQ(test, ret, 0);
1196 
1197 	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
1198 	KUNIT_ASSERT_GT(test, rate, 0);
1199 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
1200 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
1201 
1202 	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
1203 	KUNIT_ASSERT_GT(test, rate, 0);
1204 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
1205 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
1206 
1207 	clk_put(clk);
1208 }
1209 
1210 static struct kunit_case clk_single_parent_mux_test_cases[] = {
1211 	KUNIT_CASE(clk_test_single_parent_mux_get_parent),
1212 	KUNIT_CASE(clk_test_single_parent_mux_has_parent),
1213 	KUNIT_CASE(clk_test_single_parent_mux_set_range_disjoint_child_last),
1214 	KUNIT_CASE(clk_test_single_parent_mux_set_range_disjoint_parent_last),
1215 	KUNIT_CASE(clk_test_single_parent_mux_set_range_round_rate_child_smaller),
1216 	KUNIT_CASE(clk_test_single_parent_mux_set_range_round_rate_parent_only),
1217 	KUNIT_CASE(clk_test_single_parent_mux_set_range_round_rate_parent_smaller),
1218 	{}
1219 };
1220 
1221 /*
1222  * Test suite for a basic mux clock with one parent, with
1223  * CLK_SET_RATE_PARENT on the child.
1224  *
1225  * These tests exercise the consumer API and check that the state of the
1226  * child and parent are sane and consistent.
1227  */
1228 static struct kunit_suite
1229 clk_single_parent_mux_test_suite = {
1230 	.name = "clk-single-parent-mux-test",
1231 	.init = clk_single_parent_mux_test_init,
1232 	.exit = clk_single_parent_mux_test_exit,
1233 	.test_cases = clk_single_parent_mux_test_cases,
1234 };
1235 
1236 static int clk_orphan_transparent_single_parent_mux_test_init(struct kunit *test)
1237 {
1238 	struct clk_single_parent_ctx *ctx;
1239 	struct clk_init_data init = { };
1240 	const char * const parents[] = { "orphan_parent" };
1241 	int ret;
1242 
1243 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
1244 	if (!ctx)
1245 		return -ENOMEM;
1246 	test->priv = ctx;
1247 
1248 	init.name = "test_orphan_dummy_parent";
1249 	init.ops = &clk_dummy_single_parent_ops;
1250 	init.parent_names = parents;
1251 	init.num_parents = ARRAY_SIZE(parents);
1252 	init.flags = CLK_SET_RATE_PARENT;
1253 	ctx->hw.init = &init;
1254 
1255 	ret = clk_hw_register(NULL, &ctx->hw);
1256 	if (ret)
1257 		return ret;
1258 
1259 	memset(&init, 0, sizeof(init));
1260 	init.name = "orphan_parent";
1261 	init.ops = &clk_dummy_rate_ops;
1262 	ctx->parent_ctx.hw.init = &init;
1263 	ctx->parent_ctx.rate = DUMMY_CLOCK_INIT_RATE;
1264 
1265 	ret = clk_hw_register(NULL, &ctx->parent_ctx.hw);
1266 	if (ret)
1267 		return ret;
1268 
1269 	return 0;
1270 }
1271 
1272 /*
1273  * Test that a mux-only clock, with an initial rate within a range,
1274  * will still have the same rate after the range has been enforced.
1275  *
1276  * See:
1277  * https://lore.kernel.org/linux-clk/7720158d-10a7-a17b-73a4-a8615c9c6d5c@collabora.com/
1278  */
1279 static void clk_test_orphan_transparent_parent_mux_set_range(struct kunit *test)
1280 {
1281 	struct clk_single_parent_ctx *ctx = test->priv;
1282 	struct clk_hw *hw = &ctx->hw;
1283 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1284 	unsigned long rate, new_rate;
1285 
1286 	rate = clk_get_rate(clk);
1287 	KUNIT_ASSERT_GT(test, rate, 0);
1288 
1289 	KUNIT_ASSERT_EQ(test,
1290 			clk_set_rate_range(clk,
1291 					   ctx->parent_ctx.rate - 1000,
1292 					   ctx->parent_ctx.rate + 1000),
1293 			0);
1294 
1295 	new_rate = clk_get_rate(clk);
1296 	KUNIT_ASSERT_GT(test, new_rate, 0);
1297 	KUNIT_EXPECT_EQ(test, rate, new_rate);
1298 
1299 	clk_put(clk);
1300 }
1301 
1302 static struct kunit_case clk_orphan_transparent_single_parent_mux_test_cases[] = {
1303 	KUNIT_CASE(clk_test_orphan_transparent_parent_mux_set_range),
1304 	{}
1305 };
1306 
1307 /*
1308  * Test suite for a basic mux clock with one parent. The parent is
1309  * registered after its child. The clock will thus be an orphan when
1310  * registered, but will no longer be when the tests run.
1311  *
1312  * These tests make sure a clock that used to be orphan has a sane,
1313  * consistent, behaviour.
1314  */
1315 static struct kunit_suite clk_orphan_transparent_single_parent_test_suite = {
1316 	.name = "clk-orphan-transparent-single-parent-test",
1317 	.init = clk_orphan_transparent_single_parent_mux_test_init,
1318 	.exit = clk_single_parent_mux_test_exit,
1319 	.test_cases = clk_orphan_transparent_single_parent_mux_test_cases,
1320 };
1321 
1322 struct clk_single_parent_two_lvl_ctx {
1323 	struct clk_dummy_context parent_parent_ctx;
1324 	struct clk_dummy_context parent_ctx;
1325 	struct clk_hw hw;
1326 };
1327 
1328 static int
1329 clk_orphan_two_level_root_last_test_init(struct kunit *test)
1330 {
1331 	struct clk_single_parent_two_lvl_ctx *ctx;
1332 	int ret;
1333 
1334 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
1335 	if (!ctx)
1336 		return -ENOMEM;
1337 	test->priv = ctx;
1338 
1339 	ctx->parent_ctx.hw.init =
1340 		CLK_HW_INIT("intermediate-parent",
1341 			    "root-parent",
1342 			    &clk_dummy_single_parent_ops,
1343 			    CLK_SET_RATE_PARENT);
1344 	ret = clk_hw_register(NULL, &ctx->parent_ctx.hw);
1345 	if (ret)
1346 		return ret;
1347 
1348 	ctx->hw.init =
1349 		CLK_HW_INIT("test-clk", "intermediate-parent",
1350 			    &clk_dummy_single_parent_ops,
1351 			    CLK_SET_RATE_PARENT);
1352 	ret = clk_hw_register(NULL, &ctx->hw);
1353 	if (ret)
1354 		return ret;
1355 
1356 	ctx->parent_parent_ctx.rate = DUMMY_CLOCK_INIT_RATE;
1357 	ctx->parent_parent_ctx.hw.init =
1358 		CLK_HW_INIT_NO_PARENT("root-parent",
1359 				      &clk_dummy_rate_ops,
1360 				      0);
1361 	ret = clk_hw_register(NULL, &ctx->parent_parent_ctx.hw);
1362 	if (ret)
1363 		return ret;
1364 
1365 	return 0;
1366 }
1367 
1368 static void
1369 clk_orphan_two_level_root_last_test_exit(struct kunit *test)
1370 {
1371 	struct clk_single_parent_two_lvl_ctx *ctx = test->priv;
1372 
1373 	clk_hw_unregister(&ctx->hw);
1374 	clk_hw_unregister(&ctx->parent_ctx.hw);
1375 	clk_hw_unregister(&ctx->parent_parent_ctx.hw);
1376 }
1377 
1378 /*
1379  * Test that, for a clock whose parent used to be orphan, clk_get_rate()
1380  * will return the proper rate.
1381  */
1382 static void
1383 clk_orphan_two_level_root_last_test_get_rate(struct kunit *test)
1384 {
1385 	struct clk_single_parent_two_lvl_ctx *ctx = test->priv;
1386 	struct clk_hw *hw = &ctx->hw;
1387 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1388 	unsigned long rate;
1389 
1390 	rate = clk_get_rate(clk);
1391 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_INIT_RATE);
1392 
1393 	clk_put(clk);
1394 }
1395 
1396 /*
1397  * Test that, for a clock whose parent used to be orphan,
1398  * clk_set_rate_range() won't affect its rate if it is already within
1399  * range.
1400  *
1401  * See (for Exynos 4210):
1402  * https://lore.kernel.org/linux-clk/366a0232-bb4a-c357-6aa8-636e398e05eb@samsung.com/
1403  */
1404 static void
1405 clk_orphan_two_level_root_last_test_set_range(struct kunit *test)
1406 {
1407 	struct clk_single_parent_two_lvl_ctx *ctx = test->priv;
1408 	struct clk_hw *hw = &ctx->hw;
1409 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1410 	unsigned long rate;
1411 	int ret;
1412 
1413 	ret = clk_set_rate_range(clk,
1414 				 DUMMY_CLOCK_INIT_RATE - 1000,
1415 				 DUMMY_CLOCK_INIT_RATE + 1000);
1416 	KUNIT_ASSERT_EQ(test, ret, 0);
1417 
1418 	rate = clk_get_rate(clk);
1419 	KUNIT_ASSERT_GT(test, rate, 0);
1420 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_INIT_RATE);
1421 
1422 	clk_put(clk);
1423 }
1424 
1425 static struct kunit_case
1426 clk_orphan_two_level_root_last_test_cases[] = {
1427 	KUNIT_CASE(clk_orphan_two_level_root_last_test_get_rate),
1428 	KUNIT_CASE(clk_orphan_two_level_root_last_test_set_range),
1429 	{}
1430 };
1431 
1432 /*
1433  * Test suite for a basic, transparent, clock with a parent that is also
1434  * such a clock. The parent's parent is registered last, while the
1435  * parent and its child are registered in that order. The intermediate
1436  * and leaf clocks will thus be orphan when registered, but the leaf
1437  * clock itself will always have its parent and will never be
1438  * reparented. Indeed, it's only orphan because its parent is.
1439  *
1440  * These tests exercise the behaviour of the consumer API when dealing
1441  * with an orphan clock, and how we deal with the transition to a valid
1442  * parent.
1443  */
1444 static struct kunit_suite
1445 clk_orphan_two_level_root_last_test_suite = {
1446 	.name = "clk-orphan-two-level-root-last-test",
1447 	.init = clk_orphan_two_level_root_last_test_init,
1448 	.exit = clk_orphan_two_level_root_last_test_exit,
1449 	.test_cases = clk_orphan_two_level_root_last_test_cases,
1450 };
1451 
1452 /*
1453  * Test that clk_set_rate_range won't return an error for a valid range
1454  * and that it will make sure the rate of the clock is within the
1455  * boundaries.
1456  */
1457 static void clk_range_test_set_range(struct kunit *test)
1458 {
1459 	struct clk_dummy_context *ctx = test->priv;
1460 	struct clk_hw *hw = &ctx->hw;
1461 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1462 	unsigned long rate;
1463 
1464 	KUNIT_ASSERT_EQ(test,
1465 			clk_set_rate_range(clk,
1466 					   DUMMY_CLOCK_RATE_1,
1467 					   DUMMY_CLOCK_RATE_2),
1468 			0);
1469 
1470 	rate = clk_get_rate(clk);
1471 	KUNIT_ASSERT_GT(test, rate, 0);
1472 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1473 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1474 
1475 	clk_put(clk);
1476 }
1477 
1478 /*
1479  * Test that calling clk_set_rate_range with a minimum rate higher than
1480  * the maximum rate returns an error.
1481  */
1482 static void clk_range_test_set_range_invalid(struct kunit *test)
1483 {
1484 	struct clk_dummy_context *ctx = test->priv;
1485 	struct clk_hw *hw = &ctx->hw;
1486 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1487 
1488 	KUNIT_EXPECT_LT(test,
1489 			clk_set_rate_range(clk,
1490 					   DUMMY_CLOCK_RATE_1 + 1000,
1491 					   DUMMY_CLOCK_RATE_1),
1492 			0);
1493 
1494 	clk_put(clk);
1495 }
1496 
1497 /*
1498  * Test that users can't set multiple disjoint ranges that would be
1499  * impossible to meet.
1500  */
1501 static void clk_range_test_multiple_disjoints_range(struct kunit *test)
1502 {
1503 	struct clk_dummy_context *ctx = test->priv;
1504 	struct clk_hw *hw = &ctx->hw;
1505 	struct clk *user1, *user2;
1506 
1507 	user1 = clk_hw_get_clk(hw, NULL);
1508 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
1509 
1510 	user2 = clk_hw_get_clk(hw, NULL);
1511 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
1512 
1513 	KUNIT_ASSERT_EQ(test,
1514 			clk_set_rate_range(user1, 1000, 2000),
1515 			0);
1516 
1517 	KUNIT_EXPECT_LT(test,
1518 			clk_set_rate_range(user2, 3000, 4000),
1519 			0);
1520 
1521 	clk_put(user2);
1522 	clk_put(user1);
1523 }
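/*
 * The two struct clk handles above act as independent consumers of the
 * same clk_hw: the core combines their requests by intersecting the
 * ranges, which is why two disjoint ranges cannot both be honoured and
 * the second request fails.
 */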
1524 
1525 /*
1526  * Test that if our clock has some boundaries and we try to round a rate
1527  * lower than the minimum, the returned rate will be within range.
1528  */
1529 static void clk_range_test_set_range_round_rate_lower(struct kunit *test)
1530 {
1531 	struct clk_dummy_context *ctx = test->priv;
1532 	struct clk_hw *hw = &ctx->hw;
1533 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1534 	long rate;
1535 
1536 	KUNIT_ASSERT_EQ(test,
1537 			clk_set_rate_range(clk,
1538 					   DUMMY_CLOCK_RATE_1,
1539 					   DUMMY_CLOCK_RATE_2),
1540 			0);
1541 
1542 	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
1543 	KUNIT_ASSERT_GT(test, rate, 0);
1544 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1545 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1546 
1547 	clk_put(clk);
1548 }
1549 
1550 /*
1551  * Test that if our clock has some boundaries and we try to set a rate
1552  * lower than the minimum, the new rate will be within range.
1553  */
1554 static void clk_range_test_set_range_set_rate_lower(struct kunit *test)
1555 {
1556 	struct clk_dummy_context *ctx = test->priv;
1557 	struct clk_hw *hw = &ctx->hw;
1558 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1559 	unsigned long rate;
1560 
1561 	KUNIT_ASSERT_EQ(test,
1562 			clk_set_rate_range(clk,
1563 					   DUMMY_CLOCK_RATE_1,
1564 					   DUMMY_CLOCK_RATE_2),
1565 			0);
1566 
1567 	KUNIT_ASSERT_EQ(test,
1568 			clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
1569 			0);
1570 
1571 	rate = clk_get_rate(clk);
1572 	KUNIT_ASSERT_GT(test, rate, 0);
1573 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1574 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1575 
1576 	clk_put(clk);
1577 }
1578 
1579 /*
1580  * Test that if our clock has some boundaries and we try to round and
1581  * set a rate lower than the minimum, the rate returned by
1582  * clk_round_rate() will be consistent with the new rate set by
1583  * clk_set_rate().
1584  */
1585 static void clk_range_test_set_range_set_round_rate_consistent_lower(struct kunit *test)
1586 {
1587 	struct clk_dummy_context *ctx = test->priv;
1588 	struct clk_hw *hw = &ctx->hw;
1589 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1590 	long rounded;
1591 
1592 	KUNIT_ASSERT_EQ(test,
1593 			clk_set_rate_range(clk,
1594 					   DUMMY_CLOCK_RATE_1,
1595 					   DUMMY_CLOCK_RATE_2),
1596 			0);
1597 
1598 	rounded = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
1599 	KUNIT_ASSERT_GT(test, rounded, 0);
1600 
1601 	KUNIT_ASSERT_EQ(test,
1602 			clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
1603 			0);
1604 
1605 	KUNIT_EXPECT_EQ(test, rounded, clk_get_rate(clk));
1606 
1607 	clk_put(clk);
1608 }
1609 
1610 /*
1611  * Test that if our clock has some boundaries and we try to round a rate
1612  * higher than the maximum, the returned rate will be within range.
1613  */
1614 static void clk_range_test_set_range_round_rate_higher(struct kunit *test)
1615 {
1616 	struct clk_dummy_context *ctx = test->priv;
1617 	struct clk_hw *hw = &ctx->hw;
1618 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1619 	long rate;
1620 
1621 	KUNIT_ASSERT_EQ(test,
1622 			clk_set_rate_range(clk,
1623 					   DUMMY_CLOCK_RATE_1,
1624 					   DUMMY_CLOCK_RATE_2),
1625 			0);
1626 
1627 	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
1628 	KUNIT_ASSERT_GT(test, rate, 0);
1629 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1630 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1631 
1632 	clk_put(clk);
1633 }
1634 
1635 /*
1636  * Test that if our clock has some boundaries and we try to set a rate
1637  * higher than the maximum, the new rate will be within range.
1638  */
1639 static void clk_range_test_set_range_set_rate_higher(struct kunit *test)
1640 {
1641 	struct clk_dummy_context *ctx = test->priv;
1642 	struct clk_hw *hw = &ctx->hw;
1643 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1644 	unsigned long rate;
1645 
1646 	KUNIT_ASSERT_EQ(test,
1647 			clk_set_rate_range(clk,
1648 					   DUMMY_CLOCK_RATE_1,
1649 					   DUMMY_CLOCK_RATE_2),
1650 			0);
1651 
1652 	KUNIT_ASSERT_EQ(test,
1653 			clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1654 			0);
1655 
1656 	rate = clk_get_rate(clk);
1657 	KUNIT_ASSERT_GT(test, rate, 0);
1658 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1659 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1660 
1661 	clk_put(clk);
1662 }
1663 
1664 /*
1665  * Test that if our clock has some boundaries and we try to round and
1666  * set a rate higher than the maximum, the rate returned by
1667  * clk_round_rate() will be consistent with the new rate set by
1668  * clk_set_rate().
1669  */
1670 static void clk_range_test_set_range_set_round_rate_consistent_higher(struct kunit *test)
1671 {
1672 	struct clk_dummy_context *ctx = test->priv;
1673 	struct clk_hw *hw = &ctx->hw;
1674 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1675 	long rounded;
1676 
1677 	KUNIT_ASSERT_EQ(test,
1678 			clk_set_rate_range(clk,
1679 					   DUMMY_CLOCK_RATE_1,
1680 					   DUMMY_CLOCK_RATE_2),
1681 			0);
1682 
1683 	rounded = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
1684 	KUNIT_ASSERT_GT(test, rounded, 0);
1685 
1686 	KUNIT_ASSERT_EQ(test,
1687 			clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1688 			0);
1689 
1690 	KUNIT_EXPECT_EQ(test, rounded, clk_get_rate(clk));
1691 
1692 	clk_put(clk);
1693 }
1694 
1695 /*
1696  * Test that if our clock has a rate lower than the minimum set by a
1697  * call to clk_set_rate_range(), the rate will be raised to match the
1698  * new minimum.
1699  *
1700  * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
1701  * modify the requested rate, which is the case for clk_dummy_rate_ops.
1702  */
1703 static void clk_range_test_set_range_get_rate_raised(struct kunit *test)
1704 {
1705 	struct clk_dummy_context *ctx = test->priv;
1706 	struct clk_hw *hw = &ctx->hw;
1707 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1708 	unsigned long rate;
1709 
1710 	KUNIT_ASSERT_EQ(test,
1711 			clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
1712 			0);
1713 
1714 	KUNIT_ASSERT_EQ(test,
1715 			clk_set_rate_range(clk,
1716 					   DUMMY_CLOCK_RATE_1,
1717 					   DUMMY_CLOCK_RATE_2),
1718 			0);
1719 
1720 	rate = clk_get_rate(clk);
1721 	KUNIT_ASSERT_GT(test, rate, 0);
1722 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
1723 
1724 	clk_put(clk);
1725 }
1726 
1727 /*
1728  * Test that if our clock has a rate higher than the maximum set by a
1729  * call to clk_set_rate_range(), the rate will be lowered to match the
1730  * new maximum.
1731  *
1732  * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
1733  * modify the requested rate, which is the case for clk_dummy_rate_ops.
1734  */
1735 static void clk_range_test_set_range_get_rate_lowered(struct kunit *test)
1736 {
1737 	struct clk_dummy_context *ctx = test->priv;
1738 	struct clk_hw *hw = &ctx->hw;
1739 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1740 	unsigned long rate;
1741 
1742 	KUNIT_ASSERT_EQ(test,
1743 			clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1744 			0);
1745 
1746 	KUNIT_ASSERT_EQ(test,
1747 			clk_set_rate_range(clk,
1748 					   DUMMY_CLOCK_RATE_1,
1749 					   DUMMY_CLOCK_RATE_2),
1750 			0);
1751 
1752 	rate = clk_get_rate(clk);
1753 	KUNIT_ASSERT_GT(test, rate, 0);
1754 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1755 
1756 	clk_put(clk);
1757 }
1758 
1759 static struct kunit_case clk_range_test_cases[] = {
1760 	KUNIT_CASE(clk_range_test_set_range),
1761 	KUNIT_CASE(clk_range_test_set_range_invalid),
1762 	KUNIT_CASE(clk_range_test_multiple_disjoints_range),
1763 	KUNIT_CASE(clk_range_test_set_range_round_rate_lower),
1764 	KUNIT_CASE(clk_range_test_set_range_set_rate_lower),
1765 	KUNIT_CASE(clk_range_test_set_range_set_round_rate_consistent_lower),
1766 	KUNIT_CASE(clk_range_test_set_range_round_rate_higher),
1767 	KUNIT_CASE(clk_range_test_set_range_set_rate_higher),
1768 	KUNIT_CASE(clk_range_test_set_range_set_round_rate_consistent_higher),
1769 	KUNIT_CASE(clk_range_test_set_range_get_rate_raised),
1770 	KUNIT_CASE(clk_range_test_set_range_get_rate_lowered),
1771 	{}
1772 };
1773 
1774 /*
1775  * Test suite for a basic rate clock, without any parent.
1776  *
1777  * These tests exercise the rate range API: clk_set_rate_range(),
1778  * clk_set_min_rate(), clk_set_max_rate(), clk_drop_range().
1779  */
1780 static struct kunit_suite clk_range_test_suite = {
1781 	.name = "clk-range-test",
1782 	.init = clk_test_init,
1783 	.exit = clk_test_exit,
1784 	.test_cases = clk_range_test_cases,
1785 };
1786 
1787 /*
1788  * Test that if we have several subsequent calls to
1789  * clk_set_rate_range(), the core will reevaluate whether a new rate is
1790  * needed each and every time.
1791  *
1792  * With clk_dummy_maximize_rate_ops, this means that the rate will
1793  * trail along the maximum as it evolves.
1794  */
1795 static void clk_range_test_set_range_rate_maximized(struct kunit *test)
1796 {
1797 	struct clk_dummy_context *ctx = test->priv;
1798 	struct clk_hw *hw = &ctx->hw;
1799 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1800 	unsigned long rate;
1801 
1802 	KUNIT_ASSERT_EQ(test,
1803 			clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1804 			0);
1805 
1806 	KUNIT_ASSERT_EQ(test,
1807 			clk_set_rate_range(clk,
1808 					   DUMMY_CLOCK_RATE_1,
1809 					   DUMMY_CLOCK_RATE_2),
1810 			0);
1811 
1812 	rate = clk_get_rate(clk);
1813 	KUNIT_ASSERT_GT(test, rate, 0);
1814 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1815 
1816 	KUNIT_ASSERT_EQ(test,
1817 			clk_set_rate_range(clk,
1818 					   DUMMY_CLOCK_RATE_1,
1819 					   DUMMY_CLOCK_RATE_2 - 1000),
1820 			0);
1821 
1822 	rate = clk_get_rate(clk);
1823 	KUNIT_ASSERT_GT(test, rate, 0);
1824 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
1825 
1826 	KUNIT_ASSERT_EQ(test,
1827 			clk_set_rate_range(clk,
1828 					   DUMMY_CLOCK_RATE_1,
1829 					   DUMMY_CLOCK_RATE_2),
1830 			0);
1831 
1832 	rate = clk_get_rate(clk);
1833 	KUNIT_ASSERT_GT(test, rate, 0);
1834 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1835 
1836 	clk_put(clk);
1837 }
1838 
1839 /*
1840  * Test that if we have several subsequent calls to
1841  * clk_set_rate_range(), across multiple users, the core will reevaluate
1842  * whether a new rate is needed each and every time.
1843  *
1844  * With clk_dummy_maximize_rate_ops, this means that the rate will
1845  * trail along the maximum as it evolves.
1846  */
1847 static void clk_range_test_multiple_set_range_rate_maximized(struct kunit *test)
1848 {
1849 	struct clk_dummy_context *ctx = test->priv;
1850 	struct clk_hw *hw = &ctx->hw;
1851 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1852 	struct clk *user1, *user2;
1853 	unsigned long rate;
1854 
1855 	user1 = clk_hw_get_clk(hw, NULL);
1856 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
1857 
1858 	user2 = clk_hw_get_clk(hw, NULL);
1859 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
1860 
1861 	KUNIT_ASSERT_EQ(test,
1862 			clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1863 			0);
1864 
1865 	KUNIT_ASSERT_EQ(test,
1866 			clk_set_rate_range(user1,
1867 					   0,
1868 					   DUMMY_CLOCK_RATE_2),
1869 			0);
1870 
1871 	rate = clk_get_rate(clk);
1872 	KUNIT_ASSERT_GT(test, rate, 0);
1873 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1874 
1875 	KUNIT_ASSERT_EQ(test,
1876 			clk_set_rate_range(user2,
1877 					   0,
1878 					   DUMMY_CLOCK_RATE_1),
1879 			0);
1880 
1881 	rate = clk_get_rate(clk);
1882 	KUNIT_ASSERT_GT(test, rate, 0);
1883 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
1884 
1885 	KUNIT_ASSERT_EQ(test,
1886 			clk_drop_range(user2),
1887 			0);
1888 
1889 	rate = clk_get_rate(clk);
1890 	KUNIT_ASSERT_GT(test, rate, 0);
1891 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1892 
1893 	clk_put(user2);
1894 	clk_put(user1);
1895 	clk_put(clk);
1896 }
1897 
1898 /*
1899  * Test that if we have several subsequent calls to
1900  * clk_set_rate_range(), across multiple users, the core will reevaluate
1901  * whether a new rate is needed, including when a user drops its clock.
1902  *
1903  * With clk_dummy_maximize_rate_ops, this means that the rate will
1904  * trail along the maximum as it evolves.
1905  */
1906 static void clk_range_test_multiple_set_range_rate_put_maximized(struct kunit *test)
1907 {
1908 	struct clk_dummy_context *ctx = test->priv;
1909 	struct clk_hw *hw = &ctx->hw;
1910 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1911 	struct clk *user1, *user2;
1912 	unsigned long rate;
1913 
1914 	user1 = clk_hw_get_clk(hw, NULL);
1915 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
1916 
1917 	user2 = clk_hw_get_clk(hw, NULL);
1918 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
1919 
1920 	KUNIT_ASSERT_EQ(test,
1921 			clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1922 			0);
1923 
1924 	KUNIT_ASSERT_EQ(test,
1925 			clk_set_rate_range(user1,
1926 					   0,
1927 					   DUMMY_CLOCK_RATE_2),
1928 			0);
1929 
1930 	rate = clk_get_rate(clk);
1931 	KUNIT_ASSERT_GT(test, rate, 0);
1932 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1933 
1934 	KUNIT_ASSERT_EQ(test,
1935 			clk_set_rate_range(user2,
1936 					   0,
1937 					   DUMMY_CLOCK_RATE_1),
1938 			0);
1939 
1940 	rate = clk_get_rate(clk);
1941 	KUNIT_ASSERT_GT(test, rate, 0);
1942 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
1943 
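	/*
	 * Dropping user2's reference is expected to also drop its rate
	 * range, letting the rate climb back to user1's maximum.
	 */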
1944 	clk_put(user2);
1945 
1946 	rate = clk_get_rate(clk);
1947 	KUNIT_ASSERT_GT(test, rate, 0);
1948 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1949 
1950 	clk_put(user1);
1951 	clk_put(clk);
1952 }
1953 
1954 static struct kunit_case clk_range_maximize_test_cases[] = {
1955 	KUNIT_CASE(clk_range_test_set_range_rate_maximized),
1956 	KUNIT_CASE(clk_range_test_multiple_set_range_rate_maximized),
1957 	KUNIT_CASE(clk_range_test_multiple_set_range_rate_put_maximized),
1958 	{}
1959 };
1960 
1961 /*
1962  * Test suite for a basic rate clock, without any parent.
1963  *
1964  * These tests exercise the rate range API: clk_set_rate_range(),
1965  * clk_set_min_rate(), clk_set_max_rate(), clk_drop_range(), with a
1966  * driver that will always try to run at the highest possible rate.
1967  */
1968 static struct kunit_suite clk_range_maximize_test_suite = {
1969 	.name = "clk-range-maximize-test",
1970 	.init = clk_maximize_test_init,
1971 	.exit = clk_test_exit,
1972 	.test_cases = clk_range_maximize_test_cases,
1973 };
1974 
1975 /*
1976  * Test that if we have several subsequent calls to
1977  * clk_set_rate_range(), the core will reevaluate whether a new rate is
1978  * needed each and every time.
1979  *
1980  * With clk_dummy_minimize_rate_ops, this means that the rate will
1981  * trail along the minimum as it evolves.
1982  */
1983 static void clk_range_test_set_range_rate_minimized(struct kunit *test)
1984 {
1985 	struct clk_dummy_context *ctx = test->priv;
1986 	struct clk_hw *hw = &ctx->hw;
1987 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1988 	unsigned long rate;
1989 
1990 	KUNIT_ASSERT_EQ(test,
1991 			clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
1992 			0);
1993 
1994 	KUNIT_ASSERT_EQ(test,
1995 			clk_set_rate_range(clk,
1996 					   DUMMY_CLOCK_RATE_1,
1997 					   DUMMY_CLOCK_RATE_2),
1998 			0);
1999 
2000 	rate = clk_get_rate(clk);
2001 	KUNIT_ASSERT_GT(test, rate, 0);
2002 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
2003 
2004 	KUNIT_ASSERT_EQ(test,
2005 			clk_set_rate_range(clk,
2006 					   DUMMY_CLOCK_RATE_1 + 1000,
2007 					   DUMMY_CLOCK_RATE_2),
2008 			0);
2009 
2010 	rate = clk_get_rate(clk);
2011 	KUNIT_ASSERT_GT(test, rate, 0);
2012 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
2013 
2014 	KUNIT_ASSERT_EQ(test,
2015 			clk_set_rate_range(clk,
2016 					   DUMMY_CLOCK_RATE_1,
2017 					   DUMMY_CLOCK_RATE_2),
2018 			0);
2019 
2020 	rate = clk_get_rate(clk);
2021 	KUNIT_ASSERT_GT(test, rate, 0);
2022 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
2023 
2024 	clk_put(clk);
2025 }
2026 
2027 /*
2028  * Test that if we have several subsequent calls to
2029  * clk_set_rate_range(), across multiple users, the core will reevaluate
2030  * whether a new rate is needed each and every time.
2031  *
2032  * With clk_dummy_minimize_rate_ops, this means that the rate will
2033  * trail along the minimum as it evolves.
2034  */
2035 static void clk_range_test_multiple_set_range_rate_minimized(struct kunit *test)
2036 {
2037 	struct clk_dummy_context *ctx = test->priv;
2038 	struct clk_hw *hw = &ctx->hw;
2039 	struct clk *clk = clk_hw_get_clk(hw, NULL);
2040 	struct clk *user1, *user2;
2041 	unsigned long rate;
2042 
2043 	user1 = clk_hw_get_clk(hw, NULL);
2044 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
2045 
2046 	user2 = clk_hw_get_clk(hw, NULL);
2047 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
2048 
2049 	KUNIT_ASSERT_EQ(test,
2050 			clk_set_rate_range(user1,
2051 					   DUMMY_CLOCK_RATE_1,
2052 					   ULONG_MAX),
2053 			0);
2054 
2055 	rate = clk_get_rate(clk);
2056 	KUNIT_ASSERT_GT(test, rate, 0);
2057 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
2058 
2059 	KUNIT_ASSERT_EQ(test,
2060 			clk_set_rate_range(user2,
2061 					   DUMMY_CLOCK_RATE_2,
2062 					   ULONG_MAX),
2063 			0);
2064 
2065 	rate = clk_get_rate(clk);
2066 	KUNIT_ASSERT_GT(test, rate, 0);
2067 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
2068 
2069 	KUNIT_ASSERT_EQ(test,
2070 			clk_drop_range(user2),
2071 			0);
2072 
2073 	rate = clk_get_rate(clk);
2074 	KUNIT_ASSERT_GT(test, rate, 0);
2075 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
2076 
2077 	clk_put(user2);
2078 	clk_put(user1);
2079 	clk_put(clk);
2080 }
2081 
2082 /*
2083  * Test that if we have several subsequent calls to
2084  * clk_set_rate_range(), across multiple users, the core will reevaluate
2085  * whether a new rate is needed, including when a user drops its clock.
2086  *
2087  * With clk_dummy_minimize_rate_ops, this means that the rate will
2088  * trail along the minimum as it evolves.
2089  */
2090 static void clk_range_test_multiple_set_range_rate_put_minimized(struct kunit *test)
2091 {
2092 	struct clk_dummy_context *ctx = test->priv;
2093 	struct clk_hw *hw = &ctx->hw;
2094 	struct clk *clk = clk_hw_get_clk(hw, NULL);
2095 	struct clk *user1, *user2;
2096 	unsigned long rate;
2097 
2098 	user1 = clk_hw_get_clk(hw, NULL);
2099 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
2100 
2101 	user2 = clk_hw_get_clk(hw, NULL);
2102 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
2103 
2104 	KUNIT_ASSERT_EQ(test,
2105 			clk_set_rate_range(user1,
2106 					   DUMMY_CLOCK_RATE_1,
2107 					   ULONG_MAX),
2108 			0);
2109 
2110 	rate = clk_get_rate(clk);
2111 	KUNIT_ASSERT_GT(test, rate, 0);
2112 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
2113 
2114 	KUNIT_ASSERT_EQ(test,
2115 			clk_set_rate_range(user2,
2116 					   DUMMY_CLOCK_RATE_2,
2117 					   ULONG_MAX),
2118 			0);
2119 
2120 	rate = clk_get_rate(clk);
2121 	KUNIT_ASSERT_GT(test, rate, 0);
2122 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
2123 
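	/*
	 * Dropping user2's reference is expected to also drop its rate
	 * range, letting the rate fall back to user1's minimum.
	 */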
2124 	clk_put(user2);
2125 
2126 	rate = clk_get_rate(clk);
2127 	KUNIT_ASSERT_GT(test, rate, 0);
2128 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
2129 
2130 	clk_put(user1);
2131 	clk_put(clk);
2132 }
2133 
2134 static struct kunit_case clk_range_minimize_test_cases[] = {
2135 	KUNIT_CASE(clk_range_test_set_range_rate_minimized),
2136 	KUNIT_CASE(clk_range_test_multiple_set_range_rate_minimized),
2137 	KUNIT_CASE(clk_range_test_multiple_set_range_rate_put_minimized),
2138 	{}
2139 };
2140 
2141 /*
2142  * Test suite for a basic rate clock, without any parent.
2143  *
2144  * These tests exercise the rate range API: clk_set_rate_range(),
2145  * clk_set_min_rate(), clk_set_max_rate(), clk_drop_range(), with a
2146  * driver that will always try to run at the lowest possible rate.
2147  */
2148 static struct kunit_suite clk_range_minimize_test_suite = {
2149 	.name = "clk-range-minimize-test",
2150 	.init = clk_minimize_test_init,
2151 	.exit = clk_test_exit,
2152 	.test_cases = clk_range_minimize_test_cases,
2153 };
2154 
2155 struct clk_leaf_mux_ctx {
2156 	struct clk_multiple_parent_ctx mux_ctx;
2157 	struct clk_hw hw;
2158 };
2159 
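/*
 * The init function below builds, roughly, the following hierarchy:
 *
 *	parent-0 (DUMMY_CLOCK_RATE_1) ---\
 *	                                  +-- test-mux -- test-clock
 *	parent-1 (DUMMY_CLOCK_RATE_2) ---/
 *
 * test-clock has CLK_SET_RATE_PARENT set, so any rate request on it is
 * forwarded to the mux.
 */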
2160 static int
2161 clk_leaf_mux_set_rate_parent_test_init(struct kunit *test)
2162 {
2163 	struct clk_leaf_mux_ctx *ctx;
2164 	const char *top_parents[2] = { "parent-0", "parent-1" };
2165 	int ret;
2166 
2167 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
2168 	if (!ctx)
2169 		return -ENOMEM;
2170 	test->priv = ctx;
2171 
2172 	ctx->mux_ctx.parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
2173 								    &clk_dummy_rate_ops,
2174 								    0);
2175 	ctx->mux_ctx.parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
2176 	ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[0].hw);
2177 	if (ret)
2178 		return ret;
2179 
2180 	ctx->mux_ctx.parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
2181 								    &clk_dummy_rate_ops,
2182 								    0);
2183 	ctx->mux_ctx.parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
2184 	ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[1].hw);
2185 	if (ret)
2186 		return ret;
2187 
2188 	ctx->mux_ctx.current_parent = 0;
2189 	ctx->mux_ctx.hw.init = CLK_HW_INIT_PARENTS("test-mux", top_parents,
2190 						   &clk_multiple_parents_mux_ops,
2191 						   0);
2192 	ret = clk_hw_register(NULL, &ctx->mux_ctx.hw);
2193 	if (ret)
2194 		return ret;
2195 
2196 	ctx->hw.init = CLK_HW_INIT_HW("test-clock", &ctx->mux_ctx.hw,
2197 				      &clk_dummy_single_parent_ops,
2198 				      CLK_SET_RATE_PARENT);
2199 	ret = clk_hw_register(NULL, &ctx->hw);
2200 	if (ret)
2201 		return ret;
2202 
2203 	return 0;
2204 }
2205 
2206 static void clk_leaf_mux_set_rate_parent_test_exit(struct kunit *test)
2207 {
2208 	struct clk_leaf_mux_ctx *ctx = test->priv;
2209 
2210 	clk_hw_unregister(&ctx->hw);
2211 	clk_hw_unregister(&ctx->mux_ctx.hw);
2212 	clk_hw_unregister(&ctx->mux_ctx.parents_ctx[0].hw);
2213 	clk_hw_unregister(&ctx->mux_ctx.parents_ctx[1].hw);
2214 }
2215 
2216 /*
2217  * Test that, for a clock that will forward any rate request to its
2218  * parent, the rate request structure returned by __clk_determine_rate
2219  * is sane and will be what we expect.
2220  */
2221 static void clk_leaf_mux_set_rate_parent_determine_rate(struct kunit *test)
2222 {
2223 	struct clk_leaf_mux_ctx *ctx = test->priv;
2224 	struct clk_hw *hw = &ctx->hw;
2225 	struct clk *clk = clk_hw_get_clk(hw, NULL);
2226 	struct clk_rate_request req;
2227 	unsigned long rate;
2228 	int ret;
2229 
2230 	rate = clk_get_rate(clk);
2231 	KUNIT_ASSERT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
2232 
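	/*
	 * Build a rate request for DUMMY_CLOCK_RATE_2 and let the core
	 * resolve it; since the leaf clock has CLK_SET_RATE_PARENT, the
	 * request should be forwarded up to the mux.
	 */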
2233 	clk_hw_init_rate_request(hw, &req, DUMMY_CLOCK_RATE_2);
2234 
2235 	ret = __clk_determine_rate(hw, &req);
2236 	KUNIT_ASSERT_EQ(test, ret, 0);
2237 
2238 	KUNIT_EXPECT_EQ(test, req.rate, DUMMY_CLOCK_RATE_2);
2239 	KUNIT_EXPECT_EQ(test, req.best_parent_rate, DUMMY_CLOCK_RATE_2);
2240 	KUNIT_EXPECT_PTR_EQ(test, req.best_parent_hw, &ctx->mux_ctx.hw);
2241 
2242 	clk_put(clk);
2243 }
2244 
2245 static struct kunit_case clk_leaf_mux_set_rate_parent_test_cases[] = {
2246 	KUNIT_CASE(clk_leaf_mux_set_rate_parent_determine_rate),
2247 	{}
2248 };
2249 
2250 /*
2251  * Test suite for a clock whose parent is a mux with multiple parents.
2252  * The leaf clock has CLK_SET_RATE_PARENT, and will forward rate
2253  * requests to the mux, which will then select which parent is the best
2254  * fit for a given rate.
2255  *
2256  * These tests exercise the behaviour of muxes, and the proper selection
2257  * of parents.
2258  */
2259 static struct kunit_suite clk_leaf_mux_set_rate_parent_test_suite = {
2260 	.name = "clk-leaf-mux-set-rate-parent",
2261 	.init = clk_leaf_mux_set_rate_parent_test_init,
2262 	.exit = clk_leaf_mux_set_rate_parent_test_exit,
2263 	.test_cases = clk_leaf_mux_set_rate_parent_test_cases,
2264 };
2265 
2266 struct clk_mux_notifier_rate_change {
2267 	bool done;
2268 	unsigned long old_rate;
2269 	unsigned long new_rate;
2270 	wait_queue_head_t wq;
2271 };
2272 
2273 struct clk_mux_notifier_ctx {
2274 	struct clk_multiple_parent_ctx mux_ctx;
2275 	struct clk *clk;
2276 	struct notifier_block clk_nb;
2277 	struct clk_mux_notifier_rate_change pre_rate_change;
2278 	struct clk_mux_notifier_rate_change post_rate_change;
2279 };
2280 
2281 #define NOTIFIER_TIMEOUT_MS 100
2282 
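/*
 * Notifier callback shared by the tests below: it records the rates
 * reported with the PRE_ and POST_RATE_CHANGE events and wakes up the
 * matching waitqueue so a test can wait for each notification with a
 * timeout.
 */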
2283 static int clk_mux_notifier_callback(struct notifier_block *nb,
2284 				     unsigned long action, void *data)
2285 {
2286 	struct clk_notifier_data *clk_data = data;
2287 	struct clk_mux_notifier_ctx *ctx = container_of(nb,
2288 							struct clk_mux_notifier_ctx,
2289 							clk_nb);
2290 
2291 	if (action & PRE_RATE_CHANGE) {
2292 		ctx->pre_rate_change.old_rate = clk_data->old_rate;
2293 		ctx->pre_rate_change.new_rate = clk_data->new_rate;
2294 		ctx->pre_rate_change.done = true;
2295 		wake_up_interruptible(&ctx->pre_rate_change.wq);
2296 	}
2297 
2298 	if (action & POST_RATE_CHANGE) {
2299 		ctx->post_rate_change.old_rate = clk_data->old_rate;
2300 		ctx->post_rate_change.new_rate = clk_data->new_rate;
2301 		ctx->post_rate_change.done = true;
2302 		wake_up_interruptible(&ctx->post_rate_change.wq);
2303 	}
2304 
2305 	return 0;
2306 }
2307 
2308 static int clk_mux_notifier_test_init(struct kunit *test)
2309 {
2310 	struct clk_mux_notifier_ctx *ctx;
2311 	const char *top_parents[2] = { "parent-0", "parent-1" };
2312 	int ret;
2313 
2314 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
2315 	if (!ctx)
2316 		return -ENOMEM;
2317 	test->priv = ctx;
2318 	ctx->clk_nb.notifier_call = clk_mux_notifier_callback;
2319 	init_waitqueue_head(&ctx->pre_rate_change.wq);
2320 	init_waitqueue_head(&ctx->post_rate_change.wq);
2321 
2322 	ctx->mux_ctx.parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
2323 								    &clk_dummy_rate_ops,
2324 								    0);
2325 	ctx->mux_ctx.parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
2326 	ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[0].hw);
2327 	if (ret)
2328 		return ret;
2329 
2330 	ctx->mux_ctx.parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
2331 								    &clk_dummy_rate_ops,
2332 								    0);
2333 	ctx->mux_ctx.parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
2334 	ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[1].hw);
2335 	if (ret)
2336 		return ret;
2337 
2338 	ctx->mux_ctx.current_parent = 0;
2339 	ctx->mux_ctx.hw.init = CLK_HW_INIT_PARENTS("test-mux", top_parents,
2340 						   &clk_multiple_parents_mux_ops,
2341 						   0);
2342 	ret = clk_hw_register(NULL, &ctx->mux_ctx.hw);
2343 	if (ret)
2344 		return ret;
2345 
2346 	ctx->clk = clk_hw_get_clk(&ctx->mux_ctx.hw, NULL);
2347 	ret = clk_notifier_register(ctx->clk, &ctx->clk_nb);
2348 	if (ret)
2349 		return ret;
2350 
2351 	return 0;
2352 }
2353 
2354 static void clk_mux_notifier_test_exit(struct kunit *test)
2355 {
2356 	struct clk_mux_notifier_ctx *ctx = test->priv;
2357 	struct clk *clk = ctx->clk;
2358 
2359 	clk_notifier_unregister(clk, &ctx->clk_nb);
2360 	clk_put(clk);
2361 
2362 	clk_hw_unregister(&ctx->mux_ctx.hw);
2363 	clk_hw_unregister(&ctx->mux_ctx.parents_ctx[0].hw);
2364 	clk_hw_unregister(&ctx->mux_ctx.parents_ctx[1].hw);
2365 }
2366 
2367 /*
2368  * Test that if we have a notifier registered on a mux, the core
2369  * will notify us when we switch to another parent, with the proper
2370  * old and new rates.
2371  */
2372 static void clk_mux_notifier_set_parent_test(struct kunit *test)
2373 {
2374 	struct clk_mux_notifier_ctx *ctx = test->priv;
2375 	struct clk_hw *hw = &ctx->mux_ctx.hw;
2376 	struct clk *clk = clk_hw_get_clk(hw, NULL);
2377 	struct clk *new_parent = clk_hw_get_clk(&ctx->mux_ctx.parents_ctx[1].hw, NULL);
2378 	int ret;
2379 
2380 	ret = clk_set_parent(clk, new_parent);
2381 	KUNIT_ASSERT_EQ(test, ret, 0);
2382 
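	/*
	 * Wait (with a timeout) for the callback to report both the
	 * pre- and post-change events, then check the rates it captured.
	 */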
2383 	ret = wait_event_interruptible_timeout(ctx->pre_rate_change.wq,
2384 					       ctx->pre_rate_change.done,
2385 					       msecs_to_jiffies(NOTIFIER_TIMEOUT_MS));
2386 	KUNIT_ASSERT_GT(test, ret, 0);
2387 
2388 	KUNIT_EXPECT_EQ(test, ctx->pre_rate_change.old_rate, DUMMY_CLOCK_RATE_1);
2389 	KUNIT_EXPECT_EQ(test, ctx->pre_rate_change.new_rate, DUMMY_CLOCK_RATE_2);
2390 
2391 	ret = wait_event_interruptible_timeout(ctx->post_rate_change.wq,
2392 					       ctx->post_rate_change.done,
2393 					       msecs_to_jiffies(NOTIFIER_TIMEOUT_MS));
2394 	KUNIT_ASSERT_GT(test, ret, 0);
2395 
2396 	KUNIT_EXPECT_EQ(test, ctx->post_rate_change.old_rate, DUMMY_CLOCK_RATE_1);
2397 	KUNIT_EXPECT_EQ(test, ctx->post_rate_change.new_rate, DUMMY_CLOCK_RATE_2);
2398 
2399 	clk_put(new_parent);
2400 	clk_put(clk);
2401 }
2402 
2403 static struct kunit_case clk_mux_notifier_test_cases[] = {
2404 	KUNIT_CASE(clk_mux_notifier_set_parent_test),
2405 	{}
2406 };
2407 
2408 /*
2409  * Test suite for a mux with multiple parents, and a notifier registered
2410  * on the mux.
2411  *
2412  * These tests exercise the behaviour of notifiers.
2413  */
2414 static struct kunit_suite clk_mux_notifier_test_suite = {
2415 	.name = "clk-mux-notifier",
2416 	.init = clk_mux_notifier_test_init,
2417 	.exit = clk_mux_notifier_test_exit,
2418 	.test_cases = clk_mux_notifier_test_cases,
2419 };
2420 
2421 static int
2422 clk_mux_no_reparent_test_init(struct kunit *test)
2423 {
2424 	struct clk_multiple_parent_ctx *ctx;
2425 	const char *parents[2] = { "parent-0", "parent-1" };
2426 	int ret;
2427 
2428 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
2429 	if (!ctx)
2430 		return -ENOMEM;
2431 	test->priv = ctx;
2432 
2433 	ctx->parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
2434 							    &clk_dummy_rate_ops,
2435 							    0);
2436 	ctx->parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
2437 	ret = clk_hw_register(NULL, &ctx->parents_ctx[0].hw);
2438 	if (ret)
2439 		return ret;
2440 
2441 	ctx->parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
2442 							    &clk_dummy_rate_ops,
2443 							    0);
2444 	ctx->parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
2445 	ret = clk_hw_register(NULL, &ctx->parents_ctx[1].hw);
2446 	if (ret)
2447 		return ret;
2448 
2449 	ctx->current_parent = 0;
2450 	ctx->hw.init = CLK_HW_INIT_PARENTS("test-mux", parents,
2451 					   &clk_multiple_parents_no_reparent_mux_ops,
2452 					   0);
2453 	ret = clk_hw_register(NULL, &ctx->hw);
2454 	if (ret)
2455 		return ret;
2456 
2457 	return 0;
2458 }
2459 
2460 static void
2461 clk_mux_no_reparent_test_exit(struct kunit *test)
2462 {
2463 	struct clk_multiple_parent_ctx *ctx = test->priv;
2464 
2465 	clk_hw_unregister(&ctx->hw);
2466 	clk_hw_unregister(&ctx->parents_ctx[0].hw);
2467 	clk_hw_unregister(&ctx->parents_ctx[1].hw);
2468 }
2469 
2470 /*
2471  * Test that if we have a mux that cannot change parent and we call
2472  * clk_round_rate() on it with a rate that should cause it to change
2473  * parent, it won't.
2474  */
2475 static void clk_mux_no_reparent_round_rate(struct kunit *test)
2476 {
2477 	struct clk_multiple_parent_ctx *ctx = test->priv;
2478 	struct clk_hw *hw = &ctx->hw;
2479 	struct clk *clk = clk_hw_get_clk(hw, NULL);
2480 	struct clk *other_parent, *parent;
2481 	unsigned long other_parent_rate;
2482 	unsigned long parent_rate;
2483 	long rounded_rate;
2484 
2485 	parent = clk_get_parent(clk);
2486 	KUNIT_ASSERT_PTR_NE(test, parent, NULL);
2487 
2488 	parent_rate = clk_get_rate(parent);
2489 	KUNIT_ASSERT_GT(test, parent_rate, 0);
2490 
2491 	other_parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
2492 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, other_parent);
2493 	KUNIT_ASSERT_FALSE(test, clk_is_match(parent, other_parent));
2494 
2495 	other_parent_rate = clk_get_rate(other_parent);
2496 	KUNIT_ASSERT_GT(test, other_parent_rate, 0);
2497 	clk_put(other_parent);
2498 
2499 	rounded_rate = clk_round_rate(clk, other_parent_rate);
2500 	KUNIT_ASSERT_GT(test, rounded_rate, 0);
2501 	KUNIT_EXPECT_EQ(test, rounded_rate, parent_rate);
2502 
2503 	clk_put(clk);
2504 }
2505 
2506 /*
2507  * Test that if we have a mux that cannot change parent and we call
2508  * clk_set_rate() on it with a rate that should cause it to change
2509  * parent, it won't.
2510  */
2511 static void clk_mux_no_reparent_set_rate(struct kunit *test)
2512 {
2513 	struct clk_multiple_parent_ctx *ctx = test->priv;
2514 	struct clk_hw *hw = &ctx->hw;
2515 	struct clk *clk = clk_hw_get_clk(hw, NULL);
2516 	struct clk *other_parent, *parent;
2517 	unsigned long other_parent_rate;
2518 	unsigned long parent_rate;
2519 	unsigned long rate;
2520 	int ret;
2521 
2522 	parent = clk_get_parent(clk);
2523 	KUNIT_ASSERT_PTR_NE(test, parent, NULL);
2524 
2525 	parent_rate = clk_get_rate(parent);
2526 	KUNIT_ASSERT_GT(test, parent_rate, 0);
2527 
2528 	other_parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
2529 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, other_parent);
2530 	KUNIT_ASSERT_FALSE(test, clk_is_match(parent, other_parent));
2531 
2532 	other_parent_rate = clk_get_rate(other_parent);
2533 	KUNIT_ASSERT_GT(test, other_parent_rate, 0);
2534 	clk_put(other_parent);
2535 
2536 	ret = clk_set_rate(clk, other_parent_rate);
2537 	KUNIT_ASSERT_EQ(test, ret, 0);
2538 
2539 	rate = clk_get_rate(clk);
2540 	KUNIT_ASSERT_GT(test, rate, 0);
2541 	KUNIT_EXPECT_EQ(test, rate, parent_rate);
2542 
2543 	clk_put(clk);
2544 }
2545 
2546 static struct kunit_case clk_mux_no_reparent_test_cases[] = {
2547 	KUNIT_CASE(clk_mux_no_reparent_round_rate),
2548 	KUNIT_CASE(clk_mux_no_reparent_set_rate),
2549 	{}
2550 };
2551 
2552 /*
2553  * Test suite for a clock mux that isn't allowed to change parent, using
2554  * the clk_hw_determine_rate_no_reparent() helper.
2555  *
2556  * These tests exercise that helper, and the proper selection of
2557  * rates and parents.
2558  */
2559 static struct kunit_suite clk_mux_no_reparent_test_suite = {
2560 	.name = "clk-mux-no-reparent",
2561 	.init = clk_mux_no_reparent_test_init,
2562 	.exit = clk_mux_no_reparent_test_exit,
2563 	.test_cases = clk_mux_no_reparent_test_cases,
2564 };
2565 
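/*
 * Register all the suites above with the KUnit framework. They are
 * presumably built when the clk KUnit Kconfig option is enabled
 * (CONFIG_CLK_KUNIT_TEST at the time of writing, an assumption) and can
 * then be run with the usual KUnit tooling, e.g. something like:
 *
 *	./tools/testing/kunit/kunit.py run
 */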
2566 kunit_test_suites(
2567 	&clk_leaf_mux_set_rate_parent_test_suite,
2568 	&clk_test_suite,
2569 	&clk_multiple_parents_mux_test_suite,
2570 	&clk_mux_no_reparent_test_suite,
2571 	&clk_mux_notifier_test_suite,
2572 	&clk_orphan_transparent_multiple_parent_mux_test_suite,
2573 	&clk_orphan_transparent_single_parent_test_suite,
2574 	&clk_orphan_two_level_root_last_test_suite,
2575 	&clk_range_test_suite,
2576 	&clk_range_maximize_test_suite,
2577 	&clk_range_minimize_test_suite,
2578 	&clk_single_parent_mux_test_suite,
2579 	&clk_uncached_test_suite
2580 );
2581 MODULE_LICENSE("GPL v2");
2582